diff --git a/.agents/skills/source-command-ifrs-audit/SKILL.md b/.agents/skills/source-command-ifrs-audit/SKILL.md new file mode 100644 index 00000000000..021258a210b --- /dev/null +++ b/.agents/skills/source-command-ifrs-audit/SKILL.md @@ -0,0 +1,73 @@ +--- +name: "source-command-ifrs-audit" +description: "Scan the current codebase for IFRS Accounting Standards compliance gaps." +--- + +# source-command-ifrs-audit + +Use this skill when the user asks to run the migrated source command `ifrs-audit`. + +## Command Template + +Perform an IFRS Accounting Standards compliance audit of the current codebase. + +Reference: `.ai/rules/19-ifrs-compliance.md` + +Scan for: + +### 1. Scope and applicability + +- Check `docs/project.md` reporting standards selection +- Flag financial-reporting features when IFRS scope is unset +- Confirm IFRS is not mixed into PDPL-only privacy checks + +### 2. Monetary precision + +- Search for `number`, `float`, `double`, or JavaScript arithmetic used for money +- Confirm decimal-safe handling for monetary calculations +- Confirm currency codes are stored explicitly + +### 3. Accounting records + +- Confirm transaction date, posting date, reporting period, status, source + reference, and created/approved metadata where relevant +- Confirm posted records are not overwritten silently +- Confirm draft, posted, voided, and reversed states are distinct where relevant + +### 4. Audit trail + +- Confirm accounting mutations are traceable by actor, timestamp, source, and reason +- Confirm corrections use adjustments, reversals, or versioned history + +### 5. Period close and reversals + +- Confirm closed periods cannot be mutated without controlled adjustments +- Confirm void/reversal behavior exists for posted records + +### 6. 
Reports and exports + +- Confirm financial statements include reporting period and basis of preparation +- Confirm exports are reproducible from persisted source records +- Confirm comparative-period behavior exists or is explicitly out of scope + +### 7. Disclosure and notes + +- Flag missing accounting-policy note support where the app generates formal + financial statements +- Flag missing materiality/disclosure inputs where formal IFRS reports are claimed + +### 8. Tests + +- Confirm tests cover rounding, currency conversion, period boundaries, reversals, + report reproducibility, and audit logging + +### Output format + +Return findings grouped by severity: + +- **Critical** — immediate financial-reporting integrity risk +- **High** — must fix before audited or investor-facing reporting +- **Medium** — fix before expanding finance/reporting scope +- **Low** — documentation, process, or explicit-scope gap + +End with a summary checklist of compliant items. diff --git a/.agents/skills/source-command-pdpl-audit/SKILL.md b/.agents/skills/source-command-pdpl-audit/SKILL.md new file mode 100644 index 00000000000..7397df4cc84 --- /dev/null +++ b/.agents/skills/source-command-pdpl-audit/SKILL.md @@ -0,0 +1,64 @@ +--- +name: "source-command-pdpl-audit" +description: "Scan the current codebase for PDPL (Oman Royal Decree 6/2022) compliance gaps." +--- + +# source-command-pdpl-audit + +Use this skill when the user asks to run the migrated source command `pdpl-audit`. + +## Command Template + +Perform a PDPL compliance audit of the current codebase. + +Reference: `.ai/rules/15-pdpl-compliance.md` + +Scan for: + +### 1. PII in non-production contexts + +- Search tests, seeds, fixtures for real email patterns, phone numbers, national IDs +- Search git history for any PII accidentally committed +- Flag: `test@gmail.com`, Omani phone patterns (`+968 9...`), names in test data + +### 2. 
Logging + +- Search pino/console.log calls that include `email`, `phone`, `nationalId`, `ip` +- Confirm Sentry `beforeSend` strips user PII fields + +### 3. Data schema + +- Check DB schema for PII columns — confirm they have `-- pdpl:personal` comments +- Verify soft-delete pattern exists for user tables +- Confirm erasure path exists + +### 4. Privacy notice + +- Confirm Arabic-language privacy notice exists in `messages/ar.json` +- Confirm it covers: data types, purpose, legal basis, retention, rights, DPO contact + +### 5. Consent + +- Confirm consent flows are explicit and separate per purpose +- Confirm withdrawal mechanism exists + +### 6. Data residency + +- Note which cloud providers store data and in which regions +- Flag any Level 3/4 data stored outside Oman without documented TRA approval + +### 7. Breach response + +- Confirm production environment variables are set correctly (no localhost or development-only values where production URLs are required) +- Check if a breach notification runbook exists (`.local/incidents/` or similar) + +### Output format + +Return findings grouped by severity: + +- **Critical** — immediate compliance risk (e.g. real PII in tests) +- **High** — must fix before next release +- **Medium** — fix within 30 days +- **Low** — documentation or process gap + +End with a summary checklist of compliant items. diff --git a/.ai/README.md b/.ai/README.md new file mode 100644 index 00000000000..551b8e11354 --- /dev/null +++ b/.ai/README.md @@ -0,0 +1,52 @@ +# .ai/rules — Rule Map + +Tool-agnostic guidance files. Readable by Claude Code, Cursor, Codex, OpenCode, Gemini, and others. 
+ +## Always active + +| File | When to load | +| ---------------------------- | ---------------------------------------------------------------------- | +| `00-constitution.md` | Every task — non-negotiables | +| `17-aws-well-architected.md` | Every non-trivial task — architecture, implementation, and review lens | +| `18-pr-readiness.md` | Every PR, review, ship, and merge task — docs, tests, and CI gates | + +## Stack-specific (load one) + +| File | When to load | +| ---------------------- | -------------------------------------- | +| `01-stack-a-nestjs.md` | NestJS + Drizzle + PostgreSQL → Render | +| `01-stack-b-convex.md` | Convex → Vercel | + +## By task area + +| File | When to load | +| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| `02-openapi-contracts.md` | Any API change (Stack A) | +| `03-better-auth.md` | Auth flows, session management, user management | +| `04-drizzle-orm.md` | Database schema, migrations, queries (Stack A) | +| `05-nestjs-patterns.md` | NestJS modules, guards, pipes, interceptors (Stack A) | +| `06-convex-patterns.md` | Convex schema, functions, real-time (Stack B) | +| `07-vite-react-spa.md` | apps/web (React SPA, logged-in app) | +| `08-nextjs-www.md` | apps/www (Next.js marketing site) | +| `09-next-intl-i18n.md` | Any user-facing text (AR/EN, RTL) | +| `10-error-handling.md` | Error boundaries, Problem+JSON, retry logic | +| `11-testing.md` | Unit, integration, E2E, accessibility tests | +| `12-telemetry.md` | Sentry, pino, OTEL, SLOs | +| `13-security.md` | OWASP, ZAP, headers, input validation | +| `14-secret-management.md` | Doppler, secret rotation, per-app config | +| `15-pdpl-compliance.md` | Oman PDPL (always) + opt-in TRA/CDC/CBO/FSA/MOH privacy/regulatory overlays | +| `16-deployment.md` | Render vs Vercel, Docker, CI/CD | +| `19-ifrs-compliance.md` | Financial statements, accounting records, ledgers, 
revenue recognition, leases, impairments, audit exports, or IFRS-scoped projects | +| `20-environments.md` | Environment topology, Doppler/provider tiers, preview envs, and env drift enforcement | +| `21-agent-orchestration.md` | Task lifecycle, GitHub Issues/Projects workflow, slash-command sequencing, and Claude/Codex handoff | +| `22-kanban-console.md` | Every product change in this T3 Code fork - architecture, GitHub Projects SSOT, GitOps, UI/i18n, and validation | + +## Product Note + +This repo intentionally uses the minimal governance profile plus `22-kanban-console.md`. Stack A/B rules are not active unless a future phase explicitly adopts that architecture. + +## Philosophy + +- "Earn your rules" — load only what the task needs +- Concise by default (~60–100 lines); deeper specs linked, not embedded +- Single source of truth: AGENTS.md → .ai/rules/ → task-specific context diff --git a/.ai/i18n/agent-banners.json b/.ai/i18n/agent-banners.json new file mode 100644 index 00000000000..558026bc654 --- /dev/null +++ b/.ai/i18n/agent-banners.json @@ -0,0 +1,14 @@ +{ + "preflight.passed": { + "en": "Preflight passed", + "ar": "اكتمل الفحص المسبق" + }, + "envAudit.passed": { + "en": "Env audit passed", + "ar": "اكتمل تدقيق البيئات" + }, + "initProject.environmentTiers.prompt": { + "en": "How many environment tiers should this project use: 2 or 3?", + "ar": "كم عدد طبقات البيئة التي يجب أن يستخدمها هذا المشروع: 2 أم 3؟" + } +} diff --git a/.ai/rules/00-constitution.md b/.ai/rules/00-constitution.md new file mode 100644 index 00000000000..89acfbaa726 --- /dev/null +++ b/.ai/rules/00-constitution.md @@ -0,0 +1,46 @@ +# 00 — Constitution + +Non-negotiable rules. Apply to every task, every stack. 
+ +## Code quality + +- `any` is banned — use `unknown` with type guards +- `class-validator` is banned — Zod only +- TypeScript strict mode always on (`noUncheckedIndexedAccess`, `exactOptionalPropertyTypes`) +- No hardcoded secrets — Doppler only; see `14-secret-management.md` +- Conventional Commits: `feat|fix|chore|docs|refactor|test|perf|ops` +- No `--no-verify` or `--no-gpg-sign` unless user explicitly requests + +## Data & privacy + +- **PDPL (Royal Decree 6/2022)** applies to every project — see `15-pdpl-compliance.md` +- No real PII in tests, logs, commits, PR text, or screenshots — use synthetic/anonymized data only +- Arabic-language privacy notices are mandatory (PDPL Art. 4) + +## i18n + +- AR (Arabic) and EN (English) strings required for every user-facing change +- RTL layout required wherever Arabic strings render +- Reference: `09-next-intl-i18n.md` + +## Auth + +- Better Auth **v1.4** is pinned — do not upgrade to 1.5 without reading migration notes + - 1.5 breaking: drizzle-adapter extracted to `@better-auth/drizzle-adapter`, InferUser/InferSession removed, API Key plugin moved to `@better-auth/api-key`, `$ERROR_CODES` type changed to `RawError` +- `BETTER_AUTH_URL` must match the exact request origin and be in `trustedOrigins` + +## Output rules + +- Non-code files (reports, docs, analysis, temp outputs) → `.local/`, never project root +- PRs ≤ 400 LOC excluding generated files — split scope and open follow-up issue if larger +- Generated files (Zod schemas, HTTP clients) must never be edited by hand + +## Validation + +Run before every commit: + +```bash +bun check # types + lint + tests — fix failures, do not skip +``` + +If CI fails on a clean check, investigate before bypassing. diff --git a/.ai/rules/13-security.md b/.ai/rules/13-security.md new file mode 100644 index 00000000000..4f85e0afe81 --- /dev/null +++ b/.ai/rules/13-security.md @@ -0,0 +1,113 @@ +# 13 — Security + +OWASP Top 10 controls, CI security scanning, and security headers. 
+ +## OWASP Top 10 controls + +| Risk | Control | +| ----------------------------- | -------------------------------------------------------------------- | +| A01 Broken Access Control | AuthGuard on all protected routes; no client-side-only auth | +| A02 Cryptographic Failures | Secrets via Doppler; HTTPS everywhere; never log tokens | +| A03 Injection | Zod validation at all boundaries; Drizzle parameterized queries only | +| A04 Insecure Design | Problem+JSON error format; PDPL privacy by design | +| A05 Security Misconfiguration | Security headers (see below); no debug mode in prod | +| A06 Vulnerable Components | Dependabot + bun audit in CI; Trivy remains future scope | +| A07 Auth Failures | Better Auth v1.4; CSRF on by default; rate limiting | +| A08 Software Integrity | TruffleHog in CI; gitleaks pre-commit; SBOM remains future scope | +| A09 Logging Failures | pino structured logs; Sentry error tracking; no PII in logs | +| A10 SSRF | Validate and allowlist all outbound URLs in server-side fetch | + +## Security headers + +Set these on every response (NestJS `helmet`, Next.js `headers()` config): + +``` +# Customize per app (nonces/hashes for inline scripts, real API origins for connect-src). 
Content-Security-Policy: default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self'; connect-src 'self'; object-src 'none'; base-uri 'self'; frame-ancestors 'none'; form-action 'self'; upgrade-insecure-requests +X-Frame-Options: DENY +X-Content-Type-Options: nosniff +Referrer-Policy: strict-origin-when-cross-origin +Permissions-Policy: camera=(), microphone=(), geolocation=() +Strict-Transport-Security: max-age=31536000; includeSubDomains +``` + +## CI security scan pipeline + +| Tool                | What it scans                                                     | +| ------------------- | ----------------------------------------------------------------- | +| TruffleHog          | Verified secrets in git history                                   | +| Semgrep             | SAST — OWASP rules, secret patterns, custom rules in `.semgrep/`  | +| bun audit           | Dependency CVEs                                                   | +| Dependabot          | npm/Bun-compatible dependency and GitHub Actions updates          | +| Claude Code Action  | Optional PR security review when Anthropic secrets are configured | +| Trivy (filesystem)  | Future scope — dependencies + config files                        | +| Trivy (Docker)      | Future scope — container image vulnerabilities                    | +| OWASP ZAP           | Future scope — running app HTTP attacks                           | +| gitleaks            | Local/pre-commit secret scanning                                  | +| Anchore (CycloneDX) | Future scope — SBOM generation                                    | + +Baseline CI lives in `.github/workflows/security.yml` and runs on PRs, pushes +to `main`, and `workflow_call` from derived repos. Semgrep Cloud upload is +enabled when `SEMGREP_APP_TOKEN` is configured; otherwise CI still runs in OSS +mode. + +## On-demand deep audit + +Use `/security-audit [target]` when a PR touches auth, secrets, CI workflow +execution, multitenancy, payment/accounting boundaries, or another high-risk +surface. The command runs `scripts/security-audit.sh`, writes reports under +`.local/security-audit/<target>/`, and must stay local-only through Claude +Code CLI. Do not route the Carlini-style probe through a raw API runner. 
## Semgrep custom rules + +Add project-specific rules in `.semgrep/`: + +```yaml +# .semgrep/no-console-log.yaml +rules: + - id: no-console-log-in-prod + pattern: console.log(...) + message: Use pino logger instead of console.log + severity: WARNING + languages: [typescript] +``` + +## Secret scanning (gitleaks) + +Configure `.gitleaks.toml`: + +```toml +# Prefer narrow paths — a broad *.test.ts allowlist can hide real secrets in tests. +[allowlist] + paths = [".local/"] +``` + +Run pre-commit: `gitleaks detect --source . --verbose` + +## Consuming repo workflow snippet + +```yaml +jobs: + security: + uses: <owner>/<repo>/.github/workflows/security.yml@main +``` + +## OWASP ZAP (DAST) + +Run ZAP against the dev/staging environment in CI: + +```bash +docker run -t owasp/zap2docker-stable zap-baseline.py \ + -t https://staging.example.com \ + -r .local/zap-report.html +``` + +Results in `.local/zap-report.html` (gitignored). + +## Input validation checklist + +- [ ] All user input validated with Zod before use +- [ ] File uploads: type check, size limit, no executable extensions +- [ ] URL parameters: validate and sanitize before use in queries or redirects +- [ ] SQL: never interpolate user input — always use Drizzle ORM or parameterized queries diff --git a/.ai/rules/14-secret-management.md b/.ai/rules/14-secret-management.md new file mode 100644 index 00000000000..f6f266a582c --- /dev/null +++ b/.ai/rules/14-secret-management.md @@ -0,0 +1,127 @@ +# 14 — Secret Management + +Doppler is the single source of truth for all secrets. +Never hardcode secrets. Never commit `.env*` files (except `.env.example` with placeholders). 
+ +## Doppler setup + +```bash +# Install +brew install dopplerhq/cli/doppler + +# Authenticate +doppler login + +# Link project (run once per app) +doppler setup --project my-project --config development +``` + +## Per-app config (monorepos) + +Each app has its own `doppler.yaml`: + +```yaml +# apps/api/doppler.yaml +setup: + project: my-project + config: api_development + +# apps/web/doppler.yaml +setup: + project: my-project + config: web_development +``` + +## Running with secrets + +```bash +# Development (single app) +doppler run -- bun dev + +# Or pull to .env.local (do not commit) +doppler secrets download --no-file --format env > .env.local +``` + +## Config environments + +| Doppler config | Environment | +| --------------- | ----------------- | +| `*_development` | Local dev | +| `*_staging` | Preview / staging | +| `*_production` | Production | + +## Convex secret sync (Stack B) + +No Doppler MCP available. Use the provided script: + +```bash +# Dry run +scripts/sync-env.sh --deployment prod --dry-run + +# Apply (pushes to Convex via npx convex env set) +scripts/sync-env.sh --deployment prod +``` + +The script filters `DOPPLER_*`, `VERCEL_*`, `GITHUB_*` automatically. +Before any sync, it reads the linked Doppler config and refuses cross-tier +writes. For example, a repo linked to `*_dev` cannot run +`scripts/sync-env.sh --deployment prod`. In Stack B, `--deployment stg` is +rewritten to `dev` with a stderr note because Convex staging shadows the dev +deployment. The rejection message names only config/deployment tiers and must +never print secret values. + +Run `bun preflight --only=env/*` or `/env-audit` before syncing tier secrets. +For fixable gaps, use `bun preflight --fix --write` from a `feature/*` branch. +The fix engine may create safe missing stubs, generate `BETTER_AUTH_SECRET`, +derive `BETTER_AUTH_URL`, and then re-run `scripts/sync-env.sh` for Stack B. 
+It must never overwrite existing secret values, print secret values, or perform +production writes from a non-interactive shell. + +## Render secret sync (Stack A) + +Use Render's dashboard or CLI to set environment variables per service. +In CI, use a Doppler service token: + +```yaml +# .github/workflows/deploy.yml +- name: Deploy to Render + env: + DOPPLER_TOKEN: ${{ secrets.DOPPLER_SERVICE_TOKEN }} + run: doppler run -- render deploy +``` + +## Doppler to GitHub Actions secrets + +Security CI reads GitHub Actions secrets, but Doppler remains the source of +truth. Sync only the required names: + +- `ANTHROPIC_API_KEY` or `CLAUDE_CODE_OAUTH_TOKEN` for optional AI review +- `SEMGREP_APP_TOKEN` for optional Semgrep Cloud upload +- `SLACK_SECURITY_WEBHOOK` for optional failure notifications + +```bash +doppler secrets get ANTHROPIC_API_KEY --plain | gh secret set ANTHROPIC_API_KEY +doppler secrets get SEMGREP_APP_TOKEN --plain | gh secret set SEMGREP_APP_TOKEN +doppler secrets get SLACK_SECURITY_WEBHOOK --plain | gh secret set SLACK_SECURITY_WEBHOOK +``` + +Skip optional secrets that are not used by the derived repo. 
+ +## Secret rotation + +- Rotate on: team member offboarding, suspected breach, quarterly audit +- After rotation: update Doppler first, then redeploy all affected services +- Document rotation in incident log (`.local/incidents/`) + +## Checklist + +- [ ] No secrets in code, commits, PR text, or CI logs +- [ ] `.env*` in `.gitignore` (except `.env.example`) +- [ ] Doppler service tokens used in CI (not personal tokens) +- [ ] `bun preflight --only=env/*` or `/env-audit` artefact reviewed for any env/secrets PR +- [ ] gitleaks configured to scan pre-commit — see `13-security.md` +- [ ] `.env.example` kept up to date with all required variable names (no values) + +## Cross-references + +- Environment tier naming and drift rules: `20-environments.md` diff --git a/.ai/rules/15-pdpl-compliance.md b/.ai/rules/15-pdpl-compliance.md new file mode 100644 index 00000000000..223d4ac50f1 --- /dev/null +++ b/.ai/rules/15-pdpl-compliance.md @@ -0,0 +1,93 @@ +# 15 — PDPL Compliance (Oman) + +Royal Decree 6/2022. Fully enforced since **5 February 2026**. +This rule applies to **every project** regardless of stack. + +## Core obligations + +| Obligation | Requirement | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| **Privacy notice** | Arabic-language notice mandatory (Art. 4). Must include: data types collected, purpose, legal basis, retention period, rights, DPO contact | +| **Breach notification** | Notify NCSC within **72 hours** of discovery | +| **Data subject rights** | Access, correction, erasure, portability within 30 days | +| **Consent** | Explicit, informed, withdrawable. Separate consent per purpose | +| **Data minimization** | Collect only what is strictly necessary | +| **Retention** | Define and enforce retention periods. 
Delete after period ends | +| **DPO** | Data Protection Officer required for systematic processing | + +## Data residency + +Personal data must be processed in Oman or in countries with "adequate protection" per NCSC determination. +**Cloud provider choice matters**: + +- Convex (US-based) may not be adequate for Level 3/4 data without TRA approval +- Render.com: check data center location for each service region + +## Opt-in compliance layers + +Activate additional rules when applicable: + +Financial reporting standards are handled separately. For IFRS Accounting +Standards, load `19-ifrs-compliance.md` when a task touches financial +statements, accounting records, ledgers, revenue recognition, leases, +impairments, audit exports, or IFRS-scoped project requirements. + +| Rule | When to activate | +| ---------------------------------- | ------------------------------------------------------------------------ | +| **TRA Cloud** (Decision 1152/2024) | If you are a cloud service provider or storing Level 3/4 government data | +| **CDC** (Royal Decree 64/2020) | If serving government agencies or critical national infrastructure | +| **CBO** | If handling payments, money transfers, or fintech | +| **FSA** | If handling investments or securities | +| **MOH** | If handling health/medical data | + +## Data classification + +| Level | Examples | Special requirements | +| ------- | ------------------------------------------- | -------------------------------------------- | +| Level 1 | Public data | None | +| Level 2 | Internal business data | Standard PDPL controls | +| Level 3 | Sensitive personal data (health, financial) | Encryption at rest + transit; access logging | +| Level 4 | National security, critical infrastructure | Cannot leave Oman without TRA approval | + +## Technical controls + +```ts +// PII columns in schema must be tagged +// -- pdpl:personal (add as SQL comment) + +// Erasure: null out PII, preserve non-PII for business records +await db + 
.update(users) + .set({ email: null, name: null, phone: null, deletedAt: new Date() }) + .where(eq(users.id, userId)); + +// Audit log every data access (Art. 19) +await auditLog.write({ action: "data.accessed", subjectId, actorId, timestamp }); +``` + +## Development rules + +- Never use real PII in tests, seeds, or fixtures — synthetic data only +- Never log PII fields (`email`, `phone`, `nationalId`, `ip`) — use IDs +- In Sentry `beforeSend`: strip PII from `event.user` and related payloads (see `12-telemetry.md` for a scrub pattern) +- `.env.example`: never include real values + +## Breach response checklist + +- [ ] Isolate affected systems +- [ ] Assess scope (data types, number of subjects affected) +- [ ] Notify NCSC within **72 hours** (pdpl.ncsc.gov.om) +- [ ] If TRA-regulated: notify TRA within **12 hours** for severe breaches +- [ ] Notify affected data subjects if there is high risk to their rights +- [ ] Document in `.local/incidents/breach-YYYY-MM-DD.md` + +## Arabic privacy notice + +Minimum required sections in Arabic: + +1. من نحن وكيفية التواصل معنا (Who we are) +2. البيانات التي نجمعها (Data collected) +3. أغراض المعالجة والأساس القانوني (Purpose + legal basis) +4. مدة الاحتفاظ بالبيانات (Retention period) +5. حقوق أصحاب البيانات (Data subject rights) +6. معلومات مسؤول حماية البيانات (DPO contact) diff --git a/.ai/rules/17-aws-well-architected.md b/.ai/rules/17-aws-well-architected.md new file mode 100644 index 00000000000..a1eba0e3f67 --- /dev/null +++ b/.ai/rules/17-aws-well-architected.md @@ -0,0 +1,90 @@ +# 17 — AWS Well-Architected + +Use the AWS Well-Architected Framework as a mandatory design and review lens for +every non-trivial change. + +This starter is not AWS-only. Apply the principle behind each pillar and map it +to the stack in use (Render, Vercel, Convex, Neon, Doppler, Sentry, etc.) +instead of forcing AWS-specific services or implementation details. 
+ +Source: AWS Well-Architected Framework + + +## Operating rules + +- In planning, name the impacted pillars and the intended tradeoffs +- In implementation, prefer small, reversible, observable changes +- In review, use a blame-free, lightweight conversation that surfaces risks and + actions rather than turning the framework into a checklist-only audit +- Escalate or block changes that materially weaken a pillar without explicit + justification and mitigation +- When pillars conflict, document the tradeoff in `docs/tasks/*.md` or PR text +- Treat this framework as additive to PDPL, security, testing, and deployment + rules; it does not replace them + +## General design principles + +- Stop guessing capacity needs; prefer elastic or measurable sizing decisions +- Test systems at production scale when the risk justifies it +- Automate with experimentation and rollback in mind +- Prefer evolutionary architectures over hard-to-reverse one-way doors +- Drive architecture decisions using data, not intuition alone +- Improve through game days or other realistic failure exercises + +## Pillar checklist + +| Pillar | Required behavior in this repo | +| ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Operational excellence | Operations as code, clear ownership, runbooks/checklists for risky work, small reversible deploys, post-incident learning | +| Security | Least privilege, defense in depth, secret hygiene, auditability, encryption and data handling aligned with PDPL | +| Reliability | Health checks, timeouts, retries with limits, idempotency where needed, graceful degradation, tested rollback or recovery path | +| Performance efficiency | Measure before tuning, choose fit-for-purpose compute/storage patterns, control latency, caching, and high-cost queries | +| Cost optimization | Right-size environments and dependencies, remove 
waste, track expensive paths, justify always-on resources | +| Sustainability | Minimize unnecessary compute, storage, and transfer; prefer retention limits, efficient defaults, and lower-footprint architectures that still meet requirements | + +## What to require during creation + +- Operational excellence: the change can be deployed, observed, and reversed + safely +- Security: new data flows, secrets, permissions, and external integrations are + identified and protected +- Reliability: dependency failure modes are known; user-visible failure behavior + is intentional +- Performance efficiency: expected latency and throughput impact are understood; + avoid premature over-engineering +- Cost optimization: new services, background work, polling, storage growth, and + third-party spend are justified +- Sustainability: avoid wasteful polling, over-fetching, excess retention, + duplicate processing, and oversized infrastructure + +## What to require during review + +Ask these questions for every meaningful change: + +1. How will this be operated, observed, and rolled back? +2. What secrets, privileges, or sensitive data does this add or expose? +3. What happens when a dependency is slow, unavailable, or returns bad data? +4. What is the expected latency, throughput, and scaling behavior? +5. What is the direct cost impact now and at 10x usage? +6. Can the same user outcome be achieved with less compute, storage, or network? 
+ +## Review posture + +- Reviews should happen early on high-risk or one-way-door decisions, not only + before release +- Significant architecture changes should trigger another Well-Architected pass +- The output of a review is a prioritized action list, owners, and mitigations + +## Existing rule mappings + +- Security controls: `13-security.md`, `14-secret-management.md`, + `15-pdpl-compliance.md` +- Reliability and recovery: `10-error-handling.md`, `11-testing.md`, + `16-deployment.md` +- Performance, observability, and SLOs: `12-telemetry.md`, `16-deployment.md` + +## Known adaptation gap + +The upstream AWS framework includes provider-specific guidance. For this starter, +translate that guidance to the selected stack unless the project explicitly runs +on AWS. diff --git a/.ai/rules/18-pr-readiness.md b/.ai/rules/18-pr-readiness.md new file mode 100644 index 00000000000..f6fe075e13a --- /dev/null +++ b/.ai/rules/18-pr-readiness.md @@ -0,0 +1,76 @@ +# 18 — PR Readiness + +Apply these rules to every pull request, review, ship, and merge task. 
+ +## Hard rules before merge + +- Relevant Markdown documentation must be updated when the change affects + behavior, setup, contributor workflow, CI, architecture, governance, or + operations +- If no documentation update is needed, the PR must explicitly state + **No documentation impact** +- Tests must be added or updated in the same PR for behavior-changing work +- If no test update is needed, the PR must explicitly state **No test impact** +- All required CI checks must be green before merge +- Agents must treat missing docs, missing tests, or failing CI as blocking + release risks + +## What counts as relevant docs + +At minimum, evaluate these paths when code or workflow changes: + +- `.github/**/*.md` +- `README*.md` +- `CONTRIBUTING*.md` +- `SECURITY*.md` +- `CODE_OF_CONDUCT*.md` +- `docs/**/*.md` +- `AGENTS.md` +- `CLAUDE.md` +- `review.md` +- `.cursor/BUGBOT.md` + +## CI behavior in this template + +- Docs-impact checks run as **warning-level** in CI (do not fail the PR-readiness + job by themselves) +- Tests and CI-green checks remain blocking +- PR authors must still either update relevant docs or explicitly mark + **No documentation impact** in the PR template +- Required CI checks are validated using `PR_READINESS_REQUIRED_CHECKS` + (default: `validate`) +- Derived repos should set `PR_READINESS_REQUIRED_CHECKS` in + `.github/workflows/pr-readiness.yml` to match their required checks + (comma-separated), for example: + `PR_READINESS_REQUIRED_CHECKS: "validate,security"` +- Do not include the `pr-readiness` job name itself in this list, or the check + creates a circular dependency by waiting for itself. +- Path-based docs expectations are evaluated automatically (warning-level), with + extra scrutiny for: + - `.github/workflows/**` and automation paths + - `.ai/rules/**`, `AGENTS.md`, `CLAUDE.md`, `.cursorrules` + - `.cursor/**`, `review.md` + - `scripts/**` + +## Required review questions + +1. 
Which Markdown docs changed, and do they match the implementation? +2. Which tests were added or updated in this PR? +3. Which CI checks are required, and are they green? +4. If docs or tests were not updated, is the PR explicit about why not? + +## GitHub requirements for derived repos + +- Include a pull request template with explicit docs, tests, and CI checklist + items +- Keep at least one CI workflow active on pull requests +- Configure GitHub branch protection in the derived repo to require the + validation workflow and PR-readiness workflow before merge + +## Agent behavior + +- `review` and `ship` tasks must call out docs drift, missing tests, and failing + CI before style issues +- If a PR exists, verify GitHub checks rather than assuming local success is + enough +- Do not approve or recommend merge while any hard rule above is unmet diff --git a/.ai/rules/19-ifrs-compliance.md b/.ai/rules/19-ifrs-compliance.md new file mode 100644 index 00000000000..686fc7a1aea --- /dev/null +++ b/.ai/rules/19-ifrs-compliance.md @@ -0,0 +1,68 @@ +# 19 — IFRS Accounting Standards + +Full IFRS Accounting Standards engineering guidance. Load this rule when a task +touches financial statements, accounting ledgers, revenue recognition, leases, +impairments, audit exports, accounting reports, or IFRS-scoped project +requirements. + +This rule applies to both stacks. It does not cover IFRS for SMEs, IFRS S1/S2 +sustainability disclosures, IFRS 17 insurance-specific reporting, tax filing +rules, XBRL/iFile submission, or professional accounting advice unless another +project rule explicitly opts in. 
+ +## Core obligations + +| Area | Engineering requirement | +| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Financial statements** | Support complete statement sets when the product generates formal IFRS reports: financial position, profit or loss and other comprehensive income, changes in equity, cash flows, and notes/accounting policies | +| **Comparatives** | Preserve prior-period data needed for comparative reporting | +| **Materiality and disclosures** | Track report basis, reporting period, accounting policies, estimates, and disclosure inputs where formal statements are generated | +| **Audit trail** | Every accounting mutation must record actor, timestamp, source, reason, and adjustment path | +| **Period close** | Closed periods cannot be changed silently; use controlled adjustments or reversals | +| **Reconciliation** | Reports and exports must be reproducible from persisted source records | + +## Accounting data rules + +- Use decimal-safe money handling. Never use floating-point arithmetic for + monetary amounts. +- Store ISO currency codes explicitly on monetary records. +- Preserve transaction date, posting date, reporting period, created metadata, + approved metadata, and source-document references where relevant. +- Separate draft, posted, voided, and reversed states. +- Never silently overwrite posted accounting records. Use correcting entries, + reversals, or versioned adjustments. +- Multi-currency records must preserve original transaction currency, functional + currency, exchange-rate source, and rate date. +- Generated financial reports must state reporting period and basis of + preparation. +- Financial exports must be deterministic and reproducible from persisted + records, not one-off calculations. 
+ +## Development rules + +- If `docs/project.md` selects `IFRS Accounting Standards`, load this rule for + all finance/accounting work. +- If a task touches financial reporting and `docs/project.md` does not select a + financial reporting standard, stop and confirm scope before implementation. +- If `docs/project.md` is still in template state, follow the bootstrap gate + unless the change is product-agnostic template maintenance. +- If a feature claims IFRS readiness without audit trails, period controls, or + reproducible reports, flag it as a compliance gap. + +## Tests + +Cover these scenarios for accounting/reporting behavior: + +- Monetary rounding and precision +- Currency conversion and rate-date handling +- Posting, voiding, and reversal flows +- Closed-period mutation attempts +- Report reproducibility from source records +- Comparative-period reporting where formal statements are generated +- Audit logging for accounting mutations + +## Oman note + +IFRS is common in Oman financial-reporting contexts, especially for companies +subject to FSA, CBO, audit, or lender reporting obligations. Always verify the +project-specific regulator and reporting scope in `docs/project.md`. diff --git a/.ai/rules/20-environments.md b/.ai/rules/20-environments.md new file mode 100644 index 00000000000..ea441e9a145 --- /dev/null +++ b/.ai/rules/20-environments.md @@ -0,0 +1,297 @@ +# 20 — Environment Topology + +Canonical rules for environment tiers, names, branch mapping, and the +1:1 relationship between Doppler configs and every external integration +(GitHub, Vercel, Render, Convex, Neon, Better Auth, Sentry, Resend). + +This rule is mandatory for tasks that touch environment topology. The planned +`bun preflight` and `/env-audit` enforcement described below treats drift as +`error` once the environment-topology implementation lands. + +## Canonical names + +Three-letter names are canonical. 
Long names are accepted aliases on input +only; tooling rewrites to canonical form before any provider call. + +| Canonical | Aliases (accepted) | Forbidden anywhere | +| --------- | --------------------------------- | ------------------------------------------------------ | +| `dev` | `development`, `develop`, `local` | `qa`, `test`, `int`, `sbox` | +| `stg` | `staging`, `stage`, `pre-prod` | `uat`, `preview` (preview is its own tier — see below) | +| `prod` | `production`, `live` | `master`, `release` | + +Preview is **not** a tier. It is an ephemeral instance of `dev` (Stack B) or a +shared `preview` env (Stack A). See § Ephemeral previews. + +## Tier count: 2 or 3 + +A repo declares its tier count in `docs/project.md` under +`Environment tiers: 2` or `Environment tiers: 3`. + +| Tiers | Long-lived envs | When to choose | +| ----------- | --------------- | ------------------------------------------------------------------------------------------- | +| 3 (default) | dev, stg, prod | Any product with external users; regulated workloads (PDPL, IFRS, CBO, MoH); paid customers | +| 2 (opt-in) | dev, prod | Internal tools, prototypes, single-developer projects, throwaway demos | + +`/init-project` asks the operator which tier count to use and writes the +choice to `docs/project.md`. Tooling reads the field; missing field = error. + +A 2-tier repo treats `stg` as forbidden — preflight rejects any Doppler +config, Vercel env, Render service, or Neon branch named `stg`. + +## Branch ↔ environment mapping (GitFlow-lite) + +| Branch pattern | Target env | Trigger | Approval | +| ------------------------------- | ----------------- | ------------------- | -------- | +| `feature/*`, `fix/*`, `chore/*` | ephemeral preview | PR open | none | +| `main` | `stg` | push to main | CI green | +| `release/*` | `prod` | push to `release/*` | CI green | + +Tags on `release/*` (e.g. `v1.4.2`) are the source of truth for prod +deployments. 
Direct push to `prod` from any other branch is forbidden by
+GitHub Environments.
+
+For 2-tier repos, deploying `release/*` directly from `main` is allowed —
+there is no `stg` to gate against.
+
+## Per-integration topology
+
+Every long-lived tier maps 1:1 across all integrations. Drift = error.
+
+### Doppler
+
+Project name comes from the `docs/project.md` Doppler project name field.
+Configs follow `<app>_<tier>`:
+
+```
+<doppler-project>/
+  api_dev  api_stg  api_prod
+  web_dev  web_stg  web_prod
+  www_dev  www_stg  www_prod
+```
+
+Per-app `doppler.yaml` selects the dev config by default; CI overrides via
+`DOPPLER_CONFIG=<app>_<tier>`.
+
+### Vercel (Stack B)
+
+Vercel only exposes three slots: `development`, `preview`, `production`.
+Mapping is fixed:
+
+| Tier | Vercel env                                          |
+| ---- | --------------------------------------------------- |
+| dev  | `development`                                       |
+| stg  | `preview` (with Git-branch protection: only `main`) |
+| prod | `production`                                        |
+
+`preview` is forbidden as a template tier name, but it is still the required
+Vercel slot name for `stg` because Vercel's environment slots are fixed.
+
+Env variables in each Vercel slot must equal the corresponding Doppler
+config — preflight `env/doppler-vercel-parity.ts` checks this.
+
+### Render (Stack A)
+
+One Render service per tier per app:
+
+```
+<app>-dev  <app>-stg  <app>-prod
+```
+
+Each service's env-group token points to the matching Doppler service
+token (`DOPPLER_TOKEN_<TIER>`).
+
+### Convex (Stack B)
+
+Convex has only two long-lived deployments. `dev` doubles as `stg`:
+
+| Tier | Convex deployment                                |
+| ---- | ------------------------------------------------ |
+| dev  | `dev:<...>`                                      |
+| stg  | shares `dev:<...>` (Convex limitation, accepted) |
+| prod | `prod:<...>`                                     |
+
+`scripts/sync-env.sh` enforces the shared mapping: `--deployment stg` is
+rewritten to `--deployment dev` with a logged note.
+It also refuses cross-tier writes when the linked Doppler config suffix does +not match the requested deployment tier, except that 2-tier repos may sync +`dev` and `prod` regardless of the currently linked config. + +### Neon (Stack A, cloud) + +Branch-per-tier off the Neon project root: + +``` +neon-project/ + branches/ + dev stg prod +``` + +`DATABASE_URL` per tier points to the matching branch endpoint. Migrations +run dev → stg → prod in order; never skip stg in 3-tier repos. + +### Local PostgreSQL (Stack A, dev machine) + +DB-per-tier in a single local pgsql instance (chosen for parity with the +Neon branch model and zero Drizzle code changes): + +``` +_dev _stg _prod +``` + +Connection strings: + +``` +postgres://localhost:5432/_dev +postgres://localhost:5432/_stg # only when 3-tier +postgres://localhost:5432/_prod +``` + +`scripts/setup-local-db.sh` creates all three (or two) idempotently. + +### GitHub Environments + +GitHub Environments named `dev`, `stg`, `prod` (mirroring tiers). + +| Setting | dev | stg | prod | +| ------------------ | ----------- | ----------- | -------------------------------------------------- | +| Branch protection | none | `main` only | `release/*` only | +| Required reviewers | 0 | 0 | 0 (per Q7 — opt-in per repo via `docs/project.md`) | +| Wait timer | 0 | 0 | 0 (per Q7 — opt-in per repo) | +| Secrets visibility | dev secrets | stg secrets | prod secrets | + +Each env carries its tier-scoped `DOPPLER_TOKEN_` and any +provider-specific deploy token (`VERCEL_TOKEN`, `RENDER_API_KEY`, etc.). + +### Domains + +| Tier | Web | API | +| ----------- | -------------------------- | ------------------------------ | +| dev (local) | `https://.test` | `https://api..test` | +| stg | `https://stg..` | `https://api.stg..` | +| prod | `https://.` | `https://api..` | + +`BETTER_AUTH_URL` per tier matches exactly. Mismatch = OAuth breaks +silently — preflight `env/better-auth-url-tier.ts` blocks. 
+
+### Sentry, Resend, and other observability
+
+| Service | Env separation                                                |
+| ------- | ------------------------------------------------------------- |
+| Sentry  | One project; tier set via `SENTRY_ENVIRONMENT=dev\|stg\|prod` |
+| Resend  | Per-tier API key OR shared key + `X-Entity-Tag: <tier>`       |
+| pino    | `LOG_LEVEL=debug` (dev), `info` (stg), `info` (prod)          |
+
+## Ephemeral previews
+
+Per-PR isolated envs created on PR open, torn down on close/merge.
+
+| Layer        | Stack A                                                               | Stack B                          |
+| ------------ | --------------------------------------------------------------------- | -------------------------------- |
+| Web/API host | shared `preview` env (paid PR previews not required)                  | per-PR Vercel preview deploy     |
+| Database     | shared `preview` Neon branch + per-PR pgsql schema namespace          | per-PR Neon branch from `prod`   |
+| Backend      | n/a                                                                   | per-PR Convex preview deployment |
+| Secrets      | reuse `*_dev` Doppler config (no per-PR copy)                         | reuse `*_dev` Doppler config     |
+| Domain       | `pr-<n>.preview.<domain>.<tld>`                                       | `<app>-pr-<n>.vercel.app`        |
+| Lifetime     | until PR closes/merges                                                | until PR closes/merges           |
+| Teardown     | scheduled cleanup job (Stack A shared env grows; cron resets nightly) | automatic on PR close            |
+
+Stack A PR previews share one `preview` env to avoid Render's paid PR
+preview tier; the shared env is reset nightly via a Render cron and the
+shared `preview` Neon branch is rebased on `stg` weekly.
+ +## Secret rules per tier + +| Rule | dev | stg | prod | +| -------------------------------------------- | ----- | -------- | ------------------------------- | +| Rotation cadence (warn at) | never | 180 days | 90 days | +| Auto-generated secrets allowed | yes | yes | no — humans only | +| Service token write permitted | yes | yes | yes (token must be tier-scoped) | +| `--fix` may overwrite existing | no | no | no | +| Auto-copy non-secret vars from previous tier | n/a | from dev | from stg | +| `gitleaks` scan severity | warn | error | error | + +"Non-secret" means the key matches the project's allowlist regex in +`scripts/preflight/non-secret-keys.json` (e.g. `LOG_LEVEL`, `SENTRY_ENVIRONMENT`, +`NEXT_PUBLIC_*`). Anything else is a secret and never auto-copied. + +## Enforcement + +Two invocation paths, same checks: + +1. `bun preflight` — full preflight including env/\* checks +2. `/env-audit` — alias for `bun preflight --only=env/*` (faster cycle for + topology-only review) + +Both run on every PR. Auto-fix is enabled by default; preflight runs in +`--fix --write` mode in CI for branches `feature/*`, `--check` only for +branches `main` and `release/*` (no auto-write into protected envs). 
+ +### Checks registered (under `env/`) + +| Check id | What it verifies | +| ------------------------------ | ------------------------------------------------------------------------------------------------------- | +| `env/naming.ts` | All Doppler/Vercel/Render/Neon/GitHub env names ∈ canonical or alias set | +| `env/tier-count.ts` | `docs/project.md` `Environment tiers` field present and matches actual config count | +| `env/doppler-configs.ts` | All required `_` configs exist; auto-fix creates stubs | +| `env/doppler-key-parity.ts` | Non-secret key set equal across tiers; auto-fix copies missing non-secret keys with placeholder values | +| `env/doppler-vercel-parity.ts` | Stack B: Vercel env equals matching Doppler config | +| `env/render-services.ts` | Stack A: one Render service per tier per app | +| `env/convex-deployments.ts` | Stack B: dev + prod deployments exist; stg-→-dev shadow logged | +| `env/neon-branches.ts` | Stack A: Neon branch per tier | +| `env/local-pgsql-dbs.ts` | Stack A: local DB-per-tier exists (or `setup-local-db.sh` printed) | +| `env/github-environments.ts` | GitHub Environments dev/stg/prod exist with correct branch policies | +| `env/better-auth-url-tier.ts` | `BETTER_AUTH_URL` per Doppler config matches domain pattern for that tier | +| `env/sync-env-guard.ts` | `scripts/sync-env.sh` rejects when linked Doppler config doesn't match `--deployment` | +| `env/rotation-age.ts` | Reads Doppler `created_at`; warns on prod secrets > 90 days, stg > 180 | +| `env/ephemeral-teardown.ts` | Open Vercel previews / Convex preview deployments / Neon branches without matching open PR > 24h = warn | + +### Auto-fix scope + +`--fix` performs only: + +1. Create missing Doppler `_` configs as empty stubs (no values) +2. Copy missing non-secret keys (and only non-secret keys) from the tier + below with placeholder values; never copy secret values across tiers +3. Create missing GitHub Environments with correct branch policy +4. 
Create local pgsql DB-per-tier when on Stack A and operator opts in +5. Stop and print exact provider CLI command for anything else (Vercel + env writes, Render service creation, Neon branch creation) — never + touch prod automatically + +Auto-fix never: + +- writes secret values +- overwrites an existing key in any tier +- creates resources in prod without TTY confirmation +- runs in CI on `main` or `release/*` branches + +## Adoption / migration + +Existing repos derived from this template before rule 20 landed will +report at least: + +- `env/tier-count.ts` = `error` (field missing in `docs/project.md`) +- `env/doppler-configs.ts` = `error` for missing stg/prod configs + +`bun preflight --fix` resolves both via stub creation. Any failure to +auto-create requires an operator to run the printed CLI command. + +## Cross-references + +- `.ai/rules/14-secret-management.md` — Doppler is SSOT, rotation, gitleaks +- `.ai/rules/16-deployment.md` — provider deploy mechanics +- `docs/project.md` — `Environment tiers` field, services table +- `scripts/sync-env.sh` — Stack B Doppler→Convex sync (env-guard added by this rule) +- `scripts/preflight/checks/env/*` — implementation +- `docs/tasks/environments-topology.md` — implementation plan + +## Checklist (for any task touching environments) + +- [ ] `docs/project.md` `Environment tiers` field set (2 or 3) +- [ ] All required Doppler `_` configs exist +- [ ] Vercel/Render/Convex/Neon/GitHub envs match canonical names +- [ ] `BETTER_AUTH_URL` per tier matches domain pattern +- [ ] Non-secret key set equal across tiers +- [ ] Ephemeral preview teardown working (verified via `env/ephemeral-teardown.ts`) +- [ ] No `qa`/`uat`/`test`/`preview` env names outside the ephemeral preview slot +- [ ] `/env-audit` green on the PR diff --git a/.ai/rules/21-agent-orchestration.md b/.ai/rules/21-agent-orchestration.md new file mode 100644 index 00000000000..45e4404794b --- /dev/null +++ b/.ai/rules/21-agent-orchestration.md @@ -0,0 +1,60 @@ 
+# 21 — Agent Orchestration
+
+Use this rule when planning, executing, reviewing, opening, shipping, or
+resuming agent-assisted work.
+
+## Canonical Lifecycle
+
+`Backlog -> Ready -> In Progress -> In Review -> Done`
+
+- Backlog: issue exists, rough goal known.
+- Ready: acceptance criteria, priority, type, stack, compliance, and spec path are known.
+- In Progress: implementation branch exists and the issue/project item is assigned.
+- In Review: PR exists and links the issue/spec.
+- Done: PR merged, issue/project item closed or moved to Done, reusable lessons extracted.
+
+## Command Order
+
+1. `/init-project`
+2. `/user-stories <feature>`
+3. `/plan <feature>`
+4. `/orchestrate <plan> [phase-id]` when a coordinator should choose the
+   next safe command for a durable plan
+5. `/phase <plan> <phase-id>` for `docs/tasks` plans, or `/execute-task <task>` for issue-only/legacy work
+6. `/review`
+7. `/open-pr <branch>`
+8. Fix CI/review comments or use `.github/ai-loop.yml` when explicitly enabled
+9. `/ship <pr-number>`
+10. `/extract-pr-learnings <pr-number>` when useful
+
+## Source Of Truth
+
+- GitHub Issues: live work items.
+- GitHub Projects: live board/status.
+- `docs/tasks/*.md`: durable specs and execution logs for non-trivial work.
+- `tasks.md`: legacy compatibility pointer only.
+
+Do not remove `docs/tasks` fallbacks from status tooling. Offline agent context
+must keep working even when GitHub is unavailable.
+
+## Required GitHub Fields
+
+- `Status`: Backlog, Ready, In Progress, In Review, Done, Blocked
+- `Priority`: P0, P1, P2, P3
+- `Type`: feature, fix, chore, docs, ops, security, research
+- `Stack`: stack-a, stack-b, both, template
+- `Compliance`: pdpl, ifrs, security, none
+- `Spec Path`: free text path to durable spec
+
+## Agent Rules
+
+- Prefer `/phase` for modern plans under `docs/tasks`.
+- Use `/orchestrate` only to coordinate existing lifecycle commands. It must
+  not bypass `/phase`, `/review`, `/open-pr`, or `/ship`.
+- Use `/execute-task` only for legacy numbered tasks or issue-only work.
+- Link every durable spec to a GitHub issue via `github_issue` frontmatter. +- Link every PR to the issue and mention the spec path when one exists. +- Keep issue/project state and the spec execution log consistent. +- If GitHub access is missing, continue only when offline fallback is allowed by the task and report the unavailable integration clearly. +- Stop before enabling `.github/ai-loop.yml` unless legacy workflows are gone, + `executor_bot_login` is configured, and required secrets are available. diff --git a/.ai/rules/22-kanban-console.md b/.ai/rules/22-kanban-console.md new file mode 100644 index 00000000000..f4e444ac477 --- /dev/null +++ b/.ai/rules/22-kanban-console.md @@ -0,0 +1,90 @@ +# 22 - Kanban Console Product Rules + +Use this rule for every product change in this T3 Code fork. + +## Product Boundary + +- This repo is the product implementation for `MohAnghabo/kanban-console`. +- `MohAnghabo/ai-starter-pro` remains the governance source. Keep adopted rules + and workflow guidance in sync when phase status or governance behavior changes. +- Do not implement Kanban Console product behavior in the governance template repo. +- Keep the local task plan at `docs/tasks/t3-kanban-project-console.md` aligned + with GitHub issue #43 and the governance copy of the same plan. + +## Architecture + +- Preserve the upstream T3 Code package split unless a task phase explicitly + changes it: + - `apps/server`: server runtime, provider orchestration, local command adapters, + polling, and audit logging. + - `apps/web`: React/Vite UI, mock-first screens, i18n/RTL, and client state. + - `apps/desktop`: desktop shell and local filesystem/secret access where needed. + - `packages/contracts`: shared schemas and protocol contracts only. + - `packages/shared`: reusable runtime utilities with explicit subpath exports. 
+- Prefer existing T3 Code patterns: Effect Schema/Effect services where already + used, TanStack Query/router patterns in web, and existing source-control + abstractions before adding parallel systems. +- Runtime boundaries must be typed and validated. Use the local package pattern: + Effect Schema where T3 Code already uses it; Zod is acceptable for adopted + governance scripts and isolated product boundaries when it is the simpler fit. + +## Delivery Order + +- Finish Phase 1 governance/GitOps/workflow setup before application features. +- Phase 2 must be a full clickable mock UI with no real GitHub, git, CLI, or + provider mutations. +- Real integrations come after mock contracts are stable. Add deterministic + synthetic fixtures before real provider tests. +- Keep PRs independently reviewable and close to the 400 LOC target. Split UI, + contracts, adapters, and workflow automation into separate PRs when possible. + +## GitHub Projects And Task State + +- GitHub Projects is the live Kanban/task-status source of truth. +- Local `docs/tasks/*.md` files are durable specs and reference material only; + they must not drive Kanban status inside the app. +- Kanban status changes must require confirmation before writing to GitHub + Projects. +- Meaningful app actions linked to issues or PRs must post or update concise + GitHub comments. Never include raw command output in comments. + +## GitOps Rules + +- Implementation branches must use one of: + `feature/*`, `fix/*`, `chore/*`, `docs/*`, `ops/*`, `refactor/*`, `test/*`, or + `perf/*`. +- Mutating work on `main` or `release/*` is blocked unless an explicit task rule + allows a check-only or release-prep action. +- Release branches prepare artifacts and readiness evidence by default. Do not + trigger deploys, tags, or merges without explicit confirmation and a later + release policy decision. +- Destructive git actions require a second confirmation. 
+ +## Local Commands And Audit + +- All local commands must run from a selected managed repository cwd, not an + arbitrary shell cwd. +- CLI adapters need typed inputs, cwd pinning, timeouts, cancellation, redaction, + and local audit records. +- Treat diffs, command output, CI logs, and review comments as sensitive until + redacted. +- CodeRabbit, Doppler, Vercel, Render, and optional tools must degrade to + setup-required states when missing or unauthenticated. + +## UI And I18n + +- The UI should be dense, operational, and work-focused. Avoid marketing-style + layouts for the app surface. +- Every user-facing string needs English and Arabic translations. +- Verify RTL wherever Arabic renders. +- Build empty, loading, missing-auth, permission, error, and degraded states for + each major workflow. + +## Validation + +- Minimum before committing product changes: `bun check`. +- For governance/adoption changes, also run: + `bash scripts/verify-template-adoption.sh --profile minimal --manifest /Users/mohanghabo/Projects/ai-starter-pro/.template/adoption/minimal-files.txt` + and `bun preflight --cache-only --json`. +- For UI changes, add or update focused tests and run browser/Playwright smoke + once the screen is implemented. diff --git a/.claude/commands/env-audit.md b/.claude/commands/env-audit.md new file mode 100644 index 00000000000..fcf5b736bc0 --- /dev/null +++ b/.claude/commands/env-audit.md @@ -0,0 +1,28 @@ +--- +description: Run environment-topology preflight checks only. +argument-hint: [--fix] [--write] +--- + +Run the environment audit for the current repo. + +Use `$ARGUMENTS` as additional flags. + +Steps: + +1. Read `AGENTS.md`, `docs/project.md`, `.ai/rules/14-secret-management.md`, + and `.ai/rules/20-environments.md`. +2. Run: + ```bash + bun preflight --only='env/*' $ARGUMENTS + ``` +3. On `feature/*` branches, `--fix --write` may be used to create safe stubs + and update local project metadata. On `main` and `release/*`, run check-only. +4. 
If any check returns `error`, stop and summarize the failed tier or provider + contract. +5. Reference `.local/preflight/latest.md` and `.local/preflight/latest.json` + in any environment, secret, deployment, or CI PR. + +Success banner: + +- EN: Env audit passed +- AR: اكتمل تدقيق البيئات diff --git a/.claude/commands/execute-task.md b/.claude/commands/execute-task.md new file mode 100644 index 00000000000..36e09996b40 --- /dev/null +++ b/.claude/commands/execute-task.md @@ -0,0 +1,108 @@ +--- +description: Execute an issue-only or legacy numbered task end-to-end. +argument-hint: [optional details] +--- + +Execute Task: $ARGUMENTS + +Read in this order before touching any code: + +1. GitHub issue or URL from `$ARGUMENTS`; if `$ARGUMENTS` is a legacy number, + read `tasks.md` as a compatibility shim. +2. Linked `docs/tasks/*.md` spec if the issue body or `Spec Path` field names one. +3. `AGENTS.md` — commands and gotchas +4. `.ai/rules/00-constitution.md` — non-negotiables +5. `.ai/rules/17-aws-well-architected.md` — mandatory architecture and review lens +6. `.ai/rules/18-pr-readiness.md` — mandatory docs, tests, and CI gates +7. `.ai/rules/21-agent-orchestration.md` — lifecycle and GitHub linkage rules +8. Stack rule: `.ai/rules/01-stack-a-nestjs.md` or `.ai/rules/01-stack-b-convex.md` +9. Task-relevant rule files from `.ai/README.md` + +If the work has a `docs/tasks` durable spec, prefer +`/phase ` instead of this command. + +Then execute these steps exactly: + +Bootstrap gate: + +- If `docs/project.md` is still in template state (for example `YOUR_PRODUCT_NAME`, `YOUR_APP_NAME`, unchecked stack choice, or placeholder user text), stop and run `/init-project` before branching or coding. + +### 1. 
Branching
+
+```bash
+git checkout main && git pull origin main
+git checkout -b <type>/issue-<number>-<slug>
+# Types: feat/ fix/ chore/ docs/ ops/
+```
+
+Before branching for non-template-maintenance work, run:
+
+```bash
+bun preflight --json
+```
+
+Abort before branching if any preflight check returns `error`. Warnings should
+be summarized in the task notes but do not block by themselves.
+
+### 2. Definition of Ready gate
+
+Confirm before coding:
+
+- [ ] `docs/project.md` is initialized for this repo
+- [ ] Goal is clear
+- [ ] Happy path and edge cases are defined
+- [ ] API/data model references are known
+- [ ] Acceptance criteria exist
+- [ ] GitHub issue and project fields are ready (`Status`, `Priority`, `Type`, `Stack`, `Compliance`, `Spec Path`)
+- [ ] Well-Architected pillar impacts and tradeoffs are understood
+- [ ] PR-readiness expectations for docs, tests, and CI are understood
+
+### 3. Implementation
+
+- Write failing tests first (TDD)
+- Code only what the task requires — no scope creep
+- Fix TypeScript errors as you go
+
+### 4. Validation
+
+```bash
+bun check # types + lint + tests — must pass
+bun contracts:check # Stack A only — if API changed
+```
+
+### 5. Commit & push
+
+```bash
+git add <files> # never git add -A blindly
+git commit -m "feat(scope): description"
+git push origin <branch>
+```
+
+### 6. PR (via gh CLI)
+
+```bash
+gh pr create \
+  --title "<type>(scope): description" \
+  --body "## Summary\n...\n\n## Testing Guide\n...\n\n## Risks\n..." \
+  --base main
+```
+
+PR body must include: what changed, why, how to test (step by step), risks/mitigations.
+PR body must also capture docs impact, test impact, and CI readiness using the
+repo template checklist.
+PR ≤ 400 LOC excluding generated files.
+
+### 7. Follow-ups
+
+If issues remain: `gh issue create --title "..." 
--body "..."` + +Return at the end: + +- Task number and scope +- GitHub issue URL and linked spec path, if any +- Branch name +- Files changed (summary) +- Validation status (PASS / FAIL with output) +- Well-Architected tradeoffs noted +- PR URL +- Follow-up issue URLs or "none" diff --git a/.claude/commands/extract-pr-learnings.md b/.claude/commands/extract-pr-learnings.md new file mode 100644 index 00000000000..7b9415a7de8 --- /dev/null +++ b/.claude/commands/extract-pr-learnings.md @@ -0,0 +1,99 @@ +--- +description: Extract non-obvious learnings from a merged PR and file a structured issue. +argument-hint: [pr-number] +--- + +Extract learnings from PR: $ARGUMENTS + +Audience: the repository owner (single maintainer). Output target: a GitHub issue in this repository. Do not tag or assign anyone else. + +## Resolve PR number + +If `$ARGUMENTS` is empty (invoked from CI), resolve the PR number from the workflow event payload: `jq -r '.pull_request.number // .number // empty' "$GITHUB_EVENT_PATH"`. If that yields nothing, exit with `skip: no pr context`. + +Use that number for all subsequent `gh` calls in this command — treat it as `PR_NUM`. + +## Gate — skip if trivial + +Fetch the PR first: `gh pr view "$PR_NUM" --json number,title,body,author,additions,deletions,files,commits,reviews,comments`. + +Do **not** create an issue if any of the following is true: + +- Author is a bot (`dependabot`, `renovate`, `github-actions`). +- Change is docs-only with no strategy or process shift. +- Change is purely generated files, lockfile bumps, or formatting. +- Diff is < 10 lines of non-generated code and has no review comments, CI failures, or process friction. +- PR body or reviews contain no signal (no "why", no surprise, no correction, no tradeoff discussion). + +If gated out, print one line — `skip: ` — and exit. Do not file an issue. + +## Extract — only the non-obvious + +Read the PR, the diff, the review comments, and the CI outcomes. 
Identify material that future-you would want written down. Prefer: + +- Hidden constraint or invariant discovered during implementation +- Mistaken assumption corrected by review or CI +- Workflow friction (tooling, quoting, env, CI flake) worth preventing +- Stack-specific gotcha (NestJS DI, Convex runtime, Drizzle, Better Auth) +- Gap between plan doc and reality +- Rule or convention that should be codified in `.ai/rules/` but is not + +Ignore: + +- What the PR did (title and diff already say that) +- Who did what and when (git log already says that) +- Generic programming advice +- Celebration, summary, or restatement of the diff + +If after honest review there is nothing non-obvious, skip. One-line skip is a valid outcome. + +## Map to template surface + +For each learning, identify where it should land: + +- `.ai/rules/.md` — codify as non-negotiable or guidance +- `tasks.md` — new follow-up task +- `CLAUDE.md` / `AGENTS.md` — command or gotcha update +- `.github/workflows/` — enforcement via CI +- `scripts/` — automation +- `docs/project.md` — scope or identity change +- `none` — observation worth remembering but not codifying yet + +## File the issue + +Create the issue with `gh issue create`: + +- Title: `[learning] ` +- Labels: `learning`, `triage` +- Assignee: repository owner +- Body structure (use this exactly): + +```markdown +Source: # + +## Learning + + + +## Why it matters + + + +## Template surface + +- [ ] `` — + +## Follow-ups + +- [ ] +``` + +Multiple independent learnings from one PR → file separate issues. Do not batch unrelated insights. + +## Rules + +- No PII, no secrets, no credential fragments in the issue body. +- No speculation beyond what the PR evidence supports. Cite the PR. +- AR/EN is not required — this is internal maintainer tracking, not user-facing. +- Keep the issue body under 300 words. Terse beats thorough. +- If uncertain whether a learning is real vs noise, skip. 
False positives rot the backlog faster than missing a real learning hurts. diff --git a/.claude/commands/ifrs-audit.md b/.claude/commands/ifrs-audit.md new file mode 100644 index 00000000000..c014f0efe80 --- /dev/null +++ b/.claude/commands/ifrs-audit.md @@ -0,0 +1,66 @@ +--- +description: Scan the current codebase for IFRS Accounting Standards compliance gaps. +--- + +Perform an IFRS Accounting Standards compliance audit of the current codebase. + +Reference: `.ai/rules/19-ifrs-compliance.md` + +Scan for: + +### 1. Scope and applicability + +- Check `docs/project.md` reporting standards selection +- Flag financial-reporting features when IFRS scope is unset +- Confirm IFRS is not mixed into PDPL-only privacy checks + +### 2. Monetary precision + +- Search for `number`, `float`, `double`, or JavaScript arithmetic used for money +- Confirm decimal-safe handling for monetary calculations +- Confirm currency codes are stored explicitly + +### 3. Accounting records + +- Confirm transaction date, posting date, reporting period, status, source + reference, and created/approved metadata where relevant +- Confirm posted records are not overwritten silently +- Confirm draft, posted, voided, and reversed states are distinct where relevant + +### 4. Audit trail + +- Confirm accounting mutations are traceable by actor, timestamp, source, and reason +- Confirm corrections use adjustments, reversals, or versioned history + +### 5. Period close and reversals + +- Confirm closed periods cannot be mutated without controlled adjustments +- Confirm void/reversal behavior exists for posted records + +### 6. Reports and exports + +- Confirm financial statements include reporting period and basis of preparation +- Confirm exports are reproducible from persisted source records +- Confirm comparative-period behavior exists or is explicitly out of scope + +### 7. 
Disclosure and notes + +- Flag missing accounting-policy note support where the app generates formal + financial statements +- Flag missing materiality/disclosure inputs where formal IFRS reports are claimed + +### 8. Tests + +- Confirm tests cover rounding, currency conversion, period boundaries, reversals, + report reproducibility, and audit logging + +### Output format + +Return findings grouped by severity: + +- **Critical** — immediate financial-reporting integrity risk +- **High** — must fix before audited or investor-facing reporting +- **Medium** — fix before expanding finance/reporting scope +- **Low** — documentation, process, or explicit-scope gap + +End with a summary checklist of compliant items. diff --git a/.claude/commands/init-project.md b/.claude/commands/init-project.md new file mode 100644 index 00000000000..4ebb47e7535 --- /dev/null +++ b/.claude/commands/init-project.md @@ -0,0 +1,63 @@ +--- +description: Initialize a new repo created from this template before non-trivial AI work begins. +--- + +Initialize the project context for this repo. + +Read in this order: + +1. `AGENTS.md` +2. `docs/project.md` +3. `.ai/rules/00-constitution.md` +4. `.ai/rules/17-aws-well-architected.md` + +Execution rules: + +1. Treat this as a required bootstrap step for newly created repos from the template. +2. Inspect `docs/project.md` and identify every placeholder, unchecked choice, or missing project-specific detail. +3. Ask only the minimum clarifying questions needed to replace template placeholders with real project context. +4. Update `docs/project.md` so it is specific enough for future agents to choose the correct stack, regulatory scope, services, and Well-Architected tradeoffs. +5. 
Capture at minimum: + - Product identity and one-liner + - Stack selection + - Domain/app naming + - Environment tiers (`2` or `3`) + - Primary users, language expectations, RTL need + - Regulator scope beyond PDPL + - External services in use + - Project-specific constraints and architectural guardrails +6. In the key constraints section, add any project-specific Well-Architected constraints that materially affect design or review. +7. In the key constraints section, add any project-specific PR-readiness constraints if the repo needs stricter docs, tests, or CI gates than the template default. +8. If the repo plans to enable the closed-loop PR auto-fix workflow, capture the GitHub App owner, trusted review bots, executor bot login, and which token path will be used (`CLAUDE_CODE_OAUTH_TOKEN` preferred, `ANTHROPIC_API_KEY` fallback). +9. Verify GitHub task tracking: + - `gh auth status` succeeds. + - Required labels from `docs/agent-orchestration.md` exist or are created. + - A GitHub Project is available with fields: `Status`, `Priority`, `Type`, + `Stack`, `Compliance`, and `Spec Path`. + - If Projects access is unavailable, record the gap and refuse non-trivial + derived-repo implementation. +10. Ensure the Codex project surface exists before marking bootstrap complete: + +- If `package.json` exposes `codex:sync`, run `bun codex:sync`. +- Verify `.codex/commands/` exists and contains wrappers for the repo's + `.claude/commands/*.md` files. +- Verify `.codex/environments/environment.toml` exists and mirrors the + current `package.json` scripts as Codex actions. +- If the repo was adopted from this template and `.codex` is missing, run + `bash scripts/adopt-template-rules.sh --target "$PWD" --profile minimal` + or the repo's selected adoption profile, then re-run the Codex sync/check. + +11. Refuse to mark bootstrap complete if `docs/project.md` is missing `Environment tiers: 2` or `Environment tiers: 3`. +12. Run `bun preflight`. 
Refuse to mark bootstrap complete while any preflight + check returns `error`; reference `.local/preflight/latest.md` in the final + summary. +13. On success, print the bilingual banner: + - EN: Preflight passed + - AR: اكتمل الفحص المسبق +14. End with: + +- Completed items +- Remaining gaps +- Risks if work proceeds before remaining gaps are resolved + +Do not write production code in this command. diff --git a/.claude/commands/open-pr.md b/.claude/commands/open-pr.md new file mode 100644 index 00000000000..4921713ca67 --- /dev/null +++ b/.claude/commands/open-pr.md @@ -0,0 +1,168 @@ +--- +description: Create a PR end-to-end and actively follow CI and review comments. +argument-hint: +--- + +Open PR workflow: $ARGUMENTS + +Use this command after implementation is complete on the task branch and local +validation passes. Cross-check with `.claude/commands/ship.md` before +recommending merge. + +GitHub Issues are the live work items. `docs/tasks/*.md` is the durable spec and +execution-log source for non-trivial work. `tasks.md` is only a legacy +compatibility pointer. + +Read in this order before running git/gh commands: + +1. `AGENTS.md` +2. `CLAUDE.md` +3. `.ai/rules/00-constitution.md` +4. `.ai/rules/17-aws-well-architected.md` +5. `.ai/rules/18-pr-readiness.md` +6. `.ai/rules/21-agent-orchestration.md` +7. Linked GitHub issue and `docs/tasks/.md` spec, when applicable +8. `.claude/commands/ship.md` + +Then execute these steps: + +### 1) Confirm branch and worktree + +```bash +git status --short --branch +``` + +Proceed only when one of these is true: + +- You are already on the appropriate task branch for the implemented work. +- You are on `main` with no implementation work yet, and the user asked this + command to create the branch. 
+ +## Readiness Checklist +- [ ] Relevant Markdown docs updated where needed +- [ ] No documentation impact +- [ ] Tests added or updated for this change +- [ ] No test impact +- [ ] All required CI checks are green +- [ ] GitHub issue linked +- [ ] Durable spec path linked when applicable +- [ ] Task execution log updated when applicable +EOF +)" +``` + +PR body requirements: + +- Explain what changed, why, how to test, risks, and rollback. +- Link the GitHub issue. +- Link the durable spec path when applicable. +- State docs impact and test impact explicitly. +- Keep PR size within policy unless the user approves a larger PR with a clear + split rationale. + +### 6) Follow CI status and PR comments (required) + +```bash +# Watch CI until all required checks complete. +gh pr checks --watch --interval 10 + +# Review comments after CI completes or on each re-run. +gh pr view --comments +gh api repos/{owner}/{repo}/pulls/{number}/comments +``` + +If comments or CI failures appear: + +- Address high-confidence review comments first (bugs, security, regressions). +- Re-run `bun check` (and `bun contracts:check` for Stack A when relevant). +- Push fixes, then repeat CI + comments monitoring until clear. 
+ +### 7) Final readiness gate (ship parity) + +Before recommending merge, confirm all `ship` checks are satisfied: + +- docs impact addressed or explicitly no-impact +- tests impact addressed or explicitly no-impact +- required CI checks green +- PDPL / AR+EN / Well-Architected checks complete +- no unresolved TODOs or blockers +- PR size remains within policy +- GitHub issue and durable spec status are ready for review/merge +- `docs/tasks/.md` execution log is current when applicable + +Return at the end: + +- Branch name +- Commit SHA +- PR URL +- Linked GitHub issue and spec path, if any +- Current required CI status +- Outstanding comment/action list (or `none`) diff --git a/.claude/commands/orchestrate.md b/.claude/commands/orchestrate.md new file mode 100644 index 00000000000..7cdcb7b86d9 --- /dev/null +++ b/.claude/commands/orchestrate.md @@ -0,0 +1,89 @@ +--- +description: Resume a docs/tasks plan through the canonical command lifecycle. +argument-hint: [phase-id] +--- + +Orchestrate: $ARGUMENTS + +Use this command to decide and run the next safe command for a durable +`docs/tasks` plan. This is a coordinator runbook only. It delegates to +existing commands and does not replace `/plan`, `/phase`, `/review`, +`/open-pr`, or `/ship`. + +Read in this order: + +1. `AGENTS.md` +2. `docs/project.md` +3. `review.md` +4. `.cursor/BUGBOT.md` +5. `.ai/rules/00-constitution.md` +6. `.ai/rules/17-aws-well-architected.md` +7. `.ai/rules/18-pr-readiness.md` +8. `.ai/rules/21-agent-orchestration.md` +9. `.ai/rules/01-stack-a-nestjs.md` or `.ai/rules/01-stack-b-convex.md` + when `docs/project.md` selects a stack +10. `docs/agent-orchestration.md` +11. `.claude/commands/phase.md` +12. `.claude/commands/review.md` +13. `.claude/commands/open-pr.md` +14. `.claude/commands/ship.md` +15. `docs/tasks/.md` + +Execution rules: + +1. Parse `$ARGUMENTS` as `` plus an optional ``. +2. 
If `docs/project.md`, `review.md`, or `.cursor/BUGBOT.md` is still in + template state, stop and run `/init-project` first unless the task is + product-agnostic template maintenance. +3. Run `bun run scripts/plan-status.ts <task-name> --github` when GitHub + access is available. If GitHub is unavailable, run the same command without + `--github` and report the offline fallback. +4. Select the next phase: + - If `<phase-id>` was supplied, use it. + - Otherwise, choose the first phase that is not completed and whose + dependency text is satisfied by completed earlier phases. +5. Stop before implementation if the chosen phase has unresolved gaps, open + questions, missing dependency evidence, or an unmerged prerequisite PR. +6. For implementation, invoke the `/phase <task-name> <phase-id>` workflow. + Follow `.claude/commands/phase.md` exactly and do not continue into another + phase unless the user explicitly asks. +7. After implementation and local validation, invoke `/review`. +8. If review is clear and the user wants a PR, invoke + `/open-pr <task-name>`. +9. Monitor CI and PR comments through `/open-pr`. Do not recommend merge until + required checks are green and actionable comments are addressed. +10. For final pre-merge readiness, invoke `/ship <task-name>`. +11. After merge, update the durable spec execution log and GitHub issue/project + state. Run `/extract-pr-learnings <pr-number>` when the PR produced + reusable lessons. + +Stop points: + +- Uninitialized bootstrap files outside template-maintenance work. +- Missing or ambiguous phase dependencies. +- Open plan questions that affect implementation. +- Dirty working tree with unrelated changes. +- Failed `bun check`, `bun pr:check`, or required CI. +- Open PR review comments or requested changes. +- Any request to enable `.github/ai-loop.yml` while legacy workflows still + exist or `executor_bot_login`/required secrets are not configured. +- Merge approval. The orchestrator may prepare `/ship` output but must not + merge unless the user explicitly asks. 
+ +Codex-equivalent workflow: + +- Codex follows this same runbook manually: inspect plan status, run the + equivalent shell/GitHub commands, edit files, validate, commit, push, and + report PR state. +- Codex local authentication is whatever `gh auth status` and local CLI tools + already provide. Do not write tokens or credentials to the repository. +- No model routing or multi-agent scheduler is implied by this command. + +Output format: + +- Current plan and selected phase. +- Command chosen next and why. +- Stop points encountered, or `none`. +- Files changed and validation results if implementation ran. +- PR URL and CI/comment status if a PR was opened. +- Next command to run. diff --git a/.claude/commands/pdpl-audit.md b/.claude/commands/pdpl-audit.md new file mode 100644 index 00000000000..e5c9b45c9d3 --- /dev/null +++ b/.claude/commands/pdpl-audit.md @@ -0,0 +1,57 @@ +--- +description: Scan the current codebase for PDPL (Oman Royal Decree 6/2022) compliance gaps. +--- + +Perform a PDPL compliance audit of the current codebase. + +Reference: `.ai/rules/15-pdpl-compliance.md` + +Scan for: + +### 1. PII in non-production contexts + +- Search tests, seeds, fixtures for real email patterns, phone numbers, national IDs +- Search git history for any PII accidentally committed +- Flag: `test@gmail.com`, Omani phone patterns (`+968 9...`), names in test data + +### 2. Logging + +- Search pino/console.log calls that include `email`, `phone`, `nationalId`, `ip` +- Confirm Sentry `beforeSend` strips user PII fields + +### 3. Data schema + +- Check DB schema for PII columns — confirm they have `-- pdpl:personal` comments +- Verify soft-delete pattern exists for user tables +- Confirm erasure path exists + +### 4. Privacy notice + +- Confirm Arabic-language privacy notice exists in `messages/ar.json` +- Confirm it covers: data types, purpose, legal basis, retention, rights, DPO contact + +### 5. 
Consent + +- Confirm consent flows are explicit and separate per purpose +- Confirm withdrawal mechanism exists + +### 6. Data residency + +- Note which cloud providers store data and in which regions +- Flag any Level 3/4 data stored outside Oman without documented TRA approval + +### 7. Breach response + +- Confirm production environment variables are set correctly (no localhost or development-only values where production URLs are required) +- Check if a breach notification runbook exists (`.local/incidents/` or similar) + +### Output format + +Return findings grouped by severity: + +- **Critical** — immediate compliance risk (e.g. real PII in tests) +- **High** — must fix before next release +- **Medium** — fix within 30 days +- **Low** — documentation or process gap + +End with a summary checklist of compliant items. diff --git a/.claude/commands/phase.md b/.claude/commands/phase.md new file mode 100644 index 00000000000..0300373a7fb --- /dev/null +++ b/.claude/commands/phase.md @@ -0,0 +1,21 @@ +--- +description: Implement a single phase from an existing docs/tasks spec and update execution logs. +argument-hint: <task-name> <phase-id> +--- + +Use `$ARGUMENTS` to identify the task and phase. + +Execution rules: + +1. If `docs/project.md` is still in template state, stop and run `/init-project` first. +2. Open `docs/tasks/<task-name>.md` and read the full spec. +3. Implement only the requested phase — do not silently scope-creep. +4. If the phase has missing prerequisites, stop and report them. +5. Update the task file after coding: + - Mark checklist items completed/in-progress. + - Append an execution log entry with files changed and key decisions. + - Record any Well-Architected tradeoffs or regressions introduced by the phase. + - Record any deviations from the plan. +6. Run `bun check` and report results. + +If requirements changed, update the plan before continuing. 
diff --git a/.claude/commands/plan-status.md b/.claude/commands/plan-status.md new file mode 100644 index 00000000000..b9bd4197f02 --- /dev/null +++ b/.claude/commands/plan-status.md @@ -0,0 +1,32 @@ +--- +description: Report current repo-spec progress and linked GitHub tracking state. +argument-hint: [task-name|--all] [--github] +--- + +Use `$ARGUMENTS` to select a specific task plan by name or `--all`. + +Execution rules: + +1. Read `AGENTS.md`, `docs/agent-orchestration.md`, and the relevant + `docs/tasks/*.md` plan files. `tasks.md` is legacy compatibility only. +2. Run `bun run scripts/plan-status.ts $ARGUMENTS` to get the current structured phase snapshot. +3. Inspect `git status --short` and any execution-log entries in the selected plan files before concluding that work is done. +4. Return a markdown table with these columns: + - `Plan` + - `GitHub` + - `Phase` + - `Status` + - `Dependencies` + - `Evidence` + - `Gaps` +5. Status must be conservative: + - Use `completed` only when the plan checklist/logs and the codebase both support that conclusion. + - Use `in-progress` when some work is done but the phase is not finished. + - Use `not-started` when the checklist is untouched. + - Use `unknown` when the plan format or repo evidence is insufficient. +6. If dependencies are missing from the plan, say `unspecified` instead of inventing them. +7. Use `--github` only when GitHub access is available and live linked issue + status is needed. Without it, keep deterministic offline repo-spec status. +8. Highlight gaps clearly after the table. Ask follow-up questions only if a blocking ambiguity remains. + +Do not implement code changes in this command. diff --git a/.claude/commands/plan.md b/.claude/commands/plan.md new file mode 100644 index 00000000000..7ddc73b0d5d --- /dev/null +++ b/.claude/commands/plan.md @@ -0,0 +1,180 @@ +--- +description: Create or update a spec-driven task plan as a local draft, then publish to a GitHub issue on confirm. 
+argument-hint: [--name ] [--publish] +--- + +From `$ARGUMENTS`, parse: + +- Optional `--name ` override (kebab-case; used for filename and + issue title when supplied) +- Optional flag anywhere: `--publish` (skip the interactive confirm step) +- Remaining text: `` + +`` may be: + +- a plain-language description of work to plan; +- a GitHub issue number such as `42` or `#42`; +- a GitHub issue URL. + +If `--name` is not supplied, derive a concise, descriptive kebab-case +`` from the issue title or description. The name should represent +the work, not a random codename. Prefer 3-6 meaningful words and remove filler +such as "add", "fix", "update", "implement" only when the remaining phrase is +still clear. + +Examples: + +- `Add /adopt-template command for existing JS/TS repos` -> + `adopt-template-existing-repos` +- `#45` with title "Add AI loop auth readiness and bootstrap checks" -> + `ai-loop-auth-readiness` +- `Bootstrap external product repo for T3 Kanban project console` -> + `t3-kanban-product-bootstrap` + +## Execution rules + +1. If `docs/project.md` is still in template state (for example `YOUR_PRODUCT_NAME`, `YOUR_APP_NAME`, unchecked stack choice, or placeholder user text), stop and run `/init-project` first. +2. Resolve `` before drafting: + - For a GitHub issue number or URL, run `gh issue view --json number,title,body,state,url` and use the issue title/body as the planning description. + - If the issue is closed, ask whether to plan a follow-up, reopen, or stop. Do not silently reuse a closed issue. + - For plain text, use the text as the planning description. +3. Derive `` unless `--name` was supplied. Before accepting the name: + - Search `docs/tasks/` for similar names and issue references. + - If a likely duplicate plan exists, ask whether to update it, choose a different name, or stop. + - Show the derived name to the user in the draft summary and allow it to be changed before publish. +4. 
Run the `/user-stories` discovery workflow for `` and the resolved planning description before drafting implementation phases. +5. Ask clarifying questions first if requirements are ambiguous. +6. Highlight concrete gaps, assumptions, and risks (including PDPL data handling if PII involved). +7. Record the expected impact and tradeoffs across all six AWS Well-Architected pillars. +8. Treat GitHub Issues/Projects as the live task system. Create a durable + `docs/tasks/.md` spec when work is non-trivial, + compliance-sensitive, architectural, multi-phase, security-sensitive, + reusable, or likely to need future agent resumption. Small low-risk work + may remain issue-only. +9. Prepare durable task plans using `docs/tasks/_template.md` as the structural source for the plan body. The canonical durable output is `docs/tasks/.md` only after the draft is confirmed and Step D promotes it. +10. For every phase, include an explicit `Dependencies` line. Use `none` or `unspecified` if there are no known dependencies yet. +11. Produce phased implementation steps with clear acceptance criteria. +12. Keep phases small enough to validate independently. +13. End with a "Ready to implement" checklist. + +Do not write production code in this command. + +## User-story discovery dependency + +`/plan` builds on `/user-stories`; `/user-stories` also remains available as a +standalone brainstorming command. + +Before Step A, run the same workflow defined in `.claude/commands/user-stories.md`: + +1. Look for an existing story draft at `.local/user-stories/.md`. +2. If it exists, read it and ask whether to use it as-is, edit it, or regenerate it from the resolved planning description. +3. If it does not exist, create `.local/user-stories/.md` using the `/user-stories` workflow. + If the draft is created or updated from `/plan`, set `source_plan: docs/tasks/.md` in the story draft frontmatter. +4. 
Continue only after the draft identifies the selected MVP stories, story IDs, assumptions, gaps, privacy/data handling notes, localization impact, and any IFRS/accounting impact. +5. In the task plan, reference the story draft path and selected story IDs in the `Objective`, `Requirements`, `Gaps and Questions`, `Assumptions`, `Risks`, and `Acceptance Criteria` sections as applicable. +6. Carry any unresolved user-story gaps forward into the plan. Do not silently drop `unspecified` dependencies, unknown data handling, unknown AR/EN or RTL impact, unknown regulator scope, or unknown IFRS/accounting impact. + +## Draft → Confirm → Publish flow + +This command does not write directly to `docs/tasks/` or to GitHub on the first pass. It stages a draft locally, waits for human confirmation, and only then promotes the file and creates or updates a GitHub issue. + +### Step A — write draft to `.local/` + +1. Ensure the directory `.local/tasks/` exists. `.local/` is gitignored (see `.gitignore`), so the draft never gets committed by accident. +2. Determine the canonical path: `docs/tasks/.md`. +3. If the canonical path already exists, read it first to preserve its YAML frontmatter (especially `github_issue`) and any prior `Execution Log` entries. Do not discard existing log entries when regenerating the plan. +4. Write the new draft to `.local/tasks/.md`. The draft must include a YAML frontmatter block at the top, even on first run: + + ``` + --- + task_name: + github_issue: + last_updated: + --- + ``` + + - On first creation from plain text, set `github_issue: null`. + - On first creation from an existing GitHub issue, set `github_issue: `. + - When updating an existing plan, copy the existing `github_issue` value forward unchanged. + +5. Show the user a short summary in the chat: path of the draft, the derived or overridden task name, issue source if any, the planning description, the phase names, the dependency lines, and any open gaps or assumptions. +6. 
Include the user-story draft path and selected story IDs in the summary. + +### Step B — confirm + +1. If `--publish` was passed, skip to Step C. +2. Otherwise, ask the user explicitly: "Publish `` plan to `docs/tasks/` and open or update its GitHub issue? (yes / rename / edit / cancel)". + - `yes` → proceed to Step C. + - `rename` → ask for the desired kebab-case task name, move the draft to `.local/tasks/.md`, update `task_name`, `source_plan`, and canonical paths, then re-prompt. + - `edit` → ask what to change, regenerate the draft in `.local/tasks/.md`, then re-prompt. + - `cancel` → stop. Leave the draft in `.local/tasks/` for later. Do not touch `docs/tasks/` or GitHub. + +### Step C — preflight GitHub access + +Before publishing, run these checks. Hard-fail (do not silently fall back) if any fail: + +1. `gh auth status` exits 0. +2. `gh repo view --json nameWithOwner -q .nameWithOwner` returns a value (proves origin resolves). +3. `gh label list --limit 100` succeeds. If required labels are missing, + create them. Required labels are listed in `docs/agent-orchestration.md`: + `plan`, `needs-triage`, `type:*`, `stack:*`, `priority:*`, and + `compliance:*`. Use `--force` only if a label exists with different color + or description and the user agrees. +4. Confirm the derived repo has a GitHub Project configured with the required + fields from `docs/agent-orchestration.md`: `Status`, `Priority`, `Type`, + `Stack`, `Compliance`, and `Spec Path`. If Projects API access is missing, + report the gap and leave the draft in `.local/tasks/`. + +If any of these fail, report the exact `gh` error, leave the draft in `.local/tasks/`, and stop. Do not partially publish. + +### Step D — promote draft to `docs/tasks/` + +1. Move (`mv`) `.local/tasks/.md` to `docs/tasks/.md`. Use `mv`, not copy, so we never end up with two divergent copies. +2. Confirm the file is staged for commit by the user — this command does not commit on its behalf. 
+ +### Step E — create or update the GitHub issue + +1. Read `github_issue` from the YAML frontmatter of `docs/tasks/.md` + and treat it as the current issue number when present. +2. Title: `[plan] `. +3. Body: the full file contents, including the YAML frontmatter. GitHub renders frontmatter as a fenced block, which is acceptable. Convert the phase task lists to GitHub task list syntax (`- [ ]`) — the template already uses this, so no conversion is normally needed. +4. If `github_issue` is `null` or missing: + - Run: `gh issue create --title "[plan] " --label plan --label needs-triage --body-file docs/tasks/.md`. + - Capture the issue number from the returned URL and use it as the + current issue number for the rest of Step E. + - Update the YAML frontmatter in `docs/tasks/.md` to set `github_issue: ` and `last_updated: `. Save. + - Run: `gh issue edit --body-file docs/tasks/.md` to sync the updated frontmatter back to the issue body. +5. If `github_issue` is set, use it as the current issue number: + - Run: `gh issue view --json state -q .state` to confirm the issue still exists and its state. + - If the issue is `CLOSED`, ask the user whether to reopen it (`gh issue reopen `) or open a new one. Do not silently reopen. + - If the user chooses to reopen it, run `gh issue reopen ` and keep using that issue number. + - If the user chooses to open a new one, run the same `gh issue create` command from Step E.4, capture the new issue number from the returned URL, update `github_issue: ` in the frontmatter, and replace the current issue number with ``. + - Update `last_updated` in the frontmatter. Save. + - Run: `gh issue edit --title "[plan] " --body-file docs/tasks/.md` to sync the title/body. +6. Print the issue URL back to the user. +7. Add or update the corresponding GitHub Project item when project access is + available. At minimum set: `Status=Ready`, `Type`, `Priority`, `Stack`, + `Compliance`, and `Spec Path=docs/tasks/.md`. 
+ +### Step F — report + +Final message to the user must include, in order: + +- Canonical file path: `docs/tasks/.md` +- Issue URL +- Whether the issue was created or updated +- Any open gaps or `unspecified` dependencies still in the plan that the user should resolve before `/execute-task` + +## Failure handling + +- Never leave the workspace in a half-published state. If Step E fails after Step D moved the file, leave `docs/tasks/.md` in place but report the `gh` failure clearly so the user can retry Step E manually. +- Do not retry `gh` commands in a loop. One attempt, surface the error verbatim, stop. +- Never use destructive force flags, `git reset`, or `gh issue delete` from this command. The only allowed `--force` use is the label-metadata update path in Step C after user agreement. + +## Notes + +- `.local/tasks/` is gitignored. Drafts are local-only by design. +- `docs/tasks/.md` remains the single source of truth. The GitHub issue is a mirror for visibility and triage. +- `/plan-status` continues to read `docs/tasks/` as the offline durable + fallback and can surface linked `github_issue` metadata. +- Re-running `/plan ` updates the existing issue in place via the `github_issue` frontmatter pointer. It does not open duplicates. diff --git a/.claude/commands/preflight.md b/.claude/commands/preflight.md new file mode 100644 index 00000000000..f14e8813c66 --- /dev/null +++ b/.claude/commands/preflight.md @@ -0,0 +1,26 @@ +--- +description: Run stack-aware integration preflight and write .local/preflight artefacts. +argument-hint: [--fix] [--write] [--only=] [--skip=] +--- + +Run the integration preflight for the current repo. + +Use `$ARGUMENTS` as additional flags. + +Steps: + +1. Read `AGENTS.md`, `docs/project.md`, and the relevant stack rule. +2. Run: + ```bash + bun preflight $ARGUMENTS + ``` +3. If the report has `error` results, stop and summarize the failing checks. +4. 
If the report has only `warn` / `info` / `skip` results, summarize them as + follow-ups. +5. Reference `.local/preflight/latest.md` and `.local/preflight/latest.json` + in any infrastructure-touching PR. + +Success banner: + +- EN: Preflight passed +- AR: اكتمل الفحص المسبق diff --git a/.claude/commands/review.md b/.claude/commands/review.md new file mode 100644 index 00000000000..8a4e5da591c --- /dev/null +++ b/.claude/commands/review.md @@ -0,0 +1,25 @@ +--- +description: Run a PR-style review focused on bugs, regressions, and validation gaps. +--- + +Perform a review of the current branch as if preparing for PR. + +Use a blame-free, lightweight conversation style. Treat the review as a risk-and-action exercise, not a compliance theater audit. + +If `docs/project.md` is still in template state, call that out as a setup gap before performing detailed review. + +Checklist: + +1. Identify behavioral regressions and high-risk changes first. +2. Check that relevant Markdown docs reflect the implementation, or that the PR explicitly marks no documentation impact. +3. Call out missing or weak tests, or missing no-test-impact justification. +4. Check for PDPL compliance: no real PII in tests, logs, or code. +5. Check for AR/EN string coverage if any user-facing text changed. +6. Check AWS Well-Architected impact: operational excellence, security, reliability, performance efficiency, cost optimization, and sustainability. +7. Run `bun check` and include failures/warnings in the report. +8. If a PR exists, verify GitHub checks are green (`bun pr:check` or `gh pr checks`). +9. Verify OpenAPI drift: if API changed, confirm `bun contracts:check` was run (Stack A). +10. Provide a concise severity-ordered findings list with file references. +11. Include open questions and assumptions that need confirmation. + +Prioritize correctness and release risk over style nits. 
diff --git a/.claude/commands/security-audit.md b/.claude/commands/security-audit.md new file mode 100644 index 00000000000..9bd865a69bc --- /dev/null +++ b/.claude/commands/security-audit.md @@ -0,0 +1,25 @@ +--- +description: Run a local deep security audit with Claude Code over a bounded target. +argument-hint: [target-directory] [--max-files <n>] [--dry-run] +--- + +Run the local-only security audit. + +Use `$ARGUMENTS` as script arguments. + +Rules: + +1. Do not run this through the raw Anthropic API. The audit intentionally uses + Claude Code CLI local context because raw API security probes may be + rate-limited or blocked. +2. Keep the target narrow, for example `convex/` or `apps/api/src/`. +3. Reports must land under `.local/security-audit/`, never project root. +4. Do not commit audit output. + +Run: + +```bash +bash scripts/security-audit.sh $ARGUMENTS +``` + +Review `.local/security-audit/<timestamp>/SUMMARY.md` when complete. diff --git a/.claude/commands/ship.md b/.claude/commands/ship.md new file mode 100644 index 00000000000..fa74d5b2fe4 --- /dev/null +++ b/.claude/commands/ship.md @@ -0,0 +1,37 @@ +--- +description: Final pre-merge readiness pass for a planned task. +argument-hint: <task-name> +--- + +Prepare the task for merge and handoff. + +Execution steps: + +1. If `docs/project.md` is still in template state, stop and run `/init-project` first. +2. Verify the associated `docs/tasks` spec is complete and current. +3. Verify the GitHub issue and Project item are linked and in `In Review` or + an equivalent pre-merge state. +4. Run `bun pr:check` if a PR exists; otherwise run `bun check` and report that CI could not be verified. +5. Confirm PR-readiness checklist: + - [ ] Relevant Markdown docs were updated, or the PR explicitly marks no documentation impact + - [ ] Tests were added or updated, or the PR explicitly marks no test impact + - [ ] All required CI checks are green +6. 
Confirm PDPL checklist: + - [ ] No real PII in tests, logs, or commits + - [ ] Arabic privacy strings updated if user-facing text changed +7. Confirm i18n checklist: + - [ ] AR and EN strings present for every new user-facing string +8. Confirm AWS Well-Architected checklist: + - [ ] Operational excellence impact and rollback are documented + - [ ] Security and data handling changes are reviewed + - [ ] Reliability failure modes are understood + - [ ] Performance impact is measured or bounded + - [ ] Cost impact is justified + - [ ] Sustainability impact is considered +9. Produce a release note style summary: + - What changed + - Why it changed + - Risks and rollback strategy +10. Confirm no unresolved TODOs or blockers. +11. Verify PR ≤ 400 LOC (excluding generated files). +12. If all gates pass, provide a PR title and bullet-point body. diff --git a/.claude/commands/upgrade-multitenant.md b/.claude/commands/upgrade-multitenant.md new file mode 100644 index 00000000000..ec5c738249e --- /dev/null +++ b/.claude/commands/upgrade-multitenant.md @@ -0,0 +1,90 @@ +--- +description: Guide through upgrading a single-tenant Better Auth setup to multi-tenant (organizations). +--- + +Guide the user through adding multi-tenant support using Better Auth's `organization` plugin. + +Reference: Better Auth v1.4 docs — https://www.better-auth.com/docs/plugins/organization + +## Step 1: Confirm prerequisites + +- [ ] Better Auth v1.4 is installed (not 1.5 — breaking changes exist) +- [ ] Single-tenant auth is working end-to-end +- [ ] Database migrations are in a working state + +## Step 2: Add organization plugin (Stack A) + +```ts +// apps/api/src/auth/auth.ts +import { betterAuth } from "better-auth"; +import { organization } from "better-auth/plugins"; + +export const auth = betterAuth({ + // ... 
existing config + plugins: [ + organization({ + allowUserToCreateOrganization: true, // or role-gated + creatorRole: "owner", + membershipRoles: ["owner", "admin", "member"], + }), + ], +}); +``` + +## Step 3: Run migrations + +Better Auth will generate new tables for organizations, memberships, and invitations. +Review the migration before applying: + +```bash +# Stack A (Drizzle) +bun drizzle-kit generate +# Review migration file +bun drizzle-kit migrate +``` + +## Step 4: Update auth middleware/guards + +All protected routes need to check both session AND organization membership: + +```ts +// apps/api/src/auth/auth.guard.ts +// After verifying session, verify org membership for org-scoped routes +const membership = await auth.api.getOrganizationMembership({ + headers: request.headers, +}); +if (!membership) throw new ForbiddenError("Not a member of this organization"); +``` + +## Step 5: Update data model + +Tag all resource tables with `organizationId` using a **staged migration** so existing rows are not broken: + +1. **Add nullable column** — `organizationId: uuid('organization_id').references(() => organizations.id)` (omit `.notNull()` until backfill completes). +2. **Backfill** — assign every existing row to a default organization (or run a one-off script / manual assignment per tenant). +3. **Enforce NOT NULL** — follow-up migration: alter column to `.notNull()`. +4. **Index** — `.index('by_org', ['organizationId'])` after the column is stable. + +Update all queries to filter by `organizationId`. 
+ +## Step 6: Invitation flow + +Better Auth provides invitation-based onboarding: + +- `auth.api.createInvitation()` — send invite +- `auth.api.acceptInvitation()` — accept invite link +- Email delivery: integrate with your email provider (Resend recommended) + +## Step 7: Update tests + +- Add test fixtures for org + membership +- Test: owner can invite, member cannot invite, non-member cannot access +- Test: data isolation between organizations + +## Step 8: PDPL note + +Multi-tenant adds a new data boundary. Ensure: + +- Privacy notice updated to mention organizational data sharing +- Audit logs include `organizationId` +- Data erasure scoped to org membership (leaving org ≠ deleting account) diff --git a/.claude/commands/user-stories.md b/.claude/commands/user-stories.md new file mode 100644 index 00000000000..cafd71449f5 --- /dev/null +++ b/.claude/commands/user-stories.md @@ -0,0 +1,144 @@ +--- +description: Brainstorm and draft user stories for a feature as a local-only input to planning. +argument-hint: +--- + +From `$ARGUMENTS`, parse: + +- First token: `` (kebab-case; used for the draft filename) +- Remaining text: `` + +## Execution rules + +1. Read `AGENTS.md`, `docs/project.md`, `review.md`, `.cursor/BUGBOT.md`, `.ai/rules/00-constitution.md`, `.ai/rules/17-aws-well-architected.md`, and `.ai/rules/18-pr-readiness.md`. +2. If `docs/project.md`, `review.md`, or `.cursor/BUGBOT.md` is still in template state, user-story brainstorming may continue, but the output must clearly mark the missing bootstrap context as gaps. Do not promote stories into `docs/tasks/` until bootstrap is complete. +3. Ask clarifying questions first when the feature goal, users, roles, business rules, data involved, or compliance scope are ambiguous. +4. Highlight concrete gaps, assumptions, risks, and regulatory questions. Include PDPL data handling whenever personal data may be collected, processed, stored, logged, exported, or displayed. +5. 
If the feature touches financial statements, ledgers, revenue recognition, leases, impairments, audit exports, or accounting records, read `.ai/rules/19-ifrs-compliance.md` and include IFRS-specific story and acceptance criteria gaps. +6. Consider AR/EN and RTL needs for every user-facing workflow. If language scope is unknown, mark it as an explicit gap. +7. Keep stories implementation-neutral unless the selected stack or existing architecture creates a real constraint. +8. Do not write production code in this command. + +## Story quality rules + +Each story must include: + +- Stable ID: `US-001`, `US-002`, etc. +- Persona or role +- Story statement: `As a <role>, I want <capability>, so that <benefit>.` +- Priority: `must`, `should`, `could`, or `defer` +- Dependencies: `none`, `unspecified`, or a concrete dependency +- Acceptance criteria using `Given / When / Then` +- Data and privacy notes +- Localization notes +- Open questions, if any + +Prefer small stories that can be validated independently. Split stories when a +single story mixes multiple roles, approval states, integrations, or compliance +controls. + +## Draft flow + +This command is local-only. It never writes to `docs/tasks/` and never creates a +GitHub issue. + +### Step A - inspect context + +1. Determine whether project bootstrap is complete. +2. Identify selected stack if known: + - Stack A: NestJS + Express + Drizzle ORM + PostgreSQL + - Stack B: Convex + - Unknown: mark stack-dependent decisions as gaps +3. Identify relevant regulators and reporting scope from `docs/project.md`. +4. Search existing `.local/user-stories/` and `docs/tasks/` files for related + story or plan drafts so updates do not duplicate prior work. + +### Step B - clarify + +Ask only the questions needed to produce useful stories. 
Cover: + +- Primary users and secondary actors +- Trigger and desired outcome +- Happy path +- Edge cases and failure states +- Permissions and approvals +- Data collected, displayed, exported, logged, or retained +- Notifications, reports, integrations, and audit trails +- AR/EN, RTL, and accessibility expectations +- PDPL, regulator, and IFRS/accounting implications +- MVP versus post-MVP scope + +If the user asks to brainstorm first, produce a first-pass draft with clearly +marked assumptions instead of blocking indefinitely. + +### Step C - write draft to `.local/` + +1. Ensure `.local/user-stories/` exists. +2. Write or update `.local/user-stories/<feature-name>.md`. +3. Include YAML frontmatter: + + ``` + --- + feature_name: <feature-name> + source_plan: null # set to docs/tasks/<task-name>.md when used by `/plan` + last_updated: <YYYY-MM-DD> + --- + ``` + +4. Use this structure: + + ``` + # User Stories: <feature-name> + + ## Goal + + ## Context Snapshot + + ## Personas + + ## Story Map + + ## User Stories + + ## Non-Functional Requirements + + ## Compliance and Data Handling + + ## Gaps and Questions + + ## Assumptions + + ## Risks + + ## Ready for /plan Checklist + ``` + +5. The `Ready for /plan Checklist` must include: + - [ ] Product bootstrap is complete, or remaining bootstrap gaps are accepted + - [ ] Target users and roles are named + - [ ] MVP stories are marked `must` + - [ ] Dependencies are concrete or marked `unspecified` + - [ ] PDPL handling is clear for all personal data + - [ ] AR/EN and RTL impact is clear + - [ ] IFRS/accounting impact is clear or explicitly out of scope + +### Step D - report + +Final response must include: + +- Draft path: `.local/user-stories/<feature-name>.md` +- Story IDs and titles grouped by priority +- Open gaps and assumptions +- Whether the draft is ready to feed into `/plan` +- If ready, suggest the exact `/plan <feature-description>` command the user can run + next. Include `--name <feature-name>` only when the user needs an explicit slug. + +## Relationship to `/plan` + +- `/user-stories` is the standalone discovery command. 
+- `/plan` must run the same story-discovery workflow before writing a task plan. +- A standalone story draft does not bypass `/plan` bootstrap checks; formal task plans still require initialized project context. +- When `/plan` creates or updates a story draft, set `source_plan` to `docs/tasks/<task-name>.md`. +- When `/plan` uses a story draft, the plan must reference the draft path, + summarize selected story IDs, and carry unresolved gaps into the plan's + `Gaps and Questions`, `Assumptions`, `Risks`, and acceptance criteria sections. diff --git a/.codex/commands/env-audit.md b/.codex/commands/env-audit.md new file mode 100644 index 00000000000..0089f0bd041 --- /dev/null +++ b/.codex/commands/env-audit.md @@ -0,0 +1,19 @@ + +--- +description: "Run environment-topology preflight checks only." +argument-hint: "[--fix] [--write]" +--- + +# /env-audit for Codex + +Canonical runbook: `.claude/commands/env-audit.md` + +When the user invokes `/env-audit` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/env-audit.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/execute-task.md b/.codex/commands/execute-task.md new file mode 100644 index 00000000000..030255daa2c --- /dev/null +++ b/.codex/commands/execute-task.md @@ -0,0 +1,19 @@ + +--- +description: "Execute an issue-only or legacy numbered task end-to-end." 
+argument-hint: " [optional details]" +--- + +# /execute-task for Codex + +Canonical runbook: `.claude/commands/execute-task.md` + +When the user invokes `/execute-task` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/execute-task.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/extract-pr-learnings.md b/.codex/commands/extract-pr-learnings.md new file mode 100644 index 00000000000..82d65c74708 --- /dev/null +++ b/.codex/commands/extract-pr-learnings.md @@ -0,0 +1,19 @@ + +--- +description: "Extract non-obvious learnings from a merged PR and file a structured issue." +argument-hint: "[pr-number]" +--- + +# /extract-pr-learnings for Codex + +Canonical runbook: `.claude/commands/extract-pr-learnings.md` + +When the user invokes `/extract-pr-learnings` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/extract-pr-learnings.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/ifrs-audit.md b/.codex/commands/ifrs-audit.md new file mode 100644 index 00000000000..38051f00f12 --- /dev/null +++ b/.codex/commands/ifrs-audit.md @@ -0,0 +1,18 @@ + +--- +description: "Scan the current codebase for IFRS Accounting Standards compliance gaps." 
+--- + +# /ifrs-audit for Codex + +Canonical runbook: `.claude/commands/ifrs-audit.md` + +When the user invokes `/ifrs-audit` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/ifrs-audit.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/init-project.md b/.codex/commands/init-project.md new file mode 100644 index 00000000000..b6497ef9f87 --- /dev/null +++ b/.codex/commands/init-project.md @@ -0,0 +1,18 @@ + +--- +description: "Initialize a new repo created from this template before non-trivial AI work begins." +--- + +# /init-project for Codex + +Canonical runbook: `.claude/commands/init-project.md` + +When the user invokes `/init-project` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/init-project.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/open-pr.md b/.codex/commands/open-pr.md new file mode 100644 index 00000000000..5424998b625 --- /dev/null +++ b/.codex/commands/open-pr.md @@ -0,0 +1,19 @@ + +--- +description: "Create a PR end-to-end and actively follow CI and review comments." 
+argument-hint: " " +--- + +# /open-pr for Codex + +Canonical runbook: `.claude/commands/open-pr.md` + +When the user invokes `/open-pr` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/open-pr.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/orchestrate.md b/.codex/commands/orchestrate.md new file mode 100644 index 00000000000..d00b1746550 --- /dev/null +++ b/.codex/commands/orchestrate.md @@ -0,0 +1,19 @@ + +--- +description: "Resume a docs/tasks plan through the canonical command lifecycle." +argument-hint: " [phase-id]" +--- + +# /orchestrate for Codex + +Canonical runbook: `.claude/commands/orchestrate.md` + +When the user invokes `/orchestrate` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/orchestrate.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/pdpl-audit.md b/.codex/commands/pdpl-audit.md new file mode 100644 index 00000000000..22235c9c06b --- /dev/null +++ b/.codex/commands/pdpl-audit.md @@ -0,0 +1,18 @@ + +--- +description: "Scan the current codebase for PDPL (Oman Royal Decree 6/2022) compliance gaps." 
+--- + +# /pdpl-audit for Codex + +Canonical runbook: `.claude/commands/pdpl-audit.md` + +When the user invokes `/pdpl-audit` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/pdpl-audit.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/phase.md b/.codex/commands/phase.md new file mode 100644 index 00000000000..6bbb9915c4a --- /dev/null +++ b/.codex/commands/phase.md @@ -0,0 +1,19 @@ + +--- +description: "Implement a single phase from an existing docs/tasks spec and update execution logs." +argument-hint: " " +--- + +# /phase for Codex + +Canonical runbook: `.claude/commands/phase.md` + +When the user invokes `/phase` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/phase.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/plan-status.md b/.codex/commands/plan-status.md new file mode 100644 index 00000000000..5962ef4c5a0 --- /dev/null +++ b/.codex/commands/plan-status.md @@ -0,0 +1,19 @@ + +--- +description: "Report current repo-spec progress and linked GitHub tracking state." 
+argument-hint: "[task-name|--all] [--github]" +--- + +# /plan-status for Codex + +Canonical runbook: `.claude/commands/plan-status.md` + +When the user invokes `/plan-status` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/plan-status.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/plan.md b/.codex/commands/plan.md new file mode 100644 index 00000000000..1430247fbe7 --- /dev/null +++ b/.codex/commands/plan.md @@ -0,0 +1,19 @@ + +--- +description: "Create or update a spec-driven task plan as a local draft, then publish to a GitHub issue on confirm." +argument-hint: "[--name ] [--publish]" +--- + +# /plan for Codex + +Canonical runbook: `.claude/commands/plan.md` + +When the user invokes `/plan` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/plan.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/preflight.md b/.codex/commands/preflight.md new file mode 100644 index 00000000000..7c202b65575 --- /dev/null +++ b/.codex/commands/preflight.md @@ -0,0 +1,19 @@ + +--- +description: "Run stack-aware integration preflight and write .local/preflight artefacts." 
+argument-hint: "[--fix] [--write] [--only=] [--skip=]" +--- + +# /preflight for Codex + +Canonical runbook: `.claude/commands/preflight.md` + +When the user invokes `/preflight` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/preflight.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/review.md b/.codex/commands/review.md new file mode 100644 index 00000000000..9b20272f0f4 --- /dev/null +++ b/.codex/commands/review.md @@ -0,0 +1,18 @@ + +--- +description: "Run a PR-style review focused on bugs, regressions, and validation gaps." +--- + +# /review for Codex + +Canonical runbook: `.claude/commands/review.md` + +When the user invokes `/review` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/review.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/security-audit.md b/.codex/commands/security-audit.md new file mode 100644 index 00000000000..b2547faf6d1 --- /dev/null +++ b/.codex/commands/security-audit.md @@ -0,0 +1,19 @@ + +--- +description: "Run a local deep security audit with Claude Code over a bounded target." 
+argument-hint: "[target-directory] [--max-files ] [--dry-run]" +--- + +# /security-audit for Codex + +Canonical runbook: `.claude/commands/security-audit.md` + +When the user invokes `/security-audit` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/security-audit.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/ship.md b/.codex/commands/ship.md new file mode 100644 index 00000000000..15d4bc3f9e6 --- /dev/null +++ b/.codex/commands/ship.md @@ -0,0 +1,19 @@ + +--- +description: "Final pre-merge readiness pass for a planned task." +argument-hint: "" +--- + +# /ship for Codex + +Canonical runbook: `.claude/commands/ship.md` + +When the user invokes `/ship` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/ship.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/upgrade-multitenant.md b/.codex/commands/upgrade-multitenant.md new file mode 100644 index 00000000000..8419254872d --- /dev/null +++ b/.codex/commands/upgrade-multitenant.md @@ -0,0 +1,18 @@ + +--- +description: "Guide through upgrading a single-tenant Better Auth setup to multi-tenant (organizations)." 
+--- + +# /upgrade-multitenant for Codex + +Canonical runbook: `.claude/commands/upgrade-multitenant.md` + +When the user invokes `/upgrade-multitenant` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/upgrade-multitenant.md` file, then run `bun codex:sync`. diff --git a/.codex/commands/user-stories.md b/.codex/commands/user-stories.md new file mode 100644 index 00000000000..72a99ec4f77 --- /dev/null +++ b/.codex/commands/user-stories.md @@ -0,0 +1,19 @@ + +--- +description: "Brainstorm and draft user stories for a feature as a local-only input to planning." +argument-hint: " " +--- + +# /user-stories for Codex + +Canonical runbook: `.claude/commands/user-stories.md` + +When the user invokes `/user-stories` in Codex, treat the text after the +command name as `$ARGUMENTS`, read the canonical runbook above, and execute the +same workflow from this repository root. + +Rules: +- Follow `AGENTS.md` first, then the canonical command runbook. +- Preserve the command's existing branch, GitHub, validation, and safety gates. +- Do not edit this generated wrapper directly. Update the canonical + `.claude/commands/user-stories.md` file, then run `bun codex:sync`. diff --git a/.codex/environments/environment.toml b/.codex/environments/environment.toml new file mode 100644 index 00000000000..fcacbc5d046 --- /dev/null +++ b/.codex/environments/environment.toml @@ -0,0 +1,225 @@ +# THIS IS AUTOGENERATED. 
DO NOT EDIT MANUALLY +version = 1 +name = "@t3tools/monorepo" + +[setup] +script = "" + +[setup.darwin] +script = ''' +cd "$CODEX_WORKTREE_PATH" + +bun install --frozen-lockfile +bun rwx:check +bun codex:check +''' + +[cleanup] +script = "" + +[cleanup.darwin] +script = ''' +cd "$CODEX_WORKTREE_PATH" + +rm -rf .local/tmp .cache/tmp +''' + +[[actions]] +name = "prepare" +icon = "tool" +command = "bun run prepare" + +[[actions]] +name = "Run All" +icon = "play" +command = "bun run dev" + +[[actions]] +name = "dev:server" +icon = "play" +command = "bun run dev:server" + +[[actions]] +name = "dev:web" +icon = "play" +command = "bun run dev:web" + +[[actions]] +name = "dev:marketing" +icon = "play" +command = "bun run dev:marketing" + +[[actions]] +name = "dev:desktop" +icon = "play" +command = "bun run dev:desktop" + +[[actions]] +name = "start" +icon = "tool" +command = "bun run start" + +[[actions]] +name = "start:desktop" +icon = "tool" +command = "bun run start:desktop" + +[[actions]] +name = "start:marketing" +icon = "tool" +command = "bun run start:marketing" + +[[actions]] +name = "start:mock-update-server" +icon = "tool" +command = "bun run start:mock-update-server" + +[[actions]] +name = "Build" +icon = "tool" +command = "bun run build" + +[[actions]] +name = "build:marketing" +icon = "tool" +command = "bun run build:marketing" + +[[actions]] +name = "build:desktop" +icon = "tool" +command = "bun run build:desktop" + +[[actions]] +name = "Typecheck" +icon = "tool" +command = "bun run typecheck" + +[[actions]] +name = "Lint" +icon = "tool" +command = "bun run lint" + +[[actions]] +name = "Test" +icon = "tool" +command = "bun run test" + +[[actions]] +name = "test:desktop-smoke" +icon = "tool" +command = "bun run test:desktop-smoke" + +[[actions]] +name = "fmt" +icon = "tool" +command = "bun run fmt" + +[[actions]] +name = "fmt:check" +icon = "tool" +command = "bun run fmt:check" + +[[actions]] +name = "build:contracts" +icon = "tool" +command = "bun run 
build:contracts" + +[[actions]] +name = "dist:desktop:artifact" +icon = "tool" +command = "bun run dist:desktop:artifact" + +[[actions]] +name = "dist:desktop:dmg" +icon = "tool" +command = "bun run dist:desktop:dmg" + +[[actions]] +name = "dist:desktop:dmg:arm64" +icon = "tool" +command = "bun run dist:desktop:dmg:arm64" + +[[actions]] +name = "dist:desktop:dmg:x64" +icon = "tool" +command = "bun run dist:desktop:dmg:x64" + +[[actions]] +name = "dist:desktop:linux" +icon = "tool" +command = "bun run dist:desktop:linux" + +[[actions]] +name = "dist:desktop:win" +icon = "tool" +command = "bun run dist:desktop:win" + +[[actions]] +name = "dist:desktop:win:arm64" +icon = "tool" +command = "bun run dist:desktop:win:arm64" + +[[actions]] +name = "dist:desktop:win:x64" +icon = "tool" +command = "bun run dist:desktop:win:x64" + +[[actions]] +name = "release:smoke" +icon = "tool" +command = "bun run release:smoke" + +[[actions]] +name = "clean" +icon = "tool" +command = "bun run clean" + +[[actions]] +name = "sync:vscode-icons" +icon = "tool" +command = "bun run sync:vscode-icons" + +[[actions]] +name = "Check" +icon = "tool" +command = "bun run check" + +[[actions]] +name = "Validate Local" +icon = "tool" +command = "bun run validate:local" + +[[actions]] +name = "Preflight" +icon = "tool" +command = "bun run preflight" + +[[actions]] +name = "env-audit" +icon = "tool" +command = "bun run env-audit" + +[[actions]] +name = "PR Check" +icon = "tool" +command = "bun run pr:check" + +[[actions]] +name = "Adoption Check" +icon = "tool" +command = "bun run adopt:check" + +[[actions]] +name = "security:audit" +icon = "tool" +command = "bun run security:audit" + +[[actions]] +name = "Codex Sync" +icon = "tool" +command = "bun run codex:sync" + +[[actions]] +name = "Codex Check" +icon = "tool" +command = "bun run codex:check" diff --git a/.cursor/BUGBOT.md b/.cursor/BUGBOT.md new file mode 100644 index 00000000000..963feb73ec2 --- /dev/null +++ b/.cursor/BUGBOT.md @@ -0,0 +1,49 @@ 
+# Bugbot Project Brief + +## Repository Context + +- **Repository mode**: product +- **Team/owner**: MohAnghabo +- **Default branch**: main + +## Review Priorities (highest first) + +1. Secret, token, credential, or real PII exposure in code, logs, fixtures, screenshots, comments, or docs. +2. Unsafe local command execution, missing confirmation gates, cwd escape, timeout gaps, or weak redaction. +3. GitHub Projects/task-state drift, duplicated task state, or writes that bypass confirmation. +4. PR watcher or auto-fix behavior that can spam comments, repeat commits, or run on untrusted signals. +5. Missing AR/EN strings or RTL regressions in user-facing UI. +6. Regressions to T3 Code package boundaries, provider session reliability, WebSocket event handling, or desktop startup flow. +7. Flag PRs that remove or bypass `bun preflight`, the `preflight` command, `/preflight`, or bootstrap enforcement. +8. Flag non-trivial PRs that omit GitHub issue linkage or fail to update the linked durable spec execution log. + +## Focus Paths + +- Include: `apps/**` +- Include: `packages/**` +- Include: `scripts/**` +- Include: `.github/**` +- Include: `.ai/rules/**` +- Include: `docs/**` +- Exclude: `apps/**/dist/**` +- Exclude: `packages/**/dist/**` + +## Bugbot Expectations + +- Flag high-confidence bugs, security issues, and reliability risks first. +- Avoid low-signal style-only comments unless they can cause defects. +- Re-check existing PR comments to avoid duplicates. +- Prefer actionable fixes with concrete code-level guidance. + +## Blocking Rules + +- No secrets or real PII in code, logs, fixtures, docs, screenshots, comments, or PR text. +- No unsafe GitHub Actions patterns using untrusted event input in shell commands. +- No project rule regressions in `AGENTS.md`, `CLAUDE.md`, `.cursorrules`, `.ai/rules/*`. +- No removal of required validation, PR readiness, preflight, or env-audit surfaces without an explicit replacement in the same PR. 
+ +## Project Constraints + +- TypeScript strict mode; no `any`. +- Zod or Effect Schema at runtime boundaries, following the local package pattern. +- Keep changes small, reversible, tested, and documented. diff --git a/.cursorrules b/.cursorrules new file mode 100644 index 00000000000..d3d06f39c1b --- /dev/null +++ b/.cursorrules @@ -0,0 +1,25 @@ +Before non-trivial work, read `AGENTS.md`, `docs/project.md`, `review.md`, +`.cursor/BUGBOT.md`, `.ai/rules/00-constitution.md`, +`.ai/rules/17-aws-well-architected.md`, `.ai/rules/18-pr-readiness.md`, +`.ai/rules/21-agent-orchestration.md`, +then read the selected stack rule: +`.ai/rules/01-stack-a-nestjs.md` or `.ai/rules/01-stack-b-convex.md`. +For financial statements, accounting records, ledgers, revenue recognition, +leases, impairments, audit exports, or accounting reports, also read +`.ai/rules/19-ifrs-compliance.md`. + +If `docs/project.md`, `review.md`, or `.cursor/BUGBOT.md` still has template +placeholders, stop and initialize them first. + +Exception: template-maintenance updates that are intentionally product-agnostic +(for example shared repo rules, workflow automation, scaffolding templates, and +agent guidance) may proceed before these files are initialized, as long as the +change does not implement product/app-specific behavior. + +Before approving or merging a PR: +- relevant Markdown docs must be updated or explicitly marked no-impact +- tests must be added/updated or explicitly marked no-impact +- required CI checks must be green +- use `/open-pr` for branch → commit → push → PR + CI/comments follow-up, then `/ship` before merge + +See `AGENTS.md` and `.ai/rules/` for the full policy. 
diff --git a/.github/ai-loop.yml b/.github/ai-loop.yml new file mode 100644 index 00000000000..6c2b7f9576a --- /dev/null +++ b/.github/ai-loop.yml @@ -0,0 +1,18 @@ +{ + "schema_version": 1, + "enabled": false, + "trusted_review_bots": ["coderabbitai[bot]"], + "trusted_humans": [], + "human_trigger_phrase": "/autofix", + "executor_owner": "claude", + "executor_bot_login": "", + "attempt_budget_per_generation": 1, + "debounce_seconds": 90, + "debounce_max_seconds": 300, + "dispatch_grace_seconds": 120, + "executor_timeout_seconds": 1200, + "pause_label": "ai-fix:paused", + "required_ci_checks": ["validate"], + "prepush_commands": ["bun check"], + "legacy_workflows_present": [] +} diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 76aac7e4d85..efd82ac845d 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,33 +1,33 @@ - - -## What Changed - - +- Commands run: +- Result: -## Why +## Risks - +- Risks: +- Rollback: -## UI Changes +## Readiness Checklist - +- [ ] Relevant Markdown docs updated where needed (`README*`, `CONTRIBUTING*`, `SECURITY*`, `.github/**/*.md`, `docs/**`, `AGENTS.md`, `CLAUDE.md`, `review.md`, `.cursor/BUGBOT.md`) +- [ ] No documentation impact +- [ ] Tests added or updated for this change +- [ ] No test impact +- [ ] `bun check` passes locally +- [ ] All required CI checks are green -## Checklist - -- [ ] This PR is small and focused -- [ ] I explained what changed and why -- [ ] I included before/after screenshots for any UI changes -- [ ] I included a video for animation/interaction changes + diff --git a/.github/workflows/ai-fix-executor-claude.yml b/.github/workflows/ai-fix-executor-claude.yml new file mode 100644 index 00000000000..5bd8eefb14a --- /dev/null +++ b/.github/workflows/ai-fix-executor-claude.yml @@ -0,0 +1,140 @@ +name: ai-fix-executor-claude + +on: + workflow_dispatch: + inputs: + pr_number: + required: true + type: string + head_ref: + required: true + 
type: string + head_sha: + required: true + type: string + generation_sha: + required: true + type: string + finding_set_fingerprint: + required: true + type: string + findings_b64: + required: true + type: string + +concurrency: + group: ai-fix-executor-pr-${{ inputs.pr_number }} + cancel-in-progress: false + +jobs: + ai-fix-executor-claude: + runs-on: ubuntu-latest + permissions: + contents: write + issues: write + pull-requests: write + actions: read + id-token: write + steps: + - name: Create GitHub App token + id: app-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.AI_FIX_APP_ID }} + private-key: ${{ secrets.AI_FIX_APP_PRIVATE_KEY }} + + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.head_ref }} + fetch-depth: 0 + token: ${{ steps.app-token.outputs.token }} + + - uses: oven-sh/setup-bun@v2 + + - name: Read executor bot login + id: meta + shell: bash + run: | + echo "executor_bot_login=$(jq -r '.executor_bot_login' .github/ai-loop.yml)" >> "$GITHUB_OUTPUT" + + - name: Mark sticky state as running + env: + GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} + AI_LOOP_PR_NUMBER: ${{ inputs.pr_number }} + run: bun run scripts/ai-loop/executor-state.ts start + + - name: Record starting SHA + id: before + shell: bash + run: echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" + + - name: Decode findings + id: findings + shell: bash + run: | + findings_file="$RUNNER_TEMP/ai-loop-findings.json" + # NOTE(review): '${{ inputs.findings_b64 }}' is interpolated directly into shell; a non-base64 payload containing a quote could inject commands — prefer passing it via an env: mapping. TODO confirm the router guarantees valid base64. + printf '%s' '${{ inputs.findings_b64 }}' | base64 --decode > "$findings_file" + echo "path=$findings_file" >> "$GITHUB_OUTPUT" + + - name: Build executor prompt + id: prompt + shell: bash + run: | + findings_json="$(cat '${{ steps.findings.outputs.path }}')" + { + # NOTE(review): heredoc reconstructed — markup stripping destroyed the original "prompt<<EOF ... EOF" body, so any instruction text that preceded the findings JSON was lost and must be restored. + echo "prompt<<AI_LOOP_PROMPT_EOF" + printf '%s\n' "$findings_json" + echo "AI_LOOP_PROMPT_EOF" + } >> "$GITHUB_OUTPUT" + + - name: Run Claude executor + id: claude + continue-on-error: true + uses: anthropics/claude-code-action@v1 + with: + claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} + anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} + 
github_token: ${{ steps.app-token.outputs.token }} + bot_name: ${{ steps.meta.outputs.executor_bot_login }} + use_commit_signing: true + additional_permissions: "actions: read" + prompt: ${{ steps.prompt.outputs.prompt }} + + - name: Detect final executor state + id: result + shell: bash + run: | + after_sha="$(git rev-parse HEAD)" + echo "after_sha=$after_sha" >> "$GITHUB_OUTPUT" + if [ "$after_sha" != '${{ steps.before.outputs.sha }}' ]; then + echo "status=pushed_pending" >> "$GITHUB_OUTPUT" + echo "blocked_reason=" >> "$GITHUB_OUTPUT" + elif [ '${{ steps.claude.outcome }}' = 'success' ]; then + echo "status=blocked" >> "$GITHUB_OUTPUT" + echo "blocked_reason=no_safe_fix" >> "$GITHUB_OUTPUT" + else + echo "status=blocked" >> "$GITHUB_OUTPUT" + echo "blocked_reason=executor_failed" >> "$GITHUB_OUTPUT" + fi + + - name: Finalize sticky state + if: always() + env: + GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} + AI_LOOP_PR_NUMBER: ${{ inputs.pr_number }} + AI_LOOP_FINAL_STATUS: ${{ steps.result.outputs.status }} + AI_LOOP_CURRENT_SHA: ${{ steps.result.outputs.after_sha }} + AI_LOOP_FINDING_SET_FINGERPRINT: ${{ inputs.finding_set_fingerprint }} + AI_LOOP_BLOCKED_REASON: ${{ steps.result.outputs.blocked_reason }} + run: bun run scripts/ai-loop/executor-state.ts finish diff --git a/.github/workflows/ai-fix-router.yml b/.github/workflows/ai-fix-router.yml new file mode 100644 index 00000000000..0cf648ce6f9 --- /dev/null +++ b/.github/workflows/ai-fix-router.yml @@ -0,0 +1,67 @@ +name: ai-fix-router + +on: + workflow_run: + workflows: + - ci + - pr-readiness + types: + - completed + pull_request_review: + types: + - submitted + pull_request_review_comment: + types: + - created + issue_comment: + types: + - created + +concurrency: + group: ai-fix-router-pr-${{ github.event.pull_request.number || github.event.issue.number || github.event.workflow_run.pull_requests[0].number || github.run_id }} + cancel-in-progress: true + +jobs: + ai-fix-router: + runs-on: 
ubuntu-latest + permissions: + contents: read + issues: write + pull-requests: read + actions: read + id-token: write + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - uses: oven-sh/setup-bun@v2 + + - name: Read AI loop gate + id: gate + shell: bash + env: + APP_ID: ${{ secrets.AI_FIX_APP_ID }} + APP_KEY: ${{ secrets.AI_FIX_APP_PRIVATE_KEY }} + run: | + echo "enabled=$(jq -r '.enabled' .github/ai-loop.yml)" >> "$GITHUB_OUTPUT" + if [ -n "$APP_ID" ] && [ -n "$APP_KEY" ]; then + echo "has_app_credentials=true" >> "$GITHUB_OUTPUT" + else + echo "has_app_credentials=false" >> "$GITHUB_OUTPUT" + fi + + - name: Create GitHub App token + id: app-token + if: steps.gate.outputs.enabled == 'true' && steps.gate.outputs.has_app_credentials == 'true' + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.AI_FIX_APP_ID }} + private-key: ${{ secrets.AI_FIX_APP_PRIVATE_KEY }} + + - name: Route AI findings + if: steps.gate.outputs.enabled == 'true' + env: + GITHUB_TOKEN: ${{ github.token }} + AI_LOOP_DISPATCH_TOKEN: ${{ steps.app-token.outputs.token }} + run: bun run scripts/ai-loop/router.ts diff --git a/.github/workflows/ai-review.yml b/.github/workflows/ai-review.yml new file mode 100644 index 00000000000..d4bf7037e88 --- /dev/null +++ b/.github/workflows/ai-review.yml @@ -0,0 +1,50 @@ +name: ai-review + +on: + pull_request: + types: + - opened + - synchronize + - ready_for_review + - reopened + +concurrency: + group: ai-review-pr-${{ github.event.pull_request.number }} + cancel-in-progress: true + +jobs: + ai-review: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + id-token: write + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Read AI loop gate + id: gate + shell: bash + run: | + echo "enabled=$(jq -r '.enabled' .github/ai-loop.yml)" >> "$GITHUB_OUTPUT" + echo "executor_bot_login=$(jq -r '.executor_bot_login' .github/ai-loop.yml)" >> "$GITHUB_OUTPUT" + if git log -1 --pretty=%B | 
grep -q 'X-Autofix-Executor: claude'; then + echo "skip_for_fixer_child=true" >> "$GITHUB_OUTPUT" + else + echo "skip_for_fixer_child=false" >> "$GITHUB_OUTPUT" + fi + + - name: Review PR with Claude + if: steps.gate.outputs.enabled == 'true' && steps.gate.outputs.skip_for_fixer_child != 'true' + uses: anthropics/claude-code-action@v1 + with: + claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} + anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} + github_token: ${{ github.token }} + use_sticky_comment: true + prompt: | + Review this pull request for bugs, regressions, CI risks, docs drift, and missing tests. + Prioritize correctness and release risk over style comments. + Do not repeat issues that are already obvious from failing CI. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e3329b1dad9..31717c6da50 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,8 +8,8 @@ on: jobs: quality: - name: Format, Lint, Typecheck, Test, Browser Test, Build - runs-on: blacksmith-8vcpu-ubuntu-2404 + name: Validate + runs-on: ubuntu-24.04 timeout-minutes: 10 steps: - name: Checkout @@ -76,7 +76,7 @@ jobs: release_smoke: name: Release Smoke - runs-on: blacksmith-8vcpu-ubuntu-2404 + runs-on: ubuntu-24.04 timeout-minutes: 10 steps: - name: Checkout diff --git a/.github/workflows/pr-readiness.yml b/.github/workflows/pr-readiness.yml new file mode 100644 index 00000000000..62d49e44a64 --- /dev/null +++ b/.github/workflows/pr-readiness.yml @@ -0,0 +1,36 @@ +name: pr-readiness + +on: + pull_request: + types: + - opened + - edited + - reopened + - synchronize + - ready_for_review + +concurrency: + group: pr-readiness-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + pr-readiness: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: read + checks: read + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Validate PR docs and test readiness + env: 
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_READINESS_BASE_SHA: ${{ github.event.pull_request.base.sha }} + PR_READINESS_HEAD_SHA: ${{ github.event.pull_request.head.sha }} + PR_READINESS_BODY: ${{ github.event.pull_request.body }} + PR_READINESS_SKIP_CI: "0" + PR_READINESS_REQUIRED_CHECKS: "Validate,Release Smoke" + run: bash scripts/check-pr-readiness.sh diff --git a/.gitignore b/.gitignore index 9e14e917910..b63c7f3f910 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ node_modules .bun .turbo +.local/ .DS_Store *.log *.tsbuildinfo diff --git a/.oxfmtrc.json b/.oxfmtrc.json index 3d65d9c93bb..dd65eca3dde 100644 --- a/.oxfmtrc.json +++ b/.oxfmtrc.json @@ -11,7 +11,11 @@ "**/routeTree.gen.ts", "apps/web/public/mockServiceWorker.js", "apps/web/src/lib/vendor/qrcodegen.ts", - "*.icon/**" + "*.icon/**", + ".local", + ".codex", + ".github/workflows/ai-fix-executor-claude.yml", + ".github/ai-loop.yml" ], "sortPackageJson": {}, "overrides": [ diff --git a/AGENTS.md b/AGENTS.md index cea5090cce0..3c6830e1436 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,53 +1,205 @@ -# AGENTS.md - -## Task Completion Requirements - -- All of `bun fmt`, `bun lint`, and `bun typecheck` must pass before considering tasks completed. -- NEVER run `bun test`. Always use `bun run test` (runs Vitest). - -## Project Snapshot - -T3 Code is a minimal web GUI for using coding agents like Codex and Claude. - -This repository is a VERY EARLY WIP. Proposing sweeping changes that improve long-term maintainability is encouraged. - -## Core Priorities +# kanban-console + +Local desktop/web project console for managing GitHub Projects Kanban, monorepo git state, PR health, product artifacts, and agent workflows. + +This repo is a fork of `pingdotgg/t3code`. The governance source is `MohAnghabo/ai-starter-pro`. + +## Stack + +This product intentionally starts from T3 Code rather than Stack A or Stack B. 
Preserve the upstream T3 Code split unless a planned phase explicitly changes it: + +- `apps/server`: Node.js WebSocket server and provider/session runtime. +- `apps/web`: React/Vite UI. +- `apps/desktop`: Electron desktop shell where applicable. +- `packages/contracts`: Shared Effect Schema and TypeScript contracts; keep schema-only. +- `packages/shared`: Shared runtime utilities with explicit subpath exports. + +All general `.ai/rules/` files still apply. Load `.ai/rules/22-kanban-console.md` for every product change. Stack-specific Stack A/Stack B rules apply only when a later change explicitly adopts one of those stacks. + +## Commands + +```bash +bun check # format check + lint + typecheck + tests (required before every commit) +bun dev # parallel dev servers +bun validate:local # local validation plus desktop build +bun build # full monorepo build via Turbo +bun pr:check # PR readiness: local validation + docs/tests/CI checks +bun preflight # stack-aware integration checks; writes .local/preflight/latest.{md,json} +rwx run tasks.yml --init commit-sha="$(git rev-parse HEAD)" --init repository-url="$(git remote get-url origin)" # run hosted CI locally in RWX Cloud +bun rwx:sync # sync root tasks.yml to .rwx/ci.yml +bun rwx:check # fail if tasks.yml and .rwx/ci.yml drift +bash scripts/adopt-template-rules.sh --target /absolute/path/to/repo --profile minimal # apply governance kit to another repo +bash scripts/verify-template-adoption.sh --target /absolute/path/to/repo --profile minimal # verify adoption state in target repo +``` + +## Slash Commands + +Claude command runbooks live in `.claude/commands/`. Codex-compatible wrappers +live in `.codex/commands/` and delegate to the same canonical runbooks. When a +user invokes `/command ...` in Codex, read `.codex/commands/command.md` first +and follow its canonical `.claude/commands/command.md` runbook. 
When a Claude +command changes, run `bun codex:sync`; `bun check` enforces `bun codex:check` +so the two command surfaces stay aligned. + +| Command | Purpose | +| ------------------------------------- | ------------------------------------------------------------------------------- | +| `/init-project` | bootstrap a new repo from the template before non-trivial work | +| `/user-stories ` | brainstorm feature stories as a local draft for standalone use or `/plan` input | +| `/plan ` | user stories → spec-driven plan in docs/tasks | +| `/execute-task ` | branch → implement issue-only or legacy numbered work | +| `/plan-status [task-name\|--all]` | compare current codebase progress against task plans in a table | +| `/phase ` | implement one phase from a plan | +| `/orchestrate [phase-id]` | choose and run the next safe command for a docs/tasks plan | +| `/preflight [flags]` | run stack-aware integration preflight | +| `/env-audit [--fix] [--write]` | run environment topology checks via preflight env/\* | +| `/review` | pre-PR review (bugs, regressions, coverage) | +| `/open-pr ` | branch → commit → push → PR → monitor CI and comments | +| `/ship ` | final pre-merge readiness pass | +| `/extract-pr-learnings ` | capture reusable lessons after a merged PR | +| `/pdpl-audit` | scan for PDPL compliance gaps | +| `/ifrs-audit` | scan for IFRS Accounting Standards compliance gaps | +| `/security-audit [target]` | run local-only deep security audit into `.local/security-audit/` | +| `/upgrade-multitenant` | Better Auth org upgrade guide | + +## Preflight + +- `bun preflight` is the stack-aware integration gate for Doppler, Better Auth, + GitHub, stack providers, and environment topology. +- Reports are written to `.local/preflight/latest.md` and + `.local/preflight/latest.json`; reference them in infrastructure PRs. +- `/env-audit` is an alias over `bun preflight --only=env/*`. 
+- CI exposes `preflight` and `env-audit` check runs; `pr-readiness` waits for + `validate`, `preflight`, and `env-audit`. + +## Task Orchestration + +- GitHub Issues are the live work items; GitHub Projects is the live status board. +- `docs/tasks/*.md` remains the durable spec and execution-log source for + non-trivial work. +- `tasks.md` is a legacy compatibility pointer, not the active queue. +- Use `/phase ` for modern `docs/tasks` plans. +- Use `/orchestrate ` when you need an agent to choose the next + safe lifecycle command, stop at gates, and preserve issue/spec state. +- See `docs/agent-orchestration.md` and `.ai/rules/21-agent-orchestration.md` + for the canonical lifecycle and command sequence. + +## T3 Code Runtime Notes + +T3 Code is a minimal web GUI for using coding agents like Codex and Claude. This fork is still early WIP, so sweeping changes are acceptable when they improve maintainability and reliability. + +Core priorities: 1. Performance first. 2. Reliability first. -3. Keep behavior predictable under load and during failures (session restarts, reconnects, partial streams). - -If a tradeoff is required, choose correctness and robustness over short-term convenience. - -## Maintainability +3. Keep behavior predictable under load and during failures, including session restarts, reconnects, and partial streams. -Long term maintainability is a core priority. If you add new functionality, first check if there is shared logic that can be extracted to a separate module. Duplicate logic across multiple files is a code smell and should be avoided. Don't be afraid to change existing code. Don't take shortcuts by just adding local logic to solve a problem. +If a tradeoff is required, choose correctness and robustness over short-term convenience. Avoid duplicated logic; extract shared modules when behavior crosses package boundaries. -## Package Roles - -- `apps/server`: Node.js WebSocket server. 
Wraps Codex app-server (JSON-RPC over stdio), serves the React web app, and manages provider sessions. -- `apps/web`: React/Vite UI. Owns session UX, conversation/event rendering, and client-side state. Connects to the server via WebSocket. -- `packages/contracts`: Shared effect/Schema schemas and TypeScript contracts for provider events, WebSocket protocol, and model/session types. Keep this package schema-only — no runtime logic. -- `packages/shared`: Shared runtime utilities consumed by both server and web. Uses explicit subpath exports (e.g. `@t3tools/shared/git`) — no barrel index. - -## Codex App Server (Important) - -T3 Code is currently Codex-first. The server starts `codex app-server` (JSON-RPC over stdio) per provider session, then streams structured events to the browser through WebSocket push messages. - -How we use it in this codebase: +Codex app-server notes: - Session startup/resume and turn lifecycle are brokered in `apps/server/src/codexAppServerManager.ts`. - Provider dispatch and thread event logging are coordinated in `apps/server/src/providerManager.ts`. - WebSocket server routes NativeApi methods in `apps/server/src/wsServer.ts`. -- Web app consumes orchestration domain events via WebSocket push on channel `orchestration.domainEvent` (provider runtime activity is projected into orchestration events server-side). +- Web app consumes orchestration domain events via WebSocket push on channel `orchestration.domainEvent`. -Docs: - -- Codex App Server docs: https://developers.openai.com/codex/sdk/#app-server - -## Reference Repos +Reference repos: - Open-source Codex repo: https://github.com/openai/codex -- Codex-Monitor (Tauri, feature-complete, strong reference implementation): https://github.com/Dimillian/CodexMonitor - -Use these as implementation references when designing protocol handling, UX flows, and operational safeguards. 
+- Codex-Monitor: https://github.com/Dimillian/CodexMonitor + +## CI + +PR-readiness CI enforcement note: + +- The readiness checker validates required GitHub check-runs via + `PR_READINESS_REQUIRED_CHECKS` (default: `validate`) +- Derived repos should set this env var in `.github/workflows/pr-readiness.yml` + to their actual required checks (comma-separated). Required check names must + not contain commas because the readiness script uses comma-separated parsing. + For example: + `PR_READINESS_REQUIRED_CHECKS: "validate,preflight,env-audit,security"` +- Do not include the `pr-readiness` job name itself in this list, or the check + creates a circular dependency by waiting for itself. + +## Project Bootstrap + +Before the first non-trivial task in any repo created from this template: + +1. Fill `docs/project.md` +2. Fill `review.md` +3. Fill `.cursor/BUGBOT.md` +4. Run `/init-project` if your agent supports slash commands + +`/plan-status` can be run at the start of a conversation, and Claude Code may +also inject a compact plan snapshot automatically via the `SessionStart` hook. + +If `docs/project.md` still contains template placeholders such as +`YOUR_PRODUCT_NAME`, `YOUR_APP_NAME`, unchecked stack selection, or generic +`[who are they?]` text, agents must stop non-trivial implementation and collect +bootstrap answers first. + +If `review.md` or `.cursor/BUGBOT.md` still contains template placeholders +(for example `TEMPLATE_OR_PRODUCT`, `YOUR_TEAM_NAME`, `YOUR_PRIORITY_1`, or +`path/glob/**`), agents must stop non-trivial implementation and collect +bootstrap answers first. + +Exception for template maintainers: product-agnostic template updates (for +example repository rules, workflow automation, scaffolding templates, and +agent guidance) may proceed before bootstrap files are initialized, provided +the change does not implement project-specific product behavior. 
+ +## Code Review (automatic) + +After every Claude Code session, `.claude/hooks/coderabbit-review.sh` runs automatically and prints a CodeRabbit review to the terminal. It reviews against the PR base branch (if a PR is open) or the previous commit (HEAD~1). Project constraints from `CLAUDE.md` and relevant `.ai/rules/` files are injected automatically based on what changed. + +**Required on each machine:** + +```bash +npm install -g coderabbit # install CLI +coderabbit auth # authenticate (browser opens) +``` + +The hook skips silently if the CLI is not installed, so it won't break machines that haven't set it up yet. + +## Closed-Loop PR Auto-Fix (optional) + +- `.github/ai-loop.yml` is the source of truth for the optional PR review/fix loop +- Keep it disabled by default in template-derived repos until the bootstrap is complete +- Required secrets when enabling: + - `AI_FIX_APP_ID` + - `AI_FIX_APP_PRIVATE_KEY` + - `CLAUDE_CODE_OAUTH_TOKEN` preferred, `ANTHROPIC_API_KEY` fallback +- Required bootstrap before enabling: + - install the org-owned GitHub App on the repo + - delete or scope legacy overlapping workflows before setting `enabled: true` + - set `executor_bot_login` in `.github/ai-loop.yml` + - configure `trusted_review_bots` + - verify branch protection allows App pushes if same-branch auto-fix is desired + +## Core Gotchas + +1. **Contracts are SSOT** — keep shared protocol/schema changes in `packages/contracts`; do not bury cross-package contracts in app-only code. +2. **Better Auth v1.4 pinned** — do not upgrade to 1.5 (breaking: drizzle-adapter extracted, InferUser/InferSession removed, API Key plugin moved, $ERROR_CODES type changed). +3. **Better Auth local domains** — set `BETTER_AUTH_URL` to exact origin (e.g. `https://api.app.test`) and include it in `trustedOrigins`. + Use `bash scripts/setup-domain.sh app --app-port 12000 --api-port 12001`; + local domain ports must be explicit, unique per service, and `>=10000`. +4. 
**AR/EN always** — every user-facing string requires both Arabic and English translations. +5. **PDPL always** — Royal Decree 6/2022, fully enforced. No real PII in tests, logs, commits, or PR text. +6. **AWS Well-Architected always** — every non-trivial change and review must consider operational excellence, security, reliability, performance efficiency, cost optimization, and sustainability. +7. **Pre-merge gates always** — before merge, relevant Markdown docs must match the change, tests must be in the PR or explicitly marked no-impact, and all required CI must be green. +8. **Non-code outputs** — save to `.local/`, never project root or `docs/` unless permanent project documentation. +9. **IFRS when financial reporting** — if a task touches financial statements, ledgers, revenue recognition, leases, impairments, audit exports, or accounting records, load `.ai/rules/19-ifrs-compliance.md`. Use decimal-safe money handling, preserve audit trails, and never silently overwrite posted accounting records. + +## AI Reading Order + +1. `AGENTS.md` — commands, gotchas (this file) +2. `docs/project.md` — initialized product identity, stack choice, domain, i18n, regulatory and reporting scope +3. `review.md` — review scope, quality gates, and risk profile +4. `.cursor/BUGBOT.md` — Bugbot-specific review context and priorities +5. `.ai/rules/00-constitution.md` — non-negotiables +6. `.ai/rules/17-aws-well-architected.md` — mandatory architecture and review lens +7. `.ai/rules/18-pr-readiness.md` — mandatory PR readiness gates +8. `.ai/rules/21-agent-orchestration.md` — task lifecycle and command sequence +9. `.ai/rules/22-kanban-console.md` — product-specific rules for this T3 Code fork +10. T3 Code runtime notes in this file — package roles, Codex app-server flow, and upstream references +11. 
Relevant rule files for your task — include `.ai/rules/19-ifrs-compliance.md` for financial-reporting/accounting work; see `.ai/README.md` for the full map diff --git a/CLAUDE.md b/CLAUDE.md index c3170642553..47dc3e3d863 120000 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1 +1 @@ -AGENTS.md +AGENTS.md \ No newline at end of file diff --git a/apps/server/src/kanban/AgentWorkflowLauncher.test.ts b/apps/server/src/kanban/AgentWorkflowLauncher.test.ts new file mode 100644 index 00000000000..e051c393753 --- /dev/null +++ b/apps/server/src/kanban/AgentWorkflowLauncher.test.ts @@ -0,0 +1,279 @@ +import { afterEach, assert, describe, expect, it, vi } from "@effect/vitest"; +import { Effect, Layer } from "effect"; +import { ChildProcessSpawner } from "effect/unstable/process"; + +import * as GitHubCli from "../sourceControl/GitHubCli.ts"; +import type * as VcsProcess from "../vcs/VcsProcess.ts"; +import * as AgentWorkflowLauncher from "./AgentWorkflowLauncher.ts"; + +const processOutput = (stdout: string): VcsProcess.VcsProcessOutput => ({ + exitCode: ChildProcessSpawner.ExitCode(0), + stdout, + stderr: "", + stdoutTruncated: false, + stderrTruncated: false, +}); + +const execute = vi.fn(); + +const layer = AgentWorkflowLauncher.layer.pipe( + Layer.provide( + Layer.mock(GitHubCli.GitHubCli)({ + execute, + listOpenPullRequests: vi.fn(), + getPullRequest: vi.fn(), + getRepositoryCloneUrls: vi.fn(), + createRepository: vi.fn(), + createPullRequest: vi.fn(), + getDefaultBranch: vi.fn(), + checkoutPullRequest: vi.fn(), + }), + ), +); + +afterEach(() => { + execute.mockReset(); +}); + +describe("AgentWorkflowLauncher", () => { + it.effect("lists Claude and Codex recipes for the supported command surface", () => + Effect.gen(function* () { + const launcher = yield* AgentWorkflowLauncher.AgentWorkflowLauncher; + const recipes = launcher.listRecipes({ + taskName: "t3-kanban-project-console", + phaseId: "phase-5", + issueNumber: 43, + pullRequestNumber: 7, + claudeAvailable: true, + 
codexAvailable: false, + }); + + assert.equal(recipes.length, 24); + expect(recipes).toContainEqual({ + id: "claude-phase", + label: "Claude Implement phase", + agent: "Claude", + command: "/phase t3-kanban-project-console phase-5", + commandId: "phase", + available: true, + }); + expect(recipes).toContainEqual({ + id: "codex-extract-pr-learnings", + label: "Codex Extract PR learnings", + agent: "Codex", + command: "/extract-pr-learnings 7", + commandId: "extract-pr-learnings", + available: false, + }); + }).pipe(Effect.provide(layer)), + ); + + it.effect("builds the shared task context package without command output", () => + Effect.gen(function* () { + const launcher = yield* AgentWorkflowLauncher.AgentWorkflowLauncher; + const context = launcher.buildTaskContext({ + task: { + id: "task-1", + issue: "kanban-console#43", + title: "Launch agent workflow", + titleAr: "Launch agent workflow", + repo: "kanban-console", + column: "ready", + priority: "P1", + assignee: "Codex", + checks: { passing: 2, pending: 0, failing: 0 }, + agent: "Codex", + updated: "2026-05-06T14:00:00.000Z", + comments: 3, + }, + board: { + id: "board-1", + owner: "MohAnghabo", + title: "Kanban Project Console", + source: "github-projects", + columns: ["backlog", "ready", "in-progress", "review", "blocked", "done"], + }, + repo: { + id: "repo-1", + name: "kanban-console", + owner: "MohAnghabo", + path: "/repo", + branch: "feature/t3-kanban-phase-5-agent-launchers", + ahead: 1, + behind: 0, + openPrs: 1, + activeTasks: 3, + status: "healthy", + }, + issueUrl: "https://github.com/MohAnghabo/kanban-console/issues/43", + prUrl: "https://github.com/MohAnghabo/kanban-console/pull/7", + artifacts: [ + { + id: "artifact-plan", + repoId: "repo-1", + path: "docs/tasks/t3-kanban-project-console.md", + title: "Plan", + status: "clean", + updatedAt: "2026-05-06T14:00:00.000Z", + }, + ], + }); + + assert.deepStrictEqual(context.task, { + id: "task-1", + issue: "kanban-console#43", + title: "Launch agent 
workflow", + repo: "kanban-console", + column: "ready", + priority: "P1", + }); + assert.deepStrictEqual(context.validationCommands, ["bun check"]); + expect(JSON.stringify(context)).not.toContain("stdout"); + expect(JSON.stringify(context)).not.toContain("token"); + }).pipe(Effect.provide(layer)), + ); + + it.effect("requires confirmation before queueing agent workflows", () => + Effect.gen(function* () { + const launcher = yield* AgentWorkflowLauncher.AgentWorkflowLauncher; + const recipe = launcher.listRecipes({ + taskName: "t3-kanban-project-console", + phaseId: "phase-5", + claudeAvailable: true, + codexAvailable: true, + })[0]; + + if (!recipe) { + throw new Error("Expected at least one workflow recipe."); + } + + const error = yield* launcher + .queueWorkflow({ + recipe, + context: { + task: { + id: "task-1", + issue: "kanban-console#43", + title: "Launch agent workflow", + repo: "kanban-console", + column: "ready", + priority: "P1", + }, + project: { id: "board-1", owner: "MohAnghabo", title: "Kanban Console" }, + repo: { + id: "repo-1", + owner: "MohAnghabo", + name: "kanban-console", + path: "/repo", + branch: "feature/agent-launchers", + }, + issueUrl: "https://github.com/MohAnghabo/kanban-console/issues/43", + artifacts: [], + validationCommands: ["bun check"], + governanceRules: ["AGENTS.md"], + }, + confirmed: false, + }) + .pipe(Effect.flip); + + expect(error.detail).toContain("require explicit confirmation"); + }).pipe(Effect.provide(layer)), + ); + + it.effect("suppresses duplicate queued or running workflow sessions", () => + Effect.gen(function* () { + const launcher = yield* AgentWorkflowLauncher.AgentWorkflowLauncher; + const recipe = launcher + .listRecipes({ + taskName: "t3-kanban-project-console", + phaseId: "phase-5", + claudeAvailable: true, + codexAvailable: true, + }) + .find((item) => item.id === "codex-phase"); + + if (!recipe) { + throw new Error("Expected Codex phase workflow recipe."); + } + + const context = { + task: { + id: 
"task-1", + issue: "kanban-console#43", + title: "Launch agent workflow", + repo: "kanban-console", + column: "ready" as const, + priority: "P1" as const, + }, + project: { id: "board-1", owner: "MohAnghabo", title: "Kanban Console" }, + repo: { + id: "repo-1", + owner: "MohAnghabo", + name: "kanban-console", + path: "/repo", + branch: "feature/agent-launchers", + }, + issueUrl: "https://github.com/MohAnghabo/kanban-console/issues/43", + artifacts: [], + validationCommands: ["bun check"], + governanceRules: ["AGENTS.md"], + }; + + const first = yield* launcher.queueWorkflow({ + recipe, + context, + confirmed: true, + now: new Date("2026-05-06T14:00:00.000Z"), + }); + const second = yield* launcher.queueWorkflow({ + recipe, + context, + confirmed: true, + activeSessions: [first], + now: new Date("2026-05-06T14:01:00.000Z"), + }); + + assert.equal(first.status, "queued"); + assert.equal(second.id, first.id); + assert.equal(second.duplicateSuppressed, true); + expect(second.summary).toContain("Duplicate agent workflow suppressed"); + }).pipe(Effect.provide(layer)), + ); + + it.effect("posts concise GitHub comments for agent session lifecycle states", () => + Effect.gen(function* () { + execute.mockReturnValueOnce(Effect.succeed(processOutput("https://github.com/comment\n"))); + + const launcher = yield* AgentWorkflowLauncher.AgentWorkflowLauncher; + yield* launcher.postSessionComment({ + cwd: "/repo", + repository: "MohAnghabo/kanban-console", + issueNumber: 43, + event: "started", + confirmed: true, + session: { + id: "agent-session-1", + taskId: "task-1", + workflowId: "codex-phase", + agent: "Codex", + command: "/phase t3-kanban-project-console phase-5", + status: "queued", + duplicateKey: "task-1:codex-phase:ready:feature/agent-launchers", + duplicateSuppressed: false, + summary: "Codex workflow queued with a redacted task context package.", + startedAt: "2026-05-06T14:00:00.000Z", + }, + }); + + const body = execute.mock.calls[0]?.[0].args.at(-1); + 
expect(body).toContain("Kanban Console agent workflow started.");
+      expect(body).toContain("Raw command output is intentionally omitted.");
+      expect(body).not.toContain("stdout");
+      expect(execute).toHaveBeenCalledWith({
+        cwd: "/repo",
+        args: ["issue", "comment", "43", "--repo", "MohAnghabo/kanban-console", "--body", body],
+        timeoutMs: 30_000,
+      });
+    }).pipe(Effect.provide(layer)),
+  );
+});
diff --git a/apps/server/src/kanban/AgentWorkflowLauncher.ts b/apps/server/src/kanban/AgentWorkflowLauncher.ts
new file mode 100644
index 00000000000..48a3ca637b1
--- /dev/null
+++ b/apps/server/src/kanban/AgentWorkflowLauncher.ts
@@ -0,0 +1,367 @@
+import { Context, Effect, Layer, Schema } from "effect";
+import type {
+  KanbanConsoleAgentKind,
+  KanbanConsoleAgentWorkflow,
+  KanbanConsoleAgentWorkflowCommandId,
+  KanbanConsoleAgentWorkflowSession,
+  KanbanConsoleArtifact,
+  KanbanConsoleGitStatusSnapshot,
+  KanbanConsoleManagedRepo,
+  KanbanConsoleProjectBoard,
+  KanbanConsoleTask,
+  KanbanConsoleTaskContextPackage,
+} from "@t3tools/contracts";
+
+import * as GitHubCli from "../sourceControl/GitHubCli.ts";
+
+const DEFAULT_TIMEOUT_MS = 30_000;
+
+const workflowCommandIds = [
+  "init-project",
+  "user-stories",
+  "plan",
+  "phase",
+  "execute-task",
+  "review",
+  "open-pr",
+  "ship",
+  "extract-pr-learnings",
+  "pdpl-audit",
+  "ifrs-audit",
+  "orchestrate",
+] as const satisfies ReadonlyArray<KanbanConsoleAgentWorkflowCommandId>;
+
+const workflowLabels: Record<KanbanConsoleAgentWorkflowCommandId, string> = {
+  "execute-task": "Execute task",
+  "extract-pr-learnings": "Extract PR learnings",
+  "ifrs-audit": "IFRS audit",
+  "init-project": "Initialize project",
+  "open-pr": "Open PR",
+  orchestrate: "Orchestrate next step",
+  "pdpl-audit": "PDPL audit",
+  phase: "Implement phase",
+  plan: "Plan work",
+  review: "Review",
+  ship: "Ship readiness",
+  "user-stories": "Draft user stories",
+};
+
+export class AgentWorkflowLauncherError extends Schema.TaggedErrorClass<AgentWorkflowLauncherError>()(
+  "AgentWorkflowLauncherError",
+  {
+    operation: Schema.String,
+    detail: Schema.String,
+    cause: Schema.optional(Schema.Defect),
+  },
+) {
+  override get message(): string {
+    return `Agent workflow launcher failed in ${this.operation}: ${this.detail}`;
+  }
+}
+
+export interface WorkflowRecipeOptions {
+  readonly taskName: string;
+  readonly phaseId?: string;
+  readonly issueNumber?: number;
+  readonly pullRequestNumber?: number;
+  readonly claudeAvailable: boolean;
+  readonly codexAvailable: boolean;
+}
+
+export interface TaskContextOptions {
+  readonly task: KanbanConsoleTask;
+  readonly board: KanbanConsoleProjectBoard;
+  readonly repo: KanbanConsoleManagedRepo;
+  readonly issueUrl: string;
+  readonly prUrl?: string;
+  readonly artifacts: ReadonlyArray<KanbanConsoleArtifact>;
+  readonly gitStatus?: KanbanConsoleGitStatusSnapshot;
+  readonly validationCommands?: ReadonlyArray<string>;
+  readonly governanceRules?: ReadonlyArray<string>;
+}
+
+export interface QueueWorkflowOptions {
+  readonly recipe: KanbanConsoleAgentWorkflow;
+  readonly context: KanbanConsoleTaskContextPackage;
+  readonly confirmed: boolean;
+  readonly activeSessions?: ReadonlyArray<KanbanConsoleAgentWorkflowSession>;
+  readonly now?: Date;
+}
+
+export interface AgentSessionCommentOptions {
+  readonly cwd: string;
+  readonly repository: string;
+  readonly issueNumber: number;
+  readonly session: KanbanConsoleAgentWorkflowSession;
+  readonly event: "started" | "completed" | "failed" | "blocked";
+  readonly confirmed: boolean;
+}
+
+export interface AgentWorkflowLauncherShape {
+  readonly listRecipes: (input: WorkflowRecipeOptions) => ReadonlyArray<KanbanConsoleAgentWorkflow>;
+  readonly buildTaskContext: (input: TaskContextOptions) => KanbanConsoleTaskContextPackage;
+  readonly queueWorkflow: (
+    input: QueueWorkflowOptions,
+  ) => Effect.Effect<KanbanConsoleAgentWorkflowSession, AgentWorkflowLauncherError>;
+  readonly postSessionComment: (
+    input: AgentSessionCommentOptions,
+  ) => Effect.Effect<void, AgentWorkflowLauncherError>;
+}
+
+export class AgentWorkflowLauncher extends Context.Service<
+  AgentWorkflowLauncher,
+  AgentWorkflowLauncherShape
+>()("t3/kanban/AgentWorkflowLauncher") {}
+
+function commandFor(
+  commandId: KanbanConsoleAgentWorkflowCommandId,
+  options: WorkflowRecipeOptions,
+): string {
+  switch (commandId) {
+    case "init-project":
+      return "/init-project";
+    case "user-stories":
+      return `/user-stories ${options.taskName} ""`;
+    case "plan":
+      return `/plan ${options.taskName} ""`;
+    case "phase":
+      return `/phase ${options.taskName} ${options.phaseId ?? "phase-1"}`;
+    case "execute-task":
+      return `/execute-task ${options.issueNumber ?? ""}`;
+    case "review":
+      return "/review";
+    case "open-pr":
+      return `/open-pr feat ${options.taskName}`;
+    case "ship":
+      return `/ship ${options.taskName}`;
+    case "extract-pr-learnings":
+      return `/extract-pr-learnings ${options.pullRequestNumber ?? ""}`;
+    case "pdpl-audit":
+      return "/pdpl-audit";
+    case "ifrs-audit":
+      return "/ifrs-audit";
+    case "orchestrate":
+      return `/orchestrate ${options.taskName}`;
+  }
+}
+
+function recipeId(agent: Exclude<KanbanConsoleAgentKind, "Human">, commandId: string): string {
+  return `${agent.toLowerCase()}-${commandId}`;
+}
+
+export function listWorkflowRecipes(
+  options: WorkflowRecipeOptions,
+): ReadonlyArray<KanbanConsoleAgentWorkflow> {
+  return workflowCommandIds.flatMap((commandId) => {
+    const command = commandFor(commandId, options);
+    return [
+      {
+        id: recipeId("Claude", commandId),
+        label: `Claude ${workflowLabels[commandId]}`,
+        agent: "Claude" as const,
+        command,
+        commandId,
+        available: options.claudeAvailable,
+      },
+      {
+        id: recipeId("Codex", commandId),
+        label: `Codex ${workflowLabels[commandId]}`,
+        agent: "Codex" as const,
+        command,
+        commandId,
+        available: options.codexAvailable,
+      },
+    ];
+  });
+}
+
+export function buildTaskContext(input: TaskContextOptions): KanbanConsoleTaskContextPackage {
+  return {
+    task: {
+      id: input.task.id,
+      issue: input.task.issue,
+      title: input.task.title,
+      repo: input.task.repo,
+      column: input.task.column,
+      priority: input.task.priority,
+    },
+    project: {
+      id: input.board.id,
+      owner: input.board.owner,
+      title: input.board.title,
+    },
+    repo: {
+      id: input.repo.id,
+      owner: input.repo.owner,
+      name: input.repo.name,
+      path: input.repo.path,
+      branch: input.repo.branch,
+    },
+    issueUrl: input.issueUrl,
+    ...(input.prUrl ? { prUrl: input.prUrl } : {}),
+    artifacts: input.artifacts.map((artifact) => ({
+      path: artifact.path,
+      status: artifact.status,
+    })),
+    ...(input.gitStatus ? { gitStatus: input.gitStatus } : {}),
+    validationCommands: input.validationCommands ?? ["bun check"],
+    governanceRules: input.governanceRules ?? [
+      "AGENTS.md",
+      "docs/project.md",
+      "review.md",
+      ".cursor/BUGBOT.md",
+      ".ai/rules/22-kanban-console.md",
+    ],
+  };
+}
+
+function duplicateKey(input: QueueWorkflowOptions): string {
+  return [
+    input.context.task.id,
+    input.recipe.id,
+    input.context.task.column,
+    input.context.repo.branch,
+  ].join(":");
+}
+
+function activeDuplicate(
+  input: QueueWorkflowOptions,
+): KanbanConsoleAgentWorkflowSession | undefined {
+  const key = duplicateKey(input);
+  return input.activeSessions?.find(
+    (session) =>
+      session.duplicateKey === key && (session.status === "queued" || session.status === "running"),
+  );
+}
+
+function isoNow(input: QueueWorkflowOptions): string {
+  return (input.now ?? new Date()).toISOString();
+}
+
+export function queueWorkflowSession(
+  input: QueueWorkflowOptions,
+): Effect.Effect<KanbanConsoleAgentWorkflowSession, AgentWorkflowLauncherError> {
+  if (!input.confirmed) {
+    return Effect.fail(
+      new AgentWorkflowLauncherError({
+        operation: "queueWorkflow",
+        detail: "Agent workflow launches require explicit confirmation.",
+      }),
+    );
+  }
+
+  if (!input.recipe.available) {
+    return Effect.succeed({
+      id: `blocked-${input.context.task.id}-${input.recipe.id}`,
+      taskId: input.context.task.id,
+      workflowId: input.recipe.id,
+      agent: input.recipe.agent,
+      command: input.recipe.command,
+      status: "blocked",
+      duplicateKey: duplicateKey(input),
+      duplicateSuppressed: false,
+      summary: `${input.recipe.agent} workflow is unavailable on this machine.`,
+      startedAt: isoNow(input),
+      finishedAt: isoNow(input),
+    });
+  }
+
+  const duplicate = activeDuplicate(input);
+  if (duplicate) {
+    return Effect.succeed({
+      ...duplicate,
+      duplicateSuppressed: true,
+      summary:
+        "Duplicate agent workflow suppressed; an equivalent session is already queued or running.",
+    });
+  }
+
+  return Effect.succeed({
+    id: `agent-${input.context.task.id}-${input.recipe.id}-${isoNow(input).replace(/[:.]/g, "-")}`,
+    taskId: input.context.task.id,
+    workflowId: input.recipe.id,
+    agent: input.recipe.agent,
+    command: input.recipe.command,
+    status: "queued",
+    duplicateKey: duplicateKey(input),
+    duplicateSuppressed: false,
+    summary: `${input.recipe.agent} workflow queued with a redacted task context package.`,
+    startedAt: isoNow(input),
+  });
+}
+
+export function sessionCommentBody(input: {
+  readonly session: KanbanConsoleAgentWorkflowSession;
+  readonly event: AgentSessionCommentOptions["event"];
+}): string {
+  const verb = {
+    blocked: "blocked",
+    completed: "completed",
+    failed: "failed",
+    started: "started",
+  }[input.event];
+
+  return [
+    `Kanban Console agent workflow ${verb}.`,
+    "",
+    `- Task: ${input.session.taskId}`,
+    `- Agent: ${input.session.agent}`,
+    `- Command: ${input.session.command}`,
+    `- 
Status: ${input.session.status}`,
+    `- Summary: ${input.session.summary}`,
+    "",
+    "Raw command output is intentionally omitted.",
+  ].join("\n");
+}
+
+function launcherError(
+  operation: string,
+  cause: GitHubCli.GitHubCliError,
+): AgentWorkflowLauncherError {
+  return new AgentWorkflowLauncherError({
+    operation,
+    detail: cause.detail,
+    cause,
+  });
+}
+
+export const make = Effect.fn("makeAgentWorkflowLauncher")(function* () {
+  const github = yield* GitHubCli.GitHubCli;
+
+  return AgentWorkflowLauncher.of({
+    listRecipes: listWorkflowRecipes,
+    buildTaskContext,
+    queueWorkflow: queueWorkflowSession,
+    postSessionComment: (input) => {
+      if (!input.confirmed) {
+        return Effect.fail(
+          new AgentWorkflowLauncherError({
+            operation: "postSessionComment",
+            detail: "GitHub issue comments for agent sessions require explicit confirmation.",
+          }),
+        );
+      }
+
+      return github
+        .execute({
+          cwd: input.cwd,
+          args: [
+            "issue",
+            "comment",
+            String(input.issueNumber),
+            "--repo",
+            input.repository,
+            "--body",
+            sessionCommentBody({ session: input.session, event: input.event }),
+          ],
+          timeoutMs: DEFAULT_TIMEOUT_MS,
+        })
+        .pipe(
+          Effect.asVoid,
+          Effect.mapError((error) => launcherError("postSessionComment", error)),
+        );
+    },
+  });
+});
+
+export const layer = Layer.effect(AgentWorkflowLauncher, make());
diff --git a/apps/server/src/kanban/GitHubProjectsProvider.test.ts b/apps/server/src/kanban/GitHubProjectsProvider.test.ts
new file mode 100644
index 00000000000..2e7e300f928
--- /dev/null
+++ b/apps/server/src/kanban/GitHubProjectsProvider.test.ts
@@ -0,0 +1,301 @@
+import { afterEach, assert, describe, expect, it, vi } from "@effect/vitest";
+import { Effect, Layer } from "effect";
+import { ChildProcessSpawner } from "effect/unstable/process";
+
+import * as GitHubCli from "../sourceControl/GitHubCli.ts";
+import type * as VcsProcess from "../vcs/VcsProcess.ts";
+import * as GitHubProjectsProvider from "./GitHubProjectsProvider.ts";
+
+const processOutput = (stdout: string): VcsProcess.VcsProcessOutput => ({
+  exitCode: ChildProcessSpawner.ExitCode(0),
+  stdout,
+  stderr: "",
+  stdoutTruncated: false,
+  stderrTruncated: false,
+});
+
+const execute = vi.fn();
+
+const layer = GitHubProjectsProvider.layer.pipe(
+  Layer.provide(
+    Layer.mock(GitHubCli.GitHubCli)({
+      execute,
+      listOpenPullRequests: vi.fn(),
+      getPullRequest: vi.fn(),
+      getRepositoryCloneUrls: vi.fn(),
+      createRepository: vi.fn(),
+      createPullRequest: vi.fn(),
+      getDefaultBranch: vi.fn(),
+      checkoutPullRequest: vi.fn(),
+    }),
+  ),
+);
+
+afterEach(() => {
+  execute.mockReset();
+});
+
+describe("GitHubProjectsProvider", () => {
+  it.effect("checks gh auth readiness without exposing raw command output", () =>
+    Effect.gen(function* () {
+      execute.mockReturnValueOnce(Effect.succeed(processOutput("github.com\n")));
+
+      const provider = yield* GitHubProjectsProvider.GitHubProjectsProvider;
+      const result = yield* provider.checkAuthReadiness({ cwd: "/repo" });
+
+      assert.deepStrictEqual(result, {
+        status: "authenticated",
+        detail: "GitHub CLI is authenticated.",
+      });
+      expect(execute).toHaveBeenCalledWith({
+        cwd: "/repo",
+        args: ["auth", "status"],
+        timeoutMs: 30_000,
+      });
+    }).pipe(Effect.provide(layer)),
+  );
+
+  it.effect("reads organization Projects and fields from gh project JSON", () =>
+    Effect.gen(function* () {
+      execute
+        .mockReturnValueOnce(
+          Effect.succeed(
+            processOutput(
+              JSON.stringify({
+                projects: [
+                  {
+                    id: "PVT_kwDOExample",
+                    number: 7,
+                    title: "Kanban Console",
+                    url: "https://github.com/orgs/MohAnghabo/projects/7",
+                  },
+                ],
+              }),
+            ),
+          ),
+        )
+        .mockReturnValueOnce(
+          Effect.succeed(
+            processOutput(
+              JSON.stringify({
+                fields: [
+                  {
+                    id: "PVTSSF_status",
+                    name: "Status",
+                    type: "single_select",
+                    options: [
+                      { id: "opt_ready", name: "Ready" },
+                      { id: "opt_progress", name: "In progress" },
+                    ],
+                  },
+                  { id: "PVTF_priority", name: "Priority", type: "text" },
+                ],
+              }),
+            ),
+          ),
+        );
+
+      const provider = yield* GitHubProjectsProvider.GitHubProjectsProvider;
+      const projects = yield* provider.listProjects({
+        cwd: "/repo",
+        owner: "MohAnghabo",
+        limit: 10,
+      });
+      const fields = yield* provider.listProjectFields({
+        cwd: "/repo",
+        owner: "MohAnghabo",
+        projectNumber: 7,
+      });
+
+      assert.deepStrictEqual(projects, [
+        {
+          id: "PVT_kwDOExample",
+          number: 7,
+          title: "Kanban Console",
+          url: "https://github.com/orgs/MohAnghabo/projects/7",
+          closed: false,
+        },
+      ]);
+      assert.deepStrictEqual(fields, [
+        {
+          id: "PVTSSF_status",
+          name: "Status",
+          type: "single_select",
+          options: [
+            { id: "opt_ready", name: "Ready" },
+            { id: "opt_progress", name: "In progress" },
+          ],
+        },
+        { id: "PVTF_priority", name: "Priority", type: "text", options: [] },
+      ]);
+    }).pipe(Effect.provide(layer)),
+  );
+
+  it.effect("maps GitHub Project issue items into Kanban tasks", () =>
+    Effect.gen(function* () {
+      execute.mockReturnValueOnce(
+        Effect.succeed(
+          processOutput(
+            JSON.stringify({
+              items: [
+                {
+                  id: "PVTI_task_1",
+                  content: {
+                    type: "Issue",
+                    number: 43,
+                    title: "Connect live GitHub Projects state",
+                    repository: {
+                      name: "kanban-console",
+                      nameWithOwner: "MohAnghabo/kanban-console",
+                    },
+                    assignees: [{ login: "MohAnghabo" }],
+                    updatedAt: "2026-05-06T13:54:28.000Z",
+                    comments: 4,
+                  },
+                  fieldValues: [
+                    { name: "Status", value: "In progress" },
+                    { name: "Priority", value: "P1" },
+                    { name: "Agent", value: "Codex" },
+                    { name: "Pull Request", value: "kanban-console#3" },
+                  ],
+                },
+              ],
+            }),
+          ),
+        ),
+      );
+
+      const provider = yield* GitHubProjectsProvider.GitHubProjectsProvider;
+      const result = yield* provider.listProjectItems({
+        cwd: "/repo",
+        owner: "MohAnghabo",
+        projectNumber: 7,
+        projectId: "PVT_kwDOExample",
+        projectTitle: "Kanban Console",
+      });
+
+      assert.deepStrictEqual(result.board, {
+        id: "PVT_kwDOExample",
+        owner: "MohAnghabo",
+        title: "Kanban Console",
+        source: "github-projects",
+        columns: ["backlog", "ready", "in-progress", "review", "blocked", "done"],
+      });
+      assert.deepStrictEqual(result.tasks, [
+        {
+          id: "PVTI_task_1",
+          issue: "kanban-console#43",
+          title: "Connect live GitHub Projects state",
+          titleAr: "Connect live GitHub Projects state",
+          repo: "kanban-console",
+          column: "in-progress",
+          priority: "P1",
+          assignee: "MohAnghabo",
+          pr: "kanban-console#3",
+          checks: { passing: 0, pending: 0, failing: 0 },
+          agent: "Codex",
+          updated: "2026-05-06T13:54:28.000Z",
+          comments: 4,
+        },
+      ]);
+    }).pipe(Effect.provide(layer)),
+  );
+
+  it.effect("requires confirmation before writing Project status or comments", () =>
+    Effect.gen(function* () {
+      const provider = yield* GitHubProjectsProvider.GitHubProjectsProvider;
+
+      const statusError = yield* provider
+        .updateProjectItemStatus({
+          cwd: "/repo",
+          itemId: "PVTI_task_1",
+          fromColumn: "ready",
+          toColumn: "review",
+          confirmed: false,
+          projectId: "PVT_kwDOExample",
+          statusFieldId: "PVTSSF_status",
+          statusOptionId: "opt_review",
+        })
+        .pipe(Effect.flip);
+      const commentError = yield* provider
+        .postStatusMoveComment({
+          cwd: "/repo",
+          repository: "MohAnghabo/kanban-console",
+          issueNumber: 43,
+          body: "Status moved from Ready to In review.",
+          confirmed: false,
+        })
+        .pipe(Effect.flip);
+
+      expect(statusError.detail).toContain("require explicit confirmation");
+      expect(commentError.detail).toContain("require explicit confirmation");
+      expect(execute).not.toHaveBeenCalled();
+    }).pipe(Effect.provide(layer)),
+  );
+
+  it.effect("updates Project status and posts issue comments after confirmation", () =>
+    Effect.gen(function* () {
+      execute
+        .mockReturnValueOnce(Effect.succeed(processOutput("")))
+        .mockReturnValueOnce(Effect.succeed(processOutput("https://github.com/comment\n")));
+
+      const provider = yield* GitHubProjectsProvider.GitHubProjectsProvider;
+      const transition = yield* provider.updateProjectItemStatus({
+        cwd: "/repo",
+        itemId: "PVTI_task_1",
+        fromColumn: "ready",
+        toColumn: "review",
+        confirmed: true,
+        projectId: "PVT_kwDOExample",
+        statusFieldId: "PVTSSF_status",
+        statusOptionId: "opt_review",
+      });
+      yield* provider.postStatusMoveComment({
+        cwd: "/repo",
+        repository: "MohAnghabo/kanban-console",
+        issueNumber: 43,
+        body: "Status moved from Ready to In review.",
+        confirmed: true,
+      });
+
+      assert.deepStrictEqual(transition, {
+        taskId: "PVTI_task_1",
+        fromColumn: "ready",
+        toColumn: "review",
+        action: "none",
+        requiresConfirmation: false,
+        duplicateSuppressed: false,
+        message: "GitHub Project status updated.",
+      });
+      expect(execute).toHaveBeenNthCalledWith(1, {
+        cwd: "/repo",
+        args: [
+          "project",
+          "item-edit",
+          "--id",
+          "PVTI_task_1",
+          "--project-id",
+          "PVT_kwDOExample",
+          "--field-id",
+          "PVTSSF_status",
+          "--single-select-option-id",
+          "opt_review",
+        ],
+        timeoutMs: 30_000,
+      });
+      expect(execute).toHaveBeenNthCalledWith(2, {
+        cwd: "/repo",
+        args: [
+          "issue",
+          "comment",
+          "43",
+          "--repo",
+          "MohAnghabo/kanban-console",
+          "--body",
+          "Status moved from Ready to In review.",
+        ],
+        timeoutMs: 30_000,
+      });
+    }).pipe(Effect.provide(layer)),
+  );
+});
diff --git a/apps/server/src/kanban/GitHubProjectsProvider.ts b/apps/server/src/kanban/GitHubProjectsProvider.ts
new file mode 100644
index 00000000000..702470dc652
--- /dev/null
+++ b/apps/server/src/kanban/GitHubProjectsProvider.ts
@@ -0,0 +1,520 @@
+import { Context, Effect, Layer, Schema, SchemaIssue } from "effect";
+import type {
+  KanbanColumnId,
+  KanbanConsoleProjectBoard,
+  KanbanConsoleTask,
+  KanbanConsoleTaskTransitionResult,
+} from "@t3tools/contracts";
+
+import * as GitHubCli from "../sourceControl/GitHubCli.ts";
+
+const DEFAULT_TIMEOUT_MS = 30_000;
+const DEFAULT_PROJECT_ITEM_LIMIT = 100;
+
+const UnknownJson = Schema.Unknown;
+const RawProjectList = Schema.Struct({
+  projects: Schema.Array(
+    Schema.Struct({
+      id: Schema.String,
+      number: Schema.Number,
+      title: Schema.String,
+      url: Schema.optional(Schema.String),
+      closed: Schema.optional(Schema.Boolean),
+    }),
+  ),
+});
+
+const RawProjectFields = Schema.Struct({
+  fields: Schema.Array(
+    Schema.Struct({
+      id: Schema.String,
+      name: Schema.String,
+      type: Schema.optional(Schema.String),
+      options: Schema.optional(
+        Schema.Array(
+          Schema.Struct({
+            id: Schema.String,
+            name: Schema.String,
+          }),
+        ),
+      ),
+    }),
+  ),
+});
+
+export interface GitHubProjectSummary {
+  readonly id: string;
+  readonly number: number;
+  readonly title: string;
+  readonly url?: string;
+  readonly closed: boolean;
+}
+
+export interface GitHubProjectFieldOption {
+  readonly id: string;
+  readonly name: string;
+}
+
+export interface GitHubProjectField {
+  readonly id: string;
+  readonly name: string;
+  readonly type?: string;
+  readonly options: ReadonlyArray<GitHubProjectFieldOption>;
+}
+
+export interface GitHubProjectsAuthReadiness {
+  readonly status: "authenticated" | "setup-required";
+  readonly detail: string;
+}
+
+export interface GitHubProjectItemStatusUpdate {
+  readonly itemId: string;
+  readonly fromColumn: KanbanColumnId;
+  readonly toColumn: KanbanColumnId;
+  readonly confirmed: boolean;
+  readonly projectId: string;
+  readonly statusFieldId: string;
+  readonly statusOptionId: string;
+}
+
+export class GitHubProjectsProviderError extends Schema.TaggedErrorClass<GitHubProjectsProviderError>()(
+  "GitHubProjectsProviderError",
+  {
+    operation: Schema.String,
+    detail: Schema.String,
+    cause: Schema.optional(Schema.Defect),
+  },
+) {
+  override get message(): string {
+    return `GitHub Projects provider failed in ${this.operation}: ${this.detail}`;
+  }
+}
+
+export interface GitHubProjectsProviderShape {
+  readonly checkAuthReadiness: (input: {
+    readonly cwd: string;
+  }) => Effect.Effect<GitHubProjectsAuthReadiness>;
+
+  readonly listProjects: (input: {
+    readonly cwd: string;
+    readonly owner: string;
+    readonly limit?: number;
+  }) => Effect.Effect<ReadonlyArray<GitHubProjectSummary>, GitHubProjectsProviderError>;
+
+  readonly listProjectFields: (input: {
+    readonly cwd: string;
+    readonly owner: string;
+    readonly projectNumber: number;
+  }) => Effect.Effect<ReadonlyArray<GitHubProjectField>, GitHubProjectsProviderError>;
+
+  readonly listProjectItems: (input: {
+    readonly cwd: string;
+    readonly owner: string;
+    readonly projectNumber: number;
+    readonly projectTitle: string;
+    readonly projectId: string;
+    readonly limit?: number;
+  }) => Effect.Effect<
+    { readonly board: KanbanConsoleProjectBoard; readonly tasks: ReadonlyArray<KanbanConsoleTask> },
+    GitHubProjectsProviderError
+  >;
+
+  readonly updateProjectItemStatus: (
+    input: GitHubProjectItemStatusUpdate & { readonly cwd: string },
+  ) => Effect.Effect<KanbanConsoleTaskTransitionResult, GitHubProjectsProviderError>;
+
+  readonly postStatusMoveComment: (input: {
+    readonly cwd: string;
+    readonly repository: string;
+    readonly issueNumber: number;
+    readonly body: string;
+    readonly confirmed: boolean;
+  }) => Effect.Effect<void, GitHubProjectsProviderError>;
+}
+
+export class GitHubProjectsProvider extends Context.Service<
+  GitHubProjectsProvider,
+  GitHubProjectsProviderShape
+>()("t3/kanban/GitHubProjectsProvider") {}
+
+function decodeJson<S extends Schema.Top>(
+  operation: string,
+  raw: string,
+  schema: S,
+): Effect.Effect<S["Type"], GitHubProjectsProviderError> {
+  return Schema.decodeEffect(Schema.fromJsonString(schema))(raw).pipe(
+    Effect.mapError(
+      (error) =>
+        new GitHubProjectsProviderError({
+          operation,
+          detail: `GitHub CLI returned invalid JSON: ${SchemaIssue.makeFormatterDefault()(error.issue)}`,
+          cause: error,
+        }),
+    ),
+  );
+}
+
+function providerError(
+  operation: string,
+  cause: GitHubCli.GitHubCliError,
+): GitHubProjectsProviderError {
+  return new GitHubProjectsProviderError({
+    operation,
+    detail: cause.detail,
+    cause,
+  });
+}
+
+function trim(input: unknown): string | null {
+  return typeof input === "string" && input.trim().length > 0 ? input.trim() : null;
+}
+
+function numberValue(input: unknown): number | null {
+  return typeof input === "number" && Number.isFinite(input) ? input : null;
+}
+
+function objectValue(input: unknown): Record<string, unknown> | null {
+  return typeof input === "object" && input !== null && !Array.isArray(input)
+    ? (input as Record<string, unknown>)
+    : null;
+}
+
+function arrayValue(input: unknown): ReadonlyArray<unknown> {
+  return Array.isArray(input) ? input : [];
+}
+
+function fieldValue(item: Record<string, unknown>, names: ReadonlyArray<string>): unknown {
+  const wanted = new Set(names.map((name) => name.toLowerCase()));
+  for (const rawField of arrayValue(item.fieldValues)) {
+    const field = objectValue(rawField);
+    if (!field) continue;
+    const name = trim(field.name) ?? trim(objectValue(field.field)?.name);
+    if (name && wanted.has(name.toLowerCase())) {
+      return field.value ?? field.name ?? field.text ?? field.title;
+    }
+  }
+
+  for (const name of names) {
+    if (name in item) return item[name];
+  }
+
+  return undefined;
+}
+
+function toColumn(value: unknown): KanbanColumnId {
+  const normalized = String(value ?? "")
+    .trim()
+    .toLowerCase()
+    .replace(/[_\s]+/g, "-");
+
+  if (["backlog", "icebox"].includes(normalized)) return "backlog";
+  if (["ready", "todo", "to-do"].includes(normalized)) return "ready";
+  if (["in-progress", "doing", "active"].includes(normalized)) return "in-progress";
+  if (["review", "in-review", "pr-review"].includes(normalized)) return "review";
+  if (["blocked", "blocker"].includes(normalized)) return "blocked";
+  if (["done", "complete", "completed", "closed"].includes(normalized)) return "done";
+  return "backlog";
+}
+
+function toPriority(value: unknown): KanbanConsoleTask["priority"] {
+  const normalized = String(value ?? "")
+    .trim()
+    .toUpperCase();
+  return normalized === "P0" || normalized === "P1" || normalized === "P2" ? normalized : "P2";
+}
+
+function toAgent(value: unknown): KanbanConsoleTask["agent"] {
+  const normalized = String(value ?? "")
+    .trim()
+    .toLowerCase();
+  if (normalized === "codex") return "Codex";
+  if (normalized === "claude") return "Claude";
+  return "Human";
+}
+
+function repoName(content: Record<string, unknown> | null, fallbackOwner: string): string {
+  const repository = objectValue(content?.repository);
+  const nameWithOwner = trim(repository?.nameWithOwner);
+  if (nameWithOwner) return nameWithOwner.split("/").at(-1) ?? nameWithOwner;
+  return trim(repository?.name) ?? trim(content?.repo) ?? fallbackOwner;
+}
+
+function issueLabel(content: Record<string, unknown> | null, repo: string): string {
+  const number = numberValue(content?.number);
+  return number === null ? `${repo}#unknown` : `${repo}#${number}`;
+}
+
+function assigneeName(
+  content: Record<string, unknown> | null,
+  item: Record<string, unknown>,
+): string {
+  const assignees = arrayValue(content?.assignees);
+  const firstAssignee = objectValue(assignees[0]);
+  return (
+    trim(firstAssignee?.login) ??
+    trim(firstAssignee?.name) ??
+    trim(fieldValue(item, ["Assignee"])) ??
+    "Unassigned"
+  );
+}
+
+function linkedPullRequest(item: Record<string, unknown>): string | undefined {
+  const direct = trim(fieldValue(item, ["Pull Request", "Pull Requests", "PR", "Linked PR"]));
+  if (direct) return direct;
+
+  for (const rawField of arrayValue(item.fieldValues)) {
+    const field = objectValue(rawField);
+    const value = trim(field?.value) ?? trim(field?.text) ?? trim(field?.title);
+    if (value && /#\d+/u.test(value)) return value;
+  }
+
+  return undefined;
+}
+
+function mapItemToTask(item: Record<string, unknown>, owner: string): KanbanConsoleTask | null {
+  const content = objectValue(item.content);
+  const title = trim(content?.title) ?? trim(item.title);
+  if (!title) return null;
+
+  const repo = repoName(content, owner);
+  const comments =
+    numberValue(content?.comments) ?? numberValue(fieldValue(item, ["Comments"])) ?? 0;
+  const updatedAt = trim(content?.updatedAt) ?? trim(item.updatedAt) ?? new Date(0).toISOString();
+
+  return {
+    id: trim(item.id) ?? `${repo}-${issueLabel(content, repo)}`,
+    issue: issueLabel(content, repo),
+    title,
+    titleAr: title,
+    repo,
+    column: toColumn(fieldValue(item, ["Status", "status"])),
+    priority: toPriority(fieldValue(item, ["Priority", "priority"])),
+    assignee: assigneeName(content, item),
+    ...(linkedPullRequest(item) ? { pr: linkedPullRequest(item) } : {}),
+    checks: { passing: 0, pending: 0, failing: 0 },
+    agent: toAgent(fieldValue(item, ["Agent", "Owner"])),
+    updated: updatedAt,
+    comments,
+  };
+}
+
+export const make = Effect.fn("makeGitHubProjectsProvider")(function* () {
+  const github = yield* GitHubCli.GitHubCli;
+
+  return GitHubProjectsProvider.of({
+    checkAuthReadiness: (input) =>
+      github
+        .execute({
+          cwd: input.cwd,
+          args: ["auth", "status"],
+          timeoutMs: DEFAULT_TIMEOUT_MS,
+        })
+        .pipe(
+          Effect.match({
+            onFailure: (error) => ({
+              status: "setup-required" as const,
+              detail: error.detail,
+            }),
+            onSuccess: () => ({
+              status: "authenticated" as const,
+              detail: "GitHub CLI is authenticated.",
+            }),
+          }),
+        ),
+
+    listProjects: (input) =>
+      github
+        .execute({
+          cwd: input.cwd,
+          args: [
+            "project",
+            "list",
+            "--owner",
+            input.owner,
+            "--limit",
+            String(input.limit ?? 20),
+            "--format",
+            "json",
+          ],
+          timeoutMs: DEFAULT_TIMEOUT_MS,
+        })
+        .pipe(
+          Effect.map((result) => result.stdout.trim()),
+          Effect.flatMap((raw) => decodeJson("listProjects", raw, RawProjectList)),
+          Effect.map((decoded) =>
+            decoded.projects.map((project) => ({
+              id: project.id.trim(),
+              number: project.number,
+              title: project.title.trim(),
+              ...(project.url ? { url: project.url.trim() } : {}),
+              closed: project.closed ?? false,
+            })),
+          ),
+          Effect.mapError((error) =>
+            Schema.is(GitHubProjectsProviderError)(error)
+              ? error
+              : providerError("listProjects", error),
+          ),
+        ),
+
+    listProjectFields: (input) =>
+      github
+        .execute({
+          cwd: input.cwd,
+          args: [
+            "project",
+            "field-list",
+            String(input.projectNumber),
+            "--owner",
+            input.owner,
+            "--format",
+            "json",
+          ],
+          timeoutMs: DEFAULT_TIMEOUT_MS,
+        })
+        .pipe(
+          Effect.map((result) => result.stdout.trim()),
+          Effect.flatMap((raw) => decodeJson("listProjectFields", raw, RawProjectFields)),
+          Effect.map((decoded) =>
+            decoded.fields.map((field) => ({
+              id: field.id.trim(),
+              name: field.name.trim(),
+              ...(field.type ? { type: field.type.trim() } : {}),
+              options: (field.options ?? []).map((option) => ({
+                id: option.id.trim(),
+                name: option.name.trim(),
+              })),
+            })),
+          ),
+          Effect.mapError((error) =>
+            Schema.is(GitHubProjectsProviderError)(error)
+              ? error
+              : providerError("listProjectFields", error),
+          ),
+        ),
+
+    listProjectItems: (input) =>
+      github
+        .execute({
+          cwd: input.cwd,
+          args: [
+            "project",
+            "item-list",
+            String(input.projectNumber),
+            "--owner",
+            input.owner,
+            "--limit",
+            String(input.limit ?? DEFAULT_PROJECT_ITEM_LIMIT),
+            "--format",
+            "json",
+          ],
+          timeoutMs: DEFAULT_TIMEOUT_MS,
+        })
+        .pipe(
+          Effect.map((result) => result.stdout.trim()),
+          Effect.flatMap((raw) =>
+            decodeJson(
+              "listProjectItems",
+              raw,
+              Schema.Struct({ items: Schema.Array(UnknownJson) }),
+            ),
+          ),
+          Effect.map((decoded) => {
+            const tasks = decoded.items
+              .map(objectValue)
+              .filter((item): item is Record<string, unknown> => item !== null)
+              .map((item) => mapItemToTask(item, input.owner))
+              .filter((task): task is KanbanConsoleTask => task !== null);
+
+            return {
+              board: {
+                id: input.projectId,
+                owner: input.owner,
+                title: input.projectTitle,
+                source: "github-projects" as const,
+                columns: ["backlog", "ready", "in-progress", "review", "blocked", "done"] as const,
+              },
+              tasks,
+            };
+          }),
+          Effect.mapError((error) =>
+            Schema.is(GitHubProjectsProviderError)(error)
+              ? error
+              : providerError("listProjectItems", error),
+          ),
+        ),
+
+    updateProjectItemStatus: (input) => {
+      if (!input.confirmed) {
+        return Effect.fail(
+          new GitHubProjectsProviderError({
+            operation: "updateProjectItemStatus",
+            detail: "GitHub Project status updates require explicit confirmation.",
+          }),
+        );
+      }
+
+      return github
+        .execute({
+          cwd: input.cwd,
+          args: [
+            "project",
+            "item-edit",
+            "--id",
+            input.itemId,
+            "--project-id",
+            input.projectId,
+            "--field-id",
+            input.statusFieldId,
+            "--single-select-option-id",
+            input.statusOptionId,
+          ],
+          timeoutMs: DEFAULT_TIMEOUT_MS,
+        })
+        .pipe(
+          Effect.as({
+            taskId: input.itemId,
+            fromColumn: input.fromColumn,
+            toColumn: input.toColumn,
+            action: "none" as const,
+            requiresConfirmation: false,
+            duplicateSuppressed: false,
+            message: "GitHub Project status updated.",
+          }),
+          Effect.mapError((error) => providerError("updateProjectItemStatus", error)),
+        );
+    },
+
+    postStatusMoveComment: (input) => {
+      if (!input.confirmed) {
+        return Effect.fail(
+          new GitHubProjectsProviderError({
+            operation: "postStatusMoveComment",
+            detail: "GitHub issue comments for status moves require explicit confirmation.",
+          }),
+        );
+      }
+
+      return github
+        .execute({
+          cwd: input.cwd,
+          args: [
+            "issue",
+            "comment",
+            String(input.issueNumber),
+            "--repo",
+            input.repository,
+            "--body",
+            input.body,
+          ],
+          timeoutMs: DEFAULT_TIMEOUT_MS,
+        })
+        .pipe(
+          Effect.asVoid,
+          Effect.mapError((error) => providerError("postStatusMoveComment", error)),
+        );
+    },
+  });
+});
+
+export const layer = Layer.effect(GitHubProjectsProvider, make());
diff --git a/apps/server/src/kanban/GitStatusProvider.test.ts b/apps/server/src/kanban/GitStatusProvider.test.ts
new file mode 100644
index 00000000000..be8e9aec417
--- /dev/null
+++ b/apps/server/src/kanban/GitStatusProvider.test.ts
@@ -0,0 +1,267 @@
+import * as NodeServices from "@effect/platform-node/NodeServices";
+import { Effect, FileSystem, Layer, Path, PlatformError, Scope } from 
"effect"; +import { assert, describe, it } from "@effect/vitest"; + +import { ServerConfig } from "../config.ts"; +import * as GitVcsDriver from "../vcs/GitVcsDriver.ts"; +import * as VcsProcess from "../vcs/VcsProcess.ts"; +import * as GitStatusProvider from "./GitStatusProvider.ts"; + +const ServerConfigLayer = ServerConfig.layerTest(process.cwd(), { + prefix: "t3-kanban-git-status-", +}); + +const GitLayer = GitVcsDriver.layer.pipe( + Layer.provide(ServerConfigLayer), + Layer.provideMerge(VcsProcess.layer), + Layer.provideMerge(NodeServices.layer), +); +const ProviderLayer = GitStatusProvider.layer.pipe(Layer.provide(GitLayer)); +const TestLayer = Layer.mergeAll(GitLayer, ProviderLayer); + +const policy = { + protectedBranches: ["main", "release/*"], + allowedWorkBranchPrefixes: ["feature/", "fix/", "chore/", "docs/"], + destructiveActionsRequireSecondConfirmation: true, +}; + +function makeTempDir( + prefix: string, +): Effect.Effect { + return Effect.gen(function* () { + const fileSystem = yield* FileSystem.FileSystem; + return yield* fileSystem.makeTempDirectoryScoped({ prefix }); + }); +} + +function writeFile( + cwd: string, + relativePath: string, + content: string, +): Effect.Effect { + return Effect.gen(function* () { + const fileSystem = yield* FileSystem.FileSystem; + const path = yield* Path.Path; + const absolutePath = path.join(cwd, relativePath); + yield* fileSystem.makeDirectory(path.dirname(absolutePath), { recursive: true }); + yield* fileSystem.writeFileString(absolutePath, content); + }); +} + +function runGit(cwd: string, args: ReadonlyArray) { + return Effect.gen(function* () { + const git = yield* GitVcsDriver.GitVcsDriver; + yield* git.execute({ + operation: "KanbanGitStatusProvider.test.git", + cwd, + args, + timeoutMs: 10_000, + }); + }); +} + +function initRepo() { + return Effect.gen(function* () { + const repoDir = yield* makeTempDir("kanban-git-status-"); + yield* runGit(repoDir, ["init", "-b", "main"]); + yield* runGit(repoDir, 
["config", "user.email", "test@example.com"]); + yield* runGit(repoDir, ["config", "user.name", "Test User"]); + yield* writeFile(repoDir, "README.md", "initial\n"); + yield* runGit(repoDir, ["add", "README.md"]); + yield* runGit(repoDir, ["commit", "-m", "initial"]); + return repoDir; + }); +} + +describe("KanbanGitStatusProvider", () => { + it.layer(TestLayer)("reads branch, staged, unstaged, and untracked status", (it) => { + it.effect("maps real git state into the Kanban status contract", () => + Effect.gen(function* () { + const provider = yield* GitStatusProvider.KanbanGitStatusProvider; + const repoDir = yield* initRepo(); + yield* runGit(repoDir, ["checkout", "-b", "feature/gitops"]); + yield* writeFile(repoDir, "README.md", "initial\nunstaged\n"); + yield* writeFile(repoDir, "src/staged.ts", "export const staged = true;\n"); + yield* runGit(repoDir, ["add", "src/staged.ts"]); + yield* writeFile(repoDir, "notes/untracked.md", "untracked\n"); + + const status = yield* provider.readStatus({ + repoId: "repo-1", + cwd: repoDir, + policy, + }); + + assert.equal(status.branch, "feature/gitops"); + assert.equal(status.isRepo, true); + assert.equal( + status.files.some((file) => file.path === "README.md" && file.status === "unstaged"), + true, + ); + assert.equal( + status.files.some((file) => file.path === "src/staged.ts" && file.status === "staged"), + true, + ); + assert.equal( + status.files.some( + (file) => file.path === "notes/untracked.md" && file.status === "untracked", + ), + true, + ); + assert.equal( + status.policyViolations?.some((violation) => violation.kind === "missing-upstream"), + true, + ); + }), + ); + + it.effect("flags dirty protected branches", () => + Effect.gen(function* () { + const provider = yield* GitStatusProvider.KanbanGitStatusProvider; + const repoDir = yield* initRepo(); + yield* writeFile(repoDir, "README.md", "dirty on main\n"); + + const status = yield* provider.readStatus({ + repoId: "repo-1", + cwd: repoDir, + policy, + }); + 
+ assert.equal(status.branch, "main"); + assert.equal( + status.policyViolations?.some( + (violation) => + violation.kind === "protected-branch" && violation.severity === "blocked", + ), + true, + ); + }), + ); + + it.effect("reads diffs and gates stage/unstage actions on confirmation", () => + Effect.gen(function* () { + const provider = yield* GitStatusProvider.KanbanGitStatusProvider; + const repoDir = yield* initRepo(); + yield* runGit(repoDir, ["checkout", "-b", "feature/stage-actions"]); + yield* writeFile(repoDir, "README.md", "initial\nchanged\n"); + + const diff = yield* provider.readFileDiff({ + repoId: "repo-1", + cwd: repoDir, + path: "README.md", + status: "unstaged", + }); + assert.equal(diff.truncated, false); + assert.equal(diff.diff.includes("+changed"), true); + + const blocked = yield* provider.stageFiles({ + repoId: "repo-1", + cwd: repoDir, + paths: ["README.md"], + confirmed: false, + }); + assert.equal(blocked.status, "blocked"); + + const staged = yield* provider.stageFiles({ + repoId: "repo-1", + cwd: repoDir, + paths: ["README.md"], + confirmed: true, + }); + assert.equal(staged.status, "applied"); + + let status = yield* provider.readStatus({ repoId: "repo-1", cwd: repoDir, policy }); + assert.equal( + status.files.some((file) => file.path === "README.md" && file.status === "staged"), + true, + ); + + const unstaged = yield* provider.unstageFiles({ + repoId: "repo-1", + cwd: repoDir, + paths: ["README.md"], + confirmed: true, + }); + assert.equal(unstaged.status, "applied"); + + status = yield* provider.readStatus({ repoId: "repo-1", cwd: repoDir, policy }); + assert.equal( + status.files.some((file) => file.path === "README.md" && file.status === "unstaged"), + true, + ); + }), + ); + + it.effect("normalizes renamed paths for diffs and file actions", () => + Effect.gen(function* () { + const provider = yield* GitStatusProvider.KanbanGitStatusProvider; + const repoDir = yield* initRepo(); + yield* runGit(repoDir, ["checkout", "-b", 
"feature/rename-status"]); + yield* runGit(repoDir, ["mv", "README.md", "README-renamed.md"]); + + const status = yield* provider.readStatus({ repoId: "repo-1", cwd: repoDir, policy }); + const renamed = status.files.find((file) => file.change === "renamed"); + + assert.equal(renamed?.path, "README-renamed.md"); + assert.equal(renamed?.sourcePath, "README.md"); + + const diff = yield* provider.readFileDiff({ + repoId: "repo-1", + cwd: repoDir, + path: renamed?.path ?? "README-renamed.md", + status: "staged", + }); + assert.equal(diff.diff.includes("README-renamed.md"), true); + + const unstaged = yield* provider.unstageFiles({ + repoId: "repo-1", + cwd: repoDir, + paths: [renamed?.path ?? "README-renamed.md"], + confirmed: true, + }); + assert.equal(unstaged.status, "applied"); + }), + ); + + it.effect("reports release and tag readiness gates", () => + Effect.gen(function* () { + const provider = yield* GitStatusProvider.KanbanGitStatusProvider; + const repoDir = yield* initRepo(); + yield* runGit(repoDir, ["tag", "v0.1.0"]); + yield* runGit(repoDir, ["checkout", "-b", "release/0.2.0"]); + yield* writeFile(repoDir, "docs/product/release-notes.md", "Release notes\n"); + yield* runGit(repoDir, ["add", "docs/product/release-notes.md"]); + yield* runGit(repoDir, ["commit", "-m", "release notes"]); + + const readiness = yield* provider.readReleaseReadiness({ + cwd: repoDir, + policy, + releaseNotesPath: "docs/product/release-notes.md", + targetTag: "v0.2.0", + providerStatuses: [{ id: "gate-ci", label: "CI", status: "passing" }], + }); + + assert.equal(readiness.branch, "release/0.2.0"); + assert.equal(readiness.latestTag, "v0.1.0"); + assert.equal(readiness.targetTag, "v0.2.0"); + assert.equal( + readiness.gates.every((gate) => gate.status === "passing"), + true, + ); + + yield* runGit(repoDir, ["tag", "v0.2.0"]); + const blocked = yield* provider.readReleaseReadiness({ + cwd: repoDir, + policy, + releaseNotesPath: "docs/product/release-notes.md", + targetTag: 
"v0.2.0", + }); + assert.equal( + blocked.gates.some( + (gate) => gate.id === "gate-tag-readiness" && gate.status === "blocked", + ), + true, + ); + }), + ); + }); +}); diff --git a/apps/server/src/kanban/GitStatusProvider.ts b/apps/server/src/kanban/GitStatusProvider.ts new file mode 100644 index 00000000000..44bc899b9a2 --- /dev/null +++ b/apps/server/src/kanban/GitStatusProvider.ts @@ -0,0 +1,614 @@ +import { Context, Effect, Layer, Schema } from "effect"; +import type { + KanbanConsoleGitFileActionRequest, + KanbanConsoleGitFileActionResult, + KanbanConsoleGitFileChangeKind, + KanbanConsoleGitFileDiff, + KanbanConsoleGitFileStatus, + KanbanConsoleGitHunkStagingSupport, + KanbanConsoleGitOpsPolicy, + KanbanConsoleGitPolicyViolation, + KanbanConsoleGitStatusSnapshot, + KanbanConsoleReleaseGateStatus, + KanbanConsoleReleaseReadiness, +} from "@t3tools/contracts"; + +import * as GitVcsDriver from "../vcs/GitVcsDriver.ts"; + +const DEFAULT_DIFF_MAX_BYTES = 256 * 1024; + +export class KanbanGitStatusProviderError extends Schema.TaggedErrorClass()( + "KanbanGitStatusProviderError", + { + operation: Schema.String, + detail: Schema.String, + cause: Schema.optional(Schema.Defect), + }, +) { + override get message(): string { + return `Kanban git status provider failed in ${this.operation}: ${this.detail}`; + } +} + +export interface ReadKanbanGitStatusInput { + readonly repoId: string; + readonly cwd: string; + readonly policy: KanbanConsoleGitOpsPolicy; +} + +export interface ReadKanbanGitFileDiffInput { + readonly repoId: string; + readonly cwd: string; + readonly path: string; + readonly status: KanbanConsoleGitFileStatus["status"]; + readonly maxOutputBytes?: number; +} + +export interface ReadKanbanReleaseReadinessInput { + readonly cwd: string; + readonly policy: KanbanConsoleGitOpsPolicy; + readonly releaseNotesPath?: string; + readonly targetTag?: string; + readonly providerStatuses?: ReadonlyArray<{ + readonly id: string; + readonly label: string; + readonly 
status: KanbanConsoleReleaseGateStatus; + }>; +} + +export interface KanbanGitStatusProviderShape { + readonly readStatus: ( + input: ReadKanbanGitStatusInput, + ) => Effect.Effect; + readonly readFileDiff: ( + input: ReadKanbanGitFileDiffInput, + ) => Effect.Effect; + readonly stageFiles: ( + input: KanbanConsoleGitFileActionRequest, + ) => Effect.Effect; + readonly unstageFiles: ( + input: KanbanConsoleGitFileActionRequest, + ) => Effect.Effect; + readonly readReleaseReadiness: ( + input: ReadKanbanReleaseReadinessInput, + ) => Effect.Effect; +} + +export class KanbanGitStatusProvider extends Context.Service< + KanbanGitStatusProvider, + KanbanGitStatusProviderShape +>()("t3/kanban/KanbanGitStatusProvider") {} + +function providerError(operation: string, cause: unknown): KanbanGitStatusProviderError { + return new KanbanGitStatusProviderError({ + operation, + detail: cause instanceof Error ? cause.message : String(cause), + cause, + }); +} + +function changeKind(raw: string): KanbanConsoleGitFileChangeKind { + switch (raw) { + case "A": + case "?": + return "added"; + case "M": + return "modified"; + case "D": + return "deleted"; + case "R": + return "renamed"; + case "C": + return "copied"; + case "U": + return "unmerged"; + default: + return "unknown"; + } +} + +function parseNumstat(output: string): Map { + const result = new Map(); + for (const line of output.split(/\r?\n/g)) { + if (!line.trim()) continue; + const [additionsRaw, deletionsRaw, path] = line.split("\t"); + if (!path) continue; + const additions = Number.parseInt(additionsRaw ?? "0", 10); + const deletions = Number.parseInt(deletionsRaw ?? "0", 10); + result.set(normalizeNumstatPath(path), { + additions: Number.isFinite(additions) ? additions : 0, + deletions: Number.isFinite(deletions) ? 
deletions : 0, + }); + } + return result; +} + +function parseBranchLine(line: string): { + branch: string; + upstream?: string; + ahead: number; + behind: number; +} { + const value = line.replace(/^##\s+/, "").trim(); + const [branchPart, trackingPart = ""] = value.split(/\s+\[/, 2); + const [branchRaw, upstreamRaw] = (branchPart ?? "").split("...", 2); + const branch = + branchRaw && !branchRaw.startsWith("HEAD ") && branchRaw !== "HEAD" ? branchRaw : "DETACHED"; + const tracking = `[${trackingPart}`; + const aheadMatch = /ahead\s+(\d+)/.exec(tracking); + const behindMatch = /behind\s+(\d+)/.exec(tracking); + return { + branch, + ...(upstreamRaw ? { upstream: upstreamRaw } : {}), + ahead: aheadMatch ? Number.parseInt(aheadMatch[1] ?? "0", 10) : 0, + behind: behindMatch ? Number.parseInt(behindMatch[1] ?? "0", 10) : 0, + }; +} + +function normalizePorcelainPath(path: string): string { + return path.replace(/^"|"$/g, "").replace(/\\"/g, '"').trim(); +} + +function parsePorcelainPath(path: string): { path: string; sourcePath?: string } { + const normalized = normalizePorcelainPath(path); + const arrowIndex = normalized.lastIndexOf(" -> "); + if (arrowIndex === -1) return { path: normalized }; + const sourcePath = normalizePorcelainPath(normalized.slice(0, arrowIndex)); + const targetPath = normalizePorcelainPath(normalized.slice(arrowIndex + " -> ".length)); + return { + path: targetPath, + ...(sourcePath ? { sourcePath } : {}), + }; +} + +function normalizeNumstatPath(path: string): string { + const normalized = normalizePorcelainPath(path); + const arrowIndex = normalized.lastIndexOf(" => "); + if (arrowIndex === -1) return normalized; + + const bracePrefixMatch = /^(.*)\{([^{}]+)$/.exec(normalized.slice(0, arrowIndex)); + const braceSuffixMatch = /^([^{}]+)\}(.*)$/.exec(normalized.slice(arrowIndex + " => ".length)); + if (bracePrefixMatch && braceSuffixMatch) { + return `${bracePrefixMatch[1] ?? ""}${braceSuffixMatch[1] ?? ""}${braceSuffixMatch[2] ?? 
""}`; + } + + return normalizePorcelainPath(normalized.slice(arrowIndex + " => ".length)); +} + +function hunkSupport( + status: KanbanConsoleGitFileStatus["status"], + diffAvailable: boolean, +): KanbanConsoleGitHunkStagingSupport { + if (status === "untracked") return "not-applicable"; + return diffAvailable ? "supported" : "unsupported"; +} + +function makeFileStatus(input: { + readonly path: string; + readonly sourcePath?: string; + readonly status: KanbanConsoleGitFileStatus["status"]; + readonly change: KanbanConsoleGitFileChangeKind; + readonly stat?: { additions: number; deletions: number }; +}): KanbanConsoleGitFileStatus { + const diffAvailable = input.status !== "untracked" || input.change === "added"; + return { + path: input.path, + ...(input.sourcePath ? { sourcePath: input.sourcePath } : {}), + status: input.status, + change: input.change, + additions: input.stat?.additions ?? 0, + deletions: input.stat?.deletions ?? 0, + diffAvailable, + hunkStaging: hunkSupport(input.status, diffAvailable), + }; +} + +function makeFileStatusWithOptionalStat(input: { + readonly path: string; + readonly sourcePath?: string; + readonly status: KanbanConsoleGitFileStatus["status"]; + readonly change: KanbanConsoleGitFileChangeKind; + readonly stat: { additions: number; deletions: number } | undefined; +}): KanbanConsoleGitFileStatus { + return makeFileStatus({ + path: input.path, + ...(input.sourcePath ? { sourcePath: input.sourcePath } : {}), + status: input.status, + change: input.change, + ...(input.stat ? 
{ stat: input.stat } : {}), + }); +} + +function parseStatusFiles(input: { + readonly porcelain: string; + readonly stagedStats: Map; + readonly unstagedStats: Map; +}): KanbanConsoleGitFileStatus[] { + const files: KanbanConsoleGitFileStatus[] = []; + + for (const rawLine of input.porcelain.split(/\r?\n/g)) { + if (!rawLine || rawLine.startsWith("##")) continue; + + const code = rawLine.slice(0, 2); + const parsedPath = parsePorcelainPath(rawLine.slice(3)); + const path = parsedPath.path; + if (!path) continue; + + if (code === "??") { + files.push(makeFileStatus({ path, status: "untracked", change: "added" })); + continue; + } + + const indexStatus = code[0] ?? " "; + const worktreeStatus = code[1] ?? " "; + if (indexStatus !== " " && indexStatus !== "?") { + files.push( + makeFileStatusWithOptionalStat({ + path, + ...(parsedPath.sourcePath ? { sourcePath: parsedPath.sourcePath } : {}), + status: "staged", + change: changeKind(indexStatus), + stat: input.stagedStats.get(path), + }), + ); + } + if (worktreeStatus !== " " && worktreeStatus !== "?") { + files.push( + makeFileStatusWithOptionalStat({ + path, + ...(parsedPath.sourcePath ? 
{ sourcePath: parsedPath.sourcePath } : {}), + status: "unstaged", + change: changeKind(worktreeStatus), + stat: input.unstagedStats.get(path), + }), + ); + } + } + + return files.toSorted((a, b) => `${a.path}:${a.status}`.localeCompare(`${b.path}:${b.status}`)); +} + +function wildcardMatches(pattern: string, value: string): boolean { + if (pattern.endsWith("*")) return value.startsWith(pattern.slice(0, -1)); + return pattern === value; +} + +function isProtectedBranch(branch: string, policy: KanbanConsoleGitOpsPolicy): boolean { + return policy.protectedBranches.some((pattern) => wildcardMatches(pattern, branch)); +} + +function hasAllowedPrefix(branch: string, policy: KanbanConsoleGitOpsPolicy): boolean { + return policy.allowedWorkBranchPrefixes.some((prefix) => branch.startsWith(prefix)); +} + +function policyViolations(input: { + readonly branch: string; + readonly upstream?: string; + readonly behind: number; + readonly files: ReadonlyArray; + readonly policy: KanbanConsoleGitOpsPolicy; +}): KanbanConsoleGitPolicyViolation[] { + const violations: KanbanConsoleGitPolicyViolation[] = []; + const dirty = input.files.length > 0; + + if (dirty && isProtectedBranch(input.branch, input.policy)) { + violations.push({ + id: "protected-branch", + kind: "protected-branch", + severity: "blocked", + message: `Working tree changes are blocked on protected branch ${input.branch}.`, + }); + } + + if ( + !isProtectedBranch(input.branch, input.policy) && + !hasAllowedPrefix(input.branch, input.policy) + ) { + violations.push({ + id: "invalid-work-branch-prefix", + kind: "invalid-work-branch-prefix", + severity: "warning", + message: `Branch ${input.branch} does not use an allowed work prefix.`, + }); + } + + if (!input.upstream) { + violations.push({ + id: "missing-upstream", + kind: "missing-upstream", + severity: "warning", + message: `Branch ${input.branch} has no upstream tracking branch.`, + }); + } + + if (input.behind > 0) { + violations.push({ + id: "behind-upstream", 
+ kind: "behind-upstream", + severity: "warning", + message: `Branch ${input.branch} is ${input.behind} commit(s) behind upstream.`, + }); + } + + if (dirty && input.branch.startsWith("release/")) { + violations.push({ + id: "dirty-release-branch", + kind: "dirty-release-branch", + severity: "blocked", + message: `Release branch ${input.branch} has uncommitted changes.`, + }); + } + + return violations; +} + +const readStatus = Effect.fn("KanbanGitStatusProvider.readStatus")(function* ( + git: GitVcsDriver.GitVcsDriverShape, + input: ReadKanbanGitStatusInput, +) { + const [statusOutput, stagedNumstat, unstagedNumstat, details] = yield* Effect.all( + [ + git.execute({ + operation: "KanbanGitStatusProvider.status", + cwd: input.cwd, + args: ["status", "--porcelain=v1", "--branch", "--untracked-files=all"], + }), + git.execute({ + operation: "KanbanGitStatusProvider.stagedNumstat", + cwd: input.cwd, + args: ["diff", "--cached", "--numstat"], + }), + git.execute({ + operation: "KanbanGitStatusProvider.unstagedNumstat", + cwd: input.cwd, + args: ["diff", "--numstat"], + }), + git.statusDetails(input.cwd), + ], + { concurrency: "unbounded" }, + ); + + const branchLine = statusOutput.stdout.split(/\r?\n/g).find((line) => line.startsWith("##")); + const branch = branchLine ? parseBranchLine(branchLine) : null; + const files = parseStatusFiles({ + porcelain: statusOutput.stdout, + stagedStats: parseNumstat(stagedNumstat.stdout), + unstagedStats: parseNumstat(unstagedNumstat.stdout), + }); + const branchName = branch?.branch ?? details.branch ?? "DETACHED"; + const upstream = branch?.upstream ?? details.upstreamRef ?? undefined; + const behind = branch?.behind ?? details.behindCount; + + return { + repoId: input.repoId, + cwd: input.cwd, + isRepo: details.isRepo, + branch: branchName, + ...(upstream ? { upstream } : {}), + ahead: branch?.ahead ?? 
details.aheadCount, + behind, + aheadOfDefault: details.aheadOfDefaultCount, + files, + policyViolations: policyViolations({ + branch: branchName, + ...(upstream ? { upstream } : {}), + behind, + files, + policy: input.policy, + }), + } satisfies KanbanConsoleGitStatusSnapshot; +}); + +function diffArgs(input: ReadKanbanGitFileDiffInput): ReadonlyArray { + if (input.status === "staged") return ["diff", "--cached", "--", input.path]; + if (input.status === "untracked") return ["diff", "--no-index", "--", "/dev/null", input.path]; + return ["diff", "--", input.path]; +} + +function nonEmptyDiff(input: ReadKanbanGitFileDiffInput, output: string): string { + const trimmed = output.trim(); + return trimmed.length > 0 ? trimmed : `No textual diff is available for ${input.path}.`; +} + +const readFileDiff = Effect.fn("KanbanGitStatusProvider.readFileDiff")(function* ( + git: GitVcsDriver.GitVcsDriverShape, + input: ReadKanbanGitFileDiffInput, +) { + const result = yield* git.execute({ + operation: "KanbanGitStatusProvider.readFileDiff", + cwd: input.cwd, + args: diffArgs(input), + allowNonZeroExit: input.status === "untracked", + maxOutputBytes: input.maxOutputBytes ?? DEFAULT_DIFF_MAX_BYTES, + truncateOutputAtMaxBytes: true, + }); + + return { + repoId: input.repoId, + path: input.path, + status: input.status, + diff: nonEmptyDiff(input, result.stdout), + truncated: result.stdoutTruncated, + } satisfies KanbanConsoleGitFileDiff; +}); + +function blockedAction( + action: KanbanConsoleGitFileActionResult["action"], + input: KanbanConsoleGitFileActionRequest, +): KanbanConsoleGitFileActionResult { + return { + repoId: input.repoId, + paths: input.paths, + action, + status: "blocked", + message: `${action === "stage" ? 
"Stage" : "Unstage"} requires explicit confirmation.`, + }; +} + +const stageFiles = Effect.fn("KanbanGitStatusProvider.stageFiles")(function* ( + git: GitVcsDriver.GitVcsDriverShape, + input: KanbanConsoleGitFileActionRequest, +) { + if (!input.confirmed) return blockedAction("stage", input); + yield* git.execute({ + operation: "KanbanGitStatusProvider.stageFiles", + cwd: input.cwd, + args: ["add", "--", ...input.paths], + }); + return { + repoId: input.repoId, + paths: input.paths, + action: "stage", + status: "applied", + message: `Staged ${input.paths.length} file(s).`, + } satisfies KanbanConsoleGitFileActionResult; +}); + +const unstageFiles = Effect.fn("KanbanGitStatusProvider.unstageFiles")(function* ( + git: GitVcsDriver.GitVcsDriverShape, + input: KanbanConsoleGitFileActionRequest, +) { + if (!input.confirmed) return blockedAction("unstage", input); + yield* git.execute({ + operation: "KanbanGitStatusProvider.unstageFiles", + cwd: input.cwd, + args: ["restore", "--staged", "--", ...input.paths], + }); + return { + repoId: input.repoId, + paths: input.paths, + action: "unstage", + status: "applied", + message: `Unstaged ${input.paths.length} file(s).`, + } satisfies KanbanConsoleGitFileActionResult; +}); + +function gate( + id: string, + label: string, + status: KanbanConsoleReleaseGateStatus, +): KanbanConsoleReleaseReadiness["gates"][number] { + return { id, label, status }; +} + +function releaseTagFromBranch(branch: string): string | undefined { + const version = branch.startsWith("release/") ? branch.slice("release/".length).trim() : ""; + if (!version) return undefined; + return version.startsWith("v") ? 
version : `v${version}`; +} + +const readOptionalGitOutput = ( + git: GitVcsDriver.GitVcsDriverShape, + input: { + readonly cwd: string; + readonly operation: string; + readonly args: ReadonlyArray; + }, +) => + git + .execute({ + operation: input.operation, + cwd: input.cwd, + args: input.args, + allowNonZeroExit: true, + maxOutputBytes: 64 * 1024, + }) + .pipe(Effect.map((result) => (result.exitCode === 0 ? result.stdout.trim() : ""))); + +const readReleaseReadiness = Effect.fn("KanbanGitStatusProvider.readReleaseReadiness")(function* ( + git: GitVcsDriver.GitVcsDriverShape, + input: ReadKanbanReleaseReadinessInput, +) { + const [details, statusOutput, latestTag, targetTagOutput, releaseNotesOutput] = yield* Effect.all( + [ + git.statusDetails(input.cwd), + git.execute({ + operation: "KanbanGitStatusProvider.releaseStatus", + cwd: input.cwd, + args: ["status", "--porcelain=v1", "--untracked-files=all"], + }), + readOptionalGitOutput(git, { + cwd: input.cwd, + operation: "KanbanGitStatusProvider.latestTag", + args: ["describe", "--tags", "--abbrev=0"], + }), + input.targetTag + ? readOptionalGitOutput(git, { + cwd: input.cwd, + operation: "KanbanGitStatusProvider.targetTag", + args: ["tag", "--list", input.targetTag], + }) + : Effect.succeed(""), + input.releaseNotesPath + ? readOptionalGitOutput(git, { + cwd: input.cwd, + operation: "KanbanGitStatusProvider.releaseNotes", + args: ["ls-files", "--error-unmatch", "--", input.releaseNotesPath], + }) + : Effect.succeed(""), + ], + { concurrency: "unbounded" }, + ); + + const branch = details.branch ?? "DETACHED"; + const targetTag = input.targetTag ?? releaseTagFromBranch(branch); + const dirty = statusOutput.stdout.trim().length > 0; + const providerStatusGates = (input.providerStatuses ?? []).map((providerStatus) => + gate(providerStatus.id, providerStatus.label, providerStatus.status), + ); + const providerGateStatus: KanbanConsoleReleaseGateStatus = + providerStatusGates.length === 0 + ? 
"pending" + : providerStatusGates.some((providerGate) => providerGate.status === "blocked") + ? "blocked" + : providerStatusGates.some((providerGate) => providerGate.status === "pending") + ? "pending" + : "passing"; + const gates: KanbanConsoleReleaseReadiness["gates"] = [ + gate( + "gate-release-branch", + "Release branch", + branch.startsWith("release/") ? "passing" : "blocked", + ), + gate("gate-clean-worktree", "Clean working tree", dirty ? "blocked" : "passing"), + gate( + "gate-release-notes", + "Release notes", + input.releaseNotesPath ? (releaseNotesOutput.length > 0 ? "passing" : "blocked") : "pending", + ), + gate("gate-provider-status", "Provider status", providerGateStatus), + gate( + "gate-tag-readiness", + "Tag readiness", + targetTag ? (targetTagOutput.length > 0 ? "blocked" : "passing") : "pending", + ), + ...providerStatusGates, + ]; + + return { + branch, + ...(latestTag ? { latestTag } : {}), + ...(targetTag ? { targetTag } : {}), + gates, + } satisfies KanbanConsoleReleaseReadiness; +}); + +export const make = Effect.fn("KanbanGitStatusProvider.make")(function* () { + const git = yield* GitVcsDriver.GitVcsDriver; + return { + readStatus: (input) => + readStatus(git, input).pipe(Effect.mapError((e) => providerError("readStatus", e))), + readFileDiff: (input) => + readFileDiff(git, input).pipe(Effect.mapError((e) => providerError("readFileDiff", e))), + stageFiles: (input) => + stageFiles(git, input).pipe(Effect.mapError((e) => providerError("stageFiles", e))), + unstageFiles: (input) => + unstageFiles(git, input).pipe(Effect.mapError((e) => providerError("unstageFiles", e))), + readReleaseReadiness: (input) => + readReleaseReadiness(git, input).pipe( + Effect.mapError((e) => providerError("readReleaseReadiness", e)), + ), + } satisfies KanbanGitStatusProviderShape; +}); + +export const layer = Layer.effect(KanbanGitStatusProvider, make()); diff --git a/apps/server/src/kanban/ProductArtifactsProvider.test.ts 
b/apps/server/src/kanban/ProductArtifactsProvider.test.ts new file mode 100644 index 00000000000..12796193bb3 --- /dev/null +++ b/apps/server/src/kanban/ProductArtifactsProvider.test.ts @@ -0,0 +1,276 @@ +import * as NodeServices from "@effect/platform-node/NodeServices"; +import { Effect, FileSystem, Layer, Path, PlatformError, Scope } from "effect"; +import { ChildProcessSpawner } from "effect/unstable/process"; +import { assert, describe, expect, it, vi, afterEach } from "@effect/vitest"; + +import { ServerConfig } from "../config.ts"; +import * as GitHubCli from "../sourceControl/GitHubCli.ts"; +import * as GitVcsDriver from "../vcs/GitVcsDriver.ts"; +import type * as VcsProcess from "../vcs/VcsProcess.ts"; +import * as VcsProcessLayer from "../vcs/VcsProcess.ts"; +import * as ProductArtifactsProvider from "./ProductArtifactsProvider.ts"; + +const ServerConfigLayer = ServerConfig.layerTest(process.cwd(), { + prefix: "t3-kanban-product-artifacts-", +}); + +const processOutput = (stdout: string): VcsProcess.VcsProcessOutput => ({ + exitCode: ChildProcessSpawner.ExitCode(0), + stdout, + stderr: "", + stdoutTruncated: false, + stderrTruncated: false, +}); + +const execute = vi.fn(); + +const GitLayer = GitVcsDriver.layer.pipe( + Layer.provide(ServerConfigLayer), + Layer.provideMerge(VcsProcessLayer.layer), + Layer.provideMerge(NodeServices.layer), +); + +const ProviderLayer = ProductArtifactsProvider.layer.pipe( + Layer.provide(GitLayer), + Layer.provide( + Layer.mock(GitHubCli.GitHubCli)({ + execute, + listOpenPullRequests: vi.fn(), + getPullRequest: vi.fn(), + getRepositoryCloneUrls: vi.fn(), + createRepository: vi.fn(), + createPullRequest: vi.fn(), + getDefaultBranch: vi.fn(), + checkoutPullRequest: vi.fn(), + }), + ), +); + +const TestLayer = Layer.mergeAll(GitLayer, ProviderLayer); + +afterEach(() => { + execute.mockReset(); +}); + +function makeTempDir( + prefix: string, +): Effect.Effect { + return Effect.gen(function* () { + const fileSystem = yield* 
FileSystem.FileSystem; + return yield* fileSystem.makeTempDirectoryScoped({ prefix }); + }); +} + +function writeFile( + cwd: string, + relativePath: string, + content: string, +): Effect.Effect { + return Effect.gen(function* () { + const fileSystem = yield* FileSystem.FileSystem; + const path = yield* Path.Path; + const absolutePath = path.join(cwd, relativePath); + yield* fileSystem.makeDirectory(path.dirname(absolutePath), { recursive: true }); + yield* fileSystem.writeFileString(absolutePath, content); + }); +} + +function runGit(cwd: string, args: ReadonlyArray) { + return Effect.gen(function* () { + const git = yield* GitVcsDriver.GitVcsDriver; + yield* git.execute({ + operation: "ProductArtifactsProvider.test.git", + cwd, + args, + timeoutMs: 10_000, + }); + }); +} + +function initRepo() { + return Effect.gen(function* () { + const repoDir = yield* makeTempDir("kanban-product-artifacts-"); + yield* runGit(repoDir, ["init", "-b", "main"]); + yield* runGit(repoDir, ["config", "user.email", "test@example.com"]); + yield* runGit(repoDir, ["config", "user.name", "Test User"]); + yield* writeFile(repoDir, "docs/product/overview.md", "# Overview\n\nSynthetic notes.\n"); + yield* writeFile(repoDir, "docs/product/nested/brief.md", "# Brief\n\nNested notes.\n"); + yield* writeFile(repoDir, "README.md", "initial\n"); + yield* runGit(repoDir, ["add", "."]); + yield* runGit(repoDir, ["commit", "-m", "initial"]); + yield* runGit(repoDir, ["checkout", "-b", "feature/product-artifacts"]); + return repoDir; + }); +} + +describe("ProductArtifactsProvider", () => { + it.layer(TestLayer)("browses and previews Markdown artifacts under docs/product", (it) => { + it.effect("lists product Markdown files with clean status", () => + Effect.gen(function* () { + const provider = yield* ProductArtifactsProvider.ProductArtifactsProvider; + const repoDir = yield* initRepo(); + + const artifacts = yield* provider.listArtifacts({ + repoId: "repo-1", + cwd: repoDir, + }); + + 
expect(artifacts.map((artifact) => artifact.path)).toEqual([ + "docs/product/nested/brief.md", + "docs/product/overview.md", + ]); + expect(artifacts.every((artifact) => artifact.status === "clean")).toBe(true); + + const content = yield* provider.readArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/overview.md", + }); + assert.equal(content.title, "Overview"); + assert.equal(content.preview.includes("Synthetic notes."), true); + }), + ); + + it.effect("confines reads and writes to docs/product Markdown files", () => + Effect.gen(function* () { + const provider = yield* ProductArtifactsProvider.ProductArtifactsProvider; + const repoDir = yield* initRepo(); + + const outside = yield* Effect.exit( + provider.readArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/../tasks/plan.md", + }), + ); + const nonMarkdown = yield* Effect.exit( + provider.writeArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/notes.txt", + content: "not markdown", + confirmed: true, + }), + ); + + assert.equal(outside._tag, "Failure"); + assert.equal(nonMarkdown._tag, "Failure"); + }), + ); + + it.effect("blocks dirty file conflicts before writing", () => + Effect.gen(function* () { + const provider = yield* ProductArtifactsProvider.ProductArtifactsProvider; + const repoDir = yield* initRepo(); + yield* writeFile(repoDir, "docs/product/overview.md", "# Overview\n\nDirty edit.\n"); + + const result = yield* provider.writeArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/overview.md", + content: "# Overview\n\nNew edit.\n", + confirmed: true, + }); + + assert.equal(result.status, "blocked"); + assert.equal(result.message.includes("dirty"), true); + }), + ); + + it.effect("writes clean artifacts and posts concise linked issue comments", () => + Effect.gen(function* () { + execute.mockReturnValueOnce(Effect.succeed(processOutput("commented\n"))); + const provider = yield* ProductArtifactsProvider.ProductArtifactsProvider; + 
const repoDir = yield* initRepo(); + + const result = yield* provider.writeArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/overview.md", + content: "# Overview\n\nUpdated through provider.\n", + confirmed: true, + linkedRepository: "MohAnghabo/kanban-console", + linkedIssueNumber: 43, + }); + + assert.equal(result.status, "applied"); + assert.equal(result.commentTarget, "issue#43"); + expect(execute).toHaveBeenCalledWith({ + cwd: repoDir, + args: [ + "issue", + "comment", + "43", + "--repo", + "MohAnghabo/kanban-console", + "--body", + expect.stringContaining("Raw diff and command output intentionally omitted."), + ], + timeoutMs: 30_000, + }); + + const content = yield* provider.readArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/overview.md", + }); + assert.equal(content.content.includes("Updated through provider."), true); + }), + ); + + it.effect("keeps the artifact write result explicit when comment posting fails", () => + Effect.gen(function* () { + execute.mockReturnValueOnce( + Effect.fail( + new GitHubCli.GitHubCliError({ + operation: "execute", + detail: "GitHub CLI is not authenticated.", + }), + ), + ); + const provider = yield* ProductArtifactsProvider.ProductArtifactsProvider; + const repoDir = yield* initRepo(); + + const result = yield* provider.writeArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/overview.md", + content: "# Overview\n\nUpdated without comment.\n", + confirmed: true, + linkedRepository: "MohAnghabo/kanban-console", + linkedIssueNumber: 43, + }); + + assert.equal(result.status, "applied"); + assert.equal(result.commentTarget, undefined); + assert.equal(result.message.includes("comment posting failed"), true); + + const content = yield* provider.readArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/overview.md", + }); + assert.equal(content.content.includes("Updated without comment."), true); + }), + ); + + it.effect("blocks confirmed writes on protected 
branches", () => + Effect.gen(function* () { + const provider = yield* ProductArtifactsProvider.ProductArtifactsProvider; + const repoDir = yield* initRepo(); + yield* runGit(repoDir, ["checkout", "main"]); + + const result = yield* provider.writeArtifact({ + repoId: "repo-1", + cwd: repoDir, + path: "docs/product/overview.md", + content: "# Overview\n\nProtected branch edit.\n", + confirmed: true, + }); + + assert.equal(result.status, "blocked"); + assert.equal(result.message.includes("protected branch main"), true); + }), + ); + }); +}); diff --git a/apps/server/src/kanban/ProductArtifactsProvider.ts b/apps/server/src/kanban/ProductArtifactsProvider.ts new file mode 100644 index 00000000000..e47b78be0f5 --- /dev/null +++ b/apps/server/src/kanban/ProductArtifactsProvider.ts @@ -0,0 +1,342 @@ +import { Context, Effect, Layer, Schema } from "effect"; +import { readdir, readFile, stat, writeFile, mkdir } from "node:fs/promises"; +import nodePath from "node:path"; +import type { + KanbanConsoleArtifact, + KanbanConsoleArtifactContent, + KanbanConsoleArtifactStatus, + KanbanConsoleArtifactWriteRequest, + KanbanConsoleArtifactWriteResult, +} from "@t3tools/contracts"; + +import * as GitVcsDriver from "../vcs/GitVcsDriver.ts"; +import * as GitHubCli from "../sourceControl/GitHubCli.ts"; + +const PRODUCT_DOCS_ROOT = "docs/product"; +const DEFAULT_TIMEOUT_MS = 30_000; + +export class ProductArtifactsProviderError extends Schema.TaggedErrorClass()( + "ProductArtifactsProviderError", + { + operation: Schema.String, + detail: Schema.String, + cause: Schema.optional(Schema.Defect), + }, +) { + override get message(): string { + return `Product artifacts provider failed in ${this.operation}: ${this.detail}`; + } +} + +export interface ProductArtifactPathInput { + readonly repoId: string; + readonly cwd: string; + readonly path: string; +} + +export interface ProductArtifactsProviderShape { + readonly listArtifacts: (input: { + readonly repoId: string; + readonly cwd: string; + 
}) => Effect.Effect, ProductArtifactsProviderError>; + readonly readArtifact: ( + input: ProductArtifactPathInput, + ) => Effect.Effect; + readonly writeArtifact: ( + input: KanbanConsoleArtifactWriteRequest, + ) => Effect.Effect; +} + +export class ProductArtifactsProvider extends Context.Service< + ProductArtifactsProvider, + ProductArtifactsProviderShape +>()("t3/kanban/ProductArtifactsProvider") {} + +function providerError(operation: string, cause: unknown): ProductArtifactsProviderError { + return new ProductArtifactsProviderError({ + operation, + detail: cause instanceof Error ? cause.message : String(cause), + cause, + }); +} + +function normalizeRelativePath(relativePath: string): string { + return relativePath.replace(/\\/g, "/").replace(/^\.\/+/, ""); +} + +function confinedMarkdownPath(input: { + readonly cwd: string; + readonly path: string; +}): Effect.Effect< + { readonly relativePath: string; readonly absolutePath: string; readonly rootPath: string }, + ProductArtifactsProviderError +> { + const relativePath = normalizeRelativePath(input.path); + const rootPath = nodePath.resolve(input.cwd, PRODUCT_DOCS_ROOT); + const absolutePath = nodePath.resolve(input.cwd, relativePath); + const isInsideRoot = + absolutePath === rootPath || absolutePath.startsWith(`${rootPath}${nodePath.sep}`); + + if ( + nodePath.isAbsolute(input.path) || + !relativePath.startsWith(`${PRODUCT_DOCS_ROOT}/`) || + !relativePath.endsWith(".md") || + !isInsideRoot + ) { + return Effect.fail( + new ProductArtifactsProviderError({ + operation: "pathConfinement", + detail: "Product artifacts must be Markdown files under docs/product.", + }), + ); + } + + return Effect.succeed({ relativePath, absolutePath, rootPath }); +} + +function titleFromMarkdown(relativePath: string, content: string): string { + const heading = content + .split(/\r?\n/g) + .map((line) => /^#\s+(.+?)\s*$/.exec(line)?.[1]?.trim()) + .find((line): line is string => Boolean(line)); + if (heading) return heading; + 
return nodePath.basename(relativePath, ".md").replace(/[-_]+/g, " "); +} + +function markdownPreview(content: string): string { + return content + .split(/\r?\n/g) + .map((line) => line.replace(/^#{1,6}\s+/u, "").replace(/[*_`]/g, "")) + .join("\n") + .trim(); +} + +function statusFromPorcelain(output: string): KanbanConsoleArtifactStatus { + const lines = output.split(/\r?\n/g).filter((line) => line.trim().length > 0); + if (lines.length === 0) return "clean"; + return lines.some((line) => line.slice(0, 2).includes("U")) ? "conflict" : "dirty"; +} + +function artifactId(repoId: string, relativePath: string): string { + return `${repoId}:${relativePath}`; +} + +function readPathStatus( + git: GitVcsDriver.GitVcsDriverShape, + input: { readonly cwd: string; readonly relativePath: string }, +) { + return git + .execute({ + operation: "ProductArtifactsProvider.status", + cwd: input.cwd, + args: ["status", "--porcelain=v1", "--", input.relativePath], + timeoutMs: DEFAULT_TIMEOUT_MS, + }) + .pipe(Effect.map((result) => statusFromPorcelain(result.stdout))); +} + +function readMarkdownFile(input: { + readonly absolutePath: string; +}): Effect.Effect { + return Effect.tryPromise({ + try: () => readFile(input.absolutePath, "utf8"), + catch: (cause) => providerError("readFile", cause), + }); +} + +function readFileUpdatedAt(input: { + readonly absolutePath: string; +}): Effect.Effect { + return Effect.tryPromise({ + try: () => stat(input.absolutePath), + catch: (cause) => providerError("stat", cause), + }).pipe(Effect.map((stats) => stats.mtime.toISOString())); +} + +async function listMarkdownFiles(rootPath: string): Promise { + const entries = await readdir(rootPath, { withFileTypes: true }).catch((error: unknown) => { + if (typeof error === "object" && error !== null && "code" in error && error.code === "ENOENT") { + return []; + } + throw error; + }); + const files: string[] = []; + + for (const entry of entries) { + const absolutePath = nodePath.join(rootPath, 
entry.name); + if (entry.isDirectory()) { + files.push(...(await listMarkdownFiles(absolutePath))); + } else if (entry.isFile() && entry.name.endsWith(".md")) { + files.push(absolutePath); + } + } + + return files; +} + +function branchIsProtected(branch: string): boolean { + return branch === "main" || branch.startsWith("release/"); +} + +function artifactEditComment(input: { readonly path: string; readonly target: string }): string { + return [ + "Kanban Console artifact update", + "", + `- Artifact: \`${input.path}\``, + `- Target: ${input.target}`, + "- Summary: Product artifact content was updated through the guarded docs/product flow.", + "- Raw diff and command output intentionally omitted.", + ].join("\n"); +} + +export const make = Effect.fn("ProductArtifactsProvider.make")(function* () { + const git = yield* GitVcsDriver.GitVcsDriver; + const github = yield* GitHubCli.GitHubCli; + + const readArtifact = Effect.fn("ProductArtifactsProvider.readArtifact")(function* ( + input: ProductArtifactPathInput, + ) { + const confined = yield* confinedMarkdownPath(input); + const [content, updatedAt, status] = yield* Effect.all( + [ + readMarkdownFile(confined), + readFileUpdatedAt(confined), + readPathStatus(git, { cwd: input.cwd, relativePath: confined.relativePath }), + ], + { concurrency: "unbounded" }, + ); + + return { + repoId: input.repoId, + path: confined.relativePath, + title: titleFromMarkdown(confined.relativePath, content), + status, + updatedAt, + content, + preview: markdownPreview(content), + } satisfies KanbanConsoleArtifactContent; + }); + + return ProductArtifactsProvider.of({ + listArtifacts: (input) => + Effect.gen(function* () { + const rootPath = nodePath.resolve(input.cwd, PRODUCT_DOCS_ROOT); + const files = yield* Effect.tryPromise({ + try: () => listMarkdownFiles(rootPath), + catch: (cause) => providerError("listArtifacts", cause), + }); + + const artifacts = yield* Effect.all( + files.map((absolutePath) => { + const relativePath = 
normalizeRelativePath(nodePath.relative(input.cwd, absolutePath)); + return readArtifact({ repoId: input.repoId, cwd: input.cwd, path: relativePath }).pipe( + Effect.map( + (artifact): KanbanConsoleArtifact => ({ + id: artifactId(input.repoId, artifact.path), + repoId: input.repoId, + path: artifact.path, + title: artifact.title, + status: artifact.status, + updatedAt: artifact.updatedAt, + }), + ), + ); + }), + { concurrency: 4 }, + ); + + return artifacts.toSorted((a, b) => a.path.localeCompare(b.path)); + }).pipe(Effect.mapError((error) => providerError("listArtifacts", error))), + + readArtifact: (input) => + readArtifact(input).pipe(Effect.mapError((error) => providerError("readArtifact", error))), + + writeArtifact: (input) => + Effect.gen(function* () { + const confined = yield* confinedMarkdownPath(input); + if (!input.confirmed) { + return { + repoId: input.repoId, + path: confined.relativePath, + status: "blocked", + message: "Artifact edits require explicit confirmation.", + } satisfies KanbanConsoleArtifactWriteResult; + } + + const [details, currentStatus] = yield* Effect.all( + [ + git.statusDetails(input.cwd), + readPathStatus(git, { cwd: input.cwd, relativePath: confined.relativePath }), + ], + { concurrency: "unbounded" }, + ); + const branch = details.branch ?? 
"DETACHED"; + if (branchIsProtected(branch)) { + return { + repoId: input.repoId, + path: confined.relativePath, + status: "blocked", + message: `Artifact edits are blocked on protected branch ${branch}.`, + } satisfies KanbanConsoleArtifactWriteResult; + } + if (currentStatus !== "clean") { + return { + repoId: input.repoId, + path: confined.relativePath, + status: "blocked", + message: `Artifact ${confined.relativePath} is ${currentStatus}; resolve local changes first.`, + } satisfies KanbanConsoleArtifactWriteResult; + } + + yield* Effect.tryPromise({ + try: async () => { + await mkdir(nodePath.dirname(confined.absolutePath), { recursive: true }); + await writeFile(confined.absolutePath, input.content, "utf8"); + }, + catch: (cause) => providerError("writeArtifact", cause), + }); + + const targetNumber = input.linkedPullRequestNumber ?? input.linkedIssueNumber; + const targetKind = input.linkedPullRequestNumber ? "pr" : "issue"; + const commentTarget = + input.linkedRepository && targetNumber !== undefined + ? `${targetKind}#${targetNumber}` + : undefined; + + const commentPosted = + input.linkedRepository && targetNumber !== undefined + ? yield* Effect.exit( + github.execute({ + cwd: input.cwd, + args: [ + "issue", + "comment", + String(targetNumber), + "--repo", + input.linkedRepository, + "--body", + artifactEditComment({ + path: confined.relativePath, + target: commentTarget ?? "linked work item", + }), + ], + timeoutMs: DEFAULT_TIMEOUT_MS, + }), + ).pipe(Effect.map((exit) => exit._tag === "Success")) + : false; + + return { + repoId: input.repoId, + path: confined.relativePath, + status: "applied", + message: + input.linkedRepository && targetNumber !== undefined && !commentPosted + ? "Artifact updated through the guarded docs/product flow; GitHub comment posting failed." + : "Artifact updated through the guarded docs/product flow.", + ...(commentTarget && commentPosted ? 
{ commentTarget } : {}), + } satisfies KanbanConsoleArtifactWriteResult; + }).pipe(Effect.mapError((error) => providerError("writeArtifact", error))), + }); +}); + +export const layer = Layer.effect(ProductArtifactsProvider, make()); diff --git a/apps/web/src/components/KanbanConsoleMock.browser.tsx b/apps/web/src/components/KanbanConsoleMock.browser.tsx new file mode 100644 index 00000000000..8f1aff4a7fe --- /dev/null +++ b/apps/web/src/components/KanbanConsoleMock.browser.tsx @@ -0,0 +1,67 @@ +import "../index.css"; + +import { page } from "vitest/browser"; +import { afterEach, describe, expect, it } from "vitest"; +import { render } from "vitest-browser-react"; + +import { SidebarProvider } from "./ui/sidebar"; +import { KanbanConsoleMock } from "./KanbanConsoleMock"; + +describe("KanbanConsoleMock", () => { + afterEach(() => { + localStorage.clear(); + document.body.innerHTML = ""; + }); + + it("renders the mock board and toggles Arabic RTL mode", async () => { + const screen = await render( + + + , + ); + + try { + await expect + .element(page.getByRole("heading", { name: "Kanban Project Console" })) + .toBeInTheDocument(); + await expect + .element(page.getByRole("heading", { exact: true, name: "GitHub Projects board" })) + .toBeInTheDocument(); + + const views = [ + ["Git", "Lazygit-style git status"], + ["Artifacts", "Product artifacts"], + ["PRs", "PR watcher"], + ["Timeline", "Issue and PR timeline"], + ["CLI", "CLI command console"], + ["GitOps", "GitOps and release dashboard"], + ["Settings", "Console settings"], + ["States", "State previews"], + ] as const; + + for (const [buttonName, headingName] of views) { + await page.getByRole("button", { exact: true, name: buttonName }).click(); + await expect + .element(page.getByRole("heading", { exact: true, name: headingName })) + .toBeInTheDocument(); + } + + await page.getByRole("button", { exact: true, name: "Artifacts" }).click(); + await page.getByRole("button", { name: 
/docs\/product\/project-console\.md/u }).click(); + await page.getByLabelText("Artifact markdown editor").fill("# Product artifact\n\nUpdated."); + await page.getByRole("button", { exact: true, name: "Apply guarded patch" }).click(); + await expect + .element(page.getByText("Clean artifact is ready for guarded patch flow.")) + .toBeInTheDocument(); + + await page.getByRole("button", { exact: true, name: "AR" }).click(); + + await expect + .element(page.getByRole("heading", { name: "وحدة تحكم مشروع كانبان" })) + .toBeInTheDocument(); + expect(document.querySelector("[dir='rtl']")).not.toBeNull(); + } finally { + await screen.unmount(); + } + }); +}); diff --git a/apps/web/src/components/KanbanConsoleMock.tsx b/apps/web/src/components/KanbanConsoleMock.tsx new file mode 100644 index 00000000000..8167d5a39c1 --- /dev/null +++ b/apps/web/src/components/KanbanConsoleMock.tsx @@ -0,0 +1,927 @@ +import { useMemo, useState, type ReactNode } from "react"; +import { + DndContext, + PointerSensor, + useDraggable, + useDroppable, + useSensor, + useSensors, + type DragEndEvent, +} from "@dnd-kit/core"; +import { CSS } from "@dnd-kit/utilities"; +import { + ActivityIcon, + AlertTriangleIcon, + CheckCircle2Icon, + ChevronRightIcon, + CircleDotIcon, + ClipboardListIcon, + FileTextIcon, + GitBranchIcon, + GitPullRequestIcon, + KanbanSquareIcon, + LanguagesIcon, + LayoutDashboardIcon, + Loader2Icon, + LockIcon, + PlayIcon, + RocketIcon, + Settings2Icon, + ShieldAlertIcon, + SidebarIcon, + TerminalSquareIcon, +} from "lucide-react"; + +import { + consoleStateIds, + consoleViews, + getLocaleDirection, + getMessages, + getTasksByColumn, + getTaskTitle, + kanbanConsoleMockProvider, + kanbanColumns, + kanbanTasks, + monorepos, + moveTaskToColumn, + type ConsoleStateId, + type ConsoleViewId, + type KanbanColumnId, + type KanbanConsoleLocale, + type KanbanTaskMock, +} from "../kanbanConsoleMock"; +import { isElectron } from "../env"; +import { Badge } from "./ui/badge"; +import { Button } 
from "./ui/button"; +import { + Sheet, + SheetContent, + SheetDescription, + SheetFooter, + SheetHeader, + SheetPanel, + SheetTitle, +} from "./ui/sheet"; +import { SidebarInset, SidebarTrigger } from "./ui/sidebar"; +import { cn } from "~/lib/utils"; + +const viewIcons: Record = { + artifacts: FileTextIcon, + board: KanbanSquareIcon, + cli: TerminalSquareIcon, + git: GitBranchIcon, + gitops: RocketIcon, + prs: GitPullRequestIcon, + settings: Settings2Icon, + states: LayoutDashboardIcon, + timeline: ActivityIcon, +}; + +const stateIcons: Record = { + empty: CircleDotIcon, + error: AlertTriangleIcon, + loading: Loader2Icon, + "missing-auth": LockIcon, + permission: ShieldAlertIcon, +}; + +const stateTone: Record = { + empty: "border-border bg-card", + error: "border-destructive/30 bg-destructive/6 text-destructive-foreground", + loading: "border-info/30 bg-info/6 text-info-foreground", + "missing-auth": "border-warning/30 bg-warning/6 text-warning-foreground", + permission: "border-warning/30 bg-warning/6 text-warning-foreground", +}; + +export function KanbanConsoleMock() { + const [locale, setLocale] = useState("en"); + const [activeView, setActiveView] = useState("board"); + const [tasks, setTasks] = useState(() => [...kanbanTasks]); + const [selectedTaskId, setSelectedTaskId] = useState(kanbanTasks[0]?.id ?? ""); + const [moveTaskId, setMoveTaskId] = useState(null); + const [queuedCommand, setQueuedCommand] = useState("/phase t3-kanban-project-console phase-3"); + const snapshot = kanbanConsoleMockProvider.readSnapshot(); + + const messages = getMessages(locale); + const direction = getLocaleDirection(locale); + const selectedTask = tasks.find((task) => task.id === selectedTaskId) ?? tasks[0]; + const moveTask = moveTaskId ? 
tasks.find((task) => task.id === moveTaskId) : undefined; + const groupedTasks = useMemo(() => getTasksByColumn(tasks), [tasks]); + const dragSensors = useSensors( + useSensor(PointerSensor, { + activationConstraint: { + distance: 8, + }, + }), + ); + + const moveSelectedTask = (nextColumn: KanbanColumnId) => { + if (!moveTask) return; + setTasks((currentTasks) => moveTaskToColumn(currentTasks, moveTask.id, nextColumn)); + setSelectedTaskId(moveTask.id); + setMoveTaskId(null); + }; + + const moveDraggedTask = (event: DragEndEvent) => { + const taskId = String(event.active.id); + const nextColumn = event.over?.id; + + if (!nextColumn || typeof nextColumn !== "string") { + return; + } + + if (!kanbanColumns.some((column) => column.id === nextColumn)) { + return; + } + + const targetTask = tasks.find((task) => task.id === taskId); + if (!targetTask || targetTask.column === nextColumn) { + return; + } + + setTasks((currentTasks) => + moveTaskToColumn(currentTasks, taskId, nextColumn as KanbanColumnId), + ); + setSelectedTaskId(taskId); + }; + + return ( + +
+
+
+ {!isElectron ? : null} + +
+

{messages.consoleTitle}

+

+ Phase 2 mock surface: no live GitHub, git, CLI, or provider mutations. +

+
+ +
+
+ +
+ + +
+
+ +
+ {activeView === "board" ? ( + + + + ) : null} + {activeView === "git" ? : null} + {activeView === "artifacts" ? ( + + ) : null} + {activeView === "prs" ? ( + + ) : null} + {activeView === "timeline" ? : null} + {activeView === "cli" ? ( + + ) : null} + {activeView === "gitops" ? ( + + ) : null} + {activeView === "settings" ? ( + + ) : null} + {activeView === "states" ? : null} +
+
+
+ + +
+
+ + !open && setMoveTaskId(null)}> + + + {messages.moveSheetTitle} + + {moveTask ? getTaskTitle(moveTask, locale) : messages.emptyState} + + + + {kanbanColumns.map((column) => ( + + ))} + + + + + + +
+ ); +} + +function ProjectSidebar() { + return ( + + ); +} + +function ViewTabs({ + activeView, + locale, + onViewChange, +}: { + activeView: ConsoleViewId; + locale: KanbanConsoleLocale; + onViewChange: (view: ConsoleViewId) => void; +}) { + const messages = getMessages(locale); + + return ( + + ); +} + +function BoardView({ + groupedTasks, + locale, + onMoveTask, + onSelectTask, + selectedTaskId, +}: { + groupedTasks: ReturnType; + locale: KanbanConsoleLocale; + onMoveTask: (taskId: string) => void; + onSelectTask: (taskId: string) => void; + selectedTaskId: string | null; +}) { + const messages = getMessages(locale); + + return ( +
+ +
+ {groupedTasks.map((column) => ( + + ))} +
+
+ ); +} + +function KanbanColumn({ + column, + locale, + onMoveTask, + onSelectTask, + selectedTaskId, +}: { + column: ReturnType[number]; + locale: KanbanConsoleLocale; + onMoveTask: (taskId: string) => void; + onSelectTask: (taskId: string) => void; + selectedTaskId: string | null; +}) { + const messages = getMessages(locale); + const { isOver, setNodeRef } = useDroppable({ + id: column.id, + }); + + return ( +
+
+

{messages[column.labelKey]}

+ {column.tasks.length} +
+
+ {column.tasks.length === 0 ? ( +
+ {messages.emptyState} +
+ ) : null} + {column.tasks.map((task) => ( + + ))} +
+
+ ); +} + +function TaskCard({ + locale, + onMoveTask, + onSelectTask, + selected, + task, +}: { + locale: KanbanConsoleLocale; + onMoveTask: (taskId: string) => void; + onSelectTask: (taskId: string) => void; + selected: boolean; + task: KanbanTaskMock; +}) { + const messages = getMessages(locale); + const { attributes, isDragging, listeners, setNodeRef, transform } = useDraggable({ + id: task.id, + }); + const style = { + transform: CSS.Translate.toString(transform), + }; + + return ( +
+ +
+
+ + {task.checks.passing}/{task.checks.pending}/{task.checks.failing} +
+ +
+
+ ); +} + +function TaskDetailPanel({ + locale, + task, +}: { + locale: KanbanConsoleLocale; + task: KanbanTaskMock | null; +}) { + const messages = getMessages(locale); + const snapshot = kanbanConsoleMockProvider.readSnapshot(); + + if (!task) { + return null; + } + + const taskSession = snapshot.agentSessions?.find((session) => session.taskId === task.id); + + return ( + + ); +} + +function GitView({ + locale, + snapshot, +}: { + locale: KanbanConsoleLocale; + snapshot: ReturnType; +}) { + const messages = getMessages(locale); + const gitStatus = snapshot.gitStatuses[0]; + + return ( + +
+ + + + + + + {gitStatus?.files.map((file) => ( +
+ {file.path} +
+ ))} +
+
+
+ ); +} + +function ArtifactsView({ + locale, + snapshot, +}: { + locale: KanbanConsoleLocale; + snapshot: ReturnType; +}) { + const messages = getMessages(locale); + const [selectedArtifactId, setSelectedArtifactId] = useState(snapshot.artifacts[0]?.id ?? ""); + const selectedArtifact = + snapshot.artifacts.find((artifact) => artifact.id === selectedArtifactId) ?? + snapshot.artifacts[0]; + const [draft, setDraft] = useState( + "# Project artifact\n\n- Browse docs/product Markdown\n- Preview before write\n- Apply through guarded patch flow", + ); + const [saveStatus, setSaveStatus] = useState(messages.artifactClean); + + return ( + +
+
+ {snapshot.artifacts.map((artifact) => ( + + ))} +
+
+
+ + +
+ + +
+