diff --git a/.nvim.lua b/.nvim.lua new file mode 100644 index 0000000..63ccb66 --- /dev/null +++ b/.nvim.lua @@ -0,0 +1,12 @@ +-- Project-local neovim configuration for hyh +-- Requires `vim.o.exrc = true` in your neovim config + +require("neo-tree").setup({ + filesystem = { + filtered_items = { + visible = true, -- Show hidden files (dimmed out) + hide_dotfiles = false, + hide_gitignored = true, + }, + }, +}) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ff8e39e..35d1de9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,11 +38,13 @@ repos: additional_dependencies: - mdformat-mkdocs==5.1.1 - mdformat-footnote==0.1.2 + exclude: ^src/hyh/plugin/ - repo: https://github.com/igorshubovych/markdownlint-cli rev: v0.46.0 hooks: - id: markdownlint-fix + exclude: ^src/hyh/plugin/ - repo: https://github.com/crate-ci/typos rev: v1.40.0 diff --git a/docs/plans/2025-12-29-speckit-implementation.md b/docs/plans/2025-12-29-speckit-implementation.md new file mode 100644 index 0000000..8caaf56 --- /dev/null +++ b/docs/plans/2025-12-29-speckit-implementation.md @@ -0,0 +1,1879 @@ +# Speckit Integration Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Transform hyh into a spec-driven development workflow tool with Claude Code plugin integration and DHH-style git worktree management. + +**Architecture:** Add three new modules (worktree.py, workflow.py, init.py) alongside updated plan.py parser. Bundle test-prompt templates and Claude Code plugin files. New CLI commands delegate to these modules. 
+ +**Tech Stack:** Python 3.13+, msgspec for structs, pytest for TDD, Claude Code plugin system (markdown commands, JSON hooks) + +--- + +## Task 1: Add Speckit Checkbox Parser + +**Files:** + +- Modify: `src/hyh/plan.py` +- Test: `tests/hyh/test_plan.py` + +**Step 1: Write the failing test for basic checkbox parsing** + +Add to `tests/hyh/test_plan.py`: + +```python +def test_parse_speckit_checkbox_basic(): + """parse_speckit_tasks extracts tasks from checkbox format.""" + from hyh.plan import parse_speckit_tasks + + content = """\ +## Progress Management + +Mark completed tasks with [x]. + +## Phase 1: Setup + +- [ ] T001 Create project structure +- [x] T002 Initialize git repository + +## Phase 2: Core + +- [ ] T003 [P] Implement user model in src/models/user.py +- [ ] T004 [P] [US1] Add auth service in src/services/auth.py +""" + result = parse_speckit_tasks(content) + + assert len(result.tasks) == 4 + assert result.tasks["T001"].status == "pending" + assert result.tasks["T002"].status == "completed" + assert result.tasks["T003"].parallel is True + assert result.tasks["T004"].user_story == "US1" + assert "src/services/auth.py" in result.tasks["T004"].description +``` + +**Step 2: Run test to verify it fails** + +Run: `pytest tests/hyh/test_plan.py::test_parse_speckit_checkbox_basic -v` +Expected: FAIL with "cannot import name 'parse_speckit_tasks'" + +**Step 3: Write minimal implementation** + +Add to `src/hyh/plan.py`: + +```python +import re +from typing import Final + +from msgspec import Struct + + +class SpecTaskDefinition(Struct, frozen=True, forbid_unknown_fields=True, omit_defaults=True): + """Task definition from speckit checkbox format.""" + + description: str + status: str = "pending" # "pending" or "completed" + parallel: bool = False + user_story: str | None = None + phase: str | None = None + file_path: str | None = None + dependencies: tuple[str, ...] 
= () + + +class SpecTaskList(Struct, frozen=True, forbid_unknown_fields=True): + """Parsed speckit tasks.md content.""" + + tasks: dict[str, SpecTaskDefinition] + phases: tuple[str, ...] + + +_CHECKBOX_PATTERN: Final[re.Pattern[str]] = re.compile( + r"^- \[([ xX])\] (T\d+)(?: \[P\])?(?: \[([A-Z]+\d+)\])? (.+)$" +) + +_PHASE_PATTERN: Final[re.Pattern[str]] = re.compile( + r"^## Phase \d+: (.+)$" +) + + +def parse_speckit_tasks(content: str) -> SpecTaskList: + """Parse speckit checkbox format into task list. + + Format: + ## Phase N: Phase Name + - [ ] T001 [P] [US1] Description with path/to/file.py + - [x] T002 Completed task + + Markers: + - [ ] = pending, [x] = completed + - [P] = parallel (can run concurrently) + - [US1] = user story reference + """ + tasks: dict[str, SpecTaskDefinition] = {} + phases: list[str] = [] + current_phase: str | None = None + + for line in content.split("\n"): + # Check for phase header + phase_match = _PHASE_PATTERN.match(line.strip()) + if phase_match: + current_phase = phase_match.group(1) + phases.append(current_phase) + continue + + # Check for task checkbox + checkbox_match = _CHECKBOX_PATTERN.match(line.strip()) + if checkbox_match: + check, task_id, user_story, description = checkbox_match.groups() + + # Detect parallel marker + parallel = "[P]" in line + + # Extract file path from description if present + file_path = None + path_match = re.search(r"(\S+\.\w+)$", description) + if path_match: + file_path = path_match.group(1) + + tasks[task_id] = SpecTaskDefinition( + description=description.strip(), + status="completed" if check.lower() == "x" else "pending", + parallel=parallel, + user_story=user_story, + phase=current_phase, + file_path=file_path, + ) + + return SpecTaskList(tasks=tasks, phases=tuple(phases)) +``` + +**Step 4: Run test to verify it passes** + +Run: `pytest tests/hyh/test_plan.py::test_parse_speckit_checkbox_basic -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/plan.py 
tests/hyh/test_plan.py +git commit -m "feat(plan): add speckit checkbox format parser" +``` + +--- + +## Task 2: Add Phase Dependencies to Speckit Parser + +**Files:** + +- Modify: `src/hyh/plan.py` +- Test: `tests/hyh/test_plan.py` + +**Step 1: Write the failing test for phase dependencies** + +```python +def test_parse_speckit_tasks_phase_dependencies(): + """Tasks in Phase N depend on all tasks in Phase N-1.""" + from hyh.plan import parse_speckit_tasks + + content = """\ +## Phase 1: Setup + +- [ ] T001 Setup task A +- [ ] T002 [P] Setup task B + +## Phase 2: Core + +- [ ] T003 Core task (depends on Phase 1) +""" + result = parse_speckit_tasks(content) + + # Phase 1 tasks have no dependencies + assert result.tasks["T001"].dependencies == () + assert result.tasks["T002"].dependencies == () + # Phase 2 tasks depend on all Phase 1 tasks + assert set(result.tasks["T003"].dependencies) == {"T001", "T002"} +``` + +**Step 2: Run test to verify it fails** + +Run: `pytest tests/hyh/test_plan.py::test_parse_speckit_tasks_phase_dependencies -v` +Expected: FAIL (dependencies are empty) + +**Step 3: Update implementation to track phase dependencies** + +Update `parse_speckit_tasks` in `src/hyh/plan.py`: + +```python +def parse_speckit_tasks(content: str) -> SpecTaskList: + """Parse speckit checkbox format into task list with phase dependencies.""" + tasks: dict[str, SpecTaskDefinition] = {} + phases: list[str] = [] + current_phase: str | None = None + phase_tasks: dict[str, list[str]] = {} # phase_name -> [task_ids] + + for line in content.split("\n"): + phase_match = _PHASE_PATTERN.match(line.strip()) + if phase_match: + current_phase = phase_match.group(1) + phases.append(current_phase) + phase_tasks[current_phase] = [] + continue + + checkbox_match = _CHECKBOX_PATTERN.match(line.strip()) + if checkbox_match: + check, task_id, user_story, description = checkbox_match.groups() + parallel = "[P]" in line + + file_path = None + path_match = re.search(r"(\S+\.\w+)$", 
description) + if path_match: + file_path = path_match.group(1) + + tasks[task_id] = SpecTaskDefinition( + description=description.strip(), + status="completed" if check.lower() == "x" else "pending", + parallel=parallel, + user_story=user_story, + phase=current_phase, + file_path=file_path, + ) + + if current_phase: + phase_tasks[current_phase].append(task_id) + + # Build dependencies: Phase N depends on Phase N-1 + for i, phase in enumerate(phases): + if i == 0: + continue # First phase has no dependencies + prev_phase = phases[i - 1] + prev_task_ids = tuple(phase_tasks.get(prev_phase, [])) + + for task_id in phase_tasks.get(phase, []): + old_task = tasks[task_id] + tasks[task_id] = SpecTaskDefinition( + description=old_task.description, + status=old_task.status, + parallel=old_task.parallel, + user_story=old_task.user_story, + phase=old_task.phase, + file_path=old_task.file_path, + dependencies=prev_task_ids, + ) + + return SpecTaskList(tasks=tasks, phases=tuple(phases)) +``` + +**Step 4: Run test to verify it passes** + +Run: `pytest tests/hyh/test_plan.py::test_parse_speckit_tasks_phase_dependencies -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/plan.py tests/hyh/test_plan.py +git commit -m "feat(plan): add phase-based dependencies to speckit parser" +``` + +--- + +## Task 3: Convert SpecTaskList to WorkflowState + +**Files:** + +- Modify: `src/hyh/plan.py` +- Test: `tests/hyh/test_plan.py` + +**Step 1: Write the failing test** + +```python +def test_spec_task_list_to_workflow_state(): + """SpecTaskList converts to WorkflowState for daemon.""" + from hyh.plan import parse_speckit_tasks + from hyh.state import TaskStatus + + content = """\ +## Phase 1: Setup + +- [ ] T001 Create project +- [x] T002 Init git + +## Phase 2: Core + +- [ ] T003 [P] Build feature +""" + spec_tasks = parse_speckit_tasks(content) + state = spec_tasks.to_workflow_state() + + assert len(state.tasks) == 3 + assert state.tasks["T001"].status == TaskStatus.PENDING + 
assert state.tasks["T002"].status == TaskStatus.COMPLETED + assert state.tasks["T003"].dependencies == ("T001", "T002") +``` + +**Step 2: Run test to verify it fails** + +Run: `pytest tests/hyh/test_plan.py::test_spec_task_list_to_workflow_state -v` +Expected: FAIL with "SpecTaskList has no attribute 'to_workflow_state'" + +**Step 3: Add to_workflow_state method** + +Add to `SpecTaskList` in `src/hyh/plan.py`: + +```python +from .state import Task, TaskStatus, WorkflowState + + +class SpecTaskList(Struct, frozen=True, forbid_unknown_fields=True): + """Parsed speckit tasks.md content.""" + + tasks: dict[str, SpecTaskDefinition] + phases: tuple[str, ...] + + def to_workflow_state(self) -> WorkflowState: + """Convert to WorkflowState for daemon execution.""" + from .state import Task, TaskStatus, WorkflowState + + tasks = {} + for tid, spec_task in self.tasks.items(): + status = ( + TaskStatus.COMPLETED + if spec_task.status == "completed" + else TaskStatus.PENDING + ) + tasks[tid] = Task( + id=tid, + description=spec_task.description, + status=status, + dependencies=spec_task.dependencies, + ) + return WorkflowState(tasks=tasks) +``` + +**Step 4: Run test to verify it passes** + +Run: `pytest tests/hyh/test_plan.py::test_spec_task_list_to_workflow_state -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/plan.py tests/hyh/test_plan.py +git commit -m "feat(plan): add SpecTaskList.to_workflow_state() conversion" +``` + +--- + +## Task 4: Create worktree.py Module + +**Files:** + +- Create: `src/hyh/worktree.py` +- Create: `tests/hyh/test_worktree.py` + +**Step 1: Write the failing test for worktree creation** + +Create `tests/hyh/test_worktree.py`: + +```python +"""Tests for git worktree management (DHH-style).""" + +import subprocess +from pathlib import Path + +import pytest + + +def test_create_worktree_dhh_style(tmp_path: Path): + """create_worktree creates sibling directory with branch.""" + from hyh.worktree import create_worktree + + # Setup: 
create a git repo + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + + # Act + result = create_worktree(main_repo, "42-user-auth") + + # Assert + expected_path = tmp_path / "myproject--42-user-auth" + assert result.worktree_path == expected_path + assert expected_path.exists() + assert (expected_path / "README.md").exists() + + # Verify branch was created + branch_result = subprocess.run( + ["git", "branch", "--show-current"], + cwd=expected_path, + capture_output=True, + text=True, + check=True, + ) + assert branch_result.stdout.strip() == "42-user-auth" +``` + +**Step 2: Run test to verify it fails** + +Run: `pytest tests/hyh/test_worktree.py::test_create_worktree_dhh_style -v` +Expected: FAIL with "No module named 'hyh.worktree'" + +**Step 3: Create minimal worktree.py** + +Create `src/hyh/worktree.py`: + +```python +"""Git worktree management (DHH-style). + +Pattern: ../project--branch as sibling directories. +See: https://gist.github.com/dhh/18575558fc5ee10f15b6cd3e108ed844 +""" + +import subprocess +from pathlib import Path +from typing import Final + +from msgspec import Struct + + +class WorktreeResult(Struct, frozen=True): + """Result of worktree creation.""" + + worktree_path: Path + branch_name: str + main_repo: Path + + +def create_worktree(main_repo: Path, branch_name: str) -> WorktreeResult: + """Create a worktree with DHH-style naming. 
+ + Creates: ../{repo_name}--{branch_name}/ + Branch: {branch_name} + + Args: + main_repo: Path to the main repository. + branch_name: Name for both branch and worktree suffix. + + Returns: + WorktreeResult with paths. + + Raises: + subprocess.CalledProcessError: If git commands fail. + """ + main_repo = Path(main_repo).resolve() + repo_name = main_repo.name + worktree_path = main_repo.parent / f"{repo_name}--{branch_name}" + + subprocess.run( + ["git", "worktree", "add", "-b", branch_name, str(worktree_path)], + cwd=main_repo, + capture_output=True, + check=True, + ) + + return WorktreeResult( + worktree_path=worktree_path, + branch_name=branch_name, + main_repo=main_repo, + ) +``` + +**Step 4: Run test to verify it passes** + +Run: `pytest tests/hyh/test_worktree.py::test_create_worktree_dhh_style -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/worktree.py tests/hyh/test_worktree.py +git commit -m "feat(worktree): add DHH-style git worktree creation" +``` + +--- + +## Task 5: Add Worktree List and Switch Functions + +**Files:** + +- Modify: `src/hyh/worktree.py` +- Modify: `tests/hyh/test_worktree.py` + +**Step 1: Write tests for list and switch** + +Add to `tests/hyh/test_worktree.py`: + +```python +def test_list_worktrees(tmp_path: Path): + """list_worktrees returns all DHH-style worktrees.""" + from hyh.worktree import create_worktree, list_worktrees + + # Setup main repo + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", 
"initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + + # Create two worktrees + create_worktree(main_repo, "42-feature-a") + create_worktree(main_repo, "43-feature-b") + + # Act + worktrees = list_worktrees(main_repo) + + # Assert + assert len(worktrees) == 2 + branches = {wt.branch_name for wt in worktrees} + assert branches == {"42-feature-a", "43-feature-b"} + + +def test_get_worktree_for_branch(tmp_path: Path): + """get_worktree returns path for a specific branch.""" + from hyh.worktree import create_worktree, get_worktree + + # Setup + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + create_worktree(main_repo, "42-user-auth") + + # Act + result = get_worktree(main_repo, "42-user-auth") + + # Assert + assert result is not None + assert result.branch_name == "42-user-auth" + assert result.worktree_path == tmp_path / "myproject--42-user-auth" +``` + +**Step 2: Run tests to verify they fail** + +Run: `pytest tests/hyh/test_worktree.py -v -k "list_worktrees or get_worktree"` +Expected: FAIL with "cannot import name 'list_worktrees'" + +**Step 3: Add list_worktrees and get_worktree** + +Add to `src/hyh/worktree.py`: + +```python +def list_worktrees(main_repo: Path) -> list[WorktreeResult]: + """List all DHH-style worktrees for a repository. + + Args: + main_repo: Path to the main repository. + + Returns: + List of WorktreeResult for each worktree. 
+ """ + main_repo = Path(main_repo).resolve() + repo_name = main_repo.name + prefix = f"{repo_name}--" + + result = subprocess.run( + ["git", "worktree", "list", "--porcelain"], + cwd=main_repo, + capture_output=True, + text=True, + check=True, + ) + + worktrees: list[WorktreeResult] = [] + current_path: Path | None = None + current_branch: str | None = None + + for line in result.stdout.split("\n"): + if line.startswith("worktree "): + current_path = Path(line.split(" ", 1)[1]) + elif line.startswith("branch refs/heads/"): + current_branch = line.replace("branch refs/heads/", "") + elif line == "" and current_path and current_branch: + # Filter to DHH-style worktrees only + if current_path.name.startswith(prefix): + worktrees.append( + WorktreeResult( + worktree_path=current_path, + branch_name=current_branch, + main_repo=main_repo, + ) + ) + current_path = None + current_branch = None + + return worktrees + + +def get_worktree(main_repo: Path, branch_name: str) -> WorktreeResult | None: + """Get worktree for a specific branch. + + Args: + main_repo: Path to the main repository. + branch_name: Branch name to find. + + Returns: + WorktreeResult if found, None otherwise. 
+ """ + worktrees = list_worktrees(main_repo) + for wt in worktrees: + if wt.branch_name == branch_name: + return wt + return None +``` + +**Step 4: Run tests to verify they pass** + +Run: `pytest tests/hyh/test_worktree.py -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/worktree.py tests/hyh/test_worktree.py +git commit -m "feat(worktree): add list_worktrees and get_worktree functions" +``` + +--- + +## Task 6: Add Worktree CLI Commands + +**Files:** + +- Modify: `src/hyh/client.py` +- Test: `tests/hyh/test_worktree.py` + +**Step 1: Write test for CLI worktree create** + +Add to `tests/hyh/test_worktree.py`: + +```python +def test_cli_worktree_create(tmp_path: Path, monkeypatch): + """hyh worktree create creates worktree via CLI.""" + import sys + from io import StringIO + + # Setup main repo + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + + # Mock cwd to main_repo + monkeypatch.chdir(main_repo) + monkeypatch.setenv("HYH_WORKTREE", str(main_repo)) + + # Run CLI + from hyh.client import main + + monkeypatch.setattr(sys, "argv", ["hyh", "worktree", "create", "42-feature"]) + + stdout = StringIO() + monkeypatch.setattr(sys, "stdout", stdout) + + main() + + # Verify + expected_path = tmp_path / "myproject--42-feature" + assert expected_path.exists() + assert "Created" in stdout.getvalue() or "42-feature" in stdout.getvalue() +``` + +**Step 2: Run test to verify it 
fails** + +Run: `pytest tests/hyh/test_worktree.py::test_cli_worktree_create -v` +Expected: FAIL with argument error (worktree subcommand doesn't exist) + +**Step 3: Add worktree CLI commands to client.py** + +Add to `src/hyh/client.py` in the argparse setup section: + +```python +# Add after the existing subparsers + +worktree_parser = subparsers.add_parser("worktree", help="Git worktree management") +worktree_sub = worktree_parser.add_subparsers(dest="worktree_command", required=True) + +worktree_create = worktree_sub.add_parser("create", help="Create a new worktree") +worktree_create.add_argument("branch", help="Branch name (e.g., 42-user-auth)") + +worktree_sub.add_parser("list", help="List all worktrees") + +worktree_switch = worktree_sub.add_parser("switch", help="Show path to switch to worktree") +worktree_switch.add_argument("branch", help="Branch name to switch to") +``` + +Add the command handlers: + +```python +def _cmd_worktree_create(branch: str) -> None: + from hyh.worktree import create_worktree + + main_repo = Path(_get_git_root()) + result = create_worktree(main_repo, branch) + print(f"Created worktree: {result.worktree_path}") + print(f"Branch: {result.branch_name}") + print(f"\nTo switch: cd {result.worktree_path}") + + +def _cmd_worktree_list() -> None: + from hyh.worktree import list_worktrees + + main_repo = Path(_get_git_root()) + worktrees = list_worktrees(main_repo) + + if not worktrees: + print("No worktrees found.") + return + + print("Worktrees:") + for wt in worktrees: + print(f" {wt.branch_name}: {wt.worktree_path}") + + +def _cmd_worktree_switch(branch: str) -> None: + from hyh.worktree import get_worktree + + main_repo = Path(_get_git_root()) + wt = get_worktree(main_repo, branch) + + if wt is None: + print(f"Worktree not found: {branch}", file=sys.stderr) + sys.exit(1) + + print(f"cd {wt.worktree_path}") +``` + +Add to the match statement in `main()`: + +```python +case "worktree": + match args.worktree_command: + case "create": + 
_cmd_worktree_create(args.branch) + case "list": + _cmd_worktree_list() + case "switch": + _cmd_worktree_switch(args.branch) +``` + +**Step 4: Run test to verify it passes** + +Run: `pytest tests/hyh/test_worktree.py::test_cli_worktree_create -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/client.py tests/hyh/test_worktree.py +git commit -m "feat(cli): add worktree create/list/switch commands" +``` + +--- + +## Task 7: Create Workflow State Module + +**Files:** + +- Create: `src/hyh/workflow.py` +- Create: `tests/hyh/test_workflow.py` + +**Step 1: Write test for workflow state detection** + +Create `tests/hyh/test_workflow.py`: + +```python +"""Tests for workflow state management.""" + +from pathlib import Path + +import pytest + + +def test_detect_workflow_phase_no_spec(tmp_path: Path): + """detect_phase returns 'none' when no spec exists.""" + from hyh.workflow import detect_phase + + result = detect_phase(tmp_path) + assert result.phase == "none" + assert result.next_action == "specify" + + +def test_detect_workflow_phase_has_spec(tmp_path: Path): + """detect_phase returns 'specify' when spec exists but no plan.""" + from hyh.workflow import detect_phase + + specs_dir = tmp_path / "specs" + specs_dir.mkdir() + (specs_dir / "spec.md").write_text("# Spec") + + result = detect_phase(tmp_path) + assert result.phase == "specify" + assert result.next_action == "plan" + + +def test_detect_workflow_phase_has_tasks(tmp_path: Path): + """detect_phase returns 'plan' when tasks exist but not complete.""" + from hyh.workflow import detect_phase + + specs_dir = tmp_path / "specs" + specs_dir.mkdir() + (specs_dir / "spec.md").write_text("# Spec") + (specs_dir / "plan.md").write_text("# Plan") + (specs_dir / "tasks.md").write_text("""\ +## Phase 1: Setup + +- [ ] T001 Create project +- [ ] T002 Init git +""") + + result = detect_phase(tmp_path) + assert result.phase == "plan" + assert result.next_action == "implement" + + +def 
test_detect_workflow_phase_all_complete(tmp_path: Path): + """detect_phase returns 'implement' when all tasks complete.""" + from hyh.workflow import detect_phase + + specs_dir = tmp_path / "specs" + specs_dir.mkdir() + (specs_dir / "spec.md").write_text("# Spec") + (specs_dir / "plan.md").write_text("# Plan") + (specs_dir / "tasks.md").write_text("""\ +## Phase 1: Setup + +- [x] T001 Create project +- [x] T002 Init git +""") + + result = detect_phase(tmp_path) + assert result.phase == "complete" + assert result.next_action is None +``` + +**Step 2: Run tests to verify they fail** + +Run: `pytest tests/hyh/test_workflow.py -v` +Expected: FAIL with "No module named 'hyh.workflow'" + +**Step 3: Create workflow.py** + +Create `src/hyh/workflow.py`: + +```python +"""Workflow state detection and management.""" + +from pathlib import Path + +from msgspec import Struct + +from .plan import parse_speckit_tasks + + +class WorkflowPhase(Struct, frozen=True): + """Current workflow phase and suggested next action.""" + + phase: str # "none", "specify", "plan", "implement", "complete" + next_action: str | None # "specify", "plan", "implement", None + spec_exists: bool = False + plan_exists: bool = False + tasks_total: int = 0 + tasks_complete: int = 0 + + +def detect_phase(worktree: Path) -> WorkflowPhase: + """Detect current workflow phase based on artifacts. + + Args: + worktree: Path to worktree root. + + Returns: + WorkflowPhase with current state and suggested action. 
+ """ + worktree = Path(worktree) + specs_dir = worktree / "specs" + + spec_path = specs_dir / "spec.md" + plan_path = specs_dir / "plan.md" + tasks_path = specs_dir / "tasks.md" + + spec_exists = spec_path.exists() + plan_exists = plan_path.exists() + tasks_total = 0 + tasks_complete = 0 + + # No spec = nothing started + if not spec_exists: + return WorkflowPhase( + phase="none", + next_action="specify", + spec_exists=False, + plan_exists=False, + ) + + # Has spec but no plan + if not plan_exists: + return WorkflowPhase( + phase="specify", + next_action="plan", + spec_exists=True, + plan_exists=False, + ) + + # Has plan, check tasks + if tasks_path.exists(): + content = tasks_path.read_text() + task_list = parse_speckit_tasks(content) + tasks_total = len(task_list.tasks) + tasks_complete = sum( + 1 for t in task_list.tasks.values() if t.status == "completed" + ) + + if tasks_complete >= tasks_total and tasks_total > 0: + return WorkflowPhase( + phase="complete", + next_action=None, + spec_exists=True, + plan_exists=True, + tasks_total=tasks_total, + tasks_complete=tasks_complete, + ) + + return WorkflowPhase( + phase="plan", + next_action="implement", + spec_exists=True, + plan_exists=True, + tasks_total=tasks_total, + tasks_complete=tasks_complete, + ) +``` + +**Step 4: Run tests to verify they pass** + +Run: `pytest tests/hyh/test_workflow.py -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/workflow.py tests/hyh/test_workflow.py +git commit -m "feat(workflow): add phase detection from artifacts" +``` + +--- + +## Task 8: Add Workflow CLI Commands + +**Files:** + +- Modify: `src/hyh/client.py` +- Modify: `tests/hyh/test_workflow.py` + +**Step 1: Write test for workflow status CLI** + +Add to `tests/hyh/test_workflow.py`: + +```python +def test_cli_workflow_status(tmp_path: Path, monkeypatch): + """hyh workflow status shows current phase.""" + import sys + from io import StringIO + + # Setup with spec only + specs_dir = tmp_path / "specs" + 
specs_dir.mkdir()
+    (specs_dir / "spec.md").write_text("# Spec")
+
+    monkeypatch.chdir(tmp_path)
+    monkeypatch.setenv("HYH_WORKTREE", str(tmp_path))
+
+    from hyh.client import main
+
+    monkeypatch.setattr(sys, "argv", ["hyh", "workflow", "status"])
+
+    stdout = StringIO()
+    monkeypatch.setattr(sys, "stdout", stdout)
+
+    main()
+
+    output = stdout.getvalue()
+    assert "specify" in output.lower() or "plan" in output.lower()
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `pytest tests/hyh/test_workflow.py::test_cli_workflow_status -v`
+Expected: FAIL (workflow subcommand doesn't exist)
+
+**Step 3: Add workflow CLI commands**
+
+Add to `src/hyh/client.py`:
+
+```python
+# Add to argparse setup
+workflow_parser = subparsers.add_parser("workflow", help="Workflow state management")
+workflow_sub = workflow_parser.add_subparsers(dest="workflow_command", required=True)
+
+workflow_status = workflow_sub.add_parser("status", help="Show current workflow phase")
+workflow_status.add_argument("--json", action="store_true", help="Output JSON")
+workflow_status.add_argument("--quiet", action="store_true", help="Minimal output")
+```
+
+Add handlers:
+
+```python
+def _cmd_workflow_status(json_output: bool = False, quiet: bool = False) -> None:
+    import json as json_module
+
+    from hyh.workflow import detect_phase
+
+    worktree = Path(_get_git_root())
+    phase = detect_phase(worktree)
+
+    if json_output:
+        print(
+            json_module.dumps(
+                {
+                    "phase": phase.phase,
+                    "next_action": phase.next_action,
+                    "spec_exists": phase.spec_exists,
+                    "plan_exists": phase.plan_exists,
+                    "tasks_total": phase.tasks_total,
+                    "tasks_complete": phase.tasks_complete,
+                }
+            )
+        )
+        return
+
+    if quiet:
+        if phase.next_action:
+            print(f"Next: /hyh {phase.next_action}")
+        else:
+            print("Complete")
+        return
+
+    print("=" * 50)
+    print(" WORKFLOW STATUS")
+    print("=" * 50)
+    print()
+    print(f" Phase: {phase.phase}")
+    print(f" Spec: 
{'yes' if phase.spec_exists else 'no'}") + print(f" Plan: {'yes' if phase.plan_exists else 'no'}") + + if phase.tasks_total > 0: + pct = int((phase.tasks_complete / phase.tasks_total) * 100) + print(f" Tasks: {phase.tasks_complete}/{phase.tasks_total} ({pct}%)") + + print() + if phase.next_action: + print(f" Next: /hyh {phase.next_action}") + else: + print(" Status: All tasks complete!") + print() +``` + +Add to match statement: + +```python +case "workflow": + match args.workflow_command: + case "status": + _cmd_workflow_status( + json_output=getattr(args, "json", False), + quiet=getattr(args, "quiet", False), + ) +``` + +**Step 4: Run test to verify it passes** + +Run: `pytest tests/hyh/test_workflow.py::test_cli_workflow_status -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/client.py tests/hyh/test_workflow.py +git commit -m "feat(cli): add workflow status command" +``` + +--- + +## Task 9: Bundle Templates from test-prompt + +**Files:** + +- Create: `src/hyh/templates/spec-template.md` +- Create: `src/hyh/templates/plan-template.md` +- Create: `src/hyh/templates/tasks-template.md` +- Create: `src/hyh/templates/checklist-template.md` +- Modify: `pyproject.toml` + +**Step 1: Copy templates from test-prompt** + +```bash +mkdir -p src/hyh/templates +cp test-prompt/.specify/templates/spec-template.md src/hyh/templates/ +cp test-prompt/.specify/templates/plan-template.md src/hyh/templates/ +cp test-prompt/.specify/templates/tasks-template.md src/hyh/templates/ +cp test-prompt/.specify/templates/checklist-template.md src/hyh/templates/ +``` + +**Step 2: Verify templates are included in package** + +Add to `pyproject.toml` if needed for including non-Python files: + +```toml +[tool.hatch.build.targets.wheel] +packages = ["src/hyh"] + +[tool.hatch.build.targets.wheel.force-include] +"src/hyh/templates" = "hyh/templates" +``` + +**Step 3: Write test to verify templates are accessible** + +Create `tests/hyh/test_templates.py`: + +```python +"""Tests 
for bundled templates.""" + +from pathlib import Path + +import pytest + + +def test_templates_exist(): + """Bundled templates are accessible.""" + from importlib.resources import files + + templates = files("hyh") / "templates" + + assert (templates / "spec-template.md").is_file() + assert (templates / "plan-template.md").is_file() + assert (templates / "tasks-template.md").is_file() + assert (templates / "checklist-template.md").is_file() + + +def test_spec_template_has_required_sections(): + """Spec template contains required sections.""" + from importlib.resources import files + + content = (files("hyh") / "templates" / "spec-template.md").read_text() + + assert "## User Scenarios" in content or "User Scenarios" in content + assert "## Requirements" in content or "Requirements" in content + assert "## Success Criteria" in content or "Success Criteria" in content +``` + +**Step 4: Run tests** + +Run: `pytest tests/hyh/test_templates.py -v` +Expected: PASS + +**Step 5: Commit** + +```bash +git add src/hyh/templates/ pyproject.toml tests/hyh/test_templates.py +git commit -m "feat: bundle speckit templates with package" +``` + +--- + +## Task 10: Create Plugin Files Structure + +**Files:** + +- Create: `src/hyh/plugin/plugin.json` +- Create: `src/hyh/plugin/commands/hyh.md` +- Create: `src/hyh/plugin/commands/help.md` +- Create: `src/hyh/plugin/hooks/hooks.json` +- Create: `src/hyh/plugin/skills/spec-driven-dev.md` + +**Step 1: Create plugin.json** + +Create `src/hyh/plugin/plugin.json`: + +```json +{ + "name": "hyh", + "description": "Hold Your Horses - spec-driven development workflow", + "version": "0.2.0", + "author": { + "name": "Pedro Proenca", + "email": "pedro@10xengs.com" + }, + "repository": "https://github.com/pproenca/hyh", + "license": "MIT", + "commands": ["./commands/"], + "skills": ["./skills/"], + "hooks": "./hooks/hooks.json" +} +``` + +**Step 2: Create main command file** + +Create `src/hyh/plugin/commands/hyh.md`: + +```markdown +--- 
+description: Spec-driven development workflow - specify, plan, implement +argument-hint: [specify|plan|implement|status] [args] +allowed-tools: Bash(hyh:*), Bash(git:*), Read, Write, Edit, Glob, Grep +--- + +# hyh - Spec-Driven Development + +Route based on $ARGUMENTS: + +## If $ARGUMENTS starts with "specify" + +Extract the feature description after "specify". Then: + +1. Generate a slug from the description (2-4 words, kebab-case) +2. Get next feature number: `hyh workflow status --json` and increment +3. Create worktree: `hyh worktree create {N}-{slug}` +4. Load spec template and fill with user's description +5. Ask up to 5 clarifying questions (one at a time) for [NEEDS CLARIFICATION] markers +6. Write finalized spec to `specs/spec.md` +7. Report: "Spec complete. Run `/hyh plan` to continue." + +## If $ARGUMENTS starts with "plan" + +1. Verify `specs/spec.md` exists +2. Load spec and constitution (if `.hyh/constitution.md` exists) +3. Generate `specs/research.md` (resolve technical unknowns) +4. Generate `specs/plan.md` (architecture, tech stack) +5. Generate `specs/data-model.md` if entities involved +6. Generate `specs/tasks.md` in speckit checkbox format +7. Generate `specs/checklists/requirements.md` +8. Run consistency analysis +9. Import tasks: `hyh plan import --file specs/tasks.md` +10. Report: "Plan complete. Run `/hyh implement` to continue." + +## If $ARGUMENTS starts with "implement" + +1. Run: `hyh workflow status` to verify tasks exist +2. Check checklists pass (or ask to proceed) +3. Loop: + a. `hyh task claim` → get next task + b. If no task: done + c. Execute task per instructions + d. `hyh task complete --id {id}` + e. Update specs/tasks.md with [x] +4. Report completion + +## If $ARGUMENTS is empty or "status" + +Run: `hyh workflow status` + +Based on result, suggest next action: +- No spec? → "Start with: /hyh specify " +- Has spec, no plan? → "Continue with: /hyh plan" +- Has tasks? → "Continue with: /hyh implement" +- All complete? 
→ "All done! Ready to merge."
+```
+
+**Step 3: Create help command**
+
+Create `src/hyh/plugin/commands/help.md`:
+
+````markdown
+---
+description: Show hyh commands and current workflow state
+---
+
+# hyh Help
+
+Display available commands and current state:
+
+1. Run: `hyh workflow status`
+2. Show this help:
+
+## Commands
+
+| Command | Description |
+|---------|-------------|
+| `/hyh specify <description>` | Start new feature - creates worktree, generates spec |
+| `/hyh plan` | Generate design artifacts and tasks from spec |
+| `/hyh implement` | Execute tasks with daemon coordination |
+| `/hyh status` | Show current workflow phase and progress |
+
+## Workflow
+
+```
+specify → plan → implement → merge
+   ↓        ↓         ↓
+spec.md  tasks.md  working code
+```
+
+## Worktree Commands
+
+| Command | Description |
+|---------|-------------|
+| `hyh worktree create <branch>` | Create new feature worktree |
+| `hyh worktree list` | List all feature worktrees |
+| `hyh worktree switch <branch>` | Show path to switch to worktree |
+````
+
+**Step 4: Create hooks.json**
+
+Create `src/hyh/plugin/hooks/hooks.json`:
+
+```json
+{
+  "hooks": {
+    "SessionStart": [
+      {
+        "matcher": "",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "hyh workflow status --quiet"
+          }
+        ]
+      }
+    ]
+  }
+}
+```
+
+**Step 5: Create skill file**
+
+Create `src/hyh/plugin/skills/spec-driven-dev.md`:
+
+```markdown
+---
+name: spec-driven-development
+description: Use when implementing features - follow the specify → plan → implement workflow
+---
+
+# Spec-Driven Development
+
+When implementing any non-trivial feature, use the hyh workflow:
+
+## 1. Specify First
+
+Before writing code, create a specification:
+- Run `/hyh specify <description>`
+- Answer clarifying questions
+- Review the generated spec.md
+
+## 2. Plan Before Implementing
+
+Generate design artifacts:
+- Run `/hyh plan`
+- Review tasks.md for the work breakdown
+- Check checklists pass
+
+## 3. 
Implement with Tracking + +Execute tasks systematically: +- Run `/hyh implement` +- Tasks are tracked via daemon +- Progress is visible with `/hyh status` + +## Why This Matters + +- Specs catch misunderstandings early +- Plans break work into manageable pieces +- Tracking ensures nothing is forgotten +- Worktrees keep main branch clean +``` + +**Step 6: Commit** + +```bash +git add src/hyh/plugin/ +git commit -m "feat: add Claude Code plugin files" +``` + +--- + +## Task 11: Add hyh init Command + +**Files:** + +- Create: `src/hyh/init.py` +- Modify: `src/hyh/client.py` +- Create: `tests/hyh/test_init.py` + +**Step 1: Write test for init command** + +Create `tests/hyh/test_init.py`: + +```python +"""Tests for hyh init command.""" + +from pathlib import Path + +import pytest + + +def test_init_creates_plugin_directory(tmp_path: Path, monkeypatch): + """hyh init creates .claude/plugins/hyh/ structure.""" + from hyh.init import init_project + + result = init_project(tmp_path) + + plugin_dir = tmp_path / ".claude" / "plugins" / "hyh" + assert plugin_dir.exists() + assert (plugin_dir / "plugin.json").exists() + assert (plugin_dir / "commands" / "hyh.md").exists() + assert (plugin_dir / "hooks" / "hooks.json").exists() + + +def test_init_creates_hyh_directory(tmp_path: Path): + """hyh init creates .hyh/ with config and templates.""" + from hyh.init import init_project + + result = init_project(tmp_path) + + hyh_dir = tmp_path / ".hyh" + assert hyh_dir.exists() + assert (hyh_dir / "config.json").exists() + assert (hyh_dir / "templates" / "spec-template.md").exists() + + +def test_init_config_has_required_fields(tmp_path: Path): + """Config file has main_branch and next_feature_number.""" + import json + + from hyh.init import init_project + + init_project(tmp_path) + + config = json.loads((tmp_path / ".hyh" / "config.json").read_text()) + assert "main_branch" in config + assert "next_feature_number" in config + assert config["next_feature_number"] == 1 +``` + +**Step 2: 
Run tests to verify they fail** + +Run: `pytest tests/hyh/test_init.py -v` +Expected: FAIL with "No module named 'hyh.init'" + +**Step 3: Create init.py** + +Create `src/hyh/init.py`: + +```python +"""Project initialization for hyh.""" + +import json +import shutil +import subprocess +from importlib.resources import files +from pathlib import Path + +from msgspec import Struct + + +class InitResult(Struct, frozen=True): + """Result of project initialization.""" + + project_root: Path + plugin_dir: Path + hyh_dir: Path + main_branch: str + + +def _get_main_branch(project_root: Path) -> str: + """Detect main branch name.""" + result = subprocess.run( + ["git", "symbolic-ref", "refs/remotes/origin/HEAD"], + cwd=project_root, + capture_output=True, + text=True, + ) + if result.returncode == 0: + # refs/remotes/origin/main -> main + return result.stdout.strip().split("/")[-1] + + # Fallback: check if main or master exists + for branch in ["main", "master"]: + result = subprocess.run( + ["git", "rev-parse", "--verify", branch], + cwd=project_root, + capture_output=True, + ) + if result.returncode == 0: + return branch + + return "main" # Default + + +def init_project(project_root: Path) -> InitResult: + """Initialize hyh in a project. + + Creates: + - .claude/plugins/hyh/ with plugin files + - .hyh/ with config and templates + + Args: + project_root: Path to project root. + + Returns: + InitResult with created paths. 
+ """ + project_root = Path(project_root).resolve() + + # Create plugin directory + plugin_dir = project_root / ".claude" / "plugins" / "hyh" + plugin_dir.mkdir(parents=True, exist_ok=True) + + # Copy plugin files from package + plugin_source = files("hyh") / "plugin" + + # Copy plugin.json + (plugin_dir / "plugin.json").write_text( + (plugin_source / "plugin.json").read_text() + ) + + # Copy commands + commands_dir = plugin_dir / "commands" + commands_dir.mkdir(exist_ok=True) + for cmd_file in ["hyh.md", "help.md"]: + src = plugin_source / "commands" / cmd_file + if src.is_file(): + (commands_dir / cmd_file).write_text(src.read_text()) + + # Copy hooks + hooks_dir = plugin_dir / "hooks" + hooks_dir.mkdir(exist_ok=True) + (hooks_dir / "hooks.json").write_text( + (plugin_source / "hooks" / "hooks.json").read_text() + ) + + # Copy skills + skills_dir = plugin_dir / "skills" + skills_dir.mkdir(exist_ok=True) + skills_src = plugin_source / "skills" / "spec-driven-dev.md" + if skills_src.is_file(): + (skills_dir / "spec-driven-dev.md").write_text(skills_src.read_text()) + + # Create .hyh directory + hyh_dir = project_root / ".hyh" + hyh_dir.mkdir(exist_ok=True) + + # Detect main branch + main_branch = _get_main_branch(project_root) + + # Create config + config = { + "main_branch": main_branch, + "next_feature_number": 1, + } + (hyh_dir / "config.json").write_text(json.dumps(config, indent=2)) + + # Copy templates + templates_dir = hyh_dir / "templates" + templates_dir.mkdir(exist_ok=True) + + templates_source = files("hyh") / "templates" + for template in ["spec-template.md", "plan-template.md", "tasks-template.md", "checklist-template.md"]: + src = templates_source / template + if src.is_file(): + (templates_dir / template).write_text(src.read_text()) + + return InitResult( + project_root=project_root, + plugin_dir=plugin_dir, + hyh_dir=hyh_dir, + main_branch=main_branch, + ) +``` + +**Step 4: Add CLI command** + +Add to `src/hyh/client.py`: + +```python +# Add to 
argparse +subparsers.add_parser("init", help="Initialize hyh in current project") +``` + +Add handler: + +```python +def _cmd_init() -> None: + from hyh.init import init_project + + project_root = Path(_get_git_root()) + result = init_project(project_root) + + print("hyh initialized!") + print() + print(f"Plugin: {result.plugin_dir}") + print(f"Config: {result.hyh_dir}") + print(f"Branch: {result.main_branch}") + print() + print("Next steps:") + print(" 1. Commit the .claude/ and .hyh/ directories") + print(" 2. In Claude Code, run: /hyh specify ") +``` + +Add to match: + +```python +case "init": + _cmd_init() +``` + +**Step 5: Run tests and commit** + +Run: `pytest tests/hyh/test_init.py -v` +Expected: PASS + +```bash +git add src/hyh/init.py src/hyh/client.py tests/hyh/test_init.py +git commit -m "feat: add hyh init command for project setup" +``` + +--- + +## Task 12: Integration Test - Full Workflow + +**Files:** + +- Create: `tests/hyh/test_integration_workflow.py` + +**Step 1: Write integration test** + +Create `tests/hyh/test_integration_workflow.py`: + +```python +"""Integration test for full hyh workflow.""" + +import json +import subprocess +from pathlib import Path + +import pytest + + +@pytest.mark.slow +def test_full_workflow_specify_to_implement(tmp_path: Path): + """Test complete workflow from init through task execution.""" + # 1. 
Create git repo + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + check=True, + capture_output=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + check=True, + capture_output=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, check=True, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + check=True, + capture_output=True, + ) + + # 2. Init hyh + from hyh.init import init_project + + init_project(main_repo) + assert (main_repo / ".claude" / "plugins" / "hyh" / "plugin.json").exists() + assert (main_repo / ".hyh" / "config.json").exists() + + # 3. Create worktree + from hyh.worktree import create_worktree + + wt_result = create_worktree(main_repo, "1-test-feature") + worktree = wt_result.worktree_path + assert worktree.exists() + + # 4. Check workflow status (should be "none") + from hyh.workflow import detect_phase + + phase = detect_phase(worktree) + assert phase.phase == "none" + assert phase.next_action == "specify" + + # 5. Create spec manually (simulating /hyh specify) + specs_dir = worktree / "specs" + specs_dir.mkdir() + (specs_dir / "spec.md").write_text("# Test Feature Spec") + + phase = detect_phase(worktree) + assert phase.phase == "specify" + assert phase.next_action == "plan" + + # 6. 
Create plan and tasks (simulating /hyh plan) + (specs_dir / "plan.md").write_text("# Implementation Plan") + (specs_dir / "tasks.md").write_text("""\ +## Phase 1: Setup + +- [ ] T001 Create project structure +- [ ] T002 [P] Initialize configuration + +## Phase 2: Core + +- [ ] T003 Implement main feature +""") + + phase = detect_phase(worktree) + assert phase.phase == "plan" + assert phase.next_action == "implement" + assert phase.tasks_total == 3 + assert phase.tasks_complete == 0 + + # 7. Parse tasks and verify structure + from hyh.plan import parse_speckit_tasks + + tasks = parse_speckit_tasks((specs_dir / "tasks.md").read_text()) + assert len(tasks.tasks) == 3 + assert tasks.tasks["T001"].phase == "Setup" + assert tasks.tasks["T003"].dependencies == ("T001", "T002") + + # 8. Convert to workflow state + state = tasks.to_workflow_state() + assert len(state.tasks) == 3 + + # 9. Simulate completion + (specs_dir / "tasks.md").write_text("""\ +## Phase 1: Setup + +- [x] T001 Create project structure +- [x] T002 [P] Initialize configuration + +## Phase 2: Core + +- [x] T003 Implement main feature +""") + + phase = detect_phase(worktree) + assert phase.phase == "complete" + assert phase.next_action is None +``` + +**Step 2: Run integration test** + +Run: `pytest tests/hyh/test_integration_workflow.py -v -m slow` +Expected: PASS + +**Step 3: Commit** + +```bash +git add tests/hyh/test_integration_workflow.py +git commit -m "test: add full workflow integration test" +``` + +--- + +## Summary + +| Task | Description | Files | +|------|-------------|-------| +| 1 | Speckit checkbox parser | plan.py | +| 2 | Phase dependencies | plan.py | +| 3 | SpecTaskList → WorkflowState | plan.py | +| 4 | worktree.py module | worktree.py | +| 5 | List/switch worktrees | worktree.py | +| 6 | Worktree CLI commands | client.py | +| 7 | Workflow state module | workflow.py | +| 8 | Workflow CLI commands | client.py | +| 9 | Bundle templates | templates/, pyproject.toml | +| 10 | Plugin files 
| plugin/ | +| 11 | hyh init command | init.py, client.py | +| 12 | Integration test | test_integration_workflow.py | + +--- + +**Plan complete and saved to `docs/plans/2025-12-29-speckit-implementation.md`.** + +**Two execution options:** + +1. **Subagent-Driven (this session)** - I dispatch a fresh subagent per task, review between tasks, fast iteration + +2. **Parallel Session (separate)** - Open new Claude Code session in worktree, use `superpowers:executing-plans` for batch execution with checkpoints + +**Which approach?** diff --git a/docs/plans/2025-12-29-speckit-integration-design.md b/docs/plans/2025-12-29-speckit-integration-design.md new file mode 100644 index 0000000..a21a5ec --- /dev/null +++ b/docs/plans/2025-12-29-speckit-integration-design.md @@ -0,0 +1,307 @@ +# Design: Speckit Integration into hyh + +**Date**: 2025-12-29 +**Status**: Approved +**Branch**: feature/speckit-integration + +## Overview + +Transform hyh from a low-level task orchestrator into a complete spec-driven development workflow tool. The speckit workflow from test-prompt becomes native to hyh, shipped as a Claude Code plugin with built-in git worktree management. + +## Architecture + +### Dual-Natured Tool + +1. **Python Package** (`pip install hyh`) + - Daemon with Unix socket RPC (per-worktree) + - State management, task coordination, git mutex + - Git worktree management (DHH-style) + - `hyh init` scaffolds Claude Code plugin + +2. 
**Claude Code Plugin** (generated by `hyh init`) + - Commands: `/hyh`, `/hyh:help` + - Hooks: SessionStart shows workflow state + - Skills: spec-driven development guidance + +### Worktree Pattern (DHH-style) + +``` +~/projects/ +├── myapp/ # Main repo (hyh init here) +├── myapp--42-user-auth/ # Feature worktree +├── myapp--43-payment-flow/ # Another feature +└── myapp--44-dashboard/ # Parallel work +``` + +## Command Structure + +### Main Entry: `/hyh` with `$ARGUMENTS` routing + +``` +/hyh → Detect state, ask what to do +/hyh specify → Create worktree + spec +/hyh plan → Generate design artifacts + tasks +/hyh implement → Execute tasks via daemon +/hyh status → Show current workflow state +/hyh switch → Switch to worktree N +/hyh list → Show all worktrees +``` + +### Phase Mapping + +| Phase | Includes | Output | +|-------|----------|--------| +| `specify` | specify + clarify | `spec.md` finalized | +| `plan` | plan + tasks + checklist + analyze | All design artifacts | +| `implement` | execute tasks | Working code | + +## Package Structure + +``` +src/hyh/ +├── __init__.py +├── client.py # CLI entry point (existing + new) +├── daemon.py # Unix socket RPC server (existing) +├── state.py # Task/WorkflowState structs (existing) +├── plan.py # UPDATED: Parse speckit checkbox format +├── worktree.py # NEW: Git worktree management +├── templates/ # Bundled from test-prompt +│ ├── spec-template.md +│ ├── plan-template.md +│ ├── tasks-template.md +│ └── checklist-template.md +├── plugin/ # Claude Code plugin files +│ ├── plugin.json +│ ├── commands/ +│ │ ├── hyh.md +│ │ └── help.md +│ ├── skills/ +│ │ └── spec-driven-dev.md +│ └── hooks/ +│ └── hooks.json +└── scripts/ + └── ... 
+``` + +## CLI Commands + +### New Commands + +```bash +# Worktree management +hyh worktree create # Create ../project--slug/, init specs/ +hyh worktree list # List all feature worktrees +hyh worktree switch # Switch to worktree + +# Workflow state +hyh workflow init # Initialize workflow in current worktree +hyh workflow status # Show phase, progress, next action + +# Init +hyh init # Scaffold plugin + .hyh/ in main repo +``` + +### Updated Commands + +```bash +hyh plan import --file # Parse speckit checkbox format +``` + +## Updated `plan.py` Parser + +Parse speckit checkbox format: + +```markdown +## Phase 2: User Story 1 - Authentication +- [ ] T005 [P] [US1] Create User model in src/models/user.py +- [ ] T006 [US1] Implement auth service in src/services/auth.py +- [x] T007 [US1] Add login endpoint in src/api/auth.py +``` + +Extracts: +- `id`: T005, T006, T007 +- `status`: pending (`[ ]`) or completed (`[x]`) +- `parallel`: True if `[P]` present +- `user_story`: US1 (optional) +- `description`: "Create User model" +- `file_path`: parsed from description +- `phase`: from heading + +Dependencies derived from phases (Phase N depends on Phase N-1). + +## Command Flows + +### `/hyh specify "user authentication"` + +``` +1. Parse slug: "user-auth" +2. Get next number: N=42 +3. Run: hyh worktree create 42-user-auth +4. Load spec-template.md from .hyh/templates/ +5. Fill template, identify [NEEDS CLARIFICATION] markers +6. Ask clarifying questions (max 5, one at a time) +7. Write specs/spec.md +8. Run: hyh workflow init --phase specify +``` + +### `/hyh plan` + +``` +1. Verify in worktree, spec.md exists +2. Load spec.md, .hyh/constitution.md (if exists) +3. Generate research.md (resolve unknowns) +4. Generate plan.md (architecture, tech stack) +5. Generate data-model.md, contracts/ (if applicable) +6. Generate tasks.md (speckit checkbox format) +7. Generate checklists/requirements.md +8. Run consistency analysis +9. Run: hyh plan import --file specs/tasks.md +10. 
Run: hyh workflow status (confirm tasks loaded) +``` + +### `/hyh implement` + +``` +1. Run: hyh workflow status (verify tasks exist) +2. Check checklists (ask to proceed if incomplete) +3. Loop: + a. Run: hyh task claim → get task JSON + b. If no task: done + c. Execute task (create/modify files per instructions) + d. Run: hyh task complete --id + e. Update specs/tasks.md (mark [x]) + f. Repeat +4. Run: hyh workflow status (show completion) +``` + +### `/hyh` (guided entry) + +``` +1. Run: hyh workflow status --json +2. Detect state: + - No worktree? → "Start new feature?" + - Has spec, no plan? → "Continue to planning?" + - Has tasks, incomplete? → "Resume implementation?" + - All complete? → "All tasks done! Ready to merge." +3. Present options via natural conversation +4. Route to appropriate phase +``` + +## Plugin Files + +### plugin.json + +```json +{ + "name": "hyh", + "description": "Hold Your Horses - spec-driven development workflow", + "version": "0.2.0", + "commands": ["./commands/"], + "skills": ["./skills/"], + "hooks": "./hooks/hooks.json" +} +``` + +### hooks/hooks.json + +```json +{ + "hooks": { + "SessionStart": [{ + "matcher": "", + "hooks": [{ + "type": "command", + "command": "hyh workflow status --quiet" + }] + }] + } +} +``` + +## File Structure After `hyh init` + +### Main Repo + +``` +.claude/plugins/hyh/ +├── plugin.json +├── commands/ +│ ├── hyh.md +│ └── help.md +├── skills/ +│ └── spec-driven-dev.md +└── hooks/ + └── hooks.json + +.hyh/ +├── config.json # Main branch, next feature # +├── templates/ +│ ├── spec-template.md +│ ├── plan-template.md +│ ├── tasks-template.md +│ └── checklist-template.md +└── constitution.md # Optional +``` + +### Feature Worktree + +``` +myapp--42-user-auth/ +├── .hyh/ +│ └── state.json # Daemon state +├── specs/ +│ ├── spec.md +│ ├── plan.md +│ ├── research.md +│ ├── data-model.md +│ ├── tasks.md +│ ├── contracts/ +│ └── checklists/ +└── (project files via worktree) +``` + +## Preserved from test-prompt + 
+| File | Status | Notes | +|------|--------|-------| +| `spec-template.md` | Exact copy | User stories with priorities | +| `plan-template.md` | Exact copy | Constitution check, structure | +| `tasks-template.md` | Exact copy | Checkbox format, phases | +| `checklist-template.md` | Exact copy | "Unit tests for requirements" | +| `constitution.md` | Exact copy | 5 principles, governance | +| `speckit.*.md` logic | Adapted | Merged into `/hyh` commands | + +## Implementation Tasks + +1. **Update `plan.py`** - Add speckit checkbox format parser +2. **Add `worktree.py`** - DHH-style worktree management +3. **Add workflow commands** - `hyh workflow init/status` +4. **Create plugin files** - Commands adapted from speckit +5. **Add `hyh init`** - Scaffold plugin + templates +6. **Bundle templates** - Include test-prompt templates in package +7. **Write tests** - Parser, worktree, workflow state + +## Design Decisions + +### Why Plugin Architecture (not embedded)? +- Keeps hyh core focused on infrastructure +- Plugin can evolve independently +- Users can customize commands/templates +- Follows Claude Code ecosystem patterns + +### Why DHH-style Worktrees? +- Simple naming: `project--branch` +- Sibling directories, no nesting +- Easy to switch, list, cleanup +- Parallel work without conflicts + +### Why Merge Phases? +- Three entry points match mental model (specify/plan/implement) +- Reduces command sprawl +- Natural checkpoints for user review +- Mirrors superpowers pattern + +### Why Not `/hyh finish`? 
+- Merging is developer's domain +- Teams have different workflows (PR, direct merge, squash) +- hyh orchestrates development, not git workflow diff --git a/pyproject.toml b/pyproject.toml index be47439..affa583 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,6 +128,10 @@ ignore = [ "src/hyh/daemon.py" = ["T201", "S603"] # Runtime is the subprocess execution layer - S603/S607 are intentional "src/hyh/runtime.py" = ["S603", "S607"] +# Worktree module executes git worktree commands - S603/S607 are intentional +"src/hyh/worktree.py" = ["S603", "S607"] +# Init module executes git commands for branch detection - S603/S607 are intentional +"src/hyh/init.py" = ["S603", "S607"] # State uses **kwargs: Any for Pydantic model_copy (legitimate Any usage) "src/hyh/state.py" = ["ANN401"] # Scripts: libcst visitor methods require specific naming (leave_Module, etc.) and unused args diff --git a/src/hyh/client.py b/src/hyh/client.py index dbbff32..ce6ece1 100644 --- a/src/hyh/client.py +++ b/src/hyh/client.py @@ -525,6 +525,8 @@ def main() -> None: subparsers.add_parser("demo", help="Interactive tour of hyh features") + subparsers.add_parser("init", help="Initialize hyh in current project") + status_parser = subparsers.add_parser("status", help="Show workflow status and recent events") status_parser.add_argument("--json", action="store_true", help="Output raw JSON") status_parser.add_argument( @@ -541,6 +543,24 @@ def main() -> None: help="List all registered projects", ) + worktree_parser = subparsers.add_parser("worktree", help="Git worktree management") + worktree_sub = worktree_parser.add_subparsers(dest="worktree_command", required=True) + + worktree_create = worktree_sub.add_parser("create", help="Create a new worktree") + worktree_create.add_argument("branch", help="Branch name (e.g., 42-user-auth)") + + worktree_sub.add_parser("list", help="List all worktrees") + + worktree_switch = worktree_sub.add_parser("switch", help="Show path to switch to worktree") + 
worktree_switch.add_argument("branch", help="Branch name to switch to") + + workflow_parser = subparsers.add_parser("workflow", help="Workflow state management") + workflow_sub = workflow_parser.add_subparsers(dest="workflow_command", required=True) + + workflow_status = workflow_sub.add_parser("status", help="Show current workflow phase") + workflow_status.add_argument("--json", action="store_true", help="Output JSON") + workflow_status.add_argument("--quiet", action="store_true", help="Minimal output") + args = parser.parse_args() if args.project: @@ -599,8 +619,25 @@ def main() -> None: _cmd_worker_id() case "demo": demo.run() + case "init": + _cmd_init() case "status": _cmd_status(args, socket_path, worktree_root) + case "worktree": + match args.worktree_command: + case "create": + _cmd_worktree_create(args.branch) + case "list": + _cmd_worktree_list() + case "switch": + _cmd_worktree_switch(args.branch) + case "workflow": + match args.workflow_command: + case "status": + _cmd_workflow_status( + json_output=getattr(args, "json", False), + quiet=getattr(args, "quiet", False), + ) def _cmd_ping(socket_path: str, worktree_root: str) -> None: @@ -824,6 +861,23 @@ def _cmd_worker_id() -> None: print(get_worker_id()) +def _cmd_init() -> None: + from hyh.init import init_project + + project_root = Path(_get_git_root()) + result = init_project(project_root) + + print("hyh initialized!") + print() + print(f"Plugin: {result.plugin_dir}") + print(f"Config: {result.hyh_dir}") + print(f"Branch: {result.main_branch}") + print() + print("Next steps:") + print(" 1. Commit the .claude/ and .hyh/ directories") + print(" 2. 
In Claude Code, run: /hyh specify ") + + def _cmd_plan_import(socket_path: str, worktree_root: str, file_path: str) -> None: path = Path(file_path) if not path.exists(): @@ -860,5 +914,93 @@ def _cmd_plan_reset(socket_path: str, worktree_root: str) -> None: print("Workflow state cleared") +def _cmd_worktree_create(branch: str) -> None: + from hyh.worktree import create_worktree + + main_repo = Path(_get_git_root()) + result = create_worktree(main_repo, branch) + print(f"Created worktree: {result.worktree_path}") + print(f"Branch: {result.branch_name}") + print(f"\nTo switch: cd {result.worktree_path}") + + +def _cmd_worktree_list() -> None: + from hyh.worktree import list_worktrees + + main_repo = Path(_get_git_root()) + worktrees = list_worktrees(main_repo) + + if not worktrees: + print("No worktrees found.") + return + + print("Worktrees:") + for wt in worktrees: + print(f" {wt.branch_name}: {wt.worktree_path}") + + +def _cmd_worktree_switch(branch: str) -> None: + from hyh.worktree import get_worktree + + main_repo = Path(_get_git_root()) + wt = get_worktree(main_repo, branch) + + if wt is None: + print(f"Worktree not found: {branch}", file=sys.stderr) + sys.exit(1) + + print(f"cd {wt.worktree_path}") + + +def _cmd_workflow_status(json_output: bool = False, quiet: bool = False) -> None: + import json as json_module + + from hyh.workflow import detect_phase + + worktree = Path(_get_git_root()) + phase = detect_phase(worktree) + + if json_output: + print( + json_module.dumps( + { + "phase": phase.phase, + "next_action": phase.next_action, + "spec_exists": phase.spec_exists, + "plan_exists": phase.plan_exists, + "tasks_total": phase.tasks_total, + "tasks_complete": phase.tasks_complete, + } + ) + ) + return + + if quiet: + if phase.next_action: + print(f"Next: /hyh {phase.next_action}") + else: + print("Complete") + return + + print("=" * 50) + print(" WORKFLOW STATUS") + print("=" * 50) + print() + print(f" Phase: {phase.phase}") + print(f" Spec: {'yes' if 
class InitResult(Struct, frozen=True, forbid_unknown_fields=True):
    """Result of project initialization."""

    project_root: Path  # resolved project root
    plugin_dir: Path  # .claude/plugins/hyh
    hyh_dir: Path  # .hyh
    main_branch: str  # detected default branch name


def _get_main_branch(project_root: Path) -> str:
    """Detect main branch name.

    Tries the origin/HEAD symbolic ref first, then a local ``main`` or
    ``master`` branch, then falls back to the literal ``"main"``.
    """
    result = subprocess.run(
        ["git", "symbolic-ref", "refs/remotes/origin/HEAD"],
        cwd=project_root,
        capture_output=True,
        text=True,
    )
    if result.returncode == 0:
        # refs/remotes/origin/main -> main
        return result.stdout.strip().split("/")[-1]

    # Fallback: check if main or master exists locally
    for branch in ["main", "master"]:
        result = subprocess.run(
            ["git", "rev-parse", "--verify", branch],
            cwd=project_root,
            capture_output=True,
        )
        if result.returncode == 0:
            return branch

    return "main"  # Default


def init_project(project_root: Path) -> InitResult:
    """Initialize hyh in a project.

    Creates:
    - .claude/plugins/hyh/ with plugin files
    - .hyh/ with config and templates

    Re-running refreshes bundled plugin files and templates but
    preserves an existing .hyh/config.json.

    Args:
        project_root: Path to project root.

    Returns:
        InitResult with created paths.
    """
    project_root = Path(project_root).resolve()

    # Create plugin directory
    plugin_dir = project_root / ".claude" / "plugins" / "hyh"
    plugin_dir.mkdir(parents=True, exist_ok=True)

    # Copy plugin files bundled as package resources
    plugin_source = files("hyh") / "plugin"

    # Copy plugin.json
    (plugin_dir / "plugin.json").write_text((plugin_source / "plugin.json").read_text())

    # Copy commands
    commands_dir = plugin_dir / "commands"
    commands_dir.mkdir(exist_ok=True)
    for cmd_file in ["hyh.md", "help.md"]:
        src = plugin_source / "commands" / cmd_file
        if src.is_file():
            (commands_dir / cmd_file).write_text(src.read_text())

    # Copy hooks
    hooks_dir = plugin_dir / "hooks"
    hooks_dir.mkdir(exist_ok=True)
    (hooks_dir / "hooks.json").write_text((plugin_source / "hooks" / "hooks.json").read_text())

    # Copy skills
    skills_dir = plugin_dir / "skills"
    skills_dir.mkdir(exist_ok=True)
    skills_src = plugin_source / "skills" / "spec-driven-dev.md"
    if skills_src.is_file():
        (skills_dir / "spec-driven-dev.md").write_text(skills_src.read_text())

    # Create .hyh directory
    hyh_dir = project_root / ".hyh"
    hyh_dir.mkdir(exist_ok=True)

    # Detect main branch
    main_branch = _get_main_branch(project_root)

    # Bug fix: only write config when absent -- previously a re-run of
    # init clobbered config.json and reset next_feature_number to 1.
    config_path = hyh_dir / "config.json"
    if not config_path.exists():
        config = {
            "main_branch": main_branch,
            "next_feature_number": 1,
        }
        config_path.write_text(json.dumps(config, indent=2))

    # Copy templates
    templates_dir = hyh_dir / "templates"
    templates_dir.mkdir(exist_ok=True)

    templates_source = files("hyh") / "templates"
    for template in [
        "spec-template.md",
        "plan-template.md",
        "tasks-template.md",
        "checklist-template.md",
    ]:
        src = templates_source / template
        if src.is_file():
            (templates_dir / template).write_text(src.read_text())

    return InitResult(
        project_root=project_root,
        plugin_dir=plugin_dir,
        hyh_dir=hyh_dir,
        main_branch=main_branch,
    )
_CHECKBOX_PATTERN: Final[re.Pattern[str]] = re.compile(
    r"^- \[([ xX])\] (T\d+)( \[P\])?(?: \[([A-Z]+\d+)\])? (.+)$"
)

_PHASE_PATTERN: Final[re.Pattern[str]] = re.compile(r"^## Phase \d+: (.+)$")


class SpecTaskDefinition(Struct, frozen=True, forbid_unknown_fields=True, omit_defaults=True):
    """Task definition from speckit checkbox format."""

    description: str
    status: str = "pending"  # "pending" or "completed"
    parallel: bool = False  # marked [P]: may run concurrently with siblings
    user_story: str | None = None  # e.g. "US1" when marked [US1]
    phase: str | None = None  # phase heading the task appeared under
    file_path: str | None = None  # trailing dotted token of the description, if any
    dependencies: tuple[str, ...] = ()


class SpecTaskList(Struct, frozen=True, forbid_unknown_fields=True):
    """Parsed speckit tasks.md content."""

    tasks: dict[str, SpecTaskDefinition]
    phases: tuple[str, ...]

    def to_workflow_state(self) -> WorkflowState:
        """Convert to WorkflowState for daemon execution."""
        tasks = {}
        for tid, spec_task in self.tasks.items():
            status = TaskStatus.COMPLETED if spec_task.status == "completed" else TaskStatus.PENDING
            tasks[tid] = Task(
                id=tid,
                description=spec_task.description,
                status=status,
                dependencies=spec_task.dependencies,
            )
        return WorkflowState(tasks=tasks)


def parse_speckit_tasks(content: str) -> SpecTaskList:
    """Parse speckit checkbox format into task list.

    Format:
        ## Phase N: Phase Name
        - [ ] T001 [P] [US1] Description with path/to/file.py
        - [x] T002 Completed task

    Markers:
        - [ ] = pending, [x] = completed
        - [P] = parallel (can run concurrently)
        - [US1] = user story reference

    Dependencies:
        Tasks in Phase N automatically depend on ALL tasks in Phase N-1.
    """
    tasks: dict[str, SpecTaskDefinition] = {}
    phases: list[str] = []
    phase_tasks: dict[str, list[str]] = {}
    current_phase: str | None = None

    for line in content.split("\n"):
        stripped = line.strip()

        phase_match = _PHASE_PATTERN.match(stripped)
        if phase_match:
            current_phase = phase_match.group(1)
            phases.append(current_phase)
            phase_tasks[current_phase] = []
            continue

        checkbox_match = _CHECKBOX_PATTERN.match(stripped)
        if checkbox_match:
            check, task_id, p_marker, user_story, description = checkbox_match.groups()
            # Bug fix: derive parallelism from the captured "[P]" marker
            # following the task ID, not a substring search over the whole
            # line, which false-positived on descriptions mentioning "[P]".
            parallel = p_marker is not None

            # Heuristic: a trailing dotted token is treated as a file path.
            file_path = None
            path_match = re.search(r"(\S+\.\w+)$", description)
            if path_match:
                file_path = path_match.group(1)

            tasks[task_id] = SpecTaskDefinition(
                description=description.strip(),
                status="completed" if check.lower() == "x" else "pending",
                parallel=parallel,
                user_story=user_story,
                phase=current_phase,
                file_path=file_path,
            )

            # Track which tasks belong to which phase
            if current_phase is not None:
                phase_tasks[current_phase].append(task_id)

    # Phase-based dependencies: tasks in Phase N depend on ALL tasks in
    # Phase N-1.  Structs are frozen, so rebuild each task with deps set.
    for i, phase in enumerate(phases):
        if i == 0:
            continue
        prev_phase_task_ids = tuple(phase_tasks[phases[i - 1]])
        for task_id in phase_tasks[phase]:
            old_task = tasks[task_id]
            tasks[task_id] = SpecTaskDefinition(
                description=old_task.description,
                status=old_task.status,
                parallel=old_task.parallel,
                user_story=old_task.user_story,
                phase=old_task.phase,
                file_path=old_task.file_path,
                dependencies=prev_phase_task_ids,
            )

    return SpecTaskList(tasks=tasks, phases=tuple(phases))
phases=tuple(phases)) diff --git a/src/hyh/plugin/commands/help.md b/src/hyh/plugin/commands/help.md new file mode 100644 index 0000000..43967c6 --- /dev/null +++ b/src/hyh/plugin/commands/help.md @@ -0,0 +1,35 @@ +--- +description: Show hyh commands and current workflow state +--- + +# hyh Help + +Display available commands and current state: + +1. Run: `hyh workflow status` +2. Show this help: + +## Commands + +| Command | Description | +|---------|-------------| +| `/hyh specify ` | Start new feature - creates worktree, generates spec | +| `/hyh plan` | Generate design artifacts and tasks from spec | +| `/hyh implement` | Execute tasks with daemon coordination | +| `/hyh status` | Show current workflow phase and progress | + +## Workflow + +```text +specify → plan → implement → merge + ↓ ↓ ↓ +spec.md tasks.md working code +``` + +## Worktree Commands + +| Command | Description | +|---------|-------------| +| `hyh worktree create ` | Create new feature worktree | +| `hyh worktree list` | List all feature worktrees | +| `hyh worktree switch ` | Show path to switch to worktree | diff --git a/src/hyh/plugin/commands/hyh.md b/src/hyh/plugin/commands/hyh.md new file mode 100644 index 0000000..0530507 --- /dev/null +++ b/src/hyh/plugin/commands/hyh.md @@ -0,0 +1,56 @@ +--- +description: Spec-driven development workflow - specify, plan, implement +argument-hint: [specify|plan|implement|status] [args] +allowed-tools: Bash(hyh:*), Bash(git:*), Read, Write, Edit, Glob, Grep +--- + +# hyh - Spec-Driven Development + +Route based on $ARGUMENTS: + +## If $ARGUMENTS starts with "specify" + +Extract the feature description after "specify". Then: + +1. Generate a slug from the description (2-4 words, kebab-case) +2. Get next feature number: `hyh workflow status --json` and increment +3. Create worktree: `hyh worktree create {N}-{slug}` +4. Load spec template and fill with user's description +5. 
Ask up to 5 clarifying questions (one at a time) for [NEEDS CLARIFICATION] markers +6. Write finalized spec to `specs/spec.md` +7. Report: "Spec complete. Run `/hyh plan` to continue." + +## If $ARGUMENTS starts with "plan" + +1. Verify `specs/spec.md` exists +2. Load spec and constitution (if `.hyh/constitution.md` exists) +3. Generate `specs/research.md` (resolve technical unknowns) +4. Generate `specs/plan.md` (architecture, tech stack) +5. Generate `specs/data-model.md` if entities involved +6. Generate `specs/tasks.md` in speckit checkbox format +7. Generate `specs/checklists/requirements.md` +8. Run consistency analysis +9. Import tasks: `hyh plan import --file specs/tasks.md` +10. Report: "Plan complete. Run `/hyh implement` to continue." + +## If $ARGUMENTS starts with "implement" + +1. Run: `hyh workflow status` to verify tasks exist +2. Check checklists pass (or ask to proceed) +3. Loop: + a. `hyh task claim` → get next task + b. If no task: done + c. Execute task per instructions + d. `hyh task complete --id {id}` + e. Update specs/tasks.md with [x] +4. Report completion + +## If $ARGUMENTS is empty or "status" + +Run: `hyh workflow status` + +Based on result, suggest next action: +- No spec? → "Start with: /hyh specify " +- Has spec, no plan? → "Continue with: /hyh plan" +- Has tasks? → "Continue with: /hyh implement" +- All complete? → "All done! Ready to merge." 
diff --git a/src/hyh/plugin/hooks/hooks.json b/src/hyh/plugin/hooks/hooks.json new file mode 100644 index 0000000..767ca28 --- /dev/null +++ b/src/hyh/plugin/hooks/hooks.json @@ -0,0 +1,15 @@ +{ + "hooks": { + "SessionStart": [ + { + "matcher": "", + "hooks": [ + { + "type": "command", + "command": "hyh workflow status --quiet" + } + ] + } + ] + } +} diff --git a/src/hyh/plugin/plugin.json b/src/hyh/plugin/plugin.json new file mode 100644 index 0000000..3b7efb6 --- /dev/null +++ b/src/hyh/plugin/plugin.json @@ -0,0 +1,14 @@ +{ + "name": "hyh", + "description": "Hold Your Horses - spec-driven development workflow", + "version": "0.2.0", + "author": { + "name": "Pedro Proenca", + "email": "pedro@10xengs.com" + }, + "repository": "https://github.com/pproenca/hyh", + "license": "MIT", + "commands": ["./commands/"], + "skills": ["./skills/"], + "hooks": "./hooks/hooks.json" +} diff --git a/src/hyh/plugin/skills/spec-driven-dev.md b/src/hyh/plugin/skills/spec-driven-dev.md new file mode 100644 index 0000000..8bf547e --- /dev/null +++ b/src/hyh/plugin/skills/spec-driven-dev.md @@ -0,0 +1,36 @@ +--- +name: spec-driven-development +description: Use when implementing features - follow the specify → plan → implement workflow +--- + +# Spec-Driven Development + +When implementing any non-trivial feature, use the hyh workflow: + +## 1. Specify First + +Before writing code, create a specification: +- Run `/hyh specify ` +- Answer clarifying questions +- Review the generated spec.md + +## 2. Plan Before Implementing + +Generate design artifacts: +- Run `/hyh plan` +- Review tasks.md for the work breakdown +- Check checklists pass + +## 3. 
Implement with Tracking + +Execute tasks systematically: +- Run `/hyh implement` +- Tasks are tracked via daemon +- Progress is visible with `/hyh status` + +## Why This Matters + +- Specs catch misunderstandings early +- Plans break work into manageable pieces +- Tracking ensures nothing is forgotten +- Worktrees keep main branch clean diff --git a/src/hyh/templates/checklist-template.md b/src/hyh/templates/checklist-template.md new file mode 100644 index 0000000..0caeacf --- /dev/null +++ b/src/hyh/templates/checklist-template.md @@ -0,0 +1,40 @@ +# [CHECKLIST TYPE] Checklist: [FEATURE NAME] + +**Purpose**: [Brief description of what this checklist covers] +**Created**: [DATE] +**Feature**: [Link to spec.md or relevant documentation] + +**Note**: This checklist is generated by the `/speckit.checklist` command based on feature context and requirements. + + + +## [Category 1] + +- [ ] CHK001 First checklist item with clear action +- [ ] CHK002 Second checklist item +- [ ] CHK003 Third checklist item + +## [Category 2] + +- [ ] CHK004 Another category item +- [ ] CHK005 Item with specific criteria +- [ ] CHK006 Final item in this category + +## Notes + +- Check items off as completed: `[x]` +- Add comments or findings inline +- Link to relevant resources or documentation +- Items are numbered sequentially for easy reference diff --git a/src/hyh/templates/plan-template.md b/src/hyh/templates/plan-template.md new file mode 100644 index 0000000..a86b481 --- /dev/null +++ b/src/hyh/templates/plan-template.md @@ -0,0 +1,134 @@ +# Implementation Plan: [Feature] + +**Branch**: `[###-feature-name]` | **Date**: [Date] | **Specification**: [Link] +**Input**: Feature specification from `/specs/[###-feature-name]/spec.md` + +**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for execution workflow. 
+ +## Overview + +[Extract from feature specification: Key requirements + technical approach from research] + +## Technical Context + + + +**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION] +**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION] +**Storage**: [If applicable, e.g., PostgreSQL, CoreData, File or N/A] +**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION] +**Target Platform**: [e.g., Linux Server, iOS 15+, WASM or NEEDS CLARIFICATION] +**Project Type**: [single/web/mobile - determines source structure] +**Performance Goals**: [Domain-specific, e.g., 1000 req/s, 10k lines/sec, 60 fps or NEEDS CLARIFICATION] +**Constraints**: [Domain-specific, e.g., \<200ms p95, \<100MB memory, offline support or NEEDS CLARIFICATION] +**Scale/Scope**: [Domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION] + +## Constitution Check + +*Gate: Must pass before Phase 0 research. Re-verify after Phase 1 design.* + +**Reference**: Verify the following based on the 5 principles in `.specify/memory/constitution.md` + +### I. Code Quality Principle + +- [ ] Are readability and documentation requirements met? +- [ ] Are naming conventions clearly defined? +- [ ] Is code complexity within reasonable bounds? + +### II. Test-Driven Development + +- [ ] Is a test-first development process planned? +- [ ] Is there a plan for contract tests, integration tests, and unit tests? +- [ ] Is a test coverage target (80% or more) set? + +### III. UX Consistency + +- [ ] Are consistent UI patterns defined? +- [ ] Is error message clarity ensured? +- [ ] Is accessibility considered? + +### IV. Performance Standards + +- [ ] Are API response time targets (p95 < 200ms) considered? +- [ ] Is database optimization planned? +- [ ] Are frontend load time targets set (if applicable)? + +### V. Maintainability and Extensibility + +- [ ] Is modular, loosely-coupled design adopted? 
+- [ ] Is the configuration management policy clear? +- [ ] Is a versioning strategy defined? + +**Violation Justification**: Record in the "Complexity Tracking" table in this section + +## Project Structure + +### Documentation (for this feature) + +```text +specs/[###-feature]/ +├── plan.md # This file (output of /speckit.plan command) +├── research.md # Phase 0 output (/speckit.plan command) +├── data-model.md # Phase 1 output (/speckit.plan command) +├── quickstart.md # Phase 1 output (/speckit.plan command) +├── contracts/ # Phase 1 output (/speckit.plan command) +└── tasks.md # Phase 2 output (/speckit.tasks command - not created by /speckit.plan) +``` + +### Source Code (repository root) + + + +```text +# [Delete if unused] Option 1: Single Project (default) +src/ +├── models/ +├── services/ +├── cli/ +└── lib/ + +tests/ +├── contract/ +├── integration/ +└── unit/ + +# [Delete if unused] Option 2: Web Application (when "frontend" + "backend" detected) +backend/ +├── src/ +│ ├── models/ +│ ├── services/ +│ └── api/ +└── tests/ + +frontend/ +├── src/ +│ ├── components/ +│ ├── pages/ +│ └── services/ +└── tests/ + +# [Delete if unused] Option 3: Mobile + API (when "iOS/Android" detected) +api/ +└── [same as backend above] + +ios/ or android/ +└── [platform-specific structure: feature modules, UI flows, platform tests] +``` + +**Structure Decision**: [Document chosen structure and reference actual directories captured above] + +## Complexity Tracking + +> **Only fill in if there are violations requiring justification in the Constitution Check** + +| Violation | Reason Needed | Why Simpler Alternative Was Rejected | +| -------------------------- | ------------------ | -------------------------------------- | +| [e.g., 4th project] | [Current need] | [Why 3 projects are insufficient] | +| [e.g., Repository pattern] | [Specific problem] | [Why direct DB access is insufficient] | diff --git a/src/hyh/templates/spec-template.md b/src/hyh/templates/spec-template.md 
new file mode 100644 index 0000000..f30c2f0 --- /dev/null +++ b/src/hyh/templates/spec-template.md @@ -0,0 +1,115 @@ +# Feature Specification: [Feature Name] + +**Feature Branch**: `[###-feature-name]` +**Created**: [Date] +**Status**: Draft +**Input**: User description: "$ARGUMENTS" + +## User Scenarios and Tests *(Required)* + + + +### User Story 1 - [Concise Title] (Priority: P1) + +[Describe this user journey in plain language] + +**Reason for this priority**: [Explain the value and why it has this priority level] + +**Independent testing**: \[Explain how this can be tested independently - e.g., "Can be fully tested by [specific action] and provides [specific value]"\] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected result] +1. **Given** [initial state], **When** [action], **Then** [expected result] + +______________________________________________________________________ + +### User Story 2 - [Concise Title] (Priority: P2) + +[Describe this user journey in plain language] + +**Reason for this priority**: [Explain the value and why it has this priority level] + +**Independent testing**: [Explain how this can be tested independently] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected result] + +______________________________________________________________________ + +### User Story 3 - [Concise Title] (Priority: P3) + +[Describe this user journey in plain language] + +**Reason for this priority**: [Explain the value and why it has this priority level] + +**Independent testing**: [Explain how this can be tested independently] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected result] + +______________________________________________________________________ + +[Add additional user stories as needed, each with an assigned priority] + +### Edge Cases + + + +- What happens when [boundary condition]? 
+- How does the system handle [error scenario]? + +## Requirements *(Required)* + + + +### Functional Requirements + +- **FR-001**: The system must [specific function, e.g., "allow users to create accounts"] +- **FR-002**: The system must [specific function, e.g., "validate email addresses"] +- **FR-003**: Users must be able to [important interaction, e.g., "reset their passwords"] +- **FR-004**: The system must [data requirement, e.g., "persist user settings"] +- **FR-005**: The system must [behavior, e.g., "log all security events"] + +*Example of marking unclear requirements:* + +- **FR-006**: The system must authenticate users via [NEEDS CLARIFICATION: Authentication method not specified - email/password, SSO, OAuth?] +- **FR-007**: The system must retain user data for [NEEDS CLARIFICATION: Retention period not specified] + +### Key Entities *(Include if the feature handles data)* + +- **[Entity 1]**: [What it represents, key attributes without implementation details] +- **[Entity 2]**: [What it represents, relationships with other entities] + +## Success Criteria *(Required)* + + + +### Measurable Outcomes + +- **SC-001**: [Measurable metric, e.g., "Users can complete account creation within 2 minutes"] +- **SC-002**: [Measurable metric, e.g., "System can handle 1000 concurrent users without performance degradation"] +- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete the main task on first attempt"] +- **SC-004**: \[Business metric, e.g., "Reduce support tickets related to [X] by 50%"\] diff --git a/src/hyh/templates/tasks-template.md b/src/hyh/templates/tasks-template.md new file mode 100644 index 0000000..1076096 --- /dev/null +++ b/src/hyh/templates/tasks-template.md @@ -0,0 +1,250 @@ +______________________________________________________________________ + +## description: "Task list template for feature implementation" + +# Tasks: [FEATURE NAME] + +**Input**: Design documents from `/specs/[###-feature-name]/` 
+**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/ + +**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification. + +**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story. + +## Format: `[ID] [P?] [Story] Description` + +- **[P]**: Can run in parallel (different files, no dependencies) +- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3) +- Include exact file paths in descriptions + +## Path Conventions + +- **Single project**: `src/`, `tests/` at repository root +- **Web app**: `backend/src/`, `frontend/src/` +- **Mobile**: `api/src/`, `ios/src/` or `android/src/` +- Paths shown below assume single project - adjust based on plan.md structure + + + +## Phase 1: Setup (Shared Infrastructure) + +**Purpose**: Project initialization and basic structure + +- [ ] T001 Create project structure per implementation plan +- [ ] T002 Initialize [language] project with [framework] dependencies +- [ ] T003 [P] Configure linting and formatting tools + +______________________________________________________________________ + +## Phase 2: Foundational (Blocking Prerequisites) + +**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented + +**⚠️ CRITICAL**: No user story work can begin until this phase is complete + +Examples of foundational tasks (adjust based on your project): + +- [ ] T004 Setup database schema and migrations framework +- [ ] T005 [P] Implement authentication/authorization framework +- [ ] T006 [P] Setup API routing and middleware structure +- [ ] T007 Create base models/entities that all stories depend on +- [ ] T008 Configure error handling and logging infrastructure +- [ ] T009 Setup environment configuration management + +**Checkpoint**: Foundation ready - user story implementation can now begin 
in parallel + +______________________________________________________________________ + +## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️ + +> **NOTE: Write these tests FIRST, ensure they FAIL before implementation** + +- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test\_[name].py +- [ ] T011 [P] [US1] Integration test for [user journey] in tests/integration/test\_[name].py + +### Implementation for User Story 1 + +- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py +- [ ] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py +- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013) +- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T016 [US1] Add validation and error handling +- [ ] T017 [US1] Add logging for user story 1 operations + +**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently + +______________________________________________________________________ + +## Phase 4: User Story 2 - [Title] (Priority: P2) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️ + +- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test\_[name].py +- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test\_[name].py + +### Implementation for User Story 2 + +- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py +- [ ] T021 [US2] Implement [Service] in src/services/[service].py +- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T023 [US2] Integrate with User Story 1 components (if needed) 
+ +**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently + +______________________________________________________________________ + +## Phase 5: User Story 3 - [Title] (Priority: P3) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 3 (OPTIONAL - only if tests requested) ⚠️ + +- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test\_[name].py +- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test\_[name].py + +### Implementation for User Story 3 + +- [ ] T026 [P] [US3] Create [Entity] model in src/models/[entity].py +- [ ] T027 [US3] Implement [Service] in src/services/[service].py +- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py + +**Checkpoint**: All user stories should now be independently functional + +______________________________________________________________________ + +[Add more user story phases as needed, following the same pattern] + +______________________________________________________________________ + +## Phase N: Polish & Cross-Cutting Concerns + +**Purpose**: Improvements that affect multiple user stories + +- [ ] TXXX [P] Documentation updates in docs/ +- [ ] TXXX Code cleanup and refactoring +- [ ] TXXX Performance optimization across all stories +- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/ +- [ ] TXXX Security hardening +- [ ] TXXX Run quickstart.md validation + +______________________________________________________________________ + +## Dependencies & Execution Order + +### Phase Dependencies + +- **Setup (Phase 1)**: No dependencies - can start immediately +- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories +- **User Stories (Phase 3+)**: All depend on Foundational phase completion + - User stories can then proceed in parallel (if staffed) + - Or sequentially in priority order 
(P1 → P2 → P3) +- **Polish (Final Phase)**: Depends on all desired user stories being complete + +### User Story Dependencies + +- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories +- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable +- **User Story 3 (P3)**: Can start after Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable + +### Within Each User Story + +- Tests (if included) MUST be written and FAIL before implementation +- Models before services +- Services before endpoints +- Core implementation before integration +- Story complete before moving to next priority + +### Parallel Opportunities + +- All Setup tasks marked [P] can run in parallel +- All Foundational tasks marked [P] can run in parallel (within Phase 2) +- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows) +- All tests for a user story marked [P] can run in parallel +- Models within a story marked [P] can run in parallel +- Different user stories can be worked on in parallel by different team members + +______________________________________________________________________ + +## Parallel Example: User Story 1 + +```bash +# Launch all tests for User Story 1 together (if tests requested): +Task: "Contract test for [endpoint] in tests/contract/test_[name].py" +Task: "Integration test for [user journey] in tests/integration/test_[name].py" + +# Launch all models for User Story 1 together: +Task: "Create [Entity1] model in src/models/[entity1].py" +Task: "Create [Entity2] model in src/models/[entity2].py" +``` + +______________________________________________________________________ + +## Implementation Strategy + +### MVP First (User Story 1 Only) + +1. Complete Phase 1: Setup +1. Complete Phase 2: Foundational (CRITICAL - blocks all stories) +1. Complete Phase 3: User Story 1 +1. 
**STOP and VALIDATE**: Test User Story 1 independently +1. Deploy/demo if ready + +### Incremental Delivery + +1. Complete Setup + Foundational → Foundation ready +1. Add User Story 1 → Test independently → Deploy/Demo (MVP!) +1. Add User Story 2 → Test independently → Deploy/Demo +1. Add User Story 3 → Test independently → Deploy/Demo +1. Each story adds value without breaking previous stories + +### Parallel Team Strategy + +With multiple developers: + +1. Team completes Setup + Foundational together +1. Once Foundational is done: + - Developer A: User Story 1 + - Developer B: User Story 2 + - Developer C: User Story 3 +1. Stories complete and integrate independently + +______________________________________________________________________ + +## Notes + +- [P] tasks = different files, no dependencies +- [Story] label maps task to specific user story for traceability +- Each user story should be independently completable and testable +- Verify tests fail before implementing +- Commit after each task or logical group +- Stop at any checkpoint to validate story independently +- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence diff --git a/src/hyh/workflow.py b/src/hyh/workflow.py new file mode 100644 index 0000000..144b617 --- /dev/null +++ b/src/hyh/workflow.py @@ -0,0 +1,84 @@ +"""Workflow state detection and management.""" + +from pathlib import Path + +from msgspec import Struct + +from .plan import parse_speckit_tasks + + +class WorkflowPhase(Struct, frozen=True, forbid_unknown_fields=True): + """Current workflow phase and suggested next action.""" + + phase: str # "none", "specify", "plan", "implement", "complete" + next_action: str | None # "specify", "plan", "implement", None + spec_exists: bool = False + plan_exists: bool = False + tasks_total: int = 0 + tasks_complete: int = 0 + + +def detect_phase(worktree: Path) -> WorkflowPhase: + """Detect current workflow phase based on artifacts. 
def detect_phase(worktree: Path) -> WorkflowPhase:
    """Detect current workflow phase based on artifacts.

    Args:
        worktree: Path to worktree root.

    Returns:
        WorkflowPhase with current state and suggested action.
    """
    worktree = Path(worktree)
    specs_dir = worktree / "specs"

    spec_exists = (specs_dir / "spec.md").exists()
    plan_exists = (specs_dir / "plan.md").exists()
    tasks_path = specs_dir / "tasks.md"

    # No spec = nothing started
    if not spec_exists:
        return WorkflowPhase(
            phase="none",
            next_action="specify",
            spec_exists=False,
            plan_exists=False,
        )

    # Has spec but no plan
    if not plan_exists:
        return WorkflowPhase(
            phase="specify",
            next_action="plan",
            spec_exists=True,
            plan_exists=False,
        )

    # Has plan; inspect tasks.md (if generated) for progress
    tasks_total = 0
    tasks_complete = 0
    if tasks_path.exists():
        task_list = parse_speckit_tasks(tasks_path.read_text())
        tasks_total = len(task_list.tasks)
        tasks_complete = sum(1 for t in task_list.tasks.values() if t.status == "completed")

    if tasks_total > 0 and tasks_complete >= tasks_total:
        return WorkflowPhase(
            phase="complete",
            next_action=None,
            spec_exists=True,
            plan_exists=True,
            tasks_total=tasks_total,
            tasks_complete=tasks_complete,
        )

    if tasks_total > 0:
        # Bug fix: tasks exist and are underway -> "implement" phase.
        # Previously this state was reported as "plan", leaving the
        # documented "implement" phase value unreachable.
        return WorkflowPhase(
            phase="implement",
            next_action="implement",
            spec_exists=True,
            plan_exists=True,
            tasks_total=tasks_total,
            tasks_complete=tasks_complete,
        )

    # Plan exists but tasks.md has not been generated yet
    return WorkflowPhase(
        phase="plan",
        next_action="implement",
        spec_exists=True,
        plan_exists=True,
        tasks_total=tasks_total,
        tasks_complete=tasks_complete,
    )
+See: https://gist.github.com/dhh/18575558fc5ee10f15b6cd3e108ed844 +""" + +import subprocess +from pathlib import Path + +from msgspec import Struct + + +class WorktreeResult(Struct, frozen=True, forbid_unknown_fields=True): + """Result of worktree creation.""" + + worktree_path: Path + branch_name: str + main_repo: Path + + +def create_worktree(main_repo: Path, branch_name: str) -> WorktreeResult: + """Create a worktree with DHH-style naming. + + Creates: ../{repo_name}--{branch_name}/ + Branch: {branch_name} + + Args: + main_repo: Path to the main repository. + branch_name: Name for both branch and worktree suffix. + + Returns: + WorktreeResult with paths. + + Raises: + subprocess.CalledProcessError: If git commands fail. + """ + main_repo = Path(main_repo).resolve() + repo_name = main_repo.name + worktree_path = main_repo.parent / f"{repo_name}--{branch_name}" + + subprocess.run( + ["git", "worktree", "add", "-b", branch_name, str(worktree_path)], + cwd=main_repo, + capture_output=True, + check=True, + ) + + return WorktreeResult( + worktree_path=worktree_path, + branch_name=branch_name, + main_repo=main_repo, + ) + + +def list_worktrees(main_repo: Path) -> list[WorktreeResult]: + """List all DHH-style worktrees for a repository. + + Args: + main_repo: Path to the main repository. + + Returns: + List of WorktreeResult for each worktree. 
+ """ + main_repo = Path(main_repo).resolve() + repo_name = main_repo.name + prefix = f"{repo_name}--" + + result = subprocess.run( + ["git", "worktree", "list", "--porcelain"], + cwd=main_repo, + capture_output=True, + text=True, + check=True, + ) + + worktrees: list[WorktreeResult] = [] + current_path: Path | None = None + current_branch: str | None = None + + for line in result.stdout.split("\n"): + if line.startswith("worktree "): + current_path = Path(line.split(" ", 1)[1]) + elif line.startswith("branch refs/heads/"): + current_branch = line.replace("branch refs/heads/", "") + elif line == "" and current_path and current_branch: + # Filter to DHH-style worktrees only + if current_path.name.startswith(prefix): + worktrees.append( + WorktreeResult( + worktree_path=current_path, + branch_name=current_branch, + main_repo=main_repo, + ) + ) + current_path = None + current_branch = None + + return worktrees + + +def get_worktree(main_repo: Path, branch_name: str) -> WorktreeResult | None: + """Get worktree for a specific branch. + + Args: + main_repo: Path to the main repository. + branch_name: Branch name to find. + + Returns: + WorktreeResult if found, None otherwise. + """ + worktrees = list_worktrees(main_repo) + for wt in worktrees: + if wt.branch_name == branch_name: + return wt + return None diff --git a/test-prompt/.specify/memory/constitution.md b/test-prompt/.specify/memory/constitution.md new file mode 100644 index 0000000..d1ad2f8 --- /dev/null +++ b/test-prompt/.specify/memory/constitution.md @@ -0,0 +1,173 @@ + + +# CC-WF-Studio Constitution + +## Core Principles + +### I. 
Code Quality Principle + +**Required Standards**: + +- All code must prioritize readability and be self-documenting +- Variable names, function names, and class names must use clear Japanese or English that expresses their purpose +- Magic numbers are prohibited; they must be defined as constants +- Code complexity must be kept within reasonable bounds, with refactoring performed as needed +- All public APIs and functions must have appropriate documentation comments + +**Rationale**: Highly readable code reduces maintenance costs and improves overall team productivity. Clear naming conventions and documentation also reduce onboarding time for new members. + +### II. Test-Driven Development (Required) + +**Required Standards**: + +- All new features must be developed test-first (Red-Green-Refactor cycle) +- Test coverage must be maintained at minimum 80%, with 100% targeted for critical business logic +- The following 3 types of tests must be appropriately implemented: + - **Contract tests**: Public interface specifications for APIs and libraries + - **Integration tests**: Coordination behavior between multiple components + - **Unit tests**: Behavior of individual functions and methods +- Tests must be independently executable and must not depend on execution order +- When tests fail, implementation work must stop and root cause investigation and fixes must be prioritized + +**Rationale**: Test-driven development achieves requirement clarification, design quality improvement, and regression prevention. Tests function as a safety net for development, enabling confident refactoring and deployment. + +### III. 
UX Consistency + +**Required Standards**: + +- All user interfaces must follow consistent design patterns +- Error messages must be clear and indicate actionable steps (what happened, why it happened, how to resolve it) +- Loading states, success states, and error states must be explicitly displayed +- Accessibility must be considered including keyboard operation and screen readers +- Continuous improvements based on user feedback must be implemented +- Consistent input/output formats must be adopted across CLI, API, and GUI + +**Rationale**: Consistent UX flattens the learning curve and improves user productivity and satisfaction. Ensuring accessibility allows us to provide value to more users. + +### IV. Performance Standards + +**Required Standards**: + +- All API response times must target 200ms or less at the 95th percentile (p95) +- Database queries must use appropriate indexes and avoid N+1 problems +- Memory leaks must be prevented, with stable memory usage during long-running execution +- Performance-critical processes must undergo measurement and profiling +- Pagination, streaming, and batch processing must be applied when handling large data volumes +- Frontend must target initial load time within 3 seconds and time to interactive within 5 seconds + +**Rationale**: Performance directly impacts user experience. Response delays increase user churn rates and harm business value. Early measurement and optimization ensure scalability. + +### V. Maintainability and Extensibility + +**Required Standards**: + +- All features must be designed as independent libraries/modules +- Dependencies must be minimized to maintain loosely coupled architecture +- Configuration values must be separated from code and managed via environment variables or configuration files +- Logs must be output in structured format (JSON, etc.) 
to facilitate debugging and monitoring +- Versioning must follow semantic versioning (MAJOR.MINOR.PATCH) +- Breaking changes must include migration guides and deprecation periods + +**Rationale**: Highly maintainable code reduces long-term development costs. Modularization enables feature reuse and parallel development, improving team productivity. + +## Development Workflow + +### Feature Development Process + +1. **Specification**: Clearly define user stories and acceptance criteria (spec.md) +1. **Design**: Document technical approach and architecture (plan.md) +1. **Test Creation**: Implement acceptance criteria as test code +1. **Implementation**: Implement features until tests pass +1. **Review**: Conduct code review and quality checks +1. **Deployment**: Perform staged release and monitoring + +### Branch Strategy + +- Main branch: `master` or `main` (always maintain deployable state) +- Feature branches: `###-feature-name` (number and descriptive name) +- Utilize feature flags to maintain safe state even when merging incomplete features + +## Code Review Process + +### Required Review Items + +All pull requests must be reviewed from the following perspectives: + +1. **Constitution Compliance Check**: Does it comply with all 5 principles? +1. **Test Sufficiency**: Is there appropriate test coverage? +1. **Security**: Are there any vulnerabilities or security risks? +1. **Performance Impact**: Is there any performance degradation? +1. **Documentation**: Has necessary documentation been updated? + +### Justification of Complexity + +When introducing complexity that violates the constitution (e.g., multiple projects, complex abstractions): + +- Clear explanation of necessity +- Reasons why simpler alternatives were considered and rejected +- Record in the "Complexity Tracking" section of the implementation plan (plan.md) + +## Governance + +### Position of the Constitution + +This constitution takes precedence over all development practices. 
All members participating in the project are responsible for understanding and complying with this constitution. + +### Amendment Process + +Constitution amendments must follow these steps: + +1. Document the amendment proposal (background, reason, scope of impact) +1. Team review and discussion +1. Consensus building (major changes require unanimous approval) +1. Develop transition plan (if there is impact on existing code) +1. Update version number and create release notes + +### Compliance Review + +- Verify constitution compliance on all pull requests +- Review constitution compliance status of existing code quarterly +- When violations are discovered, develop improvement plans and address them as priority + +### Versioning Policy + +- **MAJOR**: Removal or redefinition of principles without backward compatibility +- **MINOR**: Addition of new principles or significant expansion of existing principles +- **PATCH**: Non-essential changes such as clarification, wording improvements, typo corrections + +**Version**: 1.0.0 | **Ratified**: 2025-11-01 | **Last Amended**: 2025-11-01 diff --git a/test-prompt/.specify/scripts/bash/check-prerequisites.sh b/test-prompt/.specify/scripts/bash/check-prerequisites.sh new file mode 100755 index 0000000..54f32ec --- /dev/null +++ b/test-prompt/.specify/scripts/bash/check-prerequisites.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash + +# Consolidated prerequisite checking script +# +# This script provides unified prerequisite checking for Spec-Driven Development workflow. +# It replaces the functionality previously spread across multiple scripts. 
+# +# Usage: ./check-prerequisites.sh [OPTIONS] +# +# OPTIONS: +# --json Output in JSON format +# --require-tasks Require tasks.md to exist (for implementation phase) +# --include-tasks Include tasks.md in AVAILABLE_DOCS list +# --paths-only Only output path variables (no validation) +# --help, -h Show help message +# +# OUTPUTS: +# JSON mode: {"FEATURE_DIR":"...", "AVAILABLE_DOCS":["..."]} +# Text mode: FEATURE_DIR:... \n AVAILABLE_DOCS: \n ✓/✗ file.md +# Paths only: REPO_ROOT: ... \n BRANCH: ... \n FEATURE_DIR: ... etc. + +set -e + +# Parse command line arguments +JSON_MODE=false +REQUIRE_TASKS=false +INCLUDE_TASKS=false +PATHS_ONLY=false + +for arg in "$@"; do + case "$arg" in + --json) + JSON_MODE=true + ;; + --require-tasks) + REQUIRE_TASKS=true + ;; + --include-tasks) + INCLUDE_TASKS=true + ;; + --paths-only) + PATHS_ONLY=true + ;; + --help|-h) + cat << 'EOF' +Usage: check-prerequisites.sh [OPTIONS] + +Consolidated prerequisite checking for Spec-Driven Development workflow. + +OPTIONS: + --json Output in JSON format + --require-tasks Require tasks.md to exist (for implementation phase) + --include-tasks Include tasks.md in AVAILABLE_DOCS list + --paths-only Only output path variables (no prerequisite validation) + --help, -h Show this help message + +EXAMPLES: + # Check task prerequisites (plan.md required) + ./check-prerequisites.sh --json + + # Check implementation prerequisites (plan.md + tasks.md required) + ./check-prerequisites.sh --json --require-tasks --include-tasks + + # Get feature paths only (no validation) + ./check-prerequisites.sh --paths-only + +EOF + exit 0 + ;; + *) + echo "ERROR: Unknown option '$arg'. Use --help for usage information." 
>&2 + exit 1 + ;; + esac +done + +# Source common functions +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/common.sh" + +# Get feature paths and validate branch +eval $(get_feature_paths) +check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1 + +# If paths-only mode, output paths and exit (support JSON + paths-only combined) +if $PATHS_ONLY; then + if $JSON_MODE; then + # Minimal JSON paths payload (no validation performed) + printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \ + "$REPO_ROOT" "$CURRENT_BRANCH" "$FEATURE_DIR" "$FEATURE_SPEC" "$IMPL_PLAN" "$TASKS" + else + echo "REPO_ROOT: $REPO_ROOT" + echo "BRANCH: $CURRENT_BRANCH" + echo "FEATURE_DIR: $FEATURE_DIR" + echo "FEATURE_SPEC: $FEATURE_SPEC" + echo "IMPL_PLAN: $IMPL_PLAN" + echo "TASKS: $TASKS" + fi + exit 0 +fi + +# Validate required directories and files +if [[ ! -d "$FEATURE_DIR" ]]; then + echo "ERROR: Feature directory not found: $FEATURE_DIR" >&2 + echo "Run /speckit.specify first to create the feature structure." >&2 + exit 1 +fi + +if [[ ! -f "$IMPL_PLAN" ]]; then + echo "ERROR: plan.md not found in $FEATURE_DIR" >&2 + echo "Run /speckit.plan first to create the implementation plan." >&2 + exit 1 +fi + +# Check for tasks.md if required +if $REQUIRE_TASKS && [[ ! -f "$TASKS" ]]; then + echo "ERROR: tasks.md not found in $FEATURE_DIR" >&2 + echo "Run /speckit.tasks first to create the task list." 
>&2 + exit 1 +fi + +# Build list of available documents +docs=() + +# Always check these optional docs +[[ -f "$RESEARCH" ]] && docs+=("research.md") +[[ -f "$DATA_MODEL" ]] && docs+=("data-model.md") + +# Check contracts directory (only if it exists and has files) +if [[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]; then + docs+=("contracts/") +fi + +[[ -f "$QUICKSTART" ]] && docs+=("quickstart.md") + +# Include tasks.md if requested and it exists +if $INCLUDE_TASKS && [[ -f "$TASKS" ]]; then + docs+=("tasks.md") +fi + +# Output results +if $JSON_MODE; then + # Build JSON array of documents + if [[ ${#docs[@]} -eq 0 ]]; then + json_docs="[]" + else + json_docs=$(printf '"%s",' "${docs[@]}") + json_docs="[${json_docs%,}]" + fi + + printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs" +else + # Text output + echo "FEATURE_DIR:$FEATURE_DIR" + echo "AVAILABLE_DOCS:" + + # Show status of each potential document + check_file "$RESEARCH" "research.md" + check_file "$DATA_MODEL" "data-model.md" + check_dir "$CONTRACTS_DIR" "contracts/" + check_file "$QUICKSTART" "quickstart.md" + + if $INCLUDE_TASKS; then + check_file "$TASKS" "tasks.md" + fi +fi diff --git a/test-prompt/.specify/scripts/bash/common.sh b/test-prompt/.specify/scripts/bash/common.sh new file mode 100755 index 0000000..6931ecc --- /dev/null +++ b/test-prompt/.specify/scripts/bash/common.sh @@ -0,0 +1,156 @@ +#!/usr/bin/env bash +# Common functions and variables for all scripts + +# Get repository root, with fallback for non-git repositories +get_repo_root() { + if git rev-parse --show-toplevel >/dev/null 2>&1; then + git rev-parse --show-toplevel + else + # Fall back to script location for non-git repos + local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + (cd "$script_dir/../../.." 
&& pwd) + fi +} + +# Get current branch, with fallback for non-git repositories +get_current_branch() { + # First check if SPECIFY_FEATURE environment variable is set + if [[ -n "${SPECIFY_FEATURE:-}" ]]; then + echo "$SPECIFY_FEATURE" + return + fi + + # Then check git if available + if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then + git rev-parse --abbrev-ref HEAD + return + fi + + # For non-git repos, try to find the latest feature directory + local repo_root=$(get_repo_root) + local specs_dir="$repo_root/specs" + + if [[ -d "$specs_dir" ]]; then + local latest_feature="" + local highest=0 + + for dir in "$specs_dir"/*; do + if [[ -d "$dir" ]]; then + local dirname=$(basename "$dir") + if [[ "$dirname" =~ ^([0-9]{3})- ]]; then + local number=${BASH_REMATCH[1]} + number=$((10#$number)) + if [[ "$number" -gt "$highest" ]]; then + highest=$number + latest_feature=$dirname + fi + fi + fi + done + + if [[ -n "$latest_feature" ]]; then + echo "$latest_feature" + return + fi + fi + + echo "main" # Final fallback +} + +# Check if we have git available +has_git() { + git rev-parse --show-toplevel >/dev/null 2>&1 +} + +check_feature_branch() { + local branch="$1" + local has_git_repo="$2" + + # For non-git repos, we can't enforce branch naming but still provide output + if [[ "$has_git_repo" != "true" ]]; then + echo "[specify] Warning: Git repository not detected; skipped branch validation" >&2 + return 0 + fi + + if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then + echo "ERROR: Not on a feature branch. 
Current branch: $branch" >&2 + echo "Feature branches should be named like: 001-feature-name" >&2 + return 1 + fi + + return 0 +} + +get_feature_dir() { echo "$1/specs/$2"; } + +# Find feature directory by numeric prefix instead of exact branch match +# This allows multiple branches to work on the same spec (e.g., 004-fix-bug, 004-add-feature) +find_feature_dir_by_prefix() { + local repo_root="$1" + local branch_name="$2" + local specs_dir="$repo_root/specs" + + # Extract numeric prefix from branch (e.g., "004" from "004-whatever") + if [[ ! "$branch_name" =~ ^([0-9]{3})- ]]; then + # If branch doesn't have numeric prefix, fall back to exact match + echo "$specs_dir/$branch_name" + return + fi + + local prefix="${BASH_REMATCH[1]}" + + # Search for directories in specs/ that start with this prefix + local matches=() + if [[ -d "$specs_dir" ]]; then + for dir in "$specs_dir"/"$prefix"-*; do + if [[ -d "$dir" ]]; then + matches+=("$(basename "$dir")") + fi + done + fi + + # Handle results + if [[ ${#matches[@]} -eq 0 ]]; then + # No match found - return the branch name path (will fail later with clear error) + echo "$specs_dir/$branch_name" + elif [[ ${#matches[@]} -eq 1 ]]; then + # Exactly one match - perfect! + echo "$specs_dir/${matches[0]}" + else + # Multiple matches - this shouldn't happen with proper naming convention + echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2 + echo "Please ensure only one spec directory exists per numeric prefix." 
>&2 + echo "$specs_dir/$branch_name" # Return something to avoid breaking the script + fi +} + +get_feature_paths() { + local repo_root=$(get_repo_root) + local current_branch=$(get_current_branch) + local has_git_repo="false" + + if has_git; then + has_git_repo="true" + fi + + # Use prefix-based lookup to support multiple branches per spec + local feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch") + + cat </dev/null) ]] && echo " ✓ $2" || echo " ✗ $2"; } + diff --git a/test-prompt/.specify/scripts/bash/create-new-feature.sh b/test-prompt/.specify/scripts/bash/create-new-feature.sh new file mode 100755 index 0000000..86d9ecf --- /dev/null +++ b/test-prompt/.specify/scripts/bash/create-new-feature.sh @@ -0,0 +1,260 @@ +#!/usr/bin/env bash + +set -e + +JSON_MODE=false +SHORT_NAME="" +BRANCH_NUMBER="" +ARGS=() +i=1 +while [ $i -le $# ]; do + arg="${!i}" + case "$arg" in + --json) + JSON_MODE=true + ;; + --short-name) + if [ $((i + 1)) -gt $# ]; then + echo 'Error: --short-name requires a value' >&2 + exit 1 + fi + i=$((i + 1)) + next_arg="${!i}" + # Check if the next argument is another option (starts with --) + if [[ "$next_arg" == --* ]]; then + echo 'Error: --short-name requires a value' >&2 + exit 1 + fi + SHORT_NAME="$next_arg" + ;; + --number) + if [ $((i + 1)) -gt $# ]; then + echo 'Error: --number requires a value' >&2 + exit 1 + fi + i=$((i + 1)) + next_arg="${!i}" + if [[ "$next_arg" == --* ]]; then + echo 'Error: --number requires a value' >&2 + exit 1 + fi + BRANCH_NUMBER="$next_arg" + ;; + --help|-h) + echo "Usage: $0 [--json] [--short-name ] [--number N] " + echo "" + echo "Options:" + echo " --json Output in JSON format" + echo " --short-name Provide a custom short name (2-4 words) for the branch" + echo " --number N Specify branch number manually (overrides auto-detection)" + echo " --help, -h Show this help message" + echo "" + echo "Examples:" + echo " $0 'Add user authentication system' --short-name 'user-auth'" + echo " $0 
'Implement OAuth2 integration for API' --number 5" + exit 0 + ;; + *) + ARGS+=("$arg") + ;; + esac + i=$((i + 1)) +done + +FEATURE_DESCRIPTION="${ARGS[*]}" +if [ -z "$FEATURE_DESCRIPTION" ]; then + echo "Usage: $0 [--json] [--short-name ] [--number N] " >&2 + exit 1 +fi + +# Function to find the repository root by searching for existing project markers +find_repo_root() { + local dir="$1" + while [ "$dir" != "/" ]; do + if [ -d "$dir/.git" ] || [ -d "$dir/.specify" ]; then + echo "$dir" + return 0 + fi + dir="$(dirname "$dir")" + done + return 1 +} + +# Function to check existing branches (local and remote) and return next available number +check_existing_branches() { + local short_name="$1" + + # Fetch all remotes to get latest branch info (suppress errors if no remotes) + git fetch --all --prune 2>/dev/null || true + + # Find all branches matching the pattern using git ls-remote (more reliable) + local remote_branches=$(git ls-remote --heads origin 2>/dev/null | grep -E "refs/heads/[0-9]+-${short_name}$" | sed 's/.*\/\([0-9]*\)-.*/\1/' | sort -n) + + # Also check local branches + local local_branches=$(git branch 2>/dev/null | grep -E "^[* ]*[0-9]+-${short_name}$" | sed 's/^[* ]*//' | sed 's/-.*//' | sort -n) + + # Check specs directory as well + local spec_dirs="" + if [ -d "$SPECS_DIR" ]; then + spec_dirs=$(find "$SPECS_DIR" -maxdepth 1 -type d -name "[0-9]*-${short_name}" 2>/dev/null | xargs -n1 basename 2>/dev/null | sed 's/-.*//' | sort -n) + fi + + # Combine all sources and get the highest number + local max_num=0 + for num in $remote_branches $local_branches $spec_dirs; do + if [ "$num" -gt "$max_num" ]; then + max_num=$num + fi + done + + # Return next number + echo $((max_num + 1)) +} + +# Resolve repository root. Prefer git information when available, but fall back +# to searching for repository markers so the workflow still functions in repositories that +# were initialised with --no-git. 
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +if git rev-parse --show-toplevel >/dev/null 2>&1; then + REPO_ROOT=$(git rev-parse --show-toplevel) + HAS_GIT=true +else + REPO_ROOT="$(find_repo_root "$SCRIPT_DIR")" + if [ -z "$REPO_ROOT" ]; then + echo "Error: Could not determine repository root. Please run this script from within the repository." >&2 + exit 1 + fi + HAS_GIT=false +fi + +cd "$REPO_ROOT" + +SPECS_DIR="$REPO_ROOT/specs" +mkdir -p "$SPECS_DIR" + +# Function to generate branch name with stop word filtering and length filtering +generate_branch_name() { + local description="$1" + + # Common stop words to filter out + local stop_words="^(i|a|an|the|to|for|of|in|on|at|by|with|from|is|are|was|were|be|been|being|have|has|had|do|does|did|will|would|should|could|can|may|might|must|shall|this|that|these|those|my|your|our|their|want|need|add|get|set)$" + + # Convert to lowercase and split into words + local clean_name=$(echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/ /g') + + # Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original) + local meaningful_words=() + for word in $clean_name; do + # Skip empty words + [ -z "$word" ] && continue + + # Keep words that are NOT stop words AND (length >= 3 OR are potential acronyms) + if ! 
echo "$word" | grep -qiE "$stop_words"; then + if [ ${#word} -ge 3 ]; then + meaningful_words+=("$word") + elif echo "$description" | grep -q "\b${word^^}\b"; then + # Keep short words if they appear as uppercase in original (likely acronyms) + meaningful_words+=("$word") + fi + fi + done + + # If we have meaningful words, use first 3-4 of them + if [ ${#meaningful_words[@]} -gt 0 ]; then + local max_words=3 + if [ ${#meaningful_words[@]} -eq 4 ]; then max_words=4; fi + + local result="" + local count=0 + for word in "${meaningful_words[@]}"; do + if [ $count -ge $max_words ]; then break; fi + if [ -n "$result" ]; then result="$result-"; fi + result="$result$word" + count=$((count + 1)) + done + echo "$result" + else + # Fallback to original logic if no meaningful words found + echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//' | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//' + fi +} + +# Generate branch name +if [ -n "$SHORT_NAME" ]; then + # Use provided short name, just clean it up + BRANCH_SUFFIX=$(echo "$SHORT_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//') +else + # Generate from description with smart filtering + BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION") +fi + +# Determine branch number +if [ -z "$BRANCH_NUMBER" ]; then + if [ "$HAS_GIT" = true ]; then + # Check existing branches on remotes + BRANCH_NUMBER=$(check_existing_branches "$BRANCH_SUFFIX") + else + # Fall back to local directory check + HIGHEST=0 + if [ -d "$SPECS_DIR" ]; then + for dir in "$SPECS_DIR"/*; do + [ -d "$dir" ] || continue + dirname=$(basename "$dir") + number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0") + number=$((10#$number)) + if [ "$number" -gt "$HIGHEST" ]; then HIGHEST=$number; fi + done + fi + BRANCH_NUMBER=$((HIGHEST + 1)) + fi +fi + +FEATURE_NUM=$(printf "%03d" "$BRANCH_NUMBER") 
+BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}" + +# GitHub enforces a 244-byte limit on branch names +# Validate and truncate if necessary +MAX_BRANCH_LENGTH=244 +if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then + # Calculate how much we need to trim from suffix + # Account for: feature number (3) + hyphen (1) = 4 chars + MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4)) + + # Truncate suffix at word boundary if possible + TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH) + # Remove trailing hyphen if truncation created one + TRUNCATED_SUFFIX=$(echo "$TRUNCATED_SUFFIX" | sed 's/-$//') + + ORIGINAL_BRANCH_NAME="$BRANCH_NAME" + BRANCH_NAME="${FEATURE_NUM}-${TRUNCATED_SUFFIX}" + + >&2 echo "[specify] Warning: Branch name exceeded GitHub's 244-byte limit" + >&2 echo "[specify] Original: $ORIGINAL_BRANCH_NAME (${#ORIGINAL_BRANCH_NAME} bytes)" + >&2 echo "[specify] Truncated to: $BRANCH_NAME (${#BRANCH_NAME} bytes)" +fi + +if [ "$HAS_GIT" = true ]; then + git checkout -b "$BRANCH_NAME" +else + >&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME" +fi + +FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME" +mkdir -p "$FEATURE_DIR" + +TEMPLATE="$REPO_ROOT/.specify/templates/spec-template.md" +SPEC_FILE="$FEATURE_DIR/spec.md" +if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi + +# Set the SPECIFY_FEATURE environment variable for the current session +export SPECIFY_FEATURE="$BRANCH_NAME" + +if $JSON_MODE; then + printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM" +else + echo "BRANCH_NAME: $BRANCH_NAME" + echo "SPEC_FILE: $SPEC_FILE" + echo "FEATURE_NUM: $FEATURE_NUM" + echo "SPECIFY_FEATURE environment variable set to: $BRANCH_NAME" +fi diff --git a/test-prompt/.specify/scripts/bash/setup-plan.sh b/test-prompt/.specify/scripts/bash/setup-plan.sh new file mode 100755 index 0000000..740a143 --- /dev/null +++ 
b/test-prompt/.specify/scripts/bash/setup-plan.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +set -e + +# Parse command line arguments +JSON_MODE=false +ARGS=() + +for arg in "$@"; do + case "$arg" in + --json) + JSON_MODE=true + ;; + --help|-h) + echo "Usage: $0 [--json]" + echo " --json Output results in JSON format" + echo " --help Show this help message" + exit 0 + ;; + *) + ARGS+=("$arg") + ;; + esac +done + +# Get script directory and load common functions +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/common.sh" + +# Get all paths and variables from common functions +eval $(get_feature_paths) + +# Check if we're on a proper feature branch (only for git repos) +check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1 + +# Ensure the feature directory exists +mkdir -p "$FEATURE_DIR" + +# Copy plan template if it exists +TEMPLATE="$REPO_ROOT/.specify/templates/plan-template.md" +if [[ -f "$TEMPLATE" ]]; then + cp "$TEMPLATE" "$IMPL_PLAN" + echo "Copied plan template to $IMPL_PLAN" +else + echo "Warning: Plan template not found at $TEMPLATE" + # Create a basic plan file if template doesn't exist + touch "$IMPL_PLAN" +fi + +# Output results +if $JSON_MODE; then + printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \ + "$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH" "$HAS_GIT" +else + echo "FEATURE_SPEC: $FEATURE_SPEC" + echo "IMPL_PLAN: $IMPL_PLAN" + echo "SPECS_DIR: $FEATURE_DIR" + echo "BRANCH: $CURRENT_BRANCH" + echo "HAS_GIT: $HAS_GIT" +fi + diff --git a/test-prompt/.specify/scripts/bash/update-agent-context.sh b/test-prompt/.specify/scripts/bash/update-agent-context.sh new file mode 100755 index 0000000..2a44c68 --- /dev/null +++ b/test-prompt/.specify/scripts/bash/update-agent-context.sh @@ -0,0 +1,772 @@ +#!/usr/bin/env bash + +# Update agent context files with information from plan.md +# +# This script maintains AI agent context files by parsing feature 
specifications +# and updating agent-specific configuration files with project information. +# +# MAIN FUNCTIONS: +# 1. Environment Validation +# - Verifies git repository structure and branch information +# - Checks for required plan.md files and templates +# - Validates file permissions and accessibility +# +# 2. Plan Data Extraction +# - Parses plan.md files to extract project metadata +# - Identifies language/version, frameworks, databases, and project types +# - Handles missing or incomplete specification data gracefully +# +# 3. Agent File Management +# - Creates new agent context files from templates when needed +# - Updates existing agent files with new project information +# - Preserves manual additions and custom configurations +# - Supports multiple AI agent formats and directory structures +# +# 4. Content Generation +# - Generates language-specific build/test commands +# - Creates appropriate project directory structures +# - Updates technology stacks and recent changes sections +# - Maintains consistent formatting and timestamps +# +# 5. 
Multi-Agent Support +# - Handles agent-specific file paths and naming conventions +# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, Amp, or Amazon Q Developer CLI +# - Can update single agents or all existing agent files +# - Creates default Claude file if no agent files exist +# +# Usage: ./update-agent-context.sh [agent_type] +# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|q +# Leave empty to update all existing agent files + +set -e + +# Enable strict error handling +set -u +set -o pipefail + +#============================================================================== +# Configuration and Global Variables +#============================================================================== + +# Get script directory and load common functions +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/common.sh" + +# Get all paths and variables from common functions +eval $(get_feature_paths) + +NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code +AGENT_TYPE="${1:-}" + +# Agent-specific file paths +CLAUDE_FILE="$REPO_ROOT/CLAUDE.md" +GEMINI_FILE="$REPO_ROOT/GEMINI.md" +COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md" +CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc" +QWEN_FILE="$REPO_ROOT/QWEN.md" +AGENTS_FILE="$REPO_ROOT/AGENTS.md" +WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md" +KILOCODE_FILE="$REPO_ROOT/.kilocode/rules/specify-rules.md" +AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md" +ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md" +CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md" +AMP_FILE="$REPO_ROOT/AGENTS.md" +Q_FILE="$REPO_ROOT/AGENTS.md" + +# Template file +TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md" + +# Global variables for parsed plan data +NEW_LANG="" +NEW_FRAMEWORK="" +NEW_DB="" +NEW_PROJECT_TYPE="" + 
#==============================================================================
# Utility Functions
#==============================================================================

# Log an informational message to stdout.
log_info() {
    echo "INFO: $1"
}

# Log a success message to stdout.
log_success() {
    echo "✓ $1"
}

# Log an error message to stderr.
log_error() {
    echo "ERROR: $1" >&2
}

# Log a warning message to stderr.
log_warning() {
    echo "WARNING: $1" >&2
}

# Cleanup function for temporary files
# Runs on EXIT/INT/TERM; preserves the original exit code while removing
# any per-process temp files this script may have created.
cleanup() {
    local exit_code=$?
    rm -f /tmp/agent_update_*_$$
    rm -f /tmp/manual_additions_$$
    exit $exit_code
}

# Set up cleanup trap
trap cleanup EXIT INT TERM

#==============================================================================
# Validation Functions
#==============================================================================

# Exit the script unless a current feature/branch is known and its plan.md
# exists. Warns (but does not exit) if the agent-file template is missing.
validate_environment() {
    # Check if we have a current branch/feature (git or non-git)
    if [[ -z "$CURRENT_BRANCH" ]]; then
        log_error "Unable to determine current feature"
        if [[ "$HAS_GIT" == "true" ]]; then
            log_info "Make sure you're on a feature branch"
        else
            log_info "Set SPECIFY_FEATURE environment variable or create a feature first"
        fi
        exit 1
    fi

    # Check if plan.md exists
    if [[ ! -f "$NEW_PLAN" ]]; then
        log_error "No plan.md found at $NEW_PLAN"
        log_info "Make sure you're working on a feature with a corresponding spec directory"
        if [[ "$HAS_GIT" != "true" ]]; then
            log_info "Use: export SPECIFY_FEATURE=your-feature-name or create a new feature first"
        fi
        exit 1
    fi

    # Check if template exists (needed for new files)
    if [[ ! 
-f "$TEMPLATE_FILE" ]]; then
        log_warning "Template file not found at $TEMPLATE_FILE"
        log_warning "Creating new agent files will fail"
    fi
}

#==============================================================================
# Plan Parsing Functions
#==============================================================================

# Extract the value of a "**<field>**: value" line from a plan file.
# Takes the first match, trims surrounding whitespace, and emits the empty
# string for placeholder values ("NEEDS CLARIFICATION", "N/A") or no match.
extract_plan_field() {
    local field_pattern="$1"
    local plan_file="$2"

    grep "^\*\*${field_pattern}\*\*: " "$plan_file" 2>/dev/null | \
        head -1 | \
        sed "s|^\*\*${field_pattern}\*\*: ||" | \
        sed 's/^[ \t]*//;s/[ \t]*$//' | \
        grep -v "NEEDS CLARIFICATION" | \
        grep -v "^N/A$" || echo ""
}

# Populate the NEW_LANG/NEW_FRAMEWORK/NEW_DB/NEW_PROJECT_TYPE globals from
# the given plan.md. Returns 1 if the file is missing or unreadable; missing
# individual fields are tolerated (left empty) and only logged.
parse_plan_data() {
    local plan_file="$1"

    if [[ ! -f "$plan_file" ]]; then
        log_error "Plan file not found: $plan_file"
        return 1
    fi

    if [[ ! -r "$plan_file" ]]; then
        log_error "Plan file is not readable: $plan_file"
        return 1
    fi

    log_info "Parsing plan data from $plan_file"

    NEW_LANG=$(extract_plan_field "Language/Version" "$plan_file")
    NEW_FRAMEWORK=$(extract_plan_field "Primary Dependencies" "$plan_file")
    NEW_DB=$(extract_plan_field "Storage" "$plan_file")
    NEW_PROJECT_TYPE=$(extract_plan_field "Project Type" "$plan_file")

    # Log what we found
    if [[ -n "$NEW_LANG" ]]; then
        log_info "Found language: $NEW_LANG"
    else
        log_warning "No language information found in plan"
    fi

    if [[ -n "$NEW_FRAMEWORK" ]]; then
        log_info "Found framework: $NEW_FRAMEWORK"
    fi

    if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then
        log_info "Found database: $NEW_DB"
    fi

    if [[ -n "$NEW_PROJECT_TYPE" ]]; then
        log_info "Found project type: $NEW_PROJECT_TYPE"
    fi
}

# Join language and framework into a single "lang + framework" display
# string, skipping placeholder values.
format_technology_stack() {
    local lang="$1"
    local framework="$2"
    local parts=()

    # Add non-empty parts
    [[ -n "$lang" && "$lang" != "NEEDS CLARIFICATION" ]] && parts+=("$lang")
    [[ -n "$framework" && "$framework" != "NEEDS CLARIFICATION" && "$framework" != "N/A" ]] && parts+=("$framework")

    # Join with proper formatting
if [[ ${#parts[@]} -eq 0 ]]; then
        echo ""
    elif [[ ${#parts[@]} -eq 1 ]]; then
        echo "${parts[0]}"
    else
        # Join multiple parts with " + "
        local result="${parts[0]}"
        for ((i=1; i<${#parts[@]}; i++)); do
            result="$result + ${parts[i]}"
        done
        echo "$result"
    fi
}

#==============================================================================
# Template and Content Generation Functions
#==============================================================================

# Emit a directory-layout snippet for the given project type.
# "\n" escapes are literal here; they are converted to real newlines later
# by create_new_agent_file.
get_project_structure() {
    local project_type="$1"

    if [[ "$project_type" == *"web"* ]]; then
        echo "backend/\\nfrontend/\\ntests/"
    else
        echo "src/\\ntests/"
    fi
}

# Emit build/test command suggestions for the detected language.
get_commands_for_language() {
    local lang="$1"

    case "$lang" in
        *"Python"*)
            echo "cd src && pytest && ruff check ."
            ;;
        *"Rust"*)
            echo "cargo test && cargo clippy"
            ;;
        *"JavaScript"*|*"TypeScript"*)
            # "&&" is escaped so the later sed substitution does not
            # interpret "&" as the matched-pattern backreference.
            echo "npm test \\&\\& npm run lint"
            ;;
        *)
            echo "# Add commands for $lang"
            ;;
    esac
}

# Emit a one-line code-style note for the detected language.
get_language_conventions() {
    local lang="$1"
    echo "$lang: Follow standard conventions"
}

# Build a brand-new agent context file in $temp_file by filling in the
# placeholders of $TEMPLATE_FILE. Returns 1 on any failure; the caller is
# responsible for moving $temp_file into place.
create_new_agent_file() {
    local target_file="$1"
    local temp_file="$2"
    local project_name="$3"
    local current_date="$4"

    if [[ ! -f "$TEMPLATE_FILE" ]]; then
        log_error "Template not found at $TEMPLATE_FILE"
        return 1
    fi

    if [[ ! -r "$TEMPLATE_FILE" ]]; then
        log_error "Template file is not readable: $TEMPLATE_FILE"
        return 1
    fi

    log_info "Creating new agent context file from template..."

    if ! 
cp "$TEMPLATE_FILE" "$temp_file"; then
        log_error "Failed to copy template file"
        return 1
    fi

    # Replace template placeholders
    local project_structure
    project_structure=$(get_project_structure "$NEW_PROJECT_TYPE")

    local commands
    commands=$(get_commands_for_language "$NEW_LANG")

    local language_conventions
    language_conventions=$(get_language_conventions "$NEW_LANG")

    # Perform substitutions with error checking using safer approach
    # Escape special characters for sed by using a different delimiter or escaping
    local escaped_lang=$(printf '%s\n' "$NEW_LANG" | sed 's/[\[\.*^$()+{}|]/\\&/g')
    local escaped_framework=$(printf '%s\n' "$NEW_FRAMEWORK" | sed 's/[\[\.*^$()+{}|]/\\&/g')
    local escaped_branch=$(printf '%s\n' "$CURRENT_BRANCH" | sed 's/[\[\.*^$()+{}|]/\\&/g')

    # Build technology stack and recent change strings conditionally
    # (four cases: both lang+framework, lang only, framework only, neither)
    local tech_stack
    if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
        tech_stack="- $escaped_lang + $escaped_framework ($escaped_branch)"
    elif [[ -n "$escaped_lang" ]]; then
        tech_stack="- $escaped_lang ($escaped_branch)"
    elif [[ -n "$escaped_framework" ]]; then
        tech_stack="- $escaped_framework ($escaped_branch)"
    else
        tech_stack="- ($escaped_branch)"
    fi

    local recent_change
    if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
        recent_change="- $escaped_branch: Added $escaped_lang + $escaped_framework"
    elif [[ -n "$escaped_lang" ]]; then
        recent_change="- $escaped_branch: Added $escaped_lang"
    elif [[ -n "$escaped_framework" ]]; then
        recent_change="- $escaped_branch: Added $escaped_framework"
    else
        recent_change="- $escaped_branch: Added"
    fi

    # One sed expression per template placeholder; "|" is the delimiter, so
    # values containing "|" would break these — presumably plan fields never
    # contain "|" (TODO confirm upstream).
    local substitutions=(
        "s|\[PROJECT NAME\]|$project_name|"
        "s|\[DATE\]|$current_date|"
        "s|\[EXTRACTED FROM ALL PLAN.MD FILES\]|$tech_stack|"
        "s|\[ACTUAL STRUCTURE FROM PLANS\]|$project_structure|g"
        "s|\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]|$commands|"
        "s|\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN 
USE\]|$language_conventions|"
        "s|\[LAST 3 FEATURES AND WHAT THEY ADDED\]|$recent_change|"
    )

    for substitution in "${substitutions[@]}"; do
        if ! sed -i.bak -e "$substitution" "$temp_file"; then
            log_error "Failed to perform substitution: $substitution"
            rm -f "$temp_file" "$temp_file.bak"
            return 1
        fi
    done

    # Convert \n sequences to actual newlines
    newline=$(printf '\n')
    sed -i.bak2 "s/\\\\n/${newline}/g" "$temp_file"

    # Clean up backup files
    rm -f "$temp_file.bak" "$temp_file.bak2"

    return 0
}




# Merge newly parsed plan data into an existing agent context file in a
# single pass, rewriting the file atomically via a temp file. Adds new
# entries to "## Active Technologies" and "## Recent Changes" (creating
# those sections at end-of-file when absent) and refreshes the
# "**Last updated**" date.
update_existing_agent_file() {
    local target_file="$1"
    local current_date="$2"

    log_info "Updating existing agent context file..."

    # Use a single temporary file for atomic update
    local temp_file
    temp_file=$(mktemp) || {
        log_error "Failed to create temporary file"
        return 1
    }

    # Process the file in one pass
    local tech_stack=$(format_technology_stack "$NEW_LANG" "$NEW_FRAMEWORK")
    local new_tech_entries=()
    local new_change_entry=""

    # Prepare new technology entries (skipped when already present in the
    # target file, per the grep guards)
    if [[ -n "$tech_stack" ]] && ! grep -q "$tech_stack" "$target_file"; then
        new_tech_entries+=("- $tech_stack ($CURRENT_BRANCH)")
    fi

    if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]] && ! 
grep -q "$NEW_DB" "$target_file"; then
        new_tech_entries+=("- $NEW_DB ($CURRENT_BRANCH)")
    fi

    # Prepare new change entry
    if [[ -n "$tech_stack" ]]; then
        new_change_entry="- $CURRENT_BRANCH: Added $tech_stack"
    elif [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]]; then
        new_change_entry="- $CURRENT_BRANCH: Added $NEW_DB"
    fi

    # Check if sections exist in the file
    local has_active_technologies=0
    local has_recent_changes=0

    if grep -q "^## Active Technologies" "$target_file" 2>/dev/null; then
        has_active_technologies=1
    fi

    if grep -q "^## Recent Changes" "$target_file" 2>/dev/null; then
        has_recent_changes=1
    fi

    # Process file line by line.
    # State machine: in_tech_section / in_changes_section track which "##"
    # section the current line belongs to so new entries can be spliced in
    # at the right place.
    local in_tech_section=false
    local in_changes_section=false
    local tech_entries_added=false
    local changes_entries_added=false
    local existing_changes_count=0
    local file_ended=false

    # "|| [[ -n "$line" ]]" keeps a final line without trailing newline.
    while IFS= read -r line || [[ -n "$line" ]]; do
        # Handle Active Technologies section
        if [[ "$line" == "## Active Technologies" ]]; then
            echo "$line" >> "$temp_file"
            in_tech_section=true
            continue
        elif [[ $in_tech_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
            # Add new tech entries before closing the section
            if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
                printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
                tech_entries_added=true
            fi
            echo "$line" >> "$temp_file"
            in_tech_section=false
            continue
        elif [[ $in_tech_section == true ]] && [[ -z "$line" ]]; then
            # Add new tech entries before empty line in tech section
            if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
                printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
                tech_entries_added=true
            fi
            echo "$line" >> "$temp_file"
            continue
        fi

        # Handle Recent Changes section
        if [[ "$line" == "## Recent Changes" ]]; then
            echo "$line" >> "$temp_file"
            # Add new change entry right after the heading
if [[ -n "$new_change_entry" ]]; then
                echo "$new_change_entry" >> "$temp_file"
            fi
            in_changes_section=true
            changes_entries_added=true
            continue
        elif [[ $in_changes_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
            echo "$line" >> "$temp_file"
            in_changes_section=false
            continue
        elif [[ $in_changes_section == true ]] && [[ "$line" == "- "* ]]; then
            # Keep only first 2 existing changes
            # (so the section holds at most 3 entries after the new one)
            if [[ $existing_changes_count -lt 2 ]]; then
                echo "$line" >> "$temp_file"
                ((existing_changes_count++))
            fi
            continue
        fi

        # Update timestamp
        if [[ "$line" =~ \*\*Last\ updated\*\*:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
            echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
        else
            echo "$line" >> "$temp_file"
        fi
    done < "$target_file"

    # Post-loop check: if we're still in the Active Technologies section and haven't added new entries
    # (i.e. the section ran to end-of-file), append them now.
    if [[ $in_tech_section == true ]] && [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
        printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
        tech_entries_added=true
    fi

    # If sections don't exist, add them at the end of the file
    if [[ $has_active_technologies -eq 0 ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
        echo "" >> "$temp_file"
        echo "## Active Technologies" >> "$temp_file"
        printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
        tech_entries_added=true
    fi

    if [[ $has_recent_changes -eq 0 ]] && [[ -n "$new_change_entry" ]]; then
        echo "" >> "$temp_file"
        echo "## Recent Changes" >> "$temp_file"
        echo "$new_change_entry" >> "$temp_file"
        changes_entries_added=true
    fi

    # Move temp file to target atomically
    if ! 
mv "$temp_file" "$target_file"; then
        log_error "Failed to update target file"
        rm -f "$temp_file"
        return 1
    fi

    return 0
}
#==============================================================================
# Main Agent File Update Function
#==============================================================================

# Create or update one agent context file. Creates the parent directory and,
# depending on whether $target_file exists, either instantiates it from the
# template (create_new_agent_file) or merges new plan data into it
# (update_existing_agent_file). Returns non-zero on any failure.
update_agent_file() {
    local target_file="$1"
    local agent_name="$2"

    if [[ -z "$target_file" ]] || [[ -z "$agent_name" ]]; then
        log_error "update_agent_file requires target_file and agent_name parameters"
        return 1
    fi

    log_info "Updating $agent_name context file: $target_file"

    local project_name
    project_name=$(basename "$REPO_ROOT")
    local current_date
    current_date=$(date +%Y-%m-%d)

    # Create directory if it doesn't exist
    local target_dir
    target_dir=$(dirname "$target_file")
    if [[ ! -d "$target_dir" ]]; then
        if ! mkdir -p "$target_dir"; then
            log_error "Failed to create directory: $target_dir"
            return 1
        fi
    fi

    if [[ ! -f "$target_file" ]]; then
        # Create new file from template
        local temp_file
        temp_file=$(mktemp) || {
            log_error "Failed to create temporary file"
            return 1
        }

        if create_new_agent_file "$target_file" "$temp_file" "$project_name" "$current_date"; then
            if mv "$temp_file" "$target_file"; then
                log_success "Created new $agent_name context file"
            else
                log_error "Failed to move temporary file to $target_file"
                rm -f "$temp_file"
                return 1
            fi
        else
            log_error "Failed to create new agent file"
            rm -f "$temp_file"
            return 1
        fi
    else
        # Update existing file
        if [[ ! -r "$target_file" ]]; then
            log_error "Cannot read existing file: $target_file"
            return 1
        fi

        if [[ ! 
-w "$target_file" ]]; then
            log_error "Cannot write to existing file: $target_file"
            return 1
        fi

        if update_existing_agent_file "$target_file" "$current_date"; then
            log_success "Updated existing $agent_name context file"
        else
            log_error "Failed to update existing agent file"
            return 1
        fi
    fi

    return 0
}

#==============================================================================
# Agent Selection and Processing
#==============================================================================

# Dispatch to update_agent_file for a single, explicitly named agent type.
# Exits with an error for unrecognized agent types.
update_specific_agent() {
    local agent_type="$1"

    case "$agent_type" in
        claude)
            update_agent_file "$CLAUDE_FILE" "Claude Code"
            ;;
        gemini)
            update_agent_file "$GEMINI_FILE" "Gemini CLI"
            ;;
        copilot)
            update_agent_file "$COPILOT_FILE" "GitHub Copilot"
            ;;
        cursor-agent)
            update_agent_file "$CURSOR_FILE" "Cursor IDE"
            ;;
        qwen)
            update_agent_file "$QWEN_FILE" "Qwen Code"
            ;;
        opencode)
            update_agent_file "$AGENTS_FILE" "opencode"
            ;;
        codex)
            update_agent_file "$AGENTS_FILE" "Codex CLI"
            ;;
        windsurf)
            update_agent_file "$WINDSURF_FILE" "Windsurf"
            ;;
        kilocode)
            update_agent_file "$KILOCODE_FILE" "Kilo Code"
            ;;
        auggie)
            update_agent_file "$AUGGIE_FILE" "Auggie CLI"
            ;;
        roo)
            update_agent_file "$ROO_FILE" "Roo Code"
            ;;
        codebuddy)
            update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
            ;;
        amp)
            update_agent_file "$AMP_FILE" "Amp"
            ;;
        q)
            update_agent_file "$Q_FILE" "Amazon Q Developer CLI"
            ;;
        *)
            log_error "Unknown agent type '$agent_type'"
            # Fix: list must match the case arms above; "codebuddy" was missing.
            log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|q"
            exit 1
            ;;
    esac
}

# Update every agent context file that already exists in the repository;
# falls back to creating a default Claude file when none exist.
update_all_existing_agents() {
    local found_agent=false

    # Check each possible agent file and update if it exists
    if [[ -f "$CLAUDE_FILE" ]]; then
        update_agent_file "$CLAUDE_FILE" "Claude Code"
        found_agent=true
    fi

    if [[ -f "$GEMINI_FILE" ]]; then
        update_agent_file "$GEMINI_FILE" "Gemini CLI"
found_agent=true + fi + + if [[ -f "$COPILOT_FILE" ]]; then + update_agent_file "$COPILOT_FILE" "GitHub Copilot" + found_agent=true + fi + + if [[ -f "$CURSOR_FILE" ]]; then + update_agent_file "$CURSOR_FILE" "Cursor IDE" + found_agent=true + fi + + if [[ -f "$QWEN_FILE" ]]; then + update_agent_file "$QWEN_FILE" "Qwen Code" + found_agent=true + fi + + if [[ -f "$AGENTS_FILE" ]]; then + update_agent_file "$AGENTS_FILE" "Codex/opencode" + found_agent=true + fi + + if [[ -f "$WINDSURF_FILE" ]]; then + update_agent_file "$WINDSURF_FILE" "Windsurf" + found_agent=true + fi + + if [[ -f "$KILOCODE_FILE" ]]; then + update_agent_file "$KILOCODE_FILE" "Kilo Code" + found_agent=true + fi + + if [[ -f "$AUGGIE_FILE" ]]; then + update_agent_file "$AUGGIE_FILE" "Auggie CLI" + found_agent=true + fi + + if [[ -f "$ROO_FILE" ]]; then + update_agent_file "$ROO_FILE" "Roo Code" + found_agent=true + fi + + if [[ -f "$CODEBUDDY_FILE" ]]; then + update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI" + found_agent=true + fi + + if [[ -f "$Q_FILE" ]]; then + update_agent_file "$Q_FILE" "Amazon Q Developer CLI" + found_agent=true + fi + + # If no agent files exist, create a default Claude file + if [[ "$found_agent" == false ]]; then + log_info "No existing agent files found, creating default Claude file..." 
+ update_agent_file "$CLAUDE_FILE" "Claude Code" + fi +} +print_summary() { + echo + log_info "Summary of changes:" + + if [[ -n "$NEW_LANG" ]]; then + echo " - Added language: $NEW_LANG" + fi + + if [[ -n "$NEW_FRAMEWORK" ]]; then + echo " - Added framework: $NEW_FRAMEWORK" + fi + + if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then + echo " - Added database: $NEW_DB" + fi + + echo + + log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|codebuddy|q]" +} + +#============================================================================== +# Main Execution +#============================================================================== + +main() { + # Validate environment before proceeding + validate_environment + + log_info "=== Updating agent context files for feature $CURRENT_BRANCH ===" + + # Parse the plan file to extract project information + if ! parse_plan_data "$NEW_PLAN"; then + log_error "Failed to parse plan data" + exit 1 + fi + + # Process based on agent type argument + local success=true + + if [[ -z "$AGENT_TYPE" ]]; then + # No specific agent provided - update all existing agent files + log_info "No agent specified, updating all existing agent files..." + if ! update_all_existing_agents; then + success=false + fi + else + # Specific agent provided - update only that agent + log_info "Updating specific agent: $AGENT_TYPE" + if ! 
update_specific_agent "$AGENT_TYPE"; then
            success=false
        fi
    fi

    # Print summary
    print_summary

    if [[ "$success" == true ]]; then
        log_success "Agent context update completed successfully"
        exit 0
    else
        log_error "Agent context update completed with errors"
        exit 1
    fi
}

# Execute main function if script is run directly
# (allows the file to be sourced for testing without side effects)
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi

diff --git a/test-prompt/.specify/templates/agent-file-template.md b/test-prompt/.specify/templates/agent-file-template.md
new file mode 100644
index 0000000..9026fc8
--- /dev/null
+++ b/test-prompt/.specify/templates/agent-file-template.md
@@ -0,0 +1,29 @@
+# [PROJECT NAME] Development Guidelines
+
+Auto-generated from all feature plans. Last updated: [DATE]
+
+## Active Technologies
+
+[EXTRACTED FROM ALL PLAN.MD FILES]
+
+## Project Structure
+
+```text
+[ACTUAL STRUCTURE FROM PLANS]
+```
+
+## Commands
+
+[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES]
+
+## Code Style
+
+[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE]
+
+## Recent Changes
+
+[LAST 3 FEATURES AND WHAT THEY ADDED]
+
+
+
diff --git a/test-prompt/.specify/templates/checklist-template.md b/test-prompt/.specify/templates/checklist-template.md
new file mode 100644
index 0000000..806657d
--- /dev/null
+++ b/test-prompt/.specify/templates/checklist-template.md
@@ -0,0 +1,40 @@
+# [CHECKLIST TYPE] Checklist: [FEATURE NAME]
+
+**Purpose**: [Brief description of what this checklist covers]
+**Created**: [DATE]
+**Feature**: [Link to spec.md or relevant documentation]
+
+**Note**: This checklist is generated by the `/speckit.checklist` command based on feature context and requirements.
+ + + +## [Category 1] + +- [ ] CHK001 First checklist item with clear action +- [ ] CHK002 Second checklist item +- [ ] CHK003 Third checklist item + +## [Category 2] + +- [ ] CHK004 Another category item +- [ ] CHK005 Item with specific criteria +- [ ] CHK006 Final item in this category + +## Notes + +- Check items off as completed: `[x]` +- Add comments or findings inline +- Link to relevant resources or documentation +- Items are numbered sequentially for easy reference diff --git a/test-prompt/.specify/templates/plan-template.md b/test-prompt/.specify/templates/plan-template.md new file mode 100644 index 0000000..a86b481 --- /dev/null +++ b/test-prompt/.specify/templates/plan-template.md @@ -0,0 +1,134 @@ +# Implementation Plan: [Feature] + +**Branch**: `[###-feature-name]` | **Date**: [Date] | **Specification**: [Link] +**Input**: Feature specification from `/specs/[###-feature-name]/spec.md` + +**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for execution workflow. 
+ +## Overview + +[Extract from feature specification: Key requirements + technical approach from research] + +## Technical Context + + + +**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION] +**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION] +**Storage**: [If applicable, e.g., PostgreSQL, CoreData, File or N/A] +**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION] +**Target Platform**: [e.g., Linux Server, iOS 15+, WASM or NEEDS CLARIFICATION] +**Project Type**: [single/web/mobile - determines source structure] +**Performance Goals**: [Domain-specific, e.g., 1000 req/s, 10k lines/sec, 60 fps or NEEDS CLARIFICATION] +**Constraints**: [Domain-specific, e.g., \<200ms p95, \<100MB memory, offline support or NEEDS CLARIFICATION] +**Scale/Scope**: [Domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION] + +## Constitution Check + +*Gate: Must pass before Phase 0 research. Re-verify after Phase 1 design.* + +**Reference**: Verify the following based on the 5 principles in `.specify/memory/constitution.md` + +### I. Code Quality Principle + +- [ ] Are readability and documentation requirements met? +- [ ] Are naming conventions clearly defined? +- [ ] Is code complexity within reasonable bounds? + +### II. Test-Driven Development + +- [ ] Is a test-first development process planned? +- [ ] Is there a plan for contract tests, integration tests, and unit tests? +- [ ] Is a test coverage target (80% or more) set? + +### III. UX Consistency + +- [ ] Are consistent UI patterns defined? +- [ ] Is error message clarity ensured? +- [ ] Is accessibility considered? + +### IV. Performance Standards + +- [ ] Are API response time targets (p95 < 200ms) considered? +- [ ] Is database optimization planned? +- [ ] Are frontend load time targets set (if applicable)? + +### V. Maintainability and Extensibility + +- [ ] Is modular, loosely-coupled design adopted? 
+- [ ] Is the configuration management policy clear? +- [ ] Is a versioning strategy defined? + +**Violation Justification**: Record in the "Complexity Tracking" table in this section + +## Project Structure + +### Documentation (for this feature) + +```text +specs/[###-feature]/ +├── plan.md # This file (output of /speckit.plan command) +├── research.md # Phase 0 output (/speckit.plan command) +├── data-model.md # Phase 1 output (/speckit.plan command) +├── quickstart.md # Phase 1 output (/speckit.plan command) +├── contracts/ # Phase 1 output (/speckit.plan command) +└── tasks.md # Phase 2 output (/speckit.tasks command - not created by /speckit.plan) +``` + +### Source Code (repository root) + + + +```text +# [Delete if unused] Option 1: Single Project (default) +src/ +├── models/ +├── services/ +├── cli/ +└── lib/ + +tests/ +├── contract/ +├── integration/ +└── unit/ + +# [Delete if unused] Option 2: Web Application (when "frontend" + "backend" detected) +backend/ +├── src/ +│ ├── models/ +│ ├── services/ +│ └── api/ +└── tests/ + +frontend/ +├── src/ +│ ├── components/ +│ ├── pages/ +│ └── services/ +└── tests/ + +# [Delete if unused] Option 3: Mobile + API (when "iOS/Android" detected) +api/ +└── [same as backend above] + +ios/ or android/ +└── [platform-specific structure: feature modules, UI flows, platform tests] +``` + +**Structure Decision**: [Document chosen structure and reference actual directories captured above] + +## Complexity Tracking + +> **Only fill in if there are violations requiring justification in the Constitution Check** + +| Violation | Reason Needed | Why Simpler Alternative Was Rejected | +| -------------------------- | ------------------ | -------------------------------------- | +| [e.g., 4th project] | [Current need] | [Why 3 projects are insufficient] | +| [e.g., Repository pattern] | [Specific problem] | [Why direct DB access is insufficient] | diff --git a/test-prompt/.specify/templates/spec-template.md 
b/test-prompt/.specify/templates/spec-template.md new file mode 100644 index 0000000..f30c2f0 --- /dev/null +++ b/test-prompt/.specify/templates/spec-template.md @@ -0,0 +1,115 @@ +# Feature Specification: [Feature Name] + +**Feature Branch**: `[###-feature-name]` +**Created**: [Date] +**Status**: Draft +**Input**: User description: "$ARGUMENTS" + +## User Scenarios and Tests *(Required)* + + + +### User Story 1 - [Concise Title] (Priority: P1) + +[Describe this user journey in plain language] + +**Reason for this priority**: [Explain the value and why it has this priority level] + +**Independent testing**: \[Explain how this can be tested independently - e.g., "Can be fully tested by [specific action] and provides [specific value]"\] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected result] +1. **Given** [initial state], **When** [action], **Then** [expected result] + +______________________________________________________________________ + +### User Story 2 - [Concise Title] (Priority: P2) + +[Describe this user journey in plain language] + +**Reason for this priority**: [Explain the value and why it has this priority level] + +**Independent testing**: [Explain how this can be tested independently] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected result] + +______________________________________________________________________ + +### User Story 3 - [Concise Title] (Priority: P3) + +[Describe this user journey in plain language] + +**Reason for this priority**: [Explain the value and why it has this priority level] + +**Independent testing**: [Explain how this can be tested independently] + +**Acceptance Scenarios**: + +1. 
**Given** [initial state], **When** [action], **Then** [expected result] + +______________________________________________________________________ + +[Add additional user stories as needed, each with an assigned priority] + +### Edge Cases + + + +- What happens when [boundary condition]? +- How does the system handle [error scenario]? + +## Requirements *(Required)* + + + +### Functional Requirements + +- **FR-001**: The system must [specific function, e.g., "allow users to create accounts"] +- **FR-002**: The system must [specific function, e.g., "validate email addresses"] +- **FR-003**: Users must be able to [important interaction, e.g., "reset their passwords"] +- **FR-004**: The system must [data requirement, e.g., "persist user settings"] +- **FR-005**: The system must [behavior, e.g., "log all security events"] + +*Example of marking unclear requirements:* + +- **FR-006**: The system must authenticate users via [NEEDS CLARIFICATION: Authentication method not specified - email/password, SSO, OAuth?] 
+- **FR-007**: The system must retain user data for [NEEDS CLARIFICATION: Retention period not specified] + +### Key Entities *(Include if the feature handles data)* + +- **[Entity 1]**: [What it represents, key attributes without implementation details] +- **[Entity 2]**: [What it represents, relationships with other entities] + +## Success Criteria *(Required)* + + + +### Measurable Outcomes + +- **SC-001**: [Measurable metric, e.g., "Users can complete account creation within 2 minutes"] +- **SC-002**: [Measurable metric, e.g., "System can handle 1000 concurrent users without performance degradation"] +- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete the main task on first attempt"] +- **SC-004**: \[Business metric, e.g., "Reduce support tickets related to [X] by 50%"\] diff --git a/test-prompt/.specify/templates/tasks-template.md b/test-prompt/.specify/templates/tasks-template.md new file mode 100644 index 0000000..47bcfed --- /dev/null +++ b/test-prompt/.specify/templates/tasks-template.md @@ -0,0 +1,250 @@ +______________________________________________________________________ + +## description: "Task list template for feature implementation" + +# Tasks: [FEATURE NAME] + +**Input**: Design documents from `/specs/[###-feature-name]/` +**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/ + +**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification. + +**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story. + +## Format: `[ID] [P?] 
[Story] Description` + +- **[P]**: Can run in parallel (different files, no dependencies) +- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3) +- Include exact file paths in descriptions + +## Path Conventions + +- **Single project**: `src/`, `tests/` at repository root +- **Web app**: `backend/src/`, `frontend/src/` +- **Mobile**: `api/src/`, `ios/src/` or `android/src/` +- Paths shown below assume single project - adjust based on plan.md structure + + + +## Phase 1: Setup (Shared Infrastructure) + +**Purpose**: Project initialization and basic structure + +- [ ] T001 Create project structure per implementation plan +- [ ] T002 Initialize [language] project with [framework] dependencies +- [ ] T003 [P] Configure linting and formatting tools + +______________________________________________________________________ + +## Phase 2: Foundational (Blocking Prerequisites) + +**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented + +**⚠️ CRITICAL**: No user story work can begin until this phase is complete + +Examples of foundational tasks (adjust based on your project): + +- [ ] T004 Setup database schema and migrations framework +- [ ] T005 [P] Implement authentication/authorization framework +- [ ] T006 [P] Setup API routing and middleware structure +- [ ] T007 Create base models/entities that all stories depend on +- [ ] T008 Configure error handling and logging infrastructure +- [ ] T009 Setup environment configuration management + +**Checkpoint**: Foundation ready - user story implementation can now begin in parallel + +______________________________________________________________________ + +## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️ + +> **NOTE: Write these tests FIRST, ensure they FAIL 
before implementation** + +- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test\_[name].py +- [ ] T011 [P] [US1] Integration test for [user journey] in tests/integration/test\_[name].py + +### Implementation for User Story 1 + +- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py +- [ ] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py +- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013) +- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T016 [US1] Add validation and error handling +- [ ] T017 [US1] Add logging for user story 1 operations + +**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently + +______________________________________________________________________ + +## Phase 4: User Story 2 - [Title] (Priority: P2) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️ + +- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test\_[name].py +- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test\_[name].py + +### Implementation for User Story 2 + +- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py +- [ ] T021 [US2] Implement [Service] in src/services/[service].py +- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T023 [US2] Integrate with User Story 1 components (if needed) + +**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently + +______________________________________________________________________ + +## Phase 5: User Story 3 - [Title] (Priority: P3) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 3 (OPTIONAL - only if tests 
requested) ⚠️ + +- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test\_[name].py +- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test\_[name].py + +### Implementation for User Story 3 + +- [ ] T026 [P] [US3] Create [Entity] model in src/models/[entity].py +- [ ] T027 [US3] Implement [Service] in src/services/[service].py +- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py + +**Checkpoint**: All user stories should now be independently functional + +______________________________________________________________________ + +[Add more user story phases as needed, following the same pattern] + +______________________________________________________________________ + +## Phase N: Polish & Cross-Cutting Concerns + +**Purpose**: Improvements that affect multiple user stories + +- [ ] TXXX [P] Documentation updates in docs/ +- [ ] TXXX Code cleanup and refactoring +- [ ] TXXX Performance optimization across all stories +- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/ +- [ ] TXXX Security hardening +- [ ] TXXX Run quickstart.md validation + +______________________________________________________________________ + +## Dependencies & Execution Order + +### Phase Dependencies + +- **Setup (Phase 1)**: No dependencies - can start immediately +- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories +- **User Stories (Phase 3+)**: All depend on Foundational phase completion + - User stories can then proceed in parallel (if staffed) + - Or sequentially in priority order (P1 → P2 → P3) +- **Polish (Final Phase)**: Depends on all desired user stories being complete + +### User Story Dependencies + +- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories +- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable +- **User Story 3 (P3)**: Can start after 
Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable + +### Within Each User Story + +- Tests (if included) MUST be written and FAIL before implementation +- Models before services +- Services before endpoints +- Core implementation before integration +- Story complete before moving to next priority + +### Parallel Opportunities + +- All Setup tasks marked [P] can run in parallel +- All Foundational tasks marked [P] can run in parallel (within Phase 2) +- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows) +- All tests for a user story marked [P] can run in parallel +- Models within a story marked [P] can run in parallel +- Different user stories can be worked on in parallel by different team members + +______________________________________________________________________ + +## Parallel Example: User Story 1 + +```bash +# Launch all tests for User Story 1 together (if tests requested): +Task: "Contract test for [endpoint] in tests/contract/test_[name].py" +Task: "Integration test for [user journey] in tests/integration/test_[name].py" + +# Launch all models for User Story 1 together: +Task: "Create [Entity1] model in src/models/[entity1].py" +Task: "Create [Entity2] model in src/models/[entity2].py" +``` + +______________________________________________________________________ + +## Implementation Strategy + +### MVP First (User Story 1 Only) + +1. Complete Phase 1: Setup +1. Complete Phase 2: Foundational (CRITICAL - blocks all stories) +1. Complete Phase 3: User Story 1 +1. **STOP and VALIDATE**: Test User Story 1 independently +1. Deploy/demo if ready + +### Incremental Delivery + +1. Complete Setup + Foundational → Foundation ready +1. Add User Story 1 → Test independently → Deploy/Demo (MVP!) +1. Add User Story 2 → Test independently → Deploy/Demo +1. Add User Story 3 → Test independently → Deploy/Demo +1. 
Each story adds value without breaking previous stories + +### Parallel Team Strategy + +With multiple developers: + +1. Team completes Setup + Foundational together +1. Once Foundational is done: + - Developer A: User Story 1 + - Developer B: User Story 2 + - Developer C: User Story 3 +1. Stories complete and integrate independently + +______________________________________________________________________ + +## Notes + +- [P] tasks = different files, no dependencies +- [Story] label maps task to specific user story for traceability +- Each user story should be independently completable and testable +- Verify tests fail before implementing +- Commit after each task or logical group +- Stop at any checkpoint to validate story independently +- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence diff --git a/tests/hyh/test_init.py b/tests/hyh/test_init.py new file mode 100644 index 0000000..642da95 --- /dev/null +++ b/tests/hyh/test_init.py @@ -0,0 +1,122 @@ +"""Tests for hyh init command.""" + +from pathlib import Path + + +def test_init_creates_plugin_directory(tmp_path: Path): + """hyh init creates .claude/plugins/hyh/ structure.""" + from hyh.init import init_project + + init_project(tmp_path) + + plugin_dir = tmp_path / ".claude" / "plugins" / "hyh" + assert plugin_dir.exists() + assert (plugin_dir / "plugin.json").exists() + assert (plugin_dir / "commands" / "hyh.md").exists() + assert (plugin_dir / "hooks" / "hooks.json").exists() + + +def test_init_creates_hyh_directory(tmp_path: Path): + """hyh init creates .hyh/ with config and templates.""" + from hyh.init import init_project + + init_project(tmp_path) + + hyh_dir = tmp_path / ".hyh" + assert hyh_dir.exists() + assert (hyh_dir / "config.json").exists() + assert (hyh_dir / "templates" / "spec-template.md").exists() + + +def test_init_config_has_required_fields(tmp_path: Path): + """Config file has main_branch and next_feature_number.""" + import json + + from hyh.init 
import init_project + + init_project(tmp_path) + + config = json.loads((tmp_path / ".hyh" / "config.json").read_text()) + assert "main_branch" in config + assert "next_feature_number" in config + assert config["next_feature_number"] == 1 + + +def test_init_returns_init_result(tmp_path: Path): + """init_project returns InitResult with correct paths.""" + from hyh.init import InitResult, init_project + + result = init_project(tmp_path) + + assert isinstance(result, InitResult) + assert result.project_root == tmp_path.resolve() + assert result.plugin_dir == tmp_path / ".claude" / "plugins" / "hyh" + assert result.hyh_dir == tmp_path / ".hyh" + + +def test_init_copies_all_templates(tmp_path: Path): + """init_project copies all template files.""" + from hyh.init import init_project + + init_project(tmp_path) + + templates_dir = tmp_path / ".hyh" / "templates" + assert (templates_dir / "spec-template.md").exists() + assert (templates_dir / "plan-template.md").exists() + assert (templates_dir / "tasks-template.md").exists() + assert (templates_dir / "checklist-template.md").exists() + + +def test_init_copies_skills(tmp_path: Path): + """init_project copies skills directory.""" + from hyh.init import init_project + + init_project(tmp_path) + + skills_dir = tmp_path / ".claude" / "plugins" / "hyh" / "skills" + assert skills_dir.exists() + assert (skills_dir / "spec-driven-dev.md").exists() + + +def test_init_idempotent(tmp_path: Path): + """Running init twice does not fail.""" + from hyh.init import init_project + + result1 = init_project(tmp_path) + result2 = init_project(tmp_path) + + assert result1.plugin_dir == result2.plugin_dir + assert result1.hyh_dir == result2.hyh_dir + + +def test_init_detects_main_branch_from_git(tmp_path: Path): + """init_project detects main branch when in git repo.""" + import subprocess + + from hyh.init import init_project + + # Initialize a git repo with 'master' branch + subprocess.run(["git", "init"], cwd=tmp_path, capture_output=True) + 
subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=tmp_path, + capture_output=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=tmp_path, + capture_output=True, + ) + # Create initial commit to establish branch + (tmp_path / "README.md").write_text("# Test") + subprocess.run(["git", "add", "."], cwd=tmp_path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=tmp_path, + capture_output=True, + ) + + result = init_project(tmp_path) + + # Should detect master (default git branch name) + assert result.main_branch in ("main", "master") diff --git a/tests/hyh/test_integration_workflow.py b/tests/hyh/test_integration_workflow.py new file mode 100644 index 0000000..8828ed5 --- /dev/null +++ b/tests/hyh/test_integration_workflow.py @@ -0,0 +1,112 @@ +"""Integration test for full hyh workflow.""" + +import subprocess +from pathlib import Path + +import pytest + + +@pytest.mark.slow +def test_full_workflow_specify_to_implement(tmp_path: Path): + """Test complete workflow from init through task execution.""" + # 1. Create git repo + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + check=True, + capture_output=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + check=True, + capture_output=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, check=True, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + check=True, + capture_output=True, + ) + + # 2. Init hyh + from hyh.init import init_project + + init_project(main_repo) + assert (main_repo / ".claude" / "plugins" / "hyh" / "plugin.json").exists() + assert (main_repo / ".hyh" / "config.json").exists() + + # 3. 
Create worktree + from hyh.worktree import create_worktree + + wt_result = create_worktree(main_repo, "1-test-feature") + worktree = wt_result.worktree_path + assert worktree.exists() + + # 4. Check workflow status (should be "none") + from hyh.workflow import detect_phase + + phase = detect_phase(worktree) + assert phase.phase == "none" + assert phase.next_action == "specify" + + # 5. Create spec manually (simulating /hyh specify) + specs_dir = worktree / "specs" + specs_dir.mkdir() + (specs_dir / "spec.md").write_text("# Test Feature Spec") + + phase = detect_phase(worktree) + assert phase.phase == "specify" + assert phase.next_action == "plan" + + # 6. Create plan and tasks (simulating /hyh plan) + (specs_dir / "plan.md").write_text("# Implementation Plan") + (specs_dir / "tasks.md").write_text("""\ +## Phase 1: Setup + +- [ ] T001 Create project structure +- [ ] T002 [P] Initialize configuration + +## Phase 2: Core + +- [ ] T003 Implement main feature +""") + + phase = detect_phase(worktree) + assert phase.phase == "plan" + assert phase.next_action == "implement" + assert phase.tasks_total == 3 + assert phase.tasks_complete == 0 + + # 7. Parse tasks and verify structure + from hyh.plan import parse_speckit_tasks + + tasks = parse_speckit_tasks((specs_dir / "tasks.md").read_text()) + assert len(tasks.tasks) == 3 + assert tasks.tasks["T001"].phase == "Setup" + assert tasks.tasks["T003"].dependencies == ("T001", "T002") + + # 8. Convert to workflow state + state = tasks.to_workflow_state() + assert len(state.tasks) == 3 + + # 9. 
Simulate completion + (specs_dir / "tasks.md").write_text("""\ +## Phase 1: Setup + +- [x] T001 Create project structure +- [x] T002 [P] Initialize configuration + +## Phase 2: Core + +- [x] T003 Implement main feature +""") + + phase = detect_phase(worktree) + assert phase.phase == "complete" + assert phase.next_action is None diff --git a/tests/hyh/test_plan.py b/tests/hyh/test_plan.py index 1d6def4..994f7a1 100644 --- a/tests/hyh/test_plan.py +++ b/tests/hyh/test_plan.py @@ -375,3 +375,86 @@ def test_parse_markdown_plan_flexible_header_formats(): assert "auth-service" in plan.tasks assert "1.1" in plan.tasks assert len(plan.tasks) == 5 + + +def test_parse_speckit_checkbox_basic(): + """parse_speckit_tasks extracts tasks from checkbox format.""" + from hyh.plan import parse_speckit_tasks + + content = """\ +## Progress Management + +Mark completed tasks with [x]. + +## Phase 1: Setup + +- [ ] T001 Create project structure +- [x] T002 Initialize git repository + +## Phase 2: Core + +- [ ] T003 [P] Implement user model in src/models/user.py +- [ ] T004 [P] [US1] Add auth service in src/services/auth.py +""" + result = parse_speckit_tasks(content) + + assert len(result.tasks) == 4 + assert result.tasks["T001"].status == "pending" + assert result.tasks["T002"].status == "completed" + assert result.tasks["T003"].parallel is True + assert result.tasks["T004"].user_story == "US1" + assert "src/services/auth.py" in result.tasks["T004"].description + + # Verify phases are correctly extracted + assert result.phases == ("Setup", "Core") + + # Verify file_path is correctly extracted + assert result.tasks["T003"].file_path == "src/models/user.py" + assert result.tasks["T004"].file_path == "src/services/auth.py" + + +def test_parse_speckit_tasks_phase_dependencies(): + """Tasks in Phase N depend on all tasks in Phase N-1.""" + from hyh.plan import parse_speckit_tasks + + content = """\ +## Phase 1: Setup + +- [ ] T001 Setup task A +- [ ] T002 [P] Setup task B + +## Phase 2: 
Core + +- [ ] T003 Core task (depends on Phase 1) +""" + result = parse_speckit_tasks(content) + + # Phase 1 tasks have no dependencies + assert result.tasks["T001"].dependencies == () + assert result.tasks["T002"].dependencies == () + # Phase 2 tasks depend on all Phase 1 tasks + assert set(result.tasks["T003"].dependencies) == {"T001", "T002"} + + +def test_spec_task_list_to_workflow_state(): + """SpecTaskList converts to WorkflowState for daemon.""" + from hyh.plan import parse_speckit_tasks + from hyh.state import TaskStatus + + content = """\ +## Phase 1: Setup + +- [ ] T001 Create project +- [x] T002 Init git + +## Phase 2: Core + +- [ ] T003 [P] Build feature +""" + spec_tasks = parse_speckit_tasks(content) + state = spec_tasks.to_workflow_state() + + assert len(state.tasks) == 3 + assert state.tasks["T001"].status == TaskStatus.PENDING + assert state.tasks["T002"].status == TaskStatus.COMPLETED + assert state.tasks["T003"].dependencies == ("T001", "T002") diff --git a/tests/hyh/test_templates.py b/tests/hyh/test_templates.py new file mode 100644 index 0000000..80d8312 --- /dev/null +++ b/tests/hyh/test_templates.py @@ -0,0 +1,47 @@ +"""Tests for bundled templates.""" + +from importlib.resources import files + + +def test_templates_exist(): + """Bundled templates are accessible.""" + templates = files("hyh") / "templates" + + assert (templates / "spec-template.md").is_file() + assert (templates / "plan-template.md").is_file() + assert (templates / "tasks-template.md").is_file() + assert (templates / "checklist-template.md").is_file() + + +def test_spec_template_has_required_sections(): + """Spec template contains required sections.""" + content = (files("hyh") / "templates" / "spec-template.md").read_text() + + assert "## User Scenarios" in content or "User Scenarios" in content + assert "## Requirements" in content or "Requirements" in content + assert "## Success Criteria" in content or "Success Criteria" in content + + +def 
test_plan_template_has_required_sections(): + """Plan template contains required sections.""" + content = (files("hyh") / "templates" / "plan-template.md").read_text() + + assert "## Overview" in content + assert "## Technical Context" in content + assert "## Project Structure" in content + + +def test_tasks_template_has_required_sections(): + """Tasks template contains required sections.""" + content = (files("hyh") / "templates" / "tasks-template.md").read_text() + + assert "## Phase" in content or "Phase" in content + assert "## Dependencies" in content or "Dependencies" in content + + +def test_checklist_template_has_required_sections(): + """Checklist template contains required sections.""" + content = (files("hyh") / "templates" / "checklist-template.md").read_text() + + assert "## Notes" in content or "Notes" in content + assert "- [ ]" in content # Has checkbox items diff --git a/tests/hyh/test_workflow.py b/tests/hyh/test_workflow.py new file mode 100644 index 0000000..34cdca6 --- /dev/null +++ b/tests/hyh/test_workflow.py @@ -0,0 +1,91 @@ +"""Tests for workflow state management.""" + +from pathlib import Path + + +def test_detect_workflow_phase_no_spec(tmp_path: Path): + """detect_phase returns 'none' when no spec exists.""" + from hyh.workflow import detect_phase + + result = detect_phase(tmp_path) + assert result.phase == "none" + assert result.next_action == "specify" + + +def test_detect_workflow_phase_has_spec(tmp_path: Path): + """detect_phase returns 'specify' when spec exists but no plan.""" + from hyh.workflow import detect_phase + + specs_dir = tmp_path / "specs" + specs_dir.mkdir() + (specs_dir / "spec.md").write_text("# Spec") + + result = detect_phase(tmp_path) + assert result.phase == "specify" + assert result.next_action == "plan" + + +def test_detect_workflow_phase_has_tasks(tmp_path: Path): + """detect_phase returns 'plan' when tasks exist but not complete.""" + from hyh.workflow import detect_phase + + specs_dir = tmp_path / "specs" + 
specs_dir.mkdir()
+    (specs_dir / "spec.md").write_text("# Spec")
+    (specs_dir / "plan.md").write_text("# Plan")
+    (specs_dir / "tasks.md").write_text("""\
+## Phase 1: Setup
+
+- [ ] T001 Create project
+- [ ] T002 Init git
+""")
+
+    result = detect_phase(tmp_path)
+    assert result.phase == "plan"
+    assert result.next_action == "implement"
+
+
+def test_detect_workflow_phase_all_complete(tmp_path: Path):
+    """detect_phase returns 'complete' when all tasks complete."""
+    from hyh.workflow import detect_phase
+
+    specs_dir = tmp_path / "specs"
+    specs_dir.mkdir()
+    (specs_dir / "spec.md").write_text("# Spec")
+    (specs_dir / "plan.md").write_text("# Plan")
+    (specs_dir / "tasks.md").write_text("""\
+## Phase 1: Setup
+
+- [x] T001 Create project
+- [x] T002 Init git
+""")
+
+    result = detect_phase(tmp_path)
+    assert result.phase == "complete"
+    assert result.next_action is None
+
+
+def test_cli_workflow_status(tmp_path: Path, monkeypatch):
+    """hyh workflow status shows current phase."""
+    import sys
+    from io import StringIO
+
+    # Setup with spec only
+    specs_dir = tmp_path / "specs"
+    specs_dir.mkdir()
+    (specs_dir / "spec.md").write_text("# Spec")
+
+    monkeypatch.chdir(tmp_path)
+    monkeypatch.setenv("HYH_WORKTREE", str(tmp_path))
+
+    from hyh.client import main
+
+    monkeypatch.setattr(sys, "argv", ["hyh", "workflow", "status"])
+
+    stdout = StringIO()
+    monkeypatch.setattr(sys, "stdout", stdout)
+
+    main()
+
+    output = stdout.getvalue()
+    assert "specify" in output.lower() or "plan" in output.lower()
diff --git a/tests/hyh/test_worktree.py b/tests/hyh/test_worktree.py
new file mode 100644
index 0000000..609744a
--- /dev/null
+++ b/tests/hyh/test_worktree.py
@@ -0,0 +1,184 @@
+"""Tests for git worktree management (DHH-style)."""
+
+import subprocess
+from pathlib import Path
+
+
+def test_create_worktree_dhh_style(tmp_path: Path):
+    """create_worktree creates sibling directory with branch."""
+
+    # Setup: create a git repo
+ main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + + # Act + result = create_worktree(main_repo, "42-user-auth") + + # Assert + expected_path = tmp_path / "myproject--42-user-auth" + assert result.worktree_path == expected_path + assert expected_path.exists() + assert (expected_path / "README.md").exists() + + # Verify branch was created + branch_result = subprocess.run( + ["git", "branch", "--show-current"], + cwd=expected_path, + capture_output=True, + text=True, + check=True, + ) + assert branch_result.stdout.strip() == "42-user-auth" + + +def test_list_worktrees(tmp_path: Path): + """list_worktrees returns all DHH-style worktrees.""" + from hyh.worktree import create_worktree, list_worktrees + + # Setup main repo + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + + # Create two worktrees + 
create_worktree(main_repo, "42-feature-a") + create_worktree(main_repo, "43-feature-b") + + # Act + worktrees = list_worktrees(main_repo) + + # Assert + assert len(worktrees) == 2 + branches = {wt.branch_name for wt in worktrees} + assert branches == {"42-feature-a", "43-feature-b"} + + +def test_get_worktree_for_branch(tmp_path: Path): + """get_worktree returns path for a specific branch.""" + from hyh.worktree import create_worktree, get_worktree + + # Setup + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + create_worktree(main_repo, "42-user-auth") + + # Act + result = get_worktree(main_repo, "42-user-auth") + + # Assert + assert result is not None + assert result.branch_name == "42-user-auth" + assert result.worktree_path == tmp_path / "myproject--42-user-auth" + + +def test_cli_worktree_create(tmp_path: Path, monkeypatch): + """hyh worktree create creates worktree via CLI.""" + import sys + from io import StringIO + + # Setup main repo + main_repo = tmp_path / "myproject" + main_repo.mkdir() + subprocess.run(["git", "init"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "test@test.com"], + cwd=main_repo, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test"], + cwd=main_repo, + capture_output=True, + check=True, + ) + (main_repo / "README.md").write_text("# Project") + 
subprocess.run(["git", "add", "-A"], cwd=main_repo, capture_output=True, check=True) + subprocess.run( + ["git", "commit", "-m", "initial"], + cwd=main_repo, + capture_output=True, + check=True, + ) + + # Mock cwd to main_repo + monkeypatch.chdir(main_repo) + monkeypatch.setenv("HYH_WORKTREE", str(main_repo)) + + # Run CLI + from hyh.client import main + + monkeypatch.setattr(sys, "argv", ["hyh", "worktree", "create", "42-feature"]) + + stdout = StringIO() + monkeypatch.setattr(sys, "stdout", stdout) + + main() + + # Verify + expected_path = tmp_path / "myproject--42-feature" + assert expected_path.exists() + assert "Created" in stdout.getvalue() or "42-feature" in stdout.getvalue()