Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
742 changes: 400 additions & 342 deletions README.md

Large diffs are not rendered by default.

16 changes: 16 additions & 0 deletions adr_kit/mcp/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,9 @@ class CreateADRRequest(BaseModel):
""",
)
alternatives: str | None = Field(None, description="Alternative options considered")
skip_quality_gate: bool = Field(
False, description="Skip quality assessment (for testing or override)"
)
adr_dir: str = Field("docs/adr", description="ADR directory path")


Expand Down Expand Up @@ -150,6 +153,9 @@ class SupersedeADRRequest(BaseModel):
auto_approve: bool = Field(
False, description="Automatically approve new ADR without human review"
)
skip_quality_gate: bool = Field(
False, description="Skip quality assessment (for testing or override)"
)
adr_dir: str = Field("docs/adr", description="ADR directory path")


Expand All @@ -174,6 +180,16 @@ class PlanningContextRequest(BaseModel):
adr_dir: str = Field("docs/adr", description="ADR directory path")


class DecisionGuidanceRequest(BaseModel):
    """Parameters for getting decision quality guidance.

    Request model for the MCP tool that returns guidance on writing
    high-quality ADR decisions, optionally tailored to a focus area.
    """

    # Whether the guidance should include contrasting good/bad ADR examples.
    include_examples: bool = Field(
        default=True, description="Include good vs bad ADR examples"
    )
    # Optional domain hint (e.g. 'database') used to tailor the examples;
    # None means generic guidance.
    focus_area: str | None = Field(
        default=None,
        description="Optional focus area (e.g., 'database', 'frontend') for tailored examples",
    )


# Response Data Models for Tool-Specific Data


Expand Down
67 changes: 50 additions & 17 deletions adr_kit/mcp/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,25 +141,56 @@ def adr_preflight(request: PreflightCheckRequest) -> dict[str, Any]:
@mcp.tool()
def adr_create(request: CreateADRRequest) -> dict[str, Any]:
"""
Create new ADR with optional policy enforcement.
Create new ADR with quality assessment and policy guidance.

WHEN TO USE: Document significant technical decisions.
RETURNS: ADR details + policy_guidance (if policies detected in content).

Parameters:
- title, context, decision, consequences: Required ADR content
- policy (optional): Structured policy dict for enforcement

If no policy provided, response includes policy_guidance with:
- suggestion: Auto-detected policy structure from decision text
- policy_reference: Complete documentation for all policy types
- example_usage: Code example with your ADR + suggested policy

Use pattern-friendly language for auto-detection:
- "Don't use X" / "Prefer Y over X" → import policies
- "All X must be Y" → pattern policies
- "X must not access Y" → architecture boundaries
- "TypeScript strict mode required" → config enforcement
RETURNS: ADR details + quality_feedback + policy_guidance.

## ADR Structure (MADR Format)

Your ADR should have four sections:

1. **Context** (WHY): The problem or opportunity that prompted this decision
- Current state and why it's insufficient
- Requirements and constraints
- Business/technical drivers

2. **Decision** (WHAT): The specific technology/pattern/approach chosen
- Explicit statement with technology names/versions
- Scope ('All new services', 'Frontend only')
- Constraints ('Don't use X', 'Must have Y')

3. **Consequences** (TRADE-OFFS): Both positive AND negative outcomes
- Benefits and improvements (### Positive)
- Drawbacks and limitations (### Negative)
- Risks and mitigation strategies

4. **Alternatives** (OPTIONAL but CRITICAL): What else was considered
- Each rejected option with specific reason
- Enables extraction of 'disallow' policies

## Quality Guidelines

- **Be specific**: "Use React 18" not "use a modern framework"
- **Document trade-offs**: List BOTH pros and cons (every decision has negatives)
- **Explain WHY**: Context should justify the decision
- **State constraints explicitly**: "Don't use Flask" → enables policy extraction
- **Include alternatives**: Rejected options become 'disallow' policies

## Response Contents

The response includes:
- **quality_feedback**: Assessment of decision quality with improvement suggestions
- **policy_guidance**: How to add automated enforcement (Task 2)

## Example

Good decision language:
- "Use **FastAPI** for all new backend services. **Don't use Flask** or Django."
- "All FastAPI handlers must be async functions."
- "Frontend must not access database directly - use API layer."

This enables automatic extraction of enforceable policies.
"""
try:
logger.info(f"Creating ADR: {request.title}")
Expand All @@ -175,6 +206,7 @@ def adr_create(request: CreateADRRequest) -> dict[str, Any]:
tags=request.tags,
policy=request.policy,
alternatives=request.alternatives,
skip_quality_gate=request.skip_quality_gate,
)

result = workflow.execute(input_data=creation_input)
Expand Down Expand Up @@ -305,6 +337,7 @@ def adr_supersede(request: SupersedeADRRequest) -> dict[str, Any]:
tags=request.new_tags,
policy=request.new_policy,
alternatives=request.new_alternatives,
skip_quality_gate=request.skip_quality_gate,
)

supersede_input = SupersedeInput(
Expand Down
3 changes: 3 additions & 0 deletions adr_kit/workflows/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ class WorkflowStatus(str, Enum):
FAILED = "failed"
VALIDATION_ERROR = "validation_error"
CONFLICT_ERROR = "conflict_error"
REQUIRES_ACTION = (
"requires_action" # Quality gate or other check requires user action
)


@dataclass
Expand Down
Loading
Loading