diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..9e61e78
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,18 @@
+FROM python:3.12-slim
+
+WORKDIR /app
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ gcc libpq-dev && \
+ rm -rf /var/lib/apt/lists/*
+
+COPY pyproject.toml .
+COPY src/ src/
+
+RUN pip install --no-cache-dir .
+
+ENV WELLCODE_DATA_DIR=/data
+
+EXPOSE 8787
+
+CMD ["wellcode", "serve", "--host", "0.0.0.0", "--port", "8787"]
diff --git a/README.md b/README.md
index d031f1a..d6cf3ae 100644
--- a/README.md
+++ b/README.md
@@ -2,117 +2,497 @@
-Wellcode CLI
+Wellcode
- Engineering Metrics Powered by AI
+ Open-source developer productivity platform
- Free, open-source CLI tool that integrates with GitHub, Linear, and Split.io to gather and analyze engineering team metrics.
+ Track engineering metrics, DORA performance, AI coding tool ROI, and developer experience.
+ The open-source alternative to Swarmia, GetDX, and LinearB.
+
+  [Install](#installation) · [Quick Start](#quick-start) · [Features](#features) · [Dashboard](#web-dashboard) · [Plugins](#building-plugins) · [API](#api-reference)
+
+
+---
+
+## What is Wellcode?
+
+Wellcode is a CLI + web dashboard that connects to your existing tools (GitHub, GitLab, Bitbucket, JIRA, Linear) and gives you a single view of your engineering team's performance:
+
+- **DORA metrics** -- Deployment Frequency, Lead Time, Change Failure Rate, Mean Time to Recovery
+- **AI coding metrics** -- GitHub Copilot adoption, Cursor usage, AI-assisted PR detection, ROI analysis
+- **Pull request analytics** -- Cycle time, review bottlenecks, batch size, self-merges
+- **Developer experience surveys** -- Pulse and full DX surveys based on the SPACE / DX Core 4 frameworks
+- **Issue tracker metrics** -- JIRA and Linear cycle time, sprint velocity, estimation accuracy
+
+Everything runs locally by default (SQLite database, no cloud dependency). For teams, deploy it with PostgreSQL via Docker.
+
+---
-## 🚀 Installation
+## Installation
+
+**Requirements:** Python 3.10+
```bash
pip install wellcode-cli
```
-## 🏃 Quick Start
+Or from source:
-1. **Initial Setup**
```bash
-wellcode-cli config
+git clone https://github.com/wellcode-ai/wellcode-cli.git
+cd wellcode-cli
+python -m venv venv && source venv/bin/activate
+pip install -e .
```
-This will guide you through:
-- GitHub App installation for your organization
-- Optional Linear integration
-- Optional Split.io integration
-- Optional Anthropic integration (for AI-powered insights)
+### Docker (self-hosted with PostgreSQL)
-2. **Enable Shell Completion (Optional)**
```bash
-# For bash
-wellcode-cli completion bash >> ~/.bashrc
+git clone https://github.com/wellcode-ai/wellcode-cli.git
+cd wellcode-cli
-# For zsh
-wellcode-cli completion zsh >> ~/.zshrc
+# Set your tokens in .env or export them
+export GITHUB_TOKEN=ghp_...
+export GITHUB_ORG=your-org
-# For fish
-wellcode-cli completion fish > ~/.config/fish/completions/wellcode-cli.fish
+docker compose up -d
+# Dashboard at http://localhost:8787
```
-## 💻 Usage
+---
+
+## Quick Start
+
+### 1. Configure integrations
-### Review Metrics
```bash
-# Review last 7 days
-wellcode-cli review
+wellcode config
+```
-# Review specific date range
-wellcode-cli review --start-date 2024-01-01 --end-date 2024-01-31
+This walks you through connecting:
-# Review specific user
-wellcode-cli review --user johndoe
+| Integration | What you need |
+| --------------- | -------------------------------- |
+| **GitHub** | Personal access token or App |
+| **GitLab** | Personal access token |
+| **Bitbucket** | App password + workspace |
+| **JIRA** | Email + API token + instance URL |
+| **Linear** | API key |
+| **Anthropic**   | API key (for AI-powered insights) |
+
+### 2. Collect metrics
+
+```bash
+# Collect last 7 days from all configured providers
+wellcode collect
-# Review specific team
-wellcode-cli review --team engineering
+# Collect a specific date range
+wellcode collect --start-date 2026-01-01 --end-date 2026-01-31
```
-### Interactive Mode
+### 3. View results
+
```bash
-wellcode-cli
+# DORA metrics
+wellcode dora
+
+# AI coding tool metrics
+wellcode ai-metrics
-# Then use natural language:
-> check performance last week
-> show metrics for team frontend
-> how was johndoe doing yesterday
+# Classic review (GitHub + JIRA + Linear in terminal)
+wellcode review
+
+# Start the web dashboard
+wellcode serve
+# Open http://localhost:8787
```
-## ✨ Features
+---
+
+## Features
+
+### CLI Commands
+
+| Command | Description |
+| ---------------- | ----------------------------------------------------------------- |
+| `wellcode serve` | Start the API server + web dashboard |
+| `wellcode collect` | Collect metrics from all configured providers and persist them |
+| `wellcode dora` | View DORA metrics with Elite/High/Medium/Low classification |
+| `wellcode ai-metrics` | View AI coding tool adoption and impact analysis |
+| `wellcode review` | Classic terminal metrics review (GitHub, JIRA, Linear, Split.io) |
+| `wellcode survey` | Create developer experience surveys (pulse or full DX) |
+| `wellcode report` | Generate an HTML report with Plotly charts |
+| `wellcode chat` | Interactive AI chat about your metrics (powered by Claude) |
+| `wellcode config` | Configuration wizard for all integrations |
+| `wellcode completion` | Generate shell completions (bash, zsh, fish) |
+
+### DORA Metrics
+
+All four DORA metrics are computed from your actual data and classified against the industry benchmarks from the State of DevOps report:
+
+| Metric | Elite | High | Medium | Low |
+| ------------------------ | ------------ | ------------- | -------------- | ---------- |
+| Deployment Frequency | On-demand | Weekly-daily | Monthly-weekly | < Monthly |
+| Lead Time for Changes | < 1 hour | < 1 day | < 1 week | > 1 week |
+| Change Failure Rate | 0-15% | 16-30% | 31-45% | > 45% |
+| Mean Time to Recovery | < 1 hour | < 1 day | < 1 week | > 1 week |
+
+Sources: GitHub/GitLab/Bitbucket deployments API, merged PRs to main, reverts, hotfixes, incidents.
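+
+As a minimal sketch of how the classification works (the shipped thresholds live in `DORA_THRESHOLDS` in `src/wellcode_cli/services/dora.py`; this helper is illustrative only):
+
+```python
+# Illustrative lead-time classification against the table above; the real
+# implementation lives in src/wellcode_cli/services/dora.py.
+def classify_lead_time(hours: float) -> str:
+    if hours < 1:
+        return "elite"   # < 1 hour
+    if hours < 24:
+        return "high"    # < 1 day
+    if hours < 24 * 7:
+        return "medium"  # < 1 week
+    return "low"         # > 1 week
+
+assert classify_lead_time(0.5) == "elite"
+```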
+
+### AI Coding Metrics
+
+Track the adoption and ROI of AI coding tools across your organization:
+
+- **GitHub Copilot** -- Suggestions shown/accepted, lines of code, active users (via the Copilot org API)
+- **Cursor AI** -- Detected from PR metadata and commit patterns
+- **Claude Code / Aider** -- Detected from commit co-author trailers and PR labels (see the sketch after this list)
+- **Impact analysis** -- Compare cycle time, review time, and revert rates for AI-assisted vs non-AI pull requests
+- **Cost tracking** -- Per-tool cost vs productivity gains
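+
+As an illustration, a hedged sketch of trailer matching -- the mapping below is made up for the example, and the shipped patterns live in `src/wellcode_cli/services/ai_metrics.py`:
+
+```python
+# Illustrative co-author trailer matching (hypothetical patterns).
+AI_TRAILER_PATTERNS = {
+    "claude": "claude_code",  # e.g. "Co-authored-by: Claude <noreply@anthropic.com>"
+    "aider": "aider",
+}
+
+def detect_tool_from_commit(message: str) -> str | None:
+    for line in message.lower().splitlines():
+        if line.startswith("co-authored-by:"):
+            for needle, tool in AI_TRAILER_PATTERNS.items():
+                if needle in line:
+                    return tool
+    return None
+```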
+
+### SCM Integrations
+
+| Provider | PRs | Deployments | Teams | Reviews |
+| ------------ | --- | ----------- | ----- | ------- |
+| GitHub | Yes | Yes | Yes | Yes |
+| GitLab | Yes | Yes | Yes | -- |
+| Bitbucket | Yes | -- | Yes | -- |
+
+All providers implement the same `SCMProvider` protocol, so metrics are computed identically regardless of source.
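+
+Roughly, the protocol looks like the following sketch (signatures inferred from the plugin example in [Building Plugins](#building-plugins); `src/wellcode_cli/integrations/scm_protocol.py` is the authoritative definition):
+
+```python
+from datetime import datetime
+from typing import Optional, Protocol
+
+class SCMProvider(Protocol):
+    @property
+    def provider_name(self) -> str: ...
+
+    def get_repositories(self) -> list["SCMRepository"]: ...
+
+    def get_pull_requests(
+        self,
+        since: datetime,
+        until: datetime,
+        repo_full_name: Optional[str] = None,
+        author: Optional[str] = None,
+    ) -> list["SCMPullRequest"]: ...
+
+    def get_deployments(
+        self,
+        since: datetime,
+        until: datetime,
+        repo_full_name: Optional[str] = None,
+        environment: Optional[str] = None,
+    ) -> list["SCMDeployment"]: ...
+
+    def get_teams(self) -> list["SCMTeam"]: ...
+```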
+
+### Developer Experience Surveys
+
+Built-in survey templates based on the DX Core 4 and SPACE frameworks:
+
+- **Pulse surveys** -- 3 quick questions (productivity, code review ease, deployment confidence)
+- **Full DX surveys** -- 10 questions across speed, effectiveness, quality, and business impact
+- **Analytics** -- Developer Experience Index (DXI) score, per-category breakdowns, text response aggregation
+- **API-driven** -- Create, distribute, and analyze surveys via the REST API
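+
+For example, creating a pulse survey and pulling its analytics with `httpx` (already a project dependency); paths and fields match the [API reference](#api-reference) below:
+
+```python
+import httpx
+
+BASE = "http://localhost:8787"
+
+# Create a pulse survey from the built-in template
+survey = httpx.post(f"{BASE}/api/v1/surveys/create",
+                    json={"template": "pulse", "title": "Week 12 pulse"}).json()
+
+# Later, once responses are in, pull analytics (includes the DXI score)
+analytics = httpx.get(f"{BASE}/api/v1/surveys/{survey['id']}/analytics").json()
+print(analytics["dxi_score"], analytics["category_scores"])
+```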
+
+### Web Dashboard
+
+Start with `wellcode serve` and open `http://localhost:8787`:
+
+- **Overview** -- Total PRs, merged PRs, cycle time, AI-assisted count
+- **DORA** -- Four metrics with Elite/High/Medium/Low badge and trend chart
+- **AI Metrics** -- Tool usage table, AI vs non-AI comparison, productivity impact
+- **Pull Requests** -- Review time, PR size, reverts, self-merges
+- **Surveys** -- Create surveys, view active surveys, analytics
+
+The dashboard is a single-page app served directly by the FastAPI backend -- no separate Node.js build required.
+
+### Data Persistence
+
+Metrics are stored in a local SQLite database by default (`~/.wellcode/data/wellcode.db`). For team deployments, set `DATABASE_URL` to a PostgreSQL connection string.
+
+15 tables covering pull requests, deployments, incidents, AI usage, issues, surveys, DORA snapshots, and more.
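+
+You can also read the store directly from Python -- a short sketch using the persistence layer's own `get_session` and `MetricStore`:
+
+```python
+from wellcode_cli.db import get_session
+from wellcode_cli.db.repository import MetricStore
+
+session = get_session()
+store = MetricStore(session)
+
+# List the five most recent collection runs
+for snap in store.snapshots.list_recent(5):
+    print(snap.collected_at, snap.source, snap.status)
+
+session.close()
+```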
+
+---
+
+## API Reference
+
+When running `wellcode serve`, the full OpenAPI docs are available at `http://localhost:8787/docs`.
+
+Key endpoints:
+
+| Method | Path | Description |
+| ------ | ------------------------------- | -------------------------------- |
+| GET | `/health` | Health check + DB status |
+| GET | `/api/v1/metrics/prs` | PR metrics (filterable) |
+| GET | `/api/v1/metrics/snapshots` | Collection run history |
+| GET | `/api/v1/dora` | DORA metrics for a period |
+| GET | `/api/v1/dora/history` | DORA trend over time |
+| GET | `/api/v1/ai/impact` | AI tool impact analysis |
+| GET | `/api/v1/ai/usage` | Daily AI tool usage data |
+| GET | `/api/v1/surveys/templates` | Available survey templates |
+| POST | `/api/v1/surveys/create` | Create a new survey |
+| GET | `/api/v1/surveys/active` | List active surveys |
+| POST | `/api/v1/surveys/respond` | Submit a survey response |
+| GET | `/api/v1/surveys/{id}/analytics`| Survey analytics + DXI score |
+
+The metrics, DORA, and AI endpoints accept `?start=YYYY-MM-DD&end=YYYY-MM-DD` for date filtering and default to the last 30 days.
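+
+For example, fetching DORA metrics for a specific month with `httpx`:
+
+```python
+import httpx
+
+r = httpx.get("http://localhost:8787/api/v1/dora",
+              params={"start": "2026-01-01", "end": "2026-01-31"})
+data = r.json()
+print(data["level"], data["deployment_frequency"], data["lead_time_hours"])
+```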
+
+---
+
+## Architecture
+
+```
+src/wellcode_cli/
+ main.py # CLI entry point (Click commands)
+ config.py # Configuration management
+ api/ # FastAPI web server
+ app.py # Application factory
+ routes/ # health, metrics, dora, ai_metrics, surveys
+ services/ # Business logic (shared by CLI + API)
+ dora.py # DORA metric calculations
+ ai_metrics.py # AI tool detection + impact analysis
+ collector.py # Metric collection orchestrator
+ surveys.py # DX survey engine
+ integrations/ # External provider clients
+ scm_protocol.py # Unified SCMProvider protocol
+ github/provider.py # GitHub implementation
+ gitlab/provider.py # GitLab implementation
+ bitbucket/provider.py # Bitbucket implementation
+ db/ # Persistence layer
+ engine.py # SQLite/PostgreSQL connection factory
+ models.py # SQLAlchemy models (15 tables)
+ repository.py # Data access layer (MetricStore)
+ migrations/ # Alembic migrations
+ workers/ # Background jobs
+ scheduler.py # APScheduler for periodic collection
+ web/static/ # Dashboard SPA
+ index.html # Single-page app (Tailwind + Chart.js)
+ github/ # Legacy GitHub integration (Rich display)
+ jira/ # JIRA integration
+ linear/ # Linear integration
+ commands/ # CLI command handlers
+```
+
+The key design principle is **separation of concerns**: integrations fetch data, services process it, and the CLI/API are thin presentation layers. This makes it straightforward to add new data sources without touching business logic.
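+
+In sketch form (the persistence call below is a hypothetical stand-in; `_get_configured_providers` and the provider methods are shown in [Building Plugins](#building-plugins)):
+
+```python
+# Hedged sketch of the collect flow implied by this layout.
+def collect(store, start, end):
+    for provider in _get_configured_providers():      # integrations layer
+        prs = provider.get_pull_requests(since=start, until=end)
+        store.save_pull_requests(prs)                 # db layer (hypothetical name)
+    # services/ (dora.py, ai_metrics.py) then read from the store,
+    # and the CLI/API only render what the services return.
+```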
+
+---
+
+## Building Plugins
+
+Wellcode uses a protocol-based plugin architecture. Adding a new SCM provider (e.g., Azure DevOps, Gitea) requires implementing a single Python class.
+
+### Step 1: Implement the `SCMProvider` protocol
+
+Create a new file at `src/wellcode_cli/integrations/<provider>/provider.py` -- for this example, `src/wellcode_cli/integrations/azure_devops/provider.py`:
+
+```python
+from datetime import datetime
+from typing import Optional
-- 📊 GitHub metrics analysis
-- 📈 Linear issue tracking integration
-- 🔄 Split.io feature flag metrics
-- 🤖 AI-powered insights (via Anthropic)
-- 💬 Natural language interface
-- 📱 Interactive mode
+from wellcode_cli.config import get_config_value
+from wellcode_cli.integrations.scm_protocol import (
+ SCMDeployment,
+ SCMPullRequest,
+ SCMRepository,
+ SCMTeam,
+)
-## ⚙️ Configuration
-### GitHub App Installation
-1. Run `wellcode-cli config`
-2. Enter your organization name
-3. Follow the GitHub App installation link
-4. Select your organization and repositories
+class AzureDevOpsProvider:
+ """Azure DevOps implementation of the SCM provider protocol."""
-### Optional Integrations
-- **Linear**: Issue tracking metrics
-- **Split.io**: Feature flag analytics
-- **Anthropic**: AI-powered insights
+ def __init__(self, token: Optional[str] = None, org: Optional[str] = None):
+ self._token = token or get_config_value("AZURE_DEVOPS_TOKEN")
+ self._org = org or get_config_value("AZURE_DEVOPS_ORG")
-## 🆘 Support
+ @property
+ def provider_name(self) -> str:
+ return "azure_devops"
+
+ def get_repositories(self) -> list[SCMRepository]:
+ # Call the Azure DevOps REST API and return SCMRepository objects
+ ...
+
+ def get_pull_requests(
+ self,
+ since: datetime,
+ until: datetime,
+ repo_full_name: Optional[str] = None,
+ author: Optional[str] = None,
+ ) -> list[SCMPullRequest]:
+ # Fetch pull requests and map them to SCMPullRequest
+ ...
+
+ def get_deployments(
+ self,
+ since: datetime,
+ until: datetime,
+ repo_full_name: Optional[str] = None,
+ environment: Optional[str] = None,
+ ) -> list[SCMDeployment]:
+ # Fetch release/deployment data and map to SCMDeployment
+ ...
+
+ def get_teams(self) -> list[SCMTeam]:
+ # Fetch teams and members
+ ...
+```
+
+The protocol defines five data classes you must map your provider's data into:
+
+| Data class | Purpose |
+| ----------------- | -------------------------------------------- |
+| `SCMRepository` | Repository metadata (name, default branch) |
+| `SCMPullRequest` | PR with timestamps, sizes, review info |
+| `SCMDeployment` | Deployment event (env, status, timestamps) |
+| `SCMTeam` | Team with member list |
+| `SCMReview` | Individual code review |
+
+### Step 2: Register the provider
+
+Edit `src/wellcode_cli/services/collector.py` and add your provider to `_get_configured_providers()`:
+
+```python
+def _get_configured_providers() -> list[SCMProvider]:
+ providers = []
+
+ # ... existing providers ...
+
+ if get_config_value("AZURE_DEVOPS_TOKEN"):
+ from ..integrations.azure_devops.provider import AzureDevOpsProvider
+ providers.append(AzureDevOpsProvider())
+
+ return providers
+```
+
+That's it. The collector will automatically call your provider during `wellcode collect`, store the data in the same database tables, and all downstream features (DORA metrics, AI detection, dashboards, API) work without any further changes.
+
+### Step 3: Add config keys (optional)
+
+If your provider needs configuration, add getter functions to `src/wellcode_cli/config.py`:
+
+```python
+def get_azure_devops_token() -> Optional[str]:
+ return get_config_value("AZURE_DEVOPS_TOKEN")
+
+def get_azure_devops_org() -> Optional[str]:
+ return get_config_value("AZURE_DEVOPS_ORG")
+```
+
+Users can then set these via `~/.wellcode/config.json` or environment variables.
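+
+For example, a hypothetical `~/.wellcode/config.json` with these keys set (assuming the flat key-value layout implied by `get_config_value`):
+
+```json
+{
+  "GITHUB_TOKEN": "ghp_...",
+  "AZURE_DEVOPS_TOKEN": "...",
+  "AZURE_DEVOPS_ORG": "my-org"
+}
+```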
+
+### Adding a new issue tracker
+
+Issue trackers (JIRA, Linear) currently use their own collection logic in `src/wellcode_cli/jira/` and `src/wellcode_cli/linear/`. To add a new one, follow the same pattern: create a module under `integrations/`, map issues to `IssueMetric` DB models, and wire it into the collector.
+
+### Adding a new AI tool
+
+To detect a new AI coding tool (e.g., Windsurf, Cody), add patterns to `src/wellcode_cli/services/ai_metrics.py`:
+
+```python
+# In detect_ai_tool_from_pr()
+if "windsurf" in text or "windsurf" in label_text:
+ return "windsurf"
+```
+
+For tools with their own usage API, add a collection function similar to `collect_copilot_metrics()` and call it from the collector.
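+
+A hedged sketch of such a collector -- the usage-API client and its response fields are hypothetical, while the `AIUsageMetric` columns match the shipped model:
+
+```python
+from datetime import datetime
+
+from wellcode_cli.db.models import AIUsageMetric
+
+def collect_windsurf_metrics(client, day: datetime) -> list[AIUsageMetric]:
+    """Hypothetical collector for a tool that exposes a daily-usage API."""
+    usage = client.daily_usage(day)  # hypothetical API client call
+    return [
+        AIUsageMetric(
+            tool="windsurf",
+            date=day,
+            suggestions_shown=usage["shown"],
+            suggestions_accepted=usage["accepted"],
+            lines_accepted=usage["lines_accepted"],
+            active_users=usage["active_users"],
+            cost_usd=usage.get("cost_usd", 0.0),
+        )
+    ]
+```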
+
+---
+
+## Configuration
+
+All configuration can be set via `~/.wellcode/config.json` or environment variables:
+
+| Variable | Description |
+| --------------------------- | ------------------------------------ |
+| `GITHUB_TOKEN` | GitHub personal access token |
+| `GITHUB_ORG` | GitHub organization name |
+| `GITHUB_MODE` | `organization` or `personal` |
+| `GITLAB_TOKEN` | GitLab personal access token |
+| `GITLAB_URL` | GitLab instance URL (default: gitlab.com) |
+| `BITBUCKET_USERNAME` | Bitbucket username |
+| `BITBUCKET_APP_PASSWORD` | Bitbucket app password |
+| `BITBUCKET_WORKSPACE` | Bitbucket workspace slug |
+| `JIRA_URL` | JIRA instance URL |
+| `JIRA_EMAIL` | JIRA account email |
+| `JIRA_API_TOKEN` | JIRA API token |
+| `LINEAR_API_KEY` | Linear API key |
+| `SPLIT_API_KEY` | Split.io API key |
+| `ANTHROPIC_API_KEY` | Anthropic API key (for AI insights) |
+| `DATABASE_URL` | PostgreSQL URL (default: SQLite) |
+
+---
+
+## Self-Hosting with Docker
+
+The included `docker-compose.yml` runs Wellcode with PostgreSQL:
+
+```bash
+# Copy and edit your environment variables
+cp .env.example .env
+
+# Start the stack
+docker compose up -d
+
+# Dashboard: http://localhost:8787
+# API docs: http://localhost:8787/docs
+```
+
+The compose file includes:
+- **wellcode** -- API server + web dashboard + background collector
+- **postgres** -- PostgreSQL 16 for persistent storage
+
+---
+
+## Development
+
+```bash
+git clone https://github.com/wellcode-ai/wellcode-cli.git
+cd wellcode-cli
+python -m venv venv && source venv/bin/activate
+pip install -e ".[dev,test]"
+
+# Run tests
+pytest
+
+# Start dev server with auto-reload
+wellcode serve --reload
+
+# Run linting
+ruff check src/
+```
+
+### Project structure
+
+- `src/wellcode_cli/` -- All source code
+- `tests/` -- Test suite
+- `alembic.ini` -- Database migration config
+- `Dockerfile` -- Container image
+- `docker-compose.yml` -- Full stack deployment
+
+---
+
+## Comparison with Commercial Tools
+
+| Feature | Wellcode | Swarmia | GetDX |
+| -------------------------- | ---------- | ----------- | ----------- |
+| DORA metrics | Yes | Yes | Yes |
+| AI coding metrics | Yes | Yes | Yes |
+| GitHub | Yes | Yes | Yes |
+| GitLab | Yes | Yes | Yes |
+| Bitbucket | Yes | No | Yes |
+| JIRA | Yes | Yes | Yes |
+| Linear | Yes | Yes | Yes |
+| DX surveys | Yes | Yes | Yes |
+| Web dashboard | Yes | Yes | Yes |
+| CLI interface | Yes | No | No |
+| AI-powered insights | Yes | No | Yes |
+| Self-hosted | Yes | No | No |
+| Open source | Yes | No | No |
+| Plugin architecture | Yes | No | No |
+| Price | Free | From $20/dev| From $20/dev|
+
+---
+
+## Support
- Documentation: https://cli.wellcode.ai
- Issues: https://github.com/wellcode-ai/wellcode-cli/issues
- Email: support@wellcode.ai
-## 📄 Contributing
-
-We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
-
-## 💖 Contributors
+## Contributing
-Thanks goes to these wonderful people:
+We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
-
-
-
-
-
-
-
+Areas where help is especially welcome:
+- New SCM provider plugins (Azure DevOps, Gitea, Forgejo)
+- Incident management integrations (PagerDuty, Opsgenie)
+- CI/CD integrations (Jenkins, CircleCI, Buildkite)
+- Slack/Teams notification support
+- Additional AI tool detection patterns
-## 📄 License
+## License
-MIT License - see [LICENSE](LICENSE) file for details
\ No newline at end of file
+MIT License -- see [LICENSE](LICENSE) for details.
diff --git a/alembic.ini b/alembic.ini
new file mode 100644
index 0000000..f596c01
--- /dev/null
+++ b/alembic.ini
@@ -0,0 +1,37 @@
+[alembic]
+script_location = src/wellcode_cli/db/migrations
+prepend_sys_path = src
+sqlalchemy.url = sqlite:///%(here)s/data/wellcode.db
+
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..0b563ab
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,46 @@
+version: "3.8"
+
+services:
+ wellcode:
+ build: .
+ ports:
+ - "8787:8787"
+ volumes:
+ - wellcode-data:/data
+ environment:
+ - DATABASE_URL=postgresql://wellcode:wellcode@postgres:5432/wellcode
+ - GITHUB_TOKEN=${GITHUB_TOKEN}
+ - GITHUB_ORG=${GITHUB_ORG}
+ - GITLAB_TOKEN=${GITLAB_TOKEN}
+ - GITLAB_URL=${GITLAB_URL}
+ - BITBUCKET_USERNAME=${BITBUCKET_USERNAME}
+ - BITBUCKET_APP_PASSWORD=${BITBUCKET_APP_PASSWORD}
+ - BITBUCKET_WORKSPACE=${BITBUCKET_WORKSPACE}
+ - JIRA_URL=${JIRA_URL}
+ - JIRA_EMAIL=${JIRA_EMAIL}
+ - JIRA_API_TOKEN=${JIRA_API_TOKEN}
+ - LINEAR_API_KEY=${LINEAR_API_KEY}
+ - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
+ depends_on:
+ postgres:
+ condition: service_healthy
+ restart: unless-stopped
+
+ postgres:
+ image: postgres:16-alpine
+ environment:
+ POSTGRES_USER: wellcode
+ POSTGRES_PASSWORD: wellcode
+ POSTGRES_DB: wellcode
+ volumes:
+ - postgres-data:/var/lib/postgresql/data
+ healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U wellcode"]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+ restart: unless-stopped
+
+volumes:
+ wellcode-data:
+ postgres-data:
diff --git a/pyproject.toml b/pyproject.toml
index e019573..4cc2b80 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,15 +4,17 @@ build-backend = "hatchling.build"
[project]
name = "wellcode-cli"
-version = "0.1.31"
-description = "Engineering Metrics Analysis Tool"
+version = "0.2.0"
+description = "Open-source developer productivity platform — track engineering metrics, DORA, AI coding tools, and developer experience"
readme = "README.md"
-authors = [{ name = "Your Name", email = "your.email@example.com" }]
+authors = [{ name = "Wellcode AI", email = "hello@wellcode.ai" }]
license = { file = "LICENSE" }
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
+ "Development Status :: 4 - Beta",
+ "Topic :: Software Development :: Quality Assurance",
]
dependencies = [
"PyGithub>=1.55",
@@ -32,14 +34,28 @@ dependencies = [
"click>=8.0.0",
"pyperclip>=1.9.0",
"prompt_toolkit>=3.0.48",
+ "jira>=3.5.0",
+ "sqlalchemy>=2.0.0",
+ "alembic>=1.13.0",
+ "pydantic>=2.0.0",
+ "fastapi>=0.110.0",
+ "uvicorn[standard]>=0.27.0",
+ "apscheduler>=3.10.0",
+ "python-gitlab>=4.0.0",
+ "atlassian-python-api>=3.41.0",
+ "python-jose[cryptography]>=3.3.0",
+ "websockets>=12.0",
+ "httpx>=0.27.0",
+ "psycopg2-binary>=2.9.0",
]
-requires-python = ">=3.8"
+requires-python = ">=3.10"
[project.urls]
Homepage = "https://github.com/wellcode-ai/wellcode-cli"
Repository = "https://github.com/wellcode-ai/wellcode-cli"
[project.scripts]
+wellcode = "wellcode_cli.main:main"
wellcode-cli = "wellcode_cli.main:main"
[tool.hatch.build.targets.wheel]
@@ -48,12 +64,21 @@ packages = ["src/wellcode_cli"]
[tool.hatch.build]
include = [
"src/wellcode_cli/**/*.py",
+ "src/wellcode_cli/**/*.html",
+ "src/wellcode_cli/**/*.js",
+ "src/wellcode_cli/**/*.css",
]
[project.optional-dependencies]
test = [
"pytest>=7.0.0",
"pytest-cov>=4.0.0",
+ "pytest-asyncio>=0.23.0",
+ "httpx>=0.27.0",
+]
+dev = [
+ "ruff>=0.3.0",
+ "mypy>=1.8.0",
]
[tool.pytest.ini_options]
@@ -63,3 +88,8 @@ pythonpath = [
testpaths = [
"tests"
]
+asyncio_mode = "auto"
+
+[tool.ruff]
+line-length = 100
+target-version = "py310"
diff --git a/requirements.txt b/requirements.txt
index 11a6eec..c9ed72f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,4 +19,5 @@ types-requests-oauthlib
types-urllib3
pre-commit
pyperclip
-prompt_toolkit
\ No newline at end of file
+prompt_toolkit
+jira
\ No newline at end of file
diff --git a/src/wellcode_cli/__init__.py b/src/wellcode_cli/__init__.py
index b84359f..d3ec452 100644
--- a/src/wellcode_cli/__init__.py
+++ b/src/wellcode_cli/__init__.py
@@ -1 +1 @@
-__version__ = "0.1.31"
+__version__ = "0.2.0"
diff --git a/src/wellcode_cli/api/__init__.py b/src/wellcode_cli/api/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/api/app.py b/src/wellcode_cli/api/app.py
new file mode 100644
index 0000000..5e1a785
--- /dev/null
+++ b/src/wellcode_cli/api/app.py
@@ -0,0 +1,44 @@
+"""FastAPI application for the Wellcode web dashboard and API."""
+
+from contextlib import asynccontextmanager
+from pathlib import Path
+
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
+
+from ..db.engine import init_db
+from .routes import ai_metrics, dora, health, metrics, surveys
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ init_db()
+ yield
+
+
+app = FastAPI(
+ title="Wellcode API",
+ description="Open-source developer productivity platform API",
+ version="0.2.0",
+ lifespan=lifespan,
+)
+
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+app.include_router(health.router, tags=["Health"])
+app.include_router(metrics.router, prefix="/api/v1/metrics", tags=["Metrics"])
+app.include_router(dora.router, prefix="/api/v1/dora", tags=["DORA"])
+app.include_router(ai_metrics.router, prefix="/api/v1/ai", tags=["AI Metrics"])
+app.include_router(surveys.router, prefix="/api/v1/surveys", tags=["Surveys"])
+
+# Serve static web dashboard if available
+WEB_DIR = Path(__file__).parent.parent / "web" / "static"
+if WEB_DIR.exists():
+ app.mount("/", StaticFiles(directory=str(WEB_DIR), html=True), name="static")
diff --git a/src/wellcode_cli/api/middleware/__init__.py b/src/wellcode_cli/api/middleware/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/api/routes/__init__.py b/src/wellcode_cli/api/routes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/api/routes/ai_metrics.py b/src/wellcode_cli/api/routes/ai_metrics.py
new file mode 100644
index 0000000..d643b61
--- /dev/null
+++ b/src/wellcode_cli/api/routes/ai_metrics.py
@@ -0,0 +1,114 @@
+"""AI coding tool metrics API endpoints."""
+
+from datetime import datetime, timedelta, timezone
+from typing import Optional
+
+from fastapi import APIRouter, Query
+from pydantic import BaseModel
+
+from ...db.engine import get_session
+from ...db.repository import MetricStore
+from ...services.ai_metrics import compute_ai_impact
+
+router = APIRouter()
+
+
+class AIToolMetric(BaseModel):
+ tool: str
+ total_suggestions_shown: int
+ total_suggestions_accepted: int
+ total_lines_accepted: int
+ active_users: int
+ acceptance_rate: float
+ total_cost_usd: float
+
+
+class AIImpactResponse(BaseModel):
+ ai_assisted_pr_count: int
+ non_ai_pr_count: int
+ ai_avg_cycle_time_hours: float
+ non_ai_avg_cycle_time_hours: float
+ ai_avg_review_time_hours: float
+ non_ai_avg_review_time_hours: float
+ ai_revert_rate: float
+ non_ai_revert_rate: float
+ productivity_change_pct: float
+ tools: list[AIToolMetric]
+
+
+class AIUsageDayResponse(BaseModel):
+ date: str
+ tool: str
+ suggestions_shown: int
+ suggestions_accepted: int
+ lines_accepted: int
+ active_users: int
+ cost_usd: float
+
+
+@router.get("/impact", response_model=AIImpactResponse)
+def get_ai_impact(
+ start: Optional[str] = Query(None),
+ end: Optional[str] = Query(None),
+):
+ now = datetime.now(timezone.utc)
+ end_dt = datetime.fromisoformat(end) if end else now
+ start_dt = datetime.fromisoformat(start) if start else end_dt - timedelta(days=30)
+
+ session = get_session()
+ store = MetricStore(session)
+ impact = compute_ai_impact(store, start_dt, end_dt)
+ session.close()
+
+ return AIImpactResponse(
+ ai_assisted_pr_count=impact.ai_assisted_pr_count,
+ non_ai_pr_count=impact.non_ai_pr_count,
+ ai_avg_cycle_time_hours=impact.ai_avg_cycle_time_hours,
+ non_ai_avg_cycle_time_hours=impact.non_ai_avg_cycle_time_hours,
+ ai_avg_review_time_hours=impact.ai_avg_review_time_hours,
+ non_ai_avg_review_time_hours=impact.non_ai_avg_review_time_hours,
+ ai_revert_rate=impact.ai_revert_rate,
+ non_ai_revert_rate=impact.non_ai_revert_rate,
+ productivity_change_pct=impact.productivity_change_pct,
+ tools=[
+ AIToolMetric(
+ tool=t.tool,
+ total_suggestions_shown=t.total_suggestions_shown,
+ total_suggestions_accepted=t.total_suggestions_accepted,
+ total_lines_accepted=t.total_lines_accepted,
+ active_users=t.active_users,
+ acceptance_rate=t.acceptance_rate,
+ total_cost_usd=t.total_cost_usd,
+ )
+ for t in impact.tools
+ ],
+ )
+
+
+@router.get("/usage", response_model=list[AIUsageDayResponse])
+def get_ai_usage(
+ start: Optional[str] = Query(None),
+ end: Optional[str] = Query(None),
+ tool: Optional[str] = Query(None),
+):
+ now = datetime.now(timezone.utc)
+ end_dt = datetime.fromisoformat(end) if end else now
+ start_dt = datetime.fromisoformat(start) if start else end_dt - timedelta(days=30)
+
+ session = get_session()
+ store = MetricStore(session)
+ metrics = store.ai_usage.get_by_period(start_dt, end_dt, tool=tool)
+ session.close()
+
+ return [
+ AIUsageDayResponse(
+ date=m.date.isoformat() if m.date else "",
+ tool=m.tool,
+ suggestions_shown=m.suggestions_shown or 0,
+ suggestions_accepted=m.suggestions_accepted or 0,
+ lines_accepted=m.lines_accepted or 0,
+ active_users=m.active_users or 0,
+ cost_usd=m.cost_usd or 0,
+ )
+ for m in metrics
+ ]
diff --git a/src/wellcode_cli/api/routes/dora.py b/src/wellcode_cli/api/routes/dora.py
new file mode 100644
index 0000000..01b0009
--- /dev/null
+++ b/src/wellcode_cli/api/routes/dora.py
@@ -0,0 +1,84 @@
+"""DORA metrics API endpoints."""
+
+from datetime import datetime, timedelta, timezone
+from typing import Optional
+
+from fastapi import APIRouter, Query
+from pydantic import BaseModel
+
+from ...db.engine import get_session
+from ...db.repository import MetricStore
+from ...services.dora import DORA_THRESHOLDS, compute_dora
+
+router = APIRouter()
+
+
+class DORAResponse(BaseModel):
+ deployment_frequency: float
+ lead_time_hours: float
+ change_failure_rate: float
+ mttr_hours: float
+ level: str
+ details: dict
+ thresholds: dict = DORA_THRESHOLDS
+
+
+class DORAHistoryItem(BaseModel):
+ period_start: str
+ period_end: str
+ deployment_frequency: float
+ lead_time_hours: float
+ change_failure_rate: float
+ mttr_hours: float
+ level: str
+
+
+@router.get("", response_model=DORAResponse)
+def get_dora_metrics(
+ start: Optional[str] = Query(None, description="Start date YYYY-MM-DD"),
+ end: Optional[str] = Query(None, description="End date YYYY-MM-DD"),
+ repo_id: Optional[int] = None,
+ team_id: Optional[int] = None,
+):
+ now = datetime.now(timezone.utc)
+ end_dt = datetime.fromisoformat(end) if end else now
+ start_dt = datetime.fromisoformat(start) if start else end_dt - timedelta(days=30)
+
+ session = get_session()
+ store = MetricStore(session)
+
+ metrics = compute_dora(store, start_dt, end_dt, repo_id=repo_id, team_id=team_id)
+
+ session.close()
+ return DORAResponse(
+ deployment_frequency=metrics.deployment_frequency,
+ lead_time_hours=metrics.lead_time_hours,
+ change_failure_rate=metrics.change_failure_rate,
+ mttr_hours=metrics.mttr_hours,
+ level=metrics.level,
+ details=metrics.details,
+ )
+
+
+@router.get("/history", response_model=list[DORAHistoryItem])
+def get_dora_history(
+ limit: int = Query(30, le=100),
+ team_id: Optional[int] = None,
+):
+ session = get_session()
+ store = MetricStore(session)
+ items = store.dora.get_history(limit=limit, team_id=team_id)
+ result = [
+ DORAHistoryItem(
+ period_start=d.period_start.isoformat() if d.period_start else "",
+ period_end=d.period_end.isoformat() if d.period_end else "",
+ deployment_frequency=d.deployment_frequency or 0,
+ lead_time_hours=d.lead_time_hours or 0,
+ change_failure_rate=d.change_failure_rate or 0,
+ mttr_hours=d.mttr_hours or 0,
+ level=d.level or "low",
+ )
+ for d in items
+ ]
+ session.close()
+ return result
diff --git a/src/wellcode_cli/api/routes/health.py b/src/wellcode_cli/api/routes/health.py
new file mode 100644
index 0000000..0593f8a
--- /dev/null
+++ b/src/wellcode_cli/api/routes/health.py
@@ -0,0 +1,29 @@
+"""Health check endpoints."""
+
+from datetime import datetime, timezone
+
+from fastapi import APIRouter
+from sqlalchemy import text
+
+from ...db.engine import get_engine
+
+router = APIRouter()
+
+
+@router.get("/health")
+def health_check():
+ db_ok = False
+ try:
+ engine = get_engine()
+ with engine.connect() as conn:
+ conn.execute(text("SELECT 1"))
+ db_ok = True
+ except Exception:
+ pass
+
+ return {
+ "status": "healthy" if db_ok else "degraded",
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "database": "connected" if db_ok else "disconnected",
+ "version": "0.2.0",
+ }
diff --git a/src/wellcode_cli/api/routes/metrics.py b/src/wellcode_cli/api/routes/metrics.py
new file mode 100644
index 0000000..538fd84
--- /dev/null
+++ b/src/wellcode_cli/api/routes/metrics.py
@@ -0,0 +1,120 @@
+"""PR and general engineering metrics API endpoints."""
+
+import statistics
+from datetime import datetime, timedelta, timezone
+from typing import Optional
+
+from fastapi import APIRouter, Query
+from pydantic import BaseModel
+
+from ...db.engine import get_session
+from ...db.repository import MetricStore
+
+router = APIRouter()
+
+
+class PRMetricsResponse(BaseModel):
+ total_prs: int = 0
+ merged_prs: int = 0
+ open_prs: int = 0
+ avg_cycle_time_hours: float = 0
+ avg_time_to_merge_hours: float = 0
+ avg_time_to_first_review_hours: float = 0
+ avg_pr_size: float = 0
+ revert_count: int = 0
+ hotfix_count: int = 0
+ self_merge_count: int = 0
+ ai_assisted_count: int = 0
+ top_contributors: list = []
+ pr_size_distribution: dict = {}
+
+
+class SnapshotResponse(BaseModel):
+ id: int
+ collected_at: str
+ period_start: str
+ period_end: str
+ source: str
+ status: str
+ summary: Optional[dict] = None
+ duration_seconds: Optional[float] = None
+
+
+@router.get("/prs", response_model=PRMetricsResponse)
+def get_pr_metrics(
+ start: Optional[str] = Query(None, description="Start date YYYY-MM-DD"),
+ end: Optional[str] = Query(None, description="End date YYYY-MM-DD"),
+ repo_id: Optional[int] = None,
+ author: Optional[str] = None,
+):
+ now = datetime.now(timezone.utc)
+ end_dt = datetime.fromisoformat(end) if end else now
+ start_dt = datetime.fromisoformat(start) if start else end_dt - timedelta(days=30)
+
+ session = get_session()
+ store = MetricStore(session)
+
+ prs = store.pull_requests.get_by_period(start_dt, end_dt, repo_id=repo_id)
+
+ if author:
+ prs = [p for p in prs if p.author and p.author.username == author]
+
+ merged = [p for p in prs if p.state == "merged"]
+ open_prs = [p for p in prs if p.state == "open"]
+
+ cycle_times = [p.cycle_time_hours for p in merged if p.cycle_time_hours]
+ merge_times = [p.time_to_merge_hours for p in merged if p.time_to_merge_hours]
+ review_times = [p.time_to_first_review_hours for p in prs if p.time_to_first_review_hours]
+ sizes = [p.additions + p.deletions for p in prs if p.additions or p.deletions]
+
+ # Top contributors
+ contrib_counts = {}
+ for p in prs:
+ if p.author:
+ contrib_counts[p.author.username] = contrib_counts.get(p.author.username, 0) + 1
+ top = sorted(contrib_counts.items(), key=lambda x: x[1], reverse=True)[:10]
+
+ # Size distribution
+ small = sum(1 for s in sizes if s < 100)
+ medium = sum(1 for s in sizes if 100 <= s < 500)
+ large = sum(1 for s in sizes if s >= 500)
+
+ session.close()
+
+ return PRMetricsResponse(
+ total_prs=len(prs),
+ merged_prs=len(merged),
+ open_prs=len(open_prs),
+ avg_cycle_time_hours=statistics.mean(cycle_times) if cycle_times else 0,
+ avg_time_to_merge_hours=statistics.mean(merge_times) if merge_times else 0,
+ avg_time_to_first_review_hours=statistics.mean(review_times) if review_times else 0,
+ avg_pr_size=statistics.mean(sizes) if sizes else 0,
+ revert_count=sum(1 for p in prs if p.is_revert),
+ hotfix_count=sum(1 for p in prs if p.is_hotfix),
+ self_merge_count=sum(1 for p in prs if p.is_self_merged),
+ ai_assisted_count=sum(1 for p in prs if p.is_ai_generated),
+ top_contributors=[{"username": u, "pr_count": c} for u, c in top],
+ pr_size_distribution={"small": small, "medium": medium, "large": large},
+ )
+
+
+@router.get("/snapshots", response_model=list[SnapshotResponse])
+def list_snapshots(limit: int = Query(20, le=100)):
+ session = get_session()
+ store = MetricStore(session)
+ snaps = store.snapshots.list_recent(limit)
+ result = [
+ SnapshotResponse(
+ id=s.id,
+ collected_at=s.collected_at.isoformat() if s.collected_at else "",
+ period_start=s.period_start.isoformat() if s.period_start else "",
+ period_end=s.period_end.isoformat() if s.period_end else "",
+ source=s.source or "",
+ status=s.status or "",
+ summary=s.summary,
+ duration_seconds=s.duration_seconds,
+ )
+ for s in snaps
+ ]
+ session.close()
+ return result
diff --git a/src/wellcode_cli/api/routes/surveys.py b/src/wellcode_cli/api/routes/surveys.py
new file mode 100644
index 0000000..8278684
--- /dev/null
+++ b/src/wellcode_cli/api/routes/surveys.py
@@ -0,0 +1,120 @@
+"""DX Survey API endpoints."""
+
+from typing import Optional
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+
+from ...db.engine import get_session
+from ...db.repository import MetricStore
+from ...services.surveys import (
+ SURVEY_TEMPLATES,
+ analyze_survey,
+ create_survey_from_template,
+ submit_response,
+)
+
+router = APIRouter()
+
+
+class CreateSurveyRequest(BaseModel):
+ template: str = "pulse"
+ title: Optional[str] = None
+ target_teams: Optional[list] = None
+ recurrence: str = "none"
+
+
+class SurveyResponseModel(BaseModel):
+ id: int
+ title: str
+ survey_type: Optional[str]
+ status: Optional[str]
+ created_at: str
+ total_questions: int
+
+
+class SubmitResponseRequest(BaseModel):
+ survey_id: int
+ answers: dict
+ developer_id: Optional[int] = None
+
+
+class SurveyAnalyticsResponse(BaseModel):
+ survey_id: int
+ title: str
+ total_responses: int
+ dxi_score: float
+ category_scores: dict
+ question_scores: dict
+ text_responses: list
+
+
+@router.get("/templates")
+def list_templates():
+ return {
+ name: [{"text": q["text"], "type": q["type"], "category": q.get("category")}
+ for q in questions]
+ for name, questions in SURVEY_TEMPLATES.items()
+ }
+
+
+@router.post("/create", response_model=SurveyResponseModel)
+def create_survey(req: CreateSurveyRequest):
+ if req.template not in SURVEY_TEMPLATES:
+ raise HTTPException(400, f"Unknown template: {req.template}")
+
+ survey = create_survey_from_template(
+ template=req.template,
+ title=req.title,
+ target_teams=req.target_teams,
+ recurrence=req.recurrence,
+ )
+ q_count = len(SURVEY_TEMPLATES.get(req.template, []))
+
+ return SurveyResponseModel(
+ id=survey.id,
+ title=survey.title,
+ survey_type=survey.survey_type,
+ status=survey.status,
+ created_at=survey.created_at.isoformat() if survey.created_at else "",
+ total_questions=q_count,
+ )
+
+
+@router.get("/active", response_model=list[SurveyResponseModel])
+def list_active_surveys():
+ session = get_session()
+ store = MetricStore(session)
+ surveys = store.surveys.get_active_surveys()
+ result = []
+ for s in surveys:
+ result.append(SurveyResponseModel(
+ id=s.id,
+ title=s.title,
+ survey_type=s.survey_type,
+ status=s.status,
+ created_at=s.created_at.isoformat() if s.created_at else "",
+ total_questions=len(s.questions) if s.questions else 0,
+ ))
+ session.close()
+ return result
+
+
+@router.post("/respond")
+def submit_survey_response(req: SubmitResponseRequest):
+ response = submit_response(req.survey_id, req.answers, req.developer_id)
+ return {"id": response.id, "submitted_at": response.submitted_at.isoformat()}
+
+
+@router.get("/{survey_id}/analytics", response_model=SurveyAnalyticsResponse)
+def get_survey_analytics(survey_id: int):
+ analytics = analyze_survey(survey_id)
+ return SurveyAnalyticsResponse(
+ survey_id=analytics.survey_id,
+ title=analytics.title,
+ total_responses=analytics.total_responses,
+ dxi_score=analytics.dxi_score,
+ category_scores=analytics.category_scores,
+ question_scores=analytics.question_scores,
+ text_responses=analytics.text_responses,
+ )
diff --git a/src/wellcode_cli/commands/config.py b/src/wellcode_cli/commands/config.py
index 009c2a5..6b3a679 100644
--- a/src/wellcode_cli/commands/config.py
+++ b/src/wellcode_cli/commands/config.py
@@ -108,6 +108,48 @@ def config():
console.print("[green]✓ GitHub configuration complete![/]")
+ # JIRA integration (requires URL, email, and API token)
+ console.print("\n[bold cyan]JIRA Configuration[/]")
+ has_jira = "JIRA_API_TOKEN" in config_data
+ if has_jira:
+ console.print("[yellow]JIRA is already configured[/]")
+ jira_choice = Prompt.ask(
+ "Would you like to reconfigure JIRA?",
+ choices=["y", "n", "clear"],
+ default="n",
+ )
+ if jira_choice == "y":
+ jira_url = Prompt.ask(
+ "Enter your JIRA instance URL (e.g. https://org.atlassian.net)",
+ default=config_data.get("JIRA_URL", ""),
+ )
+ jira_email = Prompt.ask(
+ "Enter your JIRA email",
+ default=config_data.get("JIRA_EMAIL", ""),
+ )
+ jira_token = Prompt.ask("Enter your JIRA API token")
+ if jira_url and jira_email and jira_token:
+ config_data["JIRA_URL"] = jira_url.rstrip("/")
+ config_data["JIRA_EMAIL"] = jira_email
+ config_data["JIRA_API_TOKEN"] = jira_token
+ elif jira_choice == "clear":
+ for k in ("JIRA_URL", "JIRA_EMAIL", "JIRA_API_TOKEN"):
+ config_data.pop(k, None)
+ console.print("[yellow]JIRA configuration cleared[/]")
+ else:
+ if Confirm.ask(
+ "Would you like to configure JIRA integration?", default=False
+ ):
+ jira_url = Prompt.ask(
+ "Enter your JIRA instance URL (e.g. https://org.atlassian.net)"
+ )
+ jira_email = Prompt.ask("Enter your JIRA email")
+ jira_token = Prompt.ask("Enter your JIRA API token")
+ if jira_url and jira_email and jira_token:
+ config_data["JIRA_URL"] = jira_url.rstrip("/")
+ config_data["JIRA_EMAIL"] = jira_email
+ config_data["JIRA_API_TOKEN"] = jira_token
+
# Optional integrations with secret masking
optional_configs = {
"Linear": ("LINEAR_API_KEY", "Enter your Linear API key"),
@@ -133,6 +175,10 @@ def config():
)
console.print("[green]✓ GitHub App installed and configured[/]")
+ jira_status = "✓" if "JIRA_API_TOKEN" in config_data else "✗"
+ jira_color = "green" if "JIRA_API_TOKEN" in config_data else "red"
+ console.print(f"[{jira_color}]{jira_status} JIRA[/]")
+
for name, (key, _) in optional_configs.items():
status = "✓" if key in config_data else "✗"
color = "green" if key in config_data else "red"
diff --git a/src/wellcode_cli/commands/review.py b/src/wellcode_cli/commands/review.py
index fdbf5ce..af2c04e 100644
--- a/src/wellcode_cli/commands/review.py
+++ b/src/wellcode_cli/commands/review.py
@@ -10,6 +10,7 @@
from ..config import (
get_anthropic_api_key,
get_github_org,
+ get_jira_api_token,
get_linear_api_key,
get_split_api_key,
)
@@ -18,6 +19,8 @@
from ..github.github_display import display_github_metrics
from ..github.github_format_ai import format_ai_response, get_ai_analysis
from ..github.github_metrics import get_github_metrics
+from ..jira.jira_display import display_jira_metrics
+from ..jira.jira_metrics import get_jira_metrics
from ..linear.linear_display import display_linear_metrics
from ..linear.linear_metrics import get_linear_metrics
from ..split_metrics import display_split_metrics, get_split_metrics
@@ -121,6 +124,15 @@ def review(start_date, end_date, user, team):
else:
console.print("[yellow]⚠️ Linear integration not configured[/]")
+ # JIRA metrics
+ if get_jira_api_token():
+ status.update("Fetching JIRA metrics...")
+ jira_metrics = get_jira_metrics(start_date, end_date, user)
+ all_metrics["jira"] = jira_metrics
+ display_jira_metrics(jira_metrics)
+ else:
+ console.print("[yellow]⚠️ JIRA integration not configured[/]")
+
# Split metrics
if get_split_api_key():
status.update("Fetching Split metrics...")
diff --git a/src/wellcode_cli/config.py b/src/wellcode_cli/config.py
index 239fc82..e77725d 100644
--- a/src/wellcode_cli/config.py
+++ b/src/wellcode_cli/config.py
@@ -46,3 +46,39 @@ def get_anthropic_api_key() -> Optional[str]:
def get_split_api_key() -> Optional[str]:
return get_config_value("SPLIT_API_KEY")
+
+
+def get_jira_url() -> Optional[str]:
+ return get_config_value("JIRA_URL")
+
+
+def get_jira_email() -> Optional[str]:
+ return get_config_value("JIRA_EMAIL")
+
+
+def get_jira_api_token() -> Optional[str]:
+ return get_config_value("JIRA_API_TOKEN")
+
+
+def get_gitlab_token() -> Optional[str]:
+ return get_config_value("GITLAB_TOKEN")
+
+
+def get_gitlab_url() -> Optional[str]:
+ return get_config_value("GITLAB_URL")
+
+
+def get_bitbucket_username() -> Optional[str]:
+ return get_config_value("BITBUCKET_USERNAME")
+
+
+def get_bitbucket_app_password() -> Optional[str]:
+ return get_config_value("BITBUCKET_APP_PASSWORD")
+
+
+def get_bitbucket_workspace() -> Optional[str]:
+ return get_config_value("BITBUCKET_WORKSPACE")
+
+
+def get_database_url() -> Optional[str]:
+ return get_config_value("DATABASE_URL")
diff --git a/src/wellcode_cli/db/__init__.py b/src/wellcode_cli/db/__init__.py
new file mode 100644
index 0000000..45711b2
--- /dev/null
+++ b/src/wellcode_cli/db/__init__.py
@@ -0,0 +1,3 @@
+from .engine import get_engine, get_session, init_db
+
+__all__ = ["get_engine", "get_session", "init_db"]
diff --git a/src/wellcode_cli/db/engine.py b/src/wellcode_cli/db/engine.py
new file mode 100644
index 0000000..9de54c3
--- /dev/null
+++ b/src/wellcode_cli/db/engine.py
@@ -0,0 +1,63 @@
+"""Database engine factory supporting SQLite (default) and PostgreSQL."""
+
+from pathlib import Path
+from typing import Optional
+
+from sqlalchemy import create_engine, event
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+
+from ..config import get_config_value
+
+_engine: Optional[Engine] = None
+_session_factory: Optional[sessionmaker] = None
+
+DATA_DIR = Path.home() / ".wellcode" / "data"
+
+
+def _sqlite_wal_mode(dbapi_conn, connection_record):
+ cursor = dbapi_conn.cursor()
+ cursor.execute("PRAGMA journal_mode=WAL")
+ cursor.execute("PRAGMA foreign_keys=ON")
+ cursor.close()
+
+
+def get_database_url() -> str:
+ url = get_config_value("DATABASE_URL")
+ if url:
+ return url
+ DATA_DIR.mkdir(parents=True, exist_ok=True)
+ return f"sqlite:///{DATA_DIR / 'wellcode.db'}"
+
+
+def get_engine() -> Engine:
+ global _engine
+ if _engine is None:
+ url = get_database_url()
+ kwargs = {}
+ if url.startswith("sqlite"):
+ kwargs["connect_args"] = {"check_same_thread": False}
+ else:
+ kwargs["pool_size"] = 10
+ kwargs["max_overflow"] = 20
+ kwargs["pool_pre_ping"] = True
+
+ _engine = create_engine(url, echo=False, **kwargs)
+
+ if url.startswith("sqlite"):
+ event.listen(_engine, "connect", _sqlite_wal_mode)
+
+ return _engine
+
+
+def get_session() -> Session:
+ global _session_factory
+ if _session_factory is None:
+ _session_factory = sessionmaker(bind=get_engine(), expire_on_commit=False)
+ return _session_factory()
+
+
+def init_db():
+ """Create all tables. For production use Alembic migrations instead."""
+ from .models import Base
+ Base.metadata.create_all(get_engine())
diff --git a/src/wellcode_cli/db/migrations/env.py b/src/wellcode_cli/db/migrations/env.py
new file mode 100644
index 0000000..66bfdcb
--- /dev/null
+++ b/src/wellcode_cli/db/migrations/env.py
@@ -0,0 +1,47 @@
+"""Alembic migration environment configuration."""
+
+from logging.config import fileConfig
+
+from alembic import context
+from sqlalchemy import engine_from_config, pool
+
+from wellcode_cli.db.engine import get_database_url
+from wellcode_cli.db.models import Base
+
+config = context.config
+if config.config_file_name is not None:
+ fileConfig(config.config_file_name)
+
+target_metadata = Base.metadata
+
+
+def run_migrations_offline():
+ url = get_database_url()
+ context.configure(
+ url=url,
+ target_metadata=target_metadata,
+ literal_binds=True,
+ dialect_opts={"paramstyle": "named"},
+ )
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def run_migrations_online():
+ configuration = config.get_section(config.config_ini_section) or {}
+ configuration["sqlalchemy.url"] = get_database_url()
+ connectable = engine_from_config(
+ configuration,
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+ with connectable.connect() as connection:
+ context.configure(connection=connection, target_metadata=target_metadata)
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/src/wellcode_cli/db/migrations/script.py.mako b/src/wellcode_cli/db/migrations/script.py.mako
new file mode 100644
index 0000000..958df87
--- /dev/null
+++ b/src/wellcode_cli/db/migrations/script.py.mako
@@ -0,0 +1,25 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+ ${downgrades if downgrades else "pass"}
diff --git a/src/wellcode_cli/db/models.py b/src/wellcode_cli/db/models.py
new file mode 100644
index 0000000..5283603
--- /dev/null
+++ b/src/wellcode_cli/db/models.py
@@ -0,0 +1,436 @@
+"""SQLAlchemy models for persistent metric storage."""
+
+from datetime import datetime, timezone
+
+from sqlalchemy import (
+ JSON,
+ Boolean,
+ Column,
+ DateTime,
+ Float,
+ ForeignKey,
+ Index,
+ Integer,
+ String,
+ Text,
+ UniqueConstraint,
+)
+from sqlalchemy.orm import DeclarativeBase, relationship
+
+
+class Base(DeclarativeBase):
+ pass
+
+
+# ---------------------------------------------------------------------------
+# Reference / dimension tables
+# ---------------------------------------------------------------------------
+
+class Organization(Base):
+ __tablename__ = "organizations"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ name = Column(String(255), nullable=False, unique=True)
+ provider = Column(String(50), nullable=False) # github, gitlab, bitbucket
+ external_id = Column(String(255))
+ avatar_url = Column(Text)
+ created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
+ updated_at = Column(DateTime, default=lambda: datetime.now(timezone.utc),
+ onupdate=lambda: datetime.now(timezone.utc))
+
+ repositories = relationship("Repository", back_populates="organization")
+ teams = relationship("Team", back_populates="organization")
+
+
+class Repository(Base):
+ __tablename__ = "repositories"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ organization_id = Column(Integer, ForeignKey("organizations.id"), nullable=True)
+ name = Column(String(255), nullable=False)
+ full_name = Column(String(512))
+ provider = Column(String(50), nullable=False)
+ external_id = Column(String(255))
+ default_branch = Column(String(100), default="main")
+ url = Column(Text)
+ is_active = Column(Boolean, default=True)
+ created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
+ updated_at = Column(DateTime, default=lambda: datetime.now(timezone.utc),
+ onupdate=lambda: datetime.now(timezone.utc))
+
+ organization = relationship("Organization", back_populates="repositories")
+ pull_requests = relationship("PullRequestMetric", back_populates="repository")
+ deployments = relationship("DeploymentMetric", back_populates="repository")
+
+ __table_args__ = (
+ UniqueConstraint("provider", "full_name", name="uq_repo_provider_name"),
+ )
+
+
+class Team(Base):
+ __tablename__ = "teams"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ organization_id = Column(Integer, ForeignKey("organizations.id"), nullable=True)
+ name = Column(String(255), nullable=False)
+ slug = Column(String(255))
+ provider = Column(String(50))
+ external_id = Column(String(255))
+ created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
+
+ organization = relationship("Organization", back_populates="teams")
+ members = relationship("TeamMember", back_populates="team")
+
+
+class Developer(Base):
+ __tablename__ = "developers"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ username = Column(String(255), nullable=False)
+ display_name = Column(String(255))
+ email = Column(String(255))
+ provider = Column(String(50), nullable=False)
+ external_id = Column(String(255))
+ avatar_url = Column(Text)
+ created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
+ updated_at = Column(DateTime, default=lambda: datetime.now(timezone.utc),
+ onupdate=lambda: datetime.now(timezone.utc))
+
+ team_memberships = relationship("TeamMember", back_populates="developer")
+ pull_requests = relationship("PullRequestMetric", back_populates="author",
+ foreign_keys="PullRequestMetric.author_id")
+
+ __table_args__ = (
+ UniqueConstraint("provider", "username", name="uq_dev_provider_username"),
+ )
+
+
+class TeamMember(Base):
+ __tablename__ = "team_members"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ team_id = Column(Integer, ForeignKey("teams.id"), nullable=False)
+ developer_id = Column(Integer, ForeignKey("developers.id"), nullable=False)
+ role = Column(String(50), default="member")
+ joined_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
+
+ team = relationship("Team", back_populates="members")
+ developer = relationship("Developer", back_populates="team_memberships")
+
+
+# ---------------------------------------------------------------------------
+# Metric snapshot (each collection run)
+# ---------------------------------------------------------------------------
+
+class MetricSnapshot(Base):
+ __tablename__ = "metric_snapshots"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ collected_at = Column(DateTime, nullable=False,
+ default=lambda: datetime.now(timezone.utc))
+ period_start = Column(DateTime, nullable=False)
+ period_end = Column(DateTime, nullable=False)
+ source = Column(String(50)) # github, gitlab, jira, linear, etc.
+ status = Column(String(20), default="completed") # running, completed, failed
+ summary = Column(JSON)
+ duration_seconds = Column(Float)
+
+ __table_args__ = (
+ Index("ix_snapshot_period", "period_start", "period_end"),
+ )
+
+
+# ---------------------------------------------------------------------------
+# Pull request metrics
+# ---------------------------------------------------------------------------
+
+class PullRequestMetric(Base):
+ __tablename__ = "pr_metrics"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ snapshot_id = Column(Integer, ForeignKey("metric_snapshots.id"), nullable=True)
+ repository_id = Column(Integer, ForeignKey("repositories.id"), nullable=True)
+ author_id = Column(Integer, ForeignKey("developers.id"), nullable=True)
+
+ provider = Column(String(50), nullable=False)
+ external_id = Column(String(255))
+ number = Column(Integer)
+ title = Column(Text)
+ state = Column(String(20)) # open, closed, merged
+ base_branch = Column(String(255))
+ head_branch = Column(String(255))
+
+ created_at = Column(DateTime)
+ updated_at = Column(DateTime)
+ merged_at = Column(DateTime)
+ closed_at = Column(DateTime)
+ first_commit_at = Column(DateTime)
+ first_review_at = Column(DateTime)
+
+ additions = Column(Integer, default=0)
+ deletions = Column(Integer, default=0)
+ changed_files = Column(Integer, default=0)
+ commits_count = Column(Integer, default=0)
+
+ # Calculated durations (hours)
+ time_to_first_review_hours = Column(Float)
+ time_to_merge_hours = Column(Float)
+ coding_time_hours = Column(Float)
+ review_time_hours = Column(Float)
+ lead_time_hours = Column(Float)
+ cycle_time_hours = Column(Float)
+
+ review_count = Column(Integer, default=0)
+ reviewer_count = Column(Integer, default=0)
+ comment_count = Column(Integer, default=0)
+ review_cycles = Column(Integer, default=0)
+
+ is_revert = Column(Boolean, default=False)
+ is_hotfix = Column(Boolean, default=False)
+ is_self_merged = Column(Boolean, default=False)
+ is_ai_generated = Column(Boolean, default=False)
+ ai_tool = Column(String(50))
+
+ labels = Column(JSON)
+ reviewers = Column(JSON)
+
+ repository = relationship("Repository", back_populates="pull_requests")
+ author = relationship("Developer", back_populates="pull_requests",
+ foreign_keys=[author_id])
+
+ __table_args__ = (
+ UniqueConstraint("provider", "external_id", name="uq_pr_provider_extid"),
+ Index("ix_pr_created", "created_at"),
+ Index("ix_pr_merged", "merged_at"),
+ Index("ix_pr_author", "author_id"),
+ Index("ix_pr_repo", "repository_id"),
+ )
+
+
+# ---------------------------------------------------------------------------
+# Deployment metrics (for DORA)
+# ---------------------------------------------------------------------------
+
+class DeploymentMetric(Base):
+ __tablename__ = "deployment_metrics"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ snapshot_id = Column(Integer, ForeignKey("metric_snapshots.id"), nullable=True)
+ repository_id = Column(Integer, ForeignKey("repositories.id"), nullable=True)
+
+ provider = Column(String(50), nullable=False)
+ external_id = Column(String(255))
+ environment = Column(String(100))
+ ref = Column(String(255))
+ sha = Column(String(64))
+ status = Column(String(30)) # success, failure, pending, in_progress
+
+ deployed_at = Column(DateTime, nullable=False)
+ completed_at = Column(DateTime)
+ duration_seconds = Column(Float)
+
+ is_rollback = Column(Boolean, default=False)
+ is_failure = Column(Boolean, default=False)
+ triggered_by = Column(String(255)) # user, ci, schedule
+ pr_number = Column(Integer)
+
+ repository = relationship("Repository", back_populates="deployments")
+
+ __table_args__ = (
+ Index("ix_deploy_time", "deployed_at"),
+ Index("ix_deploy_repo", "repository_id"),
+ Index("ix_deploy_env", "environment"),
+ )
+
+
+# ---------------------------------------------------------------------------
+# Incident metrics (for DORA MTTR / Change Failure Rate)
+# ---------------------------------------------------------------------------
+
+class IncidentMetric(Base):
+ __tablename__ = "incident_metrics"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ snapshot_id = Column(Integer, ForeignKey("metric_snapshots.id"), nullable=True)
+ repository_id = Column(Integer, ForeignKey("repositories.id"), nullable=True)
+
+ provider = Column(String(50)) # github_issues, pagerduty, opsgenie
+ external_id = Column(String(255))
+ title = Column(Text)
+ severity = Column(String(20)) # p0, p1, p2, p3, p4
+
+ opened_at = Column(DateTime, nullable=False)
+ resolved_at = Column(DateTime)
+ time_to_recovery_hours = Column(Float)
+
+ deployment_id = Column(Integer, ForeignKey("deployment_metrics.id"), nullable=True)
+ caused_by_change = Column(Boolean, default=False)
+ root_cause = Column(Text)
+
+ __table_args__ = (
+ Index("ix_incident_opened", "opened_at"),
+ )
+
+
+# ---------------------------------------------------------------------------
+# AI usage metrics
+# ---------------------------------------------------------------------------
+
+class AIUsageMetric(Base):
+ __tablename__ = "ai_usage_metrics"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ snapshot_id = Column(Integer, ForeignKey("metric_snapshots.id"), nullable=True)
+ developer_id = Column(Integer, ForeignKey("developers.id"), nullable=True)
+
+ tool = Column(String(50), nullable=False) # copilot, cursor, claude_code, aider
+ date = Column(DateTime, nullable=False)
+
+ suggestions_shown = Column(Integer, default=0)
+ suggestions_accepted = Column(Integer, default=0)
+ lines_suggested = Column(Integer, default=0)
+ lines_accepted = Column(Integer, default=0)
+ active_users = Column(Integer, default=0)
+
+ language = Column(String(50))
+ editor = Column(String(50))
+ model = Column(String(100))
+
+ chat_sessions = Column(Integer, default=0)
+ chat_messages = Column(Integer, default=0)
+ inline_completions = Column(Integer, default=0)
+
+ cost_usd = Column(Float, default=0.0)
+
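+    # "metadata" is reserved on SQLAlchemy Declarative models, so expose the
+    # attribute as metadata_ while keeping "metadata" as the database column name.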
+ metadata_ = Column("metadata", JSON)
+
+ __table_args__ = (
+ Index("ix_ai_date", "date"),
+ Index("ix_ai_tool", "tool"),
+ Index("ix_ai_dev", "developer_id"),
+ )
+
+
+# ---------------------------------------------------------------------------
+# Issue tracker metrics (JIRA / Linear)
+# ---------------------------------------------------------------------------
+
+class IssueMetric(Base):
+ __tablename__ = "issue_metrics"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ snapshot_id = Column(Integer, ForeignKey("metric_snapshots.id"), nullable=True)
+
+ provider = Column(String(50), nullable=False) # jira, linear
+ external_id = Column(String(255))
+ project_key = Column(String(50))
+ issue_type = Column(String(50))
+ priority = Column(String(50))
+ status = Column(String(50))
+ status_category = Column(String(20))
+
+ assignee = Column(String(255))
+ reporter = Column(String(255))
+
+ created_at = Column(DateTime)
+ started_at = Column(DateTime)
+ resolved_at = Column(DateTime)
+
+ story_points = Column(Float)
+ cycle_time_hours = Column(Float)
+ time_to_start_hours = Column(Float)
+ time_in_progress_hours = Column(Float)
+
+ sprint_id = Column(String(100))
+ sprint_name = Column(String(255))
+ labels = Column(JSON)
+ components = Column(JSON)
+
+ __table_args__ = (
+ Index("ix_issue_created", "created_at"),
+ Index("ix_issue_provider", "provider"),
+ Index("ix_issue_project", "project_key"),
+ )
+
+
+# ---------------------------------------------------------------------------
+# DX Survey models
+# ---------------------------------------------------------------------------
+
+class Survey(Base):
+ __tablename__ = "surveys"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ title = Column(String(255), nullable=False)
+ description = Column(Text)
+ survey_type = Column(String(50)) # pulse, full_dx, custom
+ status = Column(String(20), default="draft") # draft, active, closed
+
+ created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
+ starts_at = Column(DateTime)
+ ends_at = Column(DateTime)
+ recurrence = Column(String(20)) # none, weekly, monthly
+
+ target_teams = Column(JSON)
+ target_roles = Column(JSON)
+
+ questions = relationship("SurveyQuestion", back_populates="survey")
+ responses = relationship("SurveyResponse", back_populates="survey")
+
+
+class SurveyQuestion(Base):
+ __tablename__ = "survey_questions"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ survey_id = Column(Integer, ForeignKey("surveys.id"), nullable=False)
+ question_text = Column(Text, nullable=False)
+ question_type = Column(String(30), nullable=False) # rating, multiple_choice, text
+ options = Column(JSON)
+ order = Column(Integer, default=0)
+ required = Column(Boolean, default=True)
+ category = Column(String(50))
+
+ survey = relationship("Survey", back_populates="questions")
+
+
+class SurveyResponse(Base):
+ __tablename__ = "survey_responses"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ survey_id = Column(Integer, ForeignKey("surveys.id"), nullable=False)
+ developer_id = Column(Integer, ForeignKey("developers.id"), nullable=True)
+
+ submitted_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
+ answers = Column(JSON, nullable=False)
+ sentiment_score = Column(Float)
+
+ survey = relationship("Survey", back_populates="responses")
+
+
+# ---------------------------------------------------------------------------
+# DORA aggregated snapshots
+# ---------------------------------------------------------------------------
+
+class DORASnapshot(Base):
+ __tablename__ = "dora_snapshots"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ snapshot_id = Column(Integer, ForeignKey("metric_snapshots.id"), nullable=True)
+ team_id = Column(Integer, ForeignKey("teams.id"), nullable=True)
+ repository_id = Column(Integer, ForeignKey("repositories.id"), nullable=True)
+
+ period_start = Column(DateTime, nullable=False)
+ period_end = Column(DateTime, nullable=False)
+
+ deployment_frequency = Column(Float) # deploys per day
+ lead_time_hours = Column(Float) # median lead time for changes
+ change_failure_rate = Column(Float) # percentage
+ mttr_hours = Column(Float) # mean time to recovery
+
+ level = Column(String(20)) # elite, high, medium, low
+
+ details = Column(JSON)
+
+ __table_args__ = (
+ Index("ix_dora_period", "period_start", "period_end"),
+ )
diff --git a/src/wellcode_cli/db/repository.py b/src/wellcode_cli/db/repository.py
new file mode 100644
index 0000000..fd75453
--- /dev/null
+++ b/src/wellcode_cli/db/repository.py
@@ -0,0 +1,336 @@
+"""Data access layer for all persistent metric storage."""
+
+from datetime import datetime
+from typing import Optional
+
+from sqlalchemy import func, select
+from sqlalchemy.orm import Session
+
+from .models import (
+ AIUsageMetric,
+ DeploymentMetric,
+ Developer,
+ DORASnapshot,
+ IncidentMetric,
+ IssueMetric,
+ MetricSnapshot,
+ Organization,
+ PullRequestMetric,
+ Repository,
+ Survey,
+ SurveyResponse,
+ Team,
+)
+
+
+class BaseRepository:
+ def __init__(self, session: Session):
+ self.session = session
+
+ def commit(self):
+ self.session.commit()
+
+ def rollback(self):
+ self.session.rollback()
+
+
+class OrganizationRepo(BaseRepository):
+ def get_or_create(self, name: str, provider: str, **kwargs) -> Organization:
+ org = self.session.execute(
+ select(Organization).where(Organization.name == name)
+ ).scalar_one_or_none()
+ if org is None:
+ org = Organization(name=name, provider=provider, **kwargs)
+ self.session.add(org)
+ self.session.flush()
+ return org
+
+
+class RepositoryRepo(BaseRepository):
+ def get_or_create(self, full_name: str, provider: str, **kwargs) -> Repository:
+ repo = self.session.execute(
+ select(Repository).where(
+ Repository.full_name == full_name,
+ Repository.provider == provider,
+ )
+ ).scalar_one_or_none()
+ if repo is None:
+ repo = Repository(
+ full_name=full_name, name=full_name.split("/")[-1],
+ provider=provider, **kwargs,
+ )
+ self.session.add(repo)
+ self.session.flush()
+ return repo
+
+
+class DeveloperRepo(BaseRepository):
+ def get_or_create(self, username: str, provider: str, **kwargs) -> Developer:
+ dev = self.session.execute(
+ select(Developer).where(
+ Developer.username == username,
+ Developer.provider == provider,
+ )
+ ).scalar_one_or_none()
+ if dev is None:
+ dev = Developer(username=username, provider=provider, **kwargs)
+ self.session.add(dev)
+ self.session.flush()
+ return dev
+
+
+class TeamRepo(BaseRepository):
+ def get_or_create(self, name: str, org_id: Optional[int] = None, **kwargs) -> Team:
+ q = select(Team).where(Team.name == name)
+ if org_id:
+ q = q.where(Team.organization_id == org_id)
+ team = self.session.execute(q).scalar_one_or_none()
+ if team is None:
+ team = Team(name=name, organization_id=org_id, **kwargs)
+ self.session.add(team)
+ self.session.flush()
+ return team
+
+
+class SnapshotRepo(BaseRepository):
+ def create(self, period_start: datetime, period_end: datetime,
+ source: str = "all") -> MetricSnapshot:
+ snap = MetricSnapshot(
+ period_start=period_start,
+ period_end=period_end,
+ source=source,
+ status="running",
+ )
+ self.session.add(snap)
+ self.session.flush()
+ return snap
+
+ def complete(self, snapshot: MetricSnapshot, summary: dict,
+ duration_seconds: float):
+ snapshot.status = "completed"
+ snapshot.summary = summary
+ snapshot.duration_seconds = duration_seconds
+ self.session.flush()
+
+ def fail(self, snapshot: MetricSnapshot, error: str):
+ snapshot.status = "failed"
+ snapshot.summary = {"error": error}
+ self.session.flush()
+
+ def get_latest(self, source: Optional[str] = None) -> Optional[MetricSnapshot]:
+ q = select(MetricSnapshot).order_by(MetricSnapshot.collected_at.desc())
+ if source:
+ q = q.where(MetricSnapshot.source == source)
+ return self.session.execute(q.limit(1)).scalar_one_or_none()
+
+ def list_recent(self, limit: int = 20):
+ return self.session.execute(
+ select(MetricSnapshot)
+ .order_by(MetricSnapshot.collected_at.desc())
+ .limit(limit)
+ ).scalars().all()
+
+
+class PullRequestRepo(BaseRepository):
+ def upsert(self, pr: PullRequestMetric) -> PullRequestMetric:
+ existing = self.session.execute(
+ select(PullRequestMetric).where(
+ PullRequestMetric.provider == pr.provider,
+ PullRequestMetric.external_id == pr.external_id,
+ )
+ ).scalar_one_or_none()
+ if existing:
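+            # Refresh only non-null incoming fields so partial collections
+            # don't wipe previously stored values; identity columns are kept.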
+ for col in PullRequestMetric.__table__.columns:
+ if col.name not in ("id", "provider", "external_id"):
+ val = getattr(pr, col.name)
+ if val is not None:
+ setattr(existing, col.name, val)
+ self.session.flush()
+ return existing
+ self.session.add(pr)
+ self.session.flush()
+ return pr
+
+ def get_by_period(self, start: datetime, end: datetime,
+ repo_id: Optional[int] = None,
+ author_id: Optional[int] = None):
+ q = select(PullRequestMetric).where(
+ PullRequestMetric.created_at >= start,
+ PullRequestMetric.created_at <= end,
+ )
+ if repo_id:
+ q = q.where(PullRequestMetric.repository_id == repo_id)
+ if author_id:
+ q = q.where(PullRequestMetric.author_id == author_id)
+ return self.session.execute(
+ q.order_by(PullRequestMetric.created_at)
+ ).scalars().all()
+
+ def get_merged_by_period(self, start: datetime, end: datetime,
+ repo_id: Optional[int] = None):
+ q = select(PullRequestMetric).where(
+ PullRequestMetric.merged_at >= start,
+ PullRequestMetric.merged_at <= end,
+ PullRequestMetric.state == "merged",
+ )
+ if repo_id:
+ q = q.where(PullRequestMetric.repository_id == repo_id)
+ return self.session.execute(q).scalars().all()
+
+
+class DeploymentRepo(BaseRepository):
+ def add(self, deploy: DeploymentMetric) -> DeploymentMetric:
+ self.session.add(deploy)
+ self.session.flush()
+ return deploy
+
+ def get_by_period(self, start: datetime, end: datetime,
+ repo_id: Optional[int] = None,
+ environment: Optional[str] = None):
+ q = select(DeploymentMetric).where(
+ DeploymentMetric.deployed_at >= start,
+ DeploymentMetric.deployed_at <= end,
+ )
+ if repo_id:
+ q = q.where(DeploymentMetric.repository_id == repo_id)
+ if environment:
+ q = q.where(DeploymentMetric.environment == environment)
+ return self.session.execute(
+ q.order_by(DeploymentMetric.deployed_at)
+ ).scalars().all()
+
+
+class IncidentRepo(BaseRepository):
+ def add(self, incident: IncidentMetric) -> IncidentMetric:
+ self.session.add(incident)
+ self.session.flush()
+ return incident
+
+ def get_by_period(self, start: datetime, end: datetime):
+ return self.session.execute(
+ select(IncidentMetric).where(
+ IncidentMetric.opened_at >= start,
+ IncidentMetric.opened_at <= end,
+ ).order_by(IncidentMetric.opened_at)
+ ).scalars().all()
+
+
+class AIUsageRepo(BaseRepository):
+ def add(self, metric: AIUsageMetric) -> AIUsageMetric:
+ self.session.add(metric)
+ self.session.flush()
+ return metric
+
+ def get_by_period(self, start: datetime, end: datetime,
+ tool: Optional[str] = None):
+ q = select(AIUsageMetric).where(
+ AIUsageMetric.date >= start,
+ AIUsageMetric.date <= end,
+ )
+ if tool:
+ q = q.where(AIUsageMetric.tool == tool)
+ return self.session.execute(q.order_by(AIUsageMetric.date)).scalars().all()
+
+ def get_summary(self, start: datetime, end: datetime):
+ return self.session.execute(
+ select(
+ AIUsageMetric.tool,
+ func.sum(AIUsageMetric.suggestions_shown).label("total_shown"),
+ func.sum(AIUsageMetric.suggestions_accepted).label("total_accepted"),
+ func.sum(AIUsageMetric.lines_accepted).label("total_lines"),
+ func.max(AIUsageMetric.active_users).label("peak_users"),
+ func.sum(AIUsageMetric.cost_usd).label("total_cost"),
+ ).where(
+ AIUsageMetric.date >= start,
+ AIUsageMetric.date <= end,
+ ).group_by(AIUsageMetric.tool)
+ ).all()
+
+
+class IssueRepo(BaseRepository):
+ def add(self, issue: IssueMetric) -> IssueMetric:
+ self.session.add(issue)
+ self.session.flush()
+ return issue
+
+ def get_by_period(self, start: datetime, end: datetime,
+ provider: Optional[str] = None):
+ q = select(IssueMetric).where(
+ IssueMetric.created_at >= start,
+ IssueMetric.created_at <= end,
+ )
+ if provider:
+ q = q.where(IssueMetric.provider == provider)
+ return self.session.execute(q.order_by(IssueMetric.created_at)).scalars().all()
+
+
+class DORARepo(BaseRepository):
+ def save(self, dora: DORASnapshot) -> DORASnapshot:
+ self.session.add(dora)
+ self.session.flush()
+ return dora
+
+ def get_latest(self, team_id: Optional[int] = None,
+ repo_id: Optional[int] = None) -> Optional[DORASnapshot]:
+ q = select(DORASnapshot).order_by(DORASnapshot.period_end.desc())
+ if team_id:
+ q = q.where(DORASnapshot.team_id == team_id)
+ if repo_id:
+ q = q.where(DORASnapshot.repository_id == repo_id)
+ return self.session.execute(q.limit(1)).scalar_one_or_none()
+
+ def get_history(self, limit: int = 30, team_id: Optional[int] = None):
+ q = select(DORASnapshot).order_by(DORASnapshot.period_end.desc())
+ if team_id:
+ q = q.where(DORASnapshot.team_id == team_id)
+ return self.session.execute(q.limit(limit)).scalars().all()
+
+
+class SurveyRepo(BaseRepository):
+ def create_survey(self, survey: Survey) -> Survey:
+ self.session.add(survey)
+ self.session.flush()
+ return survey
+
+ def get_active_surveys(self):
+ return self.session.execute(
+ select(Survey).where(Survey.status == "active")
+ ).scalars().all()
+
+ def add_response(self, response: SurveyResponse) -> SurveyResponse:
+ self.session.add(response)
+ self.session.flush()
+ return response
+
+ def get_responses(self, survey_id: int):
+ return self.session.execute(
+ select(SurveyResponse).where(SurveyResponse.survey_id == survey_id)
+ ).scalars().all()
+
+
+class MetricStore:
+ """Unified access to all metric repositories."""
+
+ def __init__(self, session: Session):
+ self.session = session
+ self.organizations = OrganizationRepo(session)
+ self.repositories = RepositoryRepo(session)
+ self.developers = DeveloperRepo(session)
+ self.teams = TeamRepo(session)
+ self.snapshots = SnapshotRepo(session)
+ self.pull_requests = PullRequestRepo(session)
+ self.deployments = DeploymentRepo(session)
+ self.incidents = IncidentRepo(session)
+ self.ai_usage = AIUsageRepo(session)
+ self.issues = IssueRepo(session)
+ self.dora = DORARepo(session)
+ self.surveys = SurveyRepo(session)
+
+ def commit(self):
+ self.session.commit()
+
+ def rollback(self):
+ self.session.rollback()
+
+ def close(self):
+ self.session.close()
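+
+
+# Typical collection flow (sketch; names match the repos wired above):
+#     store = MetricStore(session)
+#     snap = store.snapshots.create(period_start, period_end, source="github")
+#     for pr in collected_prs:
+#         store.pull_requests.upsert(pr)
+#     store.snapshots.complete(snap, {"prs": len(collected_prs)}, elapsed)
+#     store.commit()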
diff --git a/src/wellcode_cli/github/__init__.py b/src/wellcode_cli/github/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/github/github_format_ai.py b/src/wellcode_cli/github/github_format_ai.py
index 877c373..3b1380d 100644
--- a/src/wellcode_cli/github/github_format_ai.py
+++ b/src/wellcode_cli/github/github_format_ai.py
@@ -101,6 +101,10 @@ def get_ai_analysis(all_metrics):
if "linear" in all_metrics:
metrics_summary["linear"] = all_metrics["linear"]
+ # JIRA metrics
+ if "jira" in all_metrics:
+ metrics_summary["jira"] = all_metrics["jira"]
+
# Split metrics
if "split" in all_metrics:
metrics_summary["split"] = all_metrics["split"]
diff --git a/src/wellcode_cli/github/models/__init__.py b/src/wellcode_cli/github/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/integrations/__init__.py b/src/wellcode_cli/integrations/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/integrations/bitbucket/__init__.py b/src/wellcode_cli/integrations/bitbucket/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/integrations/bitbucket/provider.py b/src/wellcode_cli/integrations/bitbucket/provider.py
new file mode 100644
index 0000000..6125655
--- /dev/null
+++ b/src/wellcode_cli/integrations/bitbucket/provider.py
@@ -0,0 +1,178 @@
+"""Bitbucket SCM provider implementation."""
+
+import logging
+from datetime import datetime, timezone
+from typing import Optional
+
+from ...config import get_config_value
+from ..scm_protocol import (
+ SCMDeployment,
+ SCMPullRequest,
+ SCMRepository,
+ SCMTeam,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class BitbucketProvider:
+ """Bitbucket Cloud implementation of the SCM provider protocol."""
+
+ def __init__(
+ self,
+ username: Optional[str] = None,
+ app_password: Optional[str] = None,
+ workspace: Optional[str] = None,
+ ):
+ self._username = username or get_config_value("BITBUCKET_USERNAME")
+ self._app_password = app_password or get_config_value("BITBUCKET_APP_PASSWORD")
+ self._workspace = workspace or get_config_value("BITBUCKET_WORKSPACE")
+ self._client = None
+
+ @property
+ def provider_name(self) -> str:
+ return "bitbucket"
+
+ @property
+ def client(self):
+ if self._client is None:
+ try:
+ from atlassian import Bitbucket
+ except ImportError as err:
+ raise ImportError(
+ "atlassian-python-api is required: pip install atlassian-python-api"
+ ) from err
+ if not self._username or not self._app_password:
+ raise ValueError("Bitbucket credentials not configured")
+ self._client = Bitbucket(
+ url="https://api.bitbucket.org",
+ username=self._username,
+ password=self._app_password,
+ cloud=True,
+ )
+ return self._client
+
+ def get_repositories(self) -> list[SCMRepository]:
+ repos = []
+ if not self._workspace:
+ return repos
+
+ try:
+ data = self.client.get(
+ f"/2.0/repositories/{self._workspace}",
+ params={"pagelen": 100},
+ )
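+            # Only the first page is read; for workspaces with more than 100
+            # repositories, follow the "next" link in the response.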
+ for r in data.get("values", []):
+ mainbranch = r.get("mainbranch", {})
+ repos.append(SCMRepository(
+ provider="bitbucket",
+ external_id=r.get("uuid", ""),
+ name=r.get("name", ""),
+ full_name=r.get("full_name", ""),
+ default_branch=mainbranch.get("name", "main") if mainbranch else "main",
+ url=r.get("links", {}).get("html", {}).get("href", ""),
+ is_active=True,
+ ))
+ except Exception as e:
+ logger.warning("Error fetching Bitbucket repos: %s", e)
+ return repos
+
+ def get_pull_requests(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ author: Optional[str] = None,
+ ) -> list[SCMPullRequest]:
+ results = []
+
+ if repo_full_name:
+ repo_slugs = [repo_full_name]
+ elif self._workspace:
+ repo_slugs = [r.full_name for r in self.get_repositories()]
+ else:
+ return results
+
+ for repo_slug in repo_slugs:
+ try:
+                data = self.client.get(
+                    f"/2.0/repositories/{repo_slug}/pullrequests",
+                    # Sort newest-first so the created_at early-exit below is
+                    # safe; only the first page (50 PRs per repo) is scanned.
+                    params={"state": "ALL", "pagelen": 50, "sort": "-created_on"},
+                )
+ for pr in data.get("values", []):
+ created_str = pr.get("created_on", "")
+ if not created_str:
+ continue
+ created_at = datetime.fromisoformat(
+ created_str.replace("Z", "+00:00")
+ )
+                    # Normalize naive window bounds to UTC; re-stamping an
+                    # already-aware datetime would silently shift the window.
+                    since_utc = since if since.tzinfo else since.replace(tzinfo=timezone.utc)
+                    until_utc = until if until.tzinfo else until.replace(tzinfo=timezone.utc)
+                    if created_at < since_utc:
+                        break
+                    if created_at > until_utc:
+                        continue
+
+ pr_author = pr.get("author", {}).get("display_name", "")
+ if author and pr_author != author:
+ continue
+
+ state_map = {"OPEN": "open", "MERGED": "merged", "DECLINED": "closed"}
+ state = state_map.get(pr.get("state", ""), "open")
+
+ merged_at = None
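+                    # Bitbucket doesn't expose a merged timestamp directly;
+                    # approximate with updated_on when a merge commit exists.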
+ if pr.get("merge_commit") and pr.get("updated_on"):
+ merged_at = datetime.fromisoformat(
+ pr["updated_on"].replace("Z", "+00:00")
+ )
+
+ source = pr.get("source", {}).get("branch", {})
+ dest = pr.get("destination", {}).get("branch", {})
+
+ results.append(SCMPullRequest(
+ provider="bitbucket",
+ external_id=str(pr.get("id", "")),
+ number=pr.get("id", 0),
+ title=pr.get("title", ""),
+ state=state,
+ author=pr_author,
+ base_branch=dest.get("name", "main"),
+ head_branch=source.get("name", ""),
+ created_at=created_at,
+ merged_at=merged_at,
+ comment_count=pr.get("comment_count", 0),
+ is_revert="revert" in pr.get("title", "").lower(),
+ is_hotfix="hotfix" in pr.get("title", "").lower(),
+ repository_full_name=repo_slug,
+ url=pr.get("links", {}).get("html", {}).get("href", ""),
+ ))
+ except Exception as e:
+ logger.warning("Error fetching PRs from %s: %s", repo_slug, e)
+
+ return results
+
+ def get_deployments(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ environment: Optional[str] = None,
+ ) -> list[SCMDeployment]:
+        # Bitbucket Pipelines has no first-class deployments API like GitHub's;
+        # deployment steps would have to be parsed out of pipeline results.
+ return []
+
+ def get_teams(self) -> list[SCMTeam]:
+ teams = []
+ if not self._workspace:
+ return teams
+ try:
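+            # Bitbucket kept the legacy 1.0 groups endpoint when the rest of
+            # the 1.0 API was retired; there is no 2.0 equivalent for groups.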
+ data = self.client.get(
+ f"/1.0/groups/{self._workspace}",
+ )
+            for g in (data if isinstance(data, list) else []):
+ members = [m.get("username", "") for m in g.get("members", [])]
+ teams.append(SCMTeam(
+ provider="bitbucket",
+ external_id=g.get("slug", ""),
+ name=g.get("name", ""),
+ slug=g.get("slug", ""),
+ members=members,
+ ))
+ except Exception as e:
+ logger.debug("Error fetching Bitbucket teams: %s", e)
+ return teams
diff --git a/src/wellcode_cli/integrations/github/__init__.py b/src/wellcode_cli/integrations/github/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/integrations/github/provider.py b/src/wellcode_cli/integrations/github/provider.py
new file mode 100644
index 0000000..6e9a0ba
--- /dev/null
+++ b/src/wellcode_cli/integrations/github/provider.py
@@ -0,0 +1,233 @@
+"""GitHub SCM provider implementation using the unified interface."""
+
+import logging
+from datetime import datetime, timezone
+from typing import Optional
+
+from github import Github
+
+from ...config import get_config_value
+from ..scm_protocol import (
+ SCMDeployment,
+ SCMPullRequest,
+ SCMRepository,
+ SCMTeam,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class GitHubProvider:
+ """GitHub implementation of the SCM provider protocol."""
+
+ def __init__(self, token: Optional[str] = None, org: Optional[str] = None):
+ self._token = token or get_config_value("GITHUB_TOKEN") or get_config_value("GITHUB_USER_TOKEN")
+ self._org = org or get_config_value("GITHUB_ORG")
+ self._client: Optional[Github] = None
+
+ @property
+ def provider_name(self) -> str:
+ return "github"
+
+ @property
+ def client(self) -> Github:
+ if self._client is None:
+ if not self._token:
+ raise ValueError("GitHub token not configured")
+ self._client = Github(self._token)
+ return self._client
+
+ def get_repositories(self) -> list[SCMRepository]:
+ repos = []
+ if self._org:
+ org = self.client.get_organization(self._org)
+ gh_repos = org.get_repos()
+ else:
+ gh_repos = self.client.get_user().get_repos()
+
+ for r in gh_repos:
+ repos.append(SCMRepository(
+ provider="github",
+ external_id=str(r.id),
+ name=r.name,
+ full_name=r.full_name,
+ default_branch=r.default_branch or "main",
+ url=r.html_url,
+ is_active=not r.archived,
+ ))
+ return repos
+
+ def get_pull_requests(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ author: Optional[str] = None,
+ ) -> list[SCMPullRequest]:
+ results = []
+
+ if repo_full_name:
+ repo_list = [self.client.get_repo(repo_full_name)]
+ elif self._org:
+ repo_list = list(self.client.get_organization(self._org).get_repos())
+ else:
+ repo_list = list(self.client.get_user().get_repos())
+
+ since_utc = since.replace(tzinfo=timezone.utc) if since.tzinfo is None else since
+ until_utc = until.replace(tzinfo=timezone.utc) if until.tzinfo is None else until
+
+ for repo in repo_list:
+ try:
+                # Sort by creation date (newest first) so the early `break`
+                # below is correct; sorting by update date would skip PRs that
+                # were created long ago but touched recently.
+                for pr in repo.get_pulls(state="all", sort="created", direction="desc"):
+ created = pr.created_at.replace(tzinfo=timezone.utc) if pr.created_at.tzinfo is None else pr.created_at
+ if created < since_utc:
+ break
+ if created > until_utc:
+ continue
+                    # pr.user can be None for deleted ("ghost") accounts
+                    pr_author = pr.user.login if pr.user else ""
+                    if author and pr_author != author:
+                        continue
+
+ merged_at = None
+ if pr.merged_at:
+ merged_at = pr.merged_at.replace(tzinfo=timezone.utc) if pr.merged_at.tzinfo is None else pr.merged_at
+
+ state = "merged" if pr.merged else ("closed" if pr.state == "closed" else "open")
+
+ first_commit_at = None
+ if pr.merged:
+ try:
+ commits = list(pr.get_commits())
+ if commits:
+ fc = commits[0].commit.author.date
+ first_commit_at = fc.replace(tzinfo=timezone.utc) if fc.tzinfo is None else fc
+ except Exception:
+ pass
+
+ reviews = []
+ try:
+ reviews = list(pr.get_reviews())
+ except Exception:
+ pass
+
+                    first_review_at = None
+                    # Guard against reviews without submitted_at; min() on an
+                    # empty sequence raises ValueError.
+                    submitted = [r.submitted_at for r in reviews if r.submitted_at]
+                    if submitted:
+                        fr = min(submitted)
+                        first_review_at = fr.replace(tzinfo=timezone.utc) if fr.tzinfo is None else fr
+
+ review_cycles = sum(1 for r in reviews if r.state == "CHANGES_REQUESTED")
+ reviewer_set = {r.user.login for r in reviews if r.user}
+
+                    is_self_merged = (
+                        pr.merged and pr.merged_by is not None
+                        and pr_author == pr.merged_by.login
+                    )
+
+ results.append(SCMPullRequest(
+ provider="github",
+ external_id=str(pr.id),
+ number=pr.number,
+ title=pr.title,
+ state=state,
+                        author=pr_author,
+ base_branch=pr.base.ref,
+ head_branch=pr.head.ref,
+ created_at=created,
+ updated_at=pr.updated_at,
+ merged_at=merged_at,
+ closed_at=pr.closed_at,
+ first_commit_at=first_commit_at,
+ first_review_at=first_review_at,
+ additions=pr.additions,
+ deletions=pr.deletions,
+ changed_files=pr.changed_files,
+ commits_count=pr.commits,
+ review_count=len(reviews),
+ reviewer_count=len(reviewer_set),
+ comment_count=pr.comments + pr.review_comments,
+ review_cycles=review_cycles,
+ is_revert="revert" in pr.title.lower(),
+ is_hotfix="hotfix" in pr.title.lower() or any(
+ label.name.lower() == "hotfix" for label in pr.labels
+ ),
+ is_self_merged=is_self_merged,
+ labels=[label.name for label in pr.labels],
+ reviewers=list(reviewer_set),
+ repository_full_name=repo.full_name,
+ url=pr.html_url,
+ ))
+ except Exception as e:
+ logger.warning("Error fetching PRs from %s: %s", repo.full_name, e)
+
+ return results
+
+ def get_deployments(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ environment: Optional[str] = None,
+ ) -> list[SCMDeployment]:
+ results = []
+
+ if repo_full_name:
+ repo_list = [self.client.get_repo(repo_full_name)]
+ elif self._org:
+ repo_list = list(self.client.get_organization(self._org).get_repos())
+ else:
+ repo_list = list(self.client.get_user().get_repos())
+
+ since_utc = since.replace(tzinfo=timezone.utc) if since.tzinfo is None else since
+ until_utc = until.replace(tzinfo=timezone.utc) if until.tzinfo is None else until
+
+ for repo in repo_list:
+ try:
+ for deploy in repo.get_deployments():
+ created = deploy.created_at.replace(tzinfo=timezone.utc) if deploy.created_at.tzinfo is None else deploy.created_at
+ if created < since_utc:
+ break
+ if created > until_utc:
+ continue
+
+ env = deploy.environment
+ if environment and env != environment:
+ continue
+
+ statuses = list(deploy.get_statuses())
+ status = statuses[0].state if statuses else "pending"
+ completed_at = statuses[0].created_at if statuses else None
+
+ duration = None
+ if completed_at:
+ completed_at = completed_at.replace(tzinfo=timezone.utc) if completed_at.tzinfo is None else completed_at
+ duration = (completed_at - created).total_seconds()
+
+ results.append(SCMDeployment(
+ provider="github",
+ external_id=str(deploy.id),
+ environment=env,
+ ref=deploy.ref,
+ sha=deploy.sha,
+ status=status,
+ deployed_at=created,
+ completed_at=completed_at,
+ duration_seconds=duration,
+ is_rollback=False,
+ triggered_by=deploy.creator.login if deploy.creator else "",
+ repository_full_name=repo.full_name,
+ ))
+ except Exception as e:
+ logger.debug("Error fetching deployments from %s: %s", repo.full_name, e)
+
+ return results
+
+ def get_teams(self) -> list[SCMTeam]:
+ if not self._org:
+ return []
+ teams = []
+ org = self.client.get_organization(self._org)
+ for t in org.get_teams():
+ members = [m.login for m in t.get_members()]
+ teams.append(SCMTeam(
+ provider="github",
+ external_id=str(t.id),
+ name=t.name,
+ slug=t.slug,
+ members=members,
+ ))
+ return teams
diff --git a/src/wellcode_cli/integrations/gitlab/__init__.py b/src/wellcode_cli/integrations/gitlab/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/integrations/gitlab/provider.py b/src/wellcode_cli/integrations/gitlab/provider.py
new file mode 100644
index 0000000..0177f26
--- /dev/null
+++ b/src/wellcode_cli/integrations/gitlab/provider.py
@@ -0,0 +1,202 @@
+"""GitLab SCM provider implementation."""
+
+import logging
+from datetime import datetime
+from typing import Optional
+
+from ...config import get_config_value
+from ..scm_protocol import (
+ SCMDeployment,
+ SCMPullRequest,
+ SCMRepository,
+ SCMTeam,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class GitLabProvider:
+ """GitLab implementation of the SCM provider protocol."""
+
+ def __init__(self, token: Optional[str] = None, url: Optional[str] = None):
+ self._token = token or get_config_value("GITLAB_TOKEN")
+ self._url = url or get_config_value("GITLAB_URL") or "https://gitlab.com"
+ self._client = None
+
+ @property
+ def provider_name(self) -> str:
+ return "gitlab"
+
+ @property
+ def client(self):
+ if self._client is None:
+ try:
+ import gitlab
+ except ImportError as err:
+ raise ImportError("python-gitlab is required: pip install python-gitlab") from err
+ if not self._token:
+ raise ValueError("GitLab token not configured")
+ self._client = gitlab.Gitlab(self._url, private_token=self._token)
+ self._client.auth()
+ return self._client
+
+ def get_repositories(self) -> list[SCMRepository]:
+ repos = []
+ for project in self.client.projects.list(membership=True, iterator=True):
+ repos.append(SCMRepository(
+ provider="gitlab",
+ external_id=str(project.id),
+ name=project.name,
+ full_name=project.path_with_namespace,
+ default_branch=project.default_branch or "main",
+ url=project.web_url,
+ is_active=not project.archived,
+ ))
+ return repos
+
+ def get_pull_requests(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ author: Optional[str] = None,
+ ) -> list[SCMPullRequest]:
+ results = []
+ since_str = since.strftime("%Y-%m-%dT%H:%M:%SZ")
+ until_str = until.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ if repo_full_name:
+ projects = [self.client.projects.get(repo_full_name)]
+ else:
+ projects = list(self.client.projects.list(membership=True, iterator=True))
+
+ for project in projects:
+ try:
+ mrs = project.mergerequests.list(
+ created_after=since_str,
+ created_before=until_str,
+ state="all",
+ iterator=True,
+ )
+ for mr in mrs:
+ if author and mr.author.get("username") != author:
+ continue
+
+ created_at = datetime.fromisoformat(
+ mr.created_at.replace("Z", "+00:00")
+ )
+ merged_at = None
+ if mr.merged_at:
+ merged_at = datetime.fromisoformat(
+ mr.merged_at.replace("Z", "+00:00")
+ )
+ closed_at = None
+ if mr.closed_at:
+ closed_at = datetime.fromisoformat(
+ mr.closed_at.replace("Z", "+00:00")
+ )
+
+ if mr.state == "merged":
+ state = "merged"
+ elif mr.state == "closed":
+ state = "closed"
+ else:
+ state = "open"
+
+                    # changes_count only appears on detail payloads and is a
+                    # string that may be capped (e.g. "1000+")
+                    changes = getattr(mr, "changes_count", None) or 0
+
+ results.append(SCMPullRequest(
+ provider="gitlab",
+ external_id=str(mr.id),
+ number=mr.iid,
+ title=mr.title,
+ state=state,
+ author=mr.author.get("username", ""),
+ base_branch=mr.target_branch,
+ head_branch=mr.source_branch,
+ created_at=created_at,
+ merged_at=merged_at,
+ closed_at=closed_at,
+ additions=0,
+ deletions=0,
+                        changed_files=int(str(changes).rstrip("+")) if changes else 0,
+ commits_count=0,
+ review_count=0,
+ reviewer_count=len(mr.reviewers) if mr.reviewers else 0,
+ comment_count=mr.user_notes_count or 0,
+ is_revert="revert" in mr.title.lower(),
+ is_hotfix="hotfix" in mr.title.lower(),
+ labels=mr.labels or [],
+ reviewers=[r.get("username", "") for r in (mr.reviewers or [])],
+ repository_full_name=project.path_with_namespace,
+ url=mr.web_url,
+ ))
+ except Exception as e:
+ logger.warning("Error fetching MRs from %s: %s", project.path_with_namespace, e)
+
+ return results
+
+ def get_deployments(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ environment: Optional[str] = None,
+ ) -> list[SCMDeployment]:
+ results = []
+
+ if repo_full_name:
+ projects = [self.client.projects.get(repo_full_name)]
+ else:
+ projects = list(self.client.projects.list(membership=True, iterator=True))
+
+ for project in projects:
+ try:
+ deployments = project.deployments.list(
+ updated_after=since.strftime("%Y-%m-%dT%H:%M:%SZ"),
+ updated_before=until.strftime("%Y-%m-%dT%H:%M:%SZ"),
+ iterator=True,
+ )
+ for d in deployments:
+ if environment and d.environment != environment:
+ continue
+
+ deployed_at = datetime.fromisoformat(
+ d.created_at.replace("Z", "+00:00")
+ )
+ completed_at = None
+ if hasattr(d, "finished_at") and d.finished_at:
+ completed_at = datetime.fromisoformat(
+ d.finished_at.replace("Z", "+00:00")
+ )
+
+ results.append(SCMDeployment(
+ provider="gitlab",
+ external_id=str(d.id),
+ environment=d.environment,
+ ref=d.ref,
+ sha=d.sha,
+ status=d.status,
+ deployed_at=deployed_at,
+ completed_at=completed_at,
+ is_rollback=False,
+ triggered_by=d.user.get("username", "") if d.user else "",
+ repository_full_name=project.path_with_namespace,
+ ))
+ except Exception as e:
+ logger.debug("Error fetching deployments from %s: %s",
+ project.path_with_namespace, e)
+
+ return results
+
+ def get_teams(self) -> list[SCMTeam]:
+ teams = []
+ try:
+ for group in self.client.groups.list(iterator=True):
+ members = [m.username for m in group.members.list(iterator=True)]
+ teams.append(SCMTeam(
+ provider="gitlab",
+ external_id=str(group.id),
+ name=group.name,
+ slug=group.path,
+ members=members,
+ ))
+ except Exception as e:
+ logger.warning("Error fetching GitLab groups: %s", e)
+ return teams
diff --git a/src/wellcode_cli/integrations/jira/__init__.py b/src/wellcode_cli/integrations/jira/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/integrations/linear/__init__.py b/src/wellcode_cli/integrations/linear/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/integrations/scm_protocol.py b/src/wellcode_cli/integrations/scm_protocol.py
new file mode 100644
index 0000000..953d94d
--- /dev/null
+++ b/src/wellcode_cli/integrations/scm_protocol.py
@@ -0,0 +1,112 @@
+"""Protocol defining the unified SCM (Source Code Management) provider interface."""
+
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Optional, Protocol, runtime_checkable
+
+
+@dataclass
+class SCMPullRequest:
+ """Unified pull request / merge request representation."""
+ provider: str
+ external_id: str
+ number: int
+ title: str
+ state: str # open, closed, merged
+ author: str
+ base_branch: str
+ head_branch: str
+ created_at: datetime
+ updated_at: Optional[datetime] = None
+ merged_at: Optional[datetime] = None
+ closed_at: Optional[datetime] = None
+ first_commit_at: Optional[datetime] = None
+ first_review_at: Optional[datetime] = None
+ additions: int = 0
+ deletions: int = 0
+ changed_files: int = 0
+ commits_count: int = 0
+ review_count: int = 0
+ reviewer_count: int = 0
+ comment_count: int = 0
+ review_cycles: int = 0
+ is_revert: bool = False
+ is_hotfix: bool = False
+ is_self_merged: bool = False
+ labels: list = field(default_factory=list)
+ reviewers: list = field(default_factory=list)
+ repository_full_name: str = ""
+ url: str = ""
+
+
+@dataclass
+class SCMDeployment:
+ """Unified deployment event."""
+ provider: str
+ external_id: str
+ environment: str
+ ref: str
+ sha: str
+ status: str # success, failure, pending
+ deployed_at: datetime
+ completed_at: Optional[datetime] = None
+ duration_seconds: Optional[float] = None
+ is_rollback: bool = False
+ triggered_by: str = ""
+ pr_number: Optional[int] = None
+ repository_full_name: str = ""
+
+
+@dataclass
+class SCMRepository:
+ """Unified repository representation."""
+ provider: str
+ external_id: str
+ name: str
+ full_name: str
+ default_branch: str = "main"
+ url: str = ""
+ is_active: bool = True
+
+
+@dataclass
+class SCMTeam:
+ provider: str
+ external_id: str
+ name: str
+ slug: str
+ members: list = field(default_factory=list)
+
+
+@dataclass
+class SCMReview:
+ provider: str
+ reviewer: str
+ state: str # approved, changes_requested, commented
+ submitted_at: datetime
+ body: str = ""
+ pr_number: int = 0
+
+
+@runtime_checkable
+class SCMProvider(Protocol):
+ """Protocol that all SCM integrations must implement."""
+
+ @property
+ def provider_name(self) -> str: ...
+
+ def get_repositories(self) -> list[SCMRepository]: ...
+
+ def get_pull_requests(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ author: Optional[str] = None,
+ ) -> list[SCMPullRequest]: ...
+
+ def get_deployments(
+ self, since: datetime, until: datetime,
+ repo_full_name: Optional[str] = None,
+ environment: Optional[str] = None,
+ ) -> list[SCMDeployment]: ...
+
+ def get_teams(self) -> list[SCMTeam]: ...
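+
+
+# Because SCMProvider is @runtime_checkable, an integration can be sanity-checked
+# with isinstance(provider, SCMProvider).  Note that isinstance only verifies
+# the methods exist; it does not check their signatures.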
diff --git a/src/wellcode_cli/jira/__init__.py b/src/wellcode_cli/jira/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/jira/jira_display.py b/src/wellcode_cli/jira/jira_display.py
new file mode 100644
index 0000000..fbe0c88
--- /dev/null
+++ b/src/wellcode_cli/jira/jira_display.py
@@ -0,0 +1,214 @@
+import statistics
+from datetime import datetime, timezone
+
+from rich.box import ROUNDED
+from rich.console import Console
+from rich.panel import Panel
+
+console = Console()
+
+
+def format_time(hours: float) -> str:
+ """Convert hours to a human-readable format."""
+ if hours < 1:
+ return f"{int(hours * 60)} minutes"
+ elif hours < 24:
+ return f"{round(hours, 1)} hours"
+ else:
+ days = hours / 24
+ return f"{round(days, 1)} days"
+
+
+def _health_indicator(value: float, good: float, warn: float) -> str:
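+    """Health indicator where higher values are better (e.g. completion rate)."""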
+ if value > good:
+ return "🟢"
+ if value > warn:
+ return "🟡"
+ return "🔴"
+
+
+def _health_indicator_low_good(value: float, good: float, warn: float) -> str:
+ """Health indicator where lower values are better (e.g. cycle time)."""
+ if value < good:
+ return "🟢"
+ if value < warn:
+ return "🟡"
+ return "🔴"
+
+
+def _safe_mean(values: list) -> float:
+ return statistics.mean(values) if values else 0
+
+
+def _label_color(name: str) -> str:
+ lower = name.lower()
+ if "bug" in lower:
+ return "red"
+ if "feature" in lower or "story" in lower:
+ return "green"
+ if "improvement" in lower:
+ return "yellow"
+ return "blue"
+
+
+def display_jira_metrics(org_metrics):
+ """Display JIRA metrics with a modern UI using Rich components."""
+ now = datetime.now(timezone.utc)
+ console.print(
+ Panel(
+ "[bold cyan]JIRA Engineering Analytics[/]\n"
+ + f"[dim]Instance: {org_metrics.name}[/]\n"
+ + f"[dim]Report Generated: {now.strftime('%Y-%m-%d %H:%M')} UTC[/]",
+ box=ROUNDED,
+ style="cyan",
+ )
+ )
+
+ _display_issue_flow(org_metrics)
+ _display_time_metrics(org_metrics)
+ _display_sprint_performance(org_metrics)
+ _display_estimation_health(org_metrics)
+ _display_project_health(org_metrics)
+
+ if org_metrics.label_counts:
+ _display_distribution("Label Distribution", dict(org_metrics.label_counts))
+ if org_metrics.component_counts:
+ _display_distribution("Component Distribution", dict(org_metrics.component_counts))
+
+
+def _display_issue_flow(org_metrics):
+ total = org_metrics.issues.total_created
+ completed = org_metrics.issues.total_completed
+ completion_rate = (completed / total * 100) if total > 0 else 0
+ health = _health_indicator(completion_rate, 80, 60)
+
+ console.print(
+ Panel(
+ f"{health} [bold green]Issues Created:[/] {total}\n"
+ + f"[bold yellow]Issues Completed:[/] {completed} ({completion_rate:.1f}% completion rate)\n"
+ + f"[bold red]Bugs Created:[/] {org_metrics.issues.bugs_created}\n"
+ + f"[bold blue]Stories Created:[/] {org_metrics.issues.stories_created}\n"
+ + f"[bold white]Tasks Created:[/] {org_metrics.issues.tasks_created}",
+ title="[bold]Issue Flow",
+ box=ROUNDED,
+ )
+ )
+
+
+def _display_time_metrics(org_metrics):
+ cycle = org_metrics.cycle_time
+ avg_cycle_time = _safe_mean(cycle.cycle_times)
+ cycle_health = _health_indicator_low_good(avg_cycle_time, 24, 72)
+
+ console.print(
+ Panel(
+ f"{cycle_health} [bold]Cycle Time:[/] {format_time(avg_cycle_time)}\n"
+ + f"[bold]Time to Start:[/] {format_time(_safe_mean(cycle.time_to_start))}\n"
+ + f"[bold]Time in Progress:[/] {format_time(_safe_mean(cycle.time_in_progress))}",
+ title="[bold blue]Time Metrics",
+ box=ROUNDED,
+ )
+ )
+
+
+def _display_sprint_performance(org_metrics):
+ if not org_metrics.sprints:
+ return
+
+ sprint_panels = []
+ for sprint in org_metrics.sprints:
+ health = _health_indicator(sprint.points_completion_rate, 80, 60)
+ goal_line = f"Goal: {sprint.goal}\n" if sprint.goal else ""
+
+ sprint_panels.append(
+ f"{health} [bold cyan]{sprint.name}[/] ({sprint.state})\n"
+ + goal_line
+ + f"Issues: {sprint.completed_issues}/{sprint.total_issues} completed ({sprint.completion_rate:.1f}%)\n"
+ + f"Story Points: {sprint.story_points_completed}/{sprint.story_points_committed} ({sprint.points_completion_rate:.1f}%)\n"
+ + f"Velocity: {sprint.velocity} points"
+ )
+
+ console.print(
+ Panel(
+ "\n\n".join(sprint_panels),
+ title="[bold magenta]Sprint Performance",
+ box=ROUNDED,
+ )
+ )
+
+
+def _display_estimation_health(org_metrics):
+ est = org_metrics.estimation
+ if est.total_estimated == 0:
+ return
+
+ accuracy_rate = est.accurate_estimates / est.total_estimated * 100
+ accuracy_health = _health_indicator(accuracy_rate, 80, 60)
+ avg_variance = _safe_mean(est.estimation_variance)
+
+ console.print(
+ Panel(
+ f"{accuracy_health} [bold]Estimation Accuracy:[/] {accuracy_rate:.1f}%\n"
+ + f"[bold green]Accurate Estimates:[/] {est.accurate_estimates}\n"
+ + f"[bold red]Underestimates:[/] {est.underestimates}\n"
+ + f"[bold yellow]Overestimates:[/] {est.overestimates}\n"
+ + f"[bold]Average Variance:[/] {avg_variance:.1f} hours",
+ title="[bold yellow]Estimation Health",
+ box=ROUNDED,
+ )
+ )
+
+
+def _display_project_health(org_metrics):
+ if not org_metrics.projects:
+ return
+
+ project_panels = []
+ for _, project in org_metrics.projects.items():
+ completion_rate = (
+ (project.completed_issues / project.total_issues * 100)
+ if project.total_issues > 0
+ else 0
+ )
+ proj_health = _health_indicator(completion_rate, 80, 50)
+
+ project_panels.append(
+ f"{proj_health} [bold cyan]{project.name}[/] ({project.key})\n"
+ + f"Issues: {project.total_issues} total, {project.completed_issues} completed ({completion_rate:.1f}%)\n"
+ + f"Bugs: {project.bugs_count} | Stories: {project.stories_count} | Tasks: {project.tasks_count}\n"
+ + f"Cycle Time: {format_time(project.avg_cycle_time)}\n"
+ + f"Team Members: {len(project.members)}"
+ )
+
+ console.print(
+ Panel(
+ "\n\n".join(project_panels),
+ title="[bold green]Project Health",
+ box=ROUNDED,
+ )
+ )
+
+
+def _display_distribution(title: str, counts: dict):
+ """Display a visual bar-chart summary of label or component counts."""
+ if not counts:
+ return
+
+ sorted_items = sorted(counts.items(), key=lambda x: x[1], reverse=True)
+ max_count = max(count for _, count in sorted_items)
+ max_bar_length = 40
+
+ lines = []
+ for name, count in sorted_items:
+ bar_length = int((count / max_count) * max_bar_length)
+ bar = "█" * bar_length
+ color = _label_color(name)
+ lines.append(f"[{color}]{name:<25}[/] {bar} ({count})")
+
+ console.print(
+ Panel(
+ "\n".join(lines),
+ title=f"[bold cyan]{title}",
+ box=ROUNDED,
+ )
+ )
diff --git a/src/wellcode_cli/jira/jira_metrics.py b/src/wellcode_cli/jira/jira_metrics.py
new file mode 100644
index 0000000..629688a
--- /dev/null
+++ b/src/wellcode_cli/jira/jira_metrics.py
@@ -0,0 +1,294 @@
+import logging
+from datetime import datetime, timedelta
+from typing import Optional
+
+from dateutil import tz
+from jira import JIRA
+from rich.console import Console
+
+from ..config import get_jira_api_token, get_jira_email, get_jira_url
+from .models.metrics import (
+ JiraOrgMetrics,
+ JiraProjectMetrics,
+ JiraSprintMetrics,
+)
+
+console = Console()
+logger = logging.getLogger(__name__)
+
+STORY_POINTS_FIELD = "story_points"
+STORY_POINTS_CUSTOM_FIELD = "customfield_10016"
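+# customfield_10016 is a common default for story points on JIRA Cloud, but the
+# field id varies per instance; _get_story_points tries both names.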
+
+_SEARCH_FIELDS = (
+ "summary,issuetype,status,priority,project,assignee,"
+ "created,resolutiondate,labels,components,"
+ f"{STORY_POINTS_FIELD},{STORY_POINTS_CUSTOM_FIELD}"
+)
+
+
+def _get_jira_client() -> JIRA:
+ url = get_jira_url()
+ email = get_jira_email()
+ token = get_jira_api_token()
+ return JIRA(server=url, basic_auth=(email, token))
+
+
+def _parse_datetime(value) -> Optional[datetime]:
+    if not value:
+        return None
+    if isinstance(value, datetime):
+        return value
+    text = value.replace("Z", "+00:00")
+    # JIRA emits offsets like "+0000"; fromisoformat on Python < 3.11 only
+    # accepts "+00:00", so insert the missing colon.
+    if len(text) >= 5 and text[-5] in "+-" and text[-3] != ":":
+        text = f"{text[:-2]}:{text[-2:]}"
+    return datetime.fromisoformat(text)
+
+
+def _get_story_points(issue) -> float:
+ """Extract story points from an issue, trying common field names."""
+ fields = issue.fields
+ for attr in (STORY_POINTS_FIELD, STORY_POINTS_CUSTOM_FIELD):
+ sp = getattr(fields, attr, None)
+ if sp is not None:
+ return float(sp)
+    return 0.0
+
+
+def _get_in_progress_date(issue) -> Optional[datetime]:
+ """Find the first transition to an 'In Progress' status category from changelog."""
+ if not hasattr(issue, "changelog"):
+ return None
+ for history in issue.changelog.histories:
+ for item in history.items:
+ if item.field != "status":
+ continue
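+            # Changelog items don't always expose a status category, so fall
+            # back to matching the status name string.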
+ to_category = getattr(item, "to_category", None)
+ to_str = (item.toString or "").lower()
+ if to_category == "indeterminate" or "in progress" in to_str:
+ return _parse_datetime(history.created)
+ return None
+
+
+def _extract_issue_fields(issue):
+ """Pull relevant scalar values from a JIRA issue for metric processing."""
+ fields = issue.fields
+ return {
+ "issue_type": fields.issuetype.name if fields.issuetype else "Task",
+ "status_category": (
+ fields.status.statusCategory.key
+ if fields.status and fields.status.statusCategory
+ else "new"
+ ),
+ "priority": fields.priority.name if fields.priority else "Medium",
+ "project_key": fields.project.key if fields.project else "",
+ "project_name": fields.project.name if fields.project else "",
+ "assignee": fields.assignee.displayName if fields.assignee else None,
+ "created_at": _parse_datetime(fields.created),
+ "resolution_date": _parse_datetime(fields.resolutiondate),
+ "in_progress_date": _get_in_progress_date(issue),
+ "story_points": _get_story_points(issue),
+ "labels": fields.labels or [],
+ "components": fields.components or [],
+ }
+
+
+def _fetch_all_issues(client: JIRA, jql: str) -> list:
+ """Paginate through JQL results and return all issues."""
+ all_issues = []
+ start_at = 0
+ max_results = 50
+
+ while True:
+ batch = client.search_issues(
+ jql,
+ startAt=start_at,
+ maxResults=max_results,
+ expand="changelog",
+ fields=_SEARCH_FIELDS,
+ )
+ all_issues.extend(batch)
+ if len(batch) < max_results:
+ break
+ start_at += max_results
+
+ return all_issues
+
+
+def _process_issue(org_metrics: JiraOrgMetrics, issue):
+ """Process a single JIRA issue into all metric containers."""
+ f = _extract_issue_fields(issue)
+
+ org_metrics.issues.update_from_issue(
+ issue_type=f["issue_type"],
+ status_category=f["status_category"],
+ priority=f["priority"],
+ project_key=f["project_key"],
+ assignee=f["assignee"],
+ )
+
+ if f["created_at"]:
+ org_metrics.cycle_time.update_from_issue(
+ created_at=f["created_at"],
+ resolution_date=f["resolution_date"],
+ in_progress_date=f["in_progress_date"],
+ project_key=f["project_key"],
+ priority=f["priority"],
+ issue_type=f["issue_type"],
+ )
+
+ actual_hours = 0.0
+ if f["in_progress_date"] and f["resolution_date"]:
+ actual_hours = calculate_work_hours(f["in_progress_date"], f["resolution_date"])
+ if f["story_points"] and actual_hours > 0:
+ org_metrics.estimation.update_from_issue(
+ story_points=f["story_points"],
+ actual_hours=actual_hours,
+ project_key=f["project_key"],
+ )
+
+ _update_project_metrics(org_metrics, f)
+ _update_label_component_counts(org_metrics, f)
+
+
+def _update_project_metrics(org_metrics: JiraOrgMetrics, f: dict):
+ project_key = f["project_key"]
+ if not project_key:
+ return
+ if project_key not in org_metrics.projects:
+ org_metrics.projects[project_key] = JiraProjectMetrics(
+ key=project_key, name=f["project_name"],
+ )
+ org_metrics.projects[project_key].update_from_issue(
+ issue_type=f["issue_type"],
+ status_category=f["status_category"],
+ assignee=f["assignee"],
+ )
+
+
+def _update_label_component_counts(org_metrics: JiraOrgMetrics, f: dict):
+ for label in f["labels"]:
+ org_metrics.label_counts[label] += 1
+ for component in f["components"]:
+ org_metrics.component_counts[component.name] += 1
+
+
+def get_jira_metrics(start_date, end_date, user_filter=None) -> JiraOrgMetrics:
+ """Collect JIRA metrics for the given date range."""
+ client = _get_jira_client()
+ org_metrics = JiraOrgMetrics(name=get_jira_url() or "JIRA")
+
+ start_str = start_date.strftime("%Y-%m-%d")
+ end_str = end_date.strftime("%Y-%m-%d")
+
+ jql = f'created >= "{start_str}" AND created <= "{end_str}"'
+ if user_filter:
+ jql += f' AND assignee = "{user_filter}"'
+
+ all_issues = _fetch_all_issues(client, jql)
+
+ for issue in all_issues:
+ _process_issue(org_metrics, issue)
+
+ _collect_sprint_metrics(client, org_metrics, start_date, end_date)
+ org_metrics.aggregate_project_cycle_times()
+
+ return org_metrics
+
+
+def _is_sprint_in_range(sprint, start_date, end_date) -> bool:
+ sprint_end = _parse_datetime(getattr(sprint, "endDate", None))
+ sprint_start = _parse_datetime(getattr(sprint, "startDate", None))
+
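+    # Sprint timestamps are tz-aware while the report bounds are naive, so
+    # stamp the bounds with the sprint's timezone before comparing.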
+ if sprint_end and sprint_end < start_date.replace(tzinfo=sprint_end.tzinfo):
+ return False
+ if sprint_start and sprint_start > end_date.replace(tzinfo=sprint_start.tzinfo):
+ return False
+ return True
+
+
+def _get_status_category(issue) -> str:
+ status = issue.fields.status
+ if status and status.statusCategory:
+ return status.statusCategory.key
+ return "new"
+
+
+def _build_sprint_metrics(client: JIRA, sprint) -> Optional[JiraSprintMetrics]:
+ sm = JiraSprintMetrics(
+ sprint_id=sprint.id,
+ name=sprint.name,
+ state=sprint.state,
+ goal=getattr(sprint, "goal", "") or "",
+ )
+
+ try:
+ sprint_issues = client.search_issues(
+ f"sprint = {sprint.id}",
+ maxResults=200,
+ fields=f"issuetype,status,{STORY_POINTS_FIELD},{STORY_POINTS_CUSTOM_FIELD}",
+ )
+ except Exception:
+ return None
+
+ for issue in sprint_issues:
+ sm.total_issues += 1
+ sp = _get_story_points(issue)
+ sm.story_points_committed += sp
+
+ if _get_status_category(issue) == "done":
+ sm.completed_issues += 1
+ sm.story_points_completed += sp
+
+ return sm
+
+
+def _collect_sprint_metrics(client: JIRA, org_metrics: JiraOrgMetrics,
+ start_date, end_date):
+ """Fetch sprint data from all scrum boards."""
+ try:
+ boards = client.boards(type="scrum", maxResults=50)
+ except Exception as e:
+ logger.warning("Could not fetch JIRA boards: %s", e)
+ return
+
+ for board in boards:
+ try:
+ sprints = client.sprints(board.id, state="active,closed")
+ except Exception:
+ continue
+
+ for sprint in sprints:
+ if not _is_sprint_in_range(sprint, start_date, end_date):
+ continue
+ sm = _build_sprint_metrics(client, sprint)
+ if sm:
+ org_metrics.sprints.append(sm)
+
+
+def calculate_work_hours(start_date, end_date):
+ """Calculate work hours between two dates, excluding weekends."""
+ if not start_date or not end_date:
+ return 0
+
+ if start_date.tzinfo:
+ start_date = start_date.astimezone(tz.UTC)
+ if end_date.tzinfo:
+ end_date = end_date.astimezone(tz.UTC)
+
+ total_hours = 0
+ current_date = start_date
+
+ while current_date < end_date:
+ if current_date.weekday() < 5:
+ day_end = min(
+ current_date.replace(hour=17, minute=0, second=0, microsecond=0),
+ end_date,
+ )
+            # Clamp to the actual start time: work picked up after 09:00
+            # shouldn't be credited with the full morning.
+            day_start = max(
+                current_date,
+                current_date.replace(hour=9, minute=0, second=0, microsecond=0),
+            )
+
+ if day_end > day_start:
+ work_hours = (day_end - day_start).total_seconds() / 3600
+ total_hours += min(8, work_hours)
+
+ current_date = current_date.replace(
+ hour=9, minute=0, second=0, microsecond=0
+ ) + timedelta(days=1)
+
+ return total_hours
diff --git a/src/wellcode_cli/jira/models/__init__.py b/src/wellcode_cli/jira/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/jira/models/metrics.py b/src/wellcode_cli/jira/models/metrics.py
new file mode 100644
index 0000000..8a668cf
--- /dev/null
+++ b/src/wellcode_cli/jira/models/metrics.py
@@ -0,0 +1,395 @@
+import statistics
+from collections import defaultdict
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Dict, List, Optional, Set
+
+from ...linear.models.metrics import BaseMetrics
+
+
+@dataclass
+class JiraIssueMetrics(BaseMetrics):
+ total_created: int = 0
+ total_completed: int = 0
+ total_in_progress: int = 0
+ bugs_created: int = 0
+ bugs_completed: int = 0
+ stories_created: int = 0
+ stories_completed: int = 0
+ tasks_created: int = 0
+ tasks_completed: int = 0
+ by_priority: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
+ by_status: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
+ by_project: Dict[str, Dict] = field(
+ default_factory=lambda: defaultdict(
+ lambda: {
+ "total": 0,
+ "bugs": 0,
+ "stories": 0,
+ "tasks": 0,
+ "completed": 0,
+ "in_progress": 0,
+ }
+ )
+ )
+ by_assignee: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
+
+ def get_stats(self) -> Dict:
+ completion_rate = (
+ (self.total_completed / self.total_created * 100)
+ if self.total_created > 0
+ else 0
+ )
+ bug_rate = (
+ (self.bugs_created / self.total_created * 100)
+ if self.total_created > 0
+ else 0
+ )
+
+ return {
+ "total_issues": self.total_created,
+ "completion_rate": completion_rate,
+ "bug_rate": bug_rate,
+ "stories_to_bugs_ratio": (
+ self.stories_created / self.bugs_created
+ if self.bugs_created > 0
+ else 0
+ ),
+ "in_progress_rate": (
+ (self.total_in_progress / self.total_created * 100)
+ if self.total_created > 0
+ else 0
+ ),
+ "priority_distribution": dict(self.by_priority),
+ "status_distribution": dict(self.by_status),
+ "project_metrics": dict(self.by_project),
+ "assignee_distribution": dict(self.by_assignee),
+ }
+
+ _TYPE_COUNTERS = {
+ "bug": ("bugs_created", "bugs_completed"),
+ "story": ("stories_created", "stories_completed"),
+ "task": ("tasks_created", "tasks_completed"),
+ }
+
+ _TYPE_PROJECT_KEY = {"bug": "bugs", "story": "stories", "task": "tasks"}
+
+ def update_from_issue(self, issue_type: str, status_category: str,
+ priority: str, project_key: str,
+ assignee: Optional[str]):
+ self.total_created += 1
+ self.by_status[status_category] += 1
+ is_done = status_category == "done"
+
+ if is_done:
+ self.total_completed += 1
+ elif status_category == "indeterminate":
+ self.total_in_progress += 1
+
+ issue_type_lower = issue_type.lower()
+ self._update_type_counters(issue_type_lower, is_done)
+
+ if priority:
+ self.by_priority[priority] += 1
+ if project_key:
+ self._update_project(project_key, issue_type_lower, status_category)
+ if assignee:
+ self.by_assignee[assignee] += 1
+
+ def _update_type_counters(self, issue_type_lower: str, is_done: bool):
+ counters = self._TYPE_COUNTERS.get(issue_type_lower)
+ if not counters:
+ return
+ created_attr, completed_attr = counters
+ setattr(self, created_attr, getattr(self, created_attr) + 1)
+ if is_done:
+ setattr(self, completed_attr, getattr(self, completed_attr) + 1)
+
+ def _update_project(self, project_key: str, issue_type_lower: str,
+ status_category: str):
+ proj = self.by_project[project_key]
+ proj["total"] += 1
+ proj_key = self._TYPE_PROJECT_KEY.get(issue_type_lower)
+ if proj_key:
+ proj[proj_key] += 1
+ if status_category == "done":
+ proj["completed"] += 1
+ elif status_category == "indeterminate":
+ proj["in_progress"] += 1
+
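+# Example (hypothetical values): feed one completed bug through the
+# aggregator and read back the derived rates.
+#
+#   m = JiraIssueMetrics()
+#   m.update_from_issue("Bug", "done", "High", "CORE", "alice")
+#   m.get_stats()["completion_rate"]  # -> 100.0
+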
+
+@dataclass
+class JiraCycleTimeMetrics(BaseMetrics):
+ cycle_times: List[float] = field(default_factory=list)
+ time_to_start: List[float] = field(default_factory=list)
+ time_in_progress: List[float] = field(default_factory=list)
+ by_project: Dict[str, List[float]] = field(
+ default_factory=lambda: defaultdict(list)
+ )
+ by_priority: Dict[str, List[float]] = field(
+ default_factory=lambda: defaultdict(list)
+ )
+ by_issue_type: Dict[str, List[float]] = field(
+ default_factory=lambda: defaultdict(list)
+ )
+
+ def get_stats(self) -> Dict:
+ def safe_mean(lst: List[float]) -> float:
+ return statistics.mean(lst) if lst else 0
+
+ return {
+ "avg_cycle_time": safe_mean(self.cycle_times),
+ "avg_time_to_start": safe_mean(self.time_to_start),
+ "avg_time_in_progress": safe_mean(self.time_in_progress),
+ "project_cycle_times": {
+ project: safe_mean(times)
+ for project, times in self.by_project.items()
+ },
+ "priority_cycle_times": {
+ priority: safe_mean(times)
+ for priority, times in self.by_priority.items()
+ },
+ "issue_type_cycle_times": {
+ itype: safe_mean(times)
+ for itype, times in self.by_issue_type.items()
+ },
+ "cycle_time_p95": (
+ statistics.quantiles(self.cycle_times, n=20)[-1]
+ if self.cycle_times
+ else 0
+ ),
+ "cycle_time_p50": (
+ statistics.median(self.cycle_times) if self.cycle_times else 0
+ ),
+ }
+
+ def update_from_issue(self, created_at: datetime,
+ resolution_date: Optional[datetime],
+ in_progress_date: Optional[datetime],
+ project_key: str, priority: str,
+ issue_type: str):
+ if resolution_date:
+ cycle_time = (resolution_date - created_at).total_seconds() / 3600
+ self.cycle_times.append(cycle_time)
+
+ if project_key:
+ self.by_project[project_key].append(cycle_time)
+ if priority:
+ self.by_priority[priority].append(cycle_time)
+ if issue_type:
+ self.by_issue_type[issue_type].append(cycle_time)
+
+ if in_progress_date:
+ time_to_start = (in_progress_date - created_at).total_seconds() / 3600
+ self.time_to_start.append(time_to_start)
+
+ if resolution_date:
+ time_in_prog = (
+ (resolution_date - in_progress_date).total_seconds() / 3600
+ )
+ self.time_in_progress.append(time_in_prog)
+
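+# Example (hypothetical datetimes): one resolved issue records cycle time,
+# time to start, and time in progress; with a single data point, get_stats()
+# returns it directly as the p95 rather than calling quantiles().
+#
+#   c = JiraCycleTimeMetrics()
+#   c.update_from_issue(created, resolved, started, "CORE", "High", "Bug")
+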
+
+@dataclass
+class JiraSprintMetrics(BaseMetrics):
+ sprint_id: int = 0
+ name: str = ""
+ state: str = ""
+ goal: str = ""
+ total_issues: int = 0
+ completed_issues: int = 0
+ story_points_committed: float = 0
+ story_points_completed: float = 0
+
+ @property
+ def velocity(self) -> float:
+ return self.story_points_completed
+
+ @property
+ def completion_rate(self) -> float:
+ if self.total_issues == 0:
+ return 0
+ return (self.completed_issues / self.total_issues) * 100
+
+ @property
+ def points_completion_rate(self) -> float:
+ if self.story_points_committed == 0:
+ return 0
+ return (self.story_points_completed / self.story_points_committed) * 100
+
+ def get_stats(self) -> Dict:
+ return {
+ "name": self.name,
+ "state": self.state,
+ "goal": self.goal,
+ "total_issues": self.total_issues,
+ "completed_issues": self.completed_issues,
+ "completion_rate": self.completion_rate,
+ "story_points_committed": self.story_points_committed,
+ "story_points_completed": self.story_points_completed,
+ "points_completion_rate": self.points_completion_rate,
+ "velocity": self.velocity,
+ }
+
+
+@dataclass
+class JiraEstimationMetrics(BaseMetrics):
+ total_estimated: int = 0
+ accurate_estimates: int = 0
+ underestimates: int = 0
+ overestimates: int = 0
+ estimation_variance: List[float] = field(default_factory=list)
+ by_project: Dict[str, Dict] = field(
+ default_factory=lambda: defaultdict(
+ lambda: {"total": 0, "accurate": 0, "under": 0, "over": 0, "variance": []}
+ )
+ )
+
+ def get_stats(self) -> Dict:
+ def safe_mean(lst: List[float]) -> float:
+ return statistics.mean(lst) if lst else 0
+
+ accuracy_rate = (
+ (self.accurate_estimates / self.total_estimated * 100)
+ if self.total_estimated > 0
+ else 0
+ )
+
+ return {
+ "total_estimated": self.total_estimated,
+ "accuracy_rate": accuracy_rate,
+ "underestimate_rate": (
+ (self.underestimates / self.total_estimated * 100)
+ if self.total_estimated > 0
+ else 0
+ ),
+ "overestimate_rate": (
+ (self.overestimates / self.total_estimated * 100)
+ if self.total_estimated > 0
+ else 0
+ ),
+ "avg_variance": safe_mean(self.estimation_variance),
+ "project_accuracy": {
+ project: {
+ "accuracy_rate": (
+ (stats["accurate"] / stats["total"] * 100)
+ if stats["total"] > 0
+ else 0
+ ),
+ "avg_variance": safe_mean(stats["variance"]),
+ }
+ for project, stats in self.by_project.items()
+ },
+ }
+
+ def update_from_issue(self, story_points: float, actual_hours: float,
+ project_key: str):
+ if not story_points or actual_hours <= 0:
+ return
+
+        # Heuristic conversion: assume one story point ~ 2 hours of work
+        expected_hours = story_points * 2
+ variance_percent = ((actual_hours - expected_hours) / expected_hours) * 100
+
+ self.total_estimated += 1
+ self.estimation_variance.append(variance_percent)
+
+ if abs(variance_percent) <= 20:
+ self.accurate_estimates += 1
+ elif variance_percent > 20:
+ self.underestimates += 1
+ else:
+ self.overestimates += 1
+
+ if project_key:
+ proj_stats = self.by_project[project_key]
+ proj_stats["total"] += 1
+ proj_stats["variance"].append(variance_percent)
+ if abs(variance_percent) <= 20:
+ proj_stats["accurate"] += 1
+ elif variance_percent > 20:
+ proj_stats["under"] += 1
+ else:
+ proj_stats["over"] += 1
+
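+# Example (hypothetical issue): 3 points ~ 6 expected hours; 9 actual hours
+# is +50% variance, which counts as an underestimate.
+#
+#   e = JiraEstimationMetrics()
+#   e.update_from_issue(story_points=3, actual_hours=9, project_key="CORE")
+#   e.underestimates  # -> 1
+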
+
+@dataclass
+class JiraProjectMetrics(BaseMetrics):
+ key: str = ""
+ name: str = ""
+ total_issues: int = 0
+ completed_issues: int = 0
+ bugs_count: int = 0
+ stories_count: int = 0
+ tasks_count: int = 0
+ avg_cycle_time: float = 0
+ members: Set[str] = field(default_factory=set)
+
+ def get_stats(self) -> Dict:
+ completion_rate = (
+ (self.completed_issues / self.total_issues * 100)
+ if self.total_issues > 0
+ else 0
+ )
+ return {
+ "key": self.key,
+ "name": self.name,
+ "total_issues": self.total_issues,
+ "completed_issues": self.completed_issues,
+ "completion_rate": completion_rate,
+ "bugs_count": self.bugs_count,
+ "stories_count": self.stories_count,
+ "tasks_count": self.tasks_count,
+ "avg_cycle_time": self.avg_cycle_time,
+ "members_count": len(self.members),
+ }
+
+ def update_from_issue(self, issue_type: str, status_category: str,
+ assignee: Optional[str]):
+ self.total_issues += 1
+
+ if status_category == "done":
+ self.completed_issues += 1
+
+ issue_type_lower = issue_type.lower()
+ if issue_type_lower == "bug":
+ self.bugs_count += 1
+ elif issue_type_lower == "story":
+ self.stories_count += 1
+ elif issue_type_lower == "task":
+ self.tasks_count += 1
+
+ if assignee:
+ self.members.add(assignee)
+
+
+@dataclass
+class JiraOrgMetrics(BaseMetrics):
+ name: str = ""
+ issues: JiraIssueMetrics = field(default_factory=JiraIssueMetrics)
+ projects: Dict[str, JiraProjectMetrics] = field(default_factory=dict)
+ sprints: List[JiraSprintMetrics] = field(default_factory=list)
+ cycle_time: JiraCycleTimeMetrics = field(default_factory=JiraCycleTimeMetrics)
+ estimation: JiraEstimationMetrics = field(default_factory=JiraEstimationMetrics)
+ label_counts: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
+ component_counts: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
+
+ def get_stats(self) -> Dict:
+ return {
+ "name": self.name,
+ "issues": self.issues.get_stats(),
+ "projects": {
+ key: project.get_stats()
+ for key, project in self.projects.items()
+ },
+ "sprints": [sprint.get_stats() for sprint in self.sprints],
+ "cycle_time": self.cycle_time.get_stats(),
+ "estimation": self.estimation.get_stats(),
+ "label_counts": dict(self.label_counts),
+ "component_counts": dict(self.component_counts),
+ }
+
+ def aggregate_project_cycle_times(self):
+ """Update per-project avg_cycle_time from the cycle_time metrics."""
+ for project_key, times in self.cycle_time.by_project.items():
+ if project_key in self.projects and times:
+ self.projects[project_key].avg_cycle_time = statistics.mean(times)
diff --git a/src/wellcode_cli/linear/__init__.py b/src/wellcode_cli/linear/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/linear/models/__init__.py b/src/wellcode_cli/linear/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/main.py b/src/wellcode_cli/main.py
index 48c265b..2ccc1a0 100644
--- a/src/wellcode_cli/main.py
+++ b/src/wellcode_cli/main.py
@@ -7,7 +7,6 @@
from .commands import chat, chat_interface, completion, config, report, review
-# Configure rich-click
click.rich_click.USE_RICH_MARKUP = True
click.rich_click.USE_MARKDOWN = True
click.rich_click.SHOW_ARGUMENTS = True
@@ -15,12 +14,11 @@
click.rich_click.STYLE_ERRORS_SUGGESTION = "yellow italic"
click.rich_click.ERRORS_SUGGESTION = "Try '--help' for more information."
-# Initialize rich console
console = Console()
@click.group(invoke_without_command=True)
-@click.version_option(version=__version__, prog_name="wellcode-cli")
+@click.version_option(version=__version__, prog_name="wellcode")
@click.option(
"-v",
"--verbose",
@@ -29,23 +27,21 @@
)
@click.pass_context
def cli(ctx, verbose):
- """🚀 Wellcode CLI - Engineering Metrics Analysis Tool"""
- # Set up logging based on verbosity level
+ """Wellcode - Open-source developer productivity platform"""
if verbose == 0:
log_level = logging.WARNING
elif verbose == 1:
log_level = logging.INFO
- else: # verbose >= 2
+ else:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format="%(levelname)s:%(message)s")
if ctx.invoked_subcommand is None:
- # Start interactive mode by default
ctx.invoke(chat_interface)
-# Add commands to CLI group
+# Existing commands
cli.add_command(review)
cli.add_command(config)
cli.add_command(chat_interface, name="chat")
@@ -54,6 +50,249 @@ def cli(ctx, verbose):
cli.add_command(completion)
+# --- New commands ---
+
+@cli.command()
+@click.option("--host", default="0.0.0.0", help="Host to bind to")
+@click.option("--port", "-p", default=8787, help="Port to listen on")
+@click.option("--reload", is_flag=True, help="Enable auto-reload for development")
+@click.option("--schedule/--no-schedule", default=True, help="Enable background metric collection")
+@click.option("--interval", default=6, help="Collection interval in hours")
+def serve(host, port, reload, schedule, interval):
+ """Start the Wellcode API server and web dashboard."""
+ import uvicorn
+
+ from .db.engine import init_db
+
+ init_db()
+
+ if schedule:
+ from .workers.scheduler import start_scheduler
+ start_scheduler(interval_hours=interval)
+ console.print(f"[green]Background collection enabled (every {interval}h)[/]")
+
+ console.print(f"\n[bold blue]Wellcode[/] v{__version__}")
+ console.print(f"[green]API server starting at http://{host}:{port}[/]")
+ console.print(f"[dim]API docs at http://{host}:{port}/docs[/]\n")
+
+ uvicorn.run(
+ "wellcode_cli.api.app:app",
+ host=host,
+ port=port,
+ reload=reload,
+ log_level="info",
+ )
+
+
+@cli.command()
+@click.option("--start-date", "-s", type=click.DateTime(), help="Start date (YYYY-MM-DD)")
+@click.option("--end-date", "-e", type=click.DateTime(), help="End date (YYYY-MM-DD)")
+@click.option("--days", "-d", default=7, help="Number of days to look back (default: 7)")
+def collect(start_date, end_date, days):
+ """Collect metrics from all configured providers and store them."""
+ from datetime import datetime, timedelta, timezone
+
+ from .db.engine import init_db
+ from .services.collector import collect_all
+
+ init_db()
+
+ if end_date is None:
+ end_date = datetime.now(timezone.utc)
+ if start_date is None:
+ start_date = end_date - timedelta(days=days)
+
+ console.print("\n[bold blue]Wellcode[/] - Collecting metrics")
+ console.print(f"Period: {start_date.date()} to {end_date.date()}\n")
+
+ with console.status("[bold green]Collecting metrics from all providers..."):
+ summary = collect_all(start_date, end_date)
+
+ if "error" in summary:
+ console.print(f"[red]Error: {summary['error']}[/]")
+ return
+
+ console.print("[bold green]Collection complete![/]\n")
+ console.print(f" Providers: {len(summary.get('providers', []))}")
+ console.print(f" Pull Requests: {summary.get('total_prs', 0)}")
+ console.print(f" Deployments: {summary.get('total_deployments', 0)}")
+ console.print(f" Repositories: {summary.get('total_repos', 0)}")
+
+ for prov in summary.get("providers", []):
+ status = "[green]OK[/]" if "error" not in prov else f"[red]{prov['error']}[/]"
+ console.print(f" [{prov['name']}] PRs: {prov['prs']}, Deploys: {prov['deployments']} {status}")
+
+
+@cli.command()
+@click.option("--start-date", "-s", type=click.DateTime(), help="Start date (YYYY-MM-DD)")
+@click.option("--end-date", "-e", type=click.DateTime(), help="End date (YYYY-MM-DD)")
+@click.option("--days", "-d", default=30, help="Number of days to analyze (default: 30)")
+@click.option("--repo-id", type=int, help="Filter by repository ID")
+@click.option("--team-id", type=int, help="Filter by team ID")
+def dora(start_date, end_date, days, repo_id, team_id):
+ """View DORA metrics for your organization."""
+ from datetime import datetime, timedelta, timezone
+
+ from rich.panel import Panel
+ from rich.table import Table
+
+ from .db.engine import get_session, init_db
+ from .db.repository import MetricStore
+ from .services.dora import compute_dora
+
+ init_db()
+
+ if end_date is None:
+ end_date = datetime.now(timezone.utc)
+ if start_date is None:
+ start_date = end_date - timedelta(days=days)
+
+ session = get_session()
+ store = MetricStore(session)
+ metrics = compute_dora(store, start_date, end_date, repo_id=repo_id, team_id=team_id)
+ session.close()
+
+ level_colors = {"elite": "green", "high": "blue", "medium": "yellow", "low": "red"}
+ level_color = level_colors.get(metrics.level, "white")
+
+ console.print(Panel.fit(
+ f"[bold {level_color}]DORA Level: {metrics.level.upper()}[/]",
+ title="[bold]DORA Metrics",
+ subtitle=f"{start_date.date()} to {end_date.date()}",
+ ))
+
+ table = Table(show_header=True, header_style="bold")
+ table.add_column("Metric")
+ table.add_column("Value")
+ table.add_column("Elite")
+ table.add_column("High")
+ table.add_column("Medium")
+
+ df = metrics.deployment_frequency
+ df_label = f"{df:.2f}/day" if df >= 1 else f"{df * 7:.1f}/week"
+ table.add_row("Deployment Frequency", df_label, ">1/day", "weekly-daily", "monthly-weekly")
+
+ lt = metrics.lead_time_hours
+ lt_label = f"{lt:.1f}h" if lt < 24 else f"{lt/24:.1f}d"
+ table.add_row("Lead Time for Changes", lt_label, "<1h", "<1 day", "<1 week")
+
+ cfr = metrics.change_failure_rate
+ table.add_row("Change Failure Rate", f"{cfr*100:.1f}%", "0-15%", "16-30%", "31-45%")
+
+ mttr = metrics.mttr_hours
+ mttr_label = f"{mttr:.1f}h" if mttr < 24 else f"{mttr/24:.1f}d"
+ table.add_row("Mean Time to Recovery", mttr_label, "<1h", "<1 day", "<1 week")
+
+ console.print(table)
+
+ details = metrics.details
+ console.print(f"\n[dim]Deployments: {details['total_deployments']} | "
+ f"Merged PRs: {details['total_merged_prs']} | "
+ f"Reverts: {details['reverts']} | "
+ f"Incidents: {details['incidents']}[/]")
+
+
+@cli.command(name="ai-metrics")
+@click.option("--start-date", "-s", type=click.DateTime(), help="Start date")
+@click.option("--end-date", "-e", type=click.DateTime(), help="End date")
+@click.option("--days", "-d", default=30, help="Days to analyze")
+def ai_metrics_cmd(start_date, end_date, days):
+ """View AI coding tool adoption and impact metrics."""
+ from datetime import datetime, timedelta, timezone
+
+ from rich.panel import Panel
+ from rich.table import Table
+
+ from .db.engine import get_session, init_db
+ from .db.repository import MetricStore
+ from .services.ai_metrics import compute_ai_impact
+
+ init_db()
+
+ if end_date is None:
+ end_date = datetime.now(timezone.utc)
+ if start_date is None:
+ start_date = end_date - timedelta(days=days)
+
+ session = get_session()
+ store = MetricStore(session)
+ impact = compute_ai_impact(store, start_date, end_date)
+ session.close()
+
+ total = impact.ai_assisted_pr_count + impact.non_ai_pr_count
+ ai_pct = (impact.ai_assisted_pr_count / total * 100) if total > 0 else 0
+
+ console.print(Panel.fit(
+ f"AI-assisted PRs: [bold]{impact.ai_assisted_pr_count}[/] ({ai_pct:.1f}% of total)\n"
+ f"Non-AI PRs: {impact.non_ai_pr_count}\n"
+ f"Productivity change: [bold]{'+'if impact.productivity_change_pct>0 else ''}{impact.productivity_change_pct:.1f}%[/]",
+ title="[bold blue]AI Impact Analysis",
+ ))
+
+ if impact.tools:
+ table = Table(title="AI Tool Usage", show_header=True, header_style="bold")
+ table.add_column("Tool")
+ table.add_column("Active Users")
+ table.add_column("Suggestions")
+ table.add_column("Accepted")
+ table.add_column("Acceptance Rate")
+ table.add_column("Lines Accepted")
+ table.add_column("Cost (USD)")
+
+ for t in impact.tools:
+ table.add_row(
+ t.tool,
+ str(t.active_users),
+ str(t.total_suggestions_shown),
+ str(t.total_suggestions_accepted),
+ f"{t.acceptance_rate*100:.1f}%",
+ str(t.total_lines_accepted),
+ f"${t.total_cost_usd:.2f}",
+ )
+ console.print(table)
+
+ if impact.ai_avg_cycle_time_hours > 0 or impact.non_ai_avg_cycle_time_hours > 0:
+ table2 = Table(title="AI vs Non-AI Comparison", show_header=True, header_style="bold")
+ table2.add_column("Metric")
+ table2.add_column("AI-Assisted")
+ table2.add_column("Non-AI")
+
+ table2.add_row(
+ "Avg Cycle Time",
+ f"{impact.ai_avg_cycle_time_hours:.1f}h",
+ f"{impact.non_ai_avg_cycle_time_hours:.1f}h",
+ )
+ table2.add_row(
+ "Avg Review Time",
+ f"{impact.ai_avg_review_time_hours:.1f}h",
+ f"{impact.non_ai_avg_review_time_hours:.1f}h",
+ )
+ table2.add_row(
+ "Revert Rate",
+ f"{impact.ai_revert_rate*100:.1f}%",
+ f"{impact.non_ai_revert_rate*100:.1f}%",
+ )
+ console.print(table2)
+
+
+@cli.command()
+@click.option("--template", "-t", type=click.Choice(["pulse", "full_dx"]), default="pulse")
+@click.option("--title", help="Survey title")
+def survey(template, title):
+ """Create and manage developer experience surveys."""
+ from .db.engine import init_db
+ from .services.surveys import SURVEY_TEMPLATES, create_survey_from_template
+
+ init_db()
+
+ s = create_survey_from_template(template=template, title=title)
+ console.print(f"\n[green]Survey created:[/] {s.title}")
+ console.print(f" ID: {s.id}")
+ console.print(f" Type: {template}")
+ console.print(f" Questions: {len(SURVEY_TEMPLATES[template])}")
+ console.print("\n[dim]Share via API: POST /api/v1/surveys/respond[/]")
+
+
def main():
cli()
diff --git a/src/wellcode_cli/services/__init__.py b/src/wellcode_cli/services/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/services/ai_metrics.py b/src/wellcode_cli/services/ai_metrics.py
new file mode 100644
index 0000000..a156f94
--- /dev/null
+++ b/src/wellcode_cli/services/ai_metrics.py
@@ -0,0 +1,205 @@
+"""AI coding tool metrics collection and analysis service.
+
+Supports:
+- GitHub Copilot (organization API)
+- Cursor AI (log parsing)
+- Claude Code / Aider (commit metadata detection)
+"""
+
+import logging
+import re
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Optional
+
+import requests
+
+from ..config import get_config_value
+from ..db.models import AIUsageMetric
+from ..db.repository import MetricStore
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class AIToolSummary:
+ tool: str
+ total_suggestions_shown: int = 0
+ total_suggestions_accepted: int = 0
+ total_lines_accepted: int = 0
+ active_users: int = 0
+ acceptance_rate: float = 0.0
+ total_cost_usd: float = 0.0
+ daily_breakdown: list = field(default_factory=list)
+
+
+@dataclass
+class AIImpactAnalysis:
+ ai_assisted_pr_count: int = 0
+ non_ai_pr_count: int = 0
+ ai_avg_cycle_time_hours: float = 0.0
+ non_ai_avg_cycle_time_hours: float = 0.0
+ ai_avg_review_time_hours: float = 0.0
+ non_ai_avg_review_time_hours: float = 0.0
+ ai_revert_rate: float = 0.0
+ non_ai_revert_rate: float = 0.0
+ productivity_change_pct: float = 0.0
+ tools: list[AIToolSummary] = field(default_factory=list)
+
+
+# --- GitHub Copilot ---
+
+def collect_copilot_metrics(
+ store: MetricStore,
+ org: str,
+ since: datetime,
+ until: datetime,
+ snapshot_id: Optional[int] = None,
+) -> list[AIUsageMetric]:
+ """Fetch GitHub Copilot usage metrics from the organization API."""
+ token = get_config_value("GITHUB_TOKEN") or get_config_value("GITHUB_USER_TOKEN")
+ if not token:
+ logger.warning("No GitHub token configured for Copilot metrics")
+ return []
+
+ headers = {
+ "Accept": "application/vnd.github+json",
+ "Authorization": f"Bearer {token}",
+ "X-GitHub-Api-Version": "2022-11-28",
+ }
+
+ metrics = []
+ since_str = since.strftime("%Y-%m-%d")
+ until_str = until.strftime("%Y-%m-%d")
+
+ try:
+ url = f"https://api.github.com/orgs/{org}/copilot/usage"
+ params = {"since": since_str, "until": until_str}
+ resp = requests.get(url, headers=headers, params=params, timeout=30)
+
+ if resp.status_code == 200:
+ data = resp.json()
+ for day in data:
+ date_str = day.get("day", "")
+ if not date_str:
+ continue
+
+ metric = AIUsageMetric(
+ snapshot_id=snapshot_id,
+ tool="copilot",
+ date=datetime.fromisoformat(date_str).replace(tzinfo=timezone.utc),
+ suggestions_shown=day.get("total_suggestions_count", 0),
+ suggestions_accepted=day.get("total_acceptances_count", 0),
+ lines_suggested=day.get("total_lines_suggested", 0),
+ lines_accepted=day.get("total_lines_accepted", 0),
+ active_users=day.get("total_active_users", 0),
+ chat_sessions=day.get("total_chat_turns", 0),
+ metadata_={
+ "breakdown": day.get("breakdown", []),
+ },
+ )
+ store.ai_usage.add(metric)
+ metrics.append(metric)
+
+ store.commit()
+ logger.info("Collected %d days of Copilot usage data", len(metrics))
+ elif resp.status_code == 404:
+ logger.info("Copilot usage API not available for org %s", org)
+ else:
+ logger.warning("Copilot API returned %d: %s", resp.status_code, resp.text[:200])
+
+ except Exception as e:
+ logger.error("Error fetching Copilot metrics: %s", e)
+
+ return metrics
+
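+# Example (hypothetical org and dates): pull a week of Copilot usage into the
+# store; returns one AIUsageMetric per day the API reports.
+#
+#   collect_copilot_metrics(store, "acme-inc", since=week_ago, until=now)
+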
+
+# --- AI Commit Detection ---
+
+AI_COMMIT_PATTERNS = [
+ re.compile(r"(?i)co-authored-by:.*copilot", re.MULTILINE),
+ re.compile(r"(?i)co-authored-by:.*\bcursor\b", re.MULTILINE),
+ re.compile(r"(?i)co-authored-by:.*\bclaude\b", re.MULTILINE),
+ re.compile(r"(?i)co-authored-by:.*\baider\b", re.MULTILINE),
+ re.compile(r"(?i)generated by (copilot|cursor|claude|aider|ai)"),
+ re.compile(r"(?i)\[ai[-\s]generated\]"),
+ re.compile(r"(?i)^aider:", re.MULTILINE),
+]
+
+# Known bot and AI-agent author account names
+AI_BOT_AUTHORS = {
+ "github-actions[bot]", "dependabot[bot]", "renovate[bot]",
+ "copilot", "cursor-ai", "claude-code",
+}
+
+
+def detect_ai_tool_from_pr(title: str, labels: list, body: str = "") -> Optional[str]:
+ """Detect which AI tool was used based on PR metadata."""
+ text = f"{title} {body}".lower()
+ label_text = " ".join(lbl.lower() for lbl in labels)
+
+ if "copilot" in text or "copilot" in label_text:
+ return "copilot"
+ if "cursor" in text or "cursor" in label_text:
+ return "cursor"
+ if "claude" in text or "claude code" in text or "claude" in label_text:
+ return "claude_code"
+ if "aider" in text or "aider" in label_text:
+ return "aider"
+ if "ai-generated" in label_text or "ai generated" in text:
+ return "unknown_ai"
+ return None
+
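+# Example (hypothetical PR titles/labels): detection is keyword-based, so
+# labels work as well as titles.
+#
+#   detect_ai_tool_from_pr("Fix parser crash", ["ai-generated"])  # -> "unknown_ai"
+#   detect_ai_tool_from_pr("Refactor auth (Cursor)", [])          # -> "cursor"
+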
+
+# --- Impact Analysis ---
+
+def compute_ai_impact(store: MetricStore, start: datetime, end: datetime) -> AIImpactAnalysis:
+ """Compare metrics between AI-assisted and non-AI PRs."""
+ all_prs = store.pull_requests.get_merged_by_period(start, end)
+
+ ai_prs = [p for p in all_prs if p.is_ai_generated]
+ non_ai_prs = [p for p in all_prs if not p.is_ai_generated]
+
+ def avg(values):
+ return sum(values) / len(values) if values else 0
+
+ ai_cycle = avg([p.cycle_time_hours for p in ai_prs if p.cycle_time_hours])
+ non_ai_cycle = avg([p.cycle_time_hours for p in non_ai_prs if p.cycle_time_hours])
+
+ ai_review = avg([p.time_to_first_review_hours for p in ai_prs if p.time_to_first_review_hours])
+ non_ai_review = avg([p.time_to_first_review_hours for p in non_ai_prs if p.time_to_first_review_hours])
+
+ ai_reverts = sum(1 for p in ai_prs if p.is_revert) / max(len(ai_prs), 1)
+ non_ai_reverts = sum(1 for p in non_ai_prs if p.is_revert) / max(len(non_ai_prs), 1)
+
+ prod_change = 0
+ if non_ai_cycle > 0 and ai_cycle > 0:
+ prod_change = ((non_ai_cycle - ai_cycle) / non_ai_cycle) * 100
+
+ # Tool summaries
+ ai_usage = store.ai_usage.get_summary(start, end)
+ tools = []
+ for row in ai_usage:
+ accepted = row.total_accepted or 0
+ shown = row.total_shown or 0
+ tools.append(AIToolSummary(
+ tool=row.tool,
+ total_suggestions_shown=shown,
+ total_suggestions_accepted=accepted,
+ total_lines_accepted=row.total_lines or 0,
+ active_users=row.peak_users or 0,
+ acceptance_rate=accepted / shown if shown > 0 else 0,
+ total_cost_usd=row.total_cost or 0,
+ ))
+
+ return AIImpactAnalysis(
+ ai_assisted_pr_count=len(ai_prs),
+ non_ai_pr_count=len(non_ai_prs),
+ ai_avg_cycle_time_hours=ai_cycle,
+ non_ai_avg_cycle_time_hours=non_ai_cycle,
+ ai_avg_review_time_hours=ai_review,
+ non_ai_avg_review_time_hours=non_ai_review,
+ ai_revert_rate=ai_reverts,
+ non_ai_revert_rate=non_ai_reverts,
+ productivity_change_pct=prod_change,
+ tools=tools,
+ )
diff --git a/src/wellcode_cli/services/collector.py b/src/wellcode_cli/services/collector.py
new file mode 100644
index 0000000..45bc64d
--- /dev/null
+++ b/src/wellcode_cli/services/collector.py
@@ -0,0 +1,247 @@
+"""Metric collection orchestrator.
+
+Pulls data from all configured SCM providers and stores it in the database.
+"""
+
+import logging
+import time
+from datetime import datetime
+from typing import Optional
+
+from ..config import get_config_value
+from ..db.engine import get_session, init_db
+from ..db.models import DeploymentMetric, PullRequestMetric
+from ..db.repository import MetricStore
+from ..integrations.scm_protocol import SCMProvider
+
+logger = logging.getLogger(__name__)
+
+
+def _get_configured_providers() -> list[SCMProvider]:
+ """Discover and instantiate all configured SCM providers."""
+ providers = []
+
+ token = get_config_value("GITHUB_TOKEN") or get_config_value("GITHUB_USER_TOKEN")
+ if token:
+ from ..integrations.github.provider import GitHubProvider
+ providers.append(GitHubProvider())
+
+ if get_config_value("GITLAB_TOKEN"):
+ from ..integrations.gitlab.provider import GitLabProvider
+ providers.append(GitLabProvider())
+
+ if get_config_value("BITBUCKET_USERNAME") and get_config_value("BITBUCKET_APP_PASSWORD"):
+ from ..integrations.bitbucket.provider import BitbucketProvider
+ providers.append(BitbucketProvider())
+
+ return providers
+
+
+def collect_all(
+ period_start: datetime,
+ period_end: datetime,
+ providers: Optional[list[SCMProvider]] = None,
+) -> dict:
+ """Run a full metric collection cycle for the given period."""
+ init_db()
+ session = get_session()
+ store = MetricStore(session)
+
+ start_time = time.time()
+ snapshot = store.snapshots.create(period_start, period_end)
+
+ if providers is None:
+ providers = _get_configured_providers()
+
+ if not providers:
+ logger.warning("No SCM providers configured")
+ store.snapshots.fail(snapshot, "No SCM providers configured")
+ store.commit()
+ store.close()
+ return {"error": "No SCM providers configured"}
+
+ summary = {
+ "providers": [],
+ "total_prs": 0,
+ "total_deployments": 0,
+ "total_repos": 0,
+ }
+
+ for provider in providers:
+ provider_name = provider.provider_name
+ logger.info("Collecting from %s...", provider_name)
+ prov_summary = {"name": provider_name, "prs": 0, "deployments": 0, "repos": 0}
+
+ try:
+ # Collect repositories
+ repos = provider.get_repositories()
+ for repo in repos:
+ store.repositories.get_or_create(
+ full_name=repo.full_name,
+ provider=repo.provider,
+ default_branch=repo.default_branch,
+ url=repo.url,
+ )
+ prov_summary["repos"] = len(repos)
+ store.commit()
+
+            # Collect PRs (flag AI-assisted ones by title/labels)
+            from .ai_metrics import detect_ai_tool_from_pr
+
+            prs = provider.get_pull_requests(period_start, period_end)
+ for scm_pr in prs:
+ repo = store.repositories.get_or_create(
+ full_name=scm_pr.repository_full_name,
+ provider=scm_pr.provider,
+ )
+ author = store.developers.get_or_create(
+ username=scm_pr.author,
+ provider=scm_pr.provider,
+ )
+
+ # Calculate durations
+ ttfr = None
+ if scm_pr.first_review_at and scm_pr.created_at:
+ ttfr = (scm_pr.first_review_at - scm_pr.created_at).total_seconds() / 3600
+
+ ttm = None
+ if scm_pr.merged_at and scm_pr.created_at:
+ ttm = (scm_pr.merged_at - scm_pr.created_at).total_seconds() / 3600
+
+ coding_time = None
+ if scm_pr.first_commit_at and scm_pr.first_review_at:
+ coding_time = (scm_pr.first_review_at - scm_pr.first_commit_at).total_seconds() / 3600
+
+ lead_time = None
+ if scm_pr.first_commit_at and scm_pr.merged_at:
+ lead_time = (scm_pr.merged_at - scm_pr.first_commit_at).total_seconds() / 3600
+
+                ai_tool = detect_ai_tool_from_pr(scm_pr.title, scm_pr.labels)
+
+ pr_metric = PullRequestMetric(
+ snapshot_id=snapshot.id,
+ repository_id=repo.id,
+ author_id=author.id,
+ provider=scm_pr.provider,
+ external_id=scm_pr.external_id,
+ number=scm_pr.number,
+ title=scm_pr.title,
+ state=scm_pr.state,
+ base_branch=scm_pr.base_branch,
+ head_branch=scm_pr.head_branch,
+ created_at=scm_pr.created_at,
+ updated_at=scm_pr.updated_at,
+ merged_at=scm_pr.merged_at,
+ closed_at=scm_pr.closed_at,
+ first_commit_at=scm_pr.first_commit_at,
+ first_review_at=scm_pr.first_review_at,
+ additions=scm_pr.additions,
+ deletions=scm_pr.deletions,
+ changed_files=scm_pr.changed_files,
+ commits_count=scm_pr.commits_count,
+ time_to_first_review_hours=ttfr,
+ time_to_merge_hours=ttm,
+ coding_time_hours=coding_time,
+ lead_time_hours=lead_time,
+                    cycle_time_hours=lead_time,  # cycle time currently mirrors lead time (first commit -> merge)
+ review_count=scm_pr.review_count,
+ reviewer_count=scm_pr.reviewer_count,
+ comment_count=scm_pr.comment_count,
+ review_cycles=scm_pr.review_cycles,
+ is_revert=scm_pr.is_revert,
+ is_hotfix=scm_pr.is_hotfix,
+ is_self_merged=scm_pr.is_self_merged,
+ is_ai_generated=ai_tool is not None,
+ ai_tool=ai_tool,
+ labels=scm_pr.labels,
+ reviewers=scm_pr.reviewers,
+ )
+ store.pull_requests.upsert(pr_metric)
+
+ prov_summary["prs"] = len(prs)
+ store.commit()
+
+ # Collect deployments
+ deploys = provider.get_deployments(period_start, period_end)
+ for scm_deploy in deploys:
+ repo = store.repositories.get_or_create(
+ full_name=scm_deploy.repository_full_name,
+ provider=scm_deploy.provider,
+ )
+ deploy_metric = DeploymentMetric(
+ snapshot_id=snapshot.id,
+ repository_id=repo.id,
+ provider=scm_deploy.provider,
+ external_id=scm_deploy.external_id,
+ environment=scm_deploy.environment,
+ ref=scm_deploy.ref,
+ sha=scm_deploy.sha,
+ status=scm_deploy.status,
+ deployed_at=scm_deploy.deployed_at,
+ completed_at=scm_deploy.completed_at,
+ duration_seconds=scm_deploy.duration_seconds,
+ is_rollback=scm_deploy.is_rollback,
+ triggered_by=scm_deploy.triggered_by,
+ pr_number=scm_deploy.pr_number,
+ )
+ store.deployments.add(deploy_metric)
+
+ prov_summary["deployments"] = len(deploys)
+ store.commit()
+
+ # Collect teams
+ try:
+ teams = provider.get_teams()
+ for scm_team in teams:
+ org = None
+ if hasattr(provider, "_org") and provider._org:
+ org = store.organizations.get_or_create(
+ name=provider._org, provider=provider.provider_name
+ )
+ store.teams.get_or_create(
+ name=scm_team.name,
+ org_id=org.id if org else None,
+ slug=scm_team.slug,
+ provider=scm_team.provider,
+ external_id=scm_team.external_id,
+ )
+ for member_name in scm_team.members:
+ store.developers.get_or_create(
+ username=member_name, provider=scm_team.provider,
+ )
+ store.commit()
+ except Exception as e:
+ logger.debug("Could not collect teams from %s: %s", provider_name, e)
+
+ except Exception as e:
+ logger.error("Error collecting from %s: %s", provider_name, e)
+ prov_summary["error"] = str(e)
+
+ summary["providers"].append(prov_summary)
+ summary["total_prs"] += prov_summary["prs"]
+ summary["total_deployments"] += prov_summary["deployments"]
+ summary["total_repos"] += prov_summary["repos"]
+
+ # Collect Copilot metrics if GitHub org is configured
+ github_org = get_config_value("GITHUB_ORG")
+ if github_org:
+ from .ai_metrics import collect_copilot_metrics
+ try:
+ copilot_data = collect_copilot_metrics(
+ store, github_org, period_start, period_end, snapshot.id,
+ )
+ summary["copilot_days"] = len(copilot_data)
+ except Exception as e:
+ logger.debug("Could not collect Copilot metrics: %s", e)
+
+ duration = time.time() - start_time
+ store.snapshots.complete(snapshot, summary, duration)
+ store.commit()
+ store.close()
+
+ logger.info(
+ "Collection complete: %d PRs, %d deployments in %.1fs",
+ summary["total_prs"], summary["total_deployments"], duration,
+ )
+ return summary
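+
+
+# Example (programmatic use): collect the trailing 7 days and print totals.
+#
+#   from datetime import datetime, timedelta, timezone
+#   now = datetime.now(timezone.utc)
+#   summary = collect_all(now - timedelta(days=7), now)
+#   print(summary["total_prs"], summary["total_deployments"])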
diff --git a/src/wellcode_cli/services/dora.py b/src/wellcode_cli/services/dora.py
new file mode 100644
index 0000000..dd9a8c2
--- /dev/null
+++ b/src/wellcode_cli/services/dora.py
@@ -0,0 +1,217 @@
+"""DORA metrics calculation service.
+
+Computes the four key DORA metrics:
+- Deployment Frequency
+- Lead Time for Changes
+- Change Failure Rate
+- Mean Time to Recovery (MTTR)
+"""
+
+import statistics
+from dataclasses import dataclass
+from datetime import datetime
+from typing import Optional
+
+from ..db.models import DORASnapshot
+from ..db.repository import MetricStore
+
+DORA_THRESHOLDS = {
+ "deployment_frequency": {
+ "elite": 1.0, # multiple deploys per day (>=1/day)
+ "high": 1 / 7, # weekly to daily
+ "medium": 1 / 30, # monthly to weekly
+ },
+ "lead_time_hours": {
+ "elite": 1, # less than one hour
+ "high": 24, # less than one day
+ "medium": 24 * 7, # less than one week
+ },
+ "change_failure_rate": {
+ "elite": 0.15, # 0-15%
+ "high": 0.30, # 16-30%
+ "medium": 0.45, # 31-45%
+ },
+ "mttr_hours": {
+ "elite": 1,
+ "high": 24,
+ "medium": 24 * 7,
+ },
+}
+
+
+@dataclass
+class DORAMetrics:
+ deployment_frequency: float # deploys per day
+ lead_time_hours: float # median hours from first commit to deploy
+ change_failure_rate: float # percentage 0-1
+ mttr_hours: float # mean time to recovery in hours
+ level: str # elite, high, medium, low
+ details: dict
+
+
+def classify_dora_level(metrics: DORAMetrics) -> str:
+    """Classify overall DORA performance level."""
+
+    def score(value: float, thresholds: dict, higher_is_better: bool) -> int:
+        # Map a metric value onto a 1-4 score against the elite/high/medium
+        # cutoffs; anything beyond "medium" scores 1 (low).
+        for points, tier in ((4, "elite"), (3, "high"), (2, "medium")):
+            if higher_is_better and value >= thresholds[tier]:
+                return points
+            if not higher_is_better and value <= thresholds[tier]:
+                return points
+        return 1
+
+    scores = [
+        score(metrics.deployment_frequency,
+              DORA_THRESHOLDS["deployment_frequency"], higher_is_better=True),
+        score(metrics.lead_time_hours,
+              DORA_THRESHOLDS["lead_time_hours"], higher_is_better=False),
+        score(metrics.change_failure_rate,
+              DORA_THRESHOLDS["change_failure_rate"], higher_is_better=False),
+        score(metrics.mttr_hours,
+              DORA_THRESHOLDS["mttr_hours"], higher_is_better=False),
+    ]
+
+    avg = sum(scores) / len(scores)
+    if avg >= 3.5:
+        return "elite"
+    if avg >= 2.5:
+        return "high"
+    if avg >= 1.5:
+        return "medium"
+    return "low"
+
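+# Example (hypothetical values): daily deploys, 12h lead time, 10% CFR and 4h
+# MTTR score (4, 3, 4, 3) -> avg 3.5 -> "elite".
+#
+#   m = DORAMetrics(1.0, 12.0, 0.10, 4.0, level="", details={})
+#   classify_dora_level(m)  # -> "elite"
+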
+
+def compute_dora(
+ store: MetricStore,
+ period_start: datetime,
+ period_end: datetime,
+ repo_id: Optional[int] = None,
+ team_id: Optional[int] = None,
+) -> DORAMetrics:
+ """Compute DORA metrics from stored data for a given period."""
+ days = max((period_end - period_start).total_seconds() / 86400, 1)
+
+ # --- Deployment Frequency ---
+ deployments = store.deployments.get_by_period(
+ period_start, period_end, repo_id=repo_id, environment="production"
+ )
+ if not deployments:
+ deployments = store.deployments.get_by_period(
+ period_start, period_end, repo_id=repo_id
+ )
+ successful_deploys = [d for d in deployments if d.status in ("success", "active")]
+ deploy_freq = len(successful_deploys) / days if days > 0 else 0
+
+ # Fallback: use merged PRs to main as proxy for deployments
+ if not deployments:
+ merged_prs = store.pull_requests.get_merged_by_period(
+ period_start, period_end, repo_id=repo_id
+ )
+ main_merges = [p for p in merged_prs if p.base_branch in ("main", "master")]
+ deploy_freq = len(main_merges) / days if days > 0 else 0
+ successful_deploys = main_merges
+
+ # --- Lead Time for Changes ---
+ merged_prs = store.pull_requests.get_merged_by_period(
+ period_start, period_end, repo_id=repo_id
+ )
+ lead_times = []
+ for pr in merged_prs:
+ if pr.lead_time_hours is not None:
+ lead_times.append(pr.lead_time_hours)
+ elif pr.first_commit_at and pr.merged_at:
+ lt = (pr.merged_at - pr.first_commit_at).total_seconds() / 3600
+ lead_times.append(lt)
+ elif pr.created_at and pr.merged_at:
+ lt = (pr.merged_at - pr.created_at).total_seconds() / 3600
+ lead_times.append(lt)
+
+ median_lead_time = statistics.median(lead_times) if lead_times else 0
+
+ # --- Change Failure Rate ---
+ total_deploys = len(successful_deploys) if successful_deploys else len(merged_prs)
+ incidents = store.incidents.get_by_period(period_start, period_end)
+ change_incidents = [i for i in incidents if i.caused_by_change]
+ failed_deploys = [d for d in deployments if d.is_failure or d.is_rollback]
+
+ failure_count = len(change_incidents) + len(failed_deploys)
+ reverts = sum(1 for pr in merged_prs if pr.is_revert)
+ hotfixes = sum(1 for pr in merged_prs if pr.is_hotfix)
+ failure_count += reverts
+
+ cfr = failure_count / total_deploys if total_deploys > 0 else 0
+
+ # --- Mean Time to Recovery ---
+ recovery_times = []
+ for incident in incidents:
+ if incident.time_to_recovery_hours is not None:
+ recovery_times.append(incident.time_to_recovery_hours)
+ elif incident.resolved_at and incident.opened_at:
+ rt = (incident.resolved_at - incident.opened_at).total_seconds() / 3600
+ recovery_times.append(rt)
+
+ mttr = statistics.mean(recovery_times) if recovery_times else 0
+
+ metrics = DORAMetrics(
+ deployment_frequency=deploy_freq,
+ lead_time_hours=median_lead_time,
+ change_failure_rate=min(cfr, 1.0),
+ mttr_hours=mttr,
+ level="",
+ details={
+ "total_deployments": len(deployments),
+ "successful_deployments": len(successful_deploys),
+ "failed_deployments": len(failed_deploys),
+ "total_merged_prs": len(merged_prs),
+ "reverts": reverts,
+ "hotfixes": hotfixes,
+ "incidents": len(incidents),
+ "change_incidents": len(change_incidents),
+ "lead_times_count": len(lead_times),
+ "recovery_times_count": len(recovery_times),
+ "period_days": days,
+ },
+ )
+ metrics.level = classify_dora_level(metrics)
+
+ # Persist the snapshot
+ dora_snap = DORASnapshot(
+ repository_id=repo_id,
+ team_id=team_id,
+ period_start=period_start,
+ period_end=period_end,
+ deployment_frequency=deploy_freq,
+ lead_time_hours=median_lead_time,
+ change_failure_rate=cfr,
+ mttr_hours=mttr,
+ level=metrics.level,
+ details=metrics.details,
+ )
+ store.dora.save(dora_snap)
+ store.commit()
+
+ return metrics
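+
+
+# Example (programmatic use, assuming get_session from ..db.engine):
+#
+#   session = get_session()
+#   m = compute_dora(MetricStore(session), start, end)
+#   print(m.level, f"{m.deployment_frequency:.2f} deploys/day")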
diff --git a/src/wellcode_cli/services/surveys.py b/src/wellcode_cli/services/surveys.py
new file mode 100644
index 0000000..207d9f9
--- /dev/null
+++ b/src/wellcode_cli/services/surveys.py
@@ -0,0 +1,179 @@
+"""Developer Experience survey service."""
+
+import statistics
+from collections import defaultdict
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Optional
+
+from ..db.engine import get_session
+from ..db.models import Survey, SurveyQuestion, SurveyResponse
+from ..db.repository import MetricStore
+
+DX_PULSE_QUESTIONS = [
+ {
+ "text": "How productive did you feel this week? (1=Not at all, 5=Very productive)",
+ "type": "rating",
+ "category": "productivity",
+ },
+ {
+ "text": "How easy was it to get your code reviewed? (1=Very difficult, 5=Very easy)",
+ "type": "rating",
+ "category": "code_review",
+ },
+ {
+ "text": "How confident are you in deploying to production? (1=Not confident, 5=Very confident)",
+ "type": "rating",
+ "category": "deployment",
+ },
+]
+
+DX_FULL_QUESTIONS = [
+ {"text": "I can quickly find the information I need to do my work.", "type": "rating", "category": "speed"},
+ {"text": "Our CI/CD pipeline is fast and reliable.", "type": "rating", "category": "speed"},
+ {"text": "I can stay in a state of flow during development.", "type": "rating", "category": "effectiveness"},
+ {"text": "Our codebase is easy to understand and navigate.", "type": "rating", "category": "effectiveness"},
+ {"text": "I rarely encounter flaky tests or broken builds.", "type": "rating", "category": "quality"},
+ {"text": "Our code review process improves code quality.", "type": "rating", "category": "quality"},
+ {"text": "My work directly impacts our business goals.", "type": "rating", "category": "impact"},
+ {"text": "I understand how my team's work connects to company objectives.", "type": "rating", "category": "impact"},
+ {"text": "What is the biggest bottleneck in your development workflow?", "type": "text", "category": "open"},
+ {"text": "What tools or processes would you like to see improved?", "type": "text", "category": "open"},
+]
+
+SURVEY_TEMPLATES = {
+ "pulse": DX_PULSE_QUESTIONS,
+ "full_dx": DX_FULL_QUESTIONS,
+}
+
+
+@dataclass
+class SurveyAnalytics:
+ survey_id: int
+ title: str
+ total_responses: int = 0
+ response_rate: float = 0.0
+ dxi_score: float = 0.0 # Developer Experience Index (1-5)
+ category_scores: dict = field(default_factory=dict)
+ question_scores: dict = field(default_factory=dict)
+ text_responses: list = field(default_factory=list)
+
+
+def create_survey_from_template(
+ template: str = "pulse",
+ title: Optional[str] = None,
+ target_teams: Optional[list] = None,
+ recurrence: str = "none",
+) -> Survey:
+ """Create a new survey from a predefined template."""
+ session = get_session()
+ store = MetricStore(session)
+
+ questions = SURVEY_TEMPLATES.get(template, DX_PULSE_QUESTIONS)
+
+ survey = Survey(
+ title=title or f"Developer Experience Survey ({template})",
+ description=f"Auto-generated {template} survey",
+ survey_type=template,
+ status="active",
+ starts_at=datetime.now(timezone.utc),
+ recurrence=recurrence,
+ target_teams=target_teams,
+ )
+ store.surveys.create_survey(survey)
+
+ for i, q in enumerate(questions):
+ sq = SurveyQuestion(
+ survey_id=survey.id,
+ question_text=q["text"],
+ question_type=q["type"],
+ category=q.get("category"),
+ order=i,
+ options={"min": 1, "max": 5} if q["type"] == "rating" else None,
+ )
+ session.add(sq)
+
+ store.commit()
+ session.close()
+ return survey
+
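+# Example (hypothetical team and recurrence values): a recurring pulse survey.
+#
+#   create_survey_from_template("pulse", title="Weekly pulse",
+#                               target_teams=["platform"], recurrence="weekly")
+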
+
+def submit_response(survey_id: int, answers: dict, developer_id: Optional[int] = None) -> SurveyResponse:
+ """Submit a survey response."""
+ session = get_session()
+ store = MetricStore(session)
+
+ # Calculate average sentiment score from rating questions
+ ratings = [v for v in answers.values() if isinstance(v, (int, float))]
+ sentiment = statistics.mean(ratings) if ratings else None
+
+ response = SurveyResponse(
+ survey_id=survey_id,
+ developer_id=developer_id,
+ answers=answers,
+ sentiment_score=sentiment,
+ )
+ store.surveys.add_response(response)
+ store.commit()
+ session.close()
+ return response
+
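+# Example (hypothetical answers keyed by question id): numeric ratings are
+# averaged into sentiment_score; text answers are ignored for the average.
+#
+#   submit_response(survey_id=1, answers={"1": 4, "2": 5, "3": "CI is slow"})
+#   # -> response.sentiment_score == 4.5
+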
+
+def analyze_survey(survey_id: int) -> SurveyAnalytics:
+ """Compute analytics for a survey."""
+ session = get_session()
+ store = MetricStore(session)
+
+ responses = store.surveys.get_responses(survey_id)
+
+ survey = session.get(Survey, survey_id)
+    questions = (
+        session.query(SurveyQuestion)
+        .filter_by(survey_id=survey_id)
+        .order_by(SurveyQuestion.order)
+        .all()
+        if survey
+        else []
+    )
+
+ analytics = SurveyAnalytics(
+ survey_id=survey_id,
+ title=survey.title if survey else "",
+ total_responses=len(responses),
+ )
+
+ if not responses:
+ session.close()
+ return analytics
+
+ # Aggregate scores by category
+ category_ratings = defaultdict(list)
+ question_ratings = defaultdict(list)
+ text_answers = []
+
+ for resp in responses:
+ answers = resp.answers or {}
+ for key, value in answers.items():
+ if isinstance(value, (int, float)):
+ question_ratings[key].append(value)
+ # Find category for this question
+ for q in questions:
+ if str(q.id) == key or q.question_text == key:
+ if q.category:
+ category_ratings[q.category].append(value)
+ break
+ elif isinstance(value, str) and value.strip():
+ text_answers.append({"question": key, "answer": value})
+
+ analytics.category_scores = {
+ cat: statistics.mean(vals)
+ for cat, vals in category_ratings.items()
+ }
+
+ analytics.question_scores = {
+ q: statistics.mean(vals)
+ for q, vals in question_ratings.items()
+ }
+
+ analytics.text_responses = text_answers
+
+ all_ratings = [v for vals in category_ratings.values() for v in vals]
+ analytics.dxi_score = statistics.mean(all_ratings) if all_ratings else 0
+
+ session.close()
+ return analytics
diff --git a/src/wellcode_cli/web/__init__.py b/src/wellcode_cli/web/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/web/static/index.html b/src/wellcode_cli/web/static/index.html
new file mode 100644
index 0000000..3ec3dea
--- /dev/null
+++ b/src/wellcode_cli/web/static/index.html
@@ -0,0 +1,326 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <title>Wellcode - Developer Productivity Dashboard</title>
+  <!-- Single-page dashboard shell. Styles, chart scripts, and exact
+       class/id names are placeholders sketched from the page's text. -->
+</head>
+<body>
+  <header>
+    <div class="logo">W</div>
+    <span class="brand">Wellcode</span>
+    <span class="version">v0.2.0</span>
+    <nav>
+      <a href="#overview">Overview</a>
+      <a href="#dora">DORA</a>
+      <a href="#ai">AI Metrics</a>
+      <a href="#prs">Pull Requests</a>
+      <a href="#surveys">DX Surveys</a>
+    </nav>
+    <select id="period">
+      <option value="7">Last 7 days</option>
+      <option value="14">Last 14 days</option>
+      <option value="30">Last 30 days</option>
+      <option value="90">Last 90 days</option>
+    </select>
+    <button id="collect-btn">Collect</button>
+  </header>
+
+  <main>
+    <section id="overview" class="tab">
+      <div class="panel">
+        <h3>PR Size Distribution</h3>
+        <div class="chart" id="pr-size-chart"></div>
+      </div>
+    </section>
+
+    <section id="dora" class="tab">
+      <h2>DORA Metrics <span id="dora-level" class="badge">-</span></h2>
+      <div class="cards">
+        <div class="card">
+          <h3>Deployment Frequency</h3>
+          <div class="value" id="deploy-freq">-</div>
+          <div class="hint">Elite: &gt;1/day</div>
+        </div>
+        <div class="card">
+          <h3>Lead Time for Changes</h3>
+          <div class="value" id="lead-time">-</div>
+          <div class="hint">Elite: &lt;1 hour</div>
+        </div>
+        <div class="card">
+          <h3>Change Failure Rate</h3>
+          <div class="value" id="cfr">-</div>
+          <div class="hint">Elite: 0-15%</div>
+        </div>
+        <div class="card">
+          <h3>Mean Time to Recovery</h3>
+          <div class="value" id="mttr">-</div>
+          <div class="hint">Elite: &lt;1 hour</div>
+        </div>
+      </div>
+      <div class="panel">
+        <h3>DORA Trend</h3>
+        <div class="chart" id="dora-trend-chart"></div>
+      </div>
+    </section>
+
+    <section id="ai" class="tab">
+      <div class="card">
+        <h3>Productivity Change</h3>
+        <div class="value" id="productivity-change">-</div>
+      </div>
+      <div class="panel">
+        <h3>AI vs Non-AI Comparison</h3>
+        <div class="chart" id="ai-comparison-chart"></div>
+      </div>
+    </section>
+
+    <section id="prs" class="tab">
+      <div class="card">
+        <h3>Avg Time to Review</h3>
+        <div class="value" id="avg-review-time">-</div>
+      </div>
+    </section>
+
+    <section id="surveys" class="tab">
+      <h2>Developer Experience Surveys</h2>
+      <p>Create and manage DX surveys to measure developer satisfaction and identify bottlenecks.</p>
+      <button data-template="pulse">Create Pulse Survey</button>
+      <button data-template="full_dx">Create Full DX Survey</button>
+    </section>
+  </main>
+</body>
+</html>
diff --git a/src/wellcode_cli/workers/__init__.py b/src/wellcode_cli/workers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/wellcode_cli/workers/scheduler.py b/src/wellcode_cli/workers/scheduler.py
new file mode 100644
index 0000000..7515576
--- /dev/null
+++ b/src/wellcode_cli/workers/scheduler.py
@@ -0,0 +1,58 @@
+"""Background metric collection scheduler using APScheduler."""
+
+import logging
+from datetime import datetime, timedelta, timezone
+
+from apscheduler.schedulers.background import BackgroundScheduler
+from apscheduler.triggers.interval import IntervalTrigger
+
+from ..services.collector import collect_all
+
+logger = logging.getLogger(__name__)
+
+_scheduler: BackgroundScheduler | None = None
+
+
+def _run_collection(days_back: int = 7):
+ """Execute a metric collection run."""
+ now = datetime.now(timezone.utc)
+ start = now - timedelta(days=days_back)
+ logger.info("Scheduled collection: %s to %s", start.date(), now.date())
+ try:
+ summary = collect_all(start, now)
+ logger.info("Collection complete: %s", summary)
+ except Exception as e:
+ logger.error("Scheduled collection failed: %s", e)
+
+
+def start_scheduler(
+ interval_hours: int = 6,
+ days_back: int = 7,
+) -> BackgroundScheduler:
+ """Start the background scheduler for periodic metric collection."""
+ global _scheduler
+ if _scheduler is not None:
+ return _scheduler
+
+ _scheduler = BackgroundScheduler()
+
+ _scheduler.add_job(
+ _run_collection,
+ trigger=IntervalTrigger(hours=interval_hours),
+ kwargs={"days_back": days_back},
+ id="metric_collection",
+ name="Periodic metric collection",
+ replace_existing=True,
+ )
+
+ _scheduler.start()
+ logger.info("Scheduler started: collecting every %d hours", interval_hours)
+ return _scheduler
+
+
+def stop_scheduler():
+ global _scheduler
+ if _scheduler:
+ _scheduler.shutdown(wait=True)
+ _scheduler = None
+ logger.info("Scheduler stopped")