diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..0b30f71
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,28 @@
+# EditorConfig is awesome: https://EditorConfig.org
+# Top-most EditorConfig file; root = true stops search at this directory
+root = true
+
+# Defaults for all files
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 4
+
+# Python
+[*.py]
+indent_size = 4
+
+# YAML files (e.g. workflows, pre-commit config)
+[*.{yml,yaml}]
+indent_size = 2
+
+# Markdown files (trailing whitespace often meaningful)
+[*.md]
+trim_trailing_whitespace = false
+
+# Makefile (requires tab indentation)
+[Makefile]
+indent_style = tab
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..949e514
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,35 @@
+---
+name: Bug Report
+about: Report a bug to help us improve
+title: '[BUG] '
+labels: bug
+assignees: ''
+---
+
+## Description
+
+A clear and concise description of what the bug is.
+
+## Steps to Reproduce
+
+1.
+2.
+3.
+
+## Expected Behavior
+
+What you expected to happen.
+
+## Actual Behavior
+
+What actually happened.
+
+## Environment
+
+- OS: [e.g., macOS 14.0, Ubuntu 22.04]
+- Python version: [e.g., 3.12.0 — run `python --version`]
+- How you installed TimeRun: [e.g., pip from PyPI, editable install, git clone]
+
+## Additional Context
+
+Add any other context, logs, or information about the problem here. See [CONTRIBUTING.md](https://github.com/HH-MWB/timerun/blob/main/CONTRIBUTING.md#reporting-bugs) for more guidance.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..a6ffd5b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,27 @@
+---
+name: Feature Request
+about: Suggest an idea for this project
+title: '[FEATURE] '
+labels: enhancement
+assignees: ''
+---
+
+## Description
+
+A clear and concise description of what you want to happen.
+
+## Motivation
+
+Explain why this feature would be useful. What problem does it solve?
+
+## Proposed Solution
+
+Describe how you envision this feature working.
+
+## Alternatives Considered
+
+Describe any alternative solutions or features you've considered.
+
+## Additional Context
+
+Add any other context or examples about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..bdc27ea
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,21 @@
+## Description
+
+Brief description of what this PR does and why.
+
+## Type of Change
+
+- [ ] Bug fix (non-breaking change which fixes an issue)
+- [ ] New feature (non-breaking change which adds functionality)
+- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
+- [ ] Documentation update
+- [ ] Other (please describe):
+
+## Related Issues
+
+Fixes #
+
+## Checklist
+
+- [ ] I have run `make lint test` and both pass
+- [ ] For new or changed behavior, I have added or updated BDD scenarios in `features/`
+- [ ] I have updated the documentation accordingly (if applicable)
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 477d9f7..93ea174 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1,37 +1,52 @@
---
-name: Continuous Integration
+name: Run continuous integration
# yamllint disable-line rule:truthy
on:
pull_request:
- branches: [main]
+ branches:
+ - main
push:
- branches: [main]
+ branches:
+ - main
permissions:
contents: read
+concurrency:
+ group: ci-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
lint:
+ name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v6
- - name: Set up Python 3.11
+ - name: Set up Python 3.10
uses: actions/setup-python@v6
with:
- python-version: '3.11'
+ python-version: '3.10'
+ cache: 'pip'
- name: Run pre-commit hooks
uses: pre-commit/action@v3.0.1
test:
+ name: Test
runs-on: ubuntu-latest
needs: lint
strategy:
+ fail-fast: false
matrix:
- python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
+ python-version:
+ - '3.10'
+ - '3.11'
+ - '3.12'
+ - '3.13'
+ - '3.14'
steps:
- name: Checkout code
uses: actions/checkout@v6
@@ -40,9 +55,13 @@ jobs:
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
+ cache: 'pip'
- name: Install test dependencies
- run: pip install -e ".[dev]"
+ run: python -m pip install -e ".[dev]"
+
+ - name: Audit dependencies
+ run: python -m pip_audit
- name: Run tests with coverage
run: |
@@ -57,6 +76,7 @@ jobs:
flags: python${{ matrix.python-version }}
build:
+ name: Build
runs-on: ubuntu-latest
needs: test
steps:
@@ -67,9 +87,10 @@ jobs:
uses: actions/setup-python@v6
with:
python-version: '3.11'
+ cache: 'pip'
- name: Install build dependencies
- run: pip install build twine
+ run: python -m pip install build twine
- name: Build package
run: python -m build
diff --git a/.github/workflows/pages.yaml b/.github/workflows/pages.yaml
new file mode 100644
index 0000000..babedde
--- /dev/null
+++ b/.github/workflows/pages.yaml
@@ -0,0 +1,59 @@
+---
+name: Deploy docs to GitHub Pages
+
+# yamllint disable-line rule:truthy
+on:
+ push:
+ branches:
+ - main
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+concurrency:
+ group: pages
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Build site
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v6
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v6
+ with:
+ python-version: '3.11'
+
+ - name: Create venv and install docs dependencies
+ run: |
+ python -m venv .venv
+ .venv/bin/pip install -e ".[docs]"
+
+ - name: Build site
+ run: .venv/bin/zensical build
+
+ - name: Setup Pages
+ uses: actions/configure-pages@v4
+
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ path: site
+
+ deploy:
+ name: Deploy site
+ environment:
+ name: github-pages
+ url: ${{ steps.deploy.outputs.page_url }}
+ runs-on: ubuntu-latest
+ needs: build
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deploy
+ uses: actions/deploy-pages@v4
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index c5113cc..5c07479 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -1,35 +1,96 @@
+---
name: Release to PyPI
+# yamllint disable-line rule:truthy
on:
- workflow_dispatch:
+ release:
+ types:
+ - published
+ - edited
permissions:
- contents: write
- id-token: write
+ contents: read
+
+concurrency:
+ group: release
+ cancel-in-progress: false
jobs:
- release:
+ build:
+ name: Build distribution
runs-on: ubuntu-latest
+ permissions:
+ contents: read
steps:
- name: Checkout code
uses: actions/checkout@v6
+ with:
+ persist-credentials: false
- name: Set up Python 3.11
uses: actions/setup-python@v6
with:
python-version: '3.11'
+ cache: 'pip'
+
+ - name: Upgrade pip
+ run: python -m pip install --upgrade pip
- name: Install build dependencies
- run: pip install build
+ run: python -m pip install build twine
- name: Build package
run: python -m build
- - name: Release to PyPI
+ - name: Check package
+ run: twine check dist/*
+
+ - name: Store the distribution packages
+ uses: actions/upload-artifact@v4
+ with:
+ name: python-package-distributions
+ path: dist/
+
+ publish-testpypi:
+ name: Publish to TestPyPI
+ needs: build
+ if: github.event.release.prerelease
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ id-token: write
+ environment:
+ name: testpypi
+ url: https://test.pypi.org/project/timerun/
+ steps:
+ - name: Download distribution packages
+ uses: actions/download-artifact@v4
+ with:
+ name: python-package-distributions
+ path: dist/
+
+ - name: Publish to TestPyPI
uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ repository-url: https://test.pypi.org/legacy/
- - name: Create tag
- run: |
- VERSION=$(python -c "import timerun; print(timerun.__version__)")
- git tag "v$VERSION"
- git push origin "v$VERSION"
+ publish-pypi:
+ name: Publish to PyPI
+ needs: build
+ if: ${{ !github.event.release.prerelease }}
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ id-token: write
+ environment:
+ name: pypi
+ url: https://pypi.org/project/timerun/
+ steps:
+ - name: Download distribution packages
+ uses: actions/download-artifact@v4
+ with:
+ name: python-package-distributions
+ path: dist/
+
+ - name: Publish to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dc19a62..ded5a6f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,8 +1,13 @@
+---
repos:
- repo: 'https://github.com/pre-commit/pre-commit-hooks'
rev: v6.0.0
hooks:
+ - id: check-merge-conflict
+ - id: check-case-conflict
+ - id: check-added-large-files
- id: trailing-whitespace
+ args: [--markdown-linebreak-ext=md]
- id: end-of-file-fixer
- id: fix-byte-order-marker
- id: mixed-line-ending
@@ -11,8 +16,13 @@ repos:
- id: check-yaml
- id: check-toml
+ - repo: https://github.com/adrienverge/yamllint
+ rev: v1.38.0
+ hooks:
+ - id: yamllint
+
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.15.1
+ rev: v0.15.5
hooks:
- id: ruff-format
- id: ruff-check
@@ -26,24 +36,20 @@ repos:
additional_dependencies: [behave]
- repo: https://github.com/pylint-dev/pylint
- rev: v4.0.4
+ rev: v4.0.5
hooks:
- id: pylint
additional_dependencies: [behave]
- repo: https://github.com/PyCQA/bandit
- rev: 1.9.3
+ rev: 1.9.4
hooks:
- id: bandit
args: ["-c", "pyproject.toml"]
+ additional_dependencies: ["bandit[toml]"]
- repo: https://github.com/semgrep/pre-commit
- rev: v1.151.0
+ rev: v1.154.0
hooks:
- id: semgrep
args: ["--config", "p/python", "--error"]
-
- - repo: https://github.com/adrienverge/yamllint
- rev: v1.38.0
- hooks:
- - id: yamllint
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..1a55160
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,86 @@
+# Contributor Covenant 3.0 Code of Conduct
+
+## Our Pledge
+
+We pledge to make our community welcoming, safe, and equitable for all.
+
+We are committed to fostering an environment that respects and promotes the dignity, rights, and contributions of all individuals, regardless of characteristics including race, ethnicity, caste, color, age, physical characteristics, neurodiversity, disability, sex or gender, gender identity or expression, sexual orientation, language, philosophy or religion, national or social origin, socio-economic position, level of education, or other status. The same privileges of participation are extended to everyone who participates in good faith and in accordance with this Covenant.
+
+## Encouraged Behaviors
+
+While acknowledging differences in social norms, we all strive to meet our community's expectations for positive behavior. We also understand that our words and actions may be interpreted differently than we intend based on culture, background, or native language.
+
+With these considerations in mind, we agree to behave mindfully toward each other and act in ways that center our shared values, including:
+
+1. Respecting the **purpose of our community**, our activities, and our ways of gathering.
+2. Engaging **kindly and honestly** with others.
+3. Respecting **different viewpoints** and experiences.
+4. **Taking responsibility** for our actions and contributions.
+5. Gracefully giving and accepting **constructive feedback**.
+6. Committing to **repairing harm** when it occurs.
+7. Behaving in other ways that promote and sustain the **well-being of our community**.
+
+## Restricted Behaviors
+
+We agree to restrict the following behaviors in our community. Instances, threats, and promotion of these behaviors are violations of this Code of Conduct.
+
+1. **Harassment.** Violating explicitly expressed boundaries or engaging in unnecessary personal attention after any clear request to stop.
+2. **Character attacks.** Making insulting, demeaning, or pejorative comments directed at a community member or group of people.
+3. **Stereotyping or discrimination.** Characterizing anyone's personality or behavior on the basis of immutable identities or traits.
+4. **Sexualization.** Behaving in a way that would generally be considered inappropriately intimate in the context or purpose of the community.
+5. **Violating confidentiality.** Sharing or acting on someone's personal or private information without their permission.
+6. **Endangerment.** Causing, encouraging, or threatening violence or other harm toward any person or group.
+7. Behaving in other ways that **threaten the well-being** of our community.
+
+### Other Restrictions
+
+1. **Misleading identity.** Impersonating someone else for any reason, or pretending to be someone else to evade enforcement actions.
+2. **Failing to credit sources.** Not properly crediting the sources of content you contribute.
+3. **Promotional materials.** Sharing marketing or other commercial content in a way that is outside the norms of the community.
+4. **Irresponsible communication.** Failing to responsibly present content which includes, links or describes any other restricted behaviors.
+
+## Reporting an Issue
+
+Tensions can occur between community members even when they are trying their best to collaborate. Not every conflict represents a code of conduct violation, and this Code of Conduct reinforces encouraged behaviors and norms that can help avoid conflicts and minimize harm.
+
+When an incident does occur, it is important to report it promptly. To report a possible violation, please contact the project maintainers by:
+
+- Opening a [GitHub issue](https://github.com/HH-MWB/timerun/issues/new) and labeling it appropriately (note: issues are public, but we will handle reports with discretion and respect for privacy)
+- Contacting the project maintainers directly through their GitHub profiles
+
+Community Moderators take reports of violations seriously and will make every effort to respond in a timely manner. They will investigate all reports of code of conduct violations, reviewing messages, logs, and recordings, or interviewing witnesses and other participants. Community Moderators will keep investigation and enforcement actions as transparent as possible while prioritizing safety and confidentiality. In order to honor these values, enforcement actions are carried out in private with the involved parties, but communicating to the whole community may be part of a mutually agreed upon resolution.
+
+## Addressing and Repairing Harm
+
+If an investigation by the Community Moderators finds that this Code of Conduct has been violated, the following enforcement ladder may be used to determine how best to repair harm, based on the incident's impact on the individuals involved and the community as a whole. Depending on the severity of a violation, lower rungs on the ladder may be skipped.
+
+1) Warning
+ 1) Event: A violation involving a single incident or series of incidents.
+ 2) Consequence: A private, written warning from the Community Moderators.
+ 3) Repair: Examples of repair include a private written apology, acknowledgement of responsibility, and seeking clarification on expectations.
+2) Temporarily Limited Activities
+ 1) Event: A repeated incidence of a violation that previously resulted in a warning, or the first incidence of a more serious violation.
+ 2) Consequence: A private, written warning with a time-limited cooldown period designed to underscore the seriousness of the situation and give the community members involved time to process the incident. The cooldown period may be limited to particular communication channels or interactions with particular community members.
+ 3) Repair: Examples of repair may include making an apology, using the cooldown period to reflect on actions and impact, and being thoughtful about re-entering community spaces after the period is over.
+3) Temporary Suspension
+ 1) Event: A pattern of repeated violation which the Community Moderators have tried to address with warnings, or a single serious violation.
+ 2) Consequence: A private written warning with conditions for return from suspension. In general, temporary suspensions give the person being suspended time to reflect upon their behavior and possible corrective actions.
+ 3) Repair: Examples of repair include respecting the spirit of the suspension, meeting the specified conditions for return, and being thoughtful about how to reintegrate with the community when the suspension is lifted.
+4) Permanent Ban
+ 1) Event: A pattern of repeated code of conduct violations that other steps on the ladder have failed to resolve, or a violation so serious that the Community Moderators determine there is no way to keep the community safe with this person as a member.
+ 2) Consequence: Access to all community spaces, tools, and communication channels is removed. In general, permanent bans should be rarely used, should have strong reasoning behind them, and should only be resorted to if working through other remedies has failed to change the behavior.
+ 3) Repair: There is no possible repair in cases of this severity.
+
+This enforcement ladder is intended as a guideline. It does not limit the ability of Community Moderators to use their discretion and judgment, in keeping with the best interests of our community.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public or other spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 3.0, permanently available at [https://www.contributor-covenant.org/version/3/0/](https://www.contributor-covenant.org/version/3/0/).
+
+Contributor Covenant is stewarded by the Organization for Ethical Source and licensed under CC BY-SA 4.0. To view a copy of this license, visit [https://creativecommons.org/licenses/by-sa/4.0/](https://creativecommons.org/licenses/by-sa/4.0/)
+
+For answers to common questions about Contributor Covenant, see the FAQ at [https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq). Translations are provided at [https://www.contributor-covenant.org/translations](https://www.contributor-covenant.org/translations). Additional enforcement and community guideline resources can be found at [https://www.contributor-covenant.org/resources](https://www.contributor-covenant.org/resources). The enforcement ladder was inspired by the work of [Mozilla's code of conduct team](https://github.com/mozilla/inclusion).
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9cead11..9c84051 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,17 +6,19 @@ Thank you for considering contributing to TimeRun. This guide explains how to se
- [Code of Conduct](#code-of-conduct)
- [How You Can Help](#how-you-can-help)
-- [Development Setup](#development-setup)
+- [Setup](#setup)
+- [Development Commands](#development-commands)
- [Testing](#testing)
- [Code Style and Quality](#code-style-and-quality)
- [Project Structure](#project-structure)
- [Pull Request Process](#pull-request-process)
+- [Releasing](#releasing)
- [Reporting Bugs](#reporting-bugs)
- [License](#license)
## Code of Conduct
-Please be respectful and constructive. By participating, you agree to uphold a welcoming environment for everyone.
+This project adheres to the [Contributor Covenant Code of Conduct](https://www.contributor-covenant.org/version/3/0/). See [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) for the full text and how to report issues.
## How You Can Help
@@ -25,7 +27,7 @@ Please be respectful and constructive. By participating, you agree to uphold a w
- **Submit code** — Fix bugs or add features via pull requests (see [Pull Request Process](#pull-request-process)).
- **Improve docs** — Fix typos, clarify README or docstrings, or add examples.
-## Development Setup
+## Setup
### Prerequisites
@@ -34,50 +36,34 @@ Please be respectful and constructive. By participating, you agree to uphold a w
### One-time setup
-1. **Fork** the repository on GitHub, then clone your fork:
+1. Fork the repository on GitHub, then clone your fork and go into the project directory:
```bash
- git clone https://github.com/YOUR_USERNAME/timerun.git
+    git clone https://github.com/YOUR_USERNAME/timerun.git
cd timerun
```
-2. **Create and activate a virtual environment** (recommended):
+2. Run `make init`. You will be prompted to choose a Python interpreter (press Enter for `python3`, or type e.g. `python3.10`). To skip the prompt, run `make init PYTHON=python3.10` (or another 3.10+ interpreter). This creates a `.venv`, installs the package in editable mode with dev and docs dependencies (Zensical), and installs pre-commit hooks.
- ```bash
- python3 -m venv .venv
- source .venv/bin/activate # Windows: .venv\Scripts\activate
- ```
-
-3. **Install the project in editable mode with dev dependencies**:
-
- ```bash
- pip install -e ".[dev]"
- ```
-
-4. **Install and enable pre-commit hooks** (optional but recommended):
-
- ```bash
- pip install pre-commit
- pre-commit install
- ```
-
- Or use the convenience target:
-
- ```bash
- make init
- ```
-
- Then activate the venv: `source .venv/bin/activate`.
+3. Optionally activate the venv for interactive use: `source .venv/bin/activate` (Windows: `.venv\Scripts\activate`). You can run `make test` and `make lint` without activating.
### Verify setup
-Run the test suite:
+1. Run `make test`. You should see the BDD scenarios run and a coverage report.
+2. Run `make lint`. Lint should pass.
-```bash
-make test
-```
+## Development Commands
+
+Use the Makefile for common tasks. Run `make help` for the full list.
-You should see the BDD scenarios run and a coverage report.
+- **`make help`** — Show all targets and descriptions
+- **`make init`** — Prompts for Python interpreter (default: `python3`); set `PYTHON` to skip (e.g. `make init PYTHON=python3.10`). Sets up venv, installs package and dev + docs deps (Zensical), installs pre-commit hooks.
+- **`make clean`** — Remove all files and directories listed in `.gitignore` (inverse of init)
+- **`make test`** — Run BDD tests with coverage (summary output)
+- **`make test-verbose`** — Run BDD tests with full scenario/step output (for debugging)
+- **`make docs`** — Serve the docs locally (http://127.0.0.1:8000)
+- **`make docs-build`** — Build the docs site (output in `site/`; config: `zensical.toml`)
+- **`make lint`** — Run pre-commit (format and lint) on all files
## Testing
@@ -85,21 +71,8 @@ TimeRun uses **behavior-driven development (BDD)** with [behave](https://behave.
### Run tests
-| Command | Description |
-|--------------------|----------------------------------------------------------------|
-| `make test` | Run BDD suite with progress + summary + coverage (default) |
-| `make test-summary`| Summary and coverage only (minimal output) |
-| `make test-verbose`| Full scenario/step output (use when debugging failures) |
-| `behave` | Run BDD suite only (no coverage) |
-
-### Run coverage manually
-
-```bash
-coverage run --source=timerun -m behave # full output
-coverage run --source=timerun -m behave -f progress # progress + summary
-coverage run --source=timerun -m behave -f null # summary only
-coverage report --show-missing
-```
+- Use **`make test`** for normal runs (summary and coverage; failures show which scenario failed).
+- Use **`make test-verbose`** when debugging failures (full scenario/step output).
### Adding or changing tests
@@ -109,43 +82,49 @@ coverage report --show-missing
## Code Style and Quality
-Style and linting are enforced via **pre-commit** (Ruff, mypy, Pylint, and other hooks). After `pre-commit install`, these run automatically on each commit.
-
-### Run checks manually
-
-```bash
-pre-commit run --all-files
-```
+Pre-commit hooks (installed by `make init`) run on each commit. Before pushing, run `make lint` and fix any failures so CI stays green.
-### What we expect
+CI (on pull requests and pushes to `main`) runs: **lint** (pre-commit) → **test** (Python 3.10–3.14 matrix, with coverage) → **build** (package build and `twine check`). Outdated runs for the same branch are cancelled automatically.
-- **Formatting** — Ruff format (run via pre-commit or `ruff format`).
-- **Linting** — Ruff check, Pylint, and other hooks must pass.
-- **Types** — Use type hints for public APIs; mypy must pass.
-- **Docstrings** — Public functions, classes, and modules should have docstrings.
-- **Security** — Bandit and Semgrep run in pre-commit; address any reported issues.
+We expect (all run via `make lint`):
-Fixing pre-commit failures before pushing keeps the history clean and CI green.
+- **pre-commit-hooks** — Trailing whitespace removed, end-of-file newline, no BOM, LF line endings; YAML and TOML syntax checked
+- **Ruff** — Code formatting (`ruff-format`) and linting (`ruff-check`)
+- **mypy** — Static type checking on `timerun.py`; use type hints on public APIs
+- **Pylint** — Lint and style on `timerun.py`; docstrings expected on public functions, classes, and modules
+- **Bandit** — Security issue detection (config in `pyproject.toml`)
+- **Semgrep** — Security and bug patterns (Python ruleset)
+- **yamllint** — YAML style and syntax (e.g. workflow and config files)
## Project Structure
```
timerun/
-├── timerun.py # Library (single-file by design)
-├── features/ # BDD feature files (Gherkin) — behave convention
-│ ├── __init__.py # Makes features a package for imports
+├── .editorconfig # Editor configuration for consistent style across editors
+├── .github/ # GitHub configuration
+│ ├── ISSUE_TEMPLATE/ # Issue templates (bug report, feature request)
+│ ├── PULL_REQUEST_TEMPLATE.md
+│ └── workflows/ # CI (ci.yaml), pages (pages.yaml), release (release.yaml)
+├── .pre-commit-config.yaml # Pre-commit hooks configuration
+├── features/ # BDD feature files (Gherkin) — behave convention
│ ├── *.feature
-│ ├── environment.py # Optional: hooks (before/after scenario, etc.)
-│ └── steps/ # Step definitions (flat; all .py files loaded)
+│ ├── __init__.py
+│ ├── environment.py # Optional: hooks (before/after scenario, etc.)
+│ └── steps/ # Step definitions (flat; all .py files loaded)
│ ├── __init__.py
-│ ├── utils.py # Shared constants and helpers (no step decorators)
-│ ├── common_steps.py # Shared steps used by multiple features
-│ └── *_steps.py # Feature-specific step files
-├── pyproject.toml # Project metadata and config
-├── Makefile # Commands: init, test, clean, help
+│ ├── utils.py # Shared constants and helpers (no step decorators)
+│ ├── common_steps.py # Shared steps used by multiple features
+│ └── *_steps.py # Feature-specific step files
+├── pyproject.toml # Project metadata and config
+├── zensical.toml # Docs site config (Zensical)
+├── docs/ # Docs source (Markdown)
+├── timerun.py # Library (single-file by design)
+├── Makefile # Commands: init, test, test-verbose, lint, docs, docs-build, clean, help
├── README.md
+├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
-└── LICENSE
+├── LICENSE
+└── SECURITY.md
```
- **`timerun.py`** — The only library module; keep it a single file by design.
@@ -163,11 +142,10 @@ timerun/
2. **Make your changes** — Follow [Code Style and Quality](#code-style-and-quality) and add or update BDD scenarios in `features/` for new or changed behavior.
-3. **Run the suite and pre-commit**:
+3. **Verify lint and tests pass** (run lint, then tests):
```bash
- make test
- pre-commit run --all-files
+ make lint test
```
4. **Commit** with clear, concise messages. Optionally use conventional style (e.g. `feat: add X`, `fix: correct Y`).
@@ -185,6 +163,29 @@ timerun/
Maintainers will review and may request changes. Once approved, your PR will be merged.
+## Releasing
+
+Releases are driven by **GitHub Releases** and publish to **TestPyPI** first, then **PyPI** after confirmation.
+
+### Prerequisites (maintainers)
+
+- **Environments** in this repo: `testpypi` and `pypi` (Settings → Environments).
+- **Trusted Publishing** configured on [PyPI](https://pypi.org/manage/account/publishing/) and [TestPyPI](https://test.pypi.org/manage/account/publishing/) for this repository, workflow `release.yaml`, and the corresponding environment names.
+
+### Release flow
+
+1. **Bump version** in `timerun.py` (`__version__`) and commit to `main`.
+2. **Create a GitHub Release** (Releases → Draft a new release):
+ - Choose or create a tag (e.g. `v1.0.0`) from `main`.
+ - Check **“This is a pre-release”**.
+ - Add release notes and publish.
+3. The **release workflow** runs and publishes the package to **TestPyPI** only.
+4. **Test** the package from TestPyPI (e.g. `pip install -i https://test.pypi.org/simple/ timerun==1.0.0`).
+5. When satisfied, **edit the release** on GitHub: uncheck “This is a pre-release” and save.
+6. The workflow runs again and publishes to **PyPI**.
+
+The same workflow handles both events: `release: types: [published, edited]`. Pre-release → TestPyPI; full release → PyPI.
+
## Reporting Bugs
- **Search** existing issues to avoid duplicates.
diff --git a/Makefile b/Makefile
index 652129e..419543a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,44 +1,142 @@
-# Makefile for Timerun
-# Description: Development environment setup and project management
-# Requirements: Python 3, pip
+# TimeRun - Makefile
+#
+# Commands for development setup, testing, linting, and docs.
+# Requires Python 3.10+ and pip.
+#
+# Usage: make [target] (run "make help" for all targets)
-.DEFAULT_GOAL := help
+# ============================================================================
+# Configuration (edit only the "Editable" block if needed)
+# ============================================================================
-# Project configuration
+# ---- Editable ----
+# PYTHON: Interpreter for "make init". Empty = prompt; set to skip.
+PYTHON ?=
+# VENV_DIR: Virtualenv directory (e.g. .venv or venv).
VENV_DIR := .venv
+# ---- Do Not Edit ----
+COVERAGE_SOURCE := timerun
+VENV_BIN := $(VENV_DIR)/bin
+GITIGNORE_PATHS := \
+ .gitignore \
+ $(VENV_DIR) \
+ .mypy_cache \
+ .ruff_cache \
+ .coverage \
+ htmlcov \
+ site
+GITIGNORE_GLOBS := \
+ *.pyc \
+ *.egg-info \
+ __pycache__
+
+# Default target when no target is specified
+.DEFAULT_GOAL := help
+
+# ============================================================================
+# General Targets
+# ============================================================================
+
+##@ General
+
.PHONY: help
-help: ## Show this help message
- @echo "Available targets:"
- @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
- awk 'BEGIN {FS = ":.*?## "}; {printf " %-20s %s\n", $$1, $$2}'
+help: ## Display this help message with all available targets
+ @echo "TimeRun - Available Commands"
+ @echo ""
+ @echo "Usage: make [target]"
+ @awk 'BEGIN {FS = ":.*##"} \
+ /^[a-zA-Z_0-9-]+:.*?##/ { \
+ printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 \
+ } \
+ /^##@/ { \
+ printf "\n\033[1m%s\033[0m\n", substr($$0, 5) \
+ }' $(MAKEFILE_LIST)
+
+# ============================================================================
+# Environment Targets
+# ============================================================================
+
+##@ Environment
.PHONY: init
-init: ## Set up Python development environment with pre-commit hooks
- @test -d "$(VENV_DIR)" || python3 -m venv "$(VENV_DIR)" >/dev/null 2>&1
- @"$(VENV_DIR)/bin/pip" install -e ".[dev]" >/dev/null 2>&1
- @"$(VENV_DIR)/bin/pip" install pre-commit >/dev/null 2>&1
- @"$(VENV_DIR)/bin/pre-commit" install >/dev/null 2>&1
- @echo "Development environment ready! To activate it, run: source $(VENV_DIR)/bin/activate"
+init: ## Set up dev env. Prompts for Python, or: make init PYTHON=python3.10
+ @set -f; \
+ printf '%s\n' $(GITIGNORE_PATHS) $(GITIGNORE_GLOBS) > .gitignore; \
+ set +f;
+ @if [ -n "$(PYTHON)" ]; then \
+ py="$(PYTHON)"; \
+ elif [ -t 0 ]; then \
+ read -p "Which Python interpreter? [python3]: " py; \
+ py=$${py:-python3}; \
+ else \
+ py=python3; \
+ fi; \
+ if [ ! -d "$(VENV_DIR)" ]; then $$py -m venv "$(VENV_DIR)" >/dev/null; fi
+ @$(VENV_BIN)/pip install --upgrade pip >/dev/null
+ @$(VENV_BIN)/pip install -e ".[dev,docs]" >/dev/null
+ @$(VENV_BIN)/pip install pre-commit >/dev/null
+ @$(VENV_BIN)/pre-commit install >/dev/null
-.PHONY: test
-test: ## Run BDD tests (progress + summary + coverage)
- @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave -f progress
- @"$(VENV_DIR)/bin/coverage" report --show-missing
+.PHONY: clean
+clean: ## Remove all files/dirs listed in .gitignore (inverse of init)
+ @rm -rf $(GITIGNORE_PATHS)
+ @set -f; \
+ for p in $(GITIGNORE_GLOBS); do \
+ find . -not -path './.git/*' -name "$$p" \
+ -exec rm -rf {} + 2>/dev/null || true; \
+ done; \
+ set +f
+
+# Internal: used by test, docs, lint; not shown in help
+.PHONY: check-venv
+check-venv:
+ @if [ ! -d "$(VENV_DIR)" ]; then \
+ echo "Error: $(VENV_DIR) not found!"; \
+ echo "Please run 'make init' to create the development environment."; \
+ exit 1; \
+ fi
+ @if ! $(VENV_BIN)/python -c "import timerun" 2>/dev/null; then \
+ echo "Error: timerun not installed in $(VENV_DIR)!"; \
+ echo "Please run 'make init' to install the package and dependencies."; \
+ exit 1; \
+ fi
-.PHONY: test-summary
-test-summary: ## Run BDD tests (summary and coverage only; use 'make test' to see which feature failed)
- @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave -f null
- @"$(VENV_DIR)/bin/coverage" report --show-missing
+# ============================================================================
+# Testing Targets
+# ============================================================================
+
+##@ Testing
+
+.PHONY: test
+test: BEHAVE_ARGS := -f null
+test: test-verbose ## Run BDD tests (summary + coverage; failures show which scenario failed)
.PHONY: test-verbose
-test-verbose: ## Run BDD tests with full scenario/step output (for debugging failures)
- @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave
- @"$(VENV_DIR)/bin/coverage" report --show-missing
+test-verbose: check-venv ## Run BDD tests with full scenario/step output (for debugging failures)
+ @$(VENV_BIN)/coverage run --source=$(COVERAGE_SOURCE) -m behave $(BEHAVE_ARGS)
+ @$(VENV_BIN)/coverage report --show-missing
-.PHONY: clean
-clean: ## Delete all temporary files including venv
- @rm -rf "$(VENV_DIR)" *.egg-info
- @rm -rf .mypy_cache .ruff_cache .coverage htmlcov
- @find . -name "*.pyc" -delete
- @find . -name "__pycache__" -type d -exec rm -rf {} +
+# ============================================================================
+# Docs Targets (Zensical; docs deps installed by make init)
+# ============================================================================
+
+##@ Docs
+
+.PHONY: docs
+docs: check-venv ## Serve the docs locally (http://127.0.0.1:8000); Ctrl+C removes site/
+ @trap 'rm -rf site' INT; $(VENV_BIN)/zensical serve
+
+.PHONY: docs-build
+docs-build: check-venv ## Build the docs site (output in site/)
+ @$(VENV_BIN)/zensical build
+
+# ============================================================================
+# Lint Targets
+# ============================================================================
+
+##@ Lint
+
+.PHONY: lint
+lint: check-venv ## Run pre-commit (lint and format checks) on all files
+ @$(VENV_BIN)/pre-commit run --all-files
diff --git a/README.md b/README.md
index 37af1cf..9668071 100644
--- a/README.md
+++ b/README.md
@@ -14,17 +14,11 @@
-TimeRun is a **single-file** Python package with no dependencies beyond the [Python Standard Library](https://docs.python.org/3/library/). The package is designed to stay minimal and dependency-free.
+TimeRun is a **single-file** Python package with **no dependencies** beyond the standard library. It records **wall-clock time** and **CPU time** for code blocks or function calls and supports optional **metadata** (e.g. run id, tags) per measurement.
-It records **wall-clock time** (real elapsed time) and **CPU time** (process time) for code blocks or function calls, and lets you attach optional **metadata** (e.g. run id, tags) to each measurement.
+For the full value proposition and positioning, see [Why TimeRun](https://hh-mwb.github.io/timerun/about/) on the docs site.
-## Setup
-
-### Prerequisites
-
-**Python 3.10+**
-
-### Installation
+## Installation
From [PyPI](https://pypi.org/project/timerun/):
@@ -32,17 +26,19 @@ From [PyPI](https://pypi.org/project/timerun/):
pip install timerun
```
-From source:
+From [source](https://github.com/HH-MWB/timerun):
```bash
pip install git+https://github.com/HH-MWB/timerun.git
```
-## Quickstart
+*Note: Requires Python 3.10+.*
+
+## Usage
### Time Code Block
-Use `with Timer() as m:` or `async with Timer() as m:`. On block exit, the yielded `Measurement` has `wall_time` and `cpu_time` set.
+Use `with Timer() as m:` (or `async with`). The yielded `Measurement` has `wall_time` and `cpu_time`:
```python
>>> from timerun import Timer
@@ -59,7 +55,7 @@ datetime.timedelta(microseconds=8)
### Time Function Calls
-Use `@Timer()` to time every call. Works with sync and async functions and with sync and async generators. One `Measurement` per call is appended to the wrapped callable's `measurements` deque.
+Use `@Timer()`. One `Measurement` per call is appended to the callable’s `measurements` deque:
```python
>>> from timerun import Timer
@@ -76,6 +72,18 @@ datetime.timedelta(microseconds=8)
*Note: Argument `maxlen` caps how many measurements are kept (e.g. `@Timer(maxlen=10)`). By default the deque is unbounded.*
+### Callbacks on Start and End
+
+Optional `on_start` and `on_end` callbacks run once per measurement. Both receive the measurement instance (`on_start` before timings are set, `on_end` after). Typical uses are logging, forwarding to OpenTelemetry, or enqueueing to a metrics pipeline.
+
+```python
+>>> from timerun import Timer
+>>> with Timer(on_end=lambda m: print(m.wall_time.timedelta)):
+... pass # code block to be measured
+...
+0:00:00.000008
+```
+
## Contributing
Contributions are welcome. See [CONTRIBUTING.md](https://github.com/HH-MWB/timerun/blob/main/CONTRIBUTING.md) for setup, testing, and pull request guidelines.
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..60010e4
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,31 @@
+# Security Policy
+
+## Supported Versions
+
+We actively support security updates for the current version of this project. Please ensure you're using the latest version.
+
+## Reporting a Vulnerability
+
+If you discover a security vulnerability, please **do not** open a public issue. Instead, please report it using GitHub Security Advisories:
+
+1. Navigate to the [Security tab](https://github.com/HH-MWB/timerun/security) of this repository
+2. Click on "Advisories"
+3. Click "Report a vulnerability" to create a private security advisory
+
+Alternatively, you can directly access the [Report a vulnerability](https://github.com/HH-MWB/timerun/security/advisories/new) page.
+
+Please include the following information in your report:
+
+- Description of the vulnerability
+- Steps to reproduce the issue
+- Potential impact
+- Suggested fix (if any)
+
+We will acknowledge receipt of your report within 48 hours and provide an update on the status of the vulnerability within 7 days.
+
+## Disclosure Policy
+
+- We will acknowledge receipt of your vulnerability report
+- We will confirm the issue and assess its severity
+- We will work on a fix and keep you informed of progress
+- Once a fix is ready, we will release it and credit you (unless you prefer to remain anonymous)
diff --git a/docs/about/index.md b/docs/about/index.md
new file mode 100644
index 0000000..f58a233
--- /dev/null
+++ b/docs/about/index.md
@@ -0,0 +1,81 @@
+---
+title: About
+---
+
+# About
+
+TimeRun gives you **structured, dependency-free timing** (wall + CPU) with optional **metadata and callbacks**, so you can measure any Python code and plug results into your existing **observability stack**. Single file, zero dependencies, standard library only.
+
+---
+
+## The problem we solve
+
+Developers and teams need to **measure execution time** of Python code (blocks, functions, async) in a way that is:
+
+- **Trustworthy** — wall-clock and CPU time, not ad-hoc `time.time()` or `time.perf_counter()`.
+- **Observable** — easy to send measurements into logging, metrics, or tracing (e.g. OpenTelemetry) without locking them into one vendor.
+- **Low-friction** — minimal setup, no extra runtime dependencies, and usable in both scripts and production services.
+
+Alternatives (manual `time.perf_counter()`, heavy APM agents, or “batteries-included” profilers) either lack structure (no CPU vs wall, no metadata), add complexity, or don’t fit the “measure and export” workflow. TimeRun fills that gap.
+
+---
+
+## Who it’s for
+
+If any of the following describes you, TimeRun is a good fit:
+
+- **Python developers** doing performance tuning, benchmarking, or debugging latency.
+- **Platform / SRE / backend engineers** who need lightweight, library-level timing that can feed into existing observability (logs, metrics, tracing).
+- **Libraries and frameworks** that want optional timing without imposing dependencies on their users.
+
+---
+
+## What you get
+
+| Benefit | What it means |
+|--------|----------------|
+| **Zero dependencies** | Standard library only → no supply-chain or version conflicts; safe to add to libraries and constrained environments. |
+| **Single-file** | One module to reason about, audit, or vendor; easy to copy or fork if needed. |
+| **Wall + CPU time** | Distinguish “real” elapsed time from CPU burn; better for I/O vs CPU-bound analysis. |
+| **Structured measurements** | `Measurement` with `TimeSpan` (nanosecond precision, `timedelta` view) and metadata → ready for logging, metrics, or tracing. |
+| **Context manager + decorator** | Same API for ad-hoc blocks and for repeated function/generator/async calls; one mental model. |
+| **Observability hooks** | `on_start` / `on_end` with full `Measurement` → integrate with OpenTelemetry, metrics pipelines, or custom logging without baking them into the library. |
+| **Bounded history (decorator)** | `maxlen` on the measurements deque → avoid unbounded memory when timing hot paths. |
+| **Async-aware** | `async with` and decorators for async functions and async generators → fits modern async Python. |
+
+**Outcomes:**
+
+- **Faster performance work** — add timing with a context manager or decorator; get wall + CPU and optional metadata without wiring up timers by hand.
+- **Clean observability integration** — one callback (`on_end`) to push measurements to logs, metrics, or tracing, without coupling the app to a specific vendor.
+- **Fewer dependency and maintenance worries** — no extra packages, single-file design, MIT license.
+
+---
+
+## How it compares
+
+| Aspect | TimeRun | Manual `perf_counter` / `process_time()` | Heavy profilers (cProfile, py-spy) | Vendor APM agents |
+|--------|--------|----------------------------------------|------------------------------------|-------------------|
+| **Dependencies** | None | None | Often extra tooling | Agent + vendor stack |
+| **Wall + CPU** | Yes | You wire both | Varies | Usually wall only |
+| **Observability** | Your choice (callbacks) | You build it | Export varies | Locked to vendor |
+| **Use case** | Targeted timing, feed your stack | Ad-hoc scripts | Whole-process profiling | Full APM |
+
+- **vs manual timing** — TimeRun gives a consistent `Measurement` (wall + CPU + metadata), callbacks, and decorator/context-manager API so you don’t reimplement the same pattern.
+- **vs heavy profilers** — TimeRun is for **targeted** timing of chosen blocks or functions and for **feeding observability**, not for whole-process profiling.
+- **vs vendor APM** — TimeRun is library-level, dependency-free, and export-agnostic; you decide where measurements go (OpenTelemetry, Prometheus, logs, etc.).
+
+---
+
+## When not to use TimeRun
+
+- **Whole-process profiling** — use cProfile, py-spy, or similar.
+- **Full APM (errors, infra, traces)** — use a vendor APM; TimeRun can still feed it via callbacks.
+- **Need only ad-hoc one-off timings in a script** — `time.perf_counter()` is fine; TimeRun pays off when you want structure, CPU time, or observability hooks.
+
+---
+
+## Bottom line
+
+TimeRun is the **minimal, dependency-free way** to get structured timing (wall + CPU) and optional metadata/callbacks in Python, so you can measure what matters and plug results into your existing observability stack without extra dependencies or lock-in.
+
+[Quick start](../index.md#quick-start) to install and run. [Reference](../guide/index.md) for the API. [Recipes](../recipes/index.md) for real-world patterns.
diff --git a/docs/guide/callbacks.md b/docs/guide/callbacks.md
new file mode 100644
index 0000000..0b4c4fd
--- /dev/null
+++ b/docs/guide/callbacks.md
@@ -0,0 +1,33 @@
+---
+title: Callbacks
+---
+
+# Callbacks
+
+The optional **`on_start`** and **`on_end`** callbacks are invoked once per measurement. Both receive the same **Measurement** instance that the Timer yields or appends for that run.
+
+## When they are invoked and what they receive
+
+| Callback | When | State of the Measurement |
+|----------|------|---------------------------|
+| `on_start(measurement)` | When timing is about to start (on enter of the block or start of the decorated call). | `metadata` is set (a deep copy of the Timer’s initial metadata). `wall_time` and `cpu_time` are **`None`**. |
+| `on_end(measurement)` | When timing has just ended (on block exit or end of the decorated call). | `wall_time` and `cpu_time` are set. `metadata` may have been mutated in the block or in `on_start`. |
+
+Use `on_start` to add to `metadata` (e.g. from context variables). Use `on_end` to read durations and metadata and send them to logging, OpenTelemetry, or a metrics pipeline.
+
+## Synchronous only
+
+Callbacks are **synchronous only**. They are invoked on the same thread and must return before the Timer continues. To integrate with asynchronous exporters (e.g. OpenTelemetry), schedule work from the callback (e.g. `asyncio.create_task(export(m))` in an async context, or use a thread or queue).
+
+## Example
+
+```python
+from timerun import Timer
+
+with Timer(on_end=lambda m: print(m.wall_time.timedelta)):
+ pass # code block to be measured
+```
+
+For applied patterns (logging, files, OpenTelemetry), see [Share results](../recipes/share-results.md).
+
+**Back to:** [Reference](index.md)
diff --git a/docs/guide/index.md b/docs/guide/index.md
new file mode 100644
index 0000000..56fdce2
--- /dev/null
+++ b/docs/guide/index.md
@@ -0,0 +1,24 @@
+---
+title: Reference
+---
+
+# Reference
+
+This section describes the TimeRun API: its concepts, parameters, and behavior. Use it after the [Quick start](../index.md#quick-start) for a complete picture of blocks, functions, metadata, callbacks, and sync/async and generator support, or as a lookup while using the library.
+
+For applied patterns (e.g. attaching a request id in `on_start`, exporting to OpenTelemetry), see [Recipes](../recipes/index.md).
+
+---
+
+## Core types
+
+- **[TimeSpan](timespan.md)** — Immutable time interval: attributes, `timedelta`, comparison, and validation.
+- **[Measurement](measurement.md)** — A single timing result: `wall_time`, `cpu_time`, and `metadata`; when values are set and how to use them.
+
+## Timer
+
+- **[Timer (overview)](timer.md)** — Constructor parameters and the two modes (context manager and decorator); what each mode yields.
+- **[Measure a block](measure-block.md)** — Using `with Timer()` and `async with`; one measurement per block; nested, sequential, and multi-threaded use; exceptions and invalid use.
+- **[Measure functions](measure-functions.md)** — Using `@Timer()` with sync/async functions and generators; the `measurements` deque and `maxlen`; thread safety.
+- **[Metadata](metadata.md)** — Supplying and copying metadata; per-measurement mutation; isolation between runs.
+- **[Callbacks](callbacks.md)** — `on_start` and `on_end`: when they are invoked, what they receive, and the synchronous-only contract.
diff --git a/docs/guide/measure-block.md b/docs/guide/measure-block.md
new file mode 100644
index 0000000..f09419c
--- /dev/null
+++ b/docs/guide/measure-block.md
@@ -0,0 +1,47 @@
+---
+title: Measure a block
+---
+
+# Measure a block
+
+Use the Timer as a **context manager** to measure a single block of code. The Timer yields one **Measurement** per block; `wall_time` and `cpu_time` are set on block exit.
+
+## Syntax
+
+Synchronous:
+
+```python
+from timerun import Timer
+
+with Timer() as m:
+ pass # code block to be measured
+
+m.wall_time.timedelta # datetime.timedelta
+m.cpu_time.timedelta # datetime.timedelta
+```
+
+Asynchronous:
+
+```python
+async with Timer() as m:
+ await do_something()
+
+m.wall_time.timedelta
+```
+
+On block exit the Timer records CPU time first, then wall time. Wall time is therefore typically equal to or slightly greater than CPU time, even when there is no I/O or scheduling delay.
+
+## Behavior summary
+
+| Scenario | Behavior |
+|----------|----------|
+| Single block | One measurement; `wall_time` and `cpu_time` set on exit. |
+| Sequential blocks (same Timer) | One measurement per block; each block receives its own measurement with a fresh copy of the initial metadata. |
+| Nested blocks (same Timer) | Outer and inner each receive one measurement; timings are independent (outer wall time includes the inner block’s wall time). |
+| Multiple threads (same Timer) | One measurement per thread per enter/exit; a thread-local stack ensures no cross-thread mixing. |
+| Exception in block | The measurement is still recorded (wall/cpu set on exit); the exception propagates to the caller. |
+| Invalid use: `__exit__` without `__enter__` | `RuntimeError` with message `"__exit__ called without a matching __enter__"`. |
+
+The same Timer instance can be reused for multiple blocks (sequential or nested). Each block receives its own Measurement; metadata mutations in one block do not appear in the next (see [Metadata](metadata.md)).
+
+**Next:** [Measure functions](measure-functions.md)
diff --git a/docs/guide/measure-functions.md b/docs/guide/measure-functions.md
new file mode 100644
index 0000000..b30ccef
--- /dev/null
+++ b/docs/guide/measure-functions.md
@@ -0,0 +1,48 @@
+---
+title: Measure functions
+---
+
+# Measure functions
+
+Apply the **decorator** `@Timer()` to measure each call of a function or generator. One **Measurement** per call is appended to the wrapped callable’s `measurements` deque.
+
+## Syntax
+
+```python
+from timerun import Timer
+
+@Timer()
+def func():
+ return
+
+func()
+func.measurements[-1].wall_time.timedelta
+func.measurements[-1].cpu_time.timedelta
+```
+
+Use `@Timer(maxlen=10)` to limit how many measurements are retained; the oldest entries are discarded when the deque reaches capacity. The default is unbounded.
+
+## Callable types
+
+| Type | Behavior |
+|------|----------|
+| Sync function | One measurement per call. |
+| Async function | One measurement per call (covers the full `await` of the call). |
+| Sync generator | One measurement per **full consumption** of the generator (from first `next` until exhaustion or close). |
+| Async generator | One measurement per **full consumption** of the async generator. |
+
+For generators, a single measurement covers the entire iteration, not each yielded value.
+
+## measurements deque
+
+The wrapped callable has a `measurements` attribute: a `deque` of `Measurement` instances in order from oldest to newest. Each call (or full generator consumption) appends one entry. When `maxlen` is set, the deque is bounded and discards the oldest entry when full.
+
+## Thread safety
+
+Concurrent calls from multiple threads each produce one measurement. Appends to `measurements` are thread-safe; for example, two threads calling the same timed function yield two measurements.
+
+## Exceptions
+
+If a timed function or generator raises, one measurement is still recorded for that run, and the exception is re-raised to the caller.
+
+**Next:** [Metadata](metadata.md)
diff --git a/docs/guide/measurement.md b/docs/guide/measurement.md
new file mode 100644
index 0000000..7f55b10
--- /dev/null
+++ b/docs/guide/measurement.md
@@ -0,0 +1,42 @@
+---
+title: Measurement
+---
+
+# Measurement
+
+A **Measurement** represents a single timing result: wall-clock time, CPU time, and optional metadata. The Timer creates one Measurement per block or function call. You obtain it from the context manager (`with Timer() as m`) or from the decorator’s `measurements` deque.
+
+## Attributes
+
+| Attribute | Type | Description |
+|------------|------|-------------|
+| `wall_time` | `TimeSpan \| None` | Wall-clock time for the measurement, or `None` until the block or call exits. |
+| `cpu_time` | `TimeSpan \| None` | CPU time for the measurement, or `None` until the block or call exits. |
+| `metadata` | `dict[str, object]` | Key-value metadata (e.g. run id, tags). Defaults to `{}`. Mutate in place to add or change entries for this measurement. |
+
+## When wall_time and cpu_time are set
+
+When the Timer creates the Measurement (in `__enter__` or at the start of a decorated call), `wall_time` and `cpu_time` are `None`. They are assigned when the block exits or the call completes. Thus, in `on_start` the measurement does not yet have timings; in `on_end`, both are set.
+
+## Metadata
+
+Metadata is mutable. Initial metadata is supplied via `Timer(metadata={...})`; each measurement receives a deep copy at enter time. You can mutate `measurement.metadata` inside the timed block or in `on_start` to add or change keys for that run only. See [Metadata](metadata.md) for copying and scope rules.
+
+## Example
+
+```python
+from timerun import Timer, Measurement
+
+with Timer(metadata={"run_id": "exp-1"}) as m:
+ pass # your code
+
+# After block exit:
+m.wall_time # TimeSpan (set)
+m.cpu_time # TimeSpan (set)
+m.wall_time.timedelta # datetime.timedelta
+m.metadata # {"run_id": "exp-1"} (your copy; mutable)
+```
+
+You can also construct a Measurement manually (e.g. for tests) by passing `wall_time`, `cpu_time`, and optional `metadata` to the constructor.
+
+**Next:** [Timer (overview)](timer.md)
diff --git a/docs/guide/metadata.md b/docs/guide/metadata.md
new file mode 100644
index 0000000..718155d
--- /dev/null
+++ b/docs/guide/metadata.md
@@ -0,0 +1,33 @@
+---
+title: Metadata
+---
+
+# Metadata
+
+Key-value **metadata** can be attached to each measurement (e.g. run id, tags). It is stored on the **Measurement** and can be read or mutated for that run.
+
+## Supplying metadata
+
+Provide a dictionary when instantiating the Timer:
+
+```python
+with Timer(metadata={"run_id": "abc", "tag": "ingest"}) as m:
+ do_work()
+
+# m.metadata is {"run_id": "abc", "tag": "ingest"}
+```
+
+Each measurement receives a **deep copy** of this dictionary at enter time. The Timer retains the original by reference but does not reuse the same dict for multiple measurements.
+
+## Per-measurement copy; isolation between runs
+
+- Each block or call receives its **own** copy of the initial metadata. Mutating `measurement.metadata` inside that block (or in `on_start` for that measurement) affects only that measurement.
+- If you reuse the same Timer for a second block, the second block’s measurement starts from a fresh deep copy of the Timer’s initial metadata. It does **not** inherit any keys or changes from the first block.
+
+In summary: metadata is scoped to the measurement. Use it to tag that run; it does not leak to the next run.
+
+## Mutating metadata
+
+You can mutate `measurement.metadata` inside the timed block or in `on_start` to add or change entries for that run (e.g. request id from context, or a tag set after checking a condition). For patterns such as adding a request id in `on_start`, see [Use metadata effectively](../recipes/metadata.md).
+
+**Next:** [Callbacks](callbacks.md)
diff --git a/docs/guide/timer.md b/docs/guide/timer.md
new file mode 100644
index 0000000..36ca109
--- /dev/null
+++ b/docs/guide/timer.md
@@ -0,0 +1,32 @@
+---
+title: Timer (overview)
+---
+
+# Timer (overview)
+
+**Timer** is the main entry point. It measures execution and records wall-clock and CPU time per run. It operates in two modes: as a **context manager** (for a block of code) or as a **decorator** (for function or generator calls). Both modes support synchronous and asynchronous use; the decorator also supports sync and async generators.
+
+## Constructor parameters
+
+All parameters are optional and keyword-only.
+
+| Parameter | Type | Description |
+|------------|------|-------------|
+| `metadata` | `dict \| None` | Key-value metadata for the measurement(s). Each measurement gets a deep copy at enter time. Defaults to `{}`. |
+| `on_start` | `callable \| None` | Called once per measurement when timing is about to start. Receives the `Measurement` (metadata set; `wall_time` and `cpu_time` are `None`). Defaults to `None`. |
+| `on_end` | `callable \| None` | Called once per measurement when timing has just ended. Receives the `Measurement` with `wall_time` and `cpu_time` set. Defaults to `None`. |
+| `maxlen` | `int \| None` | **Decorator only.** Maximum number of measurements to keep on the wrapped callable. Ignored when used as a context manager. Defaults to `None` (unbounded). |
+
+## Context manager mode
+
+Use `with Timer() as m:` (sync) or `async with Timer() as m:` (async). On block exit, the yielded `Measurement` has its `wall_time` and `cpu_time` set. There is one measurement per block; nested and sequential blocks each receive their own measurement. See [Measure a block](measure-block.md) for nested blocks, multiple threads, and exception behavior.
+
+## Decorator mode
+
+Apply `@Timer()` (or `@Timer(metadata={...}, maxlen=100)` etc.) to a function or generator. Each call produces one `Measurement`, appended to the wrapped callable’s `measurements` deque. Supported callables include sync and async functions and sync and async generators (one measurement per call, or per full consumption for generators). See [Measure functions](measure-functions.md) for `maxlen` and thread safety.
+
+## Callbacks
+
+Callbacks are **synchronous only**. To integrate with asynchronous exporters (e.g. OpenTelemetry), schedule work from the callback (e.g. `asyncio.create_task(export(m))` in an async context, or use a thread or queue). See [Callbacks](callbacks.md) for when `on_start` and `on_end` are invoked and what they receive.
+
+**Next:** [Measure a block](measure-block.md)
diff --git a/docs/guide/timespan.md b/docs/guide/timespan.md
new file mode 100644
index 0000000..28a1b71
--- /dev/null
+++ b/docs/guide/timespan.md
@@ -0,0 +1,40 @@
+---
+title: TimeSpan
+---
+
+# TimeSpan
+
+A **TimeSpan** represents an immutable time interval with start and end timestamps. The Timer uses it for `wall_time` and `cpu_time` on each measurement. You can also construct TimeSpan instances directly (e.g. for tests or custom logic).
+
+## Attributes
+
+| Attribute | Type | Description |
+|------------|------|-------------|
+| `duration` | `int` | Elapsed time in nanoseconds (`end - start`). Set in `__post_init__`, not a constructor argument. Used for equality, ordering, and hashing. |
+| `start` | `int` | Start timestamp in nanoseconds. |
+| `end` | `int` | End timestamp in nanoseconds. |
+| `timedelta`| `datetime.timedelta` | Read-only. Duration as a `datetime.timedelta`; nanoseconds are converted to whole microseconds (`duration // 1000`) to match `timedelta` resolution. |
+
+## Comparison and hashing
+
+Equality and ordering are based **only on `duration`**. `start` and `end` are excluded from comparison, so two spans with the same duration compare equal even if their intervals differ. TimeSpan is hashable and supports sorting.
+
+## Validation
+
+`end` must be greater than or equal to `start`. If `end` is less than `start`, the constructor raises `ValueError` with message `"end must be >= start"`.
+
+## Example
+
+```python
+from datetime import timedelta
+from timerun import TimeSpan
+
+span = TimeSpan(start=0, end=1_000_000) # 1 ms
+span.duration # 1000000 (nanoseconds)
+span.timedelta # datetime.timedelta(microseconds=1000)
+
+# Comparison by duration only
+TimeSpan(start=0, end=100) == TimeSpan(start=200, end=300) # True (same duration)
+```
+
+**Next:** [Measurement](measurement.md)
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..bc29a0d
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,66 @@
+---
+title: Home
+---
+
+# TimeRun
+
+**Structured timing for Python.** One small library, no dependencies — wall and CPU time, ready for your logs, metrics, or tracing.
+
+---
+
+## About
+
+You need to **measure execution time** of Python code in a way that’s trustworthy (wall + CPU, not ad-hoc timers), observable (send to logging, OpenTelemetry, or any pipeline), and low-friction (no new dependencies, works in scripts and production). TimeRun does exactly that.
+
+- **Zero dependencies** — Standard library only; safe for libraries and constrained environments.
+- **Wall + CPU time** — Distinguish real elapsed time from CPU burn (I/O vs CPU-bound).
+- **Observability-ready** — `on_start` / `on_end` callbacks and metadata to plug into logging, OpenTelemetry, or any metrics pipeline.
+
+[Read the full story for positioning →](about/index.md)
+
+---
+
+## Quick start
+
+#### Install from [PyPI](https://pypi.org/project/timerun/)
+
+```bash
+pip install timerun
+```
+
+#### Measure code block
+
+```python
+from timerun import Timer
+
+with Timer() as m:
+ pass # your code
+```
+
+#### Measure function calls
+
+```python
+@Timer()
+def my_func():
+ pass
+
+my_func()
+m = my_func.measurements[-1] # measurement for last call
+```
+
+#### Use measurement result
+
+```python
+>>> m.wall_time.timedelta
+datetime.timedelta(microseconds=11)
+>>> m.cpu_time.timedelta
+datetime.timedelta(microseconds=8)
+```
+
+[Read the reference for API details →](guide/index.md)
+
+---
+
+## Trust
+
+[![PyPI](https://img.shields.io/pypi/v/timerun)](https://pypi.org/project/timerun/) [![License](https://img.shields.io/github/license/HH-MWB/timerun)](https://github.com/HH-MWB/timerun/blob/main/LICENSE) [![Coverage](https://codecov.io/gh/HH-MWB/timerun/branch/main/graph/badge.svg)](https://codecov.io/gh/HH-MWB/timerun) [![Downloads](https://pepy.tech/badge/timerun)](https://pepy.tech/project/timerun)
diff --git a/docs/recipes/analyze-results.md b/docs/recipes/analyze-results.md
new file mode 100644
index 0000000..76e65e0
--- /dev/null
+++ b/docs/recipes/analyze-results.md
@@ -0,0 +1,112 @@
+# Analyze results
+
+**Problem:** You have many measurements (e.g. from repeated runs or a decorator's `measurements` deque) and want to summarize or compare — mean, variance, confidence intervals.
+
+**Idea:** TimeRun gives you the numbers; you use standard tools (e.g. numpy, scipy, pandas) for analysis. Collect `Measurement` objects, extract durations, then compute what you need.
+
+## Collect measurements
+
+Two common ways to get a list of measurements:
+
+1. **From a decorated function** — use the timer’s `measurements` deque when you run the same function many times (e.g. benchmarks or repeated calls):
+
+```python
+from timerun import Timer
+
+@Timer()
+def my_func():
+ pass
+
+for _ in range(100):
+ my_func()
+
+measurements = list(my_func.measurements)
+```
+
+2. **From a context manager** — append each measurement in `on_end` to a list (or a queue for later processing) when you time one-off blocks or multiple different blocks.
+
+## What to extract
+
+Each measurement has **wall time** and **CPU time**; use the one that matches your question (e.g. wall for latency, CPU for compute-bound work). Use `wall_time.duration` (nanoseconds, int) or `wall_time.timedelta.total_seconds()` for seconds as a float. You can also use **metadata** to group or filter before computing stats (e.g. by `run_id`, `stage`) so you get per-group summaries.
+
+```python
+durations_ns = [m.wall_time.duration for m in measurements]
+# or for seconds as float:
+durations_s = [m.wall_time.timedelta.total_seconds() for m in measurements]
+```
+
+## Summarize
+
+TimeRun does not implement statistics — it only records timings. Use numpy, scipy, pandas, or your own code for aggregation and inference.
+
+### Mean and a simple confidence interval
+
+Example using `scipy.stats` for a t-based 95% confidence interval:
+
+```python
+import numpy as np
+import scipy.stats
+
+durations_ns = [m.wall_time.duration for m in measurements]
+a = np.array(durations_ns) / 1e9 # convert to seconds
+
+mean_s = a.mean()
+n = len(a)
+ci = scipy.stats.t.interval(0.95, n - 1, loc=mean_s, scale=scipy.stats.sem(a))
+# ci is (lower, upper) in seconds
+print(f"mean = {mean_s:.6f} s, 95% CI = [{ci[0]:.6f}, {ci[1]:.6f}]")
+```
+
+### Variance and percentiles
+
+Use the same duration array `a` (in seconds). Standard deviation and variance describe spread; percentiles (e.g. 50th, 99th) are useful for latency-style analysis:
+
+```python
+# a = np.array(durations_ns) / 1e9 # from above
+std_s = a.std()
+var_s = a.var()
+p50, p99 = np.percentile(a, [50, 99])
+
+print(f"std = {std_s:.6f} s")
+print(f"variance = {var_s:.12f} s²")
+print(f"p50 (median) = {p50:.6f} s")
+print(f"p99 = {p99:.6f} s")
+```
+
+Example output:
+
+```
+std = 0.002341 s
+variance = 0.000005481234 s²
+p50 (median) = 0.052103 s
+p99 = 0.058892 s
+```
+
+To group by metadata (e.g. by `run_id` or `stage`) and compute stats per group, put durations and metadata into a pandas DataFrame and use `groupby` before applying the same summaries.
+
+## Plot the confidence interval
+
+You can draw the mean and confidence interval (e.g. 95% CI) as a simple diagram. Reuse the same `mean_s` and `ci` from the summary above:
+
+```python
+import matplotlib.pyplot as plt
+
+# mean_s and ci from the Summarize section above
+lower, upper = ci
+half_width = (upper - lower) / 2
+
+fig, ax = plt.subplots()
+ax.errorbar(0, mean_s, yerr=half_width, fmt="o", capsize=5, label="mean ± 95% CI")
+ax.set_ylabel("Duration (s)")
+ax.set_xticks([])
+ax.legend()
+ax.set_title("95% confidence interval")
+plt.tight_layout()
+plt.show()
+```
+
+This plots the mean as a point with an error bar spanning the confidence interval. For more on confidence intervals and benchmarking, see your preferred stats or benchmarking reference.
+
+**Back to:** [Recipes](index.md)
+
+**See also:** For the `measurements` deque and `maxlen`, see [Measure functions](../guide/measure-functions.md). For collecting in `on_end`, see [Callbacks](../guide/callbacks.md).
diff --git a/docs/recipes/index.md b/docs/recipes/index.md
new file mode 100644
index 0000000..3db52d4
--- /dev/null
+++ b/docs/recipes/index.md
@@ -0,0 +1,13 @@
+---
+title: Recipes
+---
+
+# Recipes
+
+Real-world patterns for using TimeRun: use metadata effectively, share results with your stack, and analyze timing data.
+
+You already know the API from the [Reference](../guide/index.md) (measure blocks, functions, metadata, callbacks). Here we show how to apply it to concrete problems.
+
+1. **[Use metadata effectively](metadata.md)** — Add context (e.g. request id, stage) to every measurement by mutating metadata in `on_start`.
+2. **[Share results](share-results.md)** — Send measurements to logs, files, or OpenTelemetry using `on_end`.
+3. **[Analyze results](analyze-results.md)** — Collect measurements and compute summaries or confidence intervals with standard tools.
diff --git a/docs/recipes/metadata.md b/docs/recipes/metadata.md
new file mode 100644
index 0000000..7bd552c
--- /dev/null
+++ b/docs/recipes/metadata.md
@@ -0,0 +1,67 @@
+# Use metadata effectively
+
+**Problem:** You want context on every measurement (e.g. request id, stage, experiment id) without repeating it in every `Timer()` call.
+
+**Idea:** Metadata is attached to each measurement. You can **mutate `measurement.metadata` in `on_start`** (or inside the block) to add or change keys for that run. Each measurement gets its own copy of the initial metadata at enter time, so mutating it in `on_start` only affects that measurement.
+
+## Why this works
+
+You can edit metadata in `on_start` (or in the block) because the callback receives the **same** `Measurement` instance that is returned from `with Timer(...) as m`. When the block enters, the Timer creates that Measurement with `metadata=deepcopy(self._metadata)` — so each run already has its own dict. Mutating `m.metadata` in `on_start` or in the block therefore mutates that run’s copy only; the object is passed by reference.
+
+You **cannot** set per-run values at Timer init because init runs once and there is no “current run” yet. The dict you pass to `Timer(metadata={...})` is stored and **deep-copied** into each new Measurement on every `__enter__`. So you can only supply a shared template at init; per-run edits must happen after the Measurement for that run exists — in `on_start` or inside the block.
+
+## Example: add run context in `on_start`
+
+Omit metadata (or pass a dict); an empty dict is the default when you pass `None`. Fill it per run in `on_start` from context vars or thread-local storage:
+
+```python
+from contextvars import ContextVar
+from timerun import Timer
+
+request_id_ctx: ContextVar[str] = ContextVar("request_id", default="")
+
+def add_request_id(m):
+ m.metadata["request_id"] = request_id_ctx.get()
+
+with Timer(on_start=add_request_id) as m:
+ pass # your code
+
+# m.metadata now includes "request_id" for this run
+print(m.metadata) # e.g. {"request_id": "req-abc"}
+```
+
+## Example: set tags inside the block
+
+Mutating `m.metadata` inside the timed block is **generally not recommended** — prefer **callbacks** (e.g. `on_start`) when the context is known at the start of the run (e.g. request id, stage). It is useful when metadata must be **dynamically computed from code logic** (e.g. outcome, branch taken, or a value only known after some work):
+
+```python
+with Timer(metadata={"stage": "ingest"}) as m:
+ do_work()
+ if some_condition:
+ m.metadata["tag"] = "slow_path"
+# m.metadata is {"stage": "ingest", "tag": "slow_path"} when relevant
+```
+
+## Example: invocation count with a singleton counter
+
+Use a module-level (or singleton) counter and set it in `on_start` so each measurement carries the call number for that run (e.g. “call #1”, “#2”, …). Handy with a decorator to see invocation order:
+
+```python
+from timerun import Timer
+
+_invocation_counter = 0
+
+def set_invocation(m):
+ global _invocation_counter
+ _invocation_counter += 1
+ m.metadata["invocation"] = _invocation_counter
+
+with Timer(on_start=set_invocation) as m:
+ pass # your code
+
+# m.metadata["invocation"] is 1, 2, 3, ... for each run
+```
+
+**Next:** [Share results](share-results.md)
+
+For the API details (passing `metadata={...}`, reading `m.metadata`), see [Reference: Metadata](../guide/metadata.md).
diff --git a/docs/recipes/share-results.md b/docs/recipes/share-results.md
new file mode 100644
index 0000000..cb78e9c
--- /dev/null
+++ b/docs/recipes/share-results.md
@@ -0,0 +1,83 @@
+# Share results
+
+**Problem:** You need to get measurements out of the process — to a log, a file, OpenTelemetry, or a metrics backend.
+
+**Idea:** Use **`on_end`** (and optionally `on_start`) to push each measurement out when the run finishes. The callback receives the `Measurement` with `wall_time`, `cpu_time`, and `metadata` set.
+
+## Log
+
+```python
+import logging
+from timerun import Timer
+
+logger = logging.getLogger(__name__)
+
+def log_measurement(m):
+ logger.info(
+ "timing",
+ extra={
+ "wall_s": m.wall_time.timedelta.total_seconds(),
+ "cpu_s": m.cpu_time.timedelta.total_seconds(),
+ **m.metadata,
+ },
+ )
+
+with Timer(on_end=log_measurement):
+ do_work()
+```
+
+## File
+
+Append a line per measurement (e.g. JSON or CSV) in `on_end`. For high throughput, consider buffering and flushing in batches.
+
+```python
+import json
+from pathlib import Path
+from timerun import Timer
+
+path = Path("measurements.jsonl")
+
+def append_measurement(m):
+ record = {
+ "wall_ns": m.wall_time.duration,
+ "cpu_ns": m.cpu_time.duration,
+ **m.metadata,
+ }
+ with path.open("a") as f:
+ f.write(json.dumps(record) + "\n")
+
+with Timer(metadata={"run": "1"}, on_end=append_measurement):
+ do_work()
+```
+
+## OpenTelemetry
+
+Create a span in `on_start`, end it in `on_end`, and set attributes from the measurement. TimeRun does not depend on OpenTelemetry; you use its API from your callback.
+
+```python
+from timerun import Timer
+
+# Assume you have a Tracer (e.g. from opentelemetry.trace import get_tracer)
+# tracer = get_tracer(__name__)
+
+def on_start(m):
+ m.metadata["span"] = tracer.start_span("timerun")
+
+def on_end(m):
+ span = m.metadata.get("span")
+ if span is None:
+ return # If on_start didn't set a span, skip.
+ span.set_attribute("wall_time_ns", m.wall_time.duration)
+ span.set_attribute("cpu_time_ns", m.cpu_time.duration)
+ for k, v in m.metadata.items():
+ if k != "span" and v is not None:
+ span.set_attribute(k, str(v))
+ span.end()
+
+with Timer(on_start=on_start, on_end=on_end):
+ do_work()
+```
+
+**Next:** [Analyze results](analyze-results.md)
+
+For callback basics, see [Reference: Callbacks](../guide/callbacks.md). For the OpenTelemetry API, see the [OpenTelemetry Python docs](https://opentelemetry.io/docs/languages/python/).
diff --git a/features/block_timing.feature b/features/block_timing.feature
index 5ad828e..d887d42 100644
--- a/features/block_timing.feature
+++ b/features/block_timing.feature
@@ -64,6 +64,20 @@ Feature: Block timing
And the second measurement's metadata key "tag" is "original"
And the second measurement's metadata does not contain key "extra"
+ # --- Callbacks on start and end ---
+
+ Scenario: The on_start callback is invoked once with the same measurement instance the Timer yields for that block
+ Given an on_start callback that records invocations
+ When I measure a code block with a Timer that has that on_start callback
+ Then the on_start callback was called once
+ And the on_start callback was called with the same measurement instance that the Timer yielded for that block
+
+ Scenario: The on_end callback is invoked once with the same measurement instance the Timer yields for that block
+ Given an on_end callback that records invocations
+ When I measure a code block with a Timer that has that on_end callback
+ Then the on_end callback was called once
+ And the on_end callback was called with the same measurement instance that the Timer yielded for that block
+
# --- Edge cases and errors ---
Scenario: Block that raises still yields measurement; exception propagates
diff --git a/features/steps/block_timing_steps.py b/features/steps/block_timing_steps.py
index c1ceebc..cbbd03d 100644
--- a/features/steps/block_timing_steps.py
+++ b/features/steps/block_timing_steps.py
@@ -66,6 +66,21 @@ def step_given_metadata_add_in_first(
context.metadata_add_in_first = (key, value)
+@given("an {callback_kind} callback that records invocations")
+def step_given_callback_records_invocations(
+ context: Context,
+ callback_kind: str,
+) -> None:
+ """Store list and callback that records the measurement passed to it."""
+ invocations: list[timerun.Measurement] = []
+
+ def record_invocation(m: timerun.Measurement) -> None:
+ invocations.append(m)
+
+ setattr(context, f"{callback_kind}_invocations", invocations)
+ setattr(context, f"{callback_kind}_callback", record_invocation)
+
+
# --- When ---
@@ -146,6 +161,20 @@ def step_measure_block_with_metadata(context: Context) -> None:
pass
+@when(
+ "I measure a code block with a Timer that has that {callback_kind} "
+ "callback",
+)
+def step_measure_block_with_callback(
+ context: Context,
+ callback_kind: str,
+) -> None:
+ """Measure with Timer(on_start=... or on_end=...); run a trivial block."""
+ callback = getattr(context, f"{callback_kind}_callback")
+ with timerun.Timer(**{callback_kind: callback}) as context.measurement:
+ pass
+
+
@when(
"I measure two blocks with the same Timer instance and that metadata",
)
@@ -283,3 +312,26 @@ def step_block_yielded_measurement(context: Context) -> None:
"""Assert block produced a measurement."""
assert context.measurement is not None
assert context.measurement.wall_time is not None
+
+
+@then("the {callback_kind} callback was called once")
+def step_callback_called_once(context: Context, callback_kind: str) -> None:
+ """Assert the callback was invoked exactly once."""
+ invocations = getattr(context, f"{callback_kind}_invocations")
+ assert len(invocations) == 1, (
+ f"expected the {callback_kind} callback to be called once, "
+ f"got {len(invocations)}"
+ )
+
+
+@then(
+ "the {callback_kind} callback was called with the same measurement "
+ "instance that the Timer yielded for that block",
+)
+def step_callback_called_with_the_measurement(
+ context: Context,
+ callback_kind: str,
+) -> None:
+ """Assert callback's argument is the same instance the Timer yielded."""
+ arg = getattr(context, f"{callback_kind}_invocations")[0]
+ assert arg is context.measurement
diff --git a/pyproject.toml b/pyproject.toml
index 2f1f48e..95a332f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,7 +18,7 @@ keywords = [
]
authors = [{ name = "HH-MWB", email = "h.hong@mail.com" }]
classifiers = [
- "Development Status :: 3 - Alpha",
+ "Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
@@ -34,7 +34,8 @@ classifiers = [
dynamic = ["version"]
[project.optional-dependencies]
-dev = ["behave", "coverage"]
+dev = ["behave", "coverage", "pip-audit"]
+docs = ["zensical"]
[project.urls]
Homepage = "https://github.com/HH-MWB/timerun"
@@ -107,3 +108,6 @@ max-complexity = 5
[tool.ruff.lint.per-file-ignores]
"features/steps/*.py" = ["S101"]
+
+[tool.coverage.report]
+fail_under = 100
diff --git a/timerun.py b/timerun.py
index bffa383..41e98f7 100644
--- a/timerun.py
+++ b/timerun.py
@@ -22,13 +22,12 @@
cast,
)
-__version__: str = "0.5.0"
+__version__: str = "0.6.0"
__all__ = [
"Measurement",
"TimeSpan",
"Timer",
- "__version__",
]
P = ParamSpec("P")
@@ -133,11 +132,27 @@ class Timer:
metadata : dict or None, optional
Key-value metadata for the measurement(s). Stored by reference; each
measurement gets a deep copy at enter time. Defaults to ``{}``.
+ on_start : callable or None, optional
+ Called once per measurement when timing is about to start. Receives the
+ :class:`Measurement` (with ``metadata`` set; ``wall_time`` and
+ ``cpu_time`` are ``None``). Use for logging or setting up external span
+ context. Defaults to ``None``.
+ on_end : callable or None, optional
+ Called once per measurement when timing has just ended. Receives the
+ :class:`Measurement` with ``wall_time`` and ``cpu_time`` set. Use for
+ logging duration, sending to OpenTelemetry, or enqueueing to a metrics
+ pipeline. Defaults to ``None``.
maxlen : int or None, optional
Only used in decorator mode. Maximum number of measurements to keep on
the wrapped callable. Ignored when used as a context manager. Defaults
to ``None`` (unbounded).
+ Notes
+ -----
+ Callbacks are synchronous only. For async exporters (e.g. OpenTelemetry),
+ schedule work from the callback (e.g. ``asyncio.create_task(export(m))``
+ when in an async context, or a thread/queue).
+
Yields (context manager)
-----------------------
Measurement
@@ -169,21 +184,34 @@ def func():
def __init__(
self,
+ *,
metadata: dict[str, object] | None = None,
+ on_start: Callable[[Measurement], None] | None = None,
+ on_end: Callable[[Measurement], None] | None = None,
maxlen: int | None = None,
) -> None:
- """Initialize with optional metadata and maxlen (decorator mode)."""
+ """Init with optional metadata, callbacks, and maxlen (decorator)."""
self._metadata = metadata if isinstance(metadata, dict) else {}
+ self._on_start = on_start
+ self._on_end = on_end
self._maxlen = maxlen
self._local = local()
def __enter__(self) -> Measurement:
"""Start timing; return the measurement record."""
+ # Create measurement with a deep copy of timer metadata.
measurement = Measurement(metadata=deepcopy(self._metadata))
+
+ # Ensure thread-local stack exists and record start timestamps.
self._local.stack = getattr(self._local, "stack", deque())
self._local.stack.append(
(measurement, perf_counter_ns(), process_time_ns()),
)
+
+ # Notify caller timing started (wall_time/cpu_time still None).
+ if self._on_start is not None:
+ self._on_start(measurement)
+
return measurement
def __exit__(
@@ -193,15 +221,25 @@ def __exit__(
exc_tb: TracebackType | None,
) -> Literal[False]:
"""Stop timing; set wall_time and cpu_time on the measurement."""
+ # Capture end timestamps (before popping to pair with __enter__).
cpu_end = process_time_ns()
wall_end = perf_counter_ns()
+
+ # Pop (measurement, wall_start, cpu_start) from this thread.
try:
measurement, wall_start, cpu_start = self._local.stack.pop()
except (AttributeError, IndexError) as e:
msg = "__exit__ called without a matching __enter__"
raise RuntimeError(msg) from e
+
+ # Attach elapsed spans to the measurement.
measurement.wall_time = TimeSpan(start=wall_start, end=wall_end)
measurement.cpu_time = TimeSpan(start=cpu_start, end=cpu_end)
+
+ # Notify caller that timing has ended (measurement is fully populated).
+ if self._on_end is not None:
+ self._on_end(measurement)
+
return False
async def __aenter__(self) -> Measurement:
@@ -225,7 +263,17 @@ def __call__( # noqa: C901
| _TimedCallable[P, AsyncGenerator[Y, None]]
| _TimedCallable[P, Generator[Y, None, None]]
):
- """When given a callable, wrap it with timing (decorator usage)."""
+ """When given a callable, wrap it with timing (decorator usage).
+
+ Notes
+ -----
+ In each wrapper branch, ``append_measurement(m)`` in the ``finally``
+ block uses ``m`` from the context manager (``with self as m`` or
+ ``async with self as m``). The context manager always runs before the
+ ``finally`` block, so ``m`` is always set. The used-before-assignment
+ linter warning is a false positive.
+
+ """
measurements: deque[Measurement] = deque(maxlen=self._maxlen)
lock = Lock()
diff --git a/zensical.toml b/zensical.toml
new file mode 100644
index 0000000..e743036
--- /dev/null
+++ b/zensical.toml
@@ -0,0 +1,63 @@
+# Zensical config for TimeRun (https://hh-mwb.github.io/timerun/)
+
+[project]
+# Identity
+site_name = "TimeRun"
+site_description = "Python package for time measurement — wall-clock and CPU time, zero dependencies."
+site_url = "https://hh-mwb.github.io/timerun/"
+
+# Repository
+repo_url = "https://github.com/HH-MWB/timerun"
+
+# Navigation
+nav = [
+ { "Home" = "index.md" },
+ { "About" = "about/index.md" },
+ { "Reference" = [
+ "guide/index.md",
+ { "TimeSpan" = "guide/timespan.md" },
+ { "Measurement" = "guide/measurement.md" },
+ { "Timer (overview)" = "guide/timer.md" },
+ { "Measure a block" = "guide/measure-block.md" },
+ { "Measure functions" = "guide/measure-functions.md" },
+ { "Metadata" = "guide/metadata.md" },
+ { "Callbacks" = "guide/callbacks.md" }
+ ]},
+ { "Recipes" = [
+ "recipes/index.md",
+ { "Use metadata effectively" = "recipes/metadata.md" },
+ { "Share results" = "recipes/share-results.md" },
+ { "Analyze results" = "recipes/analyze-results.md" }
+ ]}
+]
+
+[project.theme]
+features = [
+ "content.code.copy",
+ "navigation.instant",
+ "navigation.tabs",
+ "navigation.sections",
+ "navigation.top",
+ "search.highlight"
+]
+
+# Light / dark / auto (follow system) toggle
+[[project.theme.palette]]
+media = "(prefers-color-scheme)"
+toggle.icon = "lucide/sun-moon"
+toggle.name = "Follow system"
+
+[[project.theme.palette]]
+media = "(prefers-color-scheme: light)"
+scheme = "default"
+toggle.icon = "lucide/sun"
+toggle.name = "Light mode"
+
+[[project.theme.palette]]
+media = "(prefers-color-scheme: dark)"
+scheme = "slate"
+toggle.icon = "lucide/moon"
+toggle.name = "Dark mode"
+
+[project.theme.icon]
+repo = "fontawesome/brands/github"