From 6c25840144ec9337e6559acda91321201150cdc0 Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Tue, 3 Feb 2026 23:58:01 -0500
Subject: [PATCH 1/8] chore: restart from scratch with behavior-driven
development
---
.github/workflows/ci.yaml | 10 +-
.pre-commit-config.yaml | 35 ++-
CONTRIBUTING.md | 205 +++++++++-----
Makefile | 7 +-
README.md | 95 +------
features/steps/__init__.py | 1 +
features/steps/version_steps.py | 25 ++
features/version.feature | 10 +
pyproject.toml | 57 +++-
tests/__init__.py | 1 -
tests/conftest.py | 211 --------------
tests/test_elapsedtime.py | 165 -----------
tests/test_stopwatch.py | 138 ---------
tests/test_timer.py | 482 --------------------------------
timerun.py | 451 +-----------------------------
15 files changed, 274 insertions(+), 1619 deletions(-)
create mode 100644 features/steps/__init__.py
create mode 100644 features/steps/version_steps.py
create mode 100644 features/version.feature
delete mode 100644 tests/__init__.py
delete mode 100644 tests/conftest.py
delete mode 100644 tests/test_elapsedtime.py
delete mode 100644 tests/test_stopwatch.py
delete mode 100644 tests/test_timer.py
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index c2f9f82..6bf5b09 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -45,12 +45,10 @@ jobs:
run: pip install -e ".[dev]"
- name: Run tests with coverage
- run: >-
- python -m pytest tests/
- --cov=timerun
- --cov-branch
- --cov-report=xml
- --cov-report=term
+ run: |
+ coverage run --source=timerun -m behave
+ coverage report
+ coverage xml
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 098c0ca..3022d7c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,24 +11,39 @@ repos:
- id: check-yaml
- id: check-toml
- - repo: https://github.com/HH-MWB/pyenforce
- rev: v0.1.0
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.15.0
hooks:
- id: ruff-format
- id: ruff-check
+
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v1.19.1
+ hooks:
- id: mypy
- additional_dependencies:
- - ".[mypy]" # Required to re-adds mypy as a dependency
- - pytest
+ args: ["--scripts-are-modules"]
+ files: ^timerun\.py$
+ additional_dependencies: [behave]
+
+ - repo: https://github.com/pylint-dev/pylint
+ rev: v4.0.4
+ hooks:
- id: pylint
- additional_dependencies:
- - ".[pylint]" # Required to re-adds Pylint as a dependency
- - pytest
+ additional_dependencies: [behave]
+
+ - repo: https://github.com/PyCQA/bandit
+ rev: 1.9.3
+ hooks:
- id: bandit
+ args: ["-c", "pyproject.toml"]
+
+ - repo: https://github.com/semgrep/pre-commit
+ rev: v1.150.0
+ hooks:
- id: semgrep
- - id: vulture
+ args: ["--config", "p/python", "--error"]
- repo: https://github.com/adrienverge/yamllint
- rev: v1.37.1
+ rev: v1.38.0
hooks:
- id: yamllint
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 46ac27d..f85bf83 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,119 +1,192 @@
# Contributing to TimeRun
-Thank you for your interest in contributing to TimeRun! This document provides guidelines for contributing to the project.
+Thank you for considering contributing to TimeRun. This guide explains how to set up your environment, run tests, and submit changes.
-## Getting Started
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [How You Can Help](#how-you-can-help)
+- [Development Setup](#development-setup)
+- [Testing](#testing)
+- [Code Style and Quality](#code-style-and-quality)
+- [Project Structure](#project-structure)
+- [Pull Request Process](#pull-request-process)
+- [Reporting Bugs](#reporting-bugs)
+- [License](#license)
+
+## Code of Conduct
+
+Please be respectful and constructive. By participating, you agree to uphold a welcoming environment for everyone.
+
+## How You Can Help
+
+- **Report bugs** — Open an issue with clear steps to reproduce.
+- **Suggest features** — Open an issue describing the use case and desired behavior.
+- **Submit code** — Fix bugs or add features via pull requests (see [Pull Request Process](#pull-request-process)).
+- **Improve docs** — Fix typos, clarify README or docstrings, or add examples.
+
+## Development Setup
### Prerequisites
-- Python 3.9 or higher
-- Git
+- **Python 3.9+**
+- **Git**
-### Development Setup
+### One-time setup
+
+1. **Fork** the repository on GitHub, then clone your fork:
-1. Fork the repository on GitHub
-2. Clone your fork locally:
```bash
git clone https://github.com/YOUR_USERNAME/timerun.git
cd timerun
```
-3. Set up the development environment:
+2. **Create and activate a virtual environment** (recommended):
+
```bash
- make init
+ python3 -m venv .venv
+ source .venv/bin/activate # Windows: .venv\Scripts\activate
```
-4. Activate the virtual environment:
+3. **Install the project in editable mode with dev dependencies**:
+
```bash
- source .venv/bin/activate
+ pip install -e ".[dev]"
```
-## Development Workflow
+4. **Install and enable pre-commit hooks** (optional but recommended):
-### Running Tests
+ ```bash
+ pip install pre-commit
+ pre-commit install
+ ```
+
+ Or use the convenience target:
+
+ ```bash
+ make init
+ ```
+
+ Then activate the venv: `source .venv/bin/activate`.
+
+### Verify setup
+
+Run the test suite:
-Run the test suite with coverage:
```bash
make test
```
-### Code Style
+You should see the BDD scenarios run and a coverage report.
+
+## Testing
+
+TimeRun uses **behavior-driven development (BDD)** with [behave](https://behave.readthedocs.io/). All tests are written in Gherkin and live under `features/`.
+
+### Run tests
+
+| Command | Description |
+|----------------|--------------------------------------|
+| `make test` | Run BDD suite with coverage report |
+| `behave` | Run BDD suite only (no coverage) |
+
+### Run coverage manually
+
+```bash
+coverage run --source=timerun -m behave
+coverage report --show-missing
+```
+
+### Adding or changing tests
-This project follows these code style guidelines:
-- **Black** for code formatting (line length: 79 characters)
-- **isort** for import sorting
+- **Feature files** — Add or edit `.feature` files in `features/` (e.g. `features/version.feature`). Use standard Gherkin: `Feature`, `Scenario`, `Given`, `When`, `Then`.
+- **Step definitions** — Implement steps in Python under `features/steps/`, typically in a `*_steps.py` file. Use `@given`, `@when`, `@then` from `behave`; step functions receive a `context` argument.
+- Keep scenarios focused and steps reusable. Add or extend scenarios for new behavior rather than skipping BDD.
+
+## Code Style and Quality
+
+Style and linting are enforced via **pre-commit** (Ruff, mypy, Pylint, and other hooks). After `pre-commit install`, these run automatically on each commit.
+
+### Run checks manually
-Pre-commit hooks are installed automatically with `make init` and will run on every commit. You can also run them manually:
```bash
pre-commit run --all-files
```
-### Making Changes
+### What we expect
-1. Create a new branch for your feature or bugfix:
- ```bash
- git checkout -b feature/your-feature-name
- ```
+- **Formatting** — Ruff format (run via pre-commit or `ruff format`).
+- **Linting** — Ruff check, Pylint, and other hooks must pass.
+- **Types** — Use type hints for public APIs; mypy must pass.
+- **Docstrings** — Public functions, classes, and modules should have docstrings.
+- **Security** — Bandit and Semgrep run in pre-commit; address any reported issues.
-2. Make your changes following the project conventions
-3. Add or update tests as needed
-4. Ensure all tests pass: `make test`
-5. Commit your changes with a clear message
+Fixing pre-commit failures before pushing keeps the history clean and CI green.
-### Submitting Changes
+## Project Structure
-1. Push your branch to your fork:
- ```bash
- git push origin feature/your-feature-name
- ```
+```
+timerun/
+├── timerun.py # Library (single-file by design)
+├── features/ # BDD feature files (Gherkin)
+│ ├── *.feature
+│ └── steps/ # Step definitions (Python)
+│ └── *_steps.py
+├── pyproject.toml # Project metadata and config
+├── Makefile # Commands: init, test, clean, help
+├── README.md
+├── CONTRIBUTING.md
+└── LICENSE
+```
-2. Create a pull request on GitHub with:
- - Clear description of the changes
- - Reference to any related issues
- - Test coverage for new functionality
+- **`timerun.py`** — The only library module; keep it a single file by design.
+- **`features/`** — All executable specs; no separate unit test directory.
-## Project Structure
+## Pull Request Process
-- `timerun.py` - Main library code (single file module)
-- `tests/` - Test suite
-- `pyproject.toml` - Project configuration and dependencies
-- `Makefile` - Development commands
+1. **Create a branch** from `main`:
-## Guidelines
+ ```bash
+ git checkout main
+ git pull origin main
+ git checkout -b feature/short-description # or fix/short-description
+ ```
-### Code Quality
+2. **Make your changes** — Follow [Code Style and Quality](#code-style-and-quality) and add or update BDD scenarios in `features/` for new or changed behavior.
-- Maintain 100% test coverage for new code
-- Follow existing code patterns and conventions
-- Add docstrings for all public functions and classes
-- Use type hints consistently
+3. **Run the suite and pre-commit**:
-### Testing
+ ```bash
+ make test
+ pre-commit run --all-files
+ ```
-- Write tests for all new functionality
-- Use descriptive test names
-- Test both success and error cases
-- Keep tests focused and independent
+4. **Commit** with clear, concise messages. Optionally use conventional style (e.g. `feat: add X`, `fix: correct Y`).
-### Documentation
+5. **Push** to your fork and open a pull request against `main`:
-- Update docstrings for any API changes
-- Add examples for new features
-- Update README.md if needed
+ ```bash
+ git push origin feature/short-description
+ ```
-## Reporting Issues
+6. **Fill out the PR**:
+ - Describe what changed and why.
+ - Reference any related issues (e.g. "Fixes #123").
+ - Confirm tests pass and, for new behavior, that BDD scenarios were added or updated.
-When reporting bugs or requesting features:
+Maintainers will review and may request changes. Once approved, your PR will be merged.
-1. Check existing issues first
-2. Use the issue templates if available
-3. Provide clear reproduction steps for bugs
-4. Include Python version and environment details
+## Reporting Bugs
-## Questions?
+- **Search** existing issues to avoid duplicates.
+- **Open an issue** with:
+ - A short, clear title.
+ - Steps to reproduce (code or commands).
+ - Expected vs actual behavior.
+ - Your environment: OS, Python version (`python --version`), and how you installed TimeRun (pip, editable, etc.).
-Feel free to open an issue for questions about contributing or reach out to the maintainers.
+For small, obvious fixes you may open a PR directly with a short explanation.
## License
-By contributing to TimeRun, you agree that your contributions will be licensed under the MIT License.
+Contributions are made under the [MIT License](LICENSE). By submitting a pull request, you agree that your contributions will be licensed under the same terms.
diff --git a/Makefile b/Makefile
index 72b7f0a..57ba0ce 100644
--- a/Makefile
+++ b/Makefile
@@ -22,12 +22,13 @@ init: ## Set up Python development environment with pre-commit hooks
@echo "Development environment ready! To activate it, run: source $(VENV_DIR)/bin/activate"
.PHONY: test
-test: ## Run all tests and display coverage ratio
- @"$(VENV_DIR)/bin/pytest" tests/ --cov=timerun --cov-report=term-missing
+test: ## Run BDD tests with behave and display coverage
+ @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave
+ @"$(VENV_DIR)/bin/coverage" report --show-missing
.PHONY: clean
clean: ## Delete all temporary files including venv
@rm -rf "$(VENV_DIR)" *.egg-info
- @rm -rf .mypy_cache .pytest_cache .coverage htmlcov
+ @rm -rf .mypy_cache .coverage htmlcov
@find . -name "*.pyc" -delete
@find . -name "__pycache__" -type d -exec rm -rf {} +
diff --git a/README.md b/README.md
index 45a9db9..4430396 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
-TimeRun - Python library for elapsed time measurement.
+TimeRun — Python package for time measurement.
@@ -14,27 +14,24 @@
-TimeRun is a simple, yet elegant elapsed time measurement library for [Python](https://www.python.org). It is distributed as a single file module and has no dependencies other than the [Python Standard Library](https://docs.python.org/3/library/).
+TimeRun is a **single-file** Python package with no dependencies beyond the [Python Standard Library](https://docs.python.org/3/library/), and it is designed to stay that way.
-- **Elapsed Time**: Customized time delta which represents elapsed time in nanoseconds
-- **Stopwatch**: An elapsed time measurer with the highest available resolution
-- **Timer**: Convenient syntax to capture and save measured elapsed time results
## Setup
### Prerequisites
-The only prerequisite to use TimeRun is running **Python 3.9+**.
+**Python 3.9+**
### Installation
-Install TimeRun from [Python Package Index](https://pypi.org/project/timerun/):
+From [PyPI](https://pypi.org/project/timerun/):
```bash
pip install timerun
```
-Install TimeRun from [Source Code](https://github.com/HH-MWB/timerun):
+From source:
```bash
pip install git+https://github.com/HH-MWB/timerun.git
@@ -42,88 +39,12 @@ pip install git+https://github.com/HH-MWB/timerun.git
## Quickstart
-### Measure Code Block
-
-```python
->>> import time
->>> from timerun import Timer
->>> with Timer() as timer:
-... time.sleep(0.1) # your code here
->>> print(timer.duration)
-0:00:00.100000000
-```
-
-### Measure Function
-
-```python
->>> import time
->>> from timerun import Timer
->>> timer = Timer()
->>> @timer
-... def func():
-... time.sleep(0.1) # your code here
->>> func()
->>> print(timer.duration)
-0:00:00.100000000
-```
-
-### Measure Async Function
-
-```python
->>> import asyncio
->>> from timerun import Timer
->>> timer = Timer()
->>> @timer
-... async def async_func():
-... await asyncio.sleep(0.1) # your code here
->>> asyncio.run(async_func())
->>> print(timer.duration)
-0:00:00.100000000
-```
-
-### Measure Async Code Block
-
-```python
->>> import asyncio
->>> from timerun import Timer
->>> async def async_code():
-... async with Timer() as timer:
-... await asyncio.sleep(0.1) # your code here
-... print(timer.duration)
->>> asyncio.run(async_code())
-0:00:00.100000000
-```
-
-### Multiple Measurements
-
-```python
->>> import time
->>> from timerun import Timer
->>> timer = Timer()
->>> with timer:
-... time.sleep(0.1) # your code here
->>> with timer:
-... time.sleep(0.1) # your code here
->>> print(timer.duration) # Last duration
-0:00:00.100000000
->>> print(timer.durations) # All durations
-(ElapsedTime(nanoseconds=100000000), ElapsedTime(nanoseconds=100000000))
-```
-
-### Advanced Options
-
-```python
->>> from timerun import Timer
->>> # Exclude sleep time from measurements
->>> timer = Timer(count_sleep=False)
->>> # Limit storage to last 10 measurements
->>> timer = Timer(max_len=10)
-```
+TBD
## Contributing
-We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines on how to contribute to this project.
+Contributions are welcome. See [CONTRIBUTING.md](https://github.com/HH-MWB/timerun/blob/main/CONTRIBUTING.md) for setup, testing, and pull request guidelines.
## License
-This project is licensed under the MIT License - see the [LICENSE](https://github.com/HH-MWB/timerun/blob/main/LICENSE) file for details.
+This project is licensed under the MIT License — see the [LICENSE](https://github.com/HH-MWB/timerun/blob/main/LICENSE) file for details.
diff --git a/features/steps/__init__.py b/features/steps/__init__.py
new file mode 100644
index 0000000..7fe3dcf
--- /dev/null
+++ b/features/steps/__init__.py
@@ -0,0 +1 @@
+"""Behave step definitions."""
diff --git a/features/steps/version_steps.py b/features/steps/version_steps.py
new file mode 100644
index 0000000..5b1c65f
--- /dev/null
+++ b/features/steps/version_steps.py
@@ -0,0 +1,25 @@
+"""Step definitions for the package version feature."""
+
+from behave import then, when
+from behave.runner import Context
+
+import timerun
+
+
+@when("I read the package version")
+def step_read_version(context: Context) -> None:
+ """Read the package version and store it for Then steps."""
+ context.version = getattr(timerun, "__version__", None)
+
+
+@then("the package has a version")
+def step_package_has_version(context: Context) -> None:
+ """Assert the package exposes a version."""
+ assert context.version is not None
+
+
+@then("the version is a non-empty string")
+def step_version_non_empty_string(context: Context) -> None:
+ """Assert the version is a non-empty string."""
+ assert isinstance(context.version, str)
+ assert len(context.version) > 0
diff --git a/features/version.feature b/features/version.feature
new file mode 100644
index 0000000..63824ed
--- /dev/null
+++ b/features/version.feature
@@ -0,0 +1,10 @@
+Feature: Package version
+
+ As a user or tool integrating with timerun,
+ I want to read the package version programmatically,
+ so that I can check compatibility, display it to users, or use it in automation.
+
+ Scenario: Package exposes a readable version
+ When I read the package version
+ Then the package has a version
+ And the version is a non-empty string
diff --git a/pyproject.toml b/pyproject.toml
index 67d8443..bfeaf1d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,7 +35,7 @@ classifiers = [
dynamic = ["version"]
[project.optional-dependencies]
-dev = ["pytest", "pytest-asyncio", "pytest-cov"]
+dev = ["behave", "coverage"]
[project.urls]
Homepage = "https://github.com/HH-MWB/timerun"
@@ -44,9 +44,64 @@ Changelog = "https://github.com/HH-MWB/timerun/releases"
PyPI = "https://pypi.org/project/timerun"
[tool.setuptools]
+py-modules = ["timerun"]
zip-safe = true
include-package-data = false
license-files = ["LICENSE"]
[tool.setuptools.dynamic]
version = { attr = "timerun.__version__" }
+
+[tool.bandit]
+exclude_dirs = ["features/steps"]
+
+[tool.bandit.try_except_pass]
+check_typed_exception = true
+
+[tool.mypy]
+strict = true
+disallow_any_generics = true
+disallow_any_unimported = true
+disallow_any_explicit = true
+disallow_untyped_defs = true
+disallow_untyped_calls = true
+disallow_untyped_decorators = true
+strict_equality = true
+strict_equality_for_none = true
+warn_redundant_casts = true
+warn_return_any = true
+warn_unreachable = true
+warn_unused_ignores = true
+enable_error_code = [
+ "deprecated",
+ "exhaustive-match",
+ "explicit-override",
+ "ignore-without-code",
+ "possibly-undefined",
+ "redundant-expr",
+ "redundant-self",
+ "truthy-bool",
+ "unimported-reveal",
+ "unused-awaitable",
+]
+
+[tool.pylint.messages_control]
+disable = ["not-callable"] # false positive for behave's when/then decorators
+
+[tool.ruff]
+line-length = 79
+fix = true
+force-exclude = true
+
+[tool.ruff.lint]
+select = ["ALL"]
+ignore = [
+ "D203", # Incompatible with D211 (no-blank-line-before-class)
+ "D213", # Incompatible with D212 (multi-line-summary-first-line)
+]
+
+[tool.ruff.lint.mccabe]
+max-complexity = 5
+
+[tool.ruff.lint.per-file-ignores]
+"features/steps/*.py" = ["S101"]
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index 683ed66..0000000
--- a/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Test suite for timerun."""
diff --git a/tests/conftest.py b/tests/conftest.py
deleted file mode 100644
index 9a42247..0000000
--- a/tests/conftest.py
+++ /dev/null
@@ -1,211 +0,0 @@
-"""A collection of shared PyTest fixtures for timerun."""
-
-from collections.abc import Callable, Iterable, Iterator
-from contextlib import AbstractContextManager, contextmanager
-from unittest.mock import Mock
-
-import pytest
-
-from timerun import ElapsedTime, Stopwatch, Timer
-
-# =========================================================================== #
-# Patcheres #
-# =========================================================================== #
-
-
-@pytest.fixture
-def patch_clock(
- monkeypatch: pytest.MonkeyPatch,
-) -> Callable[[int], AbstractContextManager[None]]:
- """Patch the clock method in Stopwatch.
-
- Parameters
- ----------
- monkeypatch : MonkeyPatch
- The fixture has been used to patch the clock method.
-
- Returns
- -------
- Callable[[int], AbstractContextManager[None]]
- A context manager takes integer argument and patch that value as
- the return value of the clock method.
-
- Examples
- --------
- >>> with patch_clock(1):
- ... pass
-
- """
-
- @contextmanager
- def patch(elapsed_ns: int) -> Iterator[None]:
- """Patch clock method through monkeypatch context.
-
- Parameters
- ----------
- elapsed_ns : int
- The value should be returned by the clock method.
-
- Yields
- ------
- None
- Control is yielded back to the caller.
-
- """
- monkeypatch.setattr(Stopwatch, "_clock", lambda _: elapsed_ns)
- yield
-
- return patch
-
-
-@pytest.fixture
-def patch_split(
- monkeypatch: pytest.MonkeyPatch,
-) -> Callable[[Iterable[int]], AbstractContextManager[None]]:
- """Patch the split method in Timer.
-
- Parameters
- ----------
- monkeypatch : MonkeyPatch
- The fixture has been used to patch the split method.
-
- Returns
- -------
- Callable[[Iterable[int]], AbstractContextManager[None]]
- A context manager takes a list of integers as nanoseconds and
- patch those as the return values of the elapse method.
-
- Examples
- --------
- >>> with patch_split([100, 200, 300]):
- ... pass
-
- """
-
- @contextmanager
- def patch(elapsed_times: Iterable[int]) -> Iterator[None]:
- """Patch split method through monkeypatch context.
-
- Parameters
- ----------
- elapsed_times : Iterable[int]
- The nanoseconds should be returned by the split method.
-
- Yields
- ------
- None
- Control is yielded back to the caller.
-
- """
- mock_stopwatch = Mock(spec=["reset", "split"])
- mock_stopwatch.split.configure_mock(
- side_effect=[ElapsedTime(nanoseconds=t) for t in elapsed_times],
- )
-
- monkeypatch.setattr(Timer, "_stopwatch", mock_stopwatch)
- yield
-
- return patch
-
-
-# =========================================================================== #
-# Initiated Instances #
-# =========================================================================== #
-
-
-@pytest.fixture
-def stopwatch() -> Stopwatch:
- """Create a Stopwatch started at time ``0``.
-
- Returns
- -------
- Stopwatch
- A stopwatch started at time ``0``.
-
- """
- watch: Stopwatch = Stopwatch()
- watch._start = 0 # pylint: disable=protected-access # noqa: SLF001
- return watch
-
-
-@pytest.fixture
-def timer() -> Timer:
- """Create a Timer with unlimited storage size.
-
- Returns
- -------
- Timer
- A newly created Timer.
-
- """
- return Timer()
-
-
-# =========================================================================== #
-# Elapsed Time #
-# =========================================================================== #
-
-
-@pytest.fixture
-def elapsed_1_ns() -> ElapsedTime:
- """Elapsed Time of 1 nanosecond.
-
- Returns
- -------
- ElapsedTime
- Elapsed time of 1 nanosecond.
-
- """
- return ElapsedTime(nanoseconds=1)
-
-
-@pytest.fixture
-def elapsed_100_ns() -> ElapsedTime:
- """Elapsed Time of 100 nanoseconds.
-
- Returns
- -------
- ElapsedTime
- Elapsed time of 100 nanoseconds.
-
- """
- return ElapsedTime(nanoseconds=100)
-
-
-@pytest.fixture
-def elapsed_1_ms() -> ElapsedTime:
- """Elapsed Time of 1 microsecond.
-
- Returns
- -------
- ElapsedTime
- Elapsed time of 1 microsecond.
-
- """
- return ElapsedTime(nanoseconds=1000)
-
-
-@pytest.fixture
-def elapsed_1_pt_5_ms() -> ElapsedTime:
- """Elapsed Time of 1.5 microseconds.
-
- Returns
- -------
- ElapsedTime
- Elapsed time of 1.5 microseconds.
-
- """
- return ElapsedTime(nanoseconds=1500)
-
-
-@pytest.fixture
-def elapsed_1_sec() -> ElapsedTime:
- """Elapsed Time of 1 second.
-
- Returns
- -------
- ElapsedTime
- Elapsed time of 1 second.
-
- """
- return ElapsedTime(nanoseconds=int(1e9))
diff --git a/tests/test_elapsedtime.py b/tests/test_elapsedtime.py
deleted file mode 100644
index f54ff70..0000000
--- a/tests/test_elapsedtime.py
+++ /dev/null
@@ -1,165 +0,0 @@
-"""A collection of tests for class ``ElapsedTime``."""
-
-# pylint: disable=no-self-use,magic-value-comparison
-
-from dataclasses import FrozenInstanceError
-from datetime import timedelta
-
-import pytest
-
-from timerun import ElapsedTime
-
-
-class TestInit:
- """Test suite for Elapsed Time initialization."""
-
- def test_init_without_keyword(self) -> None:
- """Test initiate ElapsedTime."""
- duration: ElapsedTime = ElapsedTime(1)
- assert duration.nanoseconds == 1
-
- def test_init_using_keyword(self) -> None:
- """Test initiate ElapsedTime using keyword."""
- duration: ElapsedTime = ElapsedTime(nanoseconds=1)
- assert duration.nanoseconds == 1
-
-
-class TestImmutable: # pylint: disable=too-few-public-methods
- """Test ElapsedTime is immutable."""
-
- def test_modify_after_init(self, elapsed_1_ns: ElapsedTime) -> None:
- """Test modify after initialization.
-
- ElapsedTime is expected to be immutable. Update attribute after
- would fail and raise ``FrozenInstanceError``.
-
- Parameters
- ----------
- elapsed_1_ns : ElapsedTime
- A ElapsedTime instance will be using to update attribute.
-
- """
- with pytest.raises(FrozenInstanceError):
- elapsed_1_ns.nanoseconds = 0 # type: ignore[misc]
- assert elapsed_1_ns.nanoseconds == 1
-
-
-class TestComparable:
- """Test ElapsedTime is comparable."""
-
- def test_equal(self) -> None:
- """Test '==' operator for ElapsedTime."""
- assert ElapsedTime(nanoseconds=1000) == ElapsedTime(nanoseconds=1000)
-
- def test_not_equal(self) -> None:
- """Test '!=' operator for ElapsedTime."""
- assert ElapsedTime(nanoseconds=1000) != ElapsedTime(nanoseconds=2000)
-
- def test_greater_than(self) -> None:
- """Test '>' operator for ElapsedTime."""
- assert ElapsedTime(nanoseconds=2000) > ElapsedTime(nanoseconds=1000)
-
- def test_smaller_than(self) -> None:
- """Test '<' operator for ElapsedTime."""
- assert ElapsedTime(nanoseconds=1000) < ElapsedTime(nanoseconds=2000)
-
- def test_greater_or_equal(self) -> None:
- """Test '>=' operator for ElapsedTime."""
- assert ElapsedTime(nanoseconds=1000) >= ElapsedTime(nanoseconds=1000)
- assert ElapsedTime(nanoseconds=2000) >= ElapsedTime(nanoseconds=1000)
-
- def test_smaller_or_equal(self) -> None:
- """Test '<=' operator for ElapsedTime."""
- assert ElapsedTime(nanoseconds=1000) <= ElapsedTime(nanoseconds=1000)
- assert ElapsedTime(nanoseconds=1000) <= ElapsedTime(nanoseconds=2000)
-
-
-class TestTimedeltaAttribute:
- """Test using timedelta attribute."""
-
- def test_microseconds_accuracy(self, elapsed_1_ms: ElapsedTime) -> None:
- """Test using ElapsedTime of 1 microsecond.
-
- Given ElapsedTime of ``1`` microsecond, expected timedelta is
- ``1`` microsecond.
-
- Parameters
- ----------
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
-
- """
- assert elapsed_1_ms.timedelta == timedelta(microseconds=1)
-
- def test_nanoseconds_accuracy(
- self,
- elapsed_1_pt_5_ms: ElapsedTime,
- ) -> None:
- """Test using ElapsedTime of 1.5 microseconds.
-
- Given ElapsedTime of ``1.5`` microseconds expected timedelta to
- be ``1`` microsecond, because of the accuracy lost.
-
- Parameters
- ----------
- elapsed_1_pt_5_ms : ElapsedTime
- Elapsed Time of 1.5 microseconds.
-
- """
- assert elapsed_1_pt_5_ms.timedelta == timedelta(microseconds=1)
-
-
-class TestStr:
- """Test suite for calling str function on ElapsedTime."""
-
- def test_elapsed_time_seconds_as_decimals(
- self,
- elapsed_100_ns: ElapsedTime,
- ) -> None:
- """Test elapsed time in seconds is in decimal.
-
- Given an elapsed time, expected to see the part after seconds as
- a decimal part.
-
- Parameters
- ----------
- elapsed_100_ns : ElapsedTime
- Elapsed Time to be used to call ``str``.
-
- """
- assert str(elapsed_100_ns) == "0:00:00.000000100"
-
- def test_elapsed_time_seconds_as_integer(
- self,
- elapsed_1_sec: ElapsedTime,
- ) -> None:
- """Test elapsed time in seconds is an integer.
-
- Given an elapsed time in integer seconds, the decimal part
- should be hidden.
-
- Parameters
- ----------
- elapsed_1_sec : ElapsedTime
- Elapsed Time to be used to call ``str``.
-
- """
- assert str(elapsed_1_sec) == "0:00:01"
-
-
-class TestRepr: # pylint: disable=too-few-public-methods
- """Test suite for calling repr function on ElapsedTime."""
-
- def test_repr(self, elapsed_100_ns: ElapsedTime) -> None:
- """Test call function repr.
-
- Given an ElapsedTime, call repr would get an output can be used
- to re-create this ElapsedTime.
-
- Parameters
- ----------
- elapsed_100_ns : ElapsedTime
- Elapsed Time to be used to call ``repr``.
-
- """
- assert repr(elapsed_100_ns) == "ElapsedTime(nanoseconds=100)"
diff --git a/tests/test_stopwatch.py b/tests/test_stopwatch.py
deleted file mode 100644
index 1828b1b..0000000
--- a/tests/test_stopwatch.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""A collection of tests for class ``Stopwatch``."""
-
-# pylint: disable=no-self-use
-
-from __future__ import annotations
-
-from time import perf_counter_ns, process_time_ns
-from typing import TYPE_CHECKING
-
-from timerun import ElapsedTime, Stopwatch
-
-if TYPE_CHECKING:
- from collections.abc import Callable
- from contextlib import AbstractContextManager
-
-
-class TestInit:
- """Test suite for stopwatch initialization."""
-
- def test_include_sleep(self) -> None:
- """Test initialize stopwatch take sleep in to count."""
- stopwatch: Stopwatch = Stopwatch(count_sleep=True)
- assert (
- stopwatch._clock # pylint: disable=protected-access # noqa: SLF001
- == perf_counter_ns
- )
-
- def test_exclude_sleep(self) -> None:
- """Test initialize stopwatch do not take sleep in to count."""
- stopwatch: Stopwatch = Stopwatch(count_sleep=False)
- assert (
- stopwatch._clock # pylint: disable=protected-access # noqa: SLF001
- == process_time_ns
- )
-
- def test_default_measurer(self) -> None:
- """Test initialize stopwatch without arguments."""
- default: Stopwatch = Stopwatch()
- include: Stopwatch = Stopwatch(count_sleep=True)
- assert (
- default._clock # pylint: disable=protected-access # noqa: SLF001
- == include._clock # pylint: disable=protected-access # noqa: SLF001
- )
-
-
-class TestReset: # pylint: disable=too-few-public-methods
- """Test suite for starting stopwatch."""
-
- def test_reset(
- self,
- patch_clock: Callable[[int], AbstractContextManager[None]],
- stopwatch: Stopwatch,
- ) -> None:
- """Test to reset a stopwatch.
-
- Expected to have a stopwatch whose `_start` attribute is not
- ``1``, but been reset to ``1`` after call ``reset`` method.
-
- Parameters
- ----------
- patch_clock : Callable
- Patcher has been used to set the starting time at ``1``.
- stopwatch : Stopwatch
- A started Stopwatch, which will be reset.
-
- """
- assert stopwatch._start != 1 # pylint: disable=protected-access # noqa: SLF001
- with patch_clock(1):
- stopwatch.reset()
- assert stopwatch._start == 1 # pylint: disable=protected-access # noqa: SLF001
-
-
-class TestSplit:
- """Test suite for split method in stopwatch."""
-
- def test_calculation(
- self,
- patch_clock: Callable[[int], AbstractContextManager[None]],
- stopwatch: Stopwatch,
- elapsed_100_ns: ElapsedTime,
- ) -> None:
- """Test elapsed time calculation.
-
- The stopwatch has been started at time ``0``. With patching
- clock time to ``100``, the captured elapsed time should be
- ``100`` nanoseconds.
-
- Parameters
- ----------
- patch_clock : Callable
- Patcher has been used to set the clock time.
- stopwatch : Stopwatch
- A stopwatch started at time ``0``.
- elapsed_100_ns : ElapsedTime
- Elapsed Time of 100 nanoseconds.
-
- """
- assert not stopwatch._start # pylint: disable=protected-access # noqa: SLF001
-
- with patch_clock(100):
- elapsed: ElapsedTime = stopwatch.split()
- assert elapsed == elapsed_100_ns
-
- def test_split_multiple_times(
- self,
- patch_clock: Callable[[int], AbstractContextManager[None]],
- stopwatch: Stopwatch,
- elapsed_100_ns: ElapsedTime,
- elapsed_1_ms: ElapsedTime,
- ) -> None:
- """Test call split method multiple times.
-
- The stopwatch has been started at time ``0``. With patching
- clock time to ``100``, the first captured elapsed time should be
- ``100`` nanoseconds. Then, patching clock time to ``1000``, the
- second captured elapsed time should be ``1000`` nanoseconds.
-
- Parameters
- ----------
- patch_clock : Callable
- Patcher has been used to set the clock time.
- stopwatch : Stopwatch
- A stopwatch started at time ``0``.
- elapsed_100_ns : ElapsedTime
- Elapsed Time of 100 nanoseconds.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
-
- """
- assert not stopwatch._start # pylint: disable=protected-access # noqa: SLF001
-
- with patch_clock(100):
- first_elapsed: ElapsedTime = stopwatch.split()
- assert first_elapsed == elapsed_100_ns
-
- with patch_clock(1000):
- second_elapsed: ElapsedTime = stopwatch.split()
- assert second_elapsed == elapsed_1_ms
diff --git a/tests/test_timer.py b/tests/test_timer.py
deleted file mode 100644
index af8fd96..0000000
--- a/tests/test_timer.py
+++ /dev/null
@@ -1,482 +0,0 @@
-"""A collection of tests for class ``Timer``."""
-
-# pylint: disable=no-self-use
-
-from __future__ import annotations
-
-import asyncio
-from typing import TYPE_CHECKING, cast
-
-import pytest
-
-from timerun import ElapsedTime, NoDurationCapturedError, Timer
-
-if TYPE_CHECKING:
- from collections.abc import AsyncGenerator, Awaitable, Callable, Iterable
- from contextlib import AbstractContextManager
-
-# =========================================================================== #
-# Test suite for using Timer as a context manager. #
-# =========================================================================== #
-
-
-def test_use_timer_as_context_manager_single_run(
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_1_ms: ElapsedTime,
-) -> None:
- """Test using it as a context manager.
-
- Test using the timer and ``with`` to capture the duration time
- for code block.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
-
- """
- with patch_split([1000]), timer:
- pass
-
- assert timer.duration == elapsed_1_ms
-
-
-def test_use_timer_as_context_manager_multiple_run(
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_100_ns: ElapsedTime,
- elapsed_1_ms: ElapsedTime,
- elapsed_1_pt_5_ms: ElapsedTime,
-) -> None:
- """Test run multiple times with the same timer.
-
- Test run timer using ``with`` ``3`` times and expected to see
- all three captured duration times.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_100_ns : ElapsedTime
- Elapsed Time of 100 nanoseconds.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
- elapsed_1_pt_5_ms : ElapsedTime
- Elapsed Time of 1.5 microseconds.
-
- """
- with patch_split([100, 1000, 1500]):
- for _ in range(3):
- with timer:
- pass
-
- assert timer.durations == (
- elapsed_100_ns,
- elapsed_1_ms,
- elapsed_1_pt_5_ms,
- )
-
-
-class TestAsDecorator:
- """Test suite for using Timer as a function decorator."""
-
- def test_single_run(
- self,
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_1_ms: ElapsedTime,
- ) -> None:
- """Test the function with a single run.
-
- Test run decorated function and expected to get the captured
- duration afterward.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
-
- """
-
- @timer
- def func() -> None:
- pass
-
- with patch_split([1000]):
- func()
- assert timer.duration == elapsed_1_ms
-
- def test_multiple_run( # pylint: disable=too-many-arguments,too-many-positional-arguments
- self,
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_100_ns: ElapsedTime,
- elapsed_1_ms: ElapsedTime,
- elapsed_1_pt_5_ms: ElapsedTime,
- ) -> None:
- """Test the function with multiple runs.
-
- Test run decorated function ``3`` times and expected to see all
- three captured duration times.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_100_ns : ElapsedTime
- Elapsed Time of 100 nanoseconds.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
- elapsed_1_pt_5_ms : ElapsedTime
- Elapsed Time of 1.5 microseconds.
-
- """
-
- @timer
- def func() -> None:
- pass
-
- with patch_split([100, 1000, 1500]):
- for _ in range(3):
- func()
-
- assert timer.durations == (
- elapsed_100_ns,
- elapsed_1_ms,
- elapsed_1_pt_5_ms,
- )
-
-
-class TestNoElapsedTimeCapturedException:
- """Test suite for NoElapsedTimeCaptured exception."""
-
- def test_access_duration_attr_before_run(self, timer: Timer) -> None:
- """Test access duration attribute before capturing anything.
-
- Test tries to access duration attribute before capturing
- anything, expected to see ``NoDurationCapturedError`` exception.
-
- Parameters
- ----------
- timer : Timer
- A newly created Timer with unlimited storage size.
-
- """
- with pytest.raises(NoDurationCapturedError):
- _ = timer.duration
-
-
-class TestInit:
- """Test suite for Timerinitialization."""
-
- def test_use_customized_duration_list(self) -> None:
- """Test capture durations into an existing list."""
- durations: list[ElapsedTime] = []
- timer = Timer(storage=durations)
- assert (
- timer._durations is durations # pylint: disable=protected-access # noqa: SLF001
- )
-
- def test_max_storage_limitation(
- self,
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- elapsed_1_ms: ElapsedTime,
- elapsed_1_pt_5_ms: ElapsedTime,
- ) -> None:
- """Test to set the max number of durations been saved.
-
- Test timer with a max storage limitation at ``2``. Using it to
- catch ``3`` duration times and expected to see two latest only.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher been used to set the captured duration time.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
- elapsed_1_pt_5_ms : ElapsedTime
- Elapsed Time of 1.5 microseconds.
-
- """
- timer = Timer(max_len=2)
-
- with patch_split([100, 1000, 1500]):
- for _ in range(3):
- with timer:
- pass
-
- assert timer.durations == (elapsed_1_ms, elapsed_1_pt_5_ms)
-
-
-# =========================================================================== #
-# Test suite for using Timer as an async context manager. #
-# =========================================================================== #
-
-
-@pytest.mark.asyncio
-async def test_use_timer_as_async_context_manager_single_run(
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_1_ms: ElapsedTime,
-) -> None:
- """Test using it as an async context manager.
-
- Test using the timer and ``async with`` to capture the duration time
- for async code block.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
-
- """
- with patch_split([1000]):
- async with timer:
- await asyncio.sleep(0)
-
- assert timer.duration == elapsed_1_ms
-
-
-@pytest.mark.asyncio
-async def test_use_timer_as_async_context_manager_multiple_run(
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_100_ns: ElapsedTime,
- elapsed_1_ms: ElapsedTime,
- elapsed_1_pt_5_ms: ElapsedTime,
-) -> None:
- """Test run multiple times with the same timer (async).
-
- Test run timer using ``async with`` ``3`` times and expected to see
- all three captured duration times.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_100_ns : ElapsedTime
- Elapsed Time of 100 nanoseconds.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
- elapsed_1_pt_5_ms : ElapsedTime
- Elapsed Time of 1.5 microseconds.
-
- """
- with patch_split([100, 1000, 1500]):
- for _ in range(3):
- async with timer:
- await asyncio.sleep(0)
-
- assert timer.durations == (
- elapsed_100_ns,
- elapsed_1_ms,
- elapsed_1_pt_5_ms,
- )
-
-
-class TestAsAsyncDecorator:
- """Test suite for using Timer as an async function decorator."""
-
- @pytest.mark.asyncio
- async def test_single_run(
- self,
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_1_ms: ElapsedTime,
- ) -> None:
- """Test the async function with a single run.
-
- Test run decorated async function and expected to get the captured
- duration afterward.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
-
- """
-
- @timer
- async def async_func() -> None:
- await asyncio.sleep(0)
-
- with patch_split([1000]):
- await cast("Awaitable[None]", async_func())
- assert timer.duration == elapsed_1_ms
-
- @pytest.mark.asyncio
- async def test_multiple_run( # pylint: disable=too-many-arguments,too-many-positional-arguments
- self,
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_100_ns: ElapsedTime,
- elapsed_1_ms: ElapsedTime,
- elapsed_1_pt_5_ms: ElapsedTime,
- ) -> None:
- """Test the async function with multiple runs.
-
- Test run decorated async function ``3`` times and expected to see all
- three captured duration times.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_100_ns : ElapsedTime
- Elapsed Time of 100 nanoseconds.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
- elapsed_1_pt_5_ms : ElapsedTime
- Elapsed Time of 1.5 microseconds.
-
- """
-
- @timer
- async def async_func() -> None:
- await asyncio.sleep(0)
-
- with patch_split([100, 1000, 1500]):
- for _ in range(3):
- await cast("Awaitable[None]", async_func())
-
- assert timer.durations == (
- elapsed_100_ns,
- elapsed_1_ms,
- elapsed_1_pt_5_ms,
- )
-
-
-class TestAsAsyncGeneratorDecorator:
- """Test suite for using Timer as an async generator function decorator."""
-
- @pytest.mark.asyncio
- async def test_single_run(
- self,
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_1_ms: ElapsedTime,
- ) -> None:
- """Test the async generator function with a single run.
-
- Test run decorated async generator function and expected to get the
- captured duration afterward.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
-
- """
-
- @timer
- async def async_gen_func() -> AsyncGenerator[int]:
- """Async generator function for testing.
-
- Yields
- ------
- int
- Sequential integers for testing.
-
- """
- await asyncio.sleep(0)
- yield 1
- await asyncio.sleep(0)
- yield 2
-
- with patch_split([1000]):
- items: list[int] = [
- item
- async for item in cast(
- "AsyncGenerator[int]",
- async_gen_func(),
- )
- ]
-
- assert items == [1, 2]
- assert timer.duration == elapsed_1_ms
-
- @pytest.mark.asyncio
- async def test_multiple_run( # pylint: disable=too-many-arguments,too-many-positional-arguments
- self,
- patch_split: Callable[[Iterable[int]], AbstractContextManager[None]],
- timer: Timer,
- elapsed_100_ns: ElapsedTime,
- elapsed_1_ms: ElapsedTime,
- elapsed_1_pt_5_ms: ElapsedTime,
- ) -> None:
- """Test the async generator function with multiple runs.
-
- Test run decorated async generator function ``3`` times and expected
- to see all three captured duration times.
-
- Parameters
- ----------
- patch_split : Callable
- Patcher has been used to set the captured duration time.
- timer : Timer
- A newly created Timer with unlimited storage size.
- elapsed_100_ns : ElapsedTime
- Elapsed Time of 100 nanoseconds.
- elapsed_1_ms : ElapsedTime
- Elapsed Time of 1 microsecond.
- elapsed_1_pt_5_ms : ElapsedTime
- Elapsed Time of 1.5 microseconds.
-
- """
-
- @timer
- async def async_gen_func() -> AsyncGenerator[int]:
- """Async generator function for testing.
-
- Yields
- ------
- int
- Sequential integers for testing.
-
- """
- await asyncio.sleep(0)
- yield 1
-
- with patch_split([100, 1000, 1500]):
- for _ in range(3):
- async_gen: AsyncGenerator[int] = cast(
- "AsyncGenerator[int]",
- async_gen_func(),
- )
- async for _ in async_gen:
- pass
-
- assert timer.durations == (
- elapsed_100_ns,
- elapsed_1_ms,
- elapsed_1_pt_5_ms,
- )
diff --git a/timerun.py b/timerun.py
index 68aa05b..1ca817d 100644
--- a/timerun.py
+++ b/timerun.py
@@ -1,450 +1,3 @@
-"""TimeRun is a Python library for elapsed time measurement."""
+"""TimeRun is a Python library for time measurements."""
-from __future__ import annotations
-
-from collections import deque
-from contextlib import ContextDecorator
-from dataclasses import dataclass
-from datetime import timedelta
-from inspect import isasyncgenfunction, iscoroutinefunction
-from time import perf_counter_ns, process_time_ns
-from typing import TYPE_CHECKING, Protocol, TypeVar, cast
-
-if TYPE_CHECKING:
- from collections.abc import (
- AsyncGenerator,
- Awaitable,
- Callable,
- Iterator,
- )
-
-__all__: tuple[str, ...] = ( # noqa: RUF022
- # -- Core --
- "ElapsedTime",
- "Stopwatch",
- "Timer",
- # -- Exceptions --
- "NoDurationCapturedError",
- "TimeRunError",
-)
-
-__version__: str = "0.4.0"
-
-
-# =========================================================================== #
-# Type Protocols #
-# --------------------------------------------------------------------------- #
-# #
-# The Timer class needs to store captured durations in a flexible way that #
-# allows users to provide their own storage implementations. #
-# #
-# Instead of restricting to specific types like List or Deque, timerun uses a #
-# protocol to define the required interface for duration storage. #
-# #
-# This allows users to provide custom storage backends (database, file, #
-# memory-mapped, etc.) as long as they implement the basic sequence methods. #
-# #
-# =========================================================================== #
-
-T = TypeVar("T")
-
-
-class AppendableSequence(Protocol[T]):
- """Protocol for sequences that support appending and indexing."""
-
- def append(self, _item: T) -> None:
- """Add an item to the sequence."""
-
- def __getitem__(self, _index: int) -> T:
- """Get item by index (supports negative indexing)."""
-
- def __len__(self) -> int:
- """Return number of items in the sequence."""
-
- def __iter__(self) -> Iterator[T]:
- """Iterate over items in the sequence."""
-
-
-# =========================================================================== #
-# Exceptions #
-# --------------------------------------------------------------------------- #
-# #
-# Invalid behaviors when using the classes and functions in timerun should be #
-# converted to an exception and raised. #
-# #
-# To make exceptions easier to manage, all exceptions created for the timerun #
-# library will extend from a base exception ``TimeRunException``. #
-# #
-# =========================================================================== #
-
-
-class TimeRunError(Exception):
- """Base exception for TimeRun."""
-
-
-class NoDurationCapturedError(TimeRunError, AttributeError):
- """No Duration Captured Exception."""
-
- def __init__(self) -> None:
- """Initialize the exception."""
- super().__init__(
- "No duration available. This is likely because the Timer has not "
- "been used to measure any code blocks or functions yet.",
- )
-
-
-# =========================================================================== #
-# Elapsed Time #
-# --------------------------------------------------------------------------- #
-# #
-# In Python, class datetime.timedelta is a duration expressing the difference #
-# between two date, time, or datetime instances to microsecond resolution. #
-# #
-# However, the highest available resolution measurer provided by Python can #
-# measure short durations in nanoseconds. #
-# #
-# Thus, there is a need to have a class that can represent elapsed time at a #
-# higher resolution (nanoseconds) for the best accuracy. #
-# #
-# =========================================================================== #
-
-
-@dataclass(init=True, repr=False, eq=True, order=True, frozen=True)
-class ElapsedTime:
- """An immutable object representing elapsed time in nanoseconds.
-
- Attributes
- ----------
- nanoseconds : int
- The elapsed time expressed in nanoseconds.
- timedelta : timedelta
- The duration as a timedelta type. This attribute may not
- maintain the original accuracy.
-
- Parameters
- ----------
- nanoseconds : int
- The elapsed time expressed in nanoseconds.
-
- Examples
- --------
- >>> t = ElapsedTime(10)
- >>> t
- ElapsedTime(nanoseconds=10)
- >>> print(t)
- 0:00:00.000000010
-
- """
-
- __slots__ = ["nanoseconds"]
-
- nanoseconds: int
-
- def __str__(self) -> str: # type: ignore[explicit-override]
- """Return the string representation of the elapsed time."""
- integer_part = timedelta(seconds=self.nanoseconds // int(1e9))
-
- if not (decimal_part := self.nanoseconds % int(1e9)):
- return str(integer_part)
- return f"{integer_part}.{decimal_part:09}"
-
- def __repr__(self) -> str: # type: ignore[explicit-override]
- """Return the representation of the elapsed time."""
- return f"ElapsedTime(nanoseconds={self.nanoseconds})"
-
- @property
- def timedelta(self) -> timedelta:
- """The duration converted from nanoseconds to a timedelta type."""
- return timedelta(microseconds=self.nanoseconds // int(1e3))
-
-
-# =========================================================================== #
-# Stopwatch #
-# --------------------------------------------------------------------------- #
-# #
-# Based on PEP 418, Python provides performance counter and process time #
-# functions to measure a short duration of time elapsed. #
-# #
-# Based on PEP 564, Python got new time functions with nanosecond resolution. #
-# #
-# Ref: #
-# * https://www.python.org/dev/peps/pep-0418/ #
-# * https://www.python.org/dev/peps/pep-0564/ #
-# #
-# =========================================================================== #
-
-
-class Stopwatch:
- """A stopwatch with the highest available resolution (in nanoseconds).
-
- It measures elapsed time. It can be set to include or exclude the
- sleeping time.
-
- Parameters
- ----------
- count_sleep : bool, optional
- An optional boolean variable expressing whether the time elapsed
- during sleep should be counted or not. Defaults to True if None.
-
- Methods
- -------
- reset
- Restart the stopwatch by setting the starting time to the
- current time.
- split
- Get the elapsed time between now and the starting time.
-
- Examples
- --------
- >>> stopwatch = Stopwatch()
- >>> stopwatch.reset()
- >>> stopwatch.split()
- ElapsedTime(nanoseconds=100)
-
- """
-
- __slots__ = ["_clock", "_start"]
-
- def __init__(self, *, count_sleep: bool | None = None) -> None:
- """Initialize the stopwatch."""
- if count_sleep is None:
- count_sleep = True
-
- self._clock: Callable[[], int] = (
- perf_counter_ns if count_sleep else process_time_ns
- )
-
- self._start: int = self._clock()
-
- def reset(self) -> None:
- """Reset the starting time to the current time."""
- self._start = self._clock()
-
- def split(self) -> ElapsedTime:
- """Get the elapsed time between now and the starting time.
-
- Returns
- -------
- ElapsedTime
- The elapsed time captured by the stopwatch.
-
- """
- return ElapsedTime(self._clock() - self._start)
-
-
-# =========================================================================== #
-# Timer #
-# --------------------------------------------------------------------------- #
-# #
-# For most use cases, the user would just want to measure the elapsed time #
-# for a run of a code block or function. #
-# #
-# It would be cleaner and more elegant if the user can measure a function by #
-# using a decorator and measure a code block by using a context manager. #
-# #
-# =========================================================================== #
-
-
-class Timer(ContextDecorator):
- """A context decorator that can capture and save the measured elapsed time.
-
- Attributes
- ----------
- durations : Tuple[ElapsedTime, ...]
- The captured duration times as a tuple.
- duration : ElapsedTime
- The last captured duration time.
-
- Parameters
- ----------
- count_sleep : bool, optional
- An optional boolean variable expressing whether the time elapsed
- during sleep should be counted or not. Defaults to True if None.
- storage : AppendableSequence[ElapsedTime], optional
- A sequence-like object used to save captured results.
- If provided, this storage will be used directly and max_len will
- be ignored. If not provided, a new deque will be created.
- max_len : int, optional
- The maximum length for the capturing storage. Defaults to None,
- which will create storage with infinite length.
-
- Examples
- --------
- >>> import time
- >>> with Timer() as timer:
- ... time.sleep(0.1) # your code here
- >>> print(timer.duration)
-
- >>> import time
- >>> timer = Timer()
- >>> @timer
- ... def func():
- ... time.sleep(0.1) # your code here
- >>> func()
- >>> print(timer.duration)
-
- >>> import asyncio
- >>> timer = Timer()
- >>> @timer
- ... async def async_func():
- ... await asyncio.sleep(0.1) # your code here
- >>> asyncio.run(async_func())
- >>> print(timer.duration)
-
- >>> async def async_code():
- ... async with Timer() as timer:
- ... await asyncio.sleep(0.1) # your code here
- ... print(timer.duration)
- >>> asyncio.run(async_code())
-
- """
-
- __slots__ = ["_durations", "_stopwatch"]
-
- def __init__(
- self,
- *,
- count_sleep: bool | None = None,
- storage: AppendableSequence[ElapsedTime] | None = None,
- max_len: int | None = None,
- ) -> None:
- """Initialize the timer."""
- self._stopwatch: Stopwatch = Stopwatch(count_sleep=count_sleep)
- self._durations: AppendableSequence[ElapsedTime] = (
- storage if storage is not None else deque(maxlen=max_len)
- )
-
- def __enter__(self) -> Timer: # noqa: PYI034
- """Start the timer."""
- self._stopwatch.reset()
- return self
-
- def __exit__(self, *_: object) -> None:
- """Stop the timer and save the duration."""
- duration: ElapsedTime = self._stopwatch.split()
- self._durations.append(duration)
-
- async def __aenter__(self) -> Timer: # noqa: PYI034
- """Start the timer (async context manager)."""
- self._stopwatch.reset()
- return self
-
- async def __aexit__(self, *_: object) -> None:
- """Stop the timer and save the duration (async context manager)."""
- duration: ElapsedTime = self._stopwatch.split()
- self._durations.append(duration)
-
- def _wrap_async_function( # type: ignore[explicit-any]
- self,
- func: Callable[..., Awaitable[object]],
- ) -> Callable[..., Awaitable[object]]:
- """Wrap an async function to measure its execution time."""
-
- async def async_wrapper(*args: object, **kwargs: object) -> object:
- """Wrap async function execution with timing.
-
- Parameters
- ----------
- *args : object
- Positional arguments passed to the wrapped function.
- **kwargs : object
- Keyword arguments passed to the wrapped function.
-
- Returns
- -------
- object
- The result of the wrapped async function.
-
- """
- async with self:
- return await func(*args, **kwargs)
-
- return async_wrapper
-
- def _wrap_async_generator( # type: ignore[explicit-any]
- self,
- func: Callable[..., object],
- ) -> Callable[..., AsyncGenerator[object]]:
- """Wrap an async generator function to measure its execution time."""
-
- async def async_gen_wrapper(
- *args: object,
- **kwargs: object,
- ) -> AsyncGenerator[object]:
- """Wrap async generator function execution with timing.
-
- Parameters
- ----------
- *args : object
- Positional arguments passed to the wrapped function.
- **kwargs : object
- Keyword arguments passed to the wrapped function.
-
- Yields
- ------
- object
- Items yielded from the wrapped async generator function.
-
- """
- async with self:
- async for item in cast(
- "AsyncGenerator[object]",
- func(*args, **kwargs),
- ):
- yield item
-
- return async_gen_wrapper
-
- def __call__( # type: ignore[override,explicit-override,explicit-any]
- self,
- func: Callable[..., object] | Callable[..., Awaitable[object]],
- ) -> Callable[..., object] | Callable[..., Awaitable[object]]:
- """Wrap a function (sync or async) to measure its execution time.
-
- Parameters
- ----------
- func : Callable
- The function to be decorated (can be sync or async).
-
- Returns
- -------
- Callable
- A wrapped function that measures execution time.
-
- """
- if iscoroutinefunction(func):
- return self._wrap_async_function(func)
- if isasyncgenfunction(func):
- return self._wrap_async_generator(func)
- return super().__call__(func)
-
- @property
- def durations(self) -> tuple[ElapsedTime, ...]:
- """The captured duration times as a tuple.
-
- A tuple containing all captured duration times, that can be
- unpacked into multiple variables.
-
- Examples
- --------
- >>> first_duration, second_duration = timer.durations
-
- """
- return tuple(self._durations)
-
- @property
- def duration(self) -> ElapsedTime:
- """The last captured duration time.
-
- Raises
- ------
- NoDurationCapturedError
- Error that occurs when accessing an empty durations list,
- which is usually because the measurer has not been triggered
- yet.
-
- """
- try:
- return self._durations[-1]
- except IndexError as error:
- raise NoDurationCapturedError from error
+__version__: str = "0.5.0"
From 163d200ee5afc343a3d26926cfbe89c240619edf Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Wed, 4 Feb 2026 19:19:39 -0500
Subject: [PATCH 2/8] feat: define time span
---
features/steps/time_span_steps.py | 77 +++++++++++++++++++++++++++++++
features/time_span.feature | 34 ++++++++++++++
features/version.feature | 7 ++-
timerun.py | 45 ++++++++++++++++++
4 files changed, 161 insertions(+), 2 deletions(-)
create mode 100644 features/steps/time_span_steps.py
create mode 100644 features/time_span.feature
diff --git a/features/steps/time_span_steps.py b/features/steps/time_span_steps.py
new file mode 100644
index 0000000..2c706c2
--- /dev/null
+++ b/features/steps/time_span_steps.py
@@ -0,0 +1,77 @@
+"""Step definitions for the time span feature."""
+
+import operator
+from datetime import timedelta
+
+import parse
+from behave import given, register_type, then
+from behave.runner import Context
+
+import timerun
+
+# Gherkin relation phrases to operator functions for span comparison.
+RELATION_OPERATORS = {
+ "equals": operator.eq,
+ "does not equal": operator.ne,
+ "is less than": operator.lt,
+ "is greater than": operator.gt,
+ "is less than or equal to": operator.le,
+ "is greater than or equal to": operator.ge,
+}
+
+register_type(
+ Relation=parse.with_pattern(r"|".join(RELATION_OPERATORS))(
+ lambda text: text.strip(),
+ ),
+)
+
+
+@given("a time span from {start:n} to {end:n}")
+def step_given_time_span(context: Context, start: int, end: int) -> None:
+ """Create a TimeSpan(start, end) and store as context.time_span."""
+ context.time_span = timerun.TimeSpan(start=start, end=end)
+
+
+@given("span {name:w} of {duration:n} nanoseconds")
+def step_given_span_of_duration(
+ context: Context,
+ name: str,
+ duration: int,
+) -> None:
+ """Create a TimeSpan(0, duration) and store as context.time_span_."""
+ span = timerun.TimeSpan(start=0, end=duration)
+ setattr(context, f"time_span_{name.lower()}", span)
+
+
+@then("the duration is {expected:n} nanoseconds")
+def step_duration_is(context: Context, expected: int) -> None:
+ """Assert context.time_span.duration equals expected."""
+ assert context.time_span.duration == expected
+
+
+@then("the timedelta is {seconds:f} seconds in standard Python timedelta type")
+def step_timedelta_is_seconds_standard_type(
+ context: Context,
+ seconds: float,
+) -> None:
+ """Assert time_span.timedelta is timedelta and equals given seconds."""
+ result = context.time_span.timedelta
+ assert isinstance(result, timedelta)
+ assert result == timedelta(seconds=seconds)
+
+
+@then("the {which:w} value is {expected:n}")
+def step_value_is(
+ context: Context,
+ which: str,
+ expected: int,
+) -> None:
+ """Assert time_span.start or time_span.end equals expected."""
+ assert getattr(context.time_span, which) == expected
+
+
+@then("time span A {relation:Relation} time span B")
+def step_compare_a_b(context: Context, relation: str) -> None:
+ """Assert time_span_a and time_span_b satisfy the given relation."""
+ op = RELATION_OPERATORS[relation]
+ assert op(context.time_span_a, context.time_span_b)
diff --git a/features/time_span.feature b/features/time_span.feature
new file mode 100644
index 0000000..1e581d7
--- /dev/null
+++ b/features/time_span.feature
@@ -0,0 +1,34 @@
+Feature: Time span
+ As someone measuring how long something takes,
+ I want a span of time that tells me how long it took,
+ so that I can:
+ - compare which took longer
+ - see the duration in a familiar form (e.g. seconds)
+ - tell which started or ended first
+
+ Scenario: I can see how long the span is
+ Given a time span from 0 to 1,000,000
+ Then the duration is 1,000,000 nanoseconds
+
+ Scenario: I can see the duration in a standard form
+ Given a time span from 0 to 2,500,000,000
+ Then the timedelta is 2.5 seconds in standard Python timedelta type
+
+ Scenario Outline: I can compare spans by duration
+    Given span A of <duration_a> nanoseconds
+    And span B of <duration_b> nanoseconds
+    Then time span A <relation> time span B
+
+ Examples:
+ | duration_a | duration_b | relation |
+ | 1,000,000 | 2,000,000 | is less than |
+ | 3,000,000 | 1,000,000 | is greater than |
+ | 1,000,000 | 1,000,000 | equals |
+ | 1,000,000 | 2,000,000 | does not equal |
+ | 1,000,000 | 1,000,000 | is less than or equal to |
+ | 2,000,000 | 1,000,000 | is greater than or equal to |
+
+ Scenario: I can read start and end
+ Given a time span from 1,000 to 2,000
+ Then the start value is 1,000
+ And the end value is 2,000
diff --git a/features/version.feature b/features/version.feature
index 63824ed..0e04679 100644
--- a/features/version.feature
+++ b/features/version.feature
@@ -2,9 +2,12 @@ Feature: Package version
As a user or tool integrating with timerun,
I want to read the package version programmatically,
- so that I can check compatibility, display it to users, or use it in automation.
+ so that I can:
+ - check compatibility
+ - display it to users
+ - use it in automation
- Scenario: Package exposes a readable version
+ Scenario: I can read the package version
When I read the package version
Then the package has a version
And the version is a non-empty string
diff --git a/timerun.py b/timerun.py
index 1ca817d..71caec5 100644
--- a/timerun.py
+++ b/timerun.py
@@ -1,3 +1,48 @@
"""TimeRun is a Python library for time measurements."""
+from dataclasses import dataclass, field
+from datetime import timedelta
+
__version__: str = "0.5.0"
+
+
+@dataclass(order=True, frozen=True)
+class TimeSpan:
+ """A time interval with start and end timestamps.
+
+ Instances are immutable. Equality and ordering are based only on
+ ``duration``; ``start`` and ``end`` are excluded from comparison.
+
+ Attributes
+ ----------
+ duration : int
+ Elapsed time in nanoseconds (end - start). Set in ``__post_init__``,
+ not a constructor argument. Used for equality, ordering, and hashing.
+ start : int
+ Start timestamp in nanoseconds.
+ end : int
+ End timestamp in nanoseconds.
+ timedelta : timedelta
+ Read-only. Duration as a ``datetime.timedelta``; nanoseconds are
+ converted to whole microseconds (``duration // 1000``) to match
+ timedelta's resolution.
+
+ Notes
+ -----
+ ``start`` and ``end`` use ``field(compare=False)``, so two spans with
+ the same length compare equal even if their intervals differ.
+
+ """
+
+ duration: int = field(init=False)
+ start: int = field(compare=False)
+ end: int = field(compare=False)
+
+ def __post_init__(self) -> None:
+ """Set duration to end minus start (nanoseconds)."""
+ object.__setattr__(self, "duration", self.end - self.start)
+
+ @property
+ def timedelta(self) -> timedelta:
+ """Duration as a datetime.timedelta."""
+ return timedelta(microseconds=self.duration // 1000)
From 4859f6d6f5d72abea0563fff832f4afe0744fba7 Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Wed, 4 Feb 2026 22:34:45 -0500
Subject: [PATCH 3/8] chore: shorten test output with behave progress formatter
---
.github/workflows/ci.yaml | 2 +-
CONTRIBUTING.md | 14 +++++++++-----
Makefile | 12 +++++++++++-
3 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 6bf5b09..9ca0660 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -46,7 +46,7 @@ jobs:
- name: Run tests with coverage
run: |
- coverage run --source=timerun -m behave
+ coverage run --source=timerun -m behave -f progress
coverage report
coverage xml
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f85bf83..cc8357b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,15 +85,19 @@ TimeRun uses **behavior-driven development (BDD)** with [behave](https://behave.
### Run tests
-| Command | Description |
-|----------------|--------------------------------------|
-| `make test` | Run BDD suite with coverage report |
-| `behave` | Run BDD suite only (no coverage) |
+| Command | Description |
+|--------------------|----------------------------------------------------------------|
+| `make test` | Run BDD suite with progress + summary + coverage (default) |
+| `make test-summary`| Summary and coverage only (minimal output) |
+| `make test-verbose`| Full scenario/step output (use when debugging failures) |
+| `behave` | Run BDD suite only (no coverage) |
### Run coverage manually
```bash
-coverage run --source=timerun -m behave
+coverage run --source=timerun -m behave # full output
+coverage run --source=timerun -m behave -f progress # progress + summary
+coverage run --source=timerun -m behave -f null # summary only
coverage report --show-missing
```
diff --git a/Makefile b/Makefile
index 57ba0ce..8849223 100644
--- a/Makefile
+++ b/Makefile
@@ -22,7 +22,17 @@ init: ## Set up Python development environment with pre-commit hooks
@echo "Development environment ready! To activate it, run: source $(VENV_DIR)/bin/activate"
.PHONY: test
-test: ## Run BDD tests with behave and display coverage
+test: ## Run BDD tests (progress + summary + coverage)
+ @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave -f progress
+ @"$(VENV_DIR)/bin/coverage" report --show-missing
+
+.PHONY: test-summary
+test-summary: ## Run BDD tests (summary and coverage only; use 'make test' to see which feature failed)
+ @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave -f null
+ @"$(VENV_DIR)/bin/coverage" report --show-missing
+
+.PHONY: test-verbose
+test-verbose: ## Run BDD tests with full scenario/step output (for debugging failures)
@"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave
@"$(VENV_DIR)/bin/coverage" report --show-missing
From 47406ab4bc620b4e75619f126e8193f2fc5954b4 Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Wed, 4 Feb 2026 23:03:18 -0500
Subject: [PATCH 4/8] feat: define measurement record
---
features/measurement_record.feature | 30 +++++++++
features/steps/measurement_record_steps.py | 76 ++++++++++++++++++++++
features/steps/time_span_steps.py | 24 ++++---
features/steps/version_steps.py | 5 ++
timerun.py | 24 +++++++
5 files changed, 150 insertions(+), 9 deletions(-)
create mode 100644 features/measurement_record.feature
create mode 100644 features/steps/measurement_record_steps.py
diff --git a/features/measurement_record.feature b/features/measurement_record.feature
new file mode 100644
index 0000000..eb79650
--- /dev/null
+++ b/features/measurement_record.feature
@@ -0,0 +1,30 @@
+Feature: Measurement record
+
+ As someone measuring how long something takes,
+ I want a measurement record with wall time, CPU time, and optional metadata,
+ so that I can:
+ - store and pass around timing results in a structured way
+ - read wall and CPU durations from that record
+ - attach metadata (e.g. tags, run id) to label or correlate runs
+
+ Scenario: I can create a measurement from wall and CPU time spans and see the durations
+ Given a wall time span from 0 to 1,000,000
+ And a CPU time span from 0 to 500,000
+ When I create a measurement from the wall time span and the CPU time span
+ Then the measurement's wall time duration is 1,000,000 nanoseconds
+ And the measurement's CPU time duration is 500,000 nanoseconds
+
+ Scenario: I can see that a new measurement's metadata is an empty dict
+ Given a wall time span from 0 to 1
+ And a CPU time span from 0 to 1
+ When I create a measurement from the wall time span and the CPU time span
+ Then the measurement's metadata is an empty dict
+
+ Scenario: I can set metadata on a measurement and read it back
+ Given a wall time span from 0 to 1
+ And a CPU time span from 0 to 1
+ When I create a measurement from the wall time span and the CPU time span
+ And the metadata key "run_id" is set to "exp-1"
+ And the metadata key "tag" is set to "baseline"
+ Then the measurement's metadata key "run_id" is "exp-1"
+ And the measurement's metadata key "tag" is "baseline"
diff --git a/features/steps/measurement_record_steps.py b/features/steps/measurement_record_steps.py
new file mode 100644
index 0000000..046e5a5
--- /dev/null
+++ b/features/steps/measurement_record_steps.py
@@ -0,0 +1,76 @@
+"""Step definitions for the Measurement record feature."""
+
+from behave import given, then, when
+from behave.runner import Context
+
+import timerun
+
+# --- Given ---
+
+
+@given("a {kind} time span from {start:n} to {end:n}")
+def step_given_typed_time_span(
+ context: Context,
+ kind: str,
+ start: int,
+ end: int,
+) -> None:
+    """Store a TimeSpan as context.<kind>_time_span (kind: wall/CPU)."""
+ span = timerun.TimeSpan(start=start, end=end)
+ setattr(context, f"{kind.lower()}_time_span", span)
+
+
+# --- When ---
+
+
+@when("I create a measurement from the wall time span and the CPU time span")
+def step_create_measurement_from_spans(context: Context) -> None:
+ """Build Measurement from wall/cpu spans; set context.measurement."""
+ context.measurement = timerun.Measurement(
+ wall_time=context.wall_time_span,
+ cpu_time=context.cpu_time_span,
+ )
+
+
+@when('the metadata key "{key}" is set to "{value}"')
+def step_measurement_metadata_key_set(
+ context: Context,
+ key: str,
+ value: str,
+) -> None:
+ """Set the measurement's metadata[key] to value."""
+ context.measurement.metadata[key] = value
+
+
+# --- Then ---
+
+
+@then("the measurement's {kind} time duration is {expected:n} nanoseconds")
+def step_measurement_time_duration(
+ context: Context,
+ kind: str,
+ expected: int,
+) -> None:
+ """Assert measurement wall_time or cpu_time duration equals expected."""
+ assert (
+ getattr(context.measurement, f"{kind.lower()}_time").duration
+ == expected
+ )
+
+
+@then("the measurement's metadata is an empty dict")
+def step_measurement_metadata_empty_dict(context: Context) -> None:
+ """Assert the measurement's metadata is a dict and empty."""
+ metadata = context.measurement.metadata
+ assert isinstance(metadata, dict)
+ assert not metadata
+
+
+@then('the measurement\'s metadata key "{key}" is "{value}"')
+def step_measurement_metadata_key_value(
+ context: Context,
+ key: str,
+ value: str,
+) -> None:
+ """Assert the measurement's metadata[key] equals value."""
+ assert context.measurement.metadata[key] == value
diff --git a/features/steps/time_span_steps.py b/features/steps/time_span_steps.py
index 2c706c2..d604380 100644
--- a/features/steps/time_span_steps.py
+++ b/features/steps/time_span_steps.py
@@ -26,6 +26,9 @@
)
+# --- Given ---
+
+
@given("a time span from {start:n} to {end:n}")
def step_given_time_span(context: Context, start: int, end: int) -> None:
"""Create a TimeSpan(start, end) and store as context.time_span."""
@@ -43,8 +46,11 @@ def step_given_span_of_duration(
setattr(context, f"time_span_{name.lower()}", span)
+# --- Then ---
+
+
@then("the duration is {expected:n} nanoseconds")
-def step_duration_is(context: Context, expected: int) -> None:
+def step_time_span_duration_is(context: Context, expected: int) -> None:
"""Assert context.time_span.duration equals expected."""
assert context.time_span.duration == expected
@@ -60,18 +66,18 @@ def step_timedelta_is_seconds_standard_type(
assert result == timedelta(seconds=seconds)
+@then("time span A {relation:Relation} time span B")
+def step_time_span_a_relation_b(context: Context, relation: str) -> None:
+ """Assert time_span_a and time_span_b satisfy the given relation."""
+ op = RELATION_OPERATORS[relation]
+ assert op(context.time_span_a, context.time_span_b)
+
+
@then("the {which:w} value is {expected:n}")
-def step_value_is(
+def step_time_span_value_is(
context: Context,
which: str,
expected: int,
) -> None:
"""Assert time_span.start or time_span.end equals expected."""
assert getattr(context.time_span, which) == expected
-
-
-@then("time span A {relation:Relation} time span B")
-def step_compare_a_b(context: Context, relation: str) -> None:
- """Assert time_span_a and time_span_b satisfy the given relation."""
- op = RELATION_OPERATORS[relation]
- assert op(context.time_span_a, context.time_span_b)
diff --git a/features/steps/version_steps.py b/features/steps/version_steps.py
index 5b1c65f..6277036 100644
--- a/features/steps/version_steps.py
+++ b/features/steps/version_steps.py
@@ -5,6 +5,8 @@
import timerun
+# --- When ---
+
@when("I read the package version")
def step_read_version(context: Context) -> None:
@@ -12,6 +14,9 @@ def step_read_version(context: Context) -> None:
context.version = getattr(timerun, "__version__", None)
+# --- Then ---
+
+
@then("the package has a version")
def step_package_has_version(context: Context) -> None:
"""Assert the package exposes a version."""
diff --git a/timerun.py b/timerun.py
index 71caec5..e371d5f 100644
--- a/timerun.py
+++ b/timerun.py
@@ -46,3 +46,27 @@ def __post_init__(self) -> None:
def timedelta(self) -> timedelta:
"""Duration as a datetime.timedelta."""
return timedelta(microseconds=self.duration // 1000)
+
+
+@dataclass
+class Measurement:
+    """A single measurement: wall time, CPU time, and optional metadata.
+
+ Stores one measurement only. Use this to collect the result of a single
+ timing run: wall-clock time, CPU time, and any user-defined metadata.
+
+ Attributes
+ ----------
+ wall_time : TimeSpan
+ Elapsed (wall-clock) time for the measurement.
+ cpu_time : TimeSpan
+ CPU time for the measurement.
+ metadata : dict
+ Optional key-value metadata (e.g., tags, run id). Defaults to ``{}``;
+ mutate in place to add or change entries.
+
+ """
+
+ wall_time: TimeSpan
+ cpu_time: TimeSpan
+ metadata: dict[object, object] = field(default_factory=dict)
From 44fe555573cf6e18e1683b8212ba7e94e6c3339a Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Sun, 15 Feb 2026 21:15:41 -0500
Subject: [PATCH 5/8] feat: add BlockTimer context manager to measure code
blocks
---
features/block_timing.feature | 78 ++++
...ent_record.feature => measurement.feature} | 21 +-
features/steps/block_timing_steps.py | 431 ++++++++++++++++++
features/steps/common_steps.py | 36 ++
...t_record_steps.py => measurement_steps.py} | 10 -
features/steps/time_span_steps.py | 14 +-
features/time_span.feature | 37 +-
features/version.feature | 7 +-
pyproject.toml | 3 +
timerun.py | 128 +++++-
10 files changed, 718 insertions(+), 47 deletions(-)
create mode 100644 features/block_timing.feature
rename features/{measurement_record.feature => measurement.feature} (60%)
create mode 100644 features/steps/block_timing_steps.py
create mode 100644 features/steps/common_steps.py
rename features/steps/{measurement_record_steps.py => measurement_steps.py} (86%)
diff --git a/features/block_timing.feature b/features/block_timing.feature
new file mode 100644
index 0000000..f95985f
--- /dev/null
+++ b/features/block_timing.feature
@@ -0,0 +1,78 @@
+Feature: Block timing
+
+ As someone measuring duration,
+ I want to time blocks of code (sync, async, or threaded),
+ so that I get per-task timings and can attach metadata.
+
+ # --- Basic timing: sync, async, CPU-bound ---
+
+ Scenario: Blocking sleep with `with` yields wall time and near-zero CPU time
+ Given a blocking operation that takes around 10,000,000 nanoseconds
+ When I measure the blocking operation using `with`
+ Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
+ And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
+
+ Scenario: Async sleep with `async with` yields wall time and near-zero CPU time
+ Given an async operation that takes around 10,000,000 nanoseconds
+ When I measure the async operation using `async with`
+ Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
+ And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
+
+ Scenario: CPU-bound block with `with` yields wall and CPU time close together
+ Given a CPU-bound operation that runs for around 10,000,000 nanoseconds
+ When I measure the CPU-bound operation using `with`
+ Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
+ And the measurement's CPU time duration is within the configured buffer of 10,000,000 nanoseconds
+ And the measurement's CPU time is close to wall time
+
+ # --- One BlockTimer, multiple blocks or threads ---
+
+ Scenario: Two threads with one BlockTimer yield one measurement per thread
+ Given each thread sleeps 5,000,000 nanoseconds
+ When I measure blocks from 2 threads using the same BlockTimer instance
+ Then each thread's measurement has wall time duration within the configured buffer of 5,000,000 nanoseconds
+ And the measurements are from different threads
+
+ Scenario: Two sequential blocks with one BlockTimer yield correct durations
+ Given the first block duration is 5,000,000 nanoseconds
+ And the second block duration is 10,000,000 nanoseconds
+ When I measure two sequential blocks with the same BlockTimer instance
+ Then the first measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds
+ And the second measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
+
+ Scenario: Nested blocks with one BlockTimer yield independent outer and inner times
+ Given the outer block duration is 20,000,000 nanoseconds
+ And the inner block duration is 5,000,000 nanoseconds
+ When I measure nested blocks with the same BlockTimer instance
+ Then the outer measurement's wall time duration is within the configured buffer of 25,000,000 nanoseconds
+ And the inner measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds
+ And the outer measurement's wall time duration is at least the inner measurement's wall time duration
+
+ # --- Metadata ---
+
+ Scenario: Initial metadata is carried on the yielded measurement
+ Given metadata run_id "exp-1" and tag "baseline"
+ When I measure a code block with that metadata
+ Then the measurement's metadata key "run_id" is "exp-1"
+ And the measurement's metadata key "tag" is "baseline"
+
+ Scenario: Metadata set in first block is not visible in second block (reused BlockTimer)
+ Given metadata run_id "same-run" and tag "original"
+ And I will add metadata key "extra" as "from_first_block" in the first block
+ When I measure two blocks with the same BlockTimer instance and that metadata
+ Then the first measurement's metadata key "extra" is "from_first_block"
+ And the second measurement's metadata key "run_id" is "same-run"
+ And the second measurement's metadata key "tag" is "original"
+ And the second measurement's metadata does not contain key "extra"
+
+ # --- Edge cases and errors ---
+
+ Scenario: Block that raises still yields measurement; exception propagates
+ When I measure a code block that raises an exception
+ Then the measurement's wall time duration is between 0 and 1,000,000,000 nanoseconds
+ And an exception was propagated to the caller
+
+ Scenario: __exit__ without __enter__ raises RuntimeError
+ When I call __exit__ on a BlockTimer instance without calling __enter__ first
+ Then a RuntimeError is raised
+ And the error message is "__exit__ called without a matching __enter__"
diff --git a/features/measurement_record.feature b/features/measurement.feature
similarity index 60%
rename from features/measurement_record.feature
rename to features/measurement.feature
index eb79650..337be64 100644
--- a/features/measurement_record.feature
+++ b/features/measurement.feature
@@ -1,26 +1,27 @@
-Feature: Measurement record
+Feature: Measurement
- As someone measuring how long something takes,
- I want a measurement record with wall time, CPU time, and optional metadata,
- so that I can:
- - store and pass around timing results in a structured way
- - read wall and CPU durations from that record
- - attach metadata (e.g. tags, run id) to label or correlate runs
+ As someone measuring duration,
+ I want a value with wall time, CPU time, and optional metadata,
+ so that I can store results and attach labels.
- Scenario: I can create a measurement from wall and CPU time spans and see the durations
+ # --- Creating a measurement ---
+
+ Scenario: Measurement from wall and CPU spans has both durations
Given a wall time span from 0 to 1,000,000
And a CPU time span from 0 to 500,000
When I create a measurement from the wall time span and the CPU time span
Then the measurement's wall time duration is 1,000,000 nanoseconds
And the measurement's CPU time duration is 500,000 nanoseconds
- Scenario: I can see that a new measurement's metadata is an empty dict
+ Scenario: New measurement has empty metadata by default
Given a wall time span from 0 to 1
And a CPU time span from 0 to 1
When I create a measurement from the wall time span and the CPU time span
Then the measurement's metadata is an empty dict
- Scenario: I can set metadata on a measurement and read it back
+ # --- Metadata ---
+
+ Scenario: Metadata can be set and read back
Given a wall time span from 0 to 1
And a CPU time span from 0 to 1
When I create a measurement from the wall time span and the CPU time span
diff --git a/features/steps/block_timing_steps.py b/features/steps/block_timing_steps.py
new file mode 100644
index 0000000..defe944
--- /dev/null
+++ b/features/steps/block_timing_steps.py
@@ -0,0 +1,431 @@
+"""Step definitions for the Block timing feature."""
+
+from __future__ import annotations
+
+import asyncio
+import time
+from concurrent.futures import ThreadPoolExecutor
+from typing import TYPE_CHECKING
+
+from behave import given, then, when
+
+import timerun
+
+if TYPE_CHECKING:
+ from behave.runner import Context
+
+# "duration within buffer of X": accept X <= duration <= X + BUFFER_NS.
+# Covers sleep/scheduling jitter so tests don't flake.
+BUFFER_NS = 10_000_000 # 10 ms
+# CPU can be slightly below wall time (scheduling); allow 1 ms undershoot.
+CPU_LOWER_SLACK_NS = 1_000_000
+
+
+def sleep_wall_at_least(nanoseconds: int) -> None:
+ """Sleep >= `nanoseconds` ns wall time. Jitter absorbed by BUFFER_NS."""
+ time.sleep(nanoseconds / 1e9)
+
+
+def spin_wall_at_least(nanoseconds: int) -> None:
+ """Busy loop until wall time >= `nanoseconds` ns. Uses CPU."""
+ start = time.perf_counter_ns()
+ while time.perf_counter_ns() - start < nanoseconds:
+ pass
+
+
+# --- Given ---
+
+
+@given("a blocking operation that takes around {duration_ns:n} nanoseconds")
+def step_given_blocking_operation(context: Context, duration_ns: int) -> None:
+ """Store duration for a blocking operation (e.g. time.sleep)."""
+ context.operation_duration_ns = duration_ns
+
+
+@given("an async operation that takes around {duration_ns:n} nanoseconds")
+def step_given_async_operation(context: Context, duration_ns: int) -> None:
+ """Store duration for an async operation (e.g. asyncio.sleep)."""
+ context.operation_duration_ns = duration_ns
+
+
+@given(
+ "a CPU-bound operation that runs for around {duration_ns:n} nanoseconds",
+)
+def step_given_cpu_bound_operation(context: Context, duration_ns: int) -> None:
+ """Store duration for a CPU-bound operation (busy-loop)."""
+ context.operation_duration_ns = duration_ns
+
+
+@given("each thread sleeps {duration_ns:n} nanoseconds")
+def step_given_thread_sleep(context: Context, duration_ns: int) -> None:
+ """Store duration for the two-thread scenario."""
+ context.thread_sleep_ns = duration_ns
+
+
+@given("the first block duration is {duration_ns:n} nanoseconds")
+def step_given_first_block_duration(
+ context: Context,
+ duration_ns: int,
+) -> None:
+ """Store first block duration for sequential blocks."""
+ context.first_block_ns = duration_ns
+
+
+@given("the second block duration is {duration_ns:n} nanoseconds")
+def step_given_second_block_duration(
+ context: Context,
+ duration_ns: int,
+) -> None:
+ """Store second block duration for sequential blocks."""
+ context.second_block_ns = duration_ns
+
+
+@given("the outer block duration is {duration_ns:n} nanoseconds")
+def step_given_outer_block_duration(
+ context: Context,
+ duration_ns: int,
+) -> None:
+ """Store outer block duration for nested blocks."""
+ context.outer_block_ns = duration_ns
+
+
+@given("the inner block duration is {duration_ns:n} nanoseconds")
+def step_given_inner_block_duration(
+ context: Context,
+ duration_ns: int,
+) -> None:
+ """Store inner block duration for nested blocks."""
+ context.inner_block_ns = duration_ns
+
+
+@given('metadata run_id "{run_id}" and tag "{tag}"')
+def step_given_metadata(context: Context, run_id: str, tag: str) -> None:
+ """Store metadata dict for use with BlockTimer(metadata=...)."""
+ context.metadata = {"run_id": run_id, "tag": tag}
+
+
+@given('I will add metadata key "{key}" as "{value}" in the first block')
+def step_given_metadata_add_in_first(
+ context: Context,
+ key: str,
+ value: str,
+) -> None:
+ """First block will add this key/value to measurement metadata."""
+ context.metadata_add_in_first = (key, value)
+
+
+# --- When ---
+
+
+@when("I measure the blocking operation using `with`")
+def step_measure_blocking_using_with(context: Context) -> None:
+ """BlockTimer() around sleep_wall_at_least(operation_duration_ns)."""
+ with timerun.BlockTimer() as context.measurement:
+ sleep_wall_at_least(context.operation_duration_ns)
+
+
+@when("I measure the async operation using `async with`")
+def step_measure_async_using_async_with(context: Context) -> None:
+ """Async BlockTimer() around asyncio.sleep(operation_duration_ns)."""
+
+ # Define async task: BlockTimer around sleep.
+ async def run() -> timerun.Measurement:
+ async with timerun.BlockTimer() as m:
+ await asyncio.sleep(context.operation_duration_ns / 1e9)
+ return m
+
+ # Run and store measurement.
+ context.measurement = asyncio.run(run())
+
+
+@when("I measure the CPU-bound operation using `with`")
+def step_measure_cpu_bound_using_with(context: Context) -> None:
+ """BlockTimer() around spin_wall_at_least(operation_duration_ns)."""
+ with timerun.BlockTimer() as context.measurement:
+ spin_wall_at_least(context.operation_duration_ns)
+
+
+@when(
+ "I measure blocks from {thread_count:n} threads "
+ "using the same BlockTimer instance",
+)
+def step_measure_blocks_from_threads(
+ context: Context,
+ thread_count: int,
+) -> None:
+ """Measure blocks from thread_count threads (number from feature)."""
+ # Store thread count for Then steps; one shared BlockTimer.
+ context.thread_count = thread_count
+ cm = timerun.BlockTimer()
+
+ # Worker: enter timer, sleep, return measurement.
+ def run() -> timerun.Measurement:
+ with cm as m:
+ sleep_wall_at_least(context.thread_sleep_ns)
+ return m
+
+ # Run thread_count workers and collect measurements.
+ with ThreadPoolExecutor(max_workers=thread_count) as ex:
+ futures = [ex.submit(run) for _ in range(thread_count)]
+ context.thread_measurements = [f.result() for f in futures]
+
+
+@when("I measure two sequential blocks with the same BlockTimer instance")
+def step_measure_two_sequential_blocks(context: Context) -> None:
+ """Measure two sequential blocks."""
+ cm = timerun.BlockTimer()
+
+ with cm as context.first_measurement:
+ sleep_wall_at_least(context.first_block_ns)
+
+ with cm as context.second_measurement:
+ sleep_wall_at_least(context.second_block_ns)
+
+
+@when("I measure nested blocks with the same BlockTimer instance")
+def step_measure_nested_blocks(context: Context) -> None:
+ """Measure nested blocks."""
+ cm = timerun.BlockTimer()
+
+ with cm as context.outer_measurement:
+ sleep_wall_at_least(context.outer_block_ns)
+
+ with cm as context.inner_measurement:
+ sleep_wall_at_least(context.inner_block_ns)
+
+
+@when("I measure a code block with that metadata")
+def step_measure_block_with_metadata(context: Context) -> None:
+ """BlockTimer(metadata=context.metadata), store the Measurement."""
+ with timerun.BlockTimer(metadata=context.metadata) as context.measurement:
+ pass
+
+
+@when(
+ "I measure two blocks with the same BlockTimer instance and that metadata",
+)
+def step_measure_two_blocks_with_metadata(context: Context) -> None:
+ """Two blocks; Given may set metadata_add_in_first, mutate 1st."""
+ cm = timerun.BlockTimer(metadata=context.metadata)
+ # First block: optionally add key/value to measurement metadata.
+ with cm as context.first_measurement:
+ if hasattr(context, "metadata_add_in_first"):
+ context.first_measurement.metadata[
+ context.metadata_add_in_first[0]
+ ] = context.metadata_add_in_first[1]
+ # Second block: no extra metadata.
+ with cm as context.second_measurement:
+ pass
+
+
+@when("I measure a code block that raises an exception")
+def step_measure_block_raises(context: Context) -> None:
+ """BlockTimer() around raising block; catch exception, keep measurement."""
+ # Run timed block that raises; measurement still recorded on exit.
+ try:
+ with timerun.BlockTimer() as context.measurement:
+ raise ValueError # noqa: TRY301
+
+ # Store exception for Then to assert.
+ except ValueError as e:
+ context.exception = e
+
+
+@when(
+ "I call __exit__ on a BlockTimer instance without calling __enter__ first",
+)
+def step_call_exit_without_enter(context: Context) -> None:
+ """BlockTimer().__exit__ without __enter__; store exception in context."""
+ try:
+ timerun.BlockTimer().__exit__(None, None, None)
+ except RuntimeError as e:
+ context.exception = e
+
+
+# --- Then ---
+
+
+@then(
+ "the measurement's wall time duration is between {min_ns:n} and "
+ "{max_ns:n} nanoseconds",
+)
+def step_wall_time_between(context: Context, min_ns: int, max_ns: int) -> None:
+ """Assert min_ns <= measurement.wall_time.duration <= max_ns."""
+ # Required context validation.
+ assert context.measurement.wall_time is not None
+
+ # Duration in [min_ns, max_ns].
+ duration = context.measurement.wall_time.duration
+ assert min_ns <= duration <= max_ns, (
+ f"wall time {duration} not in [{min_ns}, {max_ns}]"
+ )
+
+
+@then(
+ "the measurement's wall time duration is within the configured buffer of "
+ "{expected_ns:n} nanoseconds",
+)
+def step_wall_time_within_buffer(context: Context, expected_ns: int) -> None:
+ """Assert expected_ns <= wall_time.duration <= expected_ns + buffer_ns."""
+ # Required context validation.
+ assert context.measurement.wall_time is not None
+
+ # Duration in [expected_ns, expected_ns + BUFFER_NS].
+ duration = context.measurement.wall_time.duration
+ max_ns = expected_ns + BUFFER_NS
+ assert expected_ns <= duration <= max_ns, (
+ f"wall time {duration} not in [{expected_ns}, {max_ns}] "
+ f"(buffer={BUFFER_NS})"
+ )
+
+
+@then(
+ "the measurement's CPU time duration is within the configured buffer of "
+ "{expected_ns:n} nanoseconds",
+)
+def step_cpu_time_within_buffer(context: Context, expected_ns: int) -> None:
+    """Assert cpu_time in [max(0, expected-slack), expected+buffer]."""
+ # Required context validation.
+ assert context.measurement.cpu_time is not None
+    # Duration in [max(0, expected_ns - CPU_LOWER_SLACK_NS), expected + buffer].
+ duration = context.measurement.cpu_time.duration
+ min_ns = max(0, expected_ns - CPU_LOWER_SLACK_NS)
+ max_ns = expected_ns + BUFFER_NS
+ assert min_ns <= duration <= max_ns, (
+ f"CPU time {duration} not in [{min_ns}, {max_ns}] (buffer={BUFFER_NS})"
+ )
+
+
+@then("the measurement's CPU time is close to wall time")
+def step_cpu_close_to_wall(context: Context) -> None:
+    """Assert max(0, wall - BUFFER_NS) <= CPU <= wall (single-threaded)."""
+ # Required context validation.
+ assert context.measurement.wall_time is not None
+ assert context.measurement.cpu_time is not None
+
+ # Duration in [wall - BUFFER_NS, wall].
+ wall = context.measurement.wall_time.duration
+ cpu = context.measurement.cpu_time.duration
+ min_cpu = max(0, wall - BUFFER_NS)
+ assert min_cpu <= cpu <= wall, (
+ f"CPU {cpu} not in [wall-BUFFER_NS, wall] = [{min_cpu}, {wall}]"
+ )
+
+
+@then(
+ "each thread's measurement has wall time duration within the configured "
+ "buffer of {expected_ns:n} nanoseconds",
+)
+def step_each_thread_wall_within_buffer(
+ context: Context,
+ expected_ns: int,
+) -> None:
+ """Each thread's wall_time in [expected_ns, expected_ns+buffer_ns]."""
+ # Required context validation.
+ measurements = context.thread_measurements
+ assert len(measurements) == context.thread_count, (
+ f"expected {context.thread_count} measurements, "
+ f"got {len(measurements)}"
+ )
+
+ # Duration in [expected_ns, expected_ns + BUFFER_NS] per measurement.
+ max_ns = expected_ns + BUFFER_NS
+ for m in measurements:
+ assert m.wall_time is not None
+ assert expected_ns <= m.wall_time.duration <= max_ns, (
+ f"wall time {m.wall_time.duration} not in "
+ f"[{expected_ns}, {max_ns}] (buffer={BUFFER_NS})"
+ )
+
+
+@then("the measurements are from different threads")
+def step_measurements_from_different_threads(context: Context) -> None:
+ """Assert we have thread_count distinct measurements (one per thread)."""
+ # Required context validation.
+ measurements = context.thread_measurements
+
+ # Exactly thread_count measurements.
+ assert len(measurements) == context.thread_count, (
+ f"expected {context.thread_count} measurements, "
+ f"got {len(measurements)}"
+ )
+
+ # All distinct (one measurement per thread).
+ assert len(measurements) == len({id(m) for m in measurements}), (
+ "measurements are not all distinct (one per thread)"
+ )
+
+
+@then(
+ "the {which} measurement's wall time duration is within the configured "
+ "buffer of {expected_ns:n} nanoseconds",
+)
+def step_which_measurement_wall_within_buffer(
+ context: Context,
+ which: str,
+ expected_ns: int,
+) -> None:
+ """Outer/inner wall_time in [expected_ns, expected_ns+buffer_ns]."""
+ # Required context validation.
+ m = getattr(context, f"{which}_measurement")
+ assert m.wall_time is not None
+
+ # Duration in [expected_ns, expected_ns + BUFFER_NS].
+ duration = m.wall_time.duration
+ max_ns = expected_ns + BUFFER_NS
+ assert expected_ns <= duration <= max_ns, (
+ f"{which} wall time {duration} not in [{expected_ns}, {max_ns}] "
+ f"(buffer={BUFFER_NS})"
+ )
+
+
+@then(
+ "the outer measurement's wall time duration is at least the inner "
+ "measurement's wall time duration",
+)
+def step_outer_wall_at_least_inner(context: Context) -> None:
+ """Outer block duration >= inner (outer contains inner)."""
+ # Required context validation: both have wall_time.
+ assert context.outer_measurement.wall_time is not None
+ assert context.inner_measurement.wall_time is not None
+
+ # Duration: outer >= inner.
+ outer_d = context.outer_measurement.wall_time.duration
+ inner_d = context.inner_measurement.wall_time.duration
+ assert outer_d >= inner_d, f"outer {outer_d} < inner {inner_d}"
+
+
+@then('the first measurement\'s metadata key "{key}" is "{value}"')
+def step_first_measurement_metadata_key(
+ context: Context,
+ key: str,
+ value: str,
+) -> None:
+ """Assert the first measurement's metadata[key] equals value."""
+ assert context.first_measurement.metadata[key] == value
+
+
+@then('the second measurement\'s metadata key "{key}" is "{value}"')
+def step_second_measurement_metadata_key(
+ context: Context,
+ key: str,
+ value: str,
+) -> None:
+ """Assert the second measurement's metadata[key] equals value."""
+ assert context.second_measurement.metadata[key] == value
+
+
+@then('the second measurement\'s metadata does not contain key "{key}"')
+def step_second_measurement_metadata_no_key(
+ context: Context,
+ key: str,
+) -> None:
+ """Second measurement's metadata lacks key (no leak from first block)."""
+ assert key not in context.second_measurement.metadata
+
+
+@then("an exception was propagated to the caller")
+def step_exception_propagated(context: Context) -> None:
+ """Assert we caught the exception that was raised inside the block."""
+ assert hasattr(context, "exception")
+ assert isinstance(context.exception, ValueError)
diff --git a/features/steps/common_steps.py b/features/steps/common_steps.py
new file mode 100644
index 0000000..386ae40
--- /dev/null
+++ b/features/steps/common_steps.py
@@ -0,0 +1,36 @@
+"""Shared step definitions used by multiple features.
+
+Steps here use consistent wording and semantics across features
+(exception assertions, error messages, measurement metadata).
+"""
+
+import builtins
+
+from behave import then
+from behave.runner import Context
+
+
+@then("a {exception_type} is raised")
+def step_exception_raised(context: Context, exception_type: str) -> None:
+ """Assert exception of the given type was stored in context.exception."""
+ assert hasattr(context, "exception"), "Expected an exception to be raised"
+ assert isinstance(context.exception, getattr(builtins, exception_type)), (
+ f"Expected {exception_type}, got {type(context.exception).__name__}"
+ )
+
+
+@then('the error message is "{message}"')
+def step_error_message_is(context: Context, message: str) -> None:
+ """Assert the stored exception message equals message."""
+ assert hasattr(context, "exception"), "Expected an exception to be raised"
+ assert str(context.exception) == message
+
+
+@then('the measurement\'s metadata key "{key}" is "{value}"')
+def step_measurement_metadata_key_is(
+ context: Context,
+ key: str,
+ value: str,
+) -> None:
+ """Assert the key value pair is in measurement's metadata."""
+ assert context.measurement.metadata[key] == value
diff --git a/features/steps/measurement_record_steps.py b/features/steps/measurement_steps.py
similarity index 86%
rename from features/steps/measurement_record_steps.py
rename to features/steps/measurement_steps.py
index 046e5a5..bba56a7 100644
--- a/features/steps/measurement_record_steps.py
+++ b/features/steps/measurement_steps.py
@@ -64,13 +64,3 @@ def step_measurement_metadata_empty_dict(context: Context) -> None:
metadata = context.measurement.metadata
assert isinstance(metadata, dict)
assert not metadata
-
-
-@then('the measurement\'s metadata key "{key}" is "{value}"')
-def step_measurement_metadata_key_value(
- context: Context,
- key: str,
- value: str,
-) -> None:
- """Assert the measurement's metadata[key] equals value."""
- assert context.measurement.metadata[key] == value
diff --git a/features/steps/time_span_steps.py b/features/steps/time_span_steps.py
index d604380..c160a62 100644
--- a/features/steps/time_span_steps.py
+++ b/features/steps/time_span_steps.py
@@ -4,7 +4,7 @@
from datetime import timedelta
import parse
-from behave import given, register_type, then
+from behave import given, register_type, then, when
from behave.runner import Context
import timerun
@@ -46,6 +46,18 @@ def step_given_span_of_duration(
setattr(context, f"time_span_{name.lower()}", span)
+# --- When ---
+
+
+@when("I try to create a time span from {start:n} to {end:n}")
+def step_try_create_time_span(context: Context, start: int, end: int) -> None:
+ """Create TimeSpan(start, end); store exception in context.exception."""
+ try:
+ timerun.TimeSpan(start=start, end=end)
+ except Exception as e: # noqa: BLE001 # pylint: disable=broad-exception-caught
+ context.exception = e
+
+
# --- Then ---
diff --git a/features/time_span.feature b/features/time_span.feature
index 1e581d7..fc4cd15 100644
--- a/features/time_span.feature
+++ b/features/time_span.feature
@@ -1,20 +1,27 @@
Feature: Time span
- As someone measuring how long something takes,
- I want a span of time that tells me how long it took,
- so that I can:
- - compare which took longer
- - see the duration in a familiar form (e.g. seconds)
- - tell which started or ended first
-
- Scenario: I can see how long the span is
+
+ As someone measuring duration,
+ I want a time span,
+ so that I can compare durations, use timedelta, and read start or end.
+
+ # --- Duration and attributes ---
+
+ Scenario: Span duration is readable in nanoseconds
Given a time span from 0 to 1,000,000
Then the duration is 1,000,000 nanoseconds
- Scenario: I can see the duration in a standard form
+ Scenario: Span start and end are readable
+ Given a time span from 1,000 to 2,000
+ Then the start value is 1,000
+ And the end value is 2,000
+
+ Scenario: Duration as standard Python timedelta
Given a time span from 0 to 2,500,000,000
Then the timedelta is 2.5 seconds in standard Python timedelta type
- Scenario Outline: I can compare spans by duration
+ # --- Comparison ---
+
+ Scenario Outline: Compare two spans by duration
Given span A of <a> nanoseconds
And span B of <b> nanoseconds
Then time span A <relation> time span B
@@ -28,7 +35,9 @@ Feature: Time span
| 1,000,000 | 1,000,000 | is less than or equal to |
| 2,000,000 | 1,000,000 | is greater than or equal to |
- Scenario: I can read start and end
- Given a time span from 1,000 to 2,000
- Then the start value is 1,000
- And the end value is 2,000
+ # --- Validation ---
+
+ Scenario: end less than start raises ValueError
+ When I try to create a time span from 10 to 5
+ Then a ValueError is raised
+ And the error message is "end must be >= start"
diff --git a/features/version.feature b/features/version.feature
index 0e04679..2d55858 100644
--- a/features/version.feature
+++ b/features/version.feature
@@ -2,12 +2,9 @@ Feature: Package version
As a user or tool integrating with timerun,
I want to read the package version programmatically,
- so that I can:
- - check compatibility
- - display it to users
- - use it in automation
+ so that I can check compatibility or use it in automation.
- Scenario: I can read the package version
+ Scenario: Package version is readable
When I read the package version
Then the package has a version
And the version is a non-empty string
diff --git a/pyproject.toml b/pyproject.toml
index bfeaf1d..cca14e0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -85,6 +85,9 @@ enable_error_code = [
"unused-awaitable",
]
+[tool.pylint.format]
+max-line-length = 79
+
[tool.pylint.messages_control]
disable = ["not-callable"] # false positive for behave's when/then decorators
diff --git a/timerun.py b/timerun.py
index e371d5f..c4b955f 100644
--- a/timerun.py
+++ b/timerun.py
@@ -1,10 +1,27 @@
"""TimeRun is a Python library for time measurements."""
+from __future__ import annotations
+
+from collections import deque
+from copy import deepcopy
from dataclasses import dataclass, field
from datetime import timedelta
+from threading import local
+from time import perf_counter_ns, process_time_ns
+from typing import TYPE_CHECKING, Literal
+
+if TYPE_CHECKING:
+ from types import TracebackType
__version__: str = "0.5.0"
+__all__ = [
+ "BlockTimer",
+ "Measurement",
+ "TimeSpan",
+ "__version__",
+]
+
@dataclass(order=True, frozen=True)
class TimeSpan:
@@ -30,7 +47,7 @@ class TimeSpan:
Notes
-----
``start`` and ``end`` use ``field(compare=False)``, so two spans with
- the same length compare equal even if their intervals differ.
+ the same duration compare equal even if their intervals differ.
"""
@@ -40,6 +57,9 @@ class TimeSpan:
def __post_init__(self) -> None:
"""Set duration to end minus start (nanoseconds)."""
+ if self.end < self.start:
+ msg = "end must be >= start"
+ raise ValueError(msg)
object.__setattr__(self, "duration", self.end - self.start)
@property
@@ -55,18 +75,112 @@ class Measurement:
Stores one measurement only. Use this to collect the result of a single
timing run: wall-clock time, CPU time, and any user-defined metadata.
+ When created by :class:`BlockTimer`, ``wall_time`` and ``cpu_time`` are
+ ``None`` until the block exits, then they are set to the measured spans.
+
Attributes
----------
- wall_time : TimeSpan
- Elapsed (wall-clock) time for the measurement.
- cpu_time : TimeSpan
- CPU time for the measurement.
+ wall_time : TimeSpan or None
+ Wall-clock time for the measurement, or ``None`` if not yet set.
+ cpu_time : TimeSpan or None
+ CPU time for the measurement, or ``None`` if not yet set.
metadata : dict
Optional key-value metadata (e.g., tags, run id). Defaults to ``{}``;
mutate in place to add or change entries.
"""
- wall_time: TimeSpan
- cpu_time: TimeSpan
+ wall_time: TimeSpan | None = None
+ cpu_time: TimeSpan | None = None
metadata: dict[object, object] = field(default_factory=dict)
+
+
+class BlockTimer:
+ """Context manager for timing a block (wall time + CPU time).
+
+ Use with ``with`` or ``async with``. Yields a :class:`Measurement` whose
+ ``wall_time`` and ``cpu_time`` are set when the block exits. End times are
+ taken at the start of ``__exit__``, with wall time last.
+
+ Optional ``metadata`` is stored by reference at construction; each
+ measurement gets a deep copy at enter time. Exceptions propagate.
+
+ Parameters
+ ----------
+ metadata : dict or None, optional
+ Key-value metadata to attach to the yielded :class:`Measurement`.
+ Stored by reference; each measurement gets a deep copy at enter time.
+ Defaults to ``{}``.
+
+ Yields
+ ------
+ Measurement
+ The measurement record. Its ``wall_time`` and ``cpu_time`` are
+ ``None`` on entry and set to :class:`TimeSpan` instances when the
+ block exits.
+
+ Notes
+ -----
+    Thread-safe: timing state is thread-local, so concurrent threads never
+    share or overwrite each other's in-progress measurements.
+
+ Nested blocks: the same instance may be reused in sequential or nested
+ blocks; each block gets its own measurement.
+
+ Exceptions: if the block raises, ``wall_time`` and ``cpu_time`` are still
+ set before the exception propagates.
+
+ Async: ``async with`` uses the same synchronous timing.
+
+ Examples
+ --------
+ >>> with BlockTimer() as m:
+ ... pass
+ >>> m.wall_time.duration # nanoseconds
+
+ """
+
+ def __init__(self, metadata: dict[object, object] | None = None) -> None:
+ """Initialize the context manager."""
+ self._metadata = metadata if isinstance(metadata, dict) else {}
+ self._local = local()
+
+ def __enter__(self) -> Measurement:
+ """Start timing; return the measurement record."""
+ measurement = Measurement(metadata=deepcopy(self._metadata))
+ if not hasattr(self._local, "stack"):
+ self._local.stack = deque()
+ self._local.stack.append(
+ (measurement, perf_counter_ns(), process_time_ns()),
+ )
+ return measurement
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> Literal[False]:
+ """Stop timing; set wall_time and cpu_time on the measurement."""
+ cpu_end = process_time_ns()
+ wall_end = perf_counter_ns()
+ try:
+ measurement, wall_start, cpu_start = self._local.stack.pop()
+ except (AttributeError, IndexError) as e:
+ msg = "__exit__ called without a matching __enter__"
+ raise RuntimeError(msg) from e
+ measurement.wall_time = TimeSpan(start=wall_start, end=wall_end)
+ measurement.cpu_time = TimeSpan(start=cpu_start, end=cpu_end)
+ return False
+
+ async def __aenter__(self) -> Measurement:
+ """Async entry: delegates to __enter__."""
+ return self.__enter__()
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> Literal[False]:
+ """Async exit: delegates to __exit__."""
+ return self.__exit__(exc_type, exc_value, traceback)
From 64c6e051c64cafc3703162a1a7a03145710b7d3a Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Tue, 17 Feb 2026 22:39:48 -0500
Subject: [PATCH 6/8] feat: add FunctionTimer decorator to measure functions
---
.github/workflows/ci.yaml | 2 +-
.pre-commit-config.yaml | 4 +-
CONTRIBUTING.md | 15 +-
README.md | 2 +-
features/__init__.py | 1 +
features/block_timing.feature | 5 +-
features/environment.py | 19 ++
features/function_timing.feature | 61 ++++++
features/steps/block_timing_steps.py | 101 ++--------
features/steps/common_steps.py | 44 ++++-
features/steps/function_timing_steps.py | 232 ++++++++++++++++++++++
features/steps/measurement_steps.py | 7 +-
features/steps/time_span_steps.py | 13 +-
features/steps/utils.py | 41 ++++
pyproject.toml | 3 +-
timerun.py | 245 ++++++++++++++++++++++--
16 files changed, 668 insertions(+), 127 deletions(-)
create mode 100644 features/__init__.py
create mode 100644 features/environment.py
create mode 100644 features/function_timing.feature
create mode 100644 features/steps/function_timing_steps.py
create mode 100644 features/steps/utils.py
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 9ca0660..477d9f7 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -31,7 +31,7 @@ jobs:
needs: lint
strategy:
matrix:
- python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', '3.14']
+ python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
steps:
- name: Checkout code
uses: actions/checkout@v6
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3022d7c..dc19a62 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,7 +12,7 @@ repos:
- id: check-toml
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.15.0
+ rev: v0.15.1
hooks:
- id: ruff-format
- id: ruff-check
@@ -38,7 +38,7 @@ repos:
args: ["-c", "pyproject.toml"]
- repo: https://github.com/semgrep/pre-commit
- rev: v1.150.0
+ rev: v1.151.0
hooks:
- id: semgrep
args: ["--config", "p/python", "--error"]
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cc8357b..9cead11 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -29,7 +29,7 @@ Please be respectful and constructive. By participating, you agree to uphold a w
### Prerequisites
-- **Python 3.9+**
+- **Python 3.10+**
- **Git**
### One-time setup
@@ -132,10 +132,15 @@ Fixing pre-commit failures before pushing keeps the history clean and CI green.
```
timerun/
├── timerun.py # Library (single-file by design)
-├── features/ # BDD feature files (Gherkin)
+├── features/ # BDD feature files (Gherkin) — behave convention
+│ ├── __init__.py # Makes features a package for imports
│ ├── *.feature
-│ └── steps/ # Step definitions (Python)
-│ └── *_steps.py
+│ ├── environment.py # Optional: hooks (before/after scenario, etc.)
+│ └── steps/ # Step definitions (flat; all .py files loaded)
+│ ├── __init__.py
+│ ├── utils.py # Shared constants and helpers (no step decorators)
+│ ├── common_steps.py # Shared steps used by multiple features
+│ └── *_steps.py # Feature-specific step files
├── pyproject.toml # Project metadata and config
├── Makefile # Commands: init, test, clean, help
├── README.md
@@ -144,7 +149,7 @@ timerun/
```
- **`timerun.py`** — The only library module; keep it a single file by design.
-- **`features/`** — All executable specs; no separate unit test directory.
+- **`features/`** — All executable specs; no separate unit test directory. Layout follows [behave](https://behave.readthedocs.io/) convention: step definitions live under `features/steps/` (flat; subdirectories are not searched). Shared logic lives in `features/steps/utils.py`; shared steps (e.g. metadata, wall-time buffer, exception propagation) in `common_steps.py`. Run behave from the project root so `from features.steps.utils import ...` works.
## Pull Request Process
diff --git a/README.md b/README.md
index 4430396..76d79a0 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@ TimeRun is a **single-file** Python package with no dependencies beyond the [Pyt
### Prerequisites
-**Python 3.9+**
+**Python 3.10+**
### Installation
diff --git a/features/__init__.py b/features/__init__.py
new file mode 100644
index 0000000..dd16409
--- /dev/null
+++ b/features/__init__.py
@@ -0,0 +1 @@
+"""Behave BDD features and step definitions."""
diff --git a/features/block_timing.feature b/features/block_timing.feature
index f95985f..6c94d34 100644
--- a/features/block_timing.feature
+++ b/features/block_timing.feature
@@ -30,8 +30,7 @@ Feature: Block timing
Scenario: Two threads with one BlockTimer yield one measurement per thread
Given each thread sleeps 5,000,000 nanoseconds
When I measure blocks from 2 threads using the same BlockTimer instance
- Then each thread's measurement has wall time duration within the configured buffer of 5,000,000 nanoseconds
- And the measurements are from different threads
+ Then the measurements are from different threads
Scenario: Two sequential blocks with one BlockTimer yield correct durations
Given the first block duration is 5,000,000 nanoseconds
@@ -69,7 +68,7 @@ Feature: Block timing
Scenario: Block that raises still yields measurement; exception propagates
When I measure a code block that raises an exception
- Then the measurement's wall time duration is between 0 and 1,000,000,000 nanoseconds
+ Then the block yielded a measurement
And an exception was propagated to the caller
Scenario: __exit__ without __enter__ raises RuntimeError
diff --git a/features/environment.py b/features/environment.py
new file mode 100644
index 0000000..97ad226
--- /dev/null
+++ b/features/environment.py
@@ -0,0 +1,19 @@
+"""Behave environment hooks.
+
+Runs before/after the test run, features, or scenarios.
+https://behave.readthedocs.io/en/stable/tutorial.html#environmental-controls
+"""
+
+# Example hooks (uncomment and customize if needed):
+#
+# def before_all(context):
+# pass
+#
+# def after_all(context):
+# pass
+#
+# def before_scenario(context, scenario):
+# pass
+#
+# def after_scenario(context, scenario):
+# pass
diff --git a/features/function_timing.feature b/features/function_timing.feature
new file mode 100644
index 0000000..94e4178
--- /dev/null
+++ b/features/function_timing.feature
@@ -0,0 +1,61 @@
+Feature: Function timing
+
+ As someone measuring duration,
+ I want to time function and generator execution with a decorator,
+ so that I get per-call measurements and can attach metadata.
+
+ # --- Sync and async functions ---
+
+ Scenario: Timing a synchronous sleeping function records real time and minimal CPU time
+ Given a sync function that sleeps for around 10,000,000 nanoseconds
+ When I call the decorated sync function
+ Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
+ And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
+
+ Scenario: Timing an async sleeping function records real time and minimal CPU time
+ Given an async function that sleeps for around 10,000,000 nanoseconds
+ When I call the decorated async function
+ Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
+ And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
+
+ # --- Sync and async generators ---
+
+ Scenario: Fully consuming a sync generator records one measurement
+ Given a sync generator that yields 3 items and sleeps 5,000,000 nanoseconds total
+ When I fully consume the decorated sync generator
+ Then the decorated function's measurements deque has 1 entry
+
+ Scenario: Fully consuming an async generator records one measurement
+ Given an async generator that yields 3 items and sleeps 5,000,000 nanoseconds total
+ When I fully consume the decorated async generator
+ Then the decorated function's measurements deque has 1 entry
+
+ # --- Metadata ---
+
+ Scenario: Metadata attached to the timer appears on each measurement
+ Given metadata run_id "exp-1" and tag "baseline"
+ When I call a decorated function with that metadata
+ Then the measurement's metadata key "run_id" is "exp-1"
+ And the measurement's metadata key "tag" is "baseline"
+
+ # --- Exceptions ---
+
+ Scenario: When a timed function raises an error, one measurement is still recorded and the error is re-raised
+ When I call a decorated function that raises an exception
+ Then the decorated function's measurements deque has 1 entry
+ And an exception was propagated to the caller
+
+ # --- Limiting stored measurements (maxlen) ---
+
+ Scenario: With maxlen 2, only the last 2 measurements are kept
+ Given a sync function that sleeps for around 1,000,000 nanoseconds
+ When I decorate it with FunctionTimer with maxlen 2
+ And I call the decorated function 3 times
+ Then the decorated function's measurements deque has 2 entries
+
+ # --- Thread safety ---
+
+ Scenario: Two threads calling the same timed function produce two measurements
+ Given a sync function that sleeps for around 5,000,000 nanoseconds
+ When I call the decorated function from 2 threads concurrently
+ Then the decorated function's measurements deque has 2 entries
diff --git a/features/steps/block_timing_steps.py b/features/steps/block_timing_steps.py
index defe944..3b07e32 100644
--- a/features/steps/block_timing_steps.py
+++ b/features/steps/block_timing_steps.py
@@ -3,35 +3,23 @@
from __future__ import annotations
import asyncio
-import time
from concurrent.futures import ThreadPoolExecutor
from typing import TYPE_CHECKING
from behave import given, then, when
import timerun
+from features.steps.utils import (
+ BUFFER_NS,
+ CPU_LOWER_SLACK_NS,
+ assert_wall_time_within_buffer,
+ sleep_wall_at_least,
+ spin_wall_at_least,
+)
if TYPE_CHECKING:
from behave.runner import Context
-# "duration within buffer of X": accept X <= duration <= X + BUFFER_NS.
-# Covers sleep/scheduling jitter so tests don't flake.
-BUFFER_NS = 10_000_000 # 10 ms
-# CPU can be slightly below wall time (scheduling); allow 1 ms undershoot.
-CPU_LOWER_SLACK_NS = 1_000_000
-
-
-def sleep_wall_at_least(nanoseconds: int) -> None:
- """Sleep >= `nanoseconds` ns wall time. Jitter absorbed by BUFFER_NS."""
- time.sleep(nanoseconds / 1e9)
-
-
-def spin_wall_at_least(nanoseconds: int) -> None:
- """Busy loop until wall time >= `nanoseconds` ns. Uses CPU."""
- start = time.perf_counter_ns()
- while time.perf_counter_ns() - start < nanoseconds:
- pass
-
# --- Given ---
@@ -98,12 +86,6 @@ def step_given_inner_block_duration(
context.inner_block_ns = duration_ns
-@given('metadata run_id "{run_id}" and tag "{tag}"')
-def step_given_metadata(context: Context, run_id: str, tag: str) -> None:
- """Store metadata dict for use with BlockTimer(metadata=...)."""
- context.metadata = {"run_id": run_id, "tag": tag}
-
-
@given('I will add metadata key "{key}" as "{value}" in the first block')
def step_given_metadata_add_in_first(
context: Context,
@@ -261,24 +243,6 @@ def step_wall_time_between(context: Context, min_ns: int, max_ns: int) -> None:
)
-@then(
- "the measurement's wall time duration is within the configured buffer of "
- "{expected_ns:n} nanoseconds",
-)
-def step_wall_time_within_buffer(context: Context, expected_ns: int) -> None:
- """Assert expected_ns <= wall_time.duration <= expected_ns + buffer_ns."""
- # Required context validation.
- assert context.measurement.wall_time is not None
-
- # Duration in [expected_ns, expected_ns + BUFFER_NS].
- duration = context.measurement.wall_time.duration
- max_ns = expected_ns + BUFFER_NS
- assert expected_ns <= duration <= max_ns, (
- f"wall time {duration} not in [{expected_ns}, {max_ns}] "
- f"(buffer={BUFFER_NS})"
- )
-
-
@then(
"the measurement's CPU time duration is within the configured buffer of "
"{expected_ns:n} nanoseconds",
@@ -287,6 +251,7 @@ def step_cpu_time_within_buffer(context: Context, expected_ns: int) -> None:
"""cpu_time in [min_ns, expected_ns+buffer_ns]; allow undershoot."""
# Required context validation.
assert context.measurement.cpu_time is not None
+
# Duration in [expected_ns - CPU_LOWER_SLACK_NS, expected_ns + BUFFER_NS].
duration = context.measurement.cpu_time.duration
min_ns = max(0, expected_ns - CPU_LOWER_SLACK_NS)
@@ -312,32 +277,6 @@ def step_cpu_close_to_wall(context: Context) -> None:
)
-@then(
- "each thread's measurement has wall time duration within the configured "
- "buffer of {expected_ns:n} nanoseconds",
-)
-def step_each_thread_wall_within_buffer(
- context: Context,
- expected_ns: int,
-) -> None:
- """Each thread's wall_time in [expected_ns, expected_ns+buffer_ns]."""
- # Required context validation.
- measurements = context.thread_measurements
- assert len(measurements) == context.thread_count, (
- f"expected {context.thread_count} measurements, "
- f"got {len(measurements)}"
- )
-
- # Duration in [expected_ns, expected_ns + BUFFER_NS] per measurement.
- max_ns = expected_ns + BUFFER_NS
- for m in measurements:
- assert m.wall_time is not None
- assert expected_ns <= m.wall_time.duration <= max_ns, (
- f"wall time {m.wall_time.duration} not in "
- f"[{expected_ns}, {max_ns}] (buffer={BUFFER_NS})"
- )
-
-
@then("the measurements are from different threads")
def step_measurements_from_different_threads(context: Context) -> None:
"""Assert we have thread_count distinct measurements (one per thread)."""
@@ -366,16 +305,10 @@ def step_which_measurement_wall_within_buffer(
expected_ns: int,
) -> None:
"""Outer/inner wall_time in [expected_ns, expected_ns+buffer_ns]."""
- # Required context validation.
- m = getattr(context, f"{which}_measurement")
- assert m.wall_time is not None
-
- # Duration in [expected_ns, expected_ns + BUFFER_NS].
- duration = m.wall_time.duration
- max_ns = expected_ns + BUFFER_NS
- assert expected_ns <= duration <= max_ns, (
- f"{which} wall time {duration} not in [{expected_ns}, {max_ns}] "
- f"(buffer={BUFFER_NS})"
+ assert_wall_time_within_buffer(
+ getattr(context, f"{which}_measurement"),
+ expected_ns,
+ BUFFER_NS,
)
@@ -424,8 +357,8 @@ def step_second_measurement_metadata_no_key(
assert key not in context.second_measurement.metadata
-@then("an exception was propagated to the caller")
-def step_exception_propagated(context: Context) -> None:
- """Assert we caught the exception that was raised inside the block."""
- assert hasattr(context, "exception")
- assert isinstance(context.exception, ValueError)
+@then("the block yielded a measurement")
+def step_block_yielded_measurement(context: Context) -> None:
+ """Assert the block produced a measurement (e.g. when block raises)."""
+ assert context.measurement is not None
+ assert context.measurement.wall_time is not None
diff --git a/features/steps/common_steps.py b/features/steps/common_steps.py
index 386ae40..4d27b60 100644
--- a/features/steps/common_steps.py
+++ b/features/steps/common_steps.py
@@ -1,19 +1,41 @@
"""Shared step definitions used by multiple features.
Steps here use consistent wording and semantics across features
-(exception assertions, error messages, measurement metadata).
+(exception assertions, error messages, measurement metadata, wall time buffer).
"""
+from __future__ import annotations
+
import builtins
+from typing import TYPE_CHECKING
+
+from behave import given, then
+
+from features.steps.utils import BUFFER_NS, assert_wall_time_within_buffer
+
+if TYPE_CHECKING:
+ from behave.runner import Context
+
+
+# --- Given ---
+
-from behave import then
-from behave.runner import Context
+@given('metadata run_id "{run_id}" and tag "{tag}"')
+def step_given_metadata(context: Context, run_id: str, tag: str) -> None:
+ """Store metadata for BlockTimer/FunctionTimer(metadata=...)."""
+ context.metadata = {"run_id": run_id, "tag": tag}
+
+
+# --- Then ---
@then("a {exception_type} is raised")
def step_exception_raised(context: Context, exception_type: str) -> None:
"""Assert exception of the given type was stored in context.exception."""
+ # Required: an exception was stored by the When step.
assert hasattr(context, "exception"), "Expected an exception to be raised"
+
+ # Type must match (e.g. ValueError, RuntimeError).
assert isinstance(context.exception, getattr(builtins, exception_type)), (
f"Expected {exception_type}, got {type(context.exception).__name__}"
)
@@ -26,6 +48,22 @@ def step_error_message_is(context: Context, message: str) -> None:
assert str(context.exception) == message
+@then(
+ "the measurement's wall time duration is within the configured buffer of "
+ "{expected_ns:n} nanoseconds",
+)
+def step_wall_time_within_buffer(context: Context, expected_ns: int) -> None:
+ """Assert expected_ns <= wall_time.duration <= expected_ns + buffer_ns."""
+ assert_wall_time_within_buffer(context.measurement, expected_ns, BUFFER_NS)
+
+
+@then("an exception was propagated to the caller")
+def step_exception_propagated(context: Context) -> None:
+ """Assert we caught the exception raised inside the block/timed call."""
+ assert hasattr(context, "exception")
+ assert isinstance(context.exception, ValueError)
+
+
@then('the measurement\'s metadata key "{key}" is "{value}"')
def step_measurement_metadata_key_is(
context: Context,
diff --git a/features/steps/function_timing_steps.py b/features/steps/function_timing_steps.py
new file mode 100644
index 0000000..a6ec23a
--- /dev/null
+++ b/features/steps/function_timing_steps.py
@@ -0,0 +1,232 @@
+"""Step definitions for the Function timing feature."""
+
+from __future__ import annotations
+
+import asyncio
+from concurrent.futures import ThreadPoolExecutor
+from typing import TYPE_CHECKING
+
+from behave import given, then, when
+
+import timerun
+from features.steps.utils import (
+ sleep_wall_at_least,
+)
+
+if TYPE_CHECKING:
+ from behave.runner import Context
+
+
+# --- Given ---
+
+
+@given("a sync function that sleeps for around {duration_ns:n} nanoseconds")
+def step_given_sync_func_sleep(context: Context, duration_ns: int) -> None:
+ """Store duration for a sync function that will sleep."""
+ context.func_duration_ns = duration_ns
+
+
+@given("an async function that sleeps for around {duration_ns:n} nanoseconds")
+def step_given_async_func_sleep(context: Context, duration_ns: int) -> None:
+ """Store duration for an async function that will sleep."""
+ context.func_duration_ns = duration_ns
+
+
+@given(
+ "a sync generator that yields {count:n} items and sleeps "
+ "{duration_ns:n} nanoseconds total",
+)
+def step_given_sync_gen(
+ context: Context,
+ count: int,
+ duration_ns: int,
+) -> None:
+ """Store duration and yield count for sync gen (sleep across yields)."""
+ context.gen_duration_ns = duration_ns
+ context.gen_count = count
+
+
+@given(
+ "an async generator that yields {count:n} items and sleeps "
+ "{duration_ns:n} nanoseconds total",
+)
+def step_given_async_gen(
+ context: Context,
+ count: int,
+ duration_ns: int,
+) -> None:
+ """Store duration and yield count for an async generator."""
+ context.gen_duration_ns = duration_ns
+ context.gen_count = count
+
+
+# --- When ---
+
+
+@when("I call the decorated sync function")
+def step_when_call_decorated_sync(context: Context) -> None:
+ """Create sync function, decorate with FunctionTimer(), call it."""
+
+ # Define decorated sync function (sleep for configured duration).
+ @timerun.FunctionTimer()
+ def sync_func() -> None:
+ sleep_wall_at_least(context.func_duration_ns)
+
+ # Call and store function + last measurement for Then steps.
+ sync_func()
+ context.decorated_function = sync_func
+ context.measurement = sync_func.measurements[-1]
+
+
+@when("I call the decorated async function")
+def step_when_call_decorated_async(context: Context) -> None:
+ """Create async function, decorate, run it."""
+
+ # Define decorated async function (sleep for configured duration).
+ @timerun.FunctionTimer()
+ async def async_func() -> None:
+ await asyncio.sleep(context.func_duration_ns / 1e9)
+
+ # Run and store function + last measurement for Then steps.
+ asyncio.run(async_func())
+ context.decorated_function = async_func
+ context.measurement = async_func.measurements[-1]
+
+
+@when("I fully consume the decorated sync generator")
+def step_when_consume_sync_gen(context: Context) -> None:
+ """Create sync generator, decorate, consume fully."""
+ per_sleep = context.gen_duration_ns // context.gen_count
+
+ # Define decorated sync generator (sleep spread across yields).
+ @timerun.FunctionTimer()
+ def sync_gen() -> object:
+ for i in range(context.gen_count):
+ sleep_wall_at_least(per_sleep)
+ yield i
+
+ # Consume fully and store generator + last measurement for Then steps.
+ list(sync_gen())
+ context.decorated_function = sync_gen
+ context.measurement = sync_gen.measurements[-1]
+
+
+@when("I fully consume the decorated async generator")
+def step_when_consume_async_gen(context: Context) -> None:
+ """Create async generator, decorate, consume fully."""
+ per_sleep = context.gen_duration_ns // context.gen_count
+
+ # Define decorated async generator (sleep spread across yields).
+ @timerun.FunctionTimer()
+ async def async_gen() -> object:
+ for i in range(context.gen_count):
+ await asyncio.sleep(per_sleep / 1e9)
+ yield i
+
+ # Consume fully via event loop and store generator + last measurement.
+ async def run() -> None:
+ async for _ in async_gen():
+ pass
+
+ asyncio.run(run())
+ context.decorated_function = async_gen
+ context.measurement = async_gen.measurements[-1]
+
+
+@when("I call a decorated function with that metadata")
+def step_when_call_with_metadata(context: Context) -> None:
+ """Decorate a no-op function with FunctionTimer(metadata=...), call it."""
+
+ @timerun.FunctionTimer(metadata=context.metadata)
+ def f() -> None:
+ pass
+
+ f()
+ context.decorated_function = f
+ context.measurement = f.measurements[-1]
+
+
+@when("I call a decorated function that raises an exception")
+def step_when_call_raising(context: Context) -> None:
+ """Decorate a function that raises, call it, catch exception."""
+
+ # Define decorated function that raises; measurement recorded on exit.
+ @timerun.FunctionTimer()
+ def raising() -> None:
+ raise ValueError
+
+ # Call, catch exception for Then to assert; store function and measurement.
+ try:
+ raising()
+ except ValueError as e:
+ context.exception = e
+ context.decorated_function = raising
+ context.measurement = raising.measurements[-1]
+
+
+@when("I decorate it with FunctionTimer with maxlen {maxlen:n}")
+def step_when_decorate_maxlen(context: Context, maxlen: int) -> None:
+ """Store maxlen; actual decoration in next step."""
+ context.func_maxlen = maxlen
+
+
+@when("I call the decorated function {times:n} times")
+def step_when_call_three_times(context: Context, times: int) -> None:
+ """Decorate sync function with FunctionTimer(maxlen=...), call N times."""
+
+ # Define decorated sync function with maxlen from previous step.
+ @timerun.FunctionTimer(maxlen=context.func_maxlen)
+ def sync_func() -> None:
+ sleep_wall_at_least(context.func_duration_ns)
+
+    # Call the function `times` times; only the last `maxlen` measurements are kept.
+ for _ in range(times):
+ sync_func()
+ context.decorated_function = sync_func
+
+
+@when(
+ "I call the decorated function from {thread_count:n} threads concurrently",
+)
+def step_when_call_from_threads(context: Context, thread_count: int) -> None:
+ """Create sync function, decorate, run it from thread_count threads."""
+
+ # Define decorated sync function (sleep for configured duration).
+ @timerun.FunctionTimer()
+ def sync_func() -> None:
+ sleep_wall_at_least(context.func_duration_ns)
+
+ # Worker: call the timed function once.
+ def run() -> None:
+ sync_func()
+
+ # Run thread_count workers concurrently; store function and count for Then.
+ with ThreadPoolExecutor(max_workers=thread_count) as ex:
+ futures = [ex.submit(run) for _ in range(thread_count)]
+ for f in futures:
+ f.result()
+ context.decorated_function = sync_func
+ context.thread_count = thread_count
+
+
+# --- Then ---
+
+
+@then("the decorated function's measurements deque has 1 entry")
+def step_then_measurements_one(context: Context) -> None:
+ """Assert len(decorated_function.measurements) == 1."""
+ func = context.decorated_function
+ assert hasattr(func, "measurements")
+ assert len(func.measurements) == 1, (
+ f"expected 1 measurement, got {len(func.measurements)}"
+ )
+
+
+@then("the decorated function's measurements deque has {n:n} entries")
+def step_then_measurements_count(context: Context, n: int) -> None:
+ """Assert len(decorated_function.measurements) == n."""
+ func = context.decorated_function
+ assert hasattr(func, "measurements")
+ assert len(func.measurements) == n, (
+ f"expected {n} measurements, got {len(func.measurements)}"
+ )
diff --git a/features/steps/measurement_steps.py b/features/steps/measurement_steps.py
index bba56a7..e92d1df 100644
--- a/features/steps/measurement_steps.py
+++ b/features/steps/measurement_steps.py
@@ -16,8 +16,11 @@ def step_given_typed_time_span(
end: int,
) -> None:
"""Set time span to context based on kind (wall/CPU)."""
- span = timerun.TimeSpan(start=start, end=end)
- setattr(context, f"{kind.lower()}_time_span", span)
+ setattr(
+ context,
+ f"{kind.lower()}_time_span",
+ timerun.TimeSpan(start=start, end=end),
+ )
# --- When ---
diff --git a/features/steps/time_span_steps.py b/features/steps/time_span_steps.py
index c160a62..7aa4ce9 100644
--- a/features/steps/time_span_steps.py
+++ b/features/steps/time_span_steps.py
@@ -42,8 +42,11 @@ def step_given_span_of_duration(
duration: int,
) -> None:
"""Create a TimeSpan(0, duration) and store as context.time_span_."""
- span = timerun.TimeSpan(start=0, end=duration)
- setattr(context, f"time_span_{name.lower()}", span)
+ setattr(
+ context,
+ f"time_span_{name.lower()}",
+ timerun.TimeSpan(start=0, end=duration),
+ )
# --- When ---
@@ -81,8 +84,10 @@ def step_timedelta_is_seconds_standard_type(
@then("time span A {relation:Relation} time span B")
def step_time_span_a_relation_b(context: Context, relation: str) -> None:
"""Assert time_span_a and time_span_b satisfy the given relation."""
- op = RELATION_OPERATORS[relation]
- assert op(context.time_span_a, context.time_span_b)
+ assert RELATION_OPERATORS[relation](
+ context.time_span_a,
+ context.time_span_b,
+ )
@then("the {which:w} value is {expected:n}")
diff --git a/features/steps/utils.py b/features/steps/utils.py
new file mode 100644
index 0000000..376480d
--- /dev/null
+++ b/features/steps/utils.py
@@ -0,0 +1,41 @@
+"""Shared utilities for step definitions.
+
+Constants and helpers for block_timing_steps and function_timing_steps
+to avoid duplication and keep assertions consistent.
+"""
+
+import time
+
+# Buffer: expected_ns <= duration <= expected_ns + BUFFER_NS.
+# Covers sleep/scheduling jitter so tests don't flake.
+BUFFER_NS = 10_000_000 # 10 ms
+
+# CPU can be slightly below wall time (scheduling); allow 1 ms undershoot.
+CPU_LOWER_SLACK_NS = 1_000_000
+
+
+def sleep_wall_at_least(nanoseconds: int) -> None:
+ """Sleep >= `nanoseconds` ns wall time. Jitter absorbed by BUFFER_NS."""
+ time.sleep(nanoseconds / 1e9)
+
+
+def spin_wall_at_least(nanoseconds: int) -> None:
+ """Busy loop until wall time >= `nanoseconds` ns. Uses CPU."""
+ start = time.perf_counter_ns()
+ while time.perf_counter_ns() - start < nanoseconds:
+ pass
+
+
+def assert_wall_time_within_buffer(
+ measurement: object,
+ expected_ns: int,
+ buffer_ns: int = BUFFER_NS,
+) -> None:
+ """Assert wall_time.duration in [expected_ns, expected_ns + buffer_ns]."""
+ assert measurement.wall_time is not None
+ duration = measurement.wall_time.duration
+ max_ns = expected_ns + buffer_ns
+ assert expected_ns <= duration <= max_ns, (
+ f"wall time {duration} not in [{expected_ns}, {max_ns}] "
+ f"(buffer={buffer_ns})"
+ )
diff --git a/pyproject.toml b/pyproject.toml
index cca14e0..cfef921 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta"
name = "timerun"
description = "TimeRun is a Python library for elapsed time measurement."
readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.10"
license = { text = "MIT" }
keywords = [
"time",
@@ -23,7 +23,6 @@ classifiers = [
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
diff --git a/timerun.py b/timerun.py
index c4b955f..175765c 100644
--- a/timerun.py
+++ b/timerun.py
@@ -1,27 +1,45 @@
"""TimeRun is a Python library for time measurements."""
-from __future__ import annotations
-
+from abc import ABC, abstractmethod
from collections import deque
+from collections.abc import AsyncGenerator, Callable, Generator
from copy import deepcopy
from dataclasses import dataclass, field
from datetime import timedelta
-from threading import local
+from functools import wraps
+from inspect import (
+ isasyncgenfunction,
+ iscoroutinefunction,
+ isgeneratorfunction,
+)
+from threading import Lock, local
from time import perf_counter_ns, process_time_ns
-from typing import TYPE_CHECKING, Literal
-
-if TYPE_CHECKING:
- from types import TracebackType
+from types import TracebackType
+from typing import (
+ Generic,
+ Literal,
+ ParamSpec,
+ Protocol,
+ TypeVar,
+ cast,
+)
__version__: str = "0.5.0"
__all__ = [
"BlockTimer",
+ "FunctionTimer",
"Measurement",
"TimeSpan",
"__version__",
]
+P = ParamSpec("P") # callable parameters
+R = TypeVar("R") # callable return type
+R_co = TypeVar("R_co", covariant=True) # covariant return (Protocol)
+Y = TypeVar("Y") # generator yield type
+T = TypeVar("T") # context manager resource type
+
@dataclass(order=True, frozen=True)
class TimeSpan:
@@ -92,10 +110,40 @@ class Measurement:
wall_time: TimeSpan | None = None
cpu_time: TimeSpan | None = None
- metadata: dict[object, object] = field(default_factory=dict)
+ metadata: dict[str, object] = field(default_factory=dict)
+
+
+class _SyncToAsyncContextManagerMixin(ABC, Generic[T]):
+ """Mixin: async context manager that delegates to sync __enter__/__exit__.
+
+ Use with any class that implements __enter__ and __exit__; adds support
+ for ``async with`` by calling the sync implementation.
+ """
+ @abstractmethod
+ def __enter__(self) -> T: ...
-class BlockTimer:
+ @abstractmethod
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool: ...
+
+ async def __aenter__(self) -> T:
+ return self.__enter__()
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool:
+ return self.__exit__(exc_type, exc_val, exc_tb)
+
+
+class BlockTimer(_SyncToAsyncContextManagerMixin[Measurement]):
"""Context manager for timing a block (wall time + CPU time).
Use with ``with`` or ``async with``. Yields a :class:`Measurement` whose
@@ -139,12 +187,12 @@ class BlockTimer:
"""
- def __init__(self, metadata: dict[object, object] | None = None) -> None:
+ def __init__(self, metadata: dict[str, object] | None = None) -> None:
"""Initialize the context manager."""
self._metadata = metadata if isinstance(metadata, dict) else {}
self._local = local()
- def __enter__(self) -> Measurement:
+ def __enter__(self) -> Measurement: # type: ignore[explicit-override]
"""Start timing; return the measurement record."""
measurement = Measurement(metadata=deepcopy(self._metadata))
if not hasattr(self._local, "stack"):
@@ -154,7 +202,7 @@ def __enter__(self) -> Measurement:
)
return measurement
- def __exit__(
+ def __exit__( # type: ignore[explicit-override]
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
@@ -172,15 +220,172 @@ def __exit__(
measurement.cpu_time = TimeSpan(start=cpu_start, end=cpu_end)
return False
- async def __aenter__(self) -> Measurement:
- """Async entry: delegates to __enter__."""
- return self.__enter__()
- async def __aexit__(
+class _BlockRecorder(_SyncToAsyncContextManagerMixin[Measurement]):
+ """Records the measurement from a timed block (BlockTimer) into a deque.
+
+ Used by FunctionTimer. Runs BlockTimer, then on exit appends the
+ measurement to the deque under the lock and re-raises if the block raised.
+ Supports ``with`` and ``async with`` via the mixin.
+ """
+
+ def __init__(
+ self,
+ metadata: dict[str, object] | None,
+ measurements: deque[Measurement],
+ lock: Lock,
+ ) -> None:
+ self._timer = BlockTimer(metadata=metadata)
+ self._measurements = measurements
+ self._lock = lock
+
+ def __enter__(self) -> Measurement: # type: ignore[explicit-override]
+ self._measurement = self._timer.__enter__() # pylint: disable=attribute-defined-outside-init
+ return self._measurement
+
+ def __exit__( # type: ignore[explicit-override]
self,
exc_type: type[BaseException] | None,
- exc_value: BaseException | None,
- traceback: TracebackType | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
) -> Literal[False]:
- """Async exit: delegates to __exit__."""
- return self.__exit__(exc_type, exc_value, traceback)
+ self._timer.__exit__(exc_type, exc_val, exc_tb)
+ with self._lock:
+ self._measurements.append(self._measurement)
+ if exc_val is not None:
+ raise exc_val
+ return False
+
+
+class _TimedCallable(Protocol[P, R_co]): # pylint: disable=too-few-public-methods
+ """Protocol for the wrapped callable with a measurements attribute."""
+
+ measurements: deque[Measurement]
+
+ def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R_co: ...
+
+
+class FunctionTimer: # pylint: disable=too-few-public-methods
+ """Decorator for timing a function (wall time + CPU time).
+
+ Use as ``@FunctionTimer()`` or
+ ``@FunctionTimer(metadata={...}, maxlen=100)``.
+ Supports sync functions, async functions, sync generators, and async
+ generators. Each run uses :class:`BlockTimer`; one :class:`Measurement` per
+ invocation (per call or per full generator consumption). Measurements are
+ appended to a deque on the wrapped callable (attribute ``measurements``).
+
+ Parameters
+ ----------
+ metadata : dict or None, optional
+ Passed to :class:`BlockTimer` for each run; interpretation and defaults
+ follow BlockTimer (e.g. None or non-dict become ``{}``). Read from the
+ decorator instance at each invocation, so reassigning it affects future
+ runs.
+ maxlen : int or None, optional
+ Maximum number of measurements to keep on the wrapped callable.
+ Passed to the storage deque as ``deque(maxlen=maxlen)``; ``None``
+ means unbounded. Oldest entries are dropped when full.
+
+ Attributes (on wrapped callable)
+ ---------------------------------
+ measurements : deque of Measurement
+ Deque of measurements (oldest to newest). Use ``func.measurements[-1]``
+ for the last run, or iterate for history. Append is done under a lock
+ for thread safety.
+
+ Notes
+ -----
+ Generators: one measurement per full consumption (from first ``next()`` /
+ ``anext()`` until exhausted or closed). Wall time and CPU time cover the
+ entire consumption (generator + consumer code between iterations).
+
+ Exceptions: if the callable raises, the measurement is still recorded
+ (wall_time and cpu_time set by BlockTimer), then the exception propagates.
+
+ Examples
+ --------
+ >>> @FunctionTimer(maxlen=10)
+ ... def slow():
+ ... pass
+ >>> slow()
+ >>> slow.measurements[-1].wall_time.duration # nanoseconds
+
+ """
+
+ def __init__(
+ self,
+ metadata: dict[str, object] | None = None,
+ maxlen: int | None = None,
+ ) -> None:
+ """Initialize the decorator."""
+ self._metadata = metadata
+ self._maxlen = maxlen
+
+ def __call__( # noqa: C901
+ self,
+ f: Callable[P, R],
+ ) -> (
+ _TimedCallable[P, R]
+ | _TimedCallable[P, AsyncGenerator[Y, None]]
+ | _TimedCallable[P, Generator[Y, None, None]]
+ ):
+ """Wrap the function with timing."""
+ measurements: deque[Measurement] = deque(maxlen=self._maxlen)
+ lock = Lock()
+ if isasyncgenfunction(f):
+
+ @wraps(f)
+ async def wrapper(
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> AsyncGenerator[Y, None]:
+ inner = f(*args, **kwargs)
+ async with _BlockRecorder(
+ self._metadata,
+ measurements,
+ lock,
+ ):
+ async for x in inner:
+ yield x
+
+ elif iscoroutinefunction(f):
+
+ @wraps(f)
+ async def wrapper( # type: ignore[return]
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> R:
+ async with _BlockRecorder(
+ self._metadata,
+ measurements,
+ lock,
+ ):
+ return cast("R", await f(*args, **kwargs))
+
+ elif isgeneratorfunction(f):
+
+ @wraps(f)
+ def wrapper(
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> Generator[Y, None, None]:
+ inner = f(*args, **kwargs)
+ with _BlockRecorder(self._metadata, measurements, lock):
+ yield from inner
+
+ else:
+
+ @wraps(f)
+ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ with _BlockRecorder(self._metadata, measurements, lock):
+ return f(*args, **kwargs)
+
+ wrapped = cast(
+ "_TimedCallable[P, R] | "
+ "_TimedCallable[P, AsyncGenerator[Y, None]] | "
+ "_TimedCallable[P, Generator[Y, None, None]]",
+ wrapper,
+ )
+ wrapped.measurements = measurements
+ return wrapped
From 458dcbba188ebec449935dd968fecc733d84721f Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Wed, 18 Feb 2026 16:07:08 -0500
Subject: [PATCH 7/8] refactor: unify BlockTimer and FunctionTimer into single
Timer class
---
features/block_timing.feature | 28 +--
features/function_timing.feature | 10 +-
features/steps/block_timing_steps.py | 241 +++++++-------------
features/steps/common_steps.py | 44 +++-
features/steps/function_timing_steps.py | 182 +++++++--------
features/steps/measurement_steps.py | 18 +-
features/steps/time_span_steps.py | 14 +-
features/steps/utils.py | 15 +-
features/steps/version_steps.py | 18 +-
features/version.feature | 3 +-
timerun.py | 280 ++++++++----------------
11 files changed, 341 insertions(+), 512 deletions(-)
diff --git a/features/block_timing.feature b/features/block_timing.feature
index 6c94d34..5ad828e 100644
--- a/features/block_timing.feature
+++ b/features/block_timing.feature
@@ -7,42 +7,42 @@ Feature: Block timing
# --- Basic timing: sync, async, CPU-bound ---
Scenario: Blocking sleep with `with` yields wall time and near-zero CPU time
- Given a blocking operation that takes around 10,000,000 nanoseconds
- When I measure the blocking operation using `with`
+ Given a blocking operation that runs for around 10,000,000 nanoseconds
+ When I measure the operation using `with`
Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
Scenario: Async sleep with `async with` yields wall time and near-zero CPU time
- Given an async operation that takes around 10,000,000 nanoseconds
+ Given an async operation that runs for around 10,000,000 nanoseconds
When I measure the async operation using `async with`
Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
Scenario: CPU-bound block with `with` yields wall and CPU time close together
Given a CPU-bound operation that runs for around 10,000,000 nanoseconds
- When I measure the CPU-bound operation using `with`
+ When I measure the operation using `with`
Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
And the measurement's CPU time duration is within the configured buffer of 10,000,000 nanoseconds
And the measurement's CPU time is close to wall time
- # --- One BlockTimer, multiple blocks or threads ---
+ # --- One Timer, multiple blocks or threads ---
- Scenario: Two threads with one BlockTimer yield one measurement per thread
+ Scenario: Two threads with one Timer yield one measurement per thread
Given each thread sleeps 5,000,000 nanoseconds
- When I measure blocks from 2 threads using the same BlockTimer instance
+ When I measure blocks from 2 threads using the same Timer instance
Then the measurements are from different threads
- Scenario: Two sequential blocks with one BlockTimer yield correct durations
+ Scenario: Two sequential blocks with one Timer yield correct durations
Given the first block duration is 5,000,000 nanoseconds
And the second block duration is 10,000,000 nanoseconds
- When I measure two sequential blocks with the same BlockTimer instance
+ When I measure two sequential blocks with the same Timer instance
Then the first measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds
And the second measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
- Scenario: Nested blocks with one BlockTimer yield independent outer and inner times
+ Scenario: Nested blocks with one Timer yield independent outer and inner times
Given the outer block duration is 20,000,000 nanoseconds
And the inner block duration is 5,000,000 nanoseconds
- When I measure nested blocks with the same BlockTimer instance
+ When I measure nested blocks with the same Timer instance
Then the outer measurement's wall time duration is within the configured buffer of 25,000,000 nanoseconds
And the inner measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds
And the outer measurement's wall time duration is at least the inner measurement's wall time duration
@@ -55,10 +55,10 @@ Feature: Block timing
Then the measurement's metadata key "run_id" is "exp-1"
And the measurement's metadata key "tag" is "baseline"
- Scenario: Metadata set in first block is not visible in second block (reused BlockTimer)
+ Scenario: Metadata set in first block is not visible in second block (reused Timer)
Given metadata run_id "same-run" and tag "original"
And I will add metadata key "extra" as "from_first_block" in the first block
- When I measure two blocks with the same BlockTimer instance and that metadata
+ When I measure two blocks with the same Timer instance and that metadata
Then the first measurement's metadata key "extra" is "from_first_block"
And the second measurement's metadata key "run_id" is "same-run"
And the second measurement's metadata key "tag" is "original"
@@ -72,6 +72,6 @@ Feature: Block timing
And an exception was propagated to the caller
Scenario: __exit__ without __enter__ raises RuntimeError
- When I call __exit__ on a BlockTimer instance without calling __enter__ first
+ When I call __exit__ on a Timer instance without calling __enter__ first
Then a RuntimeError is raised
And the error message is "__exit__ called without a matching __enter__"
diff --git a/features/function_timing.feature b/features/function_timing.feature
index 94e4178..a6aa150 100644
--- a/features/function_timing.feature
+++ b/features/function_timing.feature
@@ -8,13 +8,13 @@ Feature: Function timing
Scenario: Timing a synchronous sleeping function records real time and minimal CPU time
Given a sync function that sleeps for around 10,000,000 nanoseconds
- When I call the decorated sync function
+ When I call the decorated function
Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
Scenario: Timing an async sleeping function records real time and minimal CPU time
Given an async function that sleeps for around 10,000,000 nanoseconds
- When I call the decorated async function
+ When I call the decorated function
Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds
And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds
@@ -22,12 +22,12 @@ Feature: Function timing
Scenario: Fully consuming a sync generator records one measurement
Given a sync generator that yields 3 items and sleeps 5,000,000 nanoseconds total
- When I fully consume the decorated sync generator
+ When I fully consume the decorated generator
Then the decorated function's measurements deque has 1 entry
Scenario: Fully consuming an async generator records one measurement
Given an async generator that yields 3 items and sleeps 5,000,000 nanoseconds total
- When I fully consume the decorated async generator
+ When I fully consume the decorated generator
Then the decorated function's measurements deque has 1 entry
# --- Metadata ---
@@ -49,7 +49,7 @@ Feature: Function timing
Scenario: With maxlen 2, only the last 2 measurements are kept
Given a sync function that sleeps for around 1,000,000 nanoseconds
- When I decorate it with FunctionTimer with maxlen 2
+ When I decorate it with Timer with maxlen 2
And I call the decorated function 3 times
Then the decorated function's measurements deque has 2 entries
diff --git a/features/steps/block_timing_steps.py b/features/steps/block_timing_steps.py
index 3b07e32..c1ceebc 100644
--- a/features/steps/block_timing_steps.py
+++ b/features/steps/block_timing_steps.py
@@ -11,7 +11,7 @@
import timerun
from features.steps.utils import (
BUFFER_NS,
- CPU_LOWER_SLACK_NS,
+ assert_metadata_key_equals,
assert_wall_time_within_buffer,
sleep_wall_at_least,
spin_wall_at_least,
@@ -24,66 +24,36 @@
# --- Given ---
-@given("a blocking operation that takes around {duration_ns:n} nanoseconds")
-def step_given_blocking_operation(context: Context, duration_ns: int) -> None:
- """Store duration for a blocking operation (e.g. time.sleep)."""
- context.operation_duration_ns = duration_ns
-
-
-@given("an async operation that takes around {duration_ns:n} nanoseconds")
-def step_given_async_operation(context: Context, duration_ns: int) -> None:
- """Store duration for an async operation (e.g. asyncio.sleep)."""
- context.operation_duration_ns = duration_ns
-
-
@given(
- "a CPU-bound operation that runs for around {duration_ns:n} nanoseconds",
+ "a {kind} operation that runs for around {duration_ns:n} nanoseconds",
)
-def step_given_cpu_bound_operation(context: Context, duration_ns: int) -> None:
- """Store duration for a CPU-bound operation (busy-loop)."""
+@given(
+ "an {kind} operation that runs for around {duration_ns:n} nanoseconds",
+)
+def step_given_operation(
+ context: Context,
+ kind: str,
+ duration_ns: int,
+) -> None:
+ """Store operation duration and kind."""
context.operation_duration_ns = duration_ns
+ context.operation_kind = kind
@given("each thread sleeps {duration_ns:n} nanoseconds")
def step_given_thread_sleep(context: Context, duration_ns: int) -> None:
- """Store duration for the two-thread scenario."""
+ """Store thread sleep duration."""
context.thread_sleep_ns = duration_ns
-@given("the first block duration is {duration_ns:n} nanoseconds")
-def step_given_first_block_duration(
- context: Context,
- duration_ns: int,
-) -> None:
- """Store first block duration for sequential blocks."""
- context.first_block_ns = duration_ns
-
-
-@given("the second block duration is {duration_ns:n} nanoseconds")
-def step_given_second_block_duration(
- context: Context,
- duration_ns: int,
-) -> None:
- """Store second block duration for sequential blocks."""
- context.second_block_ns = duration_ns
-
-
-@given("the outer block duration is {duration_ns:n} nanoseconds")
-def step_given_outer_block_duration(
- context: Context,
- duration_ns: int,
-) -> None:
- """Store outer block duration for nested blocks."""
- context.outer_block_ns = duration_ns
-
-
-@given("the inner block duration is {duration_ns:n} nanoseconds")
-def step_given_inner_block_duration(
+@given("the {which} block duration is {duration_ns:n} nanoseconds")
+def step_given_block_duration(
context: Context,
+ which: str,
duration_ns: int,
) -> None:
- """Store inner block duration for nested blocks."""
- context.inner_block_ns = duration_ns
+ """Store block duration for which block."""
+ setattr(context, f"{which}_block_ns", duration_ns)
@given('I will add metadata key "{key}" as "{value}" in the first block')
@@ -92,53 +62,46 @@ def step_given_metadata_add_in_first(
key: str,
value: str,
) -> None:
- """First block will add this key/value to measurement metadata."""
+ """First block will add key/value to metadata."""
context.metadata_add_in_first = (key, value)
# --- When ---
-@when("I measure the blocking operation using `with`")
-def step_measure_blocking_using_with(context: Context) -> None:
- """BlockTimer() around sleep_wall_at_least(operation_duration_ns)."""
- with timerun.BlockTimer() as context.measurement:
- sleep_wall_at_least(context.operation_duration_ns)
+@when("I measure the operation using `with`")
+def step_measure_operation_using_with(context: Context) -> None:
+ """Measure with Timer(); sleep or spin per operation_kind."""
+ with timerun.Timer() as context.measurement:
+ if getattr(context, "operation_kind", "blocking") == "CPU-bound":
+ spin_wall_at_least(context.operation_duration_ns)
+ else:
+ sleep_wall_at_least(context.operation_duration_ns)
@when("I measure the async operation using `async with`")
def step_measure_async_using_async_with(context: Context) -> None:
- """Async BlockTimer() around asyncio.sleep(operation_duration_ns)."""
+ """Measure async with Timer(); asyncio.sleep."""
- # Define async task: BlockTimer around sleep.
async def run() -> timerun.Measurement:
- async with timerun.BlockTimer() as m:
+ async with timerun.Timer() as m:
await asyncio.sleep(context.operation_duration_ns / 1e9)
return m
- # Run and store measurement.
context.measurement = asyncio.run(run())
-@when("I measure the CPU-bound operation using `with`")
-def step_measure_cpu_bound_using_with(context: Context) -> None:
- """BlockTimer() around spin_wall_at_least(operation_duration_ns)."""
- with timerun.BlockTimer() as context.measurement:
- spin_wall_at_least(context.operation_duration_ns)
-
-
@when(
"I measure blocks from {thread_count:n} threads "
- "using the same BlockTimer instance",
+ "using the same Timer instance",
)
def step_measure_blocks_from_threads(
context: Context,
thread_count: int,
) -> None:
- """Measure blocks from thread_count threads (number from feature)."""
- # Store thread count for Then steps; one shared BlockTimer.
+ """Measure blocks from N threads."""
context.thread_count = thread_count
- cm = timerun.BlockTimer()
+ cm = timerun.Timer()
# Worker: enter timer, sleep, return measurement.
def run() -> timerun.Measurement:
@@ -152,10 +115,10 @@ def run() -> timerun.Measurement:
context.thread_measurements = [f.result() for f in futures]
-@when("I measure two sequential blocks with the same BlockTimer instance")
+@when("I measure two sequential blocks with the same Timer instance")
def step_measure_two_sequential_blocks(context: Context) -> None:
"""Measure two sequential blocks."""
- cm = timerun.BlockTimer()
+ cm = timerun.Timer()
with cm as context.first_measurement:
sleep_wall_at_least(context.first_block_ns)
@@ -164,10 +127,10 @@ def step_measure_two_sequential_blocks(context: Context) -> None:
sleep_wall_at_least(context.second_block_ns)
-@when("I measure nested blocks with the same BlockTimer instance")
+@when("I measure nested blocks with the same Timer instance")
def step_measure_nested_blocks(context: Context) -> None:
"""Measure nested blocks."""
- cm = timerun.BlockTimer()
+ cm = timerun.Timer()
with cm as context.outer_measurement:
sleep_wall_at_least(context.outer_block_ns)
@@ -178,23 +141,25 @@ def step_measure_nested_blocks(context: Context) -> None:
@when("I measure a code block with that metadata")
def step_measure_block_with_metadata(context: Context) -> None:
- """BlockTimer(metadata=context.metadata), store the Measurement."""
- with timerun.BlockTimer(metadata=context.metadata) as context.measurement:
+ """Measure with Timer(metadata=...); store result."""
+ with timerun.Timer(metadata=context.metadata) as context.measurement:
pass
@when(
- "I measure two blocks with the same BlockTimer instance and that metadata",
+ "I measure two blocks with the same Timer instance and that metadata",
)
def step_measure_two_blocks_with_metadata(context: Context) -> None:
- """Two blocks; Given may set metadata_add_in_first, mutate 1st."""
- cm = timerun.BlockTimer(metadata=context.metadata)
+ """Measure two blocks; first may add metadata."""
+ cm = timerun.Timer(metadata=context.metadata)
+
# First block: optionally add key/value to measurement metadata.
with cm as context.first_measurement:
if hasattr(context, "metadata_add_in_first"):
context.first_measurement.metadata[
context.metadata_add_in_first[0]
] = context.metadata_add_in_first[1]
+
# Second block: no extra metadata.
with cm as context.second_measurement:
pass
@@ -202,24 +167,19 @@ def step_measure_two_blocks_with_metadata(context: Context) -> None:
@when("I measure a code block that raises an exception")
def step_measure_block_raises(context: Context) -> None:
- """BlockTimer() around raising block; catch exception, keep measurement."""
- # Run timed block that raises; measurement still recorded on exit.
+ """Measure raising block; catch exception."""
try:
- with timerun.BlockTimer() as context.measurement:
+ with timerun.Timer() as context.measurement:
raise ValueError # noqa: TRY301
-
- # Store exception for Then to assert.
except ValueError as e:
context.exception = e
-@when(
- "I call __exit__ on a BlockTimer instance without calling __enter__ first",
-)
-def step_call_exit_without_enter(context: Context) -> None:
- """BlockTimer().__exit__ without __enter__; store exception in context."""
+@when("I call __exit__ on a Timer instance without calling __enter__ first")
+def step_exit_without_enter(context: Context) -> None:
+ """Call Timer().__exit__ without __enter__; store error."""
try:
- timerun.BlockTimer().__exit__(None, None, None)
+ timerun.Timer().__exit__(None, None, None)
except RuntimeError as e:
context.exception = e
@@ -227,43 +187,9 @@ def step_call_exit_without_enter(context: Context) -> None:
# --- Then ---
-@then(
- "the measurement's wall time duration is between {min_ns:n} and "
- "{max_ns:n} nanoseconds",
-)
-def step_wall_time_between(context: Context, min_ns: int, max_ns: int) -> None:
- """Assert min_ns <= measurement.wall_time.duration <= max_ns."""
- # Required context validation.
- assert context.measurement.wall_time is not None
-
- # Duration in [min_ns, max_ns].
- duration = context.measurement.wall_time.duration
- assert min_ns <= duration <= max_ns, (
- f"wall time {duration} not in [{min_ns}, {max_ns}]"
- )
-
-
-@then(
- "the measurement's CPU time duration is within the configured buffer of "
- "{expected_ns:n} nanoseconds",
-)
-def step_cpu_time_within_buffer(context: Context, expected_ns: int) -> None:
- """cpu_time in [min_ns, expected_ns+buffer_ns]; allow undershoot."""
- # Required context validation.
- assert context.measurement.cpu_time is not None
-
- # Duration in [expected_ns - CPU_LOWER_SLACK_NS, expected_ns + BUFFER_NS].
- duration = context.measurement.cpu_time.duration
- min_ns = max(0, expected_ns - CPU_LOWER_SLACK_NS)
- max_ns = expected_ns + BUFFER_NS
- assert min_ns <= duration <= max_ns, (
- f"CPU time {duration} not in [{min_ns}, {max_ns}] (buffer={BUFFER_NS})"
- )
-
-
@then("the measurement's CPU time is close to wall time")
def step_cpu_close_to_wall(context: Context) -> None:
- """Assert wall - BUFFER_NS <= CPU <= wall (single-threaded)."""
+ """Assert CPU close to wall time."""
# Required context validation.
assert context.measurement.wall_time is not None
assert context.measurement.cpu_time is not None
@@ -277,24 +203,6 @@ def step_cpu_close_to_wall(context: Context) -> None:
)
-@then("the measurements are from different threads")
-def step_measurements_from_different_threads(context: Context) -> None:
- """Assert we have thread_count distinct measurements (one per thread)."""
- # Required context validation.
- measurements = context.thread_measurements
-
- # Exactly thread_count measurements.
- assert len(measurements) == context.thread_count, (
- f"expected {context.thread_count} measurements, "
- f"got {len(measurements)}"
- )
-
- # All distinct (one measurement per thread).
- assert len(measurements) == len({id(m) for m in measurements}), (
- "measurements are not all distinct (one per thread)"
- )
-
-
@then(
"the {which} measurement's wall time duration is within the configured "
"buffer of {expected_ns:n} nanoseconds",
@@ -304,7 +212,7 @@ def step_which_measurement_wall_within_buffer(
which: str,
expected_ns: int,
) -> None:
- """Outer/inner wall_time in [expected_ns, expected_ns+buffer_ns]."""
+ """Assert which measurement wall time in buffer."""
assert_wall_time_within_buffer(
getattr(context, f"{which}_measurement"),
expected_ns,
@@ -317,7 +225,7 @@ def step_which_measurement_wall_within_buffer(
"measurement's wall time duration",
)
def step_outer_wall_at_least_inner(context: Context) -> None:
- """Outer block duration >= inner (outer contains inner)."""
+ """Assert outer wall >= inner."""
# Required context validation: both have wall_time.
assert context.outer_measurement.wall_time is not None
assert context.inner_measurement.wall_time is not None
@@ -328,24 +236,19 @@ def step_outer_wall_at_least_inner(context: Context) -> None:
assert outer_d >= inner_d, f"outer {outer_d} < inner {inner_d}"
-@then('the first measurement\'s metadata key "{key}" is "{value}"')
-def step_first_measurement_metadata_key(
- context: Context,
- key: str,
- value: str,
-) -> None:
- """Assert the first measurement's metadata[key] equals value."""
- assert context.first_measurement.metadata[key] == value
-
-
-@then('the second measurement\'s metadata key "{key}" is "{value}"')
-def step_second_measurement_metadata_key(
+@then('the {which} measurement\'s metadata key "{key}" is "{value}"')
+def step_measurement_metadata_key(
context: Context,
+ which: str,
key: str,
value: str,
) -> None:
- """Assert the second measurement's metadata[key] equals value."""
- assert context.second_measurement.metadata[key] == value
+ """Assert which measurement metadata[key] is value."""
+ assert_metadata_key_equals(
+ getattr(context, f"{which}_measurement"),
+ key,
+ value,
+ )
@then('the second measurement\'s metadata does not contain key "{key}"')
@@ -353,12 +256,30 @@ def step_second_measurement_metadata_no_key(
context: Context,
key: str,
) -> None:
- """Second measurement's metadata lacks key (no leak from first block)."""
+ """Assert second measurement has no key."""
assert key not in context.second_measurement.metadata
+@then("the measurements are from different threads")
+def step_measurements_from_different_threads(context: Context) -> None:
+ """Assert N distinct measurements."""
+ # Required context validation.
+ measurements = context.thread_measurements
+
+ # Exactly thread_count measurements.
+ assert len(measurements) == context.thread_count, (
+ f"expected {context.thread_count} measurements, "
+ f"got {len(measurements)}"
+ )
+
+ # All distinct (one measurement per thread).
+ assert len(measurements) == len({id(m) for m in measurements}), (
+ "measurements are not all distinct (one per thread)"
+ )
+
+
@then("the block yielded a measurement")
def step_block_yielded_measurement(context: Context) -> None:
- """Assert the block produced a measurement (e.g. when block raises)."""
+ """Assert block produced a measurement."""
assert context.measurement is not None
assert context.measurement.wall_time is not None
diff --git a/features/steps/common_steps.py b/features/steps/common_steps.py
index 4d27b60..6c0055d 100644
--- a/features/steps/common_steps.py
+++ b/features/steps/common_steps.py
@@ -11,7 +11,12 @@
from behave import given, then
-from features.steps.utils import BUFFER_NS, assert_wall_time_within_buffer
+from features.steps.utils import (
+ BUFFER_NS,
+ CPU_LOWER_SLACK_NS,
+ assert_metadata_key_equals,
+ assert_wall_time_within_buffer,
+)
if TYPE_CHECKING:
from behave.runner import Context
@@ -22,7 +27,7 @@
@given('metadata run_id "{run_id}" and tag "{tag}"')
def step_given_metadata(context: Context, run_id: str, tag: str) -> None:
- """Store metadata for BlockTimer/FunctionTimer(metadata=...)."""
+ """Store metadata for Timer."""
context.metadata = {"run_id": run_id, "tag": tag}
@@ -31,7 +36,7 @@ def step_given_metadata(context: Context, run_id: str, tag: str) -> None:
@then("a {exception_type} is raised")
def step_exception_raised(context: Context, exception_type: str) -> None:
- """Assert exception of the given type was stored in context.exception."""
+ """Assert stored exception type."""
# Required: an exception was stored by the When step.
assert hasattr(context, "exception"), "Expected an exception to be raised"
@@ -43,25 +48,40 @@ def step_exception_raised(context: Context, exception_type: str) -> None:
@then('the error message is "{message}"')
def step_error_message_is(context: Context, message: str) -> None:
- """Assert the stored exception message equals message."""
+ """Assert exception message."""
assert hasattr(context, "exception"), "Expected an exception to be raised"
assert str(context.exception) == message
+@then("an exception was propagated to the caller")
+def step_exception_propagated(context: Context) -> None:
+ """Assert ValueError was caught."""
+ assert hasattr(context, "exception")
+ assert isinstance(context.exception, ValueError)
+
+
@then(
"the measurement's wall time duration is within the configured buffer of "
"{expected_ns:n} nanoseconds",
)
def step_wall_time_within_buffer(context: Context, expected_ns: int) -> None:
- """Assert expected_ns <= wall_time.duration <= expected_ns + buffer_ns."""
+ """Assert wall time in buffer."""
assert_wall_time_within_buffer(context.measurement, expected_ns, BUFFER_NS)
-@then("an exception was propagated to the caller")
-def step_exception_propagated(context: Context) -> None:
- """Assert we caught the exception raised inside the block/timed call."""
- assert hasattr(context, "exception")
- assert isinstance(context.exception, ValueError)
+@then(
+ "the measurement's CPU time duration is within the configured buffer of "
+ "{expected_ns:n} nanoseconds",
+)
+def step_cpu_time_within_buffer(context: Context, expected_ns: int) -> None:
+ """Assert CPU time in buffer."""
+ assert context.measurement.cpu_time is not None
+ duration = context.measurement.cpu_time.duration
+ min_ns = max(0, expected_ns - CPU_LOWER_SLACK_NS)
+ max_ns = expected_ns + BUFFER_NS
+ assert min_ns <= duration <= max_ns, (
+ f"CPU time {duration} not in [{min_ns}, {max_ns}] (buffer={BUFFER_NS})"
+ )
@then('the measurement\'s metadata key "{key}" is "{value}"')
@@ -70,5 +90,5 @@ def step_measurement_metadata_key_is(
key: str,
value: str,
) -> None:
- """Assert the key value pair is in measurement's metadata."""
- assert context.measurement.metadata[key] == value
+ """Assert metadata[key] is value."""
+ assert_metadata_key_equals(context.measurement, key, value)
diff --git a/features/steps/function_timing_steps.py b/features/steps/function_timing_steps.py
index a6ec23a..a5d6605 100644
--- a/features/steps/function_timing_steps.py
+++ b/features/steps/function_timing_steps.py
@@ -20,124 +20,106 @@
# --- Given ---
-@given("a sync function that sleeps for around {duration_ns:n} nanoseconds")
-def step_given_sync_func_sleep(context: Context, duration_ns: int) -> None:
- """Store duration for a sync function that will sleep."""
- context.func_duration_ns = duration_ns
-
-
-@given("an async function that sleeps for around {duration_ns:n} nanoseconds")
-def step_given_async_func_sleep(context: Context, duration_ns: int) -> None:
- """Store duration for an async function that will sleep."""
- context.func_duration_ns = duration_ns
-
-
@given(
- "a sync generator that yields {count:n} items and sleeps "
- "{duration_ns:n} nanoseconds total",
+ "a {kind} function that sleeps for around {duration_ns:n} nanoseconds",
+)
+@given(
+ "an {kind} function that sleeps for around {duration_ns:n} nanoseconds",
)
-def step_given_sync_gen(
+def step_given_func_sleep(
context: Context,
- count: int,
+ kind: str,
duration_ns: int,
) -> None:
- """Store duration and yield count for sync gen (sleep across yields)."""
- context.gen_duration_ns = duration_ns
- context.gen_count = count
+ """Store func kind and duration."""
+ context.func_duration_ns = duration_ns
+ context.func_kind = kind
@given(
- "an async generator that yields {count:n} items and sleeps "
+ "a {kind} generator that yields {count:n} items and sleeps "
"{duration_ns:n} nanoseconds total",
)
-def step_given_async_gen(
+@given(
+ "an {kind} generator that yields {count:n} items and sleeps "
+ "{duration_ns:n} nanoseconds total",
+)
+def step_given_gen(
context: Context,
+ kind: str,
count: int,
duration_ns: int,
) -> None:
- """Store duration and yield count for an async generator."""
+ """Store generator kind, duration and count."""
context.gen_duration_ns = duration_ns
context.gen_count = count
+ context.gen_kind = kind
# --- When ---
-@when("I call the decorated sync function")
-def step_when_call_decorated_sync(context: Context) -> None:
- """Create sync function, decorate with FunctionTimer(), call it."""
+@when("I call the decorated function")
+def step_when_call_decorated_func(context: Context) -> None:
+ """Decorate function with Timer(), run it."""
+ if context.func_kind == "async":
- # Define decorated sync function (sleep for configured duration).
- @timerun.FunctionTimer()
- def sync_func() -> None:
- sleep_wall_at_least(context.func_duration_ns)
+ @timerun.Timer()
+ async def async_func() -> None:
+ await asyncio.sleep(context.func_duration_ns / 1e9)
- # Call and store function + last measurement for Then steps.
- sync_func()
- context.decorated_function = sync_func
- context.measurement = sync_func.measurements[-1]
-
-
-@when("I call the decorated async function")
-def step_when_call_decorated_async(context: Context) -> None:
- """Create async function, decorate, run it."""
-
- # Define decorated async function (sleep for configured duration).
- @timerun.FunctionTimer()
- async def async_func() -> None:
- await asyncio.sleep(context.func_duration_ns / 1e9)
-
- # Run and store function + last measurement for Then steps.
- asyncio.run(async_func())
- context.decorated_function = async_func
- context.measurement = async_func.measurements[-1]
+ asyncio.run(async_func())
+ context.decorated_function = async_func
+ context.measurement = async_func.measurements[-1]
+ else:
+ @timerun.Timer()
+ def sync_func() -> None:
+ sleep_wall_at_least(context.func_duration_ns)
-@when("I fully consume the decorated sync generator")
-def step_when_consume_sync_gen(context: Context) -> None:
- """Create sync generator, decorate, consume fully."""
- per_sleep = context.gen_duration_ns // context.gen_count
-
- # Define decorated sync generator (sleep spread across yields).
- @timerun.FunctionTimer()
- def sync_gen() -> object:
- for i in range(context.gen_count):
- sleep_wall_at_least(per_sleep)
- yield i
-
- # Consume fully and store generator + last measurement for Then steps.
- list(sync_gen())
- context.decorated_function = sync_gen
- context.measurement = sync_gen.measurements[-1]
+ sync_func()
+ context.decorated_function = sync_func
+ context.measurement = sync_func.measurements[-1]
-@when("I fully consume the decorated async generator")
-def step_when_consume_async_gen(context: Context) -> None:
- """Create async generator, decorate, consume fully."""
+@when("I fully consume the decorated generator")
+def step_when_consume_gen(context: Context) -> None: # noqa: C901
+ """Decorate generator with Timer(), consume fully."""
per_sleep = context.gen_duration_ns // context.gen_count
-
- # Define decorated async generator (sleep spread across yields).
- @timerun.FunctionTimer()
- async def async_gen() -> object:
- for i in range(context.gen_count):
- await asyncio.sleep(per_sleep / 1e9)
- yield i
-
- # Consume fully via event loop and store generator + last measurement.
- async def run() -> None:
- async for _ in async_gen():
+ if context.gen_kind == "async":
+
+ @timerun.Timer()
+ async def async_gen() -> object:
+ for i in range(context.gen_count):
+ await asyncio.sleep(per_sleep / 1e9)
+ yield i
+
+ async def run() -> None:
+ async for _ in async_gen():
+ pass
+
+ asyncio.run(run())
+ context.decorated_function = async_gen
+ context.measurement = async_gen.measurements[-1]
+ else:
+
+ @timerun.Timer()
+ def sync_gen() -> object:
+ for i in range(context.gen_count):
+ sleep_wall_at_least(per_sleep)
+ yield i
+
+ for _ in sync_gen():
pass
-
- asyncio.run(run())
- context.decorated_function = async_gen
- context.measurement = async_gen.measurements[-1]
+ context.decorated_function = sync_gen
+ context.measurement = sync_gen.measurements[-1]
@when("I call a decorated function with that metadata")
def step_when_call_with_metadata(context: Context) -> None:
- """Decorate a no-op function with FunctionTimer(metadata=...), call it."""
+ """Call no-op function decorated with Timer(metadata=...)."""
- @timerun.FunctionTimer(metadata=context.metadata)
+ @timerun.Timer(metadata=context.metadata)
def f() -> None:
pass
@@ -148,10 +130,9 @@ def f() -> None:
@when("I call a decorated function that raises an exception")
def step_when_call_raising(context: Context) -> None:
- """Decorate a function that raises, call it, catch exception."""
+ """Call raising function under Timer(); catch exception."""
- # Define decorated function that raises; measurement recorded on exit.
- @timerun.FunctionTimer()
+ @timerun.Timer()
def raising() -> None:
raise ValueError
@@ -164,18 +145,17 @@ def raising() -> None:
context.measurement = raising.measurements[-1]
-@when("I decorate it with FunctionTimer with maxlen {maxlen:n}")
+@when("I decorate it with Timer with maxlen {maxlen:n}")
def step_when_decorate_maxlen(context: Context, maxlen: int) -> None:
- """Store maxlen; actual decoration in next step."""
+ """Store maxlen for next step."""
context.func_maxlen = maxlen
@when("I call the decorated function {times:n} times")
def step_when_call_three_times(context: Context, times: int) -> None:
- """Decorate sync function with FunctionTimer(maxlen=...), call N times."""
+ """Decorate with Timer(maxlen=...), call N times."""
- # Define decorated sync function with maxlen from previous step.
- @timerun.FunctionTimer(maxlen=context.func_maxlen)
+ @timerun.Timer(maxlen=context.func_maxlen)
def sync_func() -> None:
sleep_wall_at_least(context.func_duration_ns)
@@ -189,10 +169,9 @@ def sync_func() -> None:
"I call the decorated function from {thread_count:n} threads concurrently",
)
def step_when_call_from_threads(context: Context, thread_count: int) -> None:
- """Create sync function, decorate, run it from thread_count threads."""
+ """Run decorated function from N threads."""
- # Define decorated sync function (sleep for configured duration).
- @timerun.FunctionTimer()
+ @timerun.Timer()
def sync_func() -> None:
sleep_wall_at_least(context.func_duration_ns)
@@ -212,19 +191,10 @@ def run() -> None:
# --- Then ---
-@then("the decorated function's measurements deque has 1 entry")
-def step_then_measurements_one(context: Context) -> None:
- """Assert len(decorated_function.measurements) == 1."""
- func = context.decorated_function
- assert hasattr(func, "measurements")
- assert len(func.measurements) == 1, (
- f"expected 1 measurement, got {len(func.measurements)}"
- )
-
-
+@then("the decorated function's measurements deque has {n:n} entry")
@then("the decorated function's measurements deque has {n:n} entries")
def step_then_measurements_count(context: Context, n: int) -> None:
- """Assert len(decorated_function.measurements) == n."""
+ """Assert measurements count is n."""
func = context.decorated_function
assert hasattr(func, "measurements")
assert len(func.measurements) == n, (
diff --git a/features/steps/measurement_steps.py b/features/steps/measurement_steps.py
index e92d1df..a6e4298 100644
--- a/features/steps/measurement_steps.py
+++ b/features/steps/measurement_steps.py
@@ -1,10 +1,16 @@
"""Step definitions for the Measurement record feature."""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
from behave import given, then, when
-from behave.runner import Context
import timerun
+if TYPE_CHECKING:
+ from behave.runner import Context
+
# --- Given ---
@@ -15,7 +21,7 @@ def step_given_typed_time_span(
start: int,
end: int,
) -> None:
- """Set time span to context based on kind (wall/CPU)."""
+ """Set wall or CPU time span on context."""
setattr(
context,
f"{kind.lower()}_time_span",
@@ -28,7 +34,7 @@ def step_given_typed_time_span(
@when("I create a measurement from the wall time span and the CPU time span")
def step_create_measurement_from_spans(context: Context) -> None:
- """Build Measurement from wall/cpu spans; set context.measurement."""
+ """Build Measurement from spans."""
context.measurement = timerun.Measurement(
wall_time=context.wall_time_span,
cpu_time=context.cpu_time_span,
@@ -41,7 +47,7 @@ def step_measurement_metadata_key_set(
key: str,
value: str,
) -> None:
- """Set the measurement's metadata[key] to value."""
+ """Set measurement metadata[key]."""
context.measurement.metadata[key] = value
@@ -54,7 +60,7 @@ def step_measurement_time_duration(
kind: str,
expected: int,
) -> None:
- """Assert measurement wall_time or cpu_time duration equals expected."""
+ """Assert measurement duration equals expected."""
assert (
getattr(context.measurement, f"{kind.lower()}_time").duration
== expected
@@ -63,7 +69,7 @@ def step_measurement_time_duration(
@then("the measurement's metadata is an empty dict")
def step_measurement_metadata_empty_dict(context: Context) -> None:
- """Assert the measurement's metadata is a dict and empty."""
+ """Assert metadata is empty dict."""
metadata = context.measurement.metadata
assert isinstance(metadata, dict)
assert not metadata
diff --git a/features/steps/time_span_steps.py b/features/steps/time_span_steps.py
index 7aa4ce9..8c2b40b 100644
--- a/features/steps/time_span_steps.py
+++ b/features/steps/time_span_steps.py
@@ -31,7 +31,7 @@
@given("a time span from {start:n} to {end:n}")
def step_given_time_span(context: Context, start: int, end: int) -> None:
- """Create a TimeSpan(start, end) and store as context.time_span."""
+ """Create TimeSpan, store on context."""
context.time_span = timerun.TimeSpan(start=start, end=end)
@@ -41,7 +41,7 @@ def step_given_span_of_duration(
name: str,
duration: int,
) -> None:
- """Create a TimeSpan(0, duration) and store as context.time_span_."""
+ """Create TimeSpan(0, duration), store as named."""
setattr(
context,
f"time_span_{name.lower()}",
@@ -54,7 +54,7 @@ def step_given_span_of_duration(
@when("I try to create a time span from {start:n} to {end:n}")
def step_try_create_time_span(context: Context, start: int, end: int) -> None:
- """Create TimeSpan(start, end); store exception in context.exception."""
+ """Create TimeSpan; store exception."""
try:
timerun.TimeSpan(start=start, end=end)
except Exception as e: # noqa: BLE001 # pylint: disable=broad-exception-caught
@@ -66,7 +66,7 @@ def step_try_create_time_span(context: Context, start: int, end: int) -> None:
@then("the duration is {expected:n} nanoseconds")
def step_time_span_duration_is(context: Context, expected: int) -> None:
- """Assert context.time_span.duration equals expected."""
+ """Assert time_span duration."""
assert context.time_span.duration == expected
@@ -75,7 +75,7 @@ def step_timedelta_is_seconds_standard_type(
context: Context,
seconds: float,
) -> None:
- """Assert time_span.timedelta is timedelta and equals given seconds."""
+ """Assert timedelta equals seconds."""
result = context.time_span.timedelta
assert isinstance(result, timedelta)
assert result == timedelta(seconds=seconds)
@@ -83,7 +83,7 @@ def step_timedelta_is_seconds_standard_type(
@then("time span A {relation:Relation} time span B")
def step_time_span_a_relation_b(context: Context, relation: str) -> None:
- """Assert time_span_a and time_span_b satisfy the given relation."""
+ """Assert two time spans satisfy relation."""
assert RELATION_OPERATORS[relation](
context.time_span_a,
context.time_span_b,
@@ -96,5 +96,5 @@ def step_time_span_value_is(
which: str,
expected: int,
) -> None:
- """Assert time_span.start or time_span.end equals expected."""
+ """Assert start or end equals expected."""
assert getattr(context.time_span, which) == expected
diff --git a/features/steps/utils.py b/features/steps/utils.py
index 376480d..90f4aa9 100644
--- a/features/steps/utils.py
+++ b/features/steps/utils.py
@@ -15,12 +15,12 @@
def sleep_wall_at_least(nanoseconds: int) -> None:
- """Sleep >= `nanoseconds` ns wall time. Jitter absorbed by BUFFER_NS."""
+ """Sleep at least nanoseconds (wall)."""
time.sleep(nanoseconds / 1e9)
def spin_wall_at_least(nanoseconds: int) -> None:
- """Busy loop until wall time >= `nanoseconds` ns. Uses CPU."""
+ """Busy-loop at least nanoseconds (wall)."""
start = time.perf_counter_ns()
while time.perf_counter_ns() - start < nanoseconds:
pass
@@ -31,7 +31,7 @@ def assert_wall_time_within_buffer(
expected_ns: int,
buffer_ns: int = BUFFER_NS,
) -> None:
- """Assert wall_time.duration in [expected_ns, expected_ns + buffer_ns]."""
+ """Assert wall time in buffer."""
assert measurement.wall_time is not None
duration = measurement.wall_time.duration
max_ns = expected_ns + buffer_ns
@@ -39,3 +39,12 @@ def assert_wall_time_within_buffer(
f"wall time {duration} not in [{expected_ns}, {max_ns}] "
f"(buffer={buffer_ns})"
)
+
+
+def assert_metadata_key_equals(
+ measurement: object,
+ key: str,
+ value: str,
+) -> None:
+ """Assert metadata[key] equals value."""
+ assert measurement.metadata[key] == value
diff --git a/features/steps/version_steps.py b/features/steps/version_steps.py
index 6277036..d1db665 100644
--- a/features/steps/version_steps.py
+++ b/features/steps/version_steps.py
@@ -1,30 +1,30 @@
"""Step definitions for the package version feature."""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
from behave import then, when
-from behave.runner import Context
import timerun
+if TYPE_CHECKING:
+ from behave.runner import Context
+
# --- When ---
@when("I read the package version")
def step_read_version(context: Context) -> None:
- """Read the package version and store it for Then steps."""
+ """Read and store package version."""
context.version = getattr(timerun, "__version__", None)
# --- Then ---
-@then("the package has a version")
-def step_package_has_version(context: Context) -> None:
- """Assert the package exposes a version."""
- assert context.version is not None
-
-
@then("the version is a non-empty string")
def step_version_non_empty_string(context: Context) -> None:
- """Assert the version is a non-empty string."""
+ """Assert version is non-empty string."""
assert isinstance(context.version, str)
assert len(context.version) > 0
diff --git a/features/version.feature b/features/version.feature
index 2d55858..59d5380 100644
--- a/features/version.feature
+++ b/features/version.feature
@@ -6,5 +6,4 @@ Feature: Package version
Scenario: Package version is readable
When I read the package version
- Then the package has a version
- And the version is a non-empty string
+ Then the version is a non-empty string
diff --git a/timerun.py b/timerun.py
index 175765c..50c6db3 100644
--- a/timerun.py
+++ b/timerun.py
@@ -1,6 +1,5 @@
"""TimeRun is a Python library for time measurements."""
-from abc import ABC, abstractmethod
from collections import deque
from collections.abc import AsyncGenerator, Callable, Generator
from copy import deepcopy
@@ -16,7 +15,6 @@
from time import perf_counter_ns, process_time_ns
from types import TracebackType
from typing import (
- Generic,
Literal,
ParamSpec,
Protocol,
@@ -27,18 +25,16 @@
__version__: str = "0.5.0"
__all__ = [
- "BlockTimer",
- "FunctionTimer",
"Measurement",
"TimeSpan",
+ "Timer",
"__version__",
]
-P = ParamSpec("P") # callable parameters
-R = TypeVar("R") # callable return type
-R_co = TypeVar("R_co", covariant=True) # covariant return (Protocol)
-Y = TypeVar("Y") # generator yield type
-T = TypeVar("T") # context manager resource type
+P = ParamSpec("P")
+R = TypeVar("R")
+R_co = TypeVar("R_co", covariant=True)
+Y = TypeVar("Y")
@dataclass(order=True, frozen=True)
@@ -93,8 +89,9 @@ class Measurement:
Stores one measurement only. Use this to collect the result of a single
timing run: wall-clock time, CPU time, and any user-defined metadata.
- When created by :class:`BlockTimer`, ``wall_time`` and ``cpu_time`` are
- ``None`` until the block exits, then they are set to the measured spans.
+ When created by Timer (context manager or decorator), ``wall_time`` and
+ ``cpu_time`` are ``None`` until the block exits, then they are set to the
+ measured spans.
Attributes
----------
@@ -113,100 +110,87 @@ class Measurement:
metadata: dict[str, object] = field(default_factory=dict)
-class _SyncToAsyncContextManagerMixin(ABC, Generic[T]):
- """Mixin: async context manager that delegates to sync __enter__/__exit__.
-
- Use with any class that implements __enter__ and __exit__; adds support
- for ``async with`` by calling the sync implementation.
- """
-
- @abstractmethod
- def __enter__(self) -> T: ...
-
- @abstractmethod
- def __exit__(
- self,
- exc_type: type[BaseException] | None,
- exc_val: BaseException | None,
- exc_tb: TracebackType | None,
- ) -> bool: ...
-
- async def __aenter__(self) -> T:
- return self.__enter__()
+class _TimedCallable(Protocol[P, R_co]): # pylint: disable=too-few-public-methods
+ measurements: deque[Measurement]
- async def __aexit__(
- self,
- exc_type: type[BaseException] | None,
- exc_val: BaseException | None,
- exc_tb: TracebackType | None,
- ) -> bool:
- return self.__exit__(exc_type, exc_val, exc_tb)
+ def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R_co: ...
-class BlockTimer(_SyncToAsyncContextManagerMixin[Measurement]):
- """Context manager for timing a block (wall time + CPU time).
+class Timer:
+ """Times execution and records wall-clock and CPU time per run.
- Use with ``with`` or ``async with``. Yields a :class:`Measurement` whose
- ``wall_time`` and ``cpu_time`` are set when the block exits. End times are
- taken at the start of ``__exit__``, with wall time last.
+ Use as a context manager (``with Timer() as m:`` or ``async with
+ Timer() as m:``) to time a block: on exit, the yielded
+ :class:`Measurement` has its ``wall_time`` and ``cpu_time`` set.
- Optional ``metadata`` is stored by reference at construction; each
- measurement gets a deep copy at enter time. Exceptions propagate.
+ Use as a decorator (``@Timer()`` or ``@Timer(metadata={...},
+ maxlen=100)``) to time each call: supports sync and async functions and
+ generators; one :class:`Measurement` per run is appended to the wrapped
+ callable's ``measurements`` deque.
Parameters
----------
metadata : dict or None, optional
- Key-value metadata to attach to the yielded :class:`Measurement`.
- Stored by reference; each measurement gets a deep copy at enter time.
- Defaults to ``{}``.
+ Key-value metadata for the measurement(s). Stored by reference; each
+ measurement gets a deep copy at enter time. Defaults to ``{}``.
+ maxlen : int or None, optional
+ Only used in decorator mode. Maximum number of measurements to keep on
+ the wrapped callable. Ignored when used as a context manager. Defaults
+ to ``None`` (unbounded).
- Yields
- ------
+ Yields (context manager)
+ -----------------------
Measurement
- The measurement record. Its ``wall_time`` and ``cpu_time`` are
- ``None`` on entry and set to :class:`TimeSpan` instances when the
- block exits.
+ The measurement record. ``wall_time`` and ``cpu_time`` are set on block
+ exit.
- Notes
- -----
- Thread-safe: state is thread-local; one measurement per thread.
+ Attributes (decorator mode, on wrapped callable)
+ -----------------------------------------------
+ measurements : deque of Measurement
+ Deque of measurements (oldest to newest).
- Nested blocks: the same instance may be reused in sequential or nested
- blocks; each block gets its own measurement.
+ Examples
+ --------
+ Context manager::
- Exceptions: if the block raises, ``wall_time`` and ``cpu_time`` are still
- set before the exception propagates.
+ with Timer() as m:
+ do_work()
+ print(m.wall_time.duration)
- Async: ``async with`` uses the same synchronous timing.
+ Decorator::
- Examples
- --------
- >>> with BlockTimer() as m:
- ... pass
- >>> m.wall_time.duration # nanoseconds
+ @Timer(maxlen=10)
+ def slow():
+ pass
+ slow()
+ print(slow.measurements[-1].wall_time.duration)
"""
- def __init__(self, metadata: dict[str, object] | None = None) -> None:
- """Initialize the context manager."""
+ def __init__(
+ self,
+ metadata: dict[str, object] | None = None,
+ maxlen: int | None = None,
+ ) -> None:
+ """Initialize with optional metadata and maxlen (decorator mode)."""
self._metadata = metadata if isinstance(metadata, dict) else {}
+ self._maxlen = maxlen
self._local = local()
- def __enter__(self) -> Measurement: # type: ignore[explicit-override]
+ def __enter__(self) -> Measurement:
"""Start timing; return the measurement record."""
measurement = Measurement(metadata=deepcopy(self._metadata))
- if not hasattr(self._local, "stack"):
- self._local.stack = deque()
+ self._local.stack = getattr(self._local, "stack", deque())
self._local.stack.append(
(measurement, perf_counter_ns(), process_time_ns()),
)
return measurement
- def __exit__( # type: ignore[explicit-override]
+ def __exit__(
self,
exc_type: type[BaseException] | None,
- exc_value: BaseException | None,
- traceback: TracebackType | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
) -> Literal[False]:
"""Stop timing; set wall_time and cpu_time on the measurement."""
cpu_end = process_time_ns()
@@ -220,107 +204,18 @@ def __exit__( # type: ignore[explicit-override]
measurement.cpu_time = TimeSpan(start=cpu_start, end=cpu_end)
return False
+ async def __aenter__(self) -> Measurement:
+ """Support ``async with`` by delegating to sync __enter__."""
+ return self.__enter__()
-class _BlockRecorder(_SyncToAsyncContextManagerMixin[Measurement]):
- """Records the measurement from a timed block (BlockTimer) into a deque.
-
- Used by FunctionTimer. Runs BlockTimer, then on exit appends the
- measurement to the deque under the lock and re-raises if the block raised.
- Supports ``with`` and ``async with`` via the mixin.
- """
-
- def __init__(
- self,
- metadata: dict[str, object] | None,
- measurements: deque[Measurement],
- lock: Lock,
- ) -> None:
- self._timer = BlockTimer(metadata=metadata)
- self._measurements = measurements
- self._lock = lock
-
- def __enter__(self) -> Measurement: # type: ignore[explicit-override]
- self._measurement = self._timer.__enter__() # pylint: disable=attribute-defined-outside-init
- return self._measurement
-
- def __exit__( # type: ignore[explicit-override]
+ async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
- ) -> Literal[False]:
- self._timer.__exit__(exc_type, exc_val, exc_tb)
- with self._lock:
- self._measurements.append(self._measurement)
- if exc_val is not None:
- raise exc_val
- return False
-
-
-class _TimedCallable(Protocol[P, R_co]): # pylint: disable=too-few-public-methods
- """Protocol for the wrapped callable with a measurements attribute."""
-
- measurements: deque[Measurement]
-
- def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R_co: ...
-
-
-class FunctionTimer: # pylint: disable=too-few-public-methods
- """Decorator for timing a function (wall time + CPU time).
-
- Use as ``@FunctionTimer()`` or
- ``@FunctionTimer(metadata={...}, maxlen=100)``.
- Supports sync functions, async functions, sync generators, and async
- generators. Each run uses :class:`BlockTimer`; one :class:`Measurement` per
- invocation (per call or per full generator consumption). Measurements are
- appended to a deque on the wrapped callable (attribute ``measurements``).
-
- Parameters
- ----------
- metadata : dict or None, optional
- Passed to :class:`BlockTimer` for each run; interpretation and defaults
- follow BlockTimer (e.g. None or non-dict become ``{}``). Read from the
- decorator instance at each invocation, so reassigning it affects future
- runs.
- maxlen : int or None, optional
- Maximum number of measurements to keep on the wrapped callable.
- Passed to the storage deque as ``deque(maxlen=maxlen)``; ``None``
- means unbounded. Oldest entries are dropped when full.
-
- Attributes (on wrapped callable)
- ---------------------------------
- measurements : deque of Measurement
- Deque of measurements (oldest to newest). Use ``func.measurements[-1]``
- for the last run, or iterate for history. Append is done under a lock
- for thread safety.
-
- Notes
- -----
- Generators: one measurement per full consumption (from first ``next()`` /
- ``anext()`` until exhausted or closed). Wall time and CPU time cover the
- entire consumption (generator + consumer code between iterations).
-
- Exceptions: if the callable raises, the measurement is still recorded
- (wall_time and cpu_time set by BlockTimer), then the exception propagates.
-
- Examples
- --------
- >>> @FunctionTimer(maxlen=10)
- ... def slow():
- ... pass
- >>> slow()
- >>> slow.measurements[-1].wall_time.duration # nanoseconds
-
- """
-
- def __init__(
- self,
- metadata: dict[str, object] | None = None,
- maxlen: int | None = None,
- ) -> None:
- """Initialize the decorator."""
- self._metadata = metadata
- self._maxlen = maxlen
+ ) -> bool:
+ """Support ``async with`` by delegating to sync __exit__."""
+ return self.__exit__(exc_type, exc_val, exc_tb)
def __call__( # noqa: C901
self,
@@ -330,9 +225,14 @@ def __call__( # noqa: C901
| _TimedCallable[P, AsyncGenerator[Y, None]]
| _TimedCallable[P, Generator[Y, None, None]]
):
- """Wrap the function with timing."""
+ """When given a callable, wrap it with timing (decorator usage)."""
measurements: deque[Measurement] = deque(maxlen=self._maxlen)
lock = Lock()
+
+ def append_measurement(m: Measurement) -> None:
+ with lock:
+ measurements.append(m)
+
if isasyncgenfunction(f):
@wraps(f)
@@ -341,13 +241,12 @@ async def wrapper(
**kwargs: P.kwargs,
) -> AsyncGenerator[Y, None]:
inner = f(*args, **kwargs)
- async with _BlockRecorder(
- self._metadata,
- measurements,
- lock,
- ):
- async for x in inner:
- yield x
+ try:
+ async with self as m:
+ async for x in inner:
+ yield x
+ finally:
+ append_measurement(m) # pylint: disable=used-before-assignment
elif iscoroutinefunction(f):
@@ -356,12 +255,11 @@ async def wrapper( # type: ignore[return]
*args: P.args,
**kwargs: P.kwargs,
) -> R:
- async with _BlockRecorder(
- self._metadata,
- measurements,
- lock,
- ):
- return cast("R", await f(*args, **kwargs))
+ try:
+ async with self as m:
+ return cast("R", await f(*args, **kwargs))
+ finally:
+ append_measurement(m) # pylint: disable=used-before-assignment
elif isgeneratorfunction(f):
@@ -371,15 +269,21 @@ def wrapper(
**kwargs: P.kwargs,
) -> Generator[Y, None, None]:
inner = f(*args, **kwargs)
- with _BlockRecorder(self._metadata, measurements, lock):
- yield from inner
+ try:
+ with self as m:
+ yield from inner
+ finally:
+ append_measurement(m) # pylint: disable=used-before-assignment
else:
@wraps(f)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
- with _BlockRecorder(self._metadata, measurements, lock):
- return f(*args, **kwargs)
+ try:
+ with self as m:
+ return f(*args, **kwargs)
+ finally:
+ append_measurement(m) # pylint: disable=used-before-assignment
wrapped = cast(
"_TimedCallable[P, R] | "
From 8f4eb127398a929f2f4e882a5d9d55d2e8addcfd Mon Sep 17 00:00:00 2001
From: HH-MWB <50187675+HH-MWB@users.noreply.github.com>
Date: Wed, 18 Feb 2026 18:26:48 -0500
Subject: [PATCH 8/8] docs: add README Quickstart and align docstring/examples
with it
---
Makefile | 2 +-
README.md | 37 ++++++++++++++++++++++++++++++++++++-
pyproject.toml | 2 +-
timerun.py | 14 +++++++-------
4 files changed, 45 insertions(+), 10 deletions(-)
diff --git a/Makefile b/Makefile
index 8849223..652129e 100644
--- a/Makefile
+++ b/Makefile
@@ -39,6 +39,6 @@ test-verbose: ## Run BDD tests with full scenario/step output (for debugging fai
.PHONY: clean
clean: ## Delete all temporary files including venv
@rm -rf "$(VENV_DIR)" *.egg-info
- @rm -rf .mypy_cache .coverage htmlcov
+ @rm -rf .mypy_cache .ruff_cache .coverage htmlcov
@find . -name "*.pyc" -delete
@find . -name "__pycache__" -type d -exec rm -rf {} +
diff --git a/README.md b/README.md
index 76d79a0..37af1cf 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@
TimeRun is a **single-file** Python package with no dependencies beyond the [Python Standard Library](https://docs.python.org/3/library/). The package is designed to stay minimal and dependency-free.
+It records **wall-clock time** (real elapsed time) and **CPU time** (process time) for code blocks or function calls, and lets you attach optional **metadata** (e.g. run id, tags) to each measurement.
## Setup
@@ -39,7 +40,41 @@ pip install git+https://github.com/HH-MWB/timerun.git
## Quickstart
-TBD
+### Time Code Block
+
+Use `with Timer() as m:` or `async with Timer() as m:`. On block exit, the yielded `Measurement` has `wall_time` and `cpu_time` set.
+
+```python
+>>> from timerun import Timer
+>>> with Timer() as m:
+... pass # code block to be measured
+...
+>>> m.wall_time.timedelta
+datetime.timedelta(microseconds=11)
+>>> m.cpu_time.timedelta
+datetime.timedelta(microseconds=8)
+```
+
+*Note: On block exit the timer records CPU time first, then wall time, so wall time is slightly larger than CPU time even when there is no I/O or scheduling.*
+
+### Time Function Calls
+
+Use `@Timer()` to time every call. Works with sync and async functions and with sync and async generators. One `Measurement` per call is appended to the wrapped callable's `measurements` deque.
+
+```python
+>>> from timerun import Timer
+>>> @Timer()
+... def func(): # function to be measured
+... return
+...
+>>> func()
+>>> func.measurements[-1].wall_time.timedelta
+datetime.timedelta(microseconds=11)
+>>> func.measurements[-1].cpu_time.timedelta
+datetime.timedelta(microseconds=8)
+```
+
+*Note: Argument `maxlen` caps how many measurements are kept (e.g. `@Timer(maxlen=10)`). By default the deque is unbounded.*
## Contributing
diff --git a/pyproject.toml b/pyproject.toml
index cfef921..2f1f48e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "timerun"
-description = "TimeRun is a Python library for elapsed time measurement."
+description = "TimeRun is a Python library for time measurements."
readme = "README.md"
requires-python = ">=3.10"
license = { text = "MIT" }
diff --git a/timerun.py b/timerun.py
index 50c6db3..bffa383 100644
--- a/timerun.py
+++ b/timerun.py
@@ -154,16 +154,16 @@ class Timer:
Context manager::
with Timer() as m:
- do_work()
- print(m.wall_time.duration)
+ pass # code block to be measured
+ print(m.wall_time.timedelta)
Decorator::
- @Timer(maxlen=10)
- def slow():
- pass
- slow()
- print(slow.measurements[-1].wall_time.duration)
+ @Timer()
+ def func():
+ return
+ func()
+ print(func.measurements[-1].wall_time.timedelta)
"""