diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c2f9f82..477d9f7 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -31,7 +31,7 @@ jobs: needs: lint strategy: matrix: - python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', '3.14'] + python-version: ['3.10', '3.11', '3.12', '3.13', '3.14'] steps: - name: Checkout code uses: actions/checkout@v6 @@ -45,12 +45,10 @@ jobs: run: pip install -e ".[dev]" - name: Run tests with coverage - run: >- - python -m pytest tests/ - --cov=timerun - --cov-branch - --cov-report=xml - --cov-report=term + run: | + coverage run --source=timerun -m behave -f progress + coverage report + coverage xml - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v5 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 098c0ca..dc19a62 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,24 +11,39 @@ repos: - id: check-yaml - id: check-toml - - repo: https://github.com/HH-MWB/pyenforce - rev: v0.1.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.15.1 hooks: - id: ruff-format - id: ruff-check + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.19.1 + hooks: - id: mypy - additional_dependencies: - - ".[mypy]" # Required to re-adds mypy as a dependency - - pytest + args: ["--scripts-are-modules"] + files: ^timerun\.py$ + additional_dependencies: [behave] + + - repo: https://github.com/pylint-dev/pylint + rev: v4.0.4 + hooks: - id: pylint - additional_dependencies: - - ".[pylint]" # Required to re-adds Pylint as a dependency - - pytest + additional_dependencies: [behave] + + - repo: https://github.com/PyCQA/bandit + rev: 1.9.3 + hooks: - id: bandit + args: ["-c", "pyproject.toml"] + + - repo: https://github.com/semgrep/pre-commit + rev: v1.151.0 + hooks: - id: semgrep - - id: vulture + args: ["--config", "p/python", "--error"] - repo: https://github.com/adrienverge/yamllint - rev: v1.37.1 + rev: v1.38.0 hooks: - id: 
yamllint diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 46ac27d..9cead11 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,119 +1,201 @@ # Contributing to TimeRun -Thank you for your interest in contributing to TimeRun! This document provides guidelines for contributing to the project. +Thank you for considering contributing to TimeRun. This guide explains how to set up your environment, run tests, and submit changes. -## Getting Started +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [How You Can Help](#how-you-can-help) +- [Development Setup](#development-setup) +- [Testing](#testing) +- [Code Style and Quality](#code-style-and-quality) +- [Project Structure](#project-structure) +- [Pull Request Process](#pull-request-process) +- [Reporting Bugs](#reporting-bugs) +- [License](#license) + +## Code of Conduct + +Please be respectful and constructive. By participating, you agree to uphold a welcoming environment for everyone. + +## How You Can Help + +- **Report bugs** — Open an issue with clear steps to reproduce. +- **Suggest features** — Open an issue describing the use case and desired behavior. +- **Submit code** — Fix bugs or add features via pull requests (see [Pull Request Process](#pull-request-process)). +- **Improve docs** — Fix typos, clarify README or docstrings, or add examples. + +## Development Setup ### Prerequisites -- Python 3.9 or higher -- Git +- **Python 3.10+** +- **Git** -### Development Setup +### One-time setup + +1. **Fork** the repository on GitHub, then clone your fork: -1. Fork the repository on GitHub -2. Clone your fork locally: ```bash git clone https://github.com/YOUR_USERNAME/timerun.git cd timerun ``` -3. Set up the development environment: +2. **Create and activate a virtual environment** (recommended): + ```bash - make init + python3 -m venv .venv + source .venv/bin/activate # Windows: .venv\Scripts\activate ``` -4. Activate the virtual environment: +3. 
**Install the project in editable mode with dev dependencies**: + ```bash - source .venv/bin/activate + pip install -e ".[dev]" ``` -## Development Workflow +4. **Install and enable pre-commit hooks** (optional but recommended): -### Running Tests + ```bash + pip install pre-commit + pre-commit install + ``` + + Or use the convenience target: + + ```bash + make init + ``` + + Then activate the venv: `source .venv/bin/activate`. + +### Verify setup + +Run the test suite: -Run the test suite with coverage: ```bash make test ``` -### Code Style +You should see the BDD scenarios run and a coverage report. + +## Testing + +TimeRun uses **behavior-driven development (BDD)** with [behave](https://behave.readthedocs.io/). All tests are written in Gherkin and live under `features/`. + +### Run tests + +| Command | Description | +|--------------------|----------------------------------------------------------------| +| `make test` | Run BDD suite with progress + summary + coverage (default) | +| `make test-summary`| Summary and coverage only (minimal output) | +| `make test-verbose`| Full scenario/step output (use when debugging failures) | +| `behave` | Run BDD suite only (no coverage) | + +### Run coverage manually + +```bash +coverage run --source=timerun -m behave # full output +coverage run --source=timerun -m behave -f progress # progress + summary +coverage run --source=timerun -m behave -f null # summary only +coverage report --show-missing +``` + +### Adding or changing tests -This project follows these code style guidelines: -- **Black** for code formatting (line length: 79 characters) -- **isort** for import sorting +- **Feature files** — Add or edit `.feature` files in `features/` (e.g. `features/version.feature`). Use standard Gherkin: `Feature`, `Scenario`, `Given`, `When`, `Then`. +- **Step definitions** — Implement steps in Python under `features/steps/`, typically in a `*_steps.py` file. 
Use `@given`, `@when`, `@then` from `behave`; step functions receive a `context` argument. +- Keep scenarios focused and steps reusable. Add or extend scenarios for new behavior rather than skipping BDD. + +## Code Style and Quality + +Style and linting are enforced via **pre-commit** (Ruff, mypy, Pylint, and other hooks). After `pre-commit install`, these run automatically on each commit. + +### Run checks manually -Pre-commit hooks are installed automatically with `make init` and will run on every commit. You can also run them manually: ```bash pre-commit run --all-files ``` -### Making Changes +### What we expect -1. Create a new branch for your feature or bugfix: - ```bash - git checkout -b feature/your-feature-name - ``` +- **Formatting** — Ruff format (run via pre-commit or `ruff format`). +- **Linting** — Ruff check, Pylint, and other hooks must pass. +- **Types** — Use type hints for public APIs; mypy must pass. +- **Docstrings** — Public functions, classes, and modules should have docstrings. +- **Security** — Bandit and Semgrep run in pre-commit; address any reported issues. -2. Make your changes following the project conventions -3. Add or update tests as needed -4. Ensure all tests pass: `make test` -5. Commit your changes with a clear message +Fixing pre-commit failures before pushing keeps the history clean and CI green. -### Submitting Changes +## Project Structure -1. Push your branch to your fork: - ```bash - git push origin feature/your-feature-name - ``` +``` +timerun/ +├── timerun.py # Library (single-file by design) +├── features/ # BDD feature files (Gherkin) — behave convention +│ ├── __init__.py # Makes features a package for imports +│ ├── *.feature +│ ├── environment.py # Optional: hooks (before/after scenario, etc.) 
+│ └── steps/ # Step definitions (flat; all .py files loaded) +│ ├── __init__.py +│ ├── utils.py # Shared constants and helpers (no step decorators) +│ ├── common_steps.py # Shared steps used by multiple features +│ └── *_steps.py # Feature-specific step files +├── pyproject.toml # Project metadata and config +├── Makefile # Commands: init, test, clean, help +├── README.md +├── CONTRIBUTING.md +└── LICENSE +``` -2. Create a pull request on GitHub with: - - Clear description of the changes - - Reference to any related issues - - Test coverage for new functionality +- **`timerun.py`** — The only library module; keep it a single file by design. +- **`features/`** — All executable specs; no separate unit test directory. Layout follows [behave](https://behave.readthedocs.io/) convention: step definitions live under `features/steps/` (flat; subdirectories are not searched). Shared logic lives in `features/steps/utils.py`; shared steps (e.g. metadata, wall-time buffer, exception propagation) in `common_steps.py`. Run behave from the project root so `from features.steps.utils import ...` works. -## Project Structure +## Pull Request Process -- `timerun.py` - Main library code (single file module) -- `tests/` - Test suite -- `pyproject.toml` - Project configuration and dependencies -- `Makefile` - Development commands +1. **Create a branch** from `main`: -## Guidelines + ```bash + git checkout main + git pull origin main + git checkout -b feature/short-description # or fix/short-description + ``` -### Code Quality +2. **Make your changes** — Follow [Code Style and Quality](#code-style-and-quality) and add or update BDD scenarios in `features/` for new or changed behavior. -- Maintain 100% test coverage for new code -- Follow existing code patterns and conventions -- Add docstrings for all public functions and classes -- Use type hints consistently +3. 
**Run the suite and pre-commit**: -### Testing + ```bash + make test + pre-commit run --all-files + ``` -- Write tests for all new functionality -- Use descriptive test names -- Test both success and error cases -- Keep tests focused and independent +4. **Commit** with clear, concise messages. Optionally use conventional style (e.g. `feat: add X`, `fix: correct Y`). -### Documentation +5. **Push** to your fork and open a pull request against `main`: -- Update docstrings for any API changes -- Add examples for new features -- Update README.md if needed + ```bash + git push origin feature/short-description + ``` -## Reporting Issues +6. **Fill out the PR**: + - Describe what changed and why. + - Reference any related issues (e.g. "Fixes #123"). + - Confirm tests pass and, for new behavior, that BDD scenarios were added or updated. -When reporting bugs or requesting features: +Maintainers will review and may request changes. Once approved, your PR will be merged. -1. Check existing issues first -2. Use the issue templates if available -3. Provide clear reproduction steps for bugs -4. Include Python version and environment details +## Reporting Bugs -## Questions? +- **Search** existing issues to avoid duplicates. +- **Open an issue** with: + - A short, clear title. + - Steps to reproduce (code or commands). + - Expected vs actual behavior. + - Your environment: OS, Python version (`python --version`), and how you installed TimeRun (pip, editable, etc.). -Feel free to open an issue for questions about contributing or reach out to the maintainers. +For small, obvious fixes you may open a PR directly with a short explanation. ## License -By contributing to TimeRun, you agree that your contributions will be licensed under the MIT License. +Contributions are made under the [MIT License](LICENSE). By submitting a pull request, you agree that your contributions will be licensed under the same terms. 
diff --git a/Makefile b/Makefile index 72b7f0a..652129e 100644 --- a/Makefile +++ b/Makefile @@ -22,12 +22,23 @@ init: ## Set up Python development environment with pre-commit hooks @echo "Development environment ready! To activate it, run: source $(VENV_DIR)/bin/activate" .PHONY: test -test: ## Run all tests and display coverage ratio - @"$(VENV_DIR)/bin/pytest" tests/ --cov=timerun --cov-report=term-missing +test: ## Run BDD tests (progress + summary + coverage) + @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave -f progress + @"$(VENV_DIR)/bin/coverage" report --show-missing + +.PHONY: test-summary +test-summary: ## Run BDD tests (summary and coverage only; use 'make test' to see which feature failed) + @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave -f null + @"$(VENV_DIR)/bin/coverage" report --show-missing + +.PHONY: test-verbose +test-verbose: ## Run BDD tests with full scenario/step output (for debugging failures) + @"$(VENV_DIR)/bin/coverage" run --source=timerun -m behave + @"$(VENV_DIR)/bin/coverage" report --show-missing .PHONY: clean clean: ## Delete all temporary files including venv @rm -rf "$(VENV_DIR)" *.egg-info - @rm -rf .mypy_cache .pytest_cache .coverage htmlcov + @rm -rf .mypy_cache .ruff_cache .coverage htmlcov @find . -name "*.pyc" -delete @find . -name "__pycache__" -type d -exec rm -rf {} + diff --git a/README.md b/README.md index 45a9db9..37af1cf 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@

-

TimeRun - Python library for elapsed time measurement.

+

TimeRun — Python package for time measurement.

Version @@ -14,27 +14,25 @@ Total Downloads

-TimeRun is a simple, yet elegant elapsed time measurement library for [Python](https://www.python.org). It is distributed as a single file module and has no dependencies other than the [Python Standard Library](https://docs.python.org/3/library/). +TimeRun is a **single-file** Python package with no dependencies beyond the [Python Standard Library](https://docs.python.org/3/library/). The package is designed to stay minimal and dependency-free. -- **Elapsed Time**: Customized time delta which represents elapsed time in nanoseconds -- **Stopwatch**: An elapsed time measurer with the highest available resolution -- **Timer**: Convenient syntax to capture and save measured elapsed time results +It records **wall-clock time** (real elapsed time) and **CPU time** (process time) for code blocks or function calls, and lets you attach optional **metadata** (e.g. run id, tags) to each measurement. ## Setup ### Prerequisites -The only prerequisite to use TimeRun is running **Python 3.9+**. +**Python 3.10+** ### Installation -Install TimeRun from [Python Package Index](https://pypi.org/project/timerun/): +From [PyPI](https://pypi.org/project/timerun/): ```bash pip install timerun ``` -Install TimeRun from [Source Code](https://github.com/HH-MWB/timerun): +From source: ```bash pip install git+https://github.com/HH-MWB/timerun.git @@ -42,88 +40,46 @@ pip install git+https://github.com/HH-MWB/timerun.git ## Quickstart -### Measure Code Block +### Time Code Block -```python ->>> import time ->>> from timerun import Timer ->>> with Timer() as timer: -... time.sleep(0.1) # your code here ->>> print(timer.duration) -0:00:00.100000000 -``` - -### Measure Function - -```python ->>> import time ->>> from timerun import Timer ->>> timer = Timer() ->>> @timer -... def func(): -... time.sleep(0.1) # your code here ->>> func() ->>> print(timer.duration) -0:00:00.100000000 -``` - -### Measure Async Function +Use `with Timer() as m:` or `async with Timer() as m:`. 
On block exit, the yielded `Measurement` has `wall_time` and `cpu_time` set. ```python ->>> import asyncio >>> from timerun import Timer ->>> timer = Timer() ->>> @timer -... async def async_func(): -... await asyncio.sleep(0.1) # your code here ->>> asyncio.run(async_func()) ->>> print(timer.duration) -0:00:00.100000000 +>>> with Timer() as m: +... pass # code block to be measured +... +>>> m.wall_time.timedelta +datetime.timedelta(microseconds=11) +>>> m.cpu_time.timedelta +datetime.timedelta(microseconds=8) ``` -### Measure Async Code Block +*Note: On block exit the timer records CPU time first, then wall time, so wall time is slightly larger than CPU time even when there is no I/O or scheduling.* -```python ->>> import asyncio ->>> from timerun import Timer ->>> async def async_code(): -... async with Timer() as timer: -... await asyncio.sleep(0.1) # your code here -... print(timer.duration) ->>> asyncio.run(async_code()) -0:00:00.100000000 -``` +### Time Function Calls -### Multiple Measurements +Use `@Timer()` to time every call. Works with sync and async functions and with sync and async generators. One `Measurement` per call is appended to the wrapped callable's `measurements` deque. ```python ->>> import time >>> from timerun import Timer ->>> timer = Timer() ->>> with timer: -... time.sleep(0.1) # your code here ->>> with timer: -... time.sleep(0.1) # your code here ->>> print(timer.duration) # Last duration -0:00:00.100000000 ->>> print(timer.durations) # All durations -(ElapsedTime(nanoseconds=100000000), ElapsedTime(nanoseconds=100000000)) +>>> @Timer() +... def func(): # function to be measured +... return +... 
+>>> func() +>>> func.measurements[-1].wall_time.timedelta +datetime.timedelta(microseconds=11) +>>> func.measurements[-1].cpu_time.timedelta +datetime.timedelta(microseconds=8) ``` -### Advanced Options - -```python ->>> from timerun import Timer ->>> # Exclude sleep time from measurements ->>> timer = Timer(count_sleep=False) ->>> # Limit storage to last 10 measurements ->>> timer = Timer(max_len=10) -``` +*Note: Argument `maxlen` caps how many measurements are kept (e.g. `@Timer(maxlen=10)`). By default the deque is unbounded.* ## Contributing -We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines on how to contribute to this project. +Contributions are welcome. See [CONTRIBUTING.md](https://github.com/HH-MWB/timerun/blob/main/CONTRIBUTING.md) for setup, testing, and pull request guidelines. ## License -This project is licensed under the MIT License - see the [LICENSE](https://github.com/HH-MWB/timerun/blob/main/LICENSE) file for details. +This project is licensed under the MIT License — see the [LICENSE](https://github.com/HH-MWB/timerun/blob/main/LICENSE) file for details. diff --git a/features/__init__.py b/features/__init__.py new file mode 100644 index 0000000..dd16409 --- /dev/null +++ b/features/__init__.py @@ -0,0 +1 @@ +"""Behave BDD features and step definitions.""" diff --git a/features/block_timing.feature b/features/block_timing.feature new file mode 100644 index 0000000..5ad828e --- /dev/null +++ b/features/block_timing.feature @@ -0,0 +1,77 @@ +Feature: Block timing + + As someone measuring duration, + I want to time blocks of code (sync, async, or threaded), + so that I get per-task timings and can attach metadata. 
+ + # --- Basic timing: sync, async, CPU-bound --- + + Scenario: Blocking sleep with `with` yields wall time and near-zero CPU time + Given a blocking operation that runs for around 10,000,000 nanoseconds + When I measure the operation using `with` + Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + Scenario: Async sleep with `async with` yields wall time and near-zero CPU time + Given an async operation that runs for around 10,000,000 nanoseconds + When I measure the async operation using `async with` + Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + Scenario: CPU-bound block with `with` yields wall and CPU time close together + Given a CPU-bound operation that runs for around 10,000,000 nanoseconds + When I measure the operation using `with` + Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 10,000,000 nanoseconds + And the measurement's CPU time is close to wall time + + # --- One Timer, multiple blocks or threads --- + + Scenario: Two threads with one Timer yield one measurement per thread + Given each thread sleeps 5,000,000 nanoseconds + When I measure blocks from 2 threads using the same Timer instance + Then the measurements are from different threads + + Scenario: Two sequential blocks with one Timer yield correct durations + Given the first block duration is 5,000,000 nanoseconds + And the second block duration is 10,000,000 nanoseconds + When I measure two sequential blocks with the same Timer instance + Then the first measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds + And the second measurement's wall 
time duration is within the configured buffer of 10,000,000 nanoseconds + + Scenario: Nested blocks with one Timer yield independent outer and inner times + Given the outer block duration is 20,000,000 nanoseconds + And the inner block duration is 5,000,000 nanoseconds + When I measure nested blocks with the same Timer instance + Then the outer measurement's wall time duration is within the configured buffer of 25,000,000 nanoseconds + And the inner measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds + And the outer measurement's wall time duration is at least the inner measurement's wall time duration + + # --- Metadata --- + + Scenario: Initial metadata is carried on the yielded measurement + Given metadata run_id "exp-1" and tag "baseline" + When I measure a code block with that metadata + Then the measurement's metadata key "run_id" is "exp-1" + And the measurement's metadata key "tag" is "baseline" + + Scenario: Metadata set in first block is not visible in second block (reused Timer) + Given metadata run_id "same-run" and tag "original" + And I will add metadata key "extra" as "from_first_block" in the first block + When I measure two blocks with the same Timer instance and that metadata + Then the first measurement's metadata key "extra" is "from_first_block" + And the second measurement's metadata key "run_id" is "same-run" + And the second measurement's metadata key "tag" is "original" + And the second measurement's metadata does not contain key "extra" + + # --- Edge cases and errors --- + + Scenario: Block that raises still yields measurement; exception propagates + When I measure a code block that raises an exception + Then the block yielded a measurement + And an exception was propagated to the caller + + Scenario: __exit__ without __enter__ raises RuntimeError + When I call __exit__ on a Timer instance without calling __enter__ first + Then a RuntimeError is raised + And the error message is "__exit__ called 
without a matching __enter__" diff --git a/features/environment.py b/features/environment.py new file mode 100644 index 0000000..97ad226 --- /dev/null +++ b/features/environment.py @@ -0,0 +1,19 @@ +"""Behave environment hooks. + +Runs before/after the test run, features, or scenarios. +https://behave.readthedocs.io/en/stable/tutorial.html#environmental-controls +""" + +# Example hooks (uncomment and customize if needed): +# +# def before_all(context): +# pass +# +# def after_all(context): +# pass +# +# def before_scenario(context, scenario): +# pass +# +# def after_scenario(context, scenario): +# pass diff --git a/features/function_timing.feature b/features/function_timing.feature new file mode 100644 index 0000000..a6aa150 --- /dev/null +++ b/features/function_timing.feature @@ -0,0 +1,61 @@ +Feature: Function timing + + As someone measuring duration, + I want to time function and generator execution with a decorator, + so that I get per-call measurements and can attach metadata. + + # --- Sync and async functions --- + + Scenario: Timing a synchronous sleeping function records real time and minimal CPU time + Given a sync function that sleeps for around 10,000,000 nanoseconds + When I call the decorated function + Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + Scenario: Timing an async sleeping function records real time and minimal CPU time + Given an async function that sleeps for around 10,000,000 nanoseconds + When I call the decorated function + Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + # --- Sync and async generators --- + + Scenario: Fully consuming a sync generator records one measurement + Given a sync generator that yields 3 items and sleeps 5,000,000 
nanoseconds total + When I fully consume the decorated generator + Then the decorated function's measurements deque has 1 entry + + Scenario: Fully consuming an async generator records one measurement + Given an async generator that yields 3 items and sleeps 5,000,000 nanoseconds total + When I fully consume the decorated generator + Then the decorated function's measurements deque has 1 entry + + # --- Metadata --- + + Scenario: Metadata attached to the timer appears on each measurement + Given metadata run_id "exp-1" and tag "baseline" + When I call a decorated function with that metadata + Then the measurement's metadata key "run_id" is "exp-1" + And the measurement's metadata key "tag" is "baseline" + + # --- Exceptions --- + + Scenario: When a timed function raises an error, one measurement is still recorded and the error is re-raised + When I call a decorated function that raises an exception + Then the decorated function's measurements deque has 1 entry + And an exception was propagated to the caller + + # --- Limiting stored measurements (maxlen) --- + + Scenario: With maxlen 2, only the last 2 measurements are kept + Given a sync function that sleeps for around 1,000,000 nanoseconds + When I decorate it with Timer with maxlen 2 + And I call the decorated function 3 times + Then the decorated function's measurements deque has 2 entries + + # --- Thread safety --- + + Scenario: Two threads calling the same timed function produce two measurements + Given a sync function that sleeps for around 5,000,000 nanoseconds + When I call the decorated function from 2 threads concurrently + Then the decorated function's measurements deque has 2 entries diff --git a/features/measurement.feature b/features/measurement.feature new file mode 100644 index 0000000..337be64 --- /dev/null +++ b/features/measurement.feature @@ -0,0 +1,31 @@ +Feature: Measurement + + As someone measuring duration, + I want a value with wall time, CPU time, and optional metadata, + so that I can 
store results and attach labels. + + # --- Creating a measurement --- + + Scenario: Measurement from wall and CPU spans has both durations + Given a wall time span from 0 to 1,000,000 + And a CPU time span from 0 to 500,000 + When I create a measurement from the wall time span and the CPU time span + Then the measurement's wall time duration is 1,000,000 nanoseconds + And the measurement's CPU time duration is 500,000 nanoseconds + + Scenario: New measurement has empty metadata by default + Given a wall time span from 0 to 1 + And a CPU time span from 0 to 1 + When I create a measurement from the wall time span and the CPU time span + Then the measurement's metadata is an empty dict + + # --- Metadata --- + + Scenario: Metadata can be set and read back + Given a wall time span from 0 to 1 + And a CPU time span from 0 to 1 + When I create a measurement from the wall time span and the CPU time span + And the metadata key "run_id" is set to "exp-1" + And the metadata key "tag" is set to "baseline" + Then the measurement's metadata key "run_id" is "exp-1" + And the measurement's metadata key "tag" is "baseline" diff --git a/features/steps/__init__.py b/features/steps/__init__.py new file mode 100644 index 0000000..7fe3dcf --- /dev/null +++ b/features/steps/__init__.py @@ -0,0 +1 @@ +"""Behave step definitions.""" diff --git a/features/steps/block_timing_steps.py b/features/steps/block_timing_steps.py new file mode 100644 index 0000000..c1ceebc --- /dev/null +++ b/features/steps/block_timing_steps.py @@ -0,0 +1,285 @@ +"""Step definitions for the Block timing feature.""" + +from __future__ import annotations + +import asyncio +from concurrent.futures import ThreadPoolExecutor +from typing import TYPE_CHECKING + +from behave import given, then, when + +import timerun +from features.steps.utils import ( + BUFFER_NS, + assert_metadata_key_equals, + assert_wall_time_within_buffer, + sleep_wall_at_least, + spin_wall_at_least, +) + +if TYPE_CHECKING: + from behave.runner 
import Context + + +# --- Given --- + + +@given( + "a {kind} operation that runs for around {duration_ns:n} nanoseconds", +) +@given( + "an {kind} operation that runs for around {duration_ns:n} nanoseconds", +) +def step_given_operation( + context: Context, + kind: str, + duration_ns: int, +) -> None: + """Store operation duration and kind.""" + context.operation_duration_ns = duration_ns + context.operation_kind = kind + + +@given("each thread sleeps {duration_ns:n} nanoseconds") +def step_given_thread_sleep(context: Context, duration_ns: int) -> None: + """Store thread sleep duration.""" + context.thread_sleep_ns = duration_ns + + +@given("the {which} block duration is {duration_ns:n} nanoseconds") +def step_given_block_duration( + context: Context, + which: str, + duration_ns: int, +) -> None: + """Store block duration for which block.""" + setattr(context, f"{which}_block_ns", duration_ns) + + +@given('I will add metadata key "{key}" as "{value}" in the first block') +def step_given_metadata_add_in_first( + context: Context, + key: str, + value: str, +) -> None: + """First block will add key/value to metadata.""" + context.metadata_add_in_first = (key, value) + + +# --- When --- + + +@when("I measure the operation using `with`") +def step_measure_operation_using_with(context: Context) -> None: + """Measure with Timer(); sleep or spin per operation_kind.""" + with timerun.Timer() as context.measurement: + if getattr(context, "operation_kind", "blocking") == "CPU-bound": + spin_wall_at_least(context.operation_duration_ns) + else: + sleep_wall_at_least(context.operation_duration_ns) + + +@when("I measure the async operation using `async with`") +def step_measure_async_using_async_with(context: Context) -> None: + """Measure async with Timer(); asyncio.sleep.""" + + async def run() -> timerun.Measurement: + async with timerun.Timer() as m: + await asyncio.sleep(context.operation_duration_ns / 1e9) + return m + + context.measurement = asyncio.run(run()) + + +@when( 
+ "I measure blocks from {thread_count:n} threads " + "using the same Timer instance", +) +def step_measure_blocks_from_threads( + context: Context, + thread_count: int, +) -> None: + """Measure blocks from N threads.""" + context.thread_count = thread_count + cm = timerun.Timer() + + # Worker: enter timer, sleep, return measurement. + def run() -> timerun.Measurement: + with cm as m: + sleep_wall_at_least(context.thread_sleep_ns) + return m + + # Run thread_count workers and collect measurements. + with ThreadPoolExecutor(max_workers=thread_count) as ex: + futures = [ex.submit(run) for _ in range(thread_count)] + context.thread_measurements = [f.result() for f in futures] + + +@when("I measure two sequential blocks with the same Timer instance") +def step_measure_two_sequential_blocks(context: Context) -> None: + """Measure two sequential blocks.""" + cm = timerun.Timer() + + with cm as context.first_measurement: + sleep_wall_at_least(context.first_block_ns) + + with cm as context.second_measurement: + sleep_wall_at_least(context.second_block_ns) + + +@when("I measure nested blocks with the same Timer instance") +def step_measure_nested_blocks(context: Context) -> None: + """Measure nested blocks.""" + cm = timerun.Timer() + + with cm as context.outer_measurement: + sleep_wall_at_least(context.outer_block_ns) + + with cm as context.inner_measurement: + sleep_wall_at_least(context.inner_block_ns) + + +@when("I measure a code block with that metadata") +def step_measure_block_with_metadata(context: Context) -> None: + """Measure with Timer(metadata=...); store result.""" + with timerun.Timer(metadata=context.metadata) as context.measurement: + pass + + +@when( + "I measure two blocks with the same Timer instance and that metadata", +) +def step_measure_two_blocks_with_metadata(context: Context) -> None: + """Measure two blocks; first may add metadata.""" + cm = timerun.Timer(metadata=context.metadata) + + # First block: optionally add key/value to measurement 
metadata. + with cm as context.first_measurement: + if hasattr(context, "metadata_add_in_first"): + context.first_measurement.metadata[ + context.metadata_add_in_first[0] + ] = context.metadata_add_in_first[1] + + # Second block: no extra metadata. + with cm as context.second_measurement: + pass + + +@when("I measure a code block that raises an exception") +def step_measure_block_raises(context: Context) -> None: + """Measure raising block; catch exception.""" + try: + with timerun.Timer() as context.measurement: + raise ValueError # noqa: TRY301 + except ValueError as e: + context.exception = e + + +@when("I call __exit__ on a Timer instance without calling __enter__ first") +def step_exit_without_enter(context: Context) -> None: + """Call Timer().__exit__ without __enter__; store error.""" + try: + timerun.Timer().__exit__(None, None, None) + except RuntimeError as e: + context.exception = e + + +# --- Then --- + + +@then("the measurement's CPU time is close to wall time") +def step_cpu_close_to_wall(context: Context) -> None: + """Assert CPU close to wall time.""" + # Required context validation. + assert context.measurement.wall_time is not None + assert context.measurement.cpu_time is not None + + # Duration in [wall - BUFFER_NS, wall]. 
+ wall = context.measurement.wall_time.duration + cpu = context.measurement.cpu_time.duration + min_cpu = max(0, wall - BUFFER_NS) + assert min_cpu <= cpu <= wall, ( + f"CPU {cpu} not in [wall-BUFFER_NS, wall] = [{min_cpu}, {wall}]" + ) + + +@then( + "the {which} measurement's wall time duration is within the configured " + "buffer of {expected_ns:n} nanoseconds", +) +def step_which_measurement_wall_within_buffer( + context: Context, + which: str, + expected_ns: int, +) -> None: + """Assert which measurement wall time in buffer.""" + assert_wall_time_within_buffer( + getattr(context, f"{which}_measurement"), + expected_ns, + BUFFER_NS, + ) + + +@then( + "the outer measurement's wall time duration is at least the inner " + "measurement's wall time duration", +) +def step_outer_wall_at_least_inner(context: Context) -> None: + """Assert outer wall >= inner.""" + # Required context validation: both have wall_time. + assert context.outer_measurement.wall_time is not None + assert context.inner_measurement.wall_time is not None + + # Duration: outer >= inner. 
+ outer_d = context.outer_measurement.wall_time.duration + inner_d = context.inner_measurement.wall_time.duration + assert outer_d >= inner_d, f"outer {outer_d} < inner {inner_d}" + + +@then('the {which} measurement\'s metadata key "{key}" is "{value}"') +def step_measurement_metadata_key( + context: Context, + which: str, + key: str, + value: str, +) -> None: + """Assert which measurement metadata[key] is value.""" + assert_metadata_key_equals( + getattr(context, f"{which}_measurement"), + key, + value, + ) + + +@then('the second measurement\'s metadata does not contain key "{key}"') +def step_second_measurement_metadata_no_key( + context: Context, + key: str, +) -> None: + """Assert second measurement has no key.""" + assert key not in context.second_measurement.metadata + + +@then("the measurements are from different threads") +def step_measurements_from_different_threads(context: Context) -> None: + """Assert N distinct measurements.""" + # Required context validation. + measurements = context.thread_measurements + + # Exactly thread_count measurements. + assert len(measurements) == context.thread_count, ( + f"expected {context.thread_count} measurements, " + f"got {len(measurements)}" + ) + + # All distinct (one measurement per thread). + assert len(measurements) == len({id(m) for m in measurements}), ( + "measurements are not all distinct (one per thread)" + ) + + +@then("the block yielded a measurement") +def step_block_yielded_measurement(context: Context) -> None: + """Assert block produced a measurement.""" + assert context.measurement is not None + assert context.measurement.wall_time is not None diff --git a/features/steps/common_steps.py b/features/steps/common_steps.py new file mode 100644 index 0000000..6c0055d --- /dev/null +++ b/features/steps/common_steps.py @@ -0,0 +1,94 @@ +"""Shared step definitions used by multiple features. 
+ +Steps here use consistent wording and semantics across features +(exception assertions, error messages, measurement metadata, wall time buffer). +""" + +from __future__ import annotations + +import builtins +from typing import TYPE_CHECKING + +from behave import given, then + +from features.steps.utils import ( + BUFFER_NS, + CPU_LOWER_SLACK_NS, + assert_metadata_key_equals, + assert_wall_time_within_buffer, +) + +if TYPE_CHECKING: + from behave.runner import Context + + +# --- Given --- + + +@given('metadata run_id "{run_id}" and tag "{tag}"') +def step_given_metadata(context: Context, run_id: str, tag: str) -> None: + """Store metadata for Timer.""" + context.metadata = {"run_id": run_id, "tag": tag} + + +# --- Then --- + + +@then("a {exception_type} is raised") +def step_exception_raised(context: Context, exception_type: str) -> None: + """Assert stored exception type.""" + # Required: an exception was stored by the When step. + assert hasattr(context, "exception"), "Expected an exception to be raised" + + # Type must match (e.g. ValueError, RuntimeError). 
+ assert isinstance(context.exception, getattr(builtins, exception_type)), ( + f"Expected {exception_type}, got {type(context.exception).__name__}" + ) + + +@then('the error message is "{message}"') +def step_error_message_is(context: Context, message: str) -> None: + """Assert exception message.""" + assert hasattr(context, "exception"), "Expected an exception to be raised" + assert str(context.exception) == message + + +@then("an exception was propagated to the caller") +def step_exception_propagated(context: Context) -> None: + """Assert ValueError was caught.""" + assert hasattr(context, "exception") + assert isinstance(context.exception, ValueError) + + +@then( + "the measurement's wall time duration is within the configured buffer of " + "{expected_ns:n} nanoseconds", +) +def step_wall_time_within_buffer(context: Context, expected_ns: int) -> None: + """Assert wall time in buffer.""" + assert_wall_time_within_buffer(context.measurement, expected_ns, BUFFER_NS) + + +@then( + "the measurement's CPU time duration is within the configured buffer of " + "{expected_ns:n} nanoseconds", +) +def step_cpu_time_within_buffer(context: Context, expected_ns: int) -> None: + """Assert CPU time in buffer.""" + assert context.measurement.cpu_time is not None + duration = context.measurement.cpu_time.duration + min_ns = max(0, expected_ns - CPU_LOWER_SLACK_NS) + max_ns = expected_ns + BUFFER_NS + assert min_ns <= duration <= max_ns, ( + f"CPU time {duration} not in [{min_ns}, {max_ns}] (buffer={BUFFER_NS})" + ) + + +@then('the measurement\'s metadata key "{key}" is "{value}"') +def step_measurement_metadata_key_is( + context: Context, + key: str, + value: str, +) -> None: + """Assert metadata[key] is value.""" + assert_metadata_key_equals(context.measurement, key, value) diff --git a/features/steps/function_timing_steps.py b/features/steps/function_timing_steps.py new file mode 100644 index 0000000..a5d6605 --- /dev/null +++ b/features/steps/function_timing_steps.py @@ -0,0 
+1,202 @@ +"""Step definitions for the Function timing feature.""" + +from __future__ import annotations + +import asyncio +from concurrent.futures import ThreadPoolExecutor +from typing import TYPE_CHECKING + +from behave import given, then, when + +import timerun +from features.steps.utils import ( + sleep_wall_at_least, +) + +if TYPE_CHECKING: + from behave.runner import Context + + +# --- Given --- + + +@given( + "a {kind} function that sleeps for around {duration_ns:n} nanoseconds", +) +@given( + "an {kind} function that sleeps for around {duration_ns:n} nanoseconds", +) +def step_given_func_sleep( + context: Context, + kind: str, + duration_ns: int, +) -> None: + """Store func kind and duration.""" + context.func_duration_ns = duration_ns + context.func_kind = kind + + +@given( + "a {kind} generator that yields {count:n} items and sleeps " + "{duration_ns:n} nanoseconds total", +) +@given( + "an {kind} generator that yields {count:n} items and sleeps " + "{duration_ns:n} nanoseconds total", +) +def step_given_gen( + context: Context, + kind: str, + count: int, + duration_ns: int, +) -> None: + """Store generator kind, duration and count.""" + context.gen_duration_ns = duration_ns + context.gen_count = count + context.gen_kind = kind + + +# --- When --- + + +@when("I call the decorated function") +def step_when_call_decorated_func(context: Context) -> None: + """Decorate function with Timer(), run it.""" + if context.func_kind == "async": + + @timerun.Timer() + async def async_func() -> None: + await asyncio.sleep(context.func_duration_ns / 1e9) + + asyncio.run(async_func()) + context.decorated_function = async_func + context.measurement = async_func.measurements[-1] + else: + + @timerun.Timer() + def sync_func() -> None: + sleep_wall_at_least(context.func_duration_ns) + + sync_func() + context.decorated_function = sync_func + context.measurement = sync_func.measurements[-1] + + +@when("I fully consume the decorated generator") +def 
step_when_consume_gen(context: Context) -> None: # noqa: C901 + """Decorate generator with Timer(), consume fully.""" + per_sleep = context.gen_duration_ns // context.gen_count + if context.gen_kind == "async": + + @timerun.Timer() + async def async_gen() -> object: + for i in range(context.gen_count): + await asyncio.sleep(per_sleep / 1e9) + yield i + + async def run() -> None: + async for _ in async_gen(): + pass + + asyncio.run(run()) + context.decorated_function = async_gen + context.measurement = async_gen.measurements[-1] + else: + + @timerun.Timer() + def sync_gen() -> object: + for i in range(context.gen_count): + sleep_wall_at_least(per_sleep) + yield i + + for _ in sync_gen(): + pass + context.decorated_function = sync_gen + context.measurement = sync_gen.measurements[-1] + + +@when("I call a decorated function with that metadata") +def step_when_call_with_metadata(context: Context) -> None: + """Call no-op function decorated with Timer(metadata=...).""" + + @timerun.Timer(metadata=context.metadata) + def f() -> None: + pass + + f() + context.decorated_function = f + context.measurement = f.measurements[-1] + + +@when("I call a decorated function that raises an exception") +def step_when_call_raising(context: Context) -> None: + """Call raising function under Timer(); catch exception.""" + + @timerun.Timer() + def raising() -> None: + raise ValueError + + # Call, catch exception for Then to assert; store function and measurement. 
+ try: + raising() + except ValueError as e: + context.exception = e + context.decorated_function = raising + context.measurement = raising.measurements[-1] + + +@when("I decorate it with Timer with maxlen {maxlen:n}") +def step_when_decorate_maxlen(context: Context, maxlen: int) -> None: + """Store maxlen for next step.""" + context.func_maxlen = maxlen + + +@when("I call the decorated function {times:n} times") +def step_when_call_three_times(context: Context, times: int) -> None: + """Decorate with Timer(maxlen=...), call N times.""" + + @timerun.Timer(maxlen=context.func_maxlen) + def sync_func() -> None: + sleep_wall_at_least(context.func_duration_ns) + + # Call times times; only last maxlen measurements kept. + for _ in range(times): + sync_func() + context.decorated_function = sync_func + + +@when( + "I call the decorated function from {thread_count:n} threads concurrently", +) +def step_when_call_from_threads(context: Context, thread_count: int) -> None: + """Run decorated function from N threads.""" + + @timerun.Timer() + def sync_func() -> None: + sleep_wall_at_least(context.func_duration_ns) + + # Worker: call the timed function once. + def run() -> None: + sync_func() + + # Run thread_count workers concurrently; store function and count for Then. 
+ with ThreadPoolExecutor(max_workers=thread_count) as ex: + futures = [ex.submit(run) for _ in range(thread_count)] + for f in futures: + f.result() + context.decorated_function = sync_func + context.thread_count = thread_count + + +# --- Then --- + + +@then("the decorated function's measurements deque has {n:n} entry") +@then("the decorated function's measurements deque has {n:n} entries") +def step_then_measurements_count(context: Context, n: int) -> None: + """Assert measurements count is n.""" + func = context.decorated_function + assert hasattr(func, "measurements") + assert len(func.measurements) == n, ( + f"expected {n} measurements, got {len(func.measurements)}" + ) diff --git a/features/steps/measurement_steps.py b/features/steps/measurement_steps.py new file mode 100644 index 0000000..a6e4298 --- /dev/null +++ b/features/steps/measurement_steps.py @@ -0,0 +1,75 @@ +"""Step definitions for the Measurement record feature.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from behave import given, then, when + +import timerun + +if TYPE_CHECKING: + from behave.runner import Context + +# --- Given --- + + +@given("a {kind} time span from {start:n} to {end:n}") +def step_given_typed_time_span( + context: Context, + kind: str, + start: int, + end: int, +) -> None: + """Set wall or CPU time span on context.""" + setattr( + context, + f"{kind.lower()}_time_span", + timerun.TimeSpan(start=start, end=end), + ) + + +# --- When --- + + +@when("I create a measurement from the wall time span and the CPU time span") +def step_create_measurement_from_spans(context: Context) -> None: + """Build Measurement from spans.""" + context.measurement = timerun.Measurement( + wall_time=context.wall_time_span, + cpu_time=context.cpu_time_span, + ) + + +@when('the metadata key "{key}" is set to "{value}"') +def step_measurement_metadata_key_set( + context: Context, + key: str, + value: str, +) -> None: + """Set measurement metadata[key].""" + 
context.measurement.metadata[key] = value + + +# --- Then --- + + +@then("the measurement's {kind} time duration is {expected:n} nanoseconds") +def step_measurement_time_duration( + context: Context, + kind: str, + expected: int, +) -> None: + """Assert measurement duration equals expected.""" + assert ( + getattr(context.measurement, f"{kind.lower()}_time").duration + == expected + ) + + +@then("the measurement's metadata is an empty dict") +def step_measurement_metadata_empty_dict(context: Context) -> None: + """Assert metadata is empty dict.""" + metadata = context.measurement.metadata + assert isinstance(metadata, dict) + assert not metadata diff --git a/features/steps/time_span_steps.py b/features/steps/time_span_steps.py new file mode 100644 index 0000000..8c2b40b --- /dev/null +++ b/features/steps/time_span_steps.py @@ -0,0 +1,100 @@ +"""Step definitions for the time span feature.""" + +import operator +from datetime import timedelta + +import parse +from behave import given, register_type, then, when +from behave.runner import Context + +import timerun + +# Gherkin relation phrases to operator functions for span comparison. 
+RELATION_OPERATORS = { + "equals": operator.eq, + "does not equal": operator.ne, + "is less than": operator.lt, + "is greater than": operator.gt, + "is less than or equal to": operator.le, + "is greater than or equal to": operator.ge, +} + +register_type( + Relation=parse.with_pattern(r"|".join(RELATION_OPERATORS))( + lambda text: text.strip(), + ), +) + + +# --- Given --- + + +@given("a time span from {start:n} to {end:n}") +def step_given_time_span(context: Context, start: int, end: int) -> None: + """Create TimeSpan, store on context.""" + context.time_span = timerun.TimeSpan(start=start, end=end) + + +@given("span {name:w} of {duration:n} nanoseconds") +def step_given_span_of_duration( + context: Context, + name: str, + duration: int, +) -> None: + """Create TimeSpan(0, duration), store as named.""" + setattr( + context, + f"time_span_{name.lower()}", + timerun.TimeSpan(start=0, end=duration), + ) + + +# --- When --- + + +@when("I try to create a time span from {start:n} to {end:n}") +def step_try_create_time_span(context: Context, start: int, end: int) -> None: + """Create TimeSpan; store exception.""" + try: + timerun.TimeSpan(start=start, end=end) + except Exception as e: # noqa: BLE001 # pylint: disable=broad-exception-caught + context.exception = e + + +# --- Then --- + + +@then("the duration is {expected:n} nanoseconds") +def step_time_span_duration_is(context: Context, expected: int) -> None: + """Assert time_span duration.""" + assert context.time_span.duration == expected + + +@then("the timedelta is {seconds:f} seconds in standard Python timedelta type") +def step_timedelta_is_seconds_standard_type( + context: Context, + seconds: float, +) -> None: + """Assert timedelta equals seconds.""" + result = context.time_span.timedelta + assert isinstance(result, timedelta) + assert result == timedelta(seconds=seconds) + + +@then("time span A {relation:Relation} time span B") +def step_time_span_a_relation_b(context: Context, relation: str) -> None: + 
"""Assert two time spans satisfy relation.""" + assert RELATION_OPERATORS[relation]( + context.time_span_a, + context.time_span_b, + ) + + +@then("the {which:w} value is {expected:n}") +def step_time_span_value_is( + context: Context, + which: str, + expected: int, +) -> None: + """Assert start or end equals expected.""" + assert getattr(context.time_span, which) == expected diff --git a/features/steps/utils.py b/features/steps/utils.py new file mode 100644 index 0000000..90f4aa9 --- /dev/null +++ b/features/steps/utils.py @@ -0,0 +1,50 @@ +"""Shared utilities for step definitions. + +Constants and helpers for block_timing_steps and function_timing_steps +to avoid duplication and keep assertions consistent. +""" + +import time + +# Buffer: expected_ns <= duration <= expected_ns + BUFFER_NS. +# Covers sleep/scheduling jitter so tests don't flake. +BUFFER_NS = 10_000_000 # 10 ms + +# CPU can be slightly below wall time (scheduling); allow 1 ms undershoot. +CPU_LOWER_SLACK_NS = 1_000_000 + + +def sleep_wall_at_least(nanoseconds: int) -> None: + """Sleep at least nanoseconds (wall).""" + time.sleep(nanoseconds / 1e9) + + +def spin_wall_at_least(nanoseconds: int) -> None: + """Busy-loop at least nanoseconds (wall).""" + start = time.perf_counter_ns() + while time.perf_counter_ns() - start < nanoseconds: + pass + + +def assert_wall_time_within_buffer( + measurement: object, + expected_ns: int, + buffer_ns: int = BUFFER_NS, +) -> None: + """Assert wall time in buffer.""" + assert measurement.wall_time is not None + duration = measurement.wall_time.duration + max_ns = expected_ns + buffer_ns + assert expected_ns <= duration <= max_ns, ( + f"wall time {duration} not in [{expected_ns}, {max_ns}] " + f"(buffer={buffer_ns})" + ) + + +def assert_metadata_key_equals( + measurement: object, + key: str, + value: str, +) -> None: + """Assert metadata[key] equals value.""" + assert measurement.metadata[key] == value diff --git a/features/steps/version_steps.py 
b/features/steps/version_steps.py new file mode 100644 index 0000000..d1db665 --- /dev/null +++ b/features/steps/version_steps.py @@ -0,0 +1,30 @@ +"""Step definitions for the package version feature.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from behave import then, when + +import timerun + +if TYPE_CHECKING: + from behave.runner import Context + +# --- When --- + + +@when("I read the package version") +def step_read_version(context: Context) -> None: + """Read and store package version.""" + context.version = getattr(timerun, "__version__", None) + + +# --- Then --- + + +@then("the version is a non-empty string") +def step_version_non_empty_string(context: Context) -> None: + """Assert version is non-empty string.""" + assert isinstance(context.version, str) + assert len(context.version) > 0 diff --git a/features/time_span.feature b/features/time_span.feature new file mode 100644 index 0000000..fc4cd15 --- /dev/null +++ b/features/time_span.feature @@ -0,0 +1,43 @@ +Feature: Time span + + As someone measuring duration, + I want a time span, + so that I can compare durations, use timedelta, and read start or end. 
+ + # --- Duration and attributes --- + + Scenario: Span duration is readable in nanoseconds + Given a time span from 0 to 1,000,000 + Then the duration is 1,000,000 nanoseconds + + Scenario: Span start and end are readable + Given a time span from 1,000 to 2,000 + Then the start value is 1,000 + And the end value is 2,000 + + Scenario: Duration as standard Python timedelta + Given a time span from 0 to 2,500,000,000 + Then the timedelta is 2.5 seconds in standard Python timedelta type + + # --- Comparison --- + + Scenario Outline: Compare two spans by duration + Given span A of <duration_a> nanoseconds + And span B of <duration_b> nanoseconds + Then time span A <relation> time span B + + Examples: | duration_a | duration_b | relation | | 1,000,000 | 2,000,000 | is less than | | 3,000,000 | 1,000,000 | is greater than | | 1,000,000 | 1,000,000 | equals | | 1,000,000 | 2,000,000 | does not equal | | 1,000,000 | 1,000,000 | is less than or equal to | | 2,000,000 | 1,000,000 | is greater than or equal to | + + # --- Validation --- + + Scenario: end less than start raises ValueError + When I try to create a time span from 10 to 5 + Then a ValueError is raised + And the error message is "end must be >= start" diff --git a/features/version.feature b/features/version.feature new file mode 100644 index 0000000..59d5380 --- /dev/null +++ b/features/version.feature @@ -0,0 +1,9 @@ +Feature: Package version + + As a user or tool integrating with timerun, + I want to read the package version programmatically, + so that I can check compatibility or use it in automation. + + Scenario: Package version is readable + When I read the package version + Then the version is a non-empty string diff --git a/pyproject.toml b/pyproject.toml index 67d8443..2f1f48e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,9 +4,9 @@ build-backend = "setuptools.build_meta" [project] name = "timerun" -description = "TimeRun is a Python library for elapsed time measurement."
+description = "TimeRun is a Python library for time measurements." readme = "README.md" -requires-python = ">=3.9" +requires-python = ">=3.10" license = { text = "MIT" } keywords = [ "time", @@ -23,7 +23,6 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -35,7 +34,7 @@ classifiers = [ dynamic = ["version"] [project.optional-dependencies] -dev = ["pytest", "pytest-asyncio", "pytest-cov"] +dev = ["behave", "coverage"] [project.urls] Homepage = "https://github.com/HH-MWB/timerun" @@ -44,9 +43,67 @@ Changelog = "https://github.com/HH-MWB/timerun/releases" PyPI = "https://pypi.org/project/timerun" [tool.setuptools] +py-modules = ["timerun"] zip-safe = true include-package-data = false license-files = ["LICENSE"] [tool.setuptools.dynamic] version = { attr = "timerun.__version__" } + +[tool.bandit] +exclude_dirs = ["features/steps"] + +[tool.bandit.try_except_pass] +check_typed_exception = true + +[tool.mypy] +strict = true +disallow_any_generics = true +disallow_any_unimported = true +disallow_any_explicit = true +disallow_untyped_defs = true +disallow_untyped_calls = true +disallow_untyped_decorators = true +strict_equality = true +strict_equality_for_none = true +warn_redundant_casts = true +warn_return_any = true +warn_unreachable = true +warn_unused_ignores = true +enable_error_code = [ + "deprecated", + "exhaustive-match", + "explicit-override", + "ignore-without-code", + "possibly-undefined", + "redundant-expr", + "redundant-self", + "truthy-bool", + "unimported-reveal", + "unused-awaitable", +] + +[tool.pylint.format] +max-line-length = 79 + +[tool.pylint.messages_control] +disable = ["not-callable"] # false positive for behave's when/then decorators + +[tool.ruff] +line-length = 79 +fix = true 
+force-exclude = true + +[tool.ruff.lint] +select = ["ALL"] +ignore = [ + "D203", # Incompatible with D211 (no-blank-line-before-class) + "D213", # Incompatible with D212 (multi-line-summary-first-line) +] + +[tool.ruff.lint.mccabe] +max-complexity = 5 + +[tool.ruff.lint.per-file-ignores] +"features/steps/*.py" = ["S101"] diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index 683ed66..0000000 --- a/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Test suite for timerun.""" diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index 9a42247..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,211 +0,0 @@ -"""A collection of shared PyTest fixtures for timerun.""" - -from collections.abc import Callable, Iterable, Iterator -from contextlib import AbstractContextManager, contextmanager -from unittest.mock import Mock - -import pytest - -from timerun import ElapsedTime, Stopwatch, Timer - -# =========================================================================== # -# Patcheres # -# =========================================================================== # - - -@pytest.fixture -def patch_clock( - monkeypatch: pytest.MonkeyPatch, -) -> Callable[[int], AbstractContextManager[None]]: - """Patch the clock method in Stopwatch. - - Parameters - ---------- - monkeypatch : MonkeyPatch - The fixture has been used to patch the clock method. - - Returns - ------- - Callable[[int], AbstractContextManager[None]] - A context manager takes integer argument and patch that value as - the return value of the clock method. - - Examples - -------- - >>> with patch_clock(1): - ... pass - - """ - - @contextmanager - def patch(elapsed_ns: int) -> Iterator[None]: - """Patch clock method through monkeypatch context. - - Parameters - ---------- - elapsed_ns : int - The value should be returned by the clock method. - - Yields - ------ - None - Control is yielded back to the caller. 
- - """ - monkeypatch.setattr(Stopwatch, "_clock", lambda _: elapsed_ns) - yield - - return patch - - -@pytest.fixture -def patch_split( - monkeypatch: pytest.MonkeyPatch, -) -> Callable[[Iterable[int]], AbstractContextManager[None]]: - """Patch the split method in Timer. - - Parameters - ---------- - monkeypatch : MonkeyPatch - The fixture has been used to patch the split method. - - Returns - ------- - Callable[[Iterable[int]], AbstractContextManager[None]] - A context manager takes a list of integers as nanoseconds and - patch those as the return values of the elapse method. - - Examples - -------- - >>> with patch_split([100, 200, 300]): - ... pass - - """ - - @contextmanager - def patch(elapsed_times: Iterable[int]) -> Iterator[None]: - """Patch split method through monkeypatch context. - - Parameters - ---------- - elapsed_times : Iterable[int] - The nanoseconds should be returned by the split method. - - Yields - ------ - None - Control is yielded back to the caller. - - """ - mock_stopwatch = Mock(spec=["reset", "split"]) - mock_stopwatch.split.configure_mock( - side_effect=[ElapsedTime(nanoseconds=t) for t in elapsed_times], - ) - - monkeypatch.setattr(Timer, "_stopwatch", mock_stopwatch) - yield - - return patch - - -# =========================================================================== # -# Initiated Instances # -# =========================================================================== # - - -@pytest.fixture -def stopwatch() -> Stopwatch: - """Create a Stopwatch started at time ``0``. - - Returns - ------- - Stopwatch - A stopwatch started at time ``0``. - - """ - watch: Stopwatch = Stopwatch() - watch._start = 0 # pylint: disable=protected-access # noqa: SLF001 - return watch - - -@pytest.fixture -def timer() -> Timer: - """Create a Timer with unlimited storage size. - - Returns - ------- - Timer - A newly created Timer. 
- - """ - return Timer() - - -# =========================================================================== # -# Elapsed Time # -# =========================================================================== # - - -@pytest.fixture -def elapsed_1_ns() -> ElapsedTime: - """Elapsed Time of 1 nanosecond. - - Returns - ------- - ElapsedTime - Elapsed time of 1 nanosecond. - - """ - return ElapsedTime(nanoseconds=1) - - -@pytest.fixture -def elapsed_100_ns() -> ElapsedTime: - """Elapsed Time of 100 nanoseconds. - - Returns - ------- - ElapsedTime - Elapsed time of 100 nanoseconds. - - """ - return ElapsedTime(nanoseconds=100) - - -@pytest.fixture -def elapsed_1_ms() -> ElapsedTime: - """Elapsed Time of 1 microsecond. - - Returns - ------- - ElapsedTime - Elapsed time of 1 microsecond. - - """ - return ElapsedTime(nanoseconds=1000) - - -@pytest.fixture -def elapsed_1_pt_5_ms() -> ElapsedTime: - """Elapsed Time of 1.5 microseconds. - - Returns - ------- - ElapsedTime - Elapsed time of 1.5 microseconds. - - """ - return ElapsedTime(nanoseconds=1500) - - -@pytest.fixture -def elapsed_1_sec() -> ElapsedTime: - """Elapsed Time of 1 second. - - Returns - ------- - ElapsedTime - Elapsed time of 1 second. 
- - """ - return ElapsedTime(nanoseconds=int(1e9)) diff --git a/tests/test_elapsedtime.py b/tests/test_elapsedtime.py deleted file mode 100644 index f54ff70..0000000 --- a/tests/test_elapsedtime.py +++ /dev/null @@ -1,165 +0,0 @@ -"""A collection of tests for class ``ElapsedTime``.""" - -# pylint: disable=no-self-use,magic-value-comparison - -from dataclasses import FrozenInstanceError -from datetime import timedelta - -import pytest - -from timerun import ElapsedTime - - -class TestInit: - """Test suite for Elapsed Time initialization.""" - - def test_init_without_keyword(self) -> None: - """Test initiate ElapsedTime.""" - duration: ElapsedTime = ElapsedTime(1) - assert duration.nanoseconds == 1 - - def test_init_using_keyword(self) -> None: - """Test initiate ElapsedTime using keyword.""" - duration: ElapsedTime = ElapsedTime(nanoseconds=1) - assert duration.nanoseconds == 1 - - -class TestImmutable: # pylint: disable=too-few-public-methods - """Test ElapsedTime is immutable.""" - - def test_modify_after_init(self, elapsed_1_ns: ElapsedTime) -> None: - """Test modify after initialization. - - ElapsedTime is expected to be immutable. Update attribute after - would fail and raise ``FrozenInstanceError``. - - Parameters - ---------- - elapsed_1_ns : ElapsedTime - A ElapsedTime instance will be using to update attribute. 
- - """ - with pytest.raises(FrozenInstanceError): - elapsed_1_ns.nanoseconds = 0 # type: ignore[misc] - assert elapsed_1_ns.nanoseconds == 1 - - -class TestComparable: - """Test ElapsedTime is comparable.""" - - def test_equal(self) -> None: - """Test '==' operator for ElapsedTime.""" - assert ElapsedTime(nanoseconds=1000) == ElapsedTime(nanoseconds=1000) - - def test_not_equal(self) -> None: - """Test '!=' operator for ElapsedTime.""" - assert ElapsedTime(nanoseconds=1000) != ElapsedTime(nanoseconds=2000) - - def test_greater_than(self) -> None: - """Test '>' operator for ElapsedTime.""" - assert ElapsedTime(nanoseconds=2000) > ElapsedTime(nanoseconds=1000) - - def test_smaller_than(self) -> None: - """Test '<' operator for ElapsedTime.""" - assert ElapsedTime(nanoseconds=1000) < ElapsedTime(nanoseconds=2000) - - def test_greater_or_equal(self) -> None: - """Test '>=' operator for ElapsedTime.""" - assert ElapsedTime(nanoseconds=1000) >= ElapsedTime(nanoseconds=1000) - assert ElapsedTime(nanoseconds=2000) >= ElapsedTime(nanoseconds=1000) - - def test_smaller_or_equal(self) -> None: - """Test '<=' operator for ElapsedTime.""" - assert ElapsedTime(nanoseconds=1000) <= ElapsedTime(nanoseconds=1000) - assert ElapsedTime(nanoseconds=1000) <= ElapsedTime(nanoseconds=2000) - - -class TestTimedeltaAttribute: - """Test using timedelta attribute.""" - - def test_microseconds_accuracy(self, elapsed_1_ms: ElapsedTime) -> None: - """Test using ElapsedTime of 1 microsecond. - - Given ElapsedTime of ``1`` microsecond, expected timedelta is - ``1`` microsecond. - - Parameters - ---------- - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - - """ - assert elapsed_1_ms.timedelta == timedelta(microseconds=1) - - def test_nanoseconds_accuracy( - self, - elapsed_1_pt_5_ms: ElapsedTime, - ) -> None: - """Test using ElapsedTime of 1.5 microseconds. - - Given ElapsedTime of ``1.5`` microseconds expected timedelta to - be ``1`` microsecond, because of the accuracy lost. 
- - Parameters - ---------- - elapsed_1_pt_5_ms : ElapsedTime - Elapsed Time of 1.5 microseconds. - - """ - assert elapsed_1_pt_5_ms.timedelta == timedelta(microseconds=1) - - -class TestStr: - """Test suite for calling str function on ElapsedTime.""" - - def test_elapsed_time_seconds_as_decimals( - self, - elapsed_100_ns: ElapsedTime, - ) -> None: - """Test elapsed time in seconds is in decimal. - - Given an elapsed time, expected to see the part after seconds as - a decimal part. - - Parameters - ---------- - elapsed_100_ns : ElapsedTime - Elapsed Time to be used to call ``str``. - - """ - assert str(elapsed_100_ns) == "0:00:00.000000100" - - def test_elapsed_time_seconds_as_integer( - self, - elapsed_1_sec: ElapsedTime, - ) -> None: - """Test elapsed time in seconds is an integer. - - Given an elapsed time in integer seconds, the decimal part - should be hidden. - - Parameters - ---------- - elapsed_1_sec : ElapsedTime - Elapsed Time to be used to call ``str``. - - """ - assert str(elapsed_1_sec) == "0:00:01" - - -class TestRepr: # pylint: disable=too-few-public-methods - """Test suite for calling repr function on ElapsedTime.""" - - def test_repr(self, elapsed_100_ns: ElapsedTime) -> None: - """Test call function repr. - - Given an ElapsedTime, call repr would get an output can be used - to re-create this ElapsedTime. - - Parameters - ---------- - elapsed_100_ns : ElapsedTime - Elapsed Time to be used to call ``repr``. 
- - """ - assert repr(elapsed_100_ns) == "ElapsedTime(nanoseconds=100)" diff --git a/tests/test_stopwatch.py b/tests/test_stopwatch.py deleted file mode 100644 index 1828b1b..0000000 --- a/tests/test_stopwatch.py +++ /dev/null @@ -1,138 +0,0 @@ -"""A collection of tests for class ``Stopwatch``.""" - -# pylint: disable=no-self-use - -from __future__ import annotations - -from time import perf_counter_ns, process_time_ns -from typing import TYPE_CHECKING - -from timerun import ElapsedTime, Stopwatch - -if TYPE_CHECKING: - from collections.abc import Callable - from contextlib import AbstractContextManager - - -class TestInit: - """Test suite for stopwatch initialization.""" - - def test_include_sleep(self) -> None: - """Test initialize stopwatch take sleep in to count.""" - stopwatch: Stopwatch = Stopwatch(count_sleep=True) - assert ( - stopwatch._clock # pylint: disable=protected-access # noqa: SLF001 - == perf_counter_ns - ) - - def test_exclude_sleep(self) -> None: - """Test initialize stopwatch do not take sleep in to count.""" - stopwatch: Stopwatch = Stopwatch(count_sleep=False) - assert ( - stopwatch._clock # pylint: disable=protected-access # noqa: SLF001 - == process_time_ns - ) - - def test_default_measurer(self) -> None: - """Test initialize stopwatch without arguments.""" - default: Stopwatch = Stopwatch() - include: Stopwatch = Stopwatch(count_sleep=True) - assert ( - default._clock # pylint: disable=protected-access # noqa: SLF001 - == include._clock # pylint: disable=protected-access # noqa: SLF001 - ) - - -class TestReset: # pylint: disable=too-few-public-methods - """Test suite for starting stopwatch.""" - - def test_reset( - self, - patch_clock: Callable[[int], AbstractContextManager[None]], - stopwatch: Stopwatch, - ) -> None: - """Test to reset a stopwatch. - - Expected to have a stopwatch whose `_start` attribute is not - ``1``, but been reset to ``1`` after call ``reset`` method. 
- - Parameters - ---------- - patch_clock : Callable - Patcher has been used to set the starting time at ``1``. - stopwatch : Stopwatch - A started Stopwatch, which will be reset. - - """ - assert stopwatch._start != 1 # pylint: disable=protected-access # noqa: SLF001 - with patch_clock(1): - stopwatch.reset() - assert stopwatch._start == 1 # pylint: disable=protected-access # noqa: SLF001 - - -class TestSplit: - """Test suite for split method in stopwatch.""" - - def test_calculation( - self, - patch_clock: Callable[[int], AbstractContextManager[None]], - stopwatch: Stopwatch, - elapsed_100_ns: ElapsedTime, - ) -> None: - """Test elapsed time calculation. - - The stopwatch has been started at time ``0``. With patching - clock time to ``100``, the captured elapsed time should be - ``100`` nanoseconds. - - Parameters - ---------- - patch_clock : Callable - Patcher has been used to set the clock time. - stopwatch : Stopwatch - A stopwatch started at time ``0``. - elapsed_100_ns : ElapsedTime - Elapsed Time of 100 nanoseconds. - - """ - assert not stopwatch._start # pylint: disable=protected-access # noqa: SLF001 - - with patch_clock(100): - elapsed: ElapsedTime = stopwatch.split() - assert elapsed == elapsed_100_ns - - def test_split_multiple_times( - self, - patch_clock: Callable[[int], AbstractContextManager[None]], - stopwatch: Stopwatch, - elapsed_100_ns: ElapsedTime, - elapsed_1_ms: ElapsedTime, - ) -> None: - """Test call split method multiple times. - - The stopwatch has been started at time ``0``. With patching - clock time to ``100``, the first captured elapsed time should be - ``100`` nanoseconds. Then, patching clock time to ``1000``, the - second captured elapsed time should be ``1000`` nanoseconds. - - Parameters - ---------- - patch_clock : Callable - Patcher has been used to set the clock time. - stopwatch : Stopwatch - A stopwatch started at time ``0``. - elapsed_100_ns : ElapsedTime - Elapsed Time of 100 nanoseconds. 
- elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - - """ - assert not stopwatch._start # pylint: disable=protected-access # noqa: SLF001 - - with patch_clock(100): - first_elapsed: ElapsedTime = stopwatch.split() - assert first_elapsed == elapsed_100_ns - - with patch_clock(1000): - second_elapsed: ElapsedTime = stopwatch.split() - assert second_elapsed == elapsed_1_ms diff --git a/tests/test_timer.py b/tests/test_timer.py deleted file mode 100644 index af8fd96..0000000 --- a/tests/test_timer.py +++ /dev/null @@ -1,482 +0,0 @@ -"""A collection of tests for class ``Timer``.""" - -# pylint: disable=no-self-use - -from __future__ import annotations - -import asyncio -from typing import TYPE_CHECKING, cast - -import pytest - -from timerun import ElapsedTime, NoDurationCapturedError, Timer - -if TYPE_CHECKING: - from collections.abc import AsyncGenerator, Awaitable, Callable, Iterable - from contextlib import AbstractContextManager - -# =========================================================================== # -# Test suite for using Timer as a context manager. # -# =========================================================================== # - - -def test_use_timer_as_context_manager_single_run( - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_1_ms: ElapsedTime, -) -> None: - """Test using it as a context manager. - - Test using the timer and ``with`` to capture the duration time - for code block. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. 
- - """ - with patch_split([1000]), timer: - pass - - assert timer.duration == elapsed_1_ms - - -def test_use_timer_as_context_manager_multiple_run( - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_100_ns: ElapsedTime, - elapsed_1_ms: ElapsedTime, - elapsed_1_pt_5_ms: ElapsedTime, -) -> None: - """Test run multiple times with the same timer. - - Test run timer using ``with`` ``3`` times and expected to see - all three captured duration times. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_100_ns : ElapsedTime - Elapsed Time of 100 nanoseconds. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - elapsed_1_pt_5_ms : ElapsedTime - Elapsed Time of 1.5 microseconds. - - """ - with patch_split([100, 1000, 1500]): - for _ in range(3): - with timer: - pass - - assert timer.durations == ( - elapsed_100_ns, - elapsed_1_ms, - elapsed_1_pt_5_ms, - ) - - -class TestAsDecorator: - """Test suite for using Timer as a function decorator.""" - - def test_single_run( - self, - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_1_ms: ElapsedTime, - ) -> None: - """Test the function with a single run. - - Test run decorated function and expected to get the captured - duration afterward. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. 
- - """ - - @timer - def func() -> None: - pass - - with patch_split([1000]): - func() - assert timer.duration == elapsed_1_ms - - def test_multiple_run( # pylint: disable=too-many-arguments,too-many-positional-arguments - self, - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_100_ns: ElapsedTime, - elapsed_1_ms: ElapsedTime, - elapsed_1_pt_5_ms: ElapsedTime, - ) -> None: - """Test the function with multiple runs. - - Test run decorated function ``3`` times and expected to see all - three captured duration times. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_100_ns : ElapsedTime - Elapsed Time of 100 nanoseconds. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - elapsed_1_pt_5_ms : ElapsedTime - Elapsed Time of 1.5 microseconds. - - """ - - @timer - def func() -> None: - pass - - with patch_split([100, 1000, 1500]): - for _ in range(3): - func() - - assert timer.durations == ( - elapsed_100_ns, - elapsed_1_ms, - elapsed_1_pt_5_ms, - ) - - -class TestNoElapsedTimeCapturedException: - """Test suite for NoElapsedTimeCaptured exception.""" - - def test_access_duration_attr_before_run(self, timer: Timer) -> None: - """Test access duration attribute before capturing anything. - - Test tries to access duration attribute before capturing - anything, expected to see ``NoDurationCapturedError`` exception. - - Parameters - ---------- - timer : Timer - A newly created Timer with unlimited storage size. 
- - """ - with pytest.raises(NoDurationCapturedError): - _ = timer.duration - - -class TestInit: - """Test suite for Timerinitialization.""" - - def test_use_customized_duration_list(self) -> None: - """Test capture durations into an existing list.""" - durations: list[ElapsedTime] = [] - timer = Timer(storage=durations) - assert ( - timer._durations is durations # pylint: disable=protected-access # noqa: SLF001 - ) - - def test_max_storage_limitation( - self, - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - elapsed_1_ms: ElapsedTime, - elapsed_1_pt_5_ms: ElapsedTime, - ) -> None: - """Test to set the max number of durations been saved. - - Test timer with a max storage limitation at ``2``. Using it to - catch ``3`` duration times and expected to see two latest only. - - Parameters - ---------- - patch_split : Callable - Patcher been used to set the captured duration time. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - elapsed_1_pt_5_ms : ElapsedTime - Elapsed Time of 1.5 microseconds. - - """ - timer = Timer(max_len=2) - - with patch_split([100, 1000, 1500]): - for _ in range(3): - with timer: - pass - - assert timer.durations == (elapsed_1_ms, elapsed_1_pt_5_ms) - - -# =========================================================================== # -# Test suite for using Timer as an async context manager. # -# =========================================================================== # - - -@pytest.mark.asyncio -async def test_use_timer_as_async_context_manager_single_run( - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_1_ms: ElapsedTime, -) -> None: - """Test using it as an async context manager. - - Test using the timer and ``async with`` to capture the duration time - for async code block. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. 
- elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - - """ - with patch_split([1000]): - async with timer: - await asyncio.sleep(0) - - assert timer.duration == elapsed_1_ms - - -@pytest.mark.asyncio -async def test_use_timer_as_async_context_manager_multiple_run( - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_100_ns: ElapsedTime, - elapsed_1_ms: ElapsedTime, - elapsed_1_pt_5_ms: ElapsedTime, -) -> None: - """Test run multiple times with the same timer (async). - - Test run timer using ``async with`` ``3`` times and expected to see - all three captured duration times. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_100_ns : ElapsedTime - Elapsed Time of 100 nanoseconds. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - elapsed_1_pt_5_ms : ElapsedTime - Elapsed Time of 1.5 microseconds. - - """ - with patch_split([100, 1000, 1500]): - for _ in range(3): - async with timer: - await asyncio.sleep(0) - - assert timer.durations == ( - elapsed_100_ns, - elapsed_1_ms, - elapsed_1_pt_5_ms, - ) - - -class TestAsAsyncDecorator: - """Test suite for using Timer as an async function decorator.""" - - @pytest.mark.asyncio - async def test_single_run( - self, - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_1_ms: ElapsedTime, - ) -> None: - """Test the async function with a single run. - - Test run decorated async function and expected to get the captured - duration afterward. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. 
- - """ - - @timer - async def async_func() -> None: - await asyncio.sleep(0) - - with patch_split([1000]): - await cast("Awaitable[None]", async_func()) - assert timer.duration == elapsed_1_ms - - @pytest.mark.asyncio - async def test_multiple_run( # pylint: disable=too-many-arguments,too-many-positional-arguments - self, - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_100_ns: ElapsedTime, - elapsed_1_ms: ElapsedTime, - elapsed_1_pt_5_ms: ElapsedTime, - ) -> None: - """Test the async function with multiple runs. - - Test run decorated async function ``3`` times and expected to see all - three captured duration times. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_100_ns : ElapsedTime - Elapsed Time of 100 nanoseconds. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - elapsed_1_pt_5_ms : ElapsedTime - Elapsed Time of 1.5 microseconds. - - """ - - @timer - async def async_func() -> None: - await asyncio.sleep(0) - - with patch_split([100, 1000, 1500]): - for _ in range(3): - await cast("Awaitable[None]", async_func()) - - assert timer.durations == ( - elapsed_100_ns, - elapsed_1_ms, - elapsed_1_pt_5_ms, - ) - - -class TestAsAsyncGeneratorDecorator: - """Test suite for using Timer as an async generator function decorator.""" - - @pytest.mark.asyncio - async def test_single_run( - self, - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_1_ms: ElapsedTime, - ) -> None: - """Test the async generator function with a single run. - - Test run decorated async generator function and expected to get the - captured duration afterward. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. 
- elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - - """ - - @timer - async def async_gen_func() -> AsyncGenerator[int]: - """Async generator function for testing. - - Yields - ------ - int - Sequential integers for testing. - - """ - await asyncio.sleep(0) - yield 1 - await asyncio.sleep(0) - yield 2 - - with patch_split([1000]): - items: list[int] = [ - item - async for item in cast( - "AsyncGenerator[int]", - async_gen_func(), - ) - ] - - assert items == [1, 2] - assert timer.duration == elapsed_1_ms - - @pytest.mark.asyncio - async def test_multiple_run( # pylint: disable=too-many-arguments,too-many-positional-arguments - self, - patch_split: Callable[[Iterable[int]], AbstractContextManager[None]], - timer: Timer, - elapsed_100_ns: ElapsedTime, - elapsed_1_ms: ElapsedTime, - elapsed_1_pt_5_ms: ElapsedTime, - ) -> None: - """Test the async generator function with multiple runs. - - Test run decorated async generator function ``3`` times and expected - to see all three captured duration times. - - Parameters - ---------- - patch_split : Callable - Patcher has been used to set the captured duration time. - timer : Timer - A newly created Timer with unlimited storage size. - elapsed_100_ns : ElapsedTime - Elapsed Time of 100 nanoseconds. - elapsed_1_ms : ElapsedTime - Elapsed Time of 1 microsecond. - elapsed_1_pt_5_ms : ElapsedTime - Elapsed Time of 1.5 microseconds. - - """ - - @timer - async def async_gen_func() -> AsyncGenerator[int]: - """Async generator function for testing. - - Yields - ------ - int - Sequential integers for testing. 
- - """ - await asyncio.sleep(0) - yield 1 - - with patch_split([100, 1000, 1500]): - for _ in range(3): - async_gen: AsyncGenerator[int] = cast( - "AsyncGenerator[int]", - async_gen_func(), - ) - async for _ in async_gen: - pass - - assert timer.durations == ( - elapsed_100_ns, - elapsed_1_ms, - elapsed_1_pt_5_ms, - ) diff --git a/timerun.py b/timerun.py index 68aa05b..bffa383 100644 --- a/timerun.py +++ b/timerun.py @@ -1,450 +1,295 @@ -"""TimeRun is a Python library for elapsed time measurement.""" - -from __future__ import annotations +"""TimeRun is a Python library for time measurements.""" from collections import deque -from contextlib import ContextDecorator -from dataclasses import dataclass +from collections.abc import AsyncGenerator, Callable, Generator +from copy import deepcopy +from dataclasses import dataclass, field from datetime import timedelta -from inspect import isasyncgenfunction, iscoroutinefunction +from functools import wraps +from inspect import ( + isasyncgenfunction, + iscoroutinefunction, + isgeneratorfunction, +) +from threading import Lock, local from time import perf_counter_ns, process_time_ns -from typing import TYPE_CHECKING, Protocol, TypeVar, cast - -if TYPE_CHECKING: - from collections.abc import ( - AsyncGenerator, - Awaitable, - Callable, - Iterator, - ) - -__all__: tuple[str, ...] = ( # noqa: RUF022 - # -- Core -- - "ElapsedTime", - "Stopwatch", - "Timer", - # -- Exceptions -- - "NoDurationCapturedError", - "TimeRunError", +from types import TracebackType +from typing import ( + Literal, + ParamSpec, + Protocol, + TypeVar, + cast, ) -__version__: str = "0.4.0" - - -# =========================================================================== # -# Type Protocols # -# --------------------------------------------------------------------------- # -# # -# The Timer class needs to store captured durations in a flexible way that # -# allows users to provide their own storage implementations. 
# -# # -# Instead of restricting to specific types like List or Deque, timerun uses a # -# protocol to define the required interface for duration storage. # -# # -# This allows users to provide custom storage backends (database, file, # -# memory-mapped, etc.) as long as they implement the basic sequence methods. # -# # -# =========================================================================== # - -T = TypeVar("T") - - -class AppendableSequence(Protocol[T]): - """Protocol for sequences that support appending and indexing.""" - - def append(self, _item: T) -> None: - """Add an item to the sequence.""" - - def __getitem__(self, _index: int) -> T: - """Get item by index (supports negative indexing).""" - - def __len__(self) -> int: - """Return number of items in the sequence.""" - - def __iter__(self) -> Iterator[T]: - """Iterate over items in the sequence.""" - - -# =========================================================================== # -# Exceptions # -# --------------------------------------------------------------------------- # -# # -# Invalid behaviors when using the classes and functions in timerun should be # -# converted to an exception and raised. # -# # -# To make exceptions easier to manage, all exceptions created for the timerun # -# library will extend from a base exception ``TimeRunException``. # -# # -# =========================================================================== # - - -class TimeRunError(Exception): - """Base exception for TimeRun.""" - +__version__: str = "0.5.0" -class NoDurationCapturedError(TimeRunError, AttributeError): - """No Duration Captured Exception.""" - - def __init__(self) -> None: - """Initialize the exception.""" - super().__init__( - "No duration available. 
This is likely because the Timer has not " - "been used to measure any code blocks or functions yet.", - ) +__all__ = [ + "Measurement", + "TimeSpan", + "Timer", + "__version__", +] +P = ParamSpec("P") +R = TypeVar("R") +R_co = TypeVar("R_co", covariant=True) +Y = TypeVar("Y") -# =========================================================================== # -# Elapsed Time # -# --------------------------------------------------------------------------- # -# # -# In Python, class datetime.timedelta is a duration expressing the difference # -# between two date, time, or datetime instances to microsecond resolution. # -# # -# However, the highest available resolution measurer provided by Python can # -# measure short durations in nanoseconds. # -# # -# Thus, there is a need to have a class that can represent elapsed time at a # -# higher resolution (nanoseconds) for the best accuracy. # -# # -# =========================================================================== # +@dataclass(order=True, frozen=True) +class TimeSpan: + """A time interval with start and end timestamps. -@dataclass(init=True, repr=False, eq=True, order=True, frozen=True) -class ElapsedTime: - """An immutable object representing elapsed time in nanoseconds. + Instances are immutable. Equality and ordering are based only on + ``duration``; ``start`` and ``end`` are excluded from comparison. Attributes ---------- - nanoseconds : int - The elapsed time expressed in nanoseconds. + duration : int + Elapsed time in nanoseconds (end - start). Set in ``__post_init__``, + not a constructor argument. Used for equality, ordering, and hashing. + start : int + Start timestamp in nanoseconds. + end : int + End timestamp in nanoseconds. timedelta : timedelta - The duration as a timedelta type. This attribute may not - maintain the original accuracy. + Read-only. Duration as a ``datetime.timedelta``; nanoseconds are + converted to whole microseconds (``duration // 1000``) to match + timedelta's resolution. 
- Parameters - ---------- - nanoseconds : int - The elapsed time expressed in nanoseconds. - - Examples - -------- - >>> t = ElapsedTime(10) - >>> t - ElapsedTime(nanoseconds=10) - >>> print(t) - 0:00:00.000000010 + Notes + ----- + ``start`` and ``end`` use ``field(compare=False)``, so two spans with + the same duration compare equal even if their intervals differ. """ - __slots__ = ["nanoseconds"] - - nanoseconds: int + duration: int = field(init=False) + start: int = field(compare=False) + end: int = field(compare=False) - def __str__(self) -> str: # type: ignore[explicit-override] - """Return the string representation of the elapsed time.""" - integer_part = timedelta(seconds=self.nanoseconds // int(1e9)) - - if not (decimal_part := self.nanoseconds % int(1e9)): - return str(integer_part) - return f"{integer_part}.{decimal_part:09}" - - def __repr__(self) -> str: # type: ignore[explicit-override] - """Return the representation of the elapsed time.""" - return f"ElapsedTime(nanoseconds={self.nanoseconds})" + def __post_init__(self) -> None: + """Set duration to end minus start (nanoseconds).""" + if self.end < self.start: + msg = "end must be >= start" + raise ValueError(msg) + object.__setattr__(self, "duration", self.end - self.start) @property def timedelta(self) -> timedelta: - """The duration converted from nanoseconds to a timedelta type.""" - return timedelta(microseconds=self.nanoseconds // int(1e3)) + """Duration as a datetime.timedelta.""" + return timedelta(microseconds=self.duration // 1000) -# =========================================================================== # -# Stopwatch # -# --------------------------------------------------------------------------- # -# # -# Based on PEP 418, Python provides performance counter and process time # -# functions to measure a short duration of time elapsed. # -# # -# Based on PEP 564, Python got new time functions with nanosecond resolution. 
# -# # -# Ref: # -# * https://www.python.org/dev/peps/pep-0418/ # -# * https://www.python.org/dev/peps/pep-0564/ # -# # -# =========================================================================== # +@dataclass +class Measurement: + """A measurement collection: wall time, CPU time, and optional metadata. + Stores one measurement only. Use this to collect the result of a single + timing run: wall-clock time, CPU time, and any user-defined metadata. -class Stopwatch: - """A stopwatch with the highest available resolution (in nanoseconds). + When created by Timer (context manager or decorator), ``wall_time`` and + ``cpu_time`` are ``None`` until the block exits, then they are set to the + measured spans. - It measures elapsed time. It can be set to include or exclude the - sleeping time. - - Parameters + Attributes ---------- - count_sleep : bool, optional - An optional boolean variable expressing whether the time elapsed - during sleep should be counted or not. Defaults to True if None. - - Methods - ------- - reset - Restart the stopwatch by setting the starting time to the - current time. - split - Get the elapsed time between now and the starting time. - - Examples - -------- - >>> stopwatch = Stopwatch() - >>> stopwatch.reset() - >>> stopwatch.split() - ElapsedTime(nanoseconds=100) + wall_time : TimeSpan or None + Wall-clock time for the measurement, or ``None`` if not yet set. + cpu_time : TimeSpan or None + CPU time for the measurement, or ``None`` if not yet set. + metadata : dict + Optional key-value metadata (e.g., tags, run id). Defaults to ``{}``; + mutate in place to add or change entries. 
""" - __slots__ = ["_clock", "_start"] + wall_time: TimeSpan | None = None + cpu_time: TimeSpan | None = None + metadata: dict[str, object] = field(default_factory=dict) - def __init__(self, *, count_sleep: bool | None = None) -> None: - """Initialize the stopwatch.""" - if count_sleep is None: - count_sleep = True - self._clock: Callable[[], int] = ( - perf_counter_ns if count_sleep else process_time_ns - ) - - self._start: int = self._clock() - - def reset(self) -> None: - """Reset the starting time to the current time.""" - self._start = self._clock() - - def split(self) -> ElapsedTime: - """Get the elapsed time between now and the starting time. - - Returns - ------- - ElapsedTime - The elapsed time captured by the stopwatch. +class _TimedCallable(Protocol[P, R_co]): # pylint: disable=too-few-public-methods + measurements: deque[Measurement] - """ - return ElapsedTime(self._clock() - self._start) + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R_co: ... -# =========================================================================== # -# Timer # -# --------------------------------------------------------------------------- # -# # -# For most use cases, the user would just want to measure the elapsed time # -# for a run of a code block or function. # -# # -# It would be cleaner and more elegant if the user can measure a function by # -# using a decorator and measure a code block by using a context manager. # -# # -# =========================================================================== # +class Timer: + """Times execution and records wall-clock and CPU time per run. + Use as a context manager (``with Timer() as m:`` or ``async with + Timer() as m:``) to time a block: on exit, the yielded + :class:`Measurement` has its ``wall_time`` and ``cpu_time`` set. -class Timer(ContextDecorator): - """A context decorator that can capture and save the measured elapsed time. - - Attributes - ---------- - durations : Tuple[ElapsedTime, ...] 
- The captured duration times as a tuple. - duration : ElapsedTime - The last captured duration time. + Use as a decorator (``@Timer()`` or ``@Timer(metadata={...}, + maxlen=100)``) to time each call: supports sync and async functions and + generators; one :class:`Measurement` per run is appended to the wrapped + callable's ``measurements`` deque. Parameters ---------- - count_sleep : bool, optional - An optional boolean variable expressing whether the time elapsed - during sleep should be counted or not. Defaults to True if None. - storage : AppendableSequence[ElapsedTime], optional - A sequence-like object used to save captured results. - If provided, this storage will be used directly and max_len will - be ignored. If not provided, a new deque will be created. - max_len : int, optional - The maximum length for the capturing storage. Defaults to None, - which will create storage with infinite length. + metadata : dict or None, optional + Key-value metadata for the measurement(s). Stored by reference; each + measurement gets a deep copy at enter time. Defaults to ``{}``. + maxlen : int or None, optional + Only used in decorator mode. Maximum number of measurements to keep on + the wrapped callable. Ignored when used as a context manager. Defaults + to ``None`` (unbounded). + + Yields (context manager) + ----------------------- + Measurement + The measurement record. ``wall_time`` and ``cpu_time`` are set on block + exit. + + Attributes (decorator mode, on wrapped callable) + ----------------------------------------------- + measurements : deque of Measurement + Deque of measurements (oldest to newest). Examples -------- - >>> import time - >>> with Timer() as timer: - ... time.sleep(0.1) # your code here - >>> print(timer.duration) - - >>> import time - >>> timer = Timer() - >>> @timer - ... def func(): - ... time.sleep(0.1) # your code here - >>> func() - >>> print(timer.duration) - - >>> import asyncio - >>> timer = Timer() - >>> @timer - ... 
async def async_func(): - ... await asyncio.sleep(0.1) # your code here - >>> asyncio.run(async_func()) - >>> print(timer.duration) - - >>> async def async_code(): - ... async with Timer() as timer: - ... await asyncio.sleep(0.1) # your code here - ... print(timer.duration) - >>> asyncio.run(async_code()) + Context manager:: - """ + with Timer() as m: + pass # code block to be measured + print(m.wall_time.timedelta) - __slots__ = ["_durations", "_stopwatch"] + Decorator:: + + @Timer() + def func(): + return + func() + print(func.measurements[-1].wall_time.timedelta) + + """ def __init__( self, - *, - count_sleep: bool | None = None, - storage: AppendableSequence[ElapsedTime] | None = None, - max_len: int | None = None, + metadata: dict[str, object] | None = None, + maxlen: int | None = None, ) -> None: - """Initialize the timer.""" - self._stopwatch: Stopwatch = Stopwatch(count_sleep=count_sleep) - self._durations: AppendableSequence[ElapsedTime] = ( - storage if storage is not None else deque(maxlen=max_len) + """Initialize with optional metadata and maxlen (decorator mode).""" + self._metadata = metadata if isinstance(metadata, dict) else {} + self._maxlen = maxlen + self._local = local() + + def __enter__(self) -> Measurement: + """Start timing; return the measurement record.""" + measurement = Measurement(metadata=deepcopy(self._metadata)) + self._local.stack = getattr(self._local, "stack", deque()) + self._local.stack.append( + (measurement, perf_counter_ns(), process_time_ns()), ) + return measurement - def __enter__(self) -> Timer: # noqa: PYI034 - """Start the timer.""" - self._stopwatch.reset() - return self - - def __exit__(self, *_: object) -> None: - """Stop the timer and save the duration.""" - duration: ElapsedTime = self._stopwatch.split() - self._durations.append(duration) - - async def __aenter__(self) -> Timer: # noqa: PYI034 - """Start the timer (async context manager).""" - self._stopwatch.reset() - return self - - async def __aexit__(self, *_: 
object) -> None: - """Stop the timer and save the duration (async context manager).""" - duration: ElapsedTime = self._stopwatch.split() - self._durations.append(duration) - - def _wrap_async_function( # type: ignore[explicit-any] + def __exit__( self, - func: Callable[..., Awaitable[object]], - ) -> Callable[..., Awaitable[object]]: - """Wrap an async function to measure its execution time.""" - - async def async_wrapper(*args: object, **kwargs: object) -> object: - """Wrap async function execution with timing. - - Parameters - ---------- - *args : object - Positional arguments passed to the wrapped function. - **kwargs : object - Keyword arguments passed to the wrapped function. - - Returns - ------- - object - The result of the wrapped async function. - - """ - async with self: - return await func(*args, **kwargs) - - return async_wrapper - - def _wrap_async_generator( # type: ignore[explicit-any] + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> Literal[False]: + """Stop timing; set wall_time and cpu_time on the measurement.""" + cpu_end = process_time_ns() + wall_end = perf_counter_ns() + try: + measurement, wall_start, cpu_start = self._local.stack.pop() + except (AttributeError, IndexError) as e: + msg = "__exit__ called without a matching __enter__" + raise RuntimeError(msg) from e + measurement.wall_time = TimeSpan(start=wall_start, end=wall_end) + measurement.cpu_time = TimeSpan(start=cpu_start, end=cpu_end) + return False + + async def __aenter__(self) -> Measurement: + """Support ``async with`` by delegating to sync __enter__.""" + return self.__enter__() + + async def __aexit__( self, - func: Callable[..., object], - ) -> Callable[..., AsyncGenerator[object]]: - """Wrap an async generator function to measure its execution time.""" - - async def async_gen_wrapper( - *args: object, - **kwargs: object, - ) -> AsyncGenerator[object]: - """Wrap async generator function execution with timing. 
- - Parameters - ---------- - *args : object - Positional arguments passed to the wrapped function. - **kwargs : object - Keyword arguments passed to the wrapped function. - - Yields - ------ - object - Items yielded from the wrapped async generator function. - - """ - async with self: - async for item in cast( - "AsyncGenerator[object]", - func(*args, **kwargs), - ): - yield item - - return async_gen_wrapper - - def __call__( # type: ignore[override,explicit-override,explicit-any] + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + """Support ``async with`` by delegating to sync __exit__.""" + return self.__exit__(exc_type, exc_val, exc_tb) + + def __call__( # noqa: C901 self, - func: Callable[..., object] | Callable[..., Awaitable[object]], - ) -> Callable[..., object] | Callable[..., Awaitable[object]]: - """Wrap a function (sync or async) to measure its execution time. - - Parameters - ---------- - func : Callable - The function to be decorated (can be sync or async). - - Returns - ------- - Callable - A wrapped function that measures execution time. - - """ - if iscoroutinefunction(func): - return self._wrap_async_function(func) - if isasyncgenfunction(func): - return self._wrap_async_generator(func) - return super().__call__(func) - - @property - def durations(self) -> tuple[ElapsedTime, ...]: - """The captured duration times as a tuple. - - A tuple containing all captured duration times, that can be - unpacked into multiple variables. - - Examples - -------- - >>> first_duration, second_duration = timer.durations - - """ - return tuple(self._durations) - - @property - def duration(self) -> ElapsedTime: - """The last captured duration time. - - Raises - ------ - NoDurationCapturedError - Error that occurs when accessing an empty durations list, - which is usually because the measurer has not been triggered - yet. 
- - """ - try: - return self._durations[-1] - except IndexError as error: - raise NoDurationCapturedError from error + f: Callable[P, R], + ) -> ( + _TimedCallable[P, R] + | _TimedCallable[P, AsyncGenerator[Y, None]] + | _TimedCallable[P, Generator[Y, None, None]] + ): + """When given a callable, wrap it with timing (decorator usage).""" + measurements: deque[Measurement] = deque(maxlen=self._maxlen) + lock = Lock() + + def append_measurement(m: Measurement) -> None: + with lock: + measurements.append(m) + + if isasyncgenfunction(f): + + @wraps(f) + async def wrapper( + *args: P.args, + **kwargs: P.kwargs, + ) -> AsyncGenerator[Y, None]: + inner = f(*args, **kwargs) + try: + async with self as m: + async for x in inner: + yield x + finally: + append_measurement(m) # pylint: disable=used-before-assignment + + elif iscoroutinefunction(f): + + @wraps(f) + async def wrapper( # type: ignore[return] + *args: P.args, + **kwargs: P.kwargs, + ) -> R: + try: + async with self as m: + return cast("R", await f(*args, **kwargs)) + finally: + append_measurement(m) # pylint: disable=used-before-assignment + + elif isgeneratorfunction(f): + + @wraps(f) + def wrapper( + *args: P.args, + **kwargs: P.kwargs, + ) -> Generator[Y, None, None]: + inner = f(*args, **kwargs) + try: + with self as m: + yield from inner + finally: + append_measurement(m) # pylint: disable=used-before-assignment + + else: + + @wraps(f) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + try: + with self as m: + return f(*args, **kwargs) + finally: + append_measurement(m) # pylint: disable=used-before-assignment + + wrapped = cast( + "_TimedCallable[P, R] | " + "_TimedCallable[P, AsyncGenerator[Y, None]] | " + "_TimedCallable[P, Generator[Y, None, None]]", + wrapper, + ) + wrapped.measurements = measurements + return wrapped