diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 93ea174..4e304b5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -37,7 +37,6 @@ jobs: test: name: Test runs-on: ubuntu-latest - needs: lint strategy: fail-fast: false matrix: @@ -58,7 +57,7 @@ jobs: cache: 'pip' - name: Install test dependencies - run: python -m pip install -e ".[dev]" + run: python -m pip install -e . -r requirements-dev.txt - name: Audit dependencies run: python -m pip_audit @@ -78,7 +77,9 @@ jobs: build: name: Build runs-on: ubuntu-latest - needs: test + needs: + - lint + - test steps: - name: Checkout code uses: actions/checkout@v6 diff --git a/.github/workflows/pages.yaml b/.github/workflows/pages.yaml index babedde..e3215fd 100644 --- a/.github/workflows/pages.yaml +++ b/.github/workflows/pages.yaml @@ -30,10 +30,10 @@ jobs: with: python-version: '3.11' - - name: Create venv and install docs dependencies + - name: Install docs dependencies run: | python -m venv .venv - .venv/bin/pip install -e ".[docs]" + .venv/bin/pip install zensical - name: Build site run: .venv/bin/zensical build diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ded5a6f..c19efd7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,6 +15,9 @@ repos: - --fix=lf - id: check-yaml - id: check-toml + - id: detect-private-key + - id: requirements-txt-fixer + files: ^requirements-dev\.txt$ - repo: https://github.com/adrienverge/yamllint rev: v1.38.0 @@ -22,11 +25,16 @@ repos: - id: yamllint - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.15.5 + rev: v0.15.8 hooks: - id: ruff-format - id: ruff-check + - repo: https://github.com/codespell-project/codespell + rev: v2.4.2 + hooks: + - id: codespell + - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.19.1 hooks: @@ -49,7 +57,7 @@ repos: additional_dependencies: ["bandit[toml]"] - repo: https://github.com/semgrep/pre-commit - rev: v1.154.0 + rev: v1.156.0 hooks: - id: semgrep 
args: ["--config", "p/python", "--error"] diff --git a/Makefile b/Makefile index 419543a..8f1d22a 100644 --- a/Makefile +++ b/Makefile @@ -74,7 +74,7 @@ init: ## Set up dev env. Prompts for Python, or: make init PYTHON=python3.10 fi; \ if [ ! -d "$(VENV_DIR)" ]; then $$py -m venv "$(VENV_DIR)" >/dev/null; fi @$(VENV_BIN)/pip install --upgrade pip >/dev/null - @$(VENV_BIN)/pip install -e ".[dev,docs]" >/dev/null + @$(VENV_BIN)/pip install -e . -r requirements-dev.txt zensical >/dev/null @$(VENV_BIN)/pip install pre-commit >/dev/null @$(VENV_BIN)/pre-commit install >/dev/null @@ -118,7 +118,7 @@ test-verbose: check-venv ## Run BDD tests with full scenario/step output (for de @$(VENV_BIN)/coverage report --show-missing # ============================================================================ -# Docs Targets (Zensical; docs deps installed by make init) +# Docs Targets # ============================================================================ ##@ Docs diff --git a/README.md b/README.md index 9668071..1145104 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@

-

TimeRunPython package for time measurement.

+

TimeRunStructured timing for Python.

Version @@ -14,9 +14,9 @@ Total Downloads

-TimeRun is a **single-file** Python package with **no dependencies** beyond the standard library. It records **wall-clock time** and **CPU time** for code blocks or function calls and supports optional **metadata** (e.g. run id, tags) per measurement. +TimeRun is a **single-file** Python package with **no dependencies** beyond the standard library. It records **wall-clock time** and **CPU time** when you measure **a block** or **function calls** (one `Measurement` per block or per call) and supports optional **metadata** (e.g. run id, tags) and **callbacks** (`on_start` / `on_end`) per measurement. -For the full value proposition and positioning, see [Why TimeRun](https://hh-mwb.github.io/timerun/about/) on the docs site. +For positioning and the full value proposition, see [Overview](https://hh-mwb.github.io/timerun/overview/) on the docs site. ## Installation @@ -72,9 +72,11 @@ datetime.timedelta(microseconds=8) *Note: Argument `maxlen` caps how many measurements are kept (e.g. `@Timer(maxlen=10)`). By default the deque is unbounded.* -### Callbacks on Start and End +### Callbacks -Optional `on_start` and `on_end` callbacks run once per measurement. Both receive the measurement instance (`on_start` before timings are set, `on_end` after). Typical uses are logging, forwarding to OpenTelemetry, or enqueueing to a metrics pipeline. +Optional `on_start` and `on_end` callbacks run once per measurement. Both receive the `Measurement` instance — `on_start` before timings are set, `on_end` after. For example: + +Print elapsed time when a block finishes: ```python >>> from timerun import Timer @@ -84,6 +86,20 @@ Optional `on_start` and `on_end` callbacks run once per measurement. Both receiv 0:00:00.000008 ``` +Attach a trace id before each call starts: + +```python +>>> from uuid import uuid4 +>>> from timerun import Timer +>>> @Timer(on_start=lambda m: m.metadata.update(trace_id=uuid4().hex)) +... def func(): +... return +... 
+>>> func() +>>> func.measurements[-1].metadata +{'trace_id': '8aa2c000c98843738a2f0d5d3600d052'} +``` + ## Contributing Contributions are welcome. See [CONTRIBUTING.md](https://github.com/HH-MWB/timerun/blob/main/CONTRIBUTING.md) for setup, testing, and pull request guidelines. diff --git a/docs/recipes/analyze-results.md b/docs/cookbook/analyze-results.md similarity index 89% rename from docs/recipes/analyze-results.md rename to docs/cookbook/analyze-results.md index 46c1525..ab1a0e2 100644 --- a/docs/recipes/analyze-results.md +++ b/docs/cookbook/analyze-results.md @@ -1,3 +1,7 @@ +--- +title: Analyze results +--- + # Analyze results **Problem:** You have many measurements (e.g. from repeated runs or a decorator's `measurements` deque) and want to summarize or compare — mean, variance, confidence intervals. @@ -27,7 +31,7 @@ measurements = list(my_func.measurements) ## What to extract -Each measurement has **wall time** and **CPU time**; use the one that matches your question (e.g. wall for latency, CPU for compute-bound work). Use `wall_time.duration` (nanoseconds, int) or `wall_time.timedelta` for float seconds. You can also use **metadata** to group or filter before computing stats (e.g. by `run_id`, `stage`) so you get per-group summaries. +Each measurement has **wall time** and **CPU time**; use the one that matches your question (e.g. wall for latency, CPU for compute-bound work). Use `wall_time.duration` (nanoseconds, int) or `wall_time.timedelta.total_seconds()` for float seconds. You can also use **metadata** to group or filter before computing stats (e.g. by `run_id`, `stage`) so you get per-group summaries. ```python durations_ns = [m.wall_time.duration for m in measurements] @@ -107,6 +111,4 @@ plt.show() This plots the mean as a point with an error bar spanning the confidence interval. For more on confidence intervals and benchmarking, see your preferred stats or benchmarking reference. 
-**Back to:** [Recipes](index.md) - -**See also:** For the `measurements` deque and `maxlen`, see [Measure functions](../guide/measure-functions.md). For collecting in `on_end`, see [Callbacks](../guide/callbacks.md). +**See also:** [Measure function calls](../guide/measure-functions.md) for the `measurements` deque and `maxlen`. [Callbacks](../guide/callbacks.md) for collecting measurements in `on_end`. diff --git a/docs/cookbook/index.md b/docs/cookbook/index.md new file mode 100644 index 0000000..86a49a2 --- /dev/null +++ b/docs/cookbook/index.md @@ -0,0 +1,14 @@ +--- +title: Cookbook +--- + +# Cookbook + +Real-world patterns for using TimeRun: use metadata effectively, share results with your stack, time web traffic, and analyze timing data. + +You already know the API from the [Guide](../guide/index.md): timer overview, measure a block, measure function calls, metadata, and callbacks. Here we show how to apply it to concrete problems. + +1. **[Use metadata effectively](metadata.md)** — Add context (e.g. request id, stage) to every measurement by mutating metadata in `on_start`. +2. **[Share results](share-results.md)** — Send measurements to logs, files, OpenTelemetry, or Prometheus using `on_end`. +3. **[Time web requests](web-framework.md)** — Wrap HTTP requests with `Timer` in FastAPI, Flask, or Django. +4. **[Analyze results](analyze-results.md)** — Collect measurements and compute summaries or confidence intervals with standard tools. diff --git a/docs/cookbook/metadata.md b/docs/cookbook/metadata.md new file mode 100644 index 0000000..c6c072b --- /dev/null +++ b/docs/cookbook/metadata.md @@ -0,0 +1,78 @@ +--- +title: Use metadata effectively +--- + +# Use metadata effectively + +**Problem:** You want context on every measurement (e.g. request id, stage, experiment id) without repeating it in every `Timer()` call. + +**Idea:** Metadata is attached to each measurement. 
You can **mutate `measurement.metadata` in `on_start`** (or inside the block) to add or change keys for that run. Each measurement gets its own copy of the initial metadata when the run starts, so mutating it in `on_start` only affects that measurement. + +## Why this works + +Each measurement gets its own deep copy of the metadata dict, so mutations in `on_start` or the block affect only that run. See [Guide: Metadata](../guide/metadata.md) for copy and isolation rules. + +## Example: add run context in `on_start` + +Omit `metadata` (or pass `None`) to start each run with an empty dict, or pass an initial dict of shared keys. Then fill it per run in `on_start` from context vars or thread-local storage: + +```python +from contextvars import ContextVar +from timerun import Timer + +request_id_ctx: ContextVar[str] = ContextVar("request_id", default="") + +def add_request_id(m): + m.metadata["request_id"] = request_id_ctx.get() + +with Timer(on_start=add_request_id) as m: + pass # your code + +# m.metadata now includes "request_id" for this run +print(m.metadata) # e.g. {"request_id": "req-abc"} +``` + +## Example: set tags inside the block + +When context is fixed at the start of the run (request id, stage), **`on_start` is often clearer** than mutating inside the block. Mutating `m.metadata` in the block is still valid when values depend on **work you do inside the timed region** (outcome, branch taken, or a value known only after some steps): + +```python +with Timer(metadata={"stage": "ingest"}) as m: + do_work() + if some_condition: + m.metadata["tag"] = "slow_path" +# m.metadata is {"stage": "ingest", "tag": "slow_path"} when relevant +``` + +## Example: invocation count with a closure + +Use a factory that returns an `on_start` callback with its own counter so each measurement gets a monotonic call number (e.g. for a decorated hot path): + +```python +from timerun import Timer + + +def make_invocation_callback(): + count = 0 # (1)! + + def set_invocation(m): + nonlocal count # (2)!
+ count += 1 + m.metadata["invocation"] = count + + return set_invocation # (3)! + + +on_start = make_invocation_callback() +for _ in range(3): + with Timer(on_start=on_start) as m: + pass # your code + +# After each block, invocation is 1, 2, 3, ...; last m.metadata["invocation"] is 3 +``` + +1. Counter lives in the closure; each factory call gets its own independent sequence. +2. `nonlocal` updates the enclosing `count` so every invocation of `set_invocation` sees the same running total. +3. Return the inner function so `Timer` receives a stable `on_start` callback with shared state. + +**See also:** [Guide: Metadata](../guide/metadata.md) for passing `metadata={...}` and copy rules. diff --git a/docs/recipes/share-results.md b/docs/cookbook/share-results.md similarity index 56% rename from docs/recipes/share-results.md rename to docs/cookbook/share-results.md index cb78e9c..80ebc3f 100644 --- a/docs/recipes/share-results.md +++ b/docs/cookbook/share-results.md @@ -1,12 +1,16 @@ +--- +title: Share results +--- + # Share results -**Problem:** You need to get measurements out of the process — to a log, a file, OpenTelemetry, or a metrics backend. +**Problem:** You need to get measurements out of the process — to a log, a file, OpenTelemetry, Prometheus, or another metrics backend. **Idea:** Use **`on_end`** (and optionally `on_start`) to push each measurement out when the run finishes. The callback receives the `Measurement` with `wall_time`, `cpu_time`, and `metadata` set. ## Log -```python +```python hl_lines="16" import logging from timerun import Timer @@ -61,10 +65,10 @@ from timerun import Timer # tracer = get_tracer(__name__) def on_start(m): - m.metadata["span"] = tracer.start_span("timerun") + m.metadata["span"] = tracer.start_span("timerun") # (1)! def on_end(m): - span = m.metadata.get("span") + span = m.metadata.get("span") # (2)! if span is None: return # If on_start didn't set a span, skip. 
span.set_attribute("wall_time_ns", m.wall_time.duration) @@ -72,12 +76,39 @@ def on_end(m): for k, v in m.metadata.items(): if k != "span" and v is not None: span.set_attribute(k, str(v)) - span.end() + span.end() # (3)! with Timer(on_start=on_start, on_end=on_end): do_work() ``` -**Next:** [Analyze results](analyze-results.md) +1. Start the span before the timed work runs so nested operations can attach to the same trace context if your tracer supports it. +2. Retrieve the span object you stashed on the `Measurement`; guard in case `on_start` failed or was skipped. +3. End the span after attributes are set so duration and metadata are recorded on the same span. + +## Prometheus + +Use the [Prometheus Python client](https://github.com/prometheus/client_python) (`pip install prometheus-client`). Register a histogram (or summary) and observe wall-clock seconds in `on_end`: + +```python +from prometheus_client import Histogram +from timerun import Timer + +OPERATION_SECONDS = Histogram( + "timerun_operation_seconds", + "Wall time for timed operations (seconds)", + buckets=(0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, float("inf")), +) + + +def observe_wall_time(m): + OPERATION_SECONDS.observe(m.wall_time.timedelta.total_seconds()) + + +with Timer(on_end=observe_wall_time): + do_work() +``` + +Expose metrics from your process with `start_http_server` or your framework’s integration so Prometheus can scrape them. -For callback basics, see [Reference: Callbacks](../guide/callbacks.md). For the OpenTelemetry API, see the [OpenTelemetry Python docs](https://opentelemetry.io/docs/languages/python/). +**See also:** [Guide: Callbacks](../guide/callbacks.md) for when callbacks run. For the OpenTelemetry API, see the [OpenTelemetry Python docs](https://opentelemetry.io/docs/languages/python/). 
diff --git a/docs/cookbook/web-framework.md b/docs/cookbook/web-framework.md new file mode 100644 index 0000000..00e028a --- /dev/null +++ b/docs/cookbook/web-framework.md @@ -0,0 +1,107 @@ +--- +title: Time web requests +--- + +# Time web requests + +**Problem:** You want **wall and CPU time per HTTP request** so you can log latency or export metrics without ad-hoc timers in every view. + +**Idea:** Run the request (or the ASGI/WSGI pipeline) inside a **`Timer`** and handle the `Measurement` in **`on_end`** (logging, metrics, etc.). TimeRun stays dependency-free; framework imports live only in your app. + +=== "FastAPI (Starlette)" + + ```python + import logging + from starlette.middleware.base import BaseHTTPMiddleware + from timerun import Timer + + logger = logging.getLogger(__name__) + + + class RequestTimerMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request, call_next): + async with Timer( + on_end=lambda m: logger.info( + "request", + extra={ + "path": str(request.url.path), + "wall_s": m.wall_time.timedelta.total_seconds(), + "cpu_s": m.cpu_time.timedelta.total_seconds(), + }, + ), + ): + return await call_next(request) + + + # app.add_middleware(RequestTimerMiddleware) + ``` + +=== "Flask" + + ```python + import logging + from flask import Flask, g + from timerun import Timer + + logger = logging.getLogger(__name__) + app = Flask(__name__) + + + @app.before_request + def _timer_enter(): + g._timer = Timer() + g._measurement = g._timer.__enter__() + + + @app.teardown_request + def _timer_exit(exc): + if not hasattr(g, "_timer"): + return + g._timer.__exit__( + type(exc) if exc else None, + exc, + exc.__traceback__ if exc else None + ) + m = g._measurement + logger.info( + "request", + extra={ + "wall_s": m.wall_time.timedelta.total_seconds(), + "cpu_s": m.cpu_time.timedelta.total_seconds(), + }, + ) + ``` + +=== "Django" + + ```python + import logging + from timerun import Timer + + logger = logging.getLogger(__name__) + + + class 
RequestTimerMiddleware: + def __init__(self, get_response): + self.get_response = get_response + + def __call__(self, request): + def log_request(m): + logger.info( + "request", + extra={ + "path": request.path, + "wall_s": m.wall_time.timedelta.total_seconds(), + "cpu_s": m.cpu_time.timedelta.total_seconds(), + }, + ) + + with Timer(on_end=log_request): + return self.get_response(request) + ``` + + Add the class to `MIDDLEWARE`. Using `with Timer()` ensures `on_end` runs even when the view raises. + +For richer context (request id, user), combine with [Use metadata effectively](metadata.md) and `on_start`. + +**See also:** [Share results](share-results.md) for logs, Prometheus, and OpenTelemetry. diff --git a/docs/guide/callbacks.md b/docs/guide/callbacks.md index 0b4c4fd..fe99d01 100644 --- a/docs/guide/callbacks.md +++ b/docs/guide/callbacks.md @@ -4,7 +4,7 @@ title: Callbacks # Callbacks -The optional **`on_start`** and **`on_end`** callbacks are invoked once per measurement. Both receive the same **Measurement** instance that the Timer yields or appends for that run. +The optional **`on_start`** and **`on_end`** callbacks are invoked once per measurement. Both receive the same **Measurement** instance that the Timer creates for that run. ## When they are invoked and what they receive @@ -17,7 +17,9 @@ Use `on_start` to add to `metadata` (e.g. from context variables). Use `on_end` ## Synchronous only -Callbacks are **synchronous only**. They are invoked on the same thread and must return before the Timer continues. To integrate with asynchronous exporters (e.g. OpenTelemetry), schedule work from the callback (e.g. `asyncio.create_task(export(m))` in an async context, or use a thread or queue). +!!! warning "Callbacks must not block on async I/O" + + Callbacks are **synchronous only**. They run on the same thread and must return before the Timer continues. To integrate with asynchronous exporters (e.g. OpenTelemetry), schedule work from the callback (e.g. 
`asyncio.create_task(export(m))` in an async context, or use a thread or queue). ## Example @@ -28,6 +30,4 @@ with Timer(on_end=lambda m: print(m.wall_time.timedelta)): pass # code block to be measured ``` -For applied patterns (logging, files, OpenTelemetry), see [Share results](../recipes/share-results.md). - -**Back to:** [Reference](index.md) +For applied patterns (logging, files, OpenTelemetry), see [Share results](../cookbook/share-results.md). diff --git a/docs/guide/index.md b/docs/guide/index.md index 56fdce2..772e556 100644 --- a/docs/guide/index.md +++ b/docs/guide/index.md @@ -1,24 +1,26 @@ --- -title: Reference +title: Guide --- -# Reference +# Guide -This section describes the TimeRun API: its concepts, parameters, and behavior. Use it after the [Quick start](../index.md#quick-start) for a complete picture of blocks, functions, metadata, callbacks, and sync/async and generator support, or as a lookup while using the library. +This section describes the TimeRun API: its concepts, parameters, and behavior. Use it after the [Quickstart](../quickstart/index.md#quick-start) for a complete picture of blocks, functions, metadata, callbacks, and sync/async and generator support. -For applied patterns (e.g. attaching a request id in `on_start`, exporting to OpenTelemetry), see [Recipes](../recipes/index.md). +You can also use it as a lookup while working with the library. ---- - -## Core types +For applied patterns (e.g. attaching a request id in `on_start`, exporting to OpenTelemetry), see the [Cookbook](../cookbook/index.md). -- **[TimeSpan](timespan.md)** — Immutable time interval: attributes, `timedelta`, comparison, and validation. -- **[Measurement](measurement.md)** — A single timing result: `wall_time`, `cpu_time`, and `metadata`; when values are set and how to use them. +--- -## Timer +## Using the Timer -- **[Timer (overview)](timer.md)** — Constructor parameters and the two modes (context manager and decorator); what each mode yields. 
+- **[Timer overview](timer.md)** — How wall and CPU time are recorded, constructor parameters, context manager and decorator usage, and callbacks. - **[Measure a block](measure-block.md)** — Using `with Timer()` and `async with`; one measurement per block; nested, sequential, and multi-threaded use; exceptions and invalid use. -- **[Measure functions](measure-functions.md)** — Using `@Timer()` with sync/async functions and generators; the `measurements` deque and `maxlen`; thread safety. +- **[Measure function calls](measure-functions.md)** — Using `@Timer()` with sync/async functions and generators; the `measurements` deque and `maxlen`; thread safety. - **[Metadata](metadata.md)** — Supplying and copying metadata; per-measurement mutation; isolation between runs. - **[Callbacks](callbacks.md)** — `on_start` and `on_end`: when they are invoked, what they receive, and the synchronous-only contract. + +## Core types + +- **[TimeSpan](timespan.md)** — Immutable time interval: attributes, `timedelta`, comparison, and validation. +- **[Measurement](measurement.md)** — A single run’s result: `wall_time`, `cpu_time`, and `metadata`. diff --git a/docs/guide/measure-block.md b/docs/guide/measure-block.md index f09419c..6b5a57f 100644 --- a/docs/guide/measure-block.md +++ b/docs/guide/measure-block.md @@ -4,32 +4,35 @@ title: Measure a block # Measure a block -Use the Timer as a **context manager** to measure a single block of code. The Timer yields one **Measurement** per block; `wall_time` and `cpu_time` are set on block exit. +Use the Timer as a **context manager** to measure a single block of code. The Timer creates one **Measurement** per block; `wall_time` and `cpu_time` are set on block exit. 
## Syntax -Synchronous: +=== "Synchronous" -```python -from timerun import Timer + ```python + from timerun import Timer -with Timer() as m: - pass # code block to be measured + with Timer() as m: + pass # code block to be measured -m.wall_time.timedelta # datetime.timedelta -m.cpu_time.timedelta # datetime.timedelta -``` + m.wall_time.timedelta # datetime.timedelta + m.cpu_time.timedelta # datetime.timedelta + ``` -Asynchronous: +=== "Asynchronous" -```python -async with Timer() as m: - await do_something() + ```python + from timerun import Timer -m.wall_time.timedelta -``` + async with Timer() as m: + await do_something() -On block exit the Timer records CPU time first, then wall time. Wall time is therefore typically equal to or slightly greater than CPU time, even when there is no I/O or scheduling delay. + m.wall_time.timedelta # datetime.timedelta + m.cpu_time.timedelta # datetime.timedelta + ``` + +Wall vs CPU time and sampling order are explained in [Timer overview: How timing works](timer.md#how-timing-works). ## Behavior summary @@ -38,10 +41,10 @@ On block exit the Timer records CPU time first, then wall time. Wall time is the | Single block | One measurement; `wall_time` and `cpu_time` set on exit. | | Sequential blocks (same Timer) | One measurement per block; each block receives its own measurement with a fresh copy of the initial metadata. | | Nested blocks (same Timer) | Outer and inner each receive one measurement; timings are independent (outer wall time includes the inner block’s wall time). | -| Multiple threads (same Timer) | One measurement per thread per enter/exit; a thread-local stack ensures no cross-thread mixing. | +| Multiple threads (same Timer) | One measurement per thread per enter/exit; each thread has its own independent stack, so threads do not interfere with each other. 
| +| Concurrent asyncio tasks (same Timer) | One measurement per task per enter/exit; each asyncio task has its own independent stack (like threads in sync mode), so concurrent tasks do not interfere with each other. | +| Invalid use: `async with` outside a task | `RuntimeError` with message `"no current asyncio task"` (for example if `async with Timer()` is used from synchronous code without a current asyncio task). | | Exception in block | The measurement is still recorded (wall/cpu set on exit); the exception propagates to the caller. | | Invalid use: `__exit__` without `__enter__` | `RuntimeError` with message `"__exit__ called without a matching __enter__"`. | The same Timer instance can be reused for multiple blocks (sequential or nested). Each block receives its own Measurement; metadata mutations in one block do not appear in the next (see [Metadata](metadata.md)). - -**Next:** [Measure functions](measure-functions.md) diff --git a/docs/guide/measure-functions.md b/docs/guide/measure-functions.md index b30ccef..65999e5 100644 --- a/docs/guide/measure-functions.md +++ b/docs/guide/measure-functions.md @@ -1,24 +1,76 @@ --- -title: Measure functions +title: Measure function calls --- -# Measure functions +# Measure function calls -Apply the **decorator** `@Timer()` to measure each call of a function or generator. One **Measurement** per call is appended to the wrapped callable’s `measurements` deque. +Apply the **decorator** `@Timer()` to time every call to a function or generator. One **Measurement** per call is appended to the wrapped callable’s `measurements` deque. 
## Syntax -```python -from timerun import Timer +=== "Sync function" -@Timer() -def func(): - return + ```python + from timerun import Timer -func() -func.measurements[-1].wall_time.timedelta -func.measurements[-1].cpu_time.timedelta -``` + @Timer() + def func(): + return + + func() + func.measurements[-1].wall_time.timedelta + func.measurements[-1].cpu_time.timedelta + ``` + +=== "Async function" + + ```python + import asyncio + from timerun import Timer + + @Timer() + async def func(): + return + + async def main(): + await func() + func.measurements[-1].wall_time.timedelta + + asyncio.run(main()) + ``` + +=== "Sync generator" + + ```python + from timerun import Timer + + @Timer() + def gen(): + yield 1 + yield 2 + + list(gen()) # one measurement after full consumption + gen.measurements[-1].wall_time.timedelta + ``` + +=== "Async generator" + + ```python + import asyncio + from timerun import Timer + + @Timer() + async def agen(): + yield 1 + yield 2 + + async def main(): + async for _ in agen(): + pass + agen.measurements[-1].wall_time.timedelta + + asyncio.run(main()) + ``` Use `@Timer(maxlen=10)` to limit how many measurements are retained; the oldest entries are discarded when the deque reaches capacity. The default is unbounded. @@ -28,21 +80,21 @@ Use `@Timer(maxlen=10)` to limit how many measurements are retained; the oldest |------|----------| | Sync function | One measurement per call. | | Async function | One measurement per call (covers the full `await` of the call). | -| Sync generator | One measurement per **full consumption** of the generator (from first `next` until exhaustion or close). | -| Async generator | One measurement per **full consumption** of the async generator. | +| Sync generator | One measurement per **full consumption** of the generator (from the first iteration until the generator is exhausted or closed). 
| +| Async generator | One measurement per **full consumption** of the async generator (same as sync: from first iteration until exhausted or closed). | For generators, a single measurement covers the entire iteration, not each yielded value. -## measurements deque +You do not need to exhaust the generator: stopping early (for example `break` out of a `for` loop) or calling `.close()` on the iterator still ends the timed region and records **one** measurement for that run. + +## The `measurements` deque The wrapped callable has a `measurements` attribute: a `deque` of `Measurement` instances in order from oldest to newest. Each call (or full generator consumption) appends one entry. When `maxlen` is set, the deque is bounded and discards the oldest entry when full. ## Thread safety -Concurrent calls from multiple threads each produce one measurement. Appends to `measurements` are thread-safe; for example, two threads calling the same timed function yield two measurements. +Concurrent calls from multiple threads each produce one measurement. Appends to `measurements` are thread-safe; for example, two threads calling the same timed function produce two measurements. ## Exceptions If a timed function or generator raises, one measurement is still recorded for that run, and the exception is re-raised to the caller. - -**Next:** [Metadata](metadata.md) diff --git a/docs/guide/measurement.md b/docs/guide/measurement.md index 7f55b10..25666e7 100644 --- a/docs/guide/measurement.md +++ b/docs/guide/measurement.md @@ -4,23 +4,19 @@ title: Measurement # Measurement -A **Measurement** represents a single timing result: wall-clock time, CPU time, and optional metadata. The Timer creates one Measurement per block or function call. You obtain it from the context manager (`with Timer() as m`) or from the decorator’s `measurements` deque. +A **Measurement** represents a single timing result: wall-clock time, CPU time, and optional metadata. 
The Timer creates one Measurement per timed block, or one per call when you use the decorator to time function calls. You obtain it from the context manager (`with Timer() as m`) or from the decorator’s `measurements` deque. ## Attributes | Attribute | Type | Description | |------------|------|-------------| -| `wall_time` | `TimeSpan \| None` | Wall-clock time for the measurement, or `None` until the block or call exits. | -| `cpu_time` | `TimeSpan \| None` | CPU time for the measurement, or `None` until the block or call exits. | +| `wall_time` | `TimeSpan \| None` | Wall-clock time for the measurement (set on block/call exit; `None` before that, including during `on_start`). | +| `cpu_time` | `TimeSpan \| None` | CPU time for the measurement (set on block/call exit; `None` before that, including during `on_start`). | | `metadata` | `dict[str, object]` | Key-value metadata (e.g. run id, tags). Defaults to `{}`. Mutate in place to add or change entries for this measurement. | -## When wall_time and cpu_time are set - -When the Timer creates the Measurement (in `__enter__` or at the start of a decorated call), `wall_time` and `cpu_time` are `None`. They are assigned when the block exits or the call completes. Thus, in `on_start` the measurement does not yet have timings; in `on_end`, both are set. - ## Metadata -Metadata is mutable. Initial metadata is supplied via `Timer(metadata={...})`; each measurement receives a deep copy at enter time. You can mutate `measurement.metadata` inside the timed block or in `on_start` to add or change keys for that run only. See [Metadata](metadata.md) for copying and scope rules. +`metadata` is a mutable dict scoped to this measurement. See [Metadata](metadata.md) for copy semantics, isolation, and mutation patterns. ## Example @@ -38,5 +34,3 @@ m.metadata # {"run_id": "exp-1"} (your copy; mutable) ``` You can also construct a Measurement manually (e.g. 
for tests) by passing `wall_time`, `cpu_time`, and optional `metadata` to the constructor. - -**Next:** [Timer (overview)](timer.md) diff --git a/docs/guide/metadata.md b/docs/guide/metadata.md index 718155d..6514b4f 100644 --- a/docs/guide/metadata.md +++ b/docs/guide/metadata.md @@ -24,10 +24,10 @@ Each measurement receives a **deep copy** of this dictionary at enter time. The - Each block or call receives its **own** copy of the initial metadata. Mutating `measurement.metadata` inside that block (or in `on_start` for that measurement) affects only that measurement. - If you reuse the same Timer for a second block, the second block’s measurement starts from a fresh deep copy of the Timer’s initial metadata. It does **not** inherit any keys or changes from the first block. -In summary: metadata is scoped to the measurement. Use it to tag that run; it does not leak to the next run. +!!! tip "Metadata is per run" -## Mutating metadata + Metadata is scoped to the measurement. Use it to tag that run; it does not leak to the next run. -You can mutate `measurement.metadata` inside the timed block or in `on_start` to add or change entries for that run (e.g. request id from context, or a tag set after checking a condition). For patterns such as adding a request id in `on_start`, see [Use metadata effectively](../recipes/metadata.md). +## Mutating metadata -**Next:** [Callbacks](callbacks.md) +You can mutate `measurement.metadata` inside the timed block or in `on_start` to add or change entries for that run (e.g. request id from context, or a tag set after checking a condition). For patterns such as adding a request id in `on_start`, see [Use metadata effectively](../cookbook/metadata.md). diff --git a/docs/guide/timer.md b/docs/guide/timer.md index 36ca109..a6ffede 100644 --- a/docs/guide/timer.md +++ b/docs/guide/timer.md @@ -1,32 +1,59 @@ --- -title: Timer (overview) +title: Timer overview --- -# Timer (overview) +# Timer overview -**Timer** is the main entry point. 
It measures execution and records wall-clock and CPU time per run. It operates in two modes: as a **context manager** (for a block of code) or as a **decorator** (for function or generator calls). Both modes support synchronous and asynchronous use; the decorator also supports sync and async generators. +**Timer** is the main entry point. It measures execution and records wall-clock and CPU time per run. It operates in two modes: as a **context manager** (wraps a block of code in a `with` statement to time it automatically) or as a **decorator** (wraps a function with `@Timer()` to time every call). Both modes support synchronous and asynchronous use; the decorator also supports sync and async generators. + +## How timing works + +`Timer` samples clocks when entering and exiting the timed region (Python's `__enter__` / `__exit__` hooks, called automatically by the `with` statement). The [Measurement](measurement.md) fields `wall_time` and `cpu_time` are built from those samples as [`TimeSpan`](timespan.md) values. + +`wall_time` and `cpu_time` are the intervals between clock samples taken when entering the timed region and when exiting it. Creating the `Measurement`, copying metadata, and running `on_start` (if any) all happen before the first sample, so they are outside the span. The span ends at the start of `__exit__`, before the stack is popped and `TimeSpan` values are assigned; `on_end` runs after that. A small amount of context-manager and interpreter work right after the first sample and right before the last sample is included in what you see. + +!!! note "Sampling order" + + On exit, CPU time is sampled before wall time, so `wall_time` can be slightly larger than `cpu_time` even with little I/O or scheduling. + +`wall_time` uses `time.perf_counter_ns()` (monotonic elapsed wall time — counts all real time including waiting). 
`cpu_time` uses `time.process_time_ns()` (total CPU time of the current process — only counts time the processor spent running your code, not time spent waiting on I/O or sleep). Nested use of the same `Timer` instance stacks measurements: each level has its own `Measurement`, and inner wall time falls inside outer wall time when nested. + +```mermaid +sequenceDiagram + participant User + participant Enter as Timer._enter + participant Clock + participant Work as Timed work + participant Exit as Timer._exit + User->>Enter: __enter__ + Enter->>Enter: copy metadata, on_start + Enter->>Clock: wall_start cpu_start + Enter->>User: return Measurement + User->>Work: run + Work->>Exit: __exit__ + Exit->>Clock: cpu_end wall_end + Exit->>Exit: stack pop, TimeSpan, on_end +``` ## Constructor parameters -All parameters are optional and keyword-only. +All parameters are optional and keyword-only (passed by name, e.g. `Timer(metadata={...})`, not by position). | Parameter | Type | Description | |------------|------|-------------| -| `metadata` | `dict \| None` | Key-value metadata for the measurement(s). Each measurement gets a deep copy at enter time. Defaults to `{}`. | +| `metadata` | `dict \| None` | Key-value metadata for the measurement(s). Each measurement gets a deep copy when the timed block or call begins. Defaults to `{}`. | | `on_start` | `callable \| None` | Called once per measurement when timing is about to start. Receives the `Measurement` (metadata set; `wall_time` and `cpu_time` are `None`). Defaults to `None`. | | `on_end` | `callable \| None` | Called once per measurement when timing has just ended. Receives the `Measurement` with `wall_time` and `cpu_time` set. Defaults to `None`. | | `maxlen` | `int \| None` | **Decorator only.** Maximum number of measurements to keep on the wrapped callable. Ignored when used as a context manager. Defaults to `None` (unbounded). 
| ## Context manager mode -Use `with Timer() as m:` (sync) or `async with Timer() as m:` (async). On block exit, the yielded `Measurement` has its `wall_time` and `cpu_time` set. There is one measurement per block; nested and sequential blocks each receive their own measurement. See [Measure a block](measure-block.md) for nested blocks, multiple threads, and exception behavior. +Use `with Timer() as m:` (sync) or `async with Timer() as m:` (async). On block exit, the returned `Measurement` has its `wall_time` and `cpu_time` set. There is one measurement per block; nested and sequential blocks each receive their own measurement. See [Measure a block](measure-block.md) for nested blocks, multiple threads, and exception behavior. ## Decorator mode -Apply `@Timer()` (or `@Timer(metadata={...}, maxlen=100)` etc.) to a function or generator. Each call produces one `Measurement`, appended to the wrapped callable’s `measurements` deque. Supported callables include sync and async functions and sync and async generators (one measurement per call, or per full consumption for generators). See [Measure functions](measure-functions.md) for `maxlen` and thread safety. +Apply `@Timer()` (or `@Timer(metadata={...}, maxlen=100)` etc.) to a function or generator. Each call produces one `Measurement`, appended to the wrapped callable’s `measurements` deque. Supported callables include sync and async functions and sync and async generators (one measurement per call, or per full consumption for generators). See [Measure function calls](measure-functions.md) for `maxlen` and thread safety. ## Callbacks -Callbacks are **synchronous only**. To integrate with asynchronous exporters (e.g. OpenTelemetry), schedule work from the callback (e.g. `asyncio.create_task(export(m))` in an async context, or use a thread or queue). See [Callbacks](callbacks.md) for when `on_start` and `on_end` are invoked and what they receive. 
- -**Next:** [Measure a block](measure-block.md) +`on_start` and `on_end` are synchronous-only callbacks. See [Callbacks](callbacks.md) for invocation timing, what they receive, and async integration. diff --git a/docs/guide/timespan.md b/docs/guide/timespan.md index 28a1b71..7dde146 100644 --- a/docs/guide/timespan.md +++ b/docs/guide/timespan.md @@ -10,7 +10,7 @@ A **TimeSpan** represents an immutable time interval with start and end timestam | Attribute | Type | Description | |------------|------|-------------| -| `duration` | `int` | Elapsed time in nanoseconds (`end - start`). Set in `__post_init__`, not a constructor argument. Used for equality, ordering, and hashing. | +| `duration` | `int` | Elapsed time in nanoseconds (`end - start`). Computed automatically when the object is created — not a constructor argument. Used for equality, ordering, and hashing. | | `start` | `int` | Start timestamp in nanoseconds. | | `end` | `int` | End timestamp in nanoseconds. | | `timedelta`| `datetime.timedelta` | Read-only. Duration as a `datetime.timedelta`; nanoseconds are converted to whole microseconds (`duration // 1000`) to match `timedelta` resolution. | @@ -36,5 +36,3 @@ span.timedelta # datetime.timedelta(microseconds=1000) # Comparison by duration only TimeSpan(start=0, end=100) == TimeSpan(start=200, end=300) # True (same duration) ``` - -**Next:** [Measurement](measurement.md) diff --git a/docs/index.md b/docs/index.md index bc29a0d..7998c3f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,66 +1,9 @@ --- -title: Home +title: TimeRun ---

TimeRun

**Structured timing for Python.** One small library, no dependencies — wall and CPU time, ready for your logs, metrics, or tracing. ---- - -## About - -You need to **measure execution time** of Python code in a way that’s trustworthy (wall + CPU, not ad-hoc timers), observable (send to logging, OpenTelemetry, or any pipeline), and low-friction (no new dependencies, works in scripts and production). TimeRun does exactly that. - -- **Zero dependencies** — Standard library only; safe for libraries and constrained environments. -- **Wall + CPU time** — Distinguish real elapsed time from CPU burn (I/O vs CPU-bound). -- **Observability-ready** — `on_start` / `on_end` callbacks and metadata to plug into logging, OpenTelemetry, or any metrics pipeline. - -[Read the full story for positioning →](about/index.md) - ---- - -## Quick start - -#### Install from [PyPI](https://pypi.org/project/timerun/) - -```bash -pip install timerun -``` - -#### Measure code block - -```python -from timerun import Timer - -with Timer() as m: - pass # your code -``` - -#### Measure function calls - -```python -@Timer() -def my_func(): - pass - -my_func() -m = my_func.measurements[-1] # measurement for last call -``` - -#### Use measurement result - -```python ->>> m.wall_time.timedelta -datetime.timedelta(microseconds=11) ->>> m.cpu_time.timedelta -datetime.timedelta(microseconds=8) -``` - -[Read the reference for API details →](guide/index.md) - ---- - -## Trust - -[![PyPI version](https://img.shields.io/pypi/v/timerun.svg)](https://pypi.org/project/timerun/) [![License](https://img.shields.io/pypi/l/timerun.svg)](https://github.com/HH-MWB/timerun/blob/main/LICENSE) [![Coverage](https://codecov.io/gh/HH-MWB/timerun/branch/main/graph/badge.svg)](https://codecov.io/gh/HH-MWB/timerun) [![Total downloads](https://static.pepy.tech/badge/timerun)](https://pepy.tech/project/timerun) +[Quickstart](quickstart/index.md) · [Overview](overview/index.md) · [Guide](guide/index.md) · 
[Cookbook](cookbook/index.md) diff --git a/docs/about/index.md b/docs/overview/index.md similarity index 90% rename from docs/about/index.md rename to docs/overview/index.md index f58a233..e5e8242 100644 --- a/docs/about/index.md +++ b/docs/overview/index.md @@ -1,8 +1,8 @@ --- -title: About +title: Overview --- -# About +# Overview TimeRun gives you **structured, dependency-free timing** (wall + CPU) with optional **metadata and callbacks**, so you can measure any Python code and plug results into your existing **observability stack**. Single file, zero dependencies, standard library only. @@ -22,12 +22,12 @@ Alternatives (manual `time.perf_counter()`, heavy APM agents, or “batteries-in ## Who it’s for -If that sounds like you, TimeRun is a good fit. - - **Python developers** doing performance tuning, benchmarking, or debugging latency. - **Platform / SRE / backend engineers** who need lightweight, library-level timing that can feed into existing observability (logs, metrics, tracing). - **Libraries and frameworks** that want optional timing without imposing dependencies on their users. +If that sounds like you, TimeRun is a good fit. + --- ## What you get @@ -74,8 +74,6 @@ If that sounds like you, TimeRun is a good fit. --- -## Bottom line - -TimeRun is the **minimal, dependency-free way** to get structured timing (wall + CPU) and optional metadata/callbacks in Python, so you can measure what matters and plug results into your existing observability stack without extra dependencies or lock-in. +## Next steps -[Quick start](../index.md#quick-start) to install and run. [Reference](../guide/index.md) for the API. [Recipes](../recipes/index.md) for real-world patterns. +[Quickstart](../quickstart/index.md#quick-start) to install and run. [Guide](../guide/index.md) for the API. [Cookbook](../cookbook/index.md) for real-world patterns. 
diff --git a/docs/quickstart/index.md b/docs/quickstart/index.md new file mode 100644 index 0000000..79dfdbb --- /dev/null +++ b/docs/quickstart/index.md @@ -0,0 +1,85 @@ +--- +title: Quickstart +--- + +

TimeRun

+ +**Structured timing for Python.** One small library, no dependencies — wall and CPU time, ready for your logs, metrics, or tracing. + +--- + +## At a glance + +You need to **measure execution time** of Python code in a way that’s trustworthy (wall + CPU, not ad-hoc timers), observable (send to logging, OpenTelemetry, or any pipeline), and low-friction (no new dependencies, works in scripts and production). TimeRun does exactly that. + +- **Zero dependencies** — Standard library only; safe for libraries and constrained environments. +- **Wall + CPU time** — Distinguish real elapsed time from CPU burn (I/O vs CPU-bound). +- **Observability-ready** — `on_start` / `on_end` callbacks and metadata to plug into logging, OpenTelemetry, or any metrics pipeline. + +[Read the full story for positioning →](../overview/index.md) + +--- + +## Quick start + +#### Install from [PyPI](https://pypi.org/project/timerun/) + +Requires **Python 3.10+**. + +```bash +pip install timerun +``` + +#### Measure a code block + +```python +from timerun import Timer + +with Timer() as m: + pass # your code +``` + +For async code, use `async with Timer() as m:` the same way. + +#### Measure function calls + +```python +@Timer() +def my_func(): + pass + +my_func() +m = my_func.measurements[-1] # measurement for last call +``` + +#### Use the measurement + +```python +>>> m.wall_time.timedelta +datetime.timedelta(microseconds=11) +>>> m.cpu_time.timedelta +datetime.timedelta(microseconds=8) +``` + +How wall and CPU time relate (and why they can differ) is explained in [Timer overview](../guide/timer.md#how-timing-works). + +#### Callbacks + +Optional `on_start` and `on_end` run once per measurement and receive the same `Measurement` — `on_start` before timings are set, `on_end` after. 
+ +```python +from timerun import Timer + +with Timer(on_end=lambda m: print(m.wall_time.timedelta)): + pass # your code +``` + +More examples: [Callbacks](../guide/callbacks.md), [Cookbook](../cookbook/index.md) (metadata, logging, OpenTelemetry). + +[Read the guide for API details →](../guide/index.md) + +--- + +PyPI, license, coverage, and download stats: + +[![PyPI version](https://img.shields.io/pypi/v/timerun.svg)](https://pypi.org/project/timerun/) [![License](https://img.shields.io/pypi/l/timerun.svg)](https://github.com/HH-MWB/timerun/blob/main/LICENSE) [![Coverage](https://codecov.io/gh/HH-MWB/timerun/branch/main/graph/badge.svg)](https://codecov.io/gh/HH-MWB/timerun) [![Total downloads](https://static.pepy.tech/badge/timerun)](https://pepy.tech/project/timerun) diff --git a/docs/recipes/index.md b/docs/recipes/index.md deleted file mode 100644 index 3db52d4..0000000 --- a/docs/recipes/index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Recipes ---- - -# Recipes - -Real-world patterns for using TimeRun: use metadata effectively, share results with your stack, and analyze timing data. - -You already know the API from the [Reference](../guide/index.md) (measure blocks, functions, metadata, callbacks). Here we show how to apply it to concrete problems. - -1. **[Use metadata effectively](metadata.md)** — Add context (e.g. request id, stage) to every measurement by mutating metadata in `on_start`. -2. **[Share results](share-results.md)** — Send measurements to logs, files, or OpenTelemetry using `on_end`. -3. **[Analyze results](analyze-results.md)** — Collect measurements and compute summaries or confidence intervals with standard tools. diff --git a/docs/recipes/metadata.md b/docs/recipes/metadata.md deleted file mode 100644 index 7bd552c..0000000 --- a/docs/recipes/metadata.md +++ /dev/null @@ -1,67 +0,0 @@ -# Use metadata effectively - -**Problem:** You want context on every measurement (e.g. 
request id, stage, experiment id) without repeating it in every `Timer()` call. - -**Idea:** Metadata is attached to each measurement. You can **mutate `measurement.metadata` in `on_start`** (or inside the block) to add or change keys for that run. Each measurement gets its own copy of the initial metadata at enter time, so mutating it in `on_start` only affects that measurement. - -## Why this works - -You can edit metadata in `on_start` (or in the block) because the callback receives the **same** `Measurement` instance that is returned from `with Timer(...) as m`. When the block enters, the Timer creates that Measurement with `metadata=deepcopy(self._metadata)` — so each run already has its own dict. Mutating `m.metadata` in `on_start` or in the block therefore mutates that run’s copy only; the object is passed by reference. - -You **cannot** set per-run values at Timer init because init runs once and there is no “current run” yet. The dict you pass to `Timer(metadata={...})` is stored and **deep-copied** into each new Measurement on every `__enter__`. So you can only supply a shared template at init; per-run edits must happen after the Measurement for that run exists — in `on_start` or inside the block. - -## Example: add run context in `on_start` - -Omit metadata (or pass a dict); an empty dict is the default when you pass `None`. Fill it per run in `on_start` from context vars or thread-local storage: - -```python -from contextvars import ContextVar -from timerun import Timer - -request_id_ctx: ContextVar[str] = ContextVar("request_id", default="") - -def add_request_id(m): - m.metadata["request_id"] = request_id_ctx.get() - -with Timer(on_start=add_request_id) as m: - pass # your code - -# m.metadata now includes "request_id" for this run -print(m.metadata) # e.g. {"request_id": "req-abc"} -``` - -## Example: set tags inside the block - -Mutating `m.metadata` inside the timed block is **generally not recommended** — prefer **callbacks** (e.g. 
`on_start`) when the context is known at the start of the run (e.g. request id, stage). It is useful when metadata must be **dynamically computed from code logic** (e.g. outcome, branch taken, or a value only known after some work): - -```python -with Timer(metadata={"stage": "ingest"}) as m: - do_work() - if some_condition: - m.metadata["tag"] = "slow_path" -# m.metadata is {"stage": "ingest", "tag": "slow_path"} when relevant -``` - -## Example: invocation count with a singleton counter - -Use a module-level (or singleton) counter and set it in `on_start` so each measurement carries the call number for that run (e.g. “call #1”, “#2”, …). Handy with a decorator to see invocation order: - -```python -from timerun import Timer - -_invocation_counter = 0 - -def set_invocation(m): - global _invocation_counter - _invocation_counter += 1 - m.metadata["invocation"] = _invocation_counter - -with Timer(on_start=set_invocation) as m: - pass # your code - -# m.metadata["invocation"] is 1, 2, 3, ... for each run -``` - -**Next:** [Share results](share-results.md) - -For the API details (passing `metadata={...}`, reading `m.metadata`), see [Reference: Metadata](../guide/metadata.md). diff --git a/features/block_timing.feature b/features/block_timing.feature deleted file mode 100644 index 085d5df..0000000 --- a/features/block_timing.feature +++ /dev/null @@ -1,90 +0,0 @@ -Feature: Block timing - - As someone measuring duration, - I want to time blocks of code (sync, async, or threaded), - so that I get per-task timings and can attach metadata. 
- - # --- Basic timing: sync, async, CPU-bound --- - - Scenario: Blocking sleep with `with` yields wall time and near-zero CPU time - Given a blocking operation that runs for around 10,000,000 nanoseconds - When I measure the operation using `with` - Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds - And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds - - Scenario: Async sleep with `async with` yields wall time and near-zero CPU time - Given an async operation that runs for around 10,000,000 nanoseconds - When I measure the async operation using `async with` - Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds - And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds - - Scenario: CPU-bound block with `with` yields meaningful wall and CPU time - Given a CPU-bound operation that runs for around 10,000,000 nanoseconds - When I measure the operation using `with` - Then the measurement's wall time duration is at least 10,000,000 nanoseconds - And the measurement's CPU time duration is within the configured buffer of 10,000,000 nanoseconds - - # --- One Timer, multiple blocks or threads --- - - Scenario: Two threads with one Timer yield one measurement per thread - Given each thread sleeps 5,000,000 nanoseconds - When I measure blocks from 2 threads using the same Timer instance - Then the measurements are from different threads - - Scenario: Two sequential blocks with one Timer yield correct durations - Given the first block duration is 5,000,000 nanoseconds - And the second block duration is 10,000,000 nanoseconds - When I measure two sequential blocks with the same Timer instance - Then the first measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds - And the second measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds - - 
Scenario: Nested blocks with one Timer yield independent outer and inner times - Given the outer block duration is 20,000,000 nanoseconds - And the inner block duration is 5,000,000 nanoseconds - When I measure nested blocks with the same Timer instance - Then the outer measurement's wall time duration is within the configured buffer of 25,000,000 nanoseconds - And the inner measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds - And the outer measurement's wall time duration is at least the inner measurement's wall time duration - - # --- Metadata --- - - Scenario: Initial metadata is carried on the yielded measurement - Given metadata run_id "exp-1" and tag "baseline" - When I measure a code block with that metadata - Then the measurement's metadata key "run_id" is "exp-1" - And the measurement's metadata key "tag" is "baseline" - - Scenario: Metadata set in first block is not visible in second block (reused Timer) - Given metadata run_id "same-run" and tag "original" - And I will add metadata key "extra" as "from_first_block" in the first block - When I measure two blocks with the same Timer instance and that metadata - Then the first measurement's metadata key "extra" is "from_first_block" - And the second measurement's metadata key "run_id" is "same-run" - And the second measurement's metadata key "tag" is "original" - And the second measurement's metadata does not contain key "extra" - - # --- Callbacks on start and end --- - - Scenario: The on_start callback is invoked once with the same measurement instance the Timer yields for that block - Given an on_start callback that records invocations - When I measure a code block with a Timer that has that on_start callback - Then the on_start callback was called once - And the on_start callback was called with the same measurement instance that the Timer yielded for that block - - Scenario: The on_end callback is invoked once with the same measurement instance the Timer yields 
for that block - Given an on_end callback that records invocations - When I measure a code block with a Timer that has that on_end callback - Then the on_end callback was called once - And the on_end callback was called with the same measurement instance that the Timer yielded for that block - - # --- Edge cases and errors --- - - Scenario: Block that raises still yields measurement; exception propagates - When I measure a code block that raises an exception - Then the block yielded a measurement - And an exception was propagated to the caller - - Scenario: __exit__ without __enter__ raises RuntimeError - When I call __exit__ on a Timer instance without calling __enter__ first - Then a RuntimeError is raised - And the error message is "__exit__ called without a matching __enter__" diff --git a/features/function_timing.feature b/features/function_timing.feature deleted file mode 100644 index a6aa150..0000000 --- a/features/function_timing.feature +++ /dev/null @@ -1,61 +0,0 @@ -Feature: Function timing - - As someone measuring duration, - I want to time function and generator execution with a decorator, - so that I get per-call measurements and can attach metadata. 
- - # --- Sync and async functions --- - - Scenario: Timing a synchronous sleeping function records real time and minimal CPU time - Given a sync function that sleeps for around 10,000,000 nanoseconds - When I call the decorated function - Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds - And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds - - Scenario: Timing an async sleeping function records real time and minimal CPU time - Given an async function that sleeps for around 10,000,000 nanoseconds - When I call the decorated function - Then the measurement's wall time duration is within the configured buffer of 10,000,000 nanoseconds - And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds - - # --- Sync and async generators --- - - Scenario: Fully consuming a sync generator records one measurement - Given a sync generator that yields 3 items and sleeps 5,000,000 nanoseconds total - When I fully consume the decorated generator - Then the decorated function's measurements deque has 1 entry - - Scenario: Fully consuming an async generator records one measurement - Given an async generator that yields 3 items and sleeps 5,000,000 nanoseconds total - When I fully consume the decorated generator - Then the decorated function's measurements deque has 1 entry - - # --- Metadata --- - - Scenario: Metadata attached to the timer appears on each measurement - Given metadata run_id "exp-1" and tag "baseline" - When I call a decorated function with that metadata - Then the measurement's metadata key "run_id" is "exp-1" - And the measurement's metadata key "tag" is "baseline" - - # --- Exceptions --- - - Scenario: When a timed function raises an error, one measurement is still recorded and the error is re-raised - When I call a decorated function that raises an exception - Then the decorated function's measurements deque has 1 entry - And an exception was 
propagated to the caller - - # --- Limiting stored measurements (maxlen) --- - - Scenario: With maxlen 2, only the last 2 measurements are kept - Given a sync function that sleeps for around 1,000,000 nanoseconds - When I decorate it with Timer with maxlen 2 - And I call the decorated function 3 times - Then the decorated function's measurements deque has 2 entries - - # --- Thread safety --- - - Scenario: Two threads calling the same timed function produce two measurements - Given a sync function that sleeps for around 5,000,000 nanoseconds - When I call the decorated function from 2 threads concurrently - Then the decorated function's measurements deque has 2 entries diff --git a/features/steps/block_timing_steps.py b/features/steps/block_timing_steps.py deleted file mode 100644 index 3b52451..0000000 --- a/features/steps/block_timing_steps.py +++ /dev/null @@ -1,312 +0,0 @@ -"""Step definitions for the Block timing feature.""" - -from __future__ import annotations - -import asyncio -import time -from concurrent.futures import ThreadPoolExecutor -from typing import TYPE_CHECKING - -from behave import given, then, when - -import timerun - -if TYPE_CHECKING: - from behave.runner import Context - - -# --- Given --- - - -@given( - "a {kind} operation that runs for around {duration_ns:n} nanoseconds", -) -@given( - "an {kind} operation that runs for around {duration_ns:n} nanoseconds", -) -def step_given_operation( - context: Context, - kind: str, - duration_ns: int, -) -> None: - """Store operation duration and kind.""" - context.operation_duration_ns = duration_ns - context.operation_kind = kind - - -@given("each thread sleeps {duration_ns:n} nanoseconds") -def step_given_thread_sleep(context: Context, duration_ns: int) -> None: - """Store thread sleep duration.""" - context.thread_sleep_ns = duration_ns - - -@given("the {which} block duration is {duration_ns:n} nanoseconds") -def step_given_block_duration( - context: Context, - which: str, - duration_ns: int, -) -> 
None: - """Store block duration for which block.""" - setattr(context, f"{which}_block_ns", duration_ns) - - -@given('I will add metadata key "{key}" as "{value}" in the first block') -def step_given_metadata_add_in_first( - context: Context, - key: str, - value: str, -) -> None: - """First block will add key/value to metadata.""" - context.metadata_add_in_first = (key, value) - - -@given("an {callback_kind} callback that records invocations") -def step_given_callback_records_invocations( - context: Context, - callback_kind: str, -) -> None: - """Store list and callback that records the measurement passed to it.""" - # List and callback that appends each measurement. - invocations: list[timerun.Measurement] = [] - - def record_invocation(m: timerun.Measurement) -> None: - invocations.append(m) - - # Store on context for When/Then. - setattr(context, f"{callback_kind}_invocations", invocations) - setattr(context, f"{callback_kind}_callback", record_invocation) - - -# --- When --- - - -@when("I measure the operation using `with`") -def step_measure_operation_using_with(context: Context) -> None: - """Measure with Timer(); sleep or spin per operation_kind.""" - with timerun.Timer() as context.measurement: - if context.operation_kind == "CPU-bound": - # Spin until duration elapsed. - start = time.process_time_ns() - while ( - time.process_time_ns() - start < context.operation_duration_ns - ): - pass - else: - # Sleep for duration. 
- time.sleep(context.operation_duration_ns / 1e9) - - -@when("I measure the async operation using `async with`") -def step_measure_async_using_async_with(context: Context) -> None: - """Measure async with Timer(); asyncio.sleep.""" - - async def run() -> timerun.Measurement: - async with timerun.Timer() as m: - await asyncio.sleep(context.operation_duration_ns / 1e9) - return m - - context.measurement = asyncio.run(run()) - - -@when( - "I measure blocks from {thread_count:n} threads " - "using the same Timer instance", -) -def step_measure_blocks_from_threads( - context: Context, - thread_count: int, -) -> None: - """Measure blocks from N threads.""" - context.thread_count = thread_count - cm = timerun.Timer() - - # Worker: enter timer, sleep, return measurement. - def run() -> timerun.Measurement: - with cm as m: - time.sleep(context.thread_sleep_ns / 1e9) - return m - - # Run thread_count workers and collect measurements. - with ThreadPoolExecutor(max_workers=thread_count) as ex: - futures = [ex.submit(run) for _ in range(thread_count)] - context.thread_measurements = [f.result() for f in futures] - - -@when("I measure two sequential blocks with the same Timer instance") -def step_measure_two_sequential_blocks(context: Context) -> None: - """Measure two sequential blocks.""" - cm = timerun.Timer() - - # First block. - with cm as context.first_measurement: - time.sleep(context.first_block_ns / 1e9) - - # Second block. 
- with cm as context.second_measurement: - time.sleep(context.second_block_ns / 1e9) - - -@when("I measure nested blocks with the same Timer instance") -def step_measure_nested_blocks(context: Context) -> None: - """Measure nested blocks.""" - cm = timerun.Timer() - - with cm as context.outer_measurement: - time.sleep(context.outer_block_ns / 1e9) - - with cm as context.inner_measurement: - time.sleep(context.inner_block_ns / 1e9) - - -@when("I measure a code block with that metadata") -def step_measure_block_with_metadata(context: Context) -> None: - """Measure with Timer(metadata=...); store result.""" - with timerun.Timer(metadata=context.metadata) as context.measurement: - pass - - -@when( - "I measure a code block with a Timer that has that {callback_kind} " - "callback", -) -def step_measure_block_with_callback( - context: Context, - callback_kind: str, -) -> None: - """Measure with Timer(on_start=... or on_end=...); run a trivial block.""" - callback = getattr(context, f"{callback_kind}_callback") - with timerun.Timer(**{callback_kind: callback}) as context.measurement: - pass - - -@when( - "I measure two blocks with the same Timer instance and that metadata", -) -def step_measure_two_blocks_with_metadata(context: Context) -> None: - """Measure two blocks; first may add metadata.""" - cm = timerun.Timer(metadata=context.metadata) - - # First block: optionally add key/value to measurement metadata. - with cm as context.first_measurement: - if hasattr(context, "metadata_add_in_first"): - context.first_measurement.metadata[ - context.metadata_add_in_first[0] - ] = context.metadata_add_in_first[1] - - # Second block: no extra metadata. 
- with cm as context.second_measurement: - pass - - -@when("I measure a code block that raises an exception") -def step_measure_block_raises(context: Context) -> None: - """Measure raising block; catch exception.""" - try: - with timerun.Timer() as context.measurement: - raise ValueError # noqa: TRY301 - except ValueError as e: - context.exception = e - - -@when("I call __exit__ on a Timer instance without calling __enter__ first") -def step_exit_without_enter(context: Context) -> None: - """Call Timer().__exit__ without __enter__; store error.""" - try: - timerun.Timer().__exit__(None, None, None) - except RuntimeError as e: - context.exception = e - - -# --- Then --- - - -@then( - "the measurement's wall time duration is at least {expected_ns:n} " - "nanoseconds", -) -def step_measurement_wall_time_at_least( - context: Context, - expected_ns: int, -) -> None: - """Assert wall time >= expected; used for CPU-bound scenario.""" - # Require wall_time and get duration. - assert context.measurement.wall_time is not None - duration = context.measurement.wall_time.duration - - # Duration must be at least expected. - assert duration >= expected_ns, ( - f"wall time {duration} < expected {expected_ns}" - ) - - -@then( - "the outer measurement's wall time duration is at least the inner " - "measurement's wall time duration", -) -def step_outer_wall_at_least_inner(context: Context) -> None: - """Assert outer wall >= inner.""" - # Required context validation: both have wall_time. - assert context.outer_measurement.wall_time is not None - assert context.inner_measurement.wall_time is not None - - # Duration: outer >= inner. 
- outer_d = context.outer_measurement.wall_time.duration - inner_d = context.inner_measurement.wall_time.duration - assert outer_d >= inner_d, f"outer {outer_d} < inner {inner_d}" - - -@then('the second measurement\'s metadata does not contain key "{key}"') -def step_second_measurement_metadata_no_key( - context: Context, - key: str, -) -> None: - """Assert second measurement has no key.""" - assert key not in context.second_measurement.metadata - - -@then("the measurements are from different threads") -def step_measurements_from_different_threads(context: Context) -> None: - """Assert N distinct measurements.""" - # Required context validation. - measurements = context.thread_measurements - - # Exactly thread_count measurements. - assert len(measurements) == context.thread_count, ( - f"expected {context.thread_count} measurements, " - f"got {len(measurements)}" - ) - - # All distinct (one measurement per thread). - assert len(measurements) == len({id(m) for m in measurements}), ( - "measurements are not all distinct (one per thread)" - ) - - -@then("the block yielded a measurement") -def step_block_yielded_measurement(context: Context) -> None: - """Assert block produced a measurement.""" - assert context.measurement is not None - assert context.measurement.wall_time is not None - - -@then("the {callback_kind} callback was called once") -def step_callback_called_once(context: Context, callback_kind: str) -> None: - """Assert the callback was invoked exactly once.""" - invocations = getattr(context, f"{callback_kind}_invocations") - assert len(invocations) == 1, ( - f"expected the {callback_kind} callback to be called once, " - f"got {len(invocations)}" - ) - - -@then( - "the {callback_kind} callback was called with the same measurement " - "instance that the Timer yielded for that block", -) -def step_callback_called_with_the_measurement( - context: Context, - callback_kind: str, -) -> None: - """Assert callback's argument is the same instance the Timer 
yielded.""" - arg = getattr(context, f"{callback_kind}_invocations")[0] - assert arg is context.measurement diff --git a/features/steps/common_steps.py b/features/steps/common_steps.py index f73350c..68bc1f9 100644 --- a/features/steps/common_steps.py +++ b/features/steps/common_steps.py @@ -1,8 +1,8 @@ """Step definitions shared by more than one feature. Rule: a step belongs here only if its Gherkin text is used in multiple feature -files (e.g. block_timing.feature and function_timing.feature). Steps used by -a single feature live in that feature's step file (e.g. block_timing_steps.py). +files (e.g. timer_basics.feature and timer_metadata.feature). Steps used by +a single feature live in that feature's step file (e.g. timer_basics_steps.py). """ from __future__ import annotations @@ -15,8 +15,8 @@ if TYPE_CHECKING: from behave.runner import Context -# Buffer: expected_ns <= duration <= expected_ns + BUFFER_NS (10 ms). -BUFFER_NS = 10_000_000 +# Buffer: expected_ns <= duration <= expected_ns + BUFFER_NS (5 ms). 
+BUFFER_NS = 5_000_000 # --- Helpers --- @@ -31,10 +31,20 @@ def _measurement(context: Context, which: str | None) -> object: # --- Given --- -@given('metadata run_id "{run_id}" and tag "{tag}"') -def step_given_metadata(context: Context, run_id: str, tag: str) -> None: - """Store metadata for Timer.""" - context.metadata = {"run_id": run_id, "tag": tag} +@given( + "a {kind} function that sleeps for around {duration_ns:n} nanoseconds", +) +@given( + "an {kind} function that sleeps for around {duration_ns:n} nanoseconds", +) +def step_given_func_sleep( + context: Context, + kind: str, + duration_ns: int, +) -> None: + """Store func kind and duration.""" + context.func_duration_ns = duration_ns + context.func_kind = kind # --- Then --- @@ -62,16 +72,6 @@ def step_error_message_is(context: Context, message: str) -> None: assert str(context.exception) == message -@then("an exception was propagated to the caller") -def step_exception_propagated(context: Context) -> None: - """Assert ValueError was caught.""" - # Required: an exception was stored. - assert hasattr(context, "exception") - - # Must be ValueError. - assert isinstance(context.exception, ValueError) - - @then( "the measurement's wall time duration is within the configured buffer of " "{expected_ns:n} nanoseconds", @@ -100,24 +100,6 @@ def step_wall_time_within_buffer( ) -@then( - "the measurement's CPU time duration is within the configured buffer of " - "{expected_ns:n} nanoseconds", -) -def step_cpu_time_within_buffer(context: Context, expected_ns: int) -> None: - """Assert CPU time in buffer.""" - # Resolve duration and buffer bounds. - assert context.measurement.cpu_time is not None - - # Duration must lie in [min_ns, max_ns]. 
- duration = context.measurement.cpu_time.duration - min_ns = max(0, expected_ns - 1_000_000) - max_ns = expected_ns + BUFFER_NS - assert min_ns <= duration <= max_ns, ( - f"CPU time {duration} not in [{min_ns}, {max_ns}] (buffer={BUFFER_NS})" - ) - - @then('the measurement\'s metadata key "{key}" is "{value}"') @then('the {which} measurement\'s metadata key "{key}" is "{value}"') def step_measurement_metadata_key_is( @@ -129,3 +111,13 @@ def step_measurement_metadata_key_is( """Assert metadata[key] is value (default or first/second/outer/inner).""" measurement = _measurement(context, which) assert measurement.metadata[key] == value + + +@then("the decorated function's measurements deque has {n:n} entry") +@then("the decorated function's measurements deque has {n:n} entries") +def step_then_measurements_count(context: Context, n: int) -> None: + """Assert measurements count is n.""" + func = context.decorated_function + assert len(func.measurements) == n, ( + f"expected {n} measurements, got {len(func.measurements)}" + ) diff --git a/features/steps/function_timing_steps.py b/features/steps/function_timing_steps.py deleted file mode 100644 index 52fdc15..0000000 --- a/features/steps/function_timing_steps.py +++ /dev/null @@ -1,205 +0,0 @@ -"""Step definitions for the Function timing feature.""" - -from __future__ import annotations - -import asyncio -import time -from concurrent.futures import ThreadPoolExecutor -from typing import TYPE_CHECKING - -from behave import given, then, when - -import timerun - -if TYPE_CHECKING: - from behave.runner import Context - - -# --- Given --- - - -@given( - "a {kind} function that sleeps for around {duration_ns:n} nanoseconds", -) -@given( - "an {kind} function that sleeps for around {duration_ns:n} nanoseconds", -) -def step_given_func_sleep( - context: Context, - kind: str, - duration_ns: int, -) -> None: - """Store func kind and duration.""" - context.func_duration_ns = duration_ns - context.func_kind = kind - - -@given( - "a 
{kind} generator that yields {count:n} items and sleeps " - "{duration_ns:n} nanoseconds total", -) -@given( - "an {kind} generator that yields {count:n} items and sleeps " - "{duration_ns:n} nanoseconds total", -) -def step_given_gen( - context: Context, - kind: str, - count: int, - duration_ns: int, -) -> None: - """Store generator kind, duration and count.""" - context.gen_duration_ns = duration_ns - context.gen_count = count - context.gen_kind = kind - - -# --- When --- - - -@when("I call the decorated function") -def step_when_call_decorated_func(context: Context) -> None: - """Decorate function with Timer(), run it.""" - if context.func_kind == "async": - # Define async function, run it, store function and measurement. - @timerun.Timer() - async def async_func() -> None: - await asyncio.sleep(context.func_duration_ns / 1e9) - - asyncio.run(async_func()) - context.decorated_function = async_func - context.measurement = async_func.measurements[-1] - else: - # Define sync function, run it, store function and measurement. - @timerun.Timer() - def sync_func() -> None: - time.sleep(context.func_duration_ns / 1e9) - - sync_func() - context.decorated_function = sync_func - context.measurement = sync_func.measurements[-1] - - -@when("I fully consume the decorated generator") -def step_when_consume_gen(context: Context) -> None: # noqa: C901 - """Decorate generator with Timer(), consume fully.""" - per_sleep = context.gen_duration_ns // context.gen_count - - if context.gen_kind == "async": - # Define async gen and runner; consume; store func and measurement. - @timerun.Timer() - async def async_gen() -> object: - for i in range(context.gen_count): - await asyncio.sleep(per_sleep / 1e9) - yield i - - async def run() -> None: - async for _ in async_gen(): - pass - - asyncio.run(run()) - context.decorated_function = async_gen - context.measurement = async_gen.measurements[-1] - else: - # Define sync generator; consume; store function and measurement. 
- @timerun.Timer() - def sync_gen() -> object: - for i in range(context.gen_count): - time.sleep(per_sleep / 1e9) - yield i - - for _ in sync_gen(): - pass - context.decorated_function = sync_gen - context.measurement = sync_gen.measurements[-1] - - -@when("I call a decorated function with that metadata") -def step_when_call_with_metadata(context: Context) -> None: - """Call no-op function decorated with Timer(metadata=...).""" - - @timerun.Timer(metadata=context.metadata) - def f() -> None: - pass - - f() - context.decorated_function = f - context.measurement = f.measurements[-1] - - -@when("I call a decorated function that raises an exception") -def step_when_call_raising(context: Context) -> None: - """Call raising function under Timer(); catch exception.""" - - # Define raising function. - @timerun.Timer() - def raising() -> None: - raise ValueError - - # Call and catch exception for Then to assert. - try: - raising() - except ValueError as e: - context.exception = e - - # Store function and measurement for Then. - context.decorated_function = raising - context.measurement = raising.measurements[-1] - - -@when("I decorate it with Timer with maxlen {maxlen:n}") -def step_when_decorate_maxlen(context: Context, maxlen: int) -> None: - """Store maxlen for next step.""" - context.func_maxlen = maxlen - - -@when("I call the decorated function {times:n} times") -def step_when_call_three_times(context: Context, times: int) -> None: - """Decorate with Timer(maxlen=...), call N times.""" - - @timerun.Timer(maxlen=context.func_maxlen) - def sync_func() -> None: - time.sleep(context.func_duration_ns / 1e9) - - # Call N times; only last maxlen measurements kept. 
- for _ in range(times): - sync_func() - - context.decorated_function = sync_func - - -@when( - "I call the decorated function from {thread_count:n} threads concurrently", -) -def step_when_call_from_threads(context: Context, thread_count: int) -> None: - """Run decorated function from N threads.""" - - @timerun.Timer() - def sync_func() -> None: - time.sleep(context.func_duration_ns / 1e9) - - def run() -> None: - sync_func() - - # Run thread_count workers concurrently. - with ThreadPoolExecutor(max_workers=thread_count) as ex: - futures = [ex.submit(run) for _ in range(thread_count)] - for f in futures: - f.result() - - # Store for Then. - context.decorated_function = sync_func - context.thread_count = thread_count - - -# --- Then --- - - -@then("the decorated function's measurements deque has {n:n} entry") -@then("the decorated function's measurements deque has {n:n} entries") -def step_then_measurements_count(context: Context, n: int) -> None: - """Assert measurements count is n.""" - func = context.decorated_function - assert len(func.measurements) == n, ( - f"expected {n} measurements, got {len(func.measurements)}" - ) diff --git a/features/steps/time_span_steps.py b/features/steps/time_span_steps.py index 8c2b40b..f336a52 100644 --- a/features/steps/time_span_steps.py +++ b/features/steps/time_span_steps.py @@ -1,14 +1,19 @@ """Step definitions for the time span feature.""" +from __future__ import annotations + import operator from datetime import timedelta +from typing import TYPE_CHECKING import parse from behave import given, register_type, then, when -from behave.runner import Context import timerun +if TYPE_CHECKING: + from behave.runner import Context + # Gherkin relation phrases to operator functions for span comparison. 
RELATION_OPERATORS = { "equals": operator.eq, diff --git a/features/steps/timer_basics_steps.py b/features/steps/timer_basics_steps.py new file mode 100644 index 0000000..1b2a16e --- /dev/null +++ b/features/steps/timer_basics_steps.py @@ -0,0 +1,184 @@ +"""Step definitions for basic timing (blocks, functions, generators).""" + +from __future__ import annotations + +import asyncio +import time +from typing import TYPE_CHECKING + +from behave import given, then, when + +import timerun +from features.steps.common_steps import BUFFER_NS + +if TYPE_CHECKING: + from behave.runner import Context + +# Lower-bound slack for CPU time: max(0, expected_ns - CPU_TOLERANCE_NS). +CPU_TOLERANCE_NS = 500_000 # 0.5 ms + +# --- Given --- + + +@given( + "a {kind} operation that runs for around {duration_ns:n} nanoseconds", +) +@given( + "an {kind} operation that runs for around {duration_ns:n} nanoseconds", +) +def step_given_operation( + context: Context, + kind: str, + duration_ns: int, +) -> None: + """Store operation duration and kind.""" + context.operation_duration_ns = duration_ns + context.operation_kind = kind + + +@given( + "a {kind} generator that yields {count:n} items and sleeps " + "{duration_ns:n} nanoseconds total", +) +@given( + "an {kind} generator that yields {count:n} items and sleeps " + "{duration_ns:n} nanoseconds total", +) +def step_given_gen( + context: Context, + kind: str, + count: int, + duration_ns: int, +) -> None: + """Store generator kind, duration and count.""" + context.gen_duration_ns = duration_ns + context.gen_count = count + context.gen_kind = kind + + +# --- When --- + + +@when("I measure the operation using `with`") +def step_measure_sync_operation_using_with(context: Context) -> None: + """Measure with Timer(); sleep or spin per operation_kind.""" + with timerun.Timer() as context.measurement: + if context.operation_kind == "CPU-bound": + # Spin until duration elapsed. 
+ start = time.process_time_ns() + while ( + time.process_time_ns() - start < context.operation_duration_ns + ): + pass + else: + # Sleep for duration. + time.sleep(context.operation_duration_ns / 1e9) + + +@when("I measure the async operation using `async with`") +def step_measure_async_operation_using_async_with(context: Context) -> None: + """Measure async with Timer(); asyncio.sleep.""" + + async def run() -> timerun.Measurement: + async with timerun.Timer() as m: + await asyncio.sleep(context.operation_duration_ns / 1e9) + return m + + context.measurement = asyncio.run(run()) + + +@when("I call the decorated function") +def step_when_call_decorated_func(context: Context) -> None: + """Decorate function with Timer(), run it.""" + if context.func_kind == "async": + # Define async function, run it, store function and measurement. + @timerun.Timer() + async def async_func() -> None: + await asyncio.sleep(context.func_duration_ns / 1e9) + + asyncio.run(async_func()) + context.decorated_function = async_func + context.measurement = async_func.measurements[-1] + else: + # Define sync function, run it, store function and measurement. + @timerun.Timer() + def sync_func() -> None: + time.sleep(context.func_duration_ns / 1e9) + + sync_func() + context.decorated_function = sync_func + context.measurement = sync_func.measurements[-1] + + +@when("I fully consume the decorated generator") +def step_when_consume_gen(context: Context) -> None: # noqa: C901 + """Decorate generator with Timer(), consume fully.""" + per_sleep = context.gen_duration_ns // context.gen_count + + if context.gen_kind == "async": + # Define async gen and runner; consume; store func and measurement. 
+ @timerun.Timer() + async def async_gen() -> object: + for i in range(context.gen_count): + await asyncio.sleep(per_sleep / 1e9) + yield i + + async def run() -> None: + async for _ in async_gen(): + pass + + asyncio.run(run()) + context.decorated_function = async_gen + context.measurement = async_gen.measurements[-1] + else: + # Define sync generator; consume; store function and measurement. + @timerun.Timer() + def sync_gen() -> object: + for i in range(context.gen_count): + time.sleep(per_sleep / 1e9) + yield i + + for _ in sync_gen(): + pass + context.decorated_function = sync_gen + context.measurement = sync_gen.measurements[-1] + + +# --- Then --- + + +@then( + "the measurement's wall time duration is at least {expected_ns:n} " + "nanoseconds", +) +def step_measurement_wall_time_at_least( + context: Context, + expected_ns: int, +) -> None: + """Assert wall time >= expected.""" + # Require wall_time and get duration. + assert context.measurement.wall_time is not None + duration = context.measurement.wall_time.duration + + # Duration must be at least expected. + assert duration >= expected_ns, ( + f"wall time {duration} < expected {expected_ns}" + ) + + +@then( + "the measurement's CPU time duration is within the configured buffer of " + "{expected_ns:n} nanoseconds", +) +def step_cpu_time_within_buffer(context: Context, expected_ns: int) -> None: + """Assert CPU time in buffer.""" + # Resolve duration and buffer bounds. + assert context.measurement.cpu_time is not None + + # Duration must lie in [min_ns, max_ns]. 
+    duration = context.measurement.cpu_time.duration
+    min_ns = max(0, expected_ns - CPU_TOLERANCE_NS)
+    max_ns = expected_ns + BUFFER_NS
+    assert min_ns <= duration <= max_ns, (
+        f"CPU time {duration} not in [{min_ns}, {max_ns}]"
+    )
diff --git a/features/steps/timer_callbacks_steps.py b/features/steps/timer_callbacks_steps.py
new file mode 100644
index 0000000..42060b8
--- /dev/null
+++ b/features/steps/timer_callbacks_steps.py
@@ -0,0 +1,75 @@
+"""Step definitions for Timer on_start / on_end callbacks."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from behave import given, then, when
+
+import timerun
+
+if TYPE_CHECKING:
+    from behave.runner import Context
+
+
+# --- Given ---
+
+
+@given("an {callback_kind} callback that records invocations")
+def step_given_callback_records_invocations(
+    context: Context,
+    callback_kind: str,
+) -> None:
+    """Store list and callback that records each measurement."""
+    # List and callback that appends each measurement.
+    invocations: list[timerun.Measurement] = []
+
+    def record_invocation(m: timerun.Measurement) -> None:
+        invocations.append(m)
+
+    # Store on context for When/Then.
+ setattr(context, f"{callback_kind}_invocations", invocations) + setattr(context, f"{callback_kind}_callback", record_invocation) + + +# --- When --- + + +@when( + "I measure a code block with a Timer that has that {callback_kind} " + "callback", +) +def step_measure_block_with_callback( + context: Context, + callback_kind: str, +) -> None: + """Measure with Timer callback; run trivial block.""" + callback = getattr(context, f"{callback_kind}_callback") + with timerun.Timer(**{callback_kind: callback}) as context.measurement: + pass + + +# --- Then --- + + +@then("the {callback_kind} callback was called once") +def step_callback_called_once(context: Context, callback_kind: str) -> None: + """Assert callback was invoked exactly once.""" + invocations = getattr(context, f"{callback_kind}_invocations") + assert len(invocations) == 1, ( + f"expected the {callback_kind} callback to be called once, " + f"got {len(invocations)}" + ) + + +@then( + "the {callback_kind} callback was called with the same measurement " + "instance that the Timer yielded for that block", +) +def step_callback_called_with_the_measurement( + context: Context, + callback_kind: str, +) -> None: + """Assert callback received the same measurement instance.""" + arg = getattr(context, f"{callback_kind}_invocations")[0] + assert arg is context.measurement diff --git a/features/steps/timer_errors_steps.py b/features/steps/timer_errors_steps.py new file mode 100644 index 0000000..afe05ca --- /dev/null +++ b/features/steps/timer_errors_steps.py @@ -0,0 +1,112 @@ +"""Step definitions for Timer error handling.""" + +from __future__ import annotations + +import asyncio +from typing import TYPE_CHECKING + +from behave import given, then, when + +import timerun + +if TYPE_CHECKING: + from behave.runner import Context + + +# --- Given --- + + +@given( + "an event loop where code runs with no current asyncio task", +) +def step_given_event_loop_no_current_task(context: Context) -> None: + """Store event loop 
with no current task (runs from When callback).""" + context.loop = asyncio.new_event_loop() + + +# --- When --- + + +@when("I measure a code block that raises an exception") +def step_measure_block_raises(context: Context) -> None: + """Measure raising block; catch exception.""" + try: + with timerun.Timer() as context.measurement: + raise ValueError # noqa: TRY301 + except ValueError as e: + context.exception = e + + +@when("I call __exit__ on a Timer instance without calling __enter__ first") +def step_exit_without_enter(context: Context) -> None: + """Call Timer().__exit__ without __enter__; store error.""" + try: + timerun.Timer().__exit__(None, None, None) + except RuntimeError as e: + context.exception = e + + +@when("I use async with Timer from a callback on that loop") +def step_async_with_timer_no_current_task(context: Context) -> None: + """Async with Timer from call_soon (no current task); store error.""" + loop = context.loop + + def callback() -> None: + async def use_timer() -> None: + async with timerun.Timer(): + pass + + coro = use_timer() + try: + coro.send(None) + except RuntimeError as e: + context.exception = e + except StopIteration: + pass + finally: + coro.close() + loop.stop() + + loop.call_soon(callback) + loop.run_forever() + loop.close() + + +@when("I call a decorated function that raises an exception") +def step_when_call_raising(context: Context) -> None: + """Call raising function under Timer(); catch exception.""" + + # Define raising function. + @timerun.Timer() + def raising() -> None: + raise ValueError + + # Call and catch exception for Then to assert. + try: + raising() + except ValueError as e: + context.exception = e + + # Store function and measurement for Then. 
+ context.decorated_function = raising + context.measurement = raising.measurements[-1] + + +# --- Then --- + + +@then("the block yielded a measurement") +def step_block_yielded_measurement(context: Context) -> None: + """Assert block produced a measurement.""" + assert context.measurement is not None + assert context.measurement.wall_time is not None + + +@then("an exception was propagated to the caller") +def step_exception_propagated(context: Context) -> None: + """Assert ValueError was caught.""" + # Required: an exception was stored. + assert hasattr(context, "exception") + + # Must be ValueError. + assert isinstance(context.exception, ValueError) diff --git a/features/steps/timer_metadata_steps.py b/features/steps/timer_metadata_steps.py new file mode 100644 index 0000000..a1fe2fb --- /dev/null +++ b/features/steps/timer_metadata_steps.py @@ -0,0 +1,85 @@ +"""Step definitions for Timer metadata.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from behave import given, then, when + +import timerun + +if TYPE_CHECKING: + from behave.runner import Context + + +# --- Given --- + + +@given('metadata run_id "{run_id}" and tag "{tag}"') +def step_given_metadata(context: Context, run_id: str, tag: str) -> None: + """Store metadata for Timer.""" + context.metadata = {"run_id": run_id, "tag": tag} + + +@given('I will add metadata key "{key}" as "{value}" in the first block') +def step_given_metadata_add_in_first( + context: Context, + key: str, + value: str, +) -> None: + """First block will add key/value to metadata.""" + context.metadata_add_in_first = (key, value) + + +# --- When --- + + +@when("I measure a code block with that metadata") +def step_measure_block_with_metadata(context: Context) -> None: + """Measure with Timer(metadata=...); store result.""" + with timerun.Timer(metadata=context.metadata) as context.measurement: + pass + + +@when( + "I measure two blocks with the same Timer instance and that metadata", +) +def 
step_measure_two_blocks_with_metadata(context: Context) -> None: + """Measure two blocks; first may add metadata.""" + cm = timerun.Timer(metadata=context.metadata) + + # First block: optionally add key/value to measurement metadata. + with cm as context.first_measurement: + if hasattr(context, "metadata_add_in_first"): + context.first_measurement.metadata[ + context.metadata_add_in_first[0] + ] = context.metadata_add_in_first[1] + + # Second block: no extra metadata. + with cm as context.second_measurement: + pass + + +@when("I call a decorated function with that metadata") +def step_when_call_with_metadata(context: Context) -> None: + """Call no-op function decorated with Timer(metadata=...).""" + + @timerun.Timer(metadata=context.metadata) + def f() -> None: + pass + + f() + context.decorated_function = f + context.measurement = f.measurements[-1] + + +# --- Then --- + + +@then('the second measurement\'s metadata does not contain key "{key}"') +def step_second_measurement_metadata_no_key( + context: Context, + key: str, +) -> None: + """Assert second measurement has no key.""" + assert key not in context.second_measurement.metadata diff --git a/features/steps/timer_reuse_steps.py b/features/steps/timer_reuse_steps.py new file mode 100644 index 0000000..497318c --- /dev/null +++ b/features/steps/timer_reuse_steps.py @@ -0,0 +1,218 @@ +"""Step definitions for reusing one Timer across executions.""" + +from __future__ import annotations + +import asyncio +import time +from concurrent.futures import ThreadPoolExecutor +from typing import TYPE_CHECKING + +from behave import given, then, when + +import timerun + +if TYPE_CHECKING: + from behave.runner import Context + + +# --- Given --- + + +@given("each thread sleeps {duration_ns:n} nanoseconds") +def step_given_thread_sleep(context: Context, duration_ns: int) -> None: + """Store thread sleep duration.""" + context.thread_sleep_ns = duration_ns + + +@given("a Timer shared by both tasks") +def 
step_given_shared_timer(context: Context) -> None: + """Create a Timer instance shared by task A and task B.""" + context.shared_timer = timerun.Timer() + + +@given("task {which} runs for {duration_ns:n} nanoseconds") +def step_given_task_duration( + context: Context, + which: str, + duration_ns: int, +) -> None: + """Define async function for the given task using the shared timer.""" + shared_timer = context.shared_timer + duration_sec = duration_ns / 1e9 + + async def run_task() -> timerun.Measurement: + async with shared_timer as m: + await asyncio.sleep(duration_sec) + return m + + setattr(context, f"task_{which.lower()}_ns", duration_ns) + setattr(context, f"task_{which.lower()}", run_task) + + +@given("task A starts before task B") +def step_given_task_a_starts_before_b(context: Context) -> None: + """Store start order: A then B.""" + context.task_a_starts_before_b = True + + +@given("the {which} block duration is {duration_ns:n} nanoseconds") +def step_given_block_duration( + context: Context, + which: str, + duration_ns: int, +) -> None: + """Store block duration for the named block (first, second, etc.).""" + setattr(context, f"{which}_block_ns", duration_ns) + + +# --- When --- + + +@when( + "I measure blocks from {thread_count:n} threads " + "using the same Timer instance", +) +def step_measure_blocks_from_threads( + context: Context, + thread_count: int, +) -> None: + """Measure blocks from N threads.""" + context.thread_count = thread_count + cm = timerun.Timer() + + # Worker: enter timer, sleep, return measurement. + def run() -> timerun.Measurement: + with cm as m: + time.sleep(context.thread_sleep_ns / 1e9) + return m + + # Run thread_count workers and collect measurements. 
+ with ThreadPoolExecutor(max_workers=thread_count) as ex: + futures = [ex.submit(run) for _ in range(thread_count)] + context.thread_measurements = [f.result() for f in futures] + + +@when("I measure two sequential blocks with the same Timer instance") +def step_measure_two_sequential_blocks(context: Context) -> None: + """Measure two sequential blocks.""" + cm = timerun.Timer() + + # First block. + with cm as context.first_measurement: + time.sleep(context.first_block_ns / 1e9) + + # Second block. + with cm as context.second_measurement: + time.sleep(context.second_block_ns / 1e9) + + +@when("I measure nested blocks with the same Timer instance") +def step_measure_nested_blocks(context: Context) -> None: + """Measure nested blocks.""" + cm = timerun.Timer() + + with cm as context.outer_measurement: + time.sleep(context.outer_block_ns / 1e9) + + with cm as context.inner_measurement: + time.sleep(context.inner_block_ns / 1e9) + + +@when("I run both tasks concurrently with the same Timer") +def step_run_both_tasks_concurrently(context: Context) -> None: + """Run tasks concurrently, store results as first and second.""" + assert getattr(context, "task_a_starts_before_b", False), ( + "start order must be given (e.g. 
task A starts before task B)" + ) + + async def run_task_a_before_b() -> tuple[ + timerun.Measurement, + timerun.Measurement, + ]: + return await asyncio.gather(context.task_a(), context.task_b()) + + results = asyncio.run(run_task_a_before_b()) + context.first_measurement = results[0] + context.second_measurement = results[1] + + +@when("I decorate it with Timer with maxlen {maxlen:n}") +def step_when_decorate_maxlen(context: Context, maxlen: int) -> None: + """Store maxlen for next step.""" + context.func_maxlen = maxlen + + +@when("I call the decorated function {times:n} times") +def step_when_call_three_times(context: Context, times: int) -> None: + """Decorate with Timer(maxlen=...), call N times.""" + + @timerun.Timer(maxlen=context.func_maxlen) + def sync_func() -> None: + time.sleep(context.func_duration_ns / 1e9) + + # Call N times; only last maxlen measurements kept. + for _ in range(times): + sync_func() + + context.decorated_function = sync_func + + +@when( + "I call the decorated function from {thread_count:n} threads concurrently", +) +def step_when_call_from_threads(context: Context, thread_count: int) -> None: + """Run decorated function from N threads.""" + + @timerun.Timer() + def sync_func() -> None: + time.sleep(context.func_duration_ns / 1e9) + + def run() -> None: + sync_func() + + # Run thread_count workers concurrently. + with ThreadPoolExecutor(max_workers=thread_count) as ex: + futures = [ex.submit(run) for _ in range(thread_count)] + for f in futures: + f.result() + + # Store for Then. + context.decorated_function = sync_func + context.thread_count = thread_count + + +# --- Then --- + + +@then( + "the outer measurement's wall time duration is at least the inner " + "measurement's wall time duration", +) +def step_outer_wall_at_least_inner(context: Context) -> None: + """Assert outer wall >= inner.""" + # Required context validation: both have wall_time. 
+ assert context.outer_measurement.wall_time is not None + assert context.inner_measurement.wall_time is not None + + # Duration: outer >= inner. + outer_d = context.outer_measurement.wall_time.duration + inner_d = context.inner_measurement.wall_time.duration + assert outer_d >= inner_d, f"outer {outer_d} < inner {inner_d}" + + +@then("the measurements are from different threads") +def step_measurements_from_different_threads(context: Context) -> None: + """Assert thread_count distinct measurements.""" + # Required context validation. + measurements = context.thread_measurements + + # Exactly thread_count measurements. + assert len(measurements) == context.thread_count, ( + f"expected {context.thread_count} measurements, " + f"got {len(measurements)}" + ) + + # All distinct (one measurement per thread). + assert len(measurements) == len({id(m) for m in measurements}), ( + "measurements are not all distinct (one per thread)" + ) diff --git a/features/timer_basics.feature b/features/timer_basics.feature new file mode 100644 index 0000000..62bdff6 --- /dev/null +++ b/features/timer_basics.feature @@ -0,0 +1,45 @@ +Feature: Basic timing + + As someone measuring duration, + I want to time code execution (blocks, functions, and generators), + so that I get accurate wall time and CPU time measurements. 
+ + Scenario: Blocking sleep with `with` yields wall time and near-zero CPU time + Given a blocking operation that runs for around 5,000,000 nanoseconds + When I measure the operation using `with` + Then the measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + Scenario: Async sleep with `async with` yields wall time and near-zero CPU time + Given an async operation that runs for around 5,000,000 nanoseconds + When I measure the async operation using `async with` + Then the measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + Scenario: CPU-bound block with `with` yields meaningful wall and CPU time + Given a CPU-bound operation that runs for around 5,000,000 nanoseconds + When I measure the operation using `with` + Then the measurement's wall time duration is at least 5,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 5,000,000 nanoseconds + + Scenario: Timing a synchronous sleeping function records real time and minimal CPU time + Given a sync function that sleeps for around 5,000,000 nanoseconds + When I call the decorated function + Then the measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + Scenario: Timing an async sleeping function records real time and minimal CPU time + Given an async function that sleeps for around 5,000,000 nanoseconds + When I call the decorated function + Then the measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds + And the measurement's CPU time duration is within the configured buffer of 0 nanoseconds + + Scenario: Fully consuming a sync generator records one 
measurement + Given a sync generator that yields 3 items and sleeps 2,500,000 nanoseconds total + When I fully consume the decorated generator + Then the decorated function's measurements deque has 1 entry + + Scenario: Fully consuming an async generator records one measurement + Given an async generator that yields 3 items and sleeps 2,500,000 nanoseconds total + When I fully consume the decorated generator + Then the decorated function's measurements deque has 1 entry diff --git a/features/timer_callbacks.feature b/features/timer_callbacks.feature new file mode 100644 index 0000000..ef57e7f --- /dev/null +++ b/features/timer_callbacks.feature @@ -0,0 +1,17 @@ +Feature: Timer callbacks + + As someone measuring duration, + I want callbacks when timing starts and ends, + so that I can react to measurement lifecycle events. + + Scenario: The on_start callback is invoked once with the same measurement instance the Timer yields for that block + Given an on_start callback that records invocations + When I measure a code block with a Timer that has that on_start callback + Then the on_start callback was called once + And the on_start callback was called with the same measurement instance that the Timer yielded for that block + + Scenario: The on_end callback is invoked once with the same measurement instance the Timer yields for that block + Given an on_end callback that records invocations + When I measure a code block with a Timer that has that on_end callback + Then the on_end callback was called once + And the on_end callback was called with the same measurement instance that the Timer yielded for that block diff --git a/features/timer_errors.feature b/features/timer_errors.feature new file mode 100644 index 0000000..d13f57b --- /dev/null +++ b/features/timer_errors.feature @@ -0,0 +1,26 @@ +Feature: Timer error handling + + As someone measuring duration, + I want timing to handle errors gracefully, + so that measurements are preserved and misuse is clearly reported. 
+ + Scenario: Block that raises still yields measurement; exception propagates + When I measure a code block that raises an exception + Then the block yielded a measurement + And an exception was propagated to the caller + + Scenario: __exit__ without __enter__ raises RuntimeError + When I call __exit__ on a Timer instance without calling __enter__ first + Then a RuntimeError is raised + And the error message is "__exit__ called without a matching __enter__" + + Scenario: async with Timer when no current asyncio task raises RuntimeError + Given an event loop where code runs with no current asyncio task + When I use async with Timer from a callback on that loop + Then a RuntimeError is raised + And the error message is "no current asyncio task" + + Scenario: When a timed function raises an error, one measurement is still recorded and the error is re-raised + When I call a decorated function that raises an exception + Then the decorated function's measurements deque has 1 entry + And an exception was propagated to the caller diff --git a/features/timer_metadata.feature b/features/timer_metadata.feature new file mode 100644 index 0000000..174a8bc --- /dev/null +++ b/features/timer_metadata.feature @@ -0,0 +1,26 @@ +Feature: Timer metadata + + As someone measuring duration, + I want to attach metadata to measurements, + so that I can label and identify results. 
+ + Scenario: Initial metadata is carried on the yielded measurement + Given metadata run_id "exp-1" and tag "baseline" + When I measure a code block with that metadata + Then the measurement's metadata key "run_id" is "exp-1" + And the measurement's metadata key "tag" is "baseline" + + Scenario: Metadata set in first block is not visible in second block (reused Timer) + Given metadata run_id "same-run" and tag "original" + And I will add metadata key "extra" as "from_first_block" in the first block + When I measure two blocks with the same Timer instance and that metadata + Then the first measurement's metadata key "extra" is "from_first_block" + And the second measurement's metadata key "run_id" is "same-run" + And the second measurement's metadata key "tag" is "original" + And the second measurement's metadata does not contain key "extra" + + Scenario: Metadata attached to the timer appears on each measurement + Given metadata run_id "exp-1" and tag "baseline" + When I call a decorated function with that metadata + Then the measurement's metadata key "run_id" is "exp-1" + And the measurement's metadata key "tag" is "baseline" diff --git a/features/timer_reuse.feature b/features/timer_reuse.feature new file mode 100644 index 0000000..5f59d3c --- /dev/null +++ b/features/timer_reuse.feature @@ -0,0 +1,54 @@ +Feature: Timer reuse + + As someone measuring duration, + I want to reuse one Timer across multiple executions, + so that each execution is independently measured and storage stays bounded. 
+ + Scenario: Two threads with one Timer yield one measurement per thread + Given each thread sleeps 2,000,000 nanoseconds + When I measure blocks from 2 threads using the same Timer instance + Then the measurements are from different threads + + Scenario: Two sequential blocks with one Timer yield correct durations + Given the first block duration is 2,000,000 nanoseconds + And the second block duration is 5,000,000 nanoseconds + When I measure two sequential blocks with the same Timer instance + Then the first measurement's wall time duration is within the configured buffer of 2,000,000 nanoseconds + And the second measurement's wall time duration is within the configured buffer of 5,000,000 nanoseconds + + Scenario: Nested blocks with one Timer yield independent outer and inner times + Given the outer block duration is 10,000,000 nanoseconds + And the inner block duration is 2,000,000 nanoseconds + When I measure nested blocks with the same Timer instance + Then the outer measurement's wall time duration is within the configured buffer of 12,000,000 nanoseconds + And the inner measurement's wall time duration is within the configured buffer of 2,000,000 nanoseconds + And the outer measurement's wall time duration is at least the inner measurement's wall time duration + + Scenario: The task that started first finishes first + Given a Timer shared by both tasks + And task A runs for 500,000 nanoseconds + And task B runs for 2,000,000 nanoseconds + And task A starts before task B + When I run both tasks concurrently with the same Timer + Then the first measurement's wall time duration is within the configured buffer of 500,000 nanoseconds + And the second measurement's wall time duration is within the configured buffer of 2,000,000 nanoseconds + + Scenario: The task that started second finishes first + Given a Timer shared by both tasks + And task A runs for 2,000,000 nanoseconds + And task B runs for 500,000 nanoseconds + And task A starts before task B + When I 
run both tasks concurrently with the same Timer + Then the first measurement's wall time duration is within the configured buffer of 2,000,000 nanoseconds + And the second measurement's wall time duration is within the configured buffer of 500,000 nanoseconds + + Scenario: Two threads calling the same timed function produce two measurements + Given a sync function that sleeps for around 2,000,000 nanoseconds + When I call the decorated function from 2 threads concurrently + Then the decorated function's measurements deque has 2 entries + + Scenario: With maxlen 2, only the last 2 measurements are kept + Given a sync function that sleeps for around 500,000 nanoseconds + When I decorate it with Timer with maxlen 2 + And I call the decorated function 3 times + Then the decorated function's measurements deque has 2 entries diff --git a/pyproject.toml b/pyproject.toml index 95a332f..1cd27ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,13 +33,10 @@ classifiers = [ ] dynamic = ["version"] -[project.optional-dependencies] -dev = ["behave", "coverage", "pip-audit"] -docs = ["zensical"] - [project.urls] -Homepage = "https://github.com/HH-MWB/timerun" -"Bug Reports" = "https://github.com/HH-MWB/timerun/issues" +Homepage = "https://hh-mwb.github.io/timerun/" +Repository = "https://github.com/HH-MWB/timerun" +Issues = "https://github.com/HH-MWB/timerun/issues" Changelog = "https://github.com/HH-MWB/timerun/releases" PyPI = "https://pypi.org/project/timerun" diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..def885c --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,3 @@ +behave +coverage +pip-audit diff --git a/timerun.py b/timerun.py index 1744132..2437cfc 100644 --- a/timerun.py +++ b/timerun.py @@ -1,17 +1,56 @@ -"""TimeRun is a Python library for time measurements.""" +"""Structured timing for Python: wall-clock and CPU time per block or call. 
+TimeRun is a single-file package with no dependencies beyond the standard +library. It records wall-clock time (``perf_counter_ns``) and CPU time +(``process_time_ns``) for code blocks and function calls, producing one +:class:`Measurement` per run. Each measurement carries two +:class:`TimeSpan` objects and optional user-defined metadata. + +Use :class:`Timer` as a context manager to time a block:: + + with Timer() as m: + ... + print(m.wall_time.timedelta) + +Or as a decorator to time every call:: + + @Timer() + def func(): + ... + func() + print(func.measurements[-1].wall_time.timedelta) + +Measurements support optional ``metadata`` (deep-copied per run) and +``on_start`` / ``on_end`` callbacks. Timer is reusable: thread-safe for +sync blocks, and task-safe for concurrent asyncio tasks. + +Public API +---------- +TimeSpan + Immutable nanosecond interval (``duration``, ``start``, ``end``, + ``timedelta``). +Measurement + Wall time, CPU time, and metadata for a single timing run. +Timer + Context manager and decorator that records measurements. + +""" + +from asyncio import current_task from collections import deque from collections.abc import AsyncGenerator, Callable, Generator +from contextvars import ContextVar from copy import deepcopy from dataclasses import dataclass, field from datetime import timedelta +from enum import Enum from functools import wraps from inspect import ( isasyncgenfunction, iscoroutinefunction, isgeneratorfunction, ) -from threading import Lock, local +from threading import Lock, current_thread from time import perf_counter_ns, process_time_ns from types import TracebackType from typing import ( @@ -22,7 +61,7 @@ cast, ) -__version__: str = "0.6.1" +__version__: str = "0.6.2" __all__ = [ "Measurement", @@ -57,6 +96,11 @@ class TimeSpan: converted to whole microseconds (``duration // 1000``) to match timedelta's resolution. + Raises + ------ + ValueError + If ``end < start``. 
+ Notes ----- ``start`` and ``end`` use ``field(compare=False)``, so two spans with @@ -69,7 +113,7 @@ class TimeSpan: end: int = field(compare=False) def __post_init__(self) -> None: - """Set duration to end minus start (nanoseconds).""" + """Validate end >= start, then set duration (nanoseconds).""" if self.end < self.start: msg = "end must be >= start" raise ValueError(msg) @@ -83,10 +127,10 @@ def timedelta(self) -> timedelta: @dataclass class Measurement: - """A measurement collection: wall time, CPU time, and optional metadata. + """A single measurement record: wall time, CPU time, and optional metadata. - Stores one measurement only. Use this to collect the result of a single - timing run: wall-clock time, CPU time, and any user-defined metadata. + Use this to collect the result of a single timing run: wall-clock time, + CPU time, and any user-defined metadata. When created by Timer (context manager or decorator), ``wall_time`` and ``cpu_time`` are ``None`` until the block exits, then they are set to the @@ -109,6 +153,13 @@ class Measurement: metadata: dict[str, object] = field(default_factory=dict) +class _ContextMode(Enum): + """Sync vs async execution context for timer stack ownership.""" + + SYNC = "sync" + ASYNC = "async" + + class _TimedCallable(Protocol[P, R_co]): # pylint: disable=too-few-public-methods measurements: deque[Measurement] @@ -149,6 +200,10 @@ class Timer: Notes ----- + One context variable is used for both sync and async; each thread (sync) + or asyncio task (async) has its own stack, so concurrent async tasks + sharing one Timer get correct timings regardless of finish order. + Callbacks are synchronous only. For async exporters (e.g. OpenTelemetry), schedule work from the callback (e.g. ``asyncio.create_task(export(m))`` when in an async context, or a thread/queue). 
@@ -191,69 +246,137 @@ def __init__( maxlen: int | None = None, ) -> None: """Init with optional metadata, callbacks, and maxlen (decorator).""" + # Store base metadata used to seed each new measurement. self._metadata = metadata if isinstance(metadata, dict) else {} + + # Register lifecycle callbacks invoked at measurement start/end. self._on_start = on_start self._on_end = on_end + + # Keep decorator-mode retention policy for wrapped call history. self._maxlen = maxlen - self._local = local() - def __enter__(self) -> Measurement: - """Start timing; return the measurement record.""" - # Create measurement with a deep copy of timer metadata. - measurement = Measurement(metadata=deepcopy(self._metadata)) + # Create per-instance context-local stack storage for active timings. + self._stack_var: ContextVar[ + tuple[int | None, deque[tuple[Measurement, int, int]]] + ] = ContextVar(f"timerun_timer_stack_{id(self)}") + + def _get_context_stack( + self, + mode: _ContextMode, + ) -> deque[tuple[Measurement, int, int]]: + """Return the stack for the current execution context. + + "Owner" is the execution identity that owns the stack: thread id in + sync mode, task id in async mode. See :class:`_ContextMode`. + Binds (owner_id, stack) for this context when the context has no + bound stack yet (e.g. first use in this thread/task), so callers + always get the stack for this owner (possibly empty). + + Parameters + ---------- + mode : _ContextMode + SYNC for thread-local, ASYNC for task-local stack. + + Returns + ------- + deque + The stack for this owner. + + Raises + ------ + RuntimeError + If mode is ASYNC and there is no current asyncio task. + + """ + # Resolve owner id for this context (thread or task). + if mode is _ContextMode.ASYNC: + task = current_task() + if task is None: + msg = "no current asyncio task" + raise RuntimeError(msg) + owner_id = id(task) + else: + owner_id = id(current_thread()) + + # Get or bind stack for this context and return it. 
+ owner, stack = self._stack_var.get((None, deque())) + if owner != owner_id: + self._stack_var.set((owner_id, stack)) + return stack + + def _enter(self, mode: _ContextMode) -> Measurement: + """Start a measurement and push it onto the context stack. + + Returns + ------- + Measurement + Newly created measurement record for this enter operation. - # Notify caller timing started (wall_time/cpu_time still None). + """ + # Create measurement and run on_start callback if set. + measurement = Measurement(metadata=deepcopy(self._metadata)) if self._on_start is not None: self._on_start(measurement) - # Ensure thread-local stack exists and record start timestamps. - self._local.stack = getattr(self._local, "stack", deque()) - self._local.stack.append( - (measurement, perf_counter_ns(), process_time_ns()), - ) - + # Push onto context stack and return. + stack = self._get_context_stack(mode) + stack.append((measurement, perf_counter_ns(), process_time_ns())) return measurement - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> Literal[False]: - """Stop timing; set wall_time and cpu_time on the measurement.""" - # Capture end timestamps (before popping to pair with __enter__). + def _exit(self, mode: _ContextMode) -> None: + """Finalize and pop the latest measurement from the context stack. + + Raises + ------ + RuntimeError + If no matching active measurement (e.g. exit without enter). + + """ + # Capture end timestamps before popping. cpu_end = process_time_ns() wall_end = perf_counter_ns() - # Pop (measurement, wall_start, cpu_start) from this thread. - try: - measurement, wall_start, cpu_start = self._local.stack.pop() - except (AttributeError, IndexError) as e: + # Pop measurement or raise if no matching enter. 
+ stack = self._get_context_stack(mode) + if not stack: msg = "__exit__ called without a matching __enter__" - raise RuntimeError(msg) from e + raise RuntimeError(msg) + measurement, wall_start, cpu_start = stack.pop() - # Attach elapsed spans to the measurement. + # Set spans on measurement and run on_end callback if set. measurement.wall_time = TimeSpan(start=wall_start, end=wall_end) measurement.cpu_time = TimeSpan(start=cpu_start, end=cpu_end) - - # Notify caller that timing has ended (measurement is fully populated). if self._on_end is not None: self._on_end(measurement) + def __enter__(self) -> Measurement: + """Start timing; return the measurement record.""" + return self._enter(_ContextMode.SYNC) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> Literal[False]: + """Stop timing; update the measurement.""" + self._exit(_ContextMode.SYNC) return False async def __aenter__(self) -> Measurement: - """Support ``async with`` by delegating to sync __enter__.""" - return self.__enter__() + """Start timing (async); return the measurement record.""" + return self._enter(_ContextMode.ASYNC) async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, - ) -> bool: - """Support ``async with`` by delegating to sync __exit__.""" - return self.__exit__(exc_type, exc_val, exc_tb) + ) -> Literal[False]: + """Stop timing (async); update the measurement.""" + self._exit(_ContextMode.ASYNC) + return False def __call__( # noqa: C901 self, @@ -274,6 +397,7 @@ def __call__( # noqa: C901 linter warning is a false positive. """ + # Shared state and helper for all wrapper branches. measurements: deque[Measurement] = deque(maxlen=self._maxlen) lock = Lock() @@ -281,6 +405,7 @@ def append_measurement(m: Measurement) -> None: with lock: measurements.append(m) + # Build wrapper by callable type: async gen, coroutine, sync gen, sync. 
if isasyncgenfunction(f): @wraps(f) @@ -299,7 +424,7 @@ async def wrapper( elif iscoroutinefunction(f): @wraps(f) - async def wrapper( # type: ignore[return] + async def wrapper( *args: P.args, **kwargs: P.kwargs, ) -> R: @@ -333,6 +458,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: finally: append_measurement(m) # pylint: disable=used-before-assignment + # Attach measurements to wrapper and return. wrapped = cast( "_TimedCallable[P, R] | " "_TimedCallable[P, AsyncGenerator[Y, None]] | " diff --git a/zensical.toml b/zensical.toml index e743036..ec395b5 100644 --- a/zensical.toml +++ b/zensical.toml @@ -1,63 +1,83 @@ # Zensical config for TimeRun (https://hh-mwb.github.io/timerun/) [project] -# Identity site_name = "TimeRun" site_description = "Python package for time measurement — wall-clock and CPU time, zero dependencies." site_url = "https://hh-mwb.github.io/timerun/" - -# Repository repo_url = "https://github.com/HH-MWB/timerun" - -# Navigation nav = [ - { "Home" = "index.md" }, - { "About" = "about/index.md" }, - { "Reference" = [ + { "Quickstart" = "quickstart/index.md" }, + { "Overview" = "overview/index.md" }, + { "Guide" = [ "guide/index.md", - { "TimeSpan" = "guide/timespan.md" }, - { "Measurement" = "guide/measurement.md" }, - { "Timer (overview)" = "guide/timer.md" }, + { "Timer overview" = "guide/timer.md" }, { "Measure a block" = "guide/measure-block.md" }, - { "Measure functions" = "guide/measure-functions.md" }, + { "Measure function calls" = "guide/measure-functions.md" }, { "Metadata" = "guide/metadata.md" }, - { "Callbacks" = "guide/callbacks.md" } + { "Callbacks" = "guide/callbacks.md" }, + { "TimeSpan" = "guide/timespan.md" }, + { "Measurement" = "guide/measurement.md" }, + ]}, + { "Cookbook" = [ + "cookbook/index.md", + { "Use metadata effectively" = "cookbook/metadata.md" }, + { "Share results" = "cookbook/share-results.md" }, + { "Time web requests" = "cookbook/web-framework.md" }, + { "Analyze results" = 
"cookbook/analyze-results.md" }, ]}, - { "Recipes" = [ - "recipes/index.md", - { "Use metadata effectively" = "recipes/metadata.md" }, - { "Share results" = "recipes/share-results.md" }, - { "Analyze results" = "recipes/analyze-results.md" } - ]} ] +# -- Theme ------------------------------------------------------------------- + [project.theme] features = [ + "content.code.annotate", "content.code.copy", + "content.tabs.link", + "navigation.footer", "navigation.instant", - "navigation.tabs", "navigation.sections", + "navigation.tabs", "navigation.top", - "search.highlight" + "search.highlight", ] -# Light / dark / auto (follow system) toggle +[project.theme.icon] +repo = "fontawesome/brands/github" + +# Color scheme: auto / light / dark [[project.theme.palette]] -media = "(prefers-color-scheme)" -toggle.icon = "lucide/sun-moon" -toggle.name = "Follow system" +media = "(prefers-color-scheme)" +toggle = { icon = "lucide/sun-moon", name = "Follow system" } [[project.theme.palette]] -media = "(prefers-color-scheme: light)" -scheme = "default" -toggle.icon = "lucide/sun" -toggle.name = "Light mode" +media = "(prefers-color-scheme: light)" +scheme = "default" +toggle = { icon = "lucide/sun", name = "Light mode" } [[project.theme.palette]] -media = "(prefers-color-scheme: dark)" -scheme = "slate" -toggle.icon = "lucide/moon" -toggle.name = "Dark mode" +media = "(prefers-color-scheme: dark)" +scheme = "slate" +toggle = { icon = "lucide/moon", name = "Dark mode" } -[project.theme.icon] -repo = "fontawesome/brands/github" +# -- Markdown extensions ------------------------------------------------------ + +[project.markdown_extensions] +admonition = {} + +[project.markdown_extensions.pymdownx] +details = {} +inlinehilite = {} + +[project.markdown_extensions.pymdownx.highlight] +anchor_linenums = true +line_spans = "__span" +pygments_lang_class = true + +[project.markdown_extensions.pymdownx.superfences] +custom_fences = [ + { name = "mermaid", class = "mermaid", format = 
"pymdownx.superfences.fence_code_format" }, +] + +[project.markdown_extensions.pymdownx.tabbed] +alternate_style = true