From 0363f132cfb23216d0abe6f3381656aecba927ce Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 24 Nov 2025 14:14:25 +0100 Subject: [PATCH 01/56] WIP setup mkdocs --- .github/workflows/build.yaml | 104 ++++++----- .gitignore | 5 +- docs/changelog.md | 5 + docs/compatibility.md | 7 + docs/index.md | 1 + mkdocs.yaml | 165 ++++++++++++++++++ pyproject.toml | 14 +- scripts/generate_api_doc_pages.py | 37 ++++ scripts/pdoc/create_pydantic_patch.sh | 25 --- .../pdoc/mark_pydantic_attrs_private.patch | 28 --- scripts/pdoc/run.sh | 16 -- src/bioimageio/core/cli.py | 39 +++-- src/bioimageio/core/tensor.py | 4 +- .../core/weight_converters/_add_weights.py | 2 +- 14 files changed, 312 insertions(+), 140 deletions(-) create mode 100644 docs/changelog.md create mode 100644 docs/compatibility.md create mode 100644 docs/index.md create mode 100644 mkdocs.yaml create mode 100644 scripts/generate_api_doc_pages.py delete mode 100755 scripts/pdoc/create_pydantic_patch.sh delete mode 100644 scripts/pdoc/mark_pydantic_attrs_private.patch delete mode 100755 scripts/pdoc/run.sh diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5874d7e47..1e67773ad 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -8,7 +8,7 @@ on: workflow_dispatch: inputs: force-publish: - description: 'Force publish even if no version change detected' + description: 'Force publish even if no version change was detected' required: false type: choice options: @@ -190,34 +190,6 @@ jobs: env: BIOIMAGEIO_CACHE_PATH: bioimageio_cache - docs: - needs: [coverage, test] - if: github.ref == 'refs/heads/main' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/download-artifact@v4 - with: - name: coverage-summary - path: dist - - uses: actions/setup-python@v6 - with: - python-version: '3.12' - cache: 'pip' - - run: pip install -e .[dev,partners] - - name: Generate developer docs - run: ./scripts/pdoc/run.sh - - run: cp README.md ./dist/README.md 
- - name: copy rendered presentations - run: | - mkdir ./dist/presentations - cp -r ./presentations/*.html ./dist/presentations/ - - name: Deploy to gh-pages 🚀 - uses: JamesIves/github-pages-deploy-action@v4 - with: - branch: gh-pages - folder: dist - build: runs-on: ubuntu-latest steps: @@ -235,21 +207,33 @@ jobs: path: dist/ name: dist - publish: - needs: [test, build, conda-build, docs] + docs: + needs: [build, conda-build, coverage, test] runs-on: ubuntu-latest - environment: - name: release - url: https://pypi.org/project/bioimageio.core/ permissions: contents: write # required for tag creation - id-token: write # required for pypi publish action + outputs: + new-version: ${{ steps.get-new-version.outputs.new-version }} steps: - - name: Check out the repository - uses: actions/checkout@v4 + - uses: actions/checkout@v4 with: - fetch-depth: 2 + fetch-depth: 0 fetch-tags: true + - uses: actions/download-artifact@v4 + with: + name: coverage-summary + path: dist + - uses: actions/setup-python@v6 + with: + python-version: '3.12' + cache: 'pip' + - run: pip install -e .[dev,docs,partners] + - name: Get branch name to deploy to + id: get_branch + shell: bash + run: | + if [[ -n '${{ github.event.pull_request.head.ref }}' ]]; then branch=gh-pages-${{ github.event.pull_request.head.ref }}; else branch=gh-pages; fi + echo "::set-output name=branch::$branch" - name: Get parent commit if: inputs.force-publish != 'true' id: get-parent-commit @@ -258,7 +242,6 @@ jobs: - id: get-existing-tag if: inputs.force-publish == 'true' run: echo "existing-tag=$(git tag --points-at HEAD 'v[0-9]*.[0-9]*.[0-9]*')" >> $GITHUB_OUTPUT - - name: Detect new version from last commit and create tag id: tag-version if: github.ref == 'refs/heads/main' && steps.get-parent-commit.outputs.sha && inputs.force-publish != 'true' @@ -273,8 +256,6 @@ jobs: import os from pathlib import Path - - if "${{ inputs.force-publish }}" == "true": existing_tag = "${{ steps.get-existing-tag.outputs.existing-tag }}" 
valid = existing_tag.count("v") == 1 and existing_tag.count(".") == 2 and all(part.isdigit() for part in existing_tag.lstrip("v").split(".")) @@ -291,23 +272,52 @@ jobs: with open(os.environ['GITHUB_OUTPUT'], 'a') as f: print(f"new-version={new_version}", file=f) + - name: Configure Git Credentials + run: | + git config user.name github-actions[bot] + git config user.email 41898282+github-actions[bot]@users.noreply.github.com + - name: Generate developer docs + run: mike deploy --push --branch ${{ steps.get_branch.outputs.branch }} --update-aliases ${{ steps.get-new-version.outputs.new-version || 'dev'}} ${{ steps.get-new-version.outputs.new-version && 'latest' || ' '}} + - name: copy rendered presentations + run: | + mkdir ./dist/presentations + cp -r ./presentations/*.html ./dist/presentations/ + - name: Deploy to gh-pages 🚀 + uses: JamesIves/github-pages-deploy-action@v4 + with: + branch: gh-pages + folder: dist + clean: true + clean-exclude: | + .nojekyll + index.html + versions.json + latest/ + dev/ + v0.*/ + publish: + needs: [test, coverage, build, conda-build, docs] + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' && needs.docs.outputs.new-version + environment: + name: release + url: https://pypi.org/project/bioimageio.core/ + permissions: + contents: write # required to create a github release (release drafter) + id-token: write # required for pypi publish action + steps: - uses: actions/download-artifact@v4 - if: github.ref == 'refs/heads/main' && steps.get-new-version.outputs.new-version with: name: dist path: dist - name: Publish package on PyPI - if: github.ref == 'refs/heads/main' && steps.get-new-version.outputs.new-version uses: pypa/gh-action-pypi-publish@release/v1 with: packages-dir: dist/ - - name: Publish the release notes - if: github.ref == 'refs/heads/main' uses: release-drafter/release-drafter@v6.0.0 with: - publish: "${{ steps.get-new-version.outputs.new-version != '' }}" - tag: '${{ steps.get-new-version.outputs.new-version 
}}' + tag: '${{ needs.docs.outputs.new-version }}' env: GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' diff --git a/.gitignore b/.gitignore index 688e4a889..3cb4fc7f2 100644 --- a/.gitignore +++ b/.gitignore @@ -6,12 +6,13 @@ __pycache__/ *.egg-info/ *.pyc **/tmp +bioimageio_cache/ bioimageio_unzipped_tf_weights/ build/ cache coverage.xml dist/ -docs/ dogfood/ +pkgs/ +site/ typings/pooch/ -bioimageio_cache/ diff --git a/docs/changelog.md b/docs/changelog.md new file mode 100644 index 000000000..a38b0a23c --- /dev/null +++ b/docs/changelog.md @@ -0,0 +1,5 @@ +--- +title: Changelog +--- + +--8<-- "changelog.md" diff --git a/docs/compatibility.md b/docs/compatibility.md new file mode 100644 index 000000000..5143266ee --- /dev/null +++ b/docs/compatibility.md @@ -0,0 +1,7 @@ +# Compatibility with bioimage.io resources + +bioimageio.core is used on [bioimage.io](https://bioimage.io) to test resources during and after the upload process. +Results are reported as "Test reports" (bioimageio.core deployed in a generic Python environment) +as well as the bioimageio.core tool compatibility (testing a resource with bioimageio.core in a dedicated Python environment). + +An overview of the latter is available [as part of the collection documentation](https://bioimage-io.github.io/collection/latest/reports_overview/). diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..612c7a5e0 --- /dev/null +++ b/docs/index.md @@ -0,0 +1 @@ +--8<-- "README.md" diff --git a/mkdocs.yaml b/mkdocs.yaml new file mode 100644 index 000000000..fb0622338 --- /dev/null +++ b/mkdocs.yaml @@ -0,0 +1,165 @@ +site_name: 'bioimageio.core' +site_url: 'https://bioimage-io.github.io/core-bioimage-io-python' +site_author: Fynn Beuttenmüller +site_description: 'Python specific core utilities for bioimage.io resources (in particular DL models).' 
+ +repo_name: bioimage-io/core-bioimage-io-python +repo_url: https://github.com/bioimage-io/core-bioimage-io-python +edit_uri: edit/main/docs/ + +theme: + name: material + features: + - announce.dismiss + - content.action.edit + - content.action.view + - content.code.annotate + - content.code.copy + - content.code.select + - content.footnote.tooltips + - content.tabs.link + - content.tooltips + - header.autohide + - navigation.expand + - navigation.footer + - navigation.indexes + - navigation.instant + - navigation.instant.prefetch + - navigation.instant.preview + - navigation.instant.progress + - navigation.path + - navigation.prune + - navigation.sections + - navigation.tabs + - navigation.tabs.sticky + - navigation.top + - navigation.tracking + - search.highlight + - search.share + - search.suggest + - toc.follow + # - toc.integrate + + palette: + - media: '(prefers-color-scheme)' + primary: 'deep-purple' + accent: 'blue' + toggle: + icon: material/brightness-auto + name: 'Switch to light mode' + - media: '(prefers-color-scheme: light)' + scheme: default + primary: 'deep-purple' + accent: 'blue' + toggle: + icon: material/brightness-7 + name: 'Switch to dark mode' + - media: '(prefers-color-scheme: dark)' + scheme: slate + primary: 'deep-purple' + accent: 'blue' + toggle: + icon: material/brightness-4 + name: 'Switch to system preference' + + font: + text: Roboto + code: Roboto Mono + + logo: images/bioimage-io-icon.png + favicon: images/favicon.ico + +plugins: + - autorefs + - coverage: + html_report_dir: dist/coverage + - markdown-exec + - mkdocstrings: + handlers: + python: + inventories: + - https://docs.pydantic.dev/latest/objects.inv + - https://bioimage-io.github.io/spec-bioimage-io/latest/objects.inv + - https://bioimage-io.github.io/spec-bioimage-io/dev/objects.inv + options: + annotations_path: source + backlinks: tree + docstring_options: + ignore_init_summary: true + docstring_section_style: spacy + filters: 'public' + heading_level: 1 + 
inherited_members: true + members: true + merge_init_into_class: true + parameter_headings: true + separate_signature: true + scoped_crossrefs: true + show_root_heading: true + show_root_full_path: false + show_signature_annotations: true + show_if_no_docstring: true + show_source: true + show_symbol_type_heading: true + show_symbol_type_toc: true + # unwrap_annotated: true + signature_crossrefs: true + summary: true + extensions: + - griffe_pydantic: + schema: true + show_inheritance_diagram: true + - mike: + alias_type: symlink + canonical_version: latest + version_selector: true + - gen-files: + scripts: + - scripts/generate_api_doc_pages.py + - literate-nav: + nav_file: SUMMARY.md + - search + - section-index + +markdown_extensions: + - attr_list + - admonition + - callouts: + strip_period: false + - footnotes + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg + - pymdownx.highlight: + pygments_lang_class: true + - pymdownx.magiclink + - pymdownx.snippets: + base_path: [!relative $config_dir] + check_paths: true + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + slugify: !!python/object/apply:pymdownx.slugs.slugify + kwds: + case: lower + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + - toc: + permalink: '¤' + +nav: + - Home: + - index.md + - Compatibility: compatibility.md + - API Reference: reference/ + - Changelog: changelog.md + - Coverage report: coverage.md + +extra: + social: + - icon: fontawesome/brands/github + link: https://github.com/bioimage-io + version: + provider: mike diff --git a/pyproject.toml b/pyproject.toml index ed7e9aa39..6157b87e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,7 +61,6 @@ dev = [ "onnxruntime", "onnxscript", "packaging>=17.0", - "pdoc", "pre-commit", "pyright==1.1.407", "pytest-cov", @@ -73,6 +72,19 @@ dev = [ "torch>=1.6,<3", "torchvision>=0.21", ] +docs = 
[ + "griffe-pydantic", + "markdown-callouts", + "markdown-exec", + "mike", + "mkdocs-api-autonav", + "mkdocs-coverage", + "mkdocs-gen-files", + "mkdocs-literate-nav", + "mkdocs-literate-nav", + "mkdocs-material", + "mkdocs-section-index", +] [build-system] requires = ["pip", "setuptools>=61.0"] diff --git a/scripts/generate_api_doc_pages.py b/scripts/generate_api_doc_pages.py new file mode 100644 index 000000000..a4ab1a82c --- /dev/null +++ b/scripts/generate_api_doc_pages.py @@ -0,0 +1,37 @@ +"""Generate the code reference pages. +(adapted from https://mkdocstrings.github.io/recipes/#bind-pages-to-sections-themselves) +""" + +from pathlib import Path + +import mkdocs_gen_files + +nav = mkdocs_gen_files.nav.Nav() + +root = Path(__file__).parent.parent +src = root / "src" + +for path in sorted(src.rglob("*.py")): + module_path = path.relative_to(src).with_suffix("") + doc_path = path.relative_to(src).with_suffix(".md") + full_doc_path = Path("reference", doc_path) + + parts = tuple(module_path.parts) + + if parts[-1] == "__init__": + parts = parts[:-1] + doc_path = doc_path.with_name("index.md") + full_doc_path = full_doc_path.with_name("index.md") + elif parts[-1] == "__main__": + continue + + nav[parts] = doc_path.as_posix() + + with mkdocs_gen_files.open(full_doc_path, "w") as fd: + ident = ".".join(parts) + fd.write(f"::: {ident}") + + mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) + +with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: + nav_file.writelines(nav.build_literate_nav()) diff --git a/scripts/pdoc/create_pydantic_patch.sh b/scripts/pdoc/create_pydantic_patch.sh deleted file mode 100755 index 05b6da6bd..000000000 --- a/scripts/pdoc/create_pydantic_patch.sh +++ /dev/null @@ -1,25 +0,0 @@ -pydantic_root=$(python -c "import pydantic;from pathlib import Path;print(Path(pydantic.__file__).parent)") -main=$pydantic_root'/main.py' -original="$(dirname "$0")/original.py" -patched="$(dirname "$0")/patched.py" - -if [ -e 
$original ] -then - echo "found existing $original" -else - cp --verbose $main $original -fi - -if [ -e $patched ] -then - echo "found existing $patched" -else - cp --verbose $main $patched - echo "Please update $patched, then press enter to continue" - read -fi - -patch_file="$(dirname "$0")/mark_pydantic_attrs_private.patch" -diff -au $original $patched > $patch_file -echo "content of $patch_file:" -cat $patch_file diff --git a/scripts/pdoc/mark_pydantic_attrs_private.patch b/scripts/pdoc/mark_pydantic_attrs_private.patch deleted file mode 100644 index 722d4fbb9..000000000 --- a/scripts/pdoc/mark_pydantic_attrs_private.patch +++ /dev/null @@ -1,28 +0,0 @@ ---- ./original.py 2024-11-08 15:18:37.493768700 +0100 -+++ ./patched.py 2024-11-08 15:13:54.288887700 +0100 -@@ -121,14 +121,14 @@ - # `GenerateSchema.model_schema` to work for a plain `BaseModel` annotation. - - model_config: ClassVar[ConfigDict] = ConfigDict() -- """ -+ """@private - Configuration for the model, should be a dictionary conforming to [`ConfigDict`][pydantic.config.ConfigDict]. - """ - - # Because `dict` is in the local namespace of the `BaseModel` class, we use `Dict` for annotations. - # TODO v3 fallback to `dict` when the deprecated `dict` method gets removed. - model_fields: ClassVar[Dict[str, FieldInfo]] = {} # noqa: UP006 -- """ -+ """@private - Metadata about the fields defined on the model, - mapping of field names to [`FieldInfo`][pydantic.fields.FieldInfo] objects. 
- -@@ -136,7 +136,7 @@ - """ - - model_computed_fields: ClassVar[Dict[str, ComputedFieldInfo]] = {} # noqa: UP006 -- """A dictionary of computed field names and their corresponding `ComputedFieldInfo` objects.""" -+ """@private A dictionary of computed field names and their corresponding `ComputedFieldInfo` objects.""" - - __class_vars__: ClassVar[set[str]] - """The names of the class variables defined on the model.""" diff --git a/scripts/pdoc/run.sh b/scripts/pdoc/run.sh deleted file mode 100755 index 74981aa5f..000000000 --- a/scripts/pdoc/run.sh +++ /dev/null @@ -1,16 +0,0 @@ -cd "$(dirname "$0")" # cd to folder this script is in - -# patch pydantic to hide pydantic attributes that somehow show up in the docs -# (not even as inherited, but as if the documented class itself would define them) -pydantic_main=$(python -c "import pydantic;from pathlib import Path;print(Path(pydantic.__file__).parent / 'main.py')") - -patch --verbose --forward -p1 $pydantic_main < mark_pydantic_attrs_private.patch - -cd ../.. 
# cd to repo root -pdoc \ - --docformat google \ - --logo "https://bioimage.io/static/img/bioimage-io-logo.svg" \ - --logo-link "https://bioimage.io/" \ - --favicon "https://bioimage.io/static/img/bioimage-io-icon-small.svg" \ - --footer-text "bioimageio.core $(python -c 'import bioimageio.core;print(bioimageio.core.__version__)')" \ - -o ./dist bioimageio.core bioimageio.spec # generate bioimageio.spec as well for references diff --git a/src/bioimageio/core/cli.py b/src/bioimageio/core/cli.py index ff24f1ece..d0e09c847 100644 --- a/src/bioimageio/core/cli.py +++ b/src/bioimageio/core/cli.py @@ -16,6 +16,7 @@ from pathlib import Path from pprint import pformat, pprint from typing import ( + Annotated, Any, Dict, Iterable, @@ -30,24 +31,8 @@ Union, ) -import rich.markdown -from loguru import logger -from pydantic import AliasChoices, BaseModel, Field, model_validator -from pydantic_settings import ( - BaseSettings, - CliPositionalArg, - CliSettingsSource, - CliSubCommand, - JsonConfigSettingsSource, - PydanticBaseSettingsSource, - SettingsConfigDict, - YamlConfigSettingsSource, -) -from tqdm import tqdm -from typing_extensions import assert_never - import bioimageio.spec -from bioimageio.core import __version__ +import rich.markdown from bioimageio.spec import ( AnyModelDescr, InvalidDescr, @@ -65,6 +50,22 @@ from bioimageio.spec.model import ModelDescr, v0_4, v0_5 from bioimageio.spec.notebook import NotebookDescr from bioimageio.spec.utils import ensure_description_is_model, get_reader, write_yaml +from loguru import logger +from pydantic import AliasChoices, BaseModel, Field, PlainSerializer, model_validator +from pydantic_settings import ( + BaseSettings, + CliPositionalArg, + CliSettingsSource, + CliSubCommand, + JsonConfigSettingsSource, + PydanticBaseSettingsSource, + SettingsConfigDict, + YamlConfigSettingsSource, +) +from tqdm import tqdm +from typing_extensions import assert_never + +from bioimageio.core import __version__ from .commands import 
WeightFormatArgAll, WeightFormatArgAny, package, test from .common import MemberId, SampleId, SupportedWeightsFormat @@ -450,7 +451,9 @@ class PredictCmd(CmdBase, WithSource): blockwise: bool = False """process inputs blockwise""" - stats: Path = Path("dataset_statistics.json") + stats: Annotated[Path, PlainSerializer(lambda p: p.as_posix())] = Path( + "dataset_statistics.json" + ) """path to dataset statistics (will be written if it does not exist, but the model requires statistical dataset measures) diff --git a/src/bioimageio/core/tensor.py b/src/bioimageio/core/tensor.py index 17358b002..c49469f7e 100644 --- a/src/bioimageio/core/tensor.py +++ b/src/bioimageio/core/tensor.py @@ -177,11 +177,11 @@ def from_numpy( Args: array: the nd numpy array - axes: A description of the array's axes, + dims: A description of the array's axes, if None axes are guessed (which might fail and raise a ValueError.) Raises: - ValueError: if `axes` is None and axes guessing fails. + ValueError: if `dims` is None and dims guessing fails. """ if dims is None: diff --git a/src/bioimageio/core/weight_converters/_add_weights.py b/src/bioimageio/core/weight_converters/_add_weights.py index cc9156192..255aa7b2d 100644 --- a/src/bioimageio/core/weight_converters/_add_weights.py +++ b/src/bioimageio/core/weight_converters/_add_weights.py @@ -30,8 +30,8 @@ def add_weights( Default: choose automatically from any available. target_format: convert to a specific weights format. Default: attempt to convert to any missing format. - devices: Devices that may be used during conversion. verbose: log more (error) output + allow_tracing: allow conversion to torchscript by tracing if scripting fails. Returns: A (potentially invalid) model copy stored at `output_path` with added weights if any conversion was possible. 
From add1a416feff9148b5f451294c88846463c5c7d4 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 24 Nov 2025 14:21:32 +0100 Subject: [PATCH 02/56] use max disk space action --- .github/actions/max_disk_space/action.yaml | 13 +++++++++++++ .github/workflows/build.yaml | 3 +++ 2 files changed, 16 insertions(+) create mode 100644 .github/actions/max_disk_space/action.yaml diff --git a/.github/actions/max_disk_space/action.yaml b/.github/actions/max_disk_space/action.yaml new file mode 100644 index 000000000..759e53afb --- /dev/null +++ b/.github/actions/max_disk_space/action.yaml @@ -0,0 +1,13 @@ +name: 'Maximize disk space' +description: 'Maximize available disk space by removing unwanted software' + +runs: + using: 'composite' + steps: + - name: Maximize available disk space + uses: AdityaGarg8/remove-unwanted-software@v5 + with: + remove-android: 'true' + remove-dotnet: 'true' + remove-haskell: 'true' + remove-codeql: 'true' diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 1e67773ad..fdf789474 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -41,6 +41,8 @@ jobs: lookup-only: true - uses: actions/checkout@v4 if: steps.look-up.outputs.cache-hit != 'true' + - uses: ./.github/actions/max_disk_space + if: steps.look-up.outputs.cache-hit != 'true' - uses: actions/cache@v4 if: steps.look-up.outputs.cache-hit != 'true' with: @@ -219,6 +221,7 @@ jobs: with: fetch-depth: 0 fetch-tags: true + - uses: ./.github/actions/max_disk_space - uses: actions/download-artifact@v4 with: name: coverage-summary From 0fa1ef33067055bfda100ee276c6952ad17e6341 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 24 Nov 2025 14:48:53 +0100 Subject: [PATCH 03/56] use max disk space action in tests too --- .github/workflows/build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index fdf789474..db4f8c334 100644 --- a/.github/workflows/build.yaml +++ 
b/.github/workflows/build.yaml @@ -86,6 +86,7 @@ jobs: steps: - uses: actions/checkout@v4 + - uses: ./.github/actions/max_disk_space - uses: actions/setup-python@v6 with: python-version: ${{matrix.python-version}} From e12e2e73912e3d94e0bfe51dbc6eed61409f5b4c Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 24 Nov 2025 15:18:01 +0100 Subject: [PATCH 04/56] update pyright settings --- .github/workflows/build.yaml | 2 ++ pyproject.toml | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index db4f8c334..8b16fe534 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -232,6 +232,8 @@ jobs: python-version: '3.12' cache: 'pip' - run: pip install -e .[dev,docs,partners] + - name: Check doc scripts + run: pyright scripts/generate_api_doc_pages.py - name: Get branch name to deploy to id: get_branch shell: bash diff --git a/pyproject.toml b/pyproject.toml index 6157b87e6..03b135c07 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -102,8 +102,7 @@ exclude = [ "**/node_modules", "dogfood", "presentations", - "scripts/pdoc/original.py", - "scripts/pdoc/patched.py", + "scripts/generate_api_doc_pages.py", "tests/old_*", ] include = ["src", "scripts", "tests"] From 244ac487436f1e521982c25f959a7f43653ae803 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 25 Nov 2025 15:08:21 +0100 Subject: [PATCH 05/56] add tests --- tests/test_add_weights.py | 90 ++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 48 deletions(-) diff --git a/tests/test_add_weights.py b/tests/test_add_weights.py index 836353c74..86abd4a5c 100644 --- a/tests/test_add_weights.py +++ b/tests/test_add_weights.py @@ -1,48 +1,42 @@ -# TODO: update add weights tests -# import os - - -# def _test_add_weights(model, tmp_path, base_weights, added_weights, **kwargs): -# from bioimageio.core.build_spec import add_weights - -# rdf = load_raw_resource_description(model) -# assert base_weights in 
rdf.weights -# assert added_weights in rdf.weights - -# weight_path = load_description(model).weights[added_weights].source -# assert weight_path.exists() - -# drop_weights = set(rdf.weights.keys()) - {base_weights} -# for drop in drop_weights: -# rdf.weights.pop(drop) -# assert tuple(rdf.weights.keys()) == (base_weights,) - -# in_path = tmp_path / "model1.zip" -# export_resource_package(rdf, output_path=in_path) - -# out_path = tmp_path / "model2.zip" -# add_weights(in_path, weight_path, weight_type=added_weights, output_path=out_path, **kwargs) - -# assert out_path.exists() -# new_rdf = load_description(out_path) -# assert set(new_rdf.weights.keys()) == {base_weights, added_weights} -# for weight in new_rdf.weights.values(): -# assert weight.source.exists() - -# test_res = _test_model(out_path, added_weights) -# failed = [s for s in test_res if s["status"] != "passed"] -# assert not failed, failed -# test_res = _test_model(out_path) -# failed = [s for s in test_res if s["status"] != "passed"] -# assert not failed, failed - -# # make sure the weights were cleaned from the cwd -# assert not os.path.exists(os.path.split(weight_path)[1]) - - -# def test_add_torchscript(unet2d_nuclei_broad_model, tmp_path): -# _test_add_weights(unet2d_nuclei_broad_model, tmp_path, "pytorch_state_dict", "torchscript") - - -# def test_add_onnx(unet2d_nuclei_broad_model, tmp_path): -# _test_add_weights(unet2d_nuclei_broad_model, tmp_path, "pytorch_state_dict", "onnx", opset_version=12) +import os +from pathlib import Path + +import pytest +from bioimageio.spec.model.v0_5 import WeightsFormat + +from bioimageio.core import add_weights, load_model_description + + +@pytest.mark.parametrize( + ("model_fixture", "source_format", "target_format"), + [ + ("unet2d_nuclei_broad_model", "pytorch_state_dict", "torchscript"), + ("unet2d_nuclei_broad_model", "pytorch_state_dict", "onnx"), + ("unet2d_nuclei_broad_model", "torchscript", "onnx"), + ], +) +def test_add_weights( + model_fixture: str, + 
source_format: WeightsFormat, + target_format: WeightsFormat, + tmp_path: Path, + request: pytest.FixtureRequest, +): + model_source = request.getfixturevalue(model_fixture) + + model = load_model_description(model_source, format_version="latest") + assert source_format in model.weights.available_formats, ( + "source format not found in model" + ) + if target_format in model.weights.available_formats: + model.weights[target_format] = None + + out_path = tmp_path / "converted.zip" + converted = add_weights( + model, + output_path=out_path, + source_format=source_format, + target_format=target_format, + ) + + assert target_format in converted.weights.available_formats From 20e44660dc29675d0370d24c34680201607a0e33 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 25 Nov 2025 15:08:33 +0100 Subject: [PATCH 06/56] fix typo --- src/bioimageio/core/commands.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/bioimageio/core/commands.py b/src/bioimageio/core/commands.py index 61d0bd4be..1a391f177 100644 --- a/src/bioimageio/core/commands.py +++ b/src/bioimageio/core/commands.py @@ -4,8 +4,6 @@ from pathlib import Path from typing import Optional, Sequence, Union -from typing_extensions import Literal - from bioimageio.spec import ( InvalidDescr, ResourceDescr, @@ -13,6 +11,7 @@ save_bioimageio_package_as_folder, ) from bioimageio.spec._internal.types import FormatVersionPlaceholder +from typing_extensions import Literal from ._resource_tests import test_description @@ -102,7 +101,7 @@ def package( Args: descr: a bioimageio resource description path: output path - weight-format: include only this single weight-format (if not 'all'). + weight_format: include only this single weight-format (if not 'all'). 
""" if isinstance(descr, InvalidDescr): logged = descr.validation_summary.save() From 2c34d6f90aa563c4364020ee2937d67b4641fb44 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 25 Nov 2025 15:08:57 +0100 Subject: [PATCH 07/56] WIP fix python repl examples in docs --- mkdocs.yaml | 17 ++++++++++++----- pyproject.toml | 1 + 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/mkdocs.yaml b/mkdocs.yaml index fb0622338..7afb97143 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -86,29 +86,36 @@ plugins: backlinks: tree docstring_options: ignore_init_summary: true + returns_multiple_items: false + returns_named_value: false + trim_doctest_flags: true docstring_section_style: spacy + docstring_style: google filters: 'public' heading_level: 1 + imported_members: false inherited_members: true members: true merge_init_into_class: true parameter_headings: true - separate_signature: true + preload_modules: [pydantic, bioimageio.spec] scoped_crossrefs: true - show_root_heading: true + separate_signature: true + show_docstring_examples: true + show_if_no_docstring: true + show_inheritance_diagram: true show_root_full_path: false + show_root_heading: true show_signature_annotations: true - show_if_no_docstring: true show_source: true show_symbol_type_heading: true show_symbol_type_toc: true - # unwrap_annotated: true signature_crossrefs: true summary: true + # unwrap_annotated: true extensions: - griffe_pydantic: schema: true - show_inheritance_diagram: true - mike: alias_type: symlink canonical_version: latest diff --git a/pyproject.toml b/pyproject.toml index 03b135c07..3e5c9e917 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,6 +82,7 @@ docs = [ "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-literate-nav", + "mkdocs-literate-nav", "mkdocs-material", "mkdocs-section-index", ] From 6b6c1567d24856d16f9f69686c75c03ff4675583 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 25 Nov 2025 15:33:18 +0100 Subject: [PATCH 08/56] fix issues in test_add_weights.py --- 
tests/test_add_weights.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/test_add_weights.py b/tests/test_add_weights.py index 86abd4a5c..25e2ac56f 100644 --- a/tests/test_add_weights.py +++ b/tests/test_add_weights.py @@ -1,4 +1,3 @@ -import os from pathlib import Path import pytest @@ -29,7 +28,7 @@ def test_add_weights( "source format not found in model" ) if target_format in model.weights.available_formats: - model.weights[target_format] = None + setattr(model.weights, target_format, None) out_path = tmp_path / "converted.zip" converted = add_weights( @@ -38,5 +37,8 @@ def test_add_weights( source_format=source_format, target_format=target_format, ) - + assert not isinstance(converted, InvalidDescr), ( + "conversion resulted in invalid descr", + converted.validation_summary.display(), + ) assert target_format in converted.weights.available_formats From 9bc83a15a63abd76be0ae265aff9d32e6bf100ad Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 8 Dec 2025 11:28:34 +0100 Subject: [PATCH 09/56] add missing import --- tests/test_add_weights.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_add_weights.py b/tests/test_add_weights.py index 25e2ac56f..225257d6c 100644 --- a/tests/test_add_weights.py +++ b/tests/test_add_weights.py @@ -1,6 +1,7 @@ from pathlib import Path import pytest +from bioimageio.spec import InvalidDescr from bioimageio.spec.model.v0_5 import WeightsFormat from bioimageio.core import add_weights, load_model_description From 3f4c2512f3bea194ec32380e29b2cd2fe05953dd Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 9 Dec 2025 10:59:40 +0100 Subject: [PATCH 10/56] update docs --- README.md | 12 +++---- mkdocs.yaml | 8 ++++- scripts/generate_api_doc_pages.py | 38 +++++++++++++++++++-- src/bioimageio/core/__init__.py | 17 +++++++-- src/bioimageio/core/_prediction_pipeline.py | 5 ++- 5 files changed, 64 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index da957da35..dc1f4b982 100644 --- 
a/README.md +++ b/README.md @@ -25,8 +25,7 @@ bioimage.core has to offer: 1. test a model ```console - $ bioimageio test powerful-chipmunk - ... + bioimageio test powerful-chipmunk ```
@@ -65,8 +64,7 @@ bioimage.core has to offer: or ```console - $ bioimageio test impartial-shrimp - ... + bioimageio test impartial-shrimp ```
(Click to expand output) @@ -144,8 +142,7 @@ bioimage.core has to offer: - display the `bioimageio-predict` command help to get an overview: ```console - $ bioimageio predict --help - ... + bioimageio predict --help ```
@@ -233,8 +230,7 @@ bioimage.core has to offer: - create an example and run prediction locally! ```console - $ bioimageio predict impartial-shrimp --example=True - ... + bioimageio predict impartial-shrimp --example=True ```
diff --git a/mkdocs.yaml b/mkdocs.yaml index 7afb97143..bb61f13f1 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -9,6 +9,7 @@ edit_uri: edit/main/docs/ theme: name: material + language: en features: - announce.dismiss - content.action.edit @@ -75,6 +76,9 @@ plugins: html_report_dir: dist/coverage - markdown-exec - mkdocstrings: + enable_inventory: true + default_handler: python + locale: en handlers: python: inventories: @@ -91,7 +95,7 @@ plugins: trim_doctest_flags: true docstring_section_style: spacy docstring_style: google - filters: 'public' + filters: ['!^_[^_]'] heading_level: 1 imported_members: false inherited_members: true @@ -155,6 +159,8 @@ markdown_extensions: - pymdownx.tilde - toc: permalink: '¤' + permalink_title: Anchor link to this section for reference + toc_depth: 2 nav: - Home: diff --git a/scripts/generate_api_doc_pages.py b/scripts/generate_api_doc_pages.py index a4ab1a82c..bebf3e23f 100644 --- a/scripts/generate_api_doc_pages.py +++ b/scripts/generate_api_doc_pages.py @@ -11,6 +11,9 @@ root = Path(__file__).parent.parent src = root / "src" +# Track flat nav entries we have added +added_nav_labels: set[str] = set() + for path in sorted(src.rglob("*.py")): module_path = path.relative_to(src).with_suffix("") doc_path = path.relative_to(src).with_suffix(".md") @@ -18,6 +21,10 @@ parts = tuple(module_path.parts) + # Skip if this is just the bioimageio namespace package + if parts == ("bioimageio",): + continue + if parts[-1] == "__init__": parts = parts[:-1] doc_path = doc_path.with_name("index.md") @@ -25,10 +32,37 @@ elif parts[-1] == "__main__": continue - nav[parts] = doc_path.as_posix() + if not parts: # Skip if parts is empty + continue + + # Build a flat nav for API Reference: one entry for bioimageio.core and + # one entry per top-level submodule under bioimageio.core. No subsections. 
+ if parts[0:2] == ("bioimageio", "core"): + if len(parts) == 2: + # Landing page for bioimageio.core at reference/index.md + full_doc_path = Path("reference", "index.md") + doc_path = Path("index.md") + if "bioimageio.core" not in added_nav_labels: + nav[("bioimageio.core",)] = doc_path.as_posix() + added_nav_labels.add("bioimageio.core") + else: + # Top-level submodule/package directly under bioimageio.core + top = parts[2] + if top not in added_nav_labels: + pkg_init = src / "bioimageio" / "core" / top / "__init__.py" + if pkg_init.exists(): + nav_target = Path("bioimageio") / "core" / top / "index.md" + else: + nav_target = Path("bioimageio") / "core" / f"{top}.md" + + nav[(top,)] = nav_target.as_posix() + added_nav_labels.add(top) with mkdocs_gen_files.open(full_doc_path, "w") as fd: - ident = ".".join(parts) + # Reconstruct the full identifier from the original module_path + ident = ".".join(module_path.parts) + if ident.endswith(".__init__"): + ident = ident[:-9] # Remove .__init__ fd.write(f"::: {ident}") mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) diff --git a/src/bioimageio/core/__init__.py b/src/bioimageio/core/__init__.py index ac51907d5..bab4a0984 100644 --- a/src/bioimageio/core/__init__.py +++ b/src/bioimageio/core/__init__.py @@ -1,5 +1,18 @@ -""" -.. include:: ../../README.md +"""bioimageio.core --- core functionality for BioImage.IO resources + +The main focus on this library is to provide functionality to run prediction with +BioImage.IO models, including standardized pre- and postprocessing operations. +The BioImage.IO models (and other resources) are described by---and can be loaded with---the bioimageio.spec package. + +See `predict` and `predict_many` for straight-forward model inference +and `create_prediction_pipeline` for finer control of the inference process. + +Other notable bioimageio.core functionalities include: +- Testing BioImage.IO resources beyond format validation, e.g. 
by generating model outputs from test inputs. + See `test_model` or for arbitrary resource types `test_description`. +- Extending available model weight formats by converting existing ones, see `add_weights`. +- Creating and manipulating `Sample`s consisting of tensors with associated statistics. +- Computing statistics on datasets (represented as sequences of samples), see `compute_dataset_measures`. """ # ruff: noqa: E402 diff --git a/src/bioimageio/core/_prediction_pipeline.py b/src/bioimageio/core/_prediction_pipeline.py index 0b7717aa5..a2b055bcc 100644 --- a/src/bioimageio/core/_prediction_pipeline.py +++ b/src/bioimageio/core/_prediction_pipeline.py @@ -12,11 +12,10 @@ Union, ) +from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 from loguru import logger from tqdm import tqdm -from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 - from ._op_base import BlockedOperator from .axis import AxisId, PerAxis from .common import ( @@ -66,7 +65,7 @@ def __init__( default_blocksize_parameter: BlocksizeParameter = 10, default_batch_size: int = 1, ) -> None: - """Use `create_prediction_pipeline` to create a `PredictionPipeline`""" + """Consider using `create_prediction_pipeline` to create a `PredictionPipeline` with sensible defaults.""" super().__init__() default_blocksize_parameter = default_ns or default_blocksize_parameter if default_ns is not None: From 0aae9248f18d4b526620b063bd89f0ff999b90af Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 19 Dec 2025 11:31:03 +0100 Subject: [PATCH 11/56] mark bioimageio imports as known third party --- pyproject.toml | 3 +++ src/bioimageio/core/_prediction_pipeline.py | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 3e5c9e917..3d3ed5b7d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -147,5 +147,8 @@ exclude = [ [tool.ruff.lint] select = ["NPY201"] +[tool.ruff.lint.isort] +known-first-party = ["bioimageio"] + [tool.coverage.report] exclude_also = ["if 
TYPE_CHECKING:", "assert_never\\("] diff --git a/src/bioimageio/core/_prediction_pipeline.py b/src/bioimageio/core/_prediction_pipeline.py index a2b055bcc..0cad757e7 100644 --- a/src/bioimageio/core/_prediction_pipeline.py +++ b/src/bioimageio/core/_prediction_pipeline.py @@ -12,10 +12,11 @@ Union, ) -from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 from loguru import logger from tqdm import tqdm +from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 + from ._op_base import BlockedOperator from .axis import AxisId, PerAxis from .common import ( From 2e59dd413ffa0175031ccf4daa8b1b8137dc0515 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 19 Dec 2025 13:07:45 +0100 Subject: [PATCH 12/56] update absolute tolerance --- src/bioimageio/core/_resource_tests.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/bioimageio/core/_resource_tests.py b/src/bioimageio/core/_resource_tests.py index c4572929d..c581d9eed 100644 --- a/src/bioimageio/core/_resource_tests.py +++ b/src/bioimageio/core/_resource_tests.py @@ -24,6 +24,10 @@ ) import numpy as np +from loguru import logger +from numpy.typing import NDArray +from typing_extensions import NotRequired, TypedDict, Unpack, assert_never, get_args + from bioimageio.spec import ( AnyDatasetDescr, AnyModelDescr, @@ -61,18 +65,14 @@ ValidationSummary, WarningEntry, ) -from loguru import logger -from numpy.typing import NDArray -from typing_extensions import NotRequired, TypedDict, Unpack, assert_never, get_args - -from bioimageio.core import __version__ -from bioimageio.core.io import save_tensor +from . 
import __version__ from ._prediction_pipeline import create_prediction_pipeline from ._settings import settings from .axis import AxisId, BatchSize from .common import MemberId, SupportedWeightsFormat from .digest_spec import get_test_input_sample, get_test_output_sample +from .io import save_tensor from .sample import Sample CONDA_CMD = "conda.bat" if platform.system() == "Windows" else "conda" @@ -710,7 +710,7 @@ def _get_tolerance( if wf == weights_format: applicable = v0_5.ReproducibilityTolerance( relative_tolerance=test_kwargs.get("relative_tolerance", 1e-3), - absolute_tolerance=test_kwargs.get("absolute_tolerance", 1e-4), + absolute_tolerance=test_kwargs.get("absolute_tolerance", 1e-3), ) break @@ -739,7 +739,7 @@ def _get_tolerance( mismatched_tol = 0 else: # use given (deprecated) test kwargs - atol = deprecated.get("absolute_tolerance", 1e-5) + atol = deprecated.get("absolute_tolerance", 1e-3) rtol = deprecated.get("relative_tolerance", 1e-3) mismatched_tol = 0 @@ -874,10 +874,10 @@ def add_warning_entry(msg: str): f"Output '{m}' disagrees with {mismatched_elements} of" + f" {expected_np.size} expected values" + f" ({mismatched_ppm:.1f} ppm)." - + f"\n Max relative difference: {r_max:.2e}" + + f"\n Max relative difference not accounted for by absolute tolerance ({atol:.2e}): {r_max:.2e}" + rf" (= \|{r_actual:.2e} - {r_expected:.2e}\|/\|{r_expected:.2e} + 1e-6\|)" + f" at {dict(zip(dims, r_max_idx))}" - + f"\n Max absolute difference not accounted for by relative tolerance: {a_max:.2e}" + + f"\n Max absolute difference not accounted for by relative tolerance ({rtol:.2e}): {a_max:.2e}" + rf" (= \|{a_actual:.7e} - {a_expected:.7e}\|) at {dict(zip(dims, a_max_idx))}" + f"\n Saved actual output to {actual_output_path}." 
) From d9a2a0cfd0145c17b16478d79bae6eed93f7d2cd Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 19 Dec 2025 13:09:41 +0100 Subject: [PATCH 13/56] update docs --- mkdocs.yaml | 2 ++ pyproject.toml | 15 ++++++++------- scripts/generate_api_doc_pages.py | 7 +++++++ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/mkdocs.yaml b/mkdocs.yaml index bb61f13f1..f6c3c4d45 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -120,6 +120,8 @@ plugins: extensions: - griffe_pydantic: schema: true + - griffe_inherited_docstrings + - griffe_public_redundant_aliases - mike: alias_type: symlink canonical_version: latest diff --git a/pyproject.toml b/pyproject.toml index 3d3ed5b7d..c5352c6db 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,13 +49,16 @@ partners = [ # "stardist", # for model testing and stardist postprocessing # TODO: add updated stardist to partners env ] dev = [ - "cellpose", # for model testing + "cellpose", # for model testing "crick", + "griffe-pydantic", + "griffe-inherited-docstrings", + "griffe-public-redundant-aliases", "httpx", "jupyter", "keras>=3.0,<4", "matplotlib", - "monai", # for model testing + "monai", # for model testing "numpy", "onnx", "onnxruntime", @@ -66,23 +69,21 @@ dev = [ "pytest-cov", "pytest", "python-dotenv", - "segment-anything", # for model testing + "segment-anything", # for model testing "tensorflow", - "timm", # for model testing + "timm", # for model testing "torch>=1.6,<3", "torchvision>=0.21", ] docs = [ - "griffe-pydantic", "markdown-callouts", "markdown-exec", + "markdown-pycon", "mike", "mkdocs-api-autonav", "mkdocs-coverage", "mkdocs-gen-files", "mkdocs-literate-nav", - "mkdocs-literate-nav", - "mkdocs-literate-nav", "mkdocs-material", "mkdocs-section-index", ] diff --git a/scripts/generate_api_doc_pages.py b/scripts/generate_api_doc_pages.py index bebf3e23f..872b9b292 100644 --- a/scripts/generate_api_doc_pages.py +++ b/scripts/generate_api_doc_pages.py @@ -25,6 +25,12 @@ if parts == ("bioimageio",): continue + 
# Skip private submodules prefixed with '_' + if any( + part.startswith("_") and part not in ("__init__", "__main__") for part in parts + ): + continue + if parts[-1] == "__init__": parts = parts[:-1] doc_path = doc_path.with_name("index.md") @@ -64,6 +70,7 @@ if ident.endswith(".__init__"): ident = ident[:-9] # Remove .__init__ fd.write(f"::: {ident}") + print(f"Written {full_doc_path}") mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) From b0b7ccdd3a671a324fbb8a106ead66b40e9cc1af Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 22 Dec 2025 16:01:56 +0100 Subject: [PATCH 14/56] update docs --- mkdocs.yaml | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/mkdocs.yaml b/mkdocs.yaml index f6c3c4d45..ed97e53b7 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -39,26 +39,25 @@ theme: - search.share - search.suggest - toc.follow - # - toc.integrate palette: - media: '(prefers-color-scheme)' - primary: 'deep-purple' - accent: 'blue' + primary: 'indigo' + accent: 'orange' toggle: icon: material/brightness-auto name: 'Switch to light mode' - media: '(prefers-color-scheme: light)' scheme: default - primary: 'deep-purple' - accent: 'blue' + primary: 'indigo' + accent: 'orange' toggle: icon: material/brightness-7 name: 'Switch to dark mode' - media: '(prefers-color-scheme: dark)' scheme: slate - primary: 'deep-purple' - accent: 'blue' + primary: 'indigo' + accent: 'orange' toggle: icon: material/brightness-4 name: 'Switch to system preference' @@ -93,13 +92,11 @@ plugins: returns_multiple_items: false returns_named_value: false trim_doctest_flags: true - docstring_section_style: spacy + # docstring_section_style: spacy docstring_style: google - filters: ['!^_[^_]'] + filters: public heading_level: 1 - imported_members: false inherited_members: true - members: true merge_init_into_class: true parameter_headings: true preload_modules: [pydantic, bioimageio.spec] @@ -112,11 +109,12 @@ plugins: show_root_heading: true 
show_signature_annotations: true show_source: true + show_submodules: true show_symbol_type_heading: true show_symbol_type_toc: true signature_crossrefs: true summary: true - # unwrap_annotated: true + unwrap_annotated: false extensions: - griffe_pydantic: schema: true @@ -126,9 +124,6 @@ plugins: alias_type: symlink canonical_version: latest version_selector: true - - gen-files: - scripts: - - scripts/generate_api_doc_pages.py - literate-nav: nav_file: SUMMARY.md - search From e2d2807c8ceae245e51f4c9e3714546034ea6610 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 15 Jan 2026 10:23:43 +0100 Subject: [PATCH 15/56] avoid broken onnx_ir version --- pyproject.toml | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c5352c6db..cd5cf2889 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,11 @@ Documentation = "https://bioimage-io.github.io/core-bioimage-io-python/bioimagei Source = "https://github.com/bioimage-io/core-bioimage-io-python" [project.optional-dependencies] -onnx = ["onnxruntime", "onnxscript"] +onnx = [ + "onnxruntime", + "onnxscript", + 'onnx_ir!=0.1.14;python_version<"3.10"', # uses typing.Concatentate which requires py>=3.10 +] pytorch = ["torch>=1.6,<3", "torchvision>=0.21", "keras>=3.0,<4"] tensorflow = ["tensorflow", "keras>=2.15,<4"] partners = [ @@ -49,7 +53,7 @@ partners = [ # "stardist", # for model testing and stardist postprocessing # TODO: add updated stardist to partners env ] dev = [ - "cellpose", # for model testing + "cellpose", # for model testing "crick", "griffe-pydantic", "griffe-inherited-docstrings", @@ -58,20 +62,21 @@ dev = [ "jupyter", "keras>=3.0,<4", "matplotlib", - "monai", # for model testing + "monai", # for model testing "numpy", "onnx", "onnxruntime", "onnxscript", + 'onnx_ir!=0.1.14;python_version<"3.10"', # uses typing.Concatentate which requires py>=3.10 "packaging>=17.0", "pre-commit", "pyright==1.1.407", "pytest-cov", "pytest", 
"python-dotenv", - "segment-anything", # for model testing + "segment-anything", # for model testing "tensorflow", - "timm", # for model testing + "timm", # for model testing "torch>=1.6,<3", "torchvision>=0.21", ] From 28c6867b771d28d4cc6d3a123a010a0a059a3172 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 15 Jan 2026 10:37:19 +0100 Subject: [PATCH 16/56] bump patch and spec and update changelog --- changelog.md | 5 +++++ pyproject.toml | 2 +- src/bioimageio/core/__init__.py | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/changelog.md b/changelog.md index 1ce18e61c..8172bb40b 100644 --- a/changelog.md +++ b/changelog.md @@ -1,3 +1,8 @@ +### 0.9.6 + +- bump bioimageio.spec library version to 0.5.6.0 +- increase default reprducibility tolerance + ### 0.9.5 - bump bioimageio.spec library version to 0.5.6.0 diff --git a/pyproject.toml b/pyproject.toml index cd5cf2889..f0aa86ab5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ requires-python = ">=3.9" readme = "README.md" dynamic = ["version"] dependencies = [ - "bioimageio.spec ==0.5.6.0", + "bioimageio.spec ==0.5.7.0", "h5py", "imagecodecs", "imageio>=2.10", diff --git a/src/bioimageio/core/__init__.py b/src/bioimageio/core/__init__.py index bab4a0984..78f43d466 100644 --- a/src/bioimageio/core/__init__.py +++ b/src/bioimageio/core/__init__.py @@ -16,7 +16,7 @@ """ # ruff: noqa: E402 -__version__ = "0.9.5" +__version__ = "0.9.6" from loguru import logger logger.disable("bioimageio.core") From ad6224735c843ddead33de2a2a9b6d9c5e75e984 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 15 Jan 2026 10:44:11 +0100 Subject: [PATCH 17/56] rewrite test_add_weights --- tests/test_add_weights.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/tests/test_add_weights.py b/tests/test_add_weights.py index 225257d6c..31932f4d5 100644 --- a/tests/test_add_weights.py +++ b/tests/test_add_weights.py @@ -1,30 +1,28 @@ from pathlib import Path import pytest -from 
bioimageio.spec import InvalidDescr -from bioimageio.spec.model.v0_5 import WeightsFormat from bioimageio.core import add_weights, load_model_description +from bioimageio.spec import InvalidDescr +from bioimageio.spec.model.v0_5 import WeightsFormat @pytest.mark.parametrize( - ("model_fixture", "source_format", "target_format"), + ("source_format", "target_format"), [ - ("unet2d_nuclei_broad_model", "pytorch_state_dict", "torchscript"), - ("unet2d_nuclei_broad_model", "pytorch_state_dict", "onnx"), - ("unet2d_nuclei_broad_model", "torchscript", "onnx"), + ("pytorch_state_dict", "torchscript"), + ("pytorch_state_dict", "onnx"), + ("torchscript", "onnx"), ], ) def test_add_weights( - model_fixture: str, source_format: WeightsFormat, target_format: WeightsFormat, + unet2d_nuclei_broad_model: str, tmp_path: Path, request: pytest.FixtureRequest, ): - model_source = request.getfixturevalue(model_fixture) - - model = load_model_description(model_source, format_version="latest") + model = load_model_description(unet2d_nuclei_broad_model, format_version="latest") assert source_format in model.weights.available_formats, ( "source format not found in model" ) From 83c3244f59106723f04c5b531b837e003e61203f Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 15 Jan 2026 16:19:09 +0100 Subject: [PATCH 18/56] fix conda recipe generation --- conda-recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index b4ffb39a4..1538cc25d 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -55,7 +55,7 @@ test: requires: {% for dep in pyproject['project']['optional-dependencies']['dev'] %} {% if 'torch' not in dep %} # can't install pytorch>=2.8 from conda-forge smh - - {{ dep.lower().replace('_', '-') }} + - {{ dep.lower().replace('_', '-').replace('onnx_ir!=0.1.14;python_version<"3.10"', 'onnx_ir!=0.1.14') }} {% endif %} {% endfor %} commands: From f66efcdd07fd61cb856921354615f77270349e2a Mon Sep 17 
00:00:00 2001 From: fynnbe Date: Fri, 16 Jan 2026 09:51:43 +0100 Subject: [PATCH 19/56] add tests --- tests/test_resource_tests.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/test_resource_tests.py b/tests/test_resource_tests.py index f4eca96bc..5b09176b7 100644 --- a/tests/test_resource_tests.py +++ b/tests/test_resource_tests.py @@ -1,3 +1,7 @@ +from pathlib import Path + +import numpy as np + from bioimageio.spec import InvalidDescr, ValidationContext @@ -42,3 +46,31 @@ def test_loading_description_multiple_times(unet2d_nuclei_broad_model: str): # load again, which some users might end up doing model_descr = load_description(model_descr) # pyright: ignore[reportArgumentType] assert not isinstance(model_descr, InvalidDescr) + + +def test_test_description_runtime_env(unet2d_nuclei_broad_model: str): + from bioimageio.core._resource_tests import test_description + + summary = test_description(unet2d_nuclei_broad_model, runtime_env="as-described") + + assert summary.status == "passed", summary.display() + + +def test_failed_reproducibility(unet2d_nuclei_broad_model: str, tmp_path: str): + from bioimageio.core import load_model + from bioimageio.core._resource_tests import test_model + from bioimageio.spec.common import FileDescr + from bioimageio.spec.utils import load_array, save_array + + model = load_model(unet2d_nuclei_broad_model, format_version="latest") + + # use corrupted test input to fail the reproducibility test + test_array_path = Path(tmp_path) / "input.npy" + assert model.inputs[0].test_tensor is not None + test_array = load_array(model.inputs[0].test_tensor) + save_array(test_array_path, np.zeros_like(test_array)) + model.inputs[0].test_tensor = FileDescr(source=test_array_path) + + summary = test_model(model) + + assert summary.status == "valid-format" From ce8a96340495ee7005476f487329ba34c9499124 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 16 Jan 2026 10:25:07 +0100 Subject: [PATCH 20/56] fix conda 
recipe --- conda-recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 1538cc25d..a69a4e142 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -55,7 +55,7 @@ test: requires: {% for dep in pyproject['project']['optional-dependencies']['dev'] %} {% if 'torch' not in dep %} # can't install pytorch>=2.8 from conda-forge smh - - {{ dep.lower().replace('_', '-').replace('onnx_ir!=0.1.14;python_version<"3.10"', 'onnx_ir!=0.1.14') }} + - {{ dep.lower().replace('onnx_ir!=0.1.14;python_version<"3.10"', 'onnx-ir!=0.1.14').replace('_', '-') }} {% endif %} {% endfor %} commands: From c76434edabc9deb2f0e3f80258b4838990450b86 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 16 Jan 2026 11:39:35 +0100 Subject: [PATCH 21/56] improve cli --- pyproject.toml | 3 + src/bioimageio/core/__main__.py | 4 +- src/bioimageio/core/cli.py | 66 ++++++-------- tests/test_cli.py | 151 +++++++++++++++++--------------- 4 files changed, 113 insertions(+), 111 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f0aa86ab5..9359682eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -158,3 +158,6 @@ known-first-party = ["bioimageio"] [tool.coverage.report] exclude_also = ["if TYPE_CHECKING:", "assert_never\\("] + +[tool.coverage.run] +patch = ["subprocess"] diff --git a/src/bioimageio/core/__main__.py b/src/bioimageio/core/__main__.py index ed7c32808..123b6a9c9 100644 --- a/src/bioimageio/core/__main__.py +++ b/src/bioimageio/core/__main__.py @@ -1,6 +1,7 @@ import sys from loguru import logger +from pydantic_settings import CliApp logger.enable("bioimageio") @@ -17,8 +18,7 @@ def main(): - cli = Bioimageio() # pyright: ignore[reportCallIssue] - cli.run() + _ = CliApp.run(Bioimageio) if __name__ == "__main__": diff --git a/src/bioimageio/core/cli.py b/src/bioimageio/core/cli.py index d0e09c847..a6866ff02 100644 --- a/src/bioimageio/core/cli.py +++ b/src/bioimageio/core/cli.py @@ -31,8 +31,25 
@@ Union, ) -import bioimageio.spec import rich.markdown +from loguru import logger +from pydantic import AliasChoices, BaseModel, Field, PlainSerializer, model_validator +from pydantic_settings import ( + BaseSettings, + CliApp, + CliPositionalArg, + CliSettingsSource, + CliSubCommand, + JsonConfigSettingsSource, + PydanticBaseSettingsSource, + SettingsConfigDict, + YamlConfigSettingsSource, +) +from tqdm import tqdm +from typing_extensions import assert_never + +import bioimageio.spec +from bioimageio.core import __version__ from bioimageio.spec import ( AnyModelDescr, InvalidDescr, @@ -50,22 +67,6 @@ from bioimageio.spec.model import ModelDescr, v0_4, v0_5 from bioimageio.spec.notebook import NotebookDescr from bioimageio.spec.utils import ensure_description_is_model, get_reader, write_yaml -from loguru import logger -from pydantic import AliasChoices, BaseModel, Field, PlainSerializer, model_validator -from pydantic_settings import ( - BaseSettings, - CliPositionalArg, - CliSettingsSource, - CliSubCommand, - JsonConfigSettingsSource, - PydanticBaseSettingsSource, - SettingsConfigDict, - YamlConfigSettingsSource, -) -from tqdm import tqdm -from typing_extensions import assert_never - -from bioimageio.core import __version__ from .commands import WeightFormatArgAll, WeightFormatArgAny, package, test from .common import MemberId, SampleId, SupportedWeightsFormat @@ -161,7 +162,7 @@ class ValidateFormatCmd(CmdBase, WithSource, WithSummaryLogging): def descr(self): return load_description(self.source, perform_io_checks=self.perform_io_checks) - def run(self): + def cli_cmd(self): self.log(self.descr) sys.exit( 0 @@ -213,7 +214,7 @@ class TestCmd(CmdBase, WithSource, WithSummaryLogging): - '0.4', '0.5', ...: Use the specified format version (may trigger auto updating) """ - def run(self): + def cli_cmd(self): sys.exit( test( self.descr, @@ -242,7 +243,7 @@ class PackageCmd(CmdBase, WithSource, WithSummaryLogging): ) """The weight format to include in the package (for 
model descriptions only).""" - def run(self): + def cli_cmd(self): if isinstance(self.descr, InvalidDescr): self.log(self.descr) raise ValueError(f"Invalid {self.descr.type} description.") @@ -315,7 +316,7 @@ class UpdateCmdBase(CmdBase, WithSource, ABC): def updated(self) -> Union[ResourceDescr, InvalidDescr]: raise NotImplementedError - def run(self): + def cli_cmd(self): original_yaml = open_bioimageio_yaml(self.source).unparsed_content assert isinstance(original_yaml, str) stream = StringIO() @@ -577,7 +578,7 @@ def get_example_command(preview: bool, escape: bool = False): + f"\n(note that a local '{JSON_FILE}' or '{YAML_FILE}' may interfere with this)" ) - def run(self): + def cli_cmd(self): if self.example: return self._example() @@ -745,6 +746,8 @@ def input_dataset(stat: Stat): class AddWeightsCmd(CmdBase, WithSource, WithSummaryLogging): + """Add additional weights to a model description by converting from available formats.""" + output: CliPositionalArg[Path] """The path to write the updated model package to.""" @@ -761,7 +764,7 @@ class AddWeightsCmd(CmdBase, WithSource, WithSummaryLogging): """Allow tracing when converting pytorch_state_dict to torchscript (still uses scripting if possible).""" - def run(self): + def cli_cmd(self): model_descr = ensure_description_is_model(self.descr) if isinstance(model_descr, v0_4.ModelDescr): raise TypeError( @@ -817,8 +820,7 @@ class Bioimageio( """Create a bioimageio.yaml description with updated file hashes.""" add_weights: CliSubCommand[AddWeightsCmd] = Field(alias="add-weights") - """Add additional weights to the model descriptions converted from available - formats to improve deployability.""" + """Add additional weights to a model description by converting from available formats.""" @classmethod def settings_customise_sources( @@ -852,22 +854,12 @@ def _log(cls, data: Any): ) return data - def run(self): + def cli_cmd(self) -> None: logger.info( "executing CLI command:\n{}", pformat({k: v for k, v in 
self.model_dump().items() if v is not None}), ) - cmd = ( - self.add_weights - or self.package - or self.predict - or self.test - or self.update_format - or self.update_hashes - or self.validate_format - ) - assert cmd is not None - cmd.run() + _ = CliApp.run_subcommand(self) assert isinstance(Bioimageio.__doc__, str) diff --git a/tests/test_cli.py b/tests/test_cli.py index 203677ecf..fc805611e 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -2,6 +2,7 @@ from pathlib import Path from typing import Any, List, Sequence +import numpy as np import pytest from pydantic import FilePath @@ -21,6 +22,8 @@ def run_subprocess( @pytest.mark.parametrize( "args", [ + ["--help"], + ["add-weights", "unet2d_nuclei_broad_model", "tmp_path"], [ "package", "unet2d_nuclei_broad_model", @@ -29,6 +32,7 @@ def run_subprocess( "pytorch_state_dict", ], ["package", "unet2d_nuclei_broad_model", "output.zip"], + ["predict", "--example", "unet2d_nuclei_broad_model"], [ "test", "unet2d_nuclei_broad_model", @@ -36,11 +40,9 @@ def run_subprocess( "pytorch_state_dict", ], ["test", "unet2d_nuclei_broad_model"], - ["predict", "--example", "unet2d_nuclei_broad_model"], ["update-format", "unet2d_nuclei_broad_model_old"], - ["add-weights", "unet2d_nuclei_broad_model", "tmp_path"], - ["update-hashes", "unet2d_nuclei_broad_model_old"], ["update-hashes", "unet2d_nuclei_broad_model_old", "--output=stdout"], + ["update-hashes", "unet2d_nuclei_broad_model_old"], ], ) def test_cli( @@ -77,83 +79,88 @@ def test_cli_fails(args: List[str], stardist_wrong_shape: FilePath): assert ret.returncode == 1, ret.stdout -# TODO: update CLI test -# def _test_cli_predict_image(model: Path, tmp_path: Path, extra_cmd_args: Optional[List[str]] = None): -# spec = load_description(model) -# in_path = spec.test_inputs[0] - -# out_path = tmp_path.with_suffix(".npy") -# cmd = ["bioimageio", "predict-image", model, "--input", str(in_path), "--output", str(out_path)] -# if extra_cmd_args is not None: -# 
cmd.extend(extra_cmd_args) -# ret = run_subprocess(cmd) -# assert ret.returncode == 0, ret.stdout -# assert out_path.exists() - - -# def test_cli_predict_image(unet2d_nuclei_broad_model: Path, tmp_path: Path): -# _test_cli_predict_image(unet2d_nuclei_broad_model, tmp_path) - - -# def test_cli_predict_image_with_weight_format(unet2d_nuclei_broad_model: Path, tmp_path: Path): -# _test_cli_predict_image(unet2d_nuclei_broad_model, tmp_path, ["--weight-format", "pytorch_state_dict"]) - - -# def _test_cli_predict_images(model: Path, tmp_path: Path, extra_cmd_args: Optional[List[str]] = None): -# n_images = 3 -# shape = (1, 1, 128, 128) -# expected_shape = (1, 1, 128, 128) - -# in_folder = tmp_path / "inputs" -# in_folder.mkdir() -# out_folder = tmp_path / "outputs" -# out_folder.mkdir() - -# expected_outputs: List[Path] = [] -# for i in range(n_images): -# path = in_folder / f"im-{i}.npy" -# im = np.random.randint(0, 255, size=shape).astype("uint8") -# np.save(path, im) -# expected_outputs.append(out_folder / f"im-{i}.npy") - -# input_pattern = str(in_folder / "*.npy") -# cmd = ["bioimageio", "predict-images", str(model), input_pattern, str(out_folder)] -# if extra_cmd_args is not None: -# cmd.extend(extra_cmd_args) -# ret = run_subprocess(cmd) -# assert ret.returncode == 0, ret.stdout +def _test_cli_predict_single( + model_source: str, tmp_path: Path, extra_cmd_args: Sequence[str] = () +): + from bioimageio.spec import load_model_description + + model = load_model_description(model_source, format_version="latest") + assert model.inputs[0].test_tensor is not None + in_source = model.inputs[0].test_tensor.source + + out_path = tmp_path.with_suffix(".npy") + cmd = [ + "bioimageio", + "predict", + str(model_source), + "--input", + str(in_source), + "--output", + str(out_path), + ] + list(extra_cmd_args) + ret = run_subprocess(cmd) + assert ret.returncode == 0, ret.stdout + assert out_path.exists() -# for out_path in expected_outputs: -# assert out_path.exists() -# assert 
np.load(out_path).shape == expected_shape +def test_cli_predict_single(unet2d_nuclei_broad_model: Path, tmp_path: Path): + _test_cli_predict_single(str(unet2d_nuclei_broad_model), tmp_path) -# def test_cli_predict_images(unet2d_nuclei_broad_model: Path, tmp_path: Path): -# _test_cli_predict_images(unet2d_nuclei_broad_model, tmp_path) +def test_cli_predict_single_with_weight_format( + unet2d_nuclei_broad_model: Path, tmp_path: Path +): + _test_cli_predict_single( + str(unet2d_nuclei_broad_model), + tmp_path, + ["--weight-format", "pytorch_state_dict"], + ) -# def test_cli_predict_images_with_weight_format(unet2d_nuclei_broad_model: Path, tmp_path: Path): -# _test_cli_predict_images(unet2d_nuclei_broad_model, tmp_path, ["--weight-format", "pytorch_state_dict"]) +def _test_cli_predict_multiple( + model_source: str, tmp_path: Path, extra_cmd_args: Sequence[str] = () +): + n_images = 3 + shape = (1, 1, 128, 128) + expected_shape = (1, 1, 128, 128) + + in_folder = tmp_path / "inputs" + in_folder.mkdir() + out_folder = tmp_path / "outputs" + out_folder.mkdir() + + expected_outputs: List[Path] = [] + for i in range(n_images): + path = in_folder / f"im-{i}.npy" + im = np.random.randint(0, 255, size=shape).astype("uint8") + np.save(path, im) + expected_outputs.append(out_folder / f"im-{i}.npy") + + input_pattern = str(in_folder / "*.npy") + cmd = [ + "bioimageio", + "predict", + model_source, + input_pattern, + str(out_folder), + ] + list(extra_cmd_args) + ret = run_subprocess(cmd) + assert ret.returncode == 0, ret.stdout -# def test_torch_to_torchscript(unet2d_nuclei_broad_model: Path, tmp_path: Path): -# out_path = tmp_path.with_suffix(".pt") -# ret = run_subprocess( -# ["bioimageio", "convert-torch-weights-to-torchscript", str(unet2d_nuclei_broad_model), str(out_path)] -# ) -# assert ret.returncode == 0, ret.stdout -# assert out_path.exists() + for out_path in expected_outputs: + assert out_path.exists() + assert np.load(out_path).shape == expected_shape -# def 
test_torch_to_onnx(convert_to_onnx: Path, tmp_path: Path): -# out_path = tmp_path.with_suffix(".onnx") -# ret = run_subprocess(["bioimageio", "convert-torch-weights-to-onnx", str(convert_to_onnx), str(out_path)]) -# assert ret.returncode == 0, ret.stdout -# assert out_path.exists() +def test_cli_predict_multiple(unet2d_nuclei_broad_model: Path, tmp_path: Path): + _test_cli_predict_multiple(str(unet2d_nuclei_broad_model), tmp_path) -# def test_keras_to_tf(unet2d_keras: Path, tmp_path: Path): -# out_path = tmp_path / "weights.zip" -# ret = run_subprocess(["bioimageio", "convert-keras-weights-to-tensorflow", str(unet2d_keras), str(out_path)]) -# assert ret.returncode == 0, ret.stdout -# assert out_path.exists() +def test_cli_predict_multiple_with_weight_format( + unet2d_nuclei_broad_model: Path, tmp_path: Path +): + _test_cli_predict_multiple( + str(unet2d_nuclei_broad_model), + tmp_path, + ["--weight-format", "pytorch_state_dict"], + ) From df7eed15cdf4e4fcea9d8160e15cf59943645a17 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 16 Jan 2026 13:02:38 +0100 Subject: [PATCH 22/56] fix cli tests --- src/bioimageio/core/cli.py | 10 +++++++++- tests/test_cli.py | 28 +++++++++++++++------------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/src/bioimageio/core/cli.py b/src/bioimageio/core/cli.py index a6866ff02..13aa6e21c 100644 --- a/src/bioimageio/core/cli.py +++ b/src/bioimageio/core/cli.py @@ -659,7 +659,7 @@ def expand_outputs(): ) for s in sample_ids ] - + # check for distinctness and correct number within each output sample for i, out in enumerate(outputs, start=1): if len(set(out)) < len(out): raise ValueError( @@ -671,6 +671,14 @@ def expand_outputs(): f"[output sample #{i}] Expected {len(output_ids)} outputs {output_ids}, got {out}" ) + # check for distinctness across all output samples + all_output_paths = [p for out in outputs for p in out] + if len(set(all_output_paths)) < len(all_output_paths): + raise ValueError( + "Output paths are not 
distinct across samples. " + + f"Make sure to include '{{sample_id}}' in the output path pattern." + ) + return outputs outputs = expand_outputs() diff --git a/tests/test_cli.py b/tests/test_cli.py index fc805611e..7e9de4ef6 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -86,16 +86,16 @@ def _test_cli_predict_single( model = load_model_description(model_source, format_version="latest") assert model.inputs[0].test_tensor is not None - in_source = model.inputs[0].test_tensor.source - - out_path = tmp_path.with_suffix(".npy") + in_path = tmp_path / "in.npy" + _ = in_path.write_bytes(model.inputs[0].test_tensor.get_reader().read()) + out_path = tmp_path / "out.npy" cmd = [ "bioimageio", "predict", str(model_source), - "--input", - str(in_source), - "--output", + "--inputs", + str(in_path), + "--outputs", str(out_path), ] + list(extra_cmd_args) ret = run_subprocess(cmd) @@ -128,21 +128,23 @@ def _test_cli_predict_multiple( in_folder.mkdir() out_folder = tmp_path / "outputs" out_folder.mkdir() - + out_file_pattern = "im-{sample_id}.npy" + inputs: List[str] = [] expected_outputs: List[Path] = [] for i in range(n_images): - path = in_folder / f"im-{i}.npy" + input_path = in_folder / f"im-{i}.npy" im = np.random.randint(0, 255, size=shape).astype("uint8") - np.save(path, im) - expected_outputs.append(out_folder / f"im-{i}.npy") + np.save(input_path, im) + inputs.extend(["--inputs", str(input_path)]) + expected_outputs.append(out_folder / out_file_pattern.format(sample_id=i)) - input_pattern = str(in_folder / "*.npy") cmd = [ "bioimageio", "predict", model_source, - input_pattern, - str(out_folder), + *inputs, + "--outputs", + str(out_folder / out_file_pattern), ] + list(extra_cmd_args) ret = run_subprocess(cmd) assert ret.returncode == 0, ret.stdout From 78d213f478be8cf1f8beb956d0b80d1636d36588 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 16 Jan 2026 14:47:11 +0100 Subject: [PATCH 23/56] add doc images --- docs/images/bioimage-io-icon.png | Bin 0 -> 34869 
bytes docs/images/favicon.ico | Bin 0 -> 15086 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/images/bioimage-io-icon.png create mode 100644 docs/images/favicon.ico diff --git a/docs/images/bioimage-io-icon.png b/docs/images/bioimage-io-icon.png new file mode 100644 index 0000000000000000000000000000000000000000..9bfd476554caa60d3d0ea5c0c53c2755fb942218 GIT binary patch literal 34869 zcmb5V^LJg}7cP8an{8~n;fZZFw$<21PmDHb>@-$mTaA-Ojcubb?*4q=d;fsh`}6B0TV9Y+)lB_=n^ytLp{=q4)py zff{xywE!M|aF^0?*Ko3O_cC?01bKORvD!M?yP2ChTe3R2T4$aMeguKYL2{B}pS`nA z*L?g*H9JKvr_$4{$8uAP`3N4M) z#!{;xXKl!H#$v4dLCd0WAI93Clwq}z<6za*Eh3)pmt$!)Iez26QA5CH{KtCoF;O`E zB8%(5AESrRJeFhV|L31^GjUH)Z0+u=@6pTVPwXHX(8IRx>3EGux3;HZEJrI}KG(k4 z)T)mu>+M71R*MchpUDC`WTGG>kc)8f+goLED6SG=Ib}g08jH)<)a{js?2 zfTD@3chFFp`F}fzs|3BjOx>XqzBhJHq>;uX;dPhL2G4_pq@tg?6-WbJD&QPAhYTP% z0_LoBFV6?u?(QEkNNjjV9ZrW}FX^N-@i$&h2sRpP$%fLt0vl>SUrdcNp~I^w_8rED zYpRF-+_A35t^HP4pbT{`T>e2R25lNXvmTS1O_d`((6Z!6ToY3+$fM4eQ@FNwHNg5mx6QI z*lfG=G_Qo5Q$>p(tiN22)yN%Ya#&F)AC7Y7Jhc-|cqn;D5hTO;+Afsk430zo|+ zcwy&v>aJ}U;S3?-?}sD~2(j`}reBEDZvVsI}&bwUKKYcw^5RX)W z`X%IoWD>?Oo|yv3edc-pBfdx>U-3Beec=tpx9U_!aoVR@@cJ61ZpS3HNI8K7G-e>u z_H%A1 zPVxIPL-|ied-6G)eWE$)7ow|GsI+L>d&^0A{Zz{I{V&p|(J&l+3_{-D@jK)cHXWyg zgyu(EkV$5b13ZHLbJWH+UC=ukIXOQuS;FIUyRcCAXIEmGD}(}-ft}srjqcbL{vMqs zmszwhpVdE7je(K9bg(x0r#k2y881&tbzk}BV2s~>C&Qc#jnwV%C4=-VhI;P^t!YM;wxe&Hok;?Lg+45g{T(f#Ssk8J6$v&-M`W-?-iaai{Ju+15^ zjY+Md+Sx>&6%kwZvoEn2efvX>>b`=)7YN)v2?>B#P^JDAzy0`0P|VXslK`cn{!H75h-nr>j?yW<`SlGS#Ix8|@6;a!$p64tgjlIQa3!+AY_ z8Nl$+13lz%i6p}ms1&gmf0v-JV7b{k@Y#(eC`X7vkjMCJ4vv&EZ^+~yI#m=ejOlxp zz=wGNke1iu$DPKO^NjgB>Ho;QQiqd)SWPP*agTeC5s{k6d644c3zncqnyeNl*; zWPpI6ZgCtE!H*t<1rkwZ%}etF+}s9;17vWjDZ523s)tfwGc)Gdt7LG!c=ahrSyJ>d z>XZPy;?}2}hJ&(d3#dZ(gi6ym*RwMIDSWu&CEBlbL zi{gRc5&b^sJLVueikZSExw;%L}Aq8=KBpcz5$zNxC#Qq*E6 zv`A7&p7qS?C5G=AB~i(sk}aHutJ}b)iS?~I)5_2Jf7;?{D*cHT@#werZ}Xv^gkySH z)4Iz~o=i?Kzbpsck$uCW6uPc{E3`+vQkUJ6a47jx_#fjm1SYUFE@5JcdfjX@%T-#D4zIdN4QqZpcRW4*_*8&AC^7Y3( 
zhSbYKT@&G$I^YFqWFtk!PK`BZ*BIHn9ugut9Q`)BscLdj{K!3x+%V3T3CPCl6ma1 ziQDP}H-0yn*gH$HSW{3H<>e!&7gqQ&x1sor&g?FlS0n05 z7CP_nAf~CY;ZS2^t-^dtn8d*W-ps|`wF476iOx37&y`)ESe<{iTxhMqcdJCyy&cbi zB-5t0#$l)q;XeQJ^I2xDYbee zC|v~FSy?@R*tpNH7Y_~*9xhQwbv@jBJW=56R{GWEaN8CnrPbgi-nWkcKU4<`Xt>qW z(4PYnH&vRdtW#lXN`u|D0Aqgkg^)|0h&30@Sajd!hg^W(<`=y+=L9~>#|@3c6WKeK zXewrWYvI{+PcAl=@d)K9kn^#7OR&T%d&&%;uCV}zrP1^5!28u)$2I(JmzJp~e^K4C z@aqId>6G%)%4t^#sI;~Zk-O1SmI zNGfQ&tYP^u+y{Jd`Tp4HPL$6pB8&|Yp$Nk}lXk+`_VfI@ef7Eh0879eABbV6MP?iz zB9#v~BiYlI^b{xpo#^%%+N!e!eY=ojA@fwd;-K!r{cstZ*8-ew zGVM8z1y6@JzU&IyS4t7we^{U{QxSh+X|KhZcXhd~CQ2Lsat`*Pa3E)#OcQ#(4@Mid zP)|f!{#6!YL30BI|9h&@zg@gk8j|EUV( z7i0b~lwiv7pq65zh9G*U$Hhz#xq3%Y8i2e`2C&I+hj#29T?&uc-D# zxErWR}6)zHGAyuG*DdbzNz}(!Qi@@{BGl<*6X^se>gwfCz z3PEveW+foz6%MTKeI*w()BTE^$V6n97$@ZzZ%>PR#YXLdevBl!`K>7Mm}txBC&xTG zXvV7vdcQU|29n}BTUdIu+^}uNyz-wAI1g2p4hgc35XX;eDgUx@P3K#Y-f=yq(0{s8 zaP!VdY!)85JGy)H9P;&k>1Nx@s)S}S;E1pN1i%@O&gNo>)UOMfeP&bEl1Z0_%9N^F>+U zXx7lY3MhZUDtS9wv#2Tf&tQ9kie~GV7GoaPh57@P9}DlX>xDQYot-kjNIdKB}{wYpcu?gego0$=J5FmyrL{npXQ|7%6*%X21=< z(ajS(_unRB_PX-%tVH3Bg8JD6@u7sjiYv;o6npE}i}(6u zwKm!b1w!Y*pI2q=@#E!p3yI#m*Ox#=f&$+zma^wQKa1hVPQOZ~%(YuHGwEs?XVkh^ z!a8d%R}l_HK)txOe2W?*kz1Y*3l*7NvZJDCLff=UhlC*t;KhEgX7E9F_+^m|V3!m^gOU2?j48cO*YYJ8VFwm63d? 
z*)3kbcosK=@^-j`aHMWBc+uvD&N1eZ$Zd+-=_Tt(eZrndxjhQbAtjmW{u>pi4?jIy zk?Z{7k>46$>?faXRrC4RnYACwgW{qi6G~^x5`)`0JHX&c&-35+c1ET>F&zI0wxl&lDZk*7B3H3`BnhfsYR7SiBicn88=G>`BUq zm{mi^?o-EsHMLS8(hqNL4fw(67%%9k=(H)*Q+!3387a$`f=wqR->Xa9SwQryj||3s zrMbclZhqzg^JCf*{4Zn>ZC-`By5^}b zF>Dq`Z*NMc$@TRRVu^C|?^ARJi9F2AJSdRwlnzM=GtE}2#4}nloDEZ(mkQ(2r%cl} z=In3AXP-rLwd<-6`NKOhEWerXAqr3~4C$dMbJ{gf1ITGZ(PToMtsk^VddGC&H?aiV z5W1(@yp|zsm@Fi3?JS2LV^4w*Q=Lrn_=Mef*M+jOd5f8qyZ*x?xuazaAW=A~appz+ zcy3LHm?n|ulvGgZSXr~a)RK2RnMv?@LayCyh!gNW`^W|7Y!0A1{kc`xPuytl!bXE4 zEXNoi%qT+dqejP$g=EK2#mN>9u~?Gqz6G~_$uHW+C|l_2*EbXmw{*{DS3k)hLt>GG z(U?kc|K`D|bK0*DDdEdLwg`P`GC-N@hS4X0n5_RNg)#-ix8-xPy^E(`R<=a~O8RqD z9@Nt_n%Z`$=Z0a%(YIX_t5DKfh}w+iuzpn53I5i64kcB{VCYBR*pm@m6*VrI)#Dx7 zlJL8uwzkWcBv=IcVJDl3$FY%`Z%8m0N5Czq87j+3P66Tu7*$SMyMLO{`L=FwSF$|8 zS5p`! zby0Ocwwl9_e=DT{bbq5gk58dX?XF{x{ss7nx}YG}A;H<1x;&kM4v!Q9R#vYt*%2Uq zpK@DkYV)6J2T{sJP-C==Irn*hXviw=r0-wT8JR((OyHK%?YXKl5EmpqyAAS>`qbjM z1Il^o_H=hA1%H^VthH*q&lk_%sWB4q(TXm-2^b-X^A)sHxyrzgh)PvU;10O9N+}Z+)vm_`VP`M}>%aE;NH=w0+xs4^8Na;jisQ z-0ka5M*3(z=91*&l7LajSeffM#jh-$%gzI-OWb{~|Kg4=N}^^a2L?^A+1rbY%%O(1 zY@ovHVSH%h+V=r$-#)U<8Ue`MT9#&K8ZMLiZ~a$oy=GyJ{EJY$-rae59Eb!>nYb#> z%Je>t^54}QohGTLp|PV4-0N`(L|{(K%U9W=h^jR2Sl7XB5H-FVn+JH@B*JMf20F~7 zHDGvHE8+v`kHdOhhm=_JY$gGz$O^U=@rkjQ=it>%#J$O2JqZ%a-wa)I6>&C3zNd+_ zVx+lwYLT^SG8>p9{J2RJWQrGt+&N#|%#zgzu1tuxD$2oyp1N`UZ6;Ywo#Sx`M+A@; zuu@eT5h8$g@}HeV9KV$aw&F)5htNQ4asS)Q$vqpITuXom(Ena!cT{A5mHkG+D@DxSX^6tGkET#1aLm(C4F#W+59lN!0>gbo0c5+K|+>bD~#a1dP;@a?nq86ecz zm=dRx_rtoASR5|Okk%{}T&y*Qkt5?qQii6}kzXSvJf5KNC5}o$OjDs7np>BC!negAC?GbS- zs0zTGc`m7jgGNrKHqO-$jPKR#$DL64o|qIfeX7A?dy;2ud^q6ter-U}tc~P;V#vf( zl}v(Y4*E8&8+}L$dGdeU!CNDSk@~bU^0t!Rmu~`AHIy>p=wBVm<;C@OLWBSu87#S@izO!@w$^4@gqmYNMZ;Uq2j(Bl8 z{u>ggiVhoK9}M68X+hgKs#4#wFKYlv2nB5&=cABLa$jMKuLKESjXYDCIT~sp6>+6H49%skr64f8*6q_gtD3WL)>=is1fL%U4 zp`wNxx`@WCK|sffQm-$%v!j%SC(D^UUxHCi);Vjo?)h~K&O+)svb??VfHhF8D)v z_|#OU-ty!1!X0-VjM;iW)h+7cbJ7d8RLKNLuz`9-C^?H5ws=u|WFm<~ez|X=22I~Y 
zx|9*R(L)slnIxLgC0y)piw~%V=bzaL_=9HbzRp`&qRZKtii3}gC;2#-FW_s2#92JX+qz6>|z zJ}-rGk^ z?XmcNg}FX6*kWzJ;-B`6_=CbiH4!{HfU)E5u9_oKNxH8~;FywctS5 zgTm%v>ZAa_FVqK*ef!w1^~G1ylYTq^P*Q1^-%Sc~h!Y?P4lW*`qr4wJ_D=j@VM>$N zVkp7(I-II!F4o#i(D@V|e>1D$de&71jPCY-E8i@Ah@d`?#P^>;M1oEJDbhb)H`#DJ zV?q%%qi&`X2u%OUu+_LS!+{M**{G@qTBp`|A%1RFNvau^!&{zqVieHxT!Omv;WQDE zmv`Y%f;avM=M&(TooX`ePAjoMWko zqx0-Zmh-GlCm_H@UyE`TI&H4~c6hOJ_ZvNIq#sEL<{#Ub6SPhIDW8Fy8lVsoJ^pUC zDXVZX0BVdJ{iP^pYV)RckB|X^3xB)*ojMp(S7-4l_UvW?f~T)n=+3$ug@~F`dFA-! zsgoSmO&?J+y&WwPjC9vvxHPWYdUjm6ozLpJyJlFxxckh?haap=bl=3GUzI$G9jTz9 zVC}6eC6TnPI-BkYwO$HIGMg;N3$6^wT?VsN93GlaMbq58aZ^nlUrR6SM8}gzA~Jz8 z3UNYEZh6FX{MFU%bCw_z8O{(~t+g>M%-0;UY67v%Xs(jfy~*EX?=|}=_*3r)wOdz+ zLjAXFV?oGyF@RF>?PenLf1Hvs+3E~oKL5X2fYIFMMieYA2DM_95 z{-_4DovNla0s>+Jc}1Q1cWF(`%5PW)hY7VF;^#q0Tw_L8PlI&?N9P{2>>!#CQP35y>)DZn!E!q1YYCK-q_av zo$ABXQJEfH-BQtliIEaKXr>`k-!{jS_|v(DwejEBJ;Nc17_;#(Mn1Pu{B7cm?r787^o3&Jz z8jPDF0FVn(N1xA>PYgM>l0fo#pdICu2I_-FBW`3ep&Hoo<0anoN6g#dp{L`E5aVHA z?Drze?|}QOf!hAG05V6n!5X7`oks)7nv6jkE{NnDFc4O*92Wdsy){D)6KC9#C(dt|OmtWKDBL z1=E6I!3;i+h?Bb(ldCGTu&i&+NWwun8gzS`_KR`DvBw5W`9C3uKakUzQs)-mB2IPwUIGj7YbGzUzb6_; z0;;8Gh|LK$`M^l`b8}QhYa8dErI8_fssuZk{;FyKnDdr2iQ$2b?kivq`Y4;!OD;Nc zEcB9jP4VAEEI*h|ts{~K_=^a%clu1nkPG5Z9koBL58p#9hx_7(faoJ0e2Z76MXH~V z@5*VNZaIEP?)b7z_*rQpfka+jc9WAO>)n56uo17$+*Af|N@B3&o$iI28p3vBSfN?t zy6~e&We=;?(xw2nItAc@1aRw*_Te<$ED-w8hmnN7VvdmI>x(hai{^Ra=Q#r>$RV1n zA4xs*;T5WWzZhwxC=A17B48^uA_rsMK>>hEB0f^xx-$SiVd3(3mSuz-j`+iJ>X6wo z?uAk>3b_j6i-SwRJb}eTHkaAfv$S1c2WrPdr|LleqZcHxrF;t}<-_zMDNoRdx_EwmLaWHm*mOO! 
z^bBIb3Xl2aKW)sFRuWvHw_X-`=2)gH>`$|uJ zHxf_rffAFr9CUyI*eHj->Zj1ZV116i9mu{X!ok>zf5PM4nC`dB>-$Ol*%&8L%DN5+ zg8Ua`ml7<1Nt_Ro-t_q>-DJHVJOK2h3%tMBNUk>^Uj_JqxEg2fV1vMpPqJj8flLZK zOW%U|DXY7eb&Q2HJ(-svIKH$q+c~m_;E(womB#{+22hm0TuKq)zI)#iiAKH z67lA4gpHax(Rc*YLU~`Epx7&?kjOP%u75!Xv+5x`!a^Wn53lIvBM4ul=_rLv|=9XK`?IF_P7 zsLq@(rDxZ6S{|qud(&uQ&skBcb;BOcvl-aD6^4^Hq5Stk#cZNW)+;JNfsH@A_TB1W zMm__sA{Mg&82ypC{+0_{WtM!(PN+bZ9(lGJ|NDtoOs%zW7yGzJiQtZMw5}P zI3D{*Ft^GG{;t@EB-D>6RxNvY9H|altTlrEwf6S=f7kkvjw{T2hCT(!-niNejb@V; zDl{IV(-^u*sT=6$Af|F+rxixLKM0|PP$uO`FxtAw7gjwA(mwH@JJk}5XgmC z!O6H-R{LlD^9SCJHBX5{;Ti2cT*C~2>q=kXU&}{?#RABif0e#J-X1&YDY?)I$JxQ% zCzNv41=DigkL6@3Q;-S7#-KKLhJ)hIml!NCSW%72cXa+;`CJn)Vs!16|KqD9Pi6hC zC|)yR6imxoC{*b>qv%ah$o_F|u*|XCf61G>jXj9(hN!X>QEMs`8=3zPiMi)A#wnTJ z$oX_lT#|e#4zJyU94L~*A-Z$I(p1C7cndId)H1j47 z1O%+L4vNC!rEheE7lTR%Uis`Yp43rPeGb5ZT?}CKnoUSfJ}#6#PFv6mwawlSPtV@8 zD%nM>rAu1bMazh7LgPv@WBz6o8n|^IAtg)^gSxz9t0fn2D7srkDVzEV+bT2MMQiIE zg0(g*+^f}v>IU1HK>tSx5_+<&{GY4?*)c@rH3=jUK*MZGR1$N#;pEgtpM^onWb_j_ z!)l!4Dy5TMVA(Q)A`Y9RhhXZ<@FC~aPY?xSx6Z2SzYj|xE$P;8Ji6QPse3{3?wymz zojz6sfbMeHs6M%|4#sXnm%is@F|Lq%f zw*Og2j)jQw|xg`ClxJ~MlucN&El39 zke-1tx3MR=BanlCy#EPa^tvWVD}rzGv&^QcM_SmyG(l7KcF zWI=1Z0feOm@PmE*C}z4%@%J)R|G{4srPUkthNrNZNZP6Sl-)q#ki*|fi>Q2PG@jbD zm3ADDm#s8|3XItdKu7`acioL{QcmJfK_%aVvX!=Y^n5pp-ut@E;a}H;H*YS)sZNK> z2(1!iZhD5hl-~m2hdK?EPFbSOf9Jcj)UC8mb^vhD&pp(;^KT z)VixV9PK&^9isDqW@GAI#9|n%u?V6o|~^rB_MA55%VeWX{6KIF%|xu&y6?_Tv^ z2&nmD8)W23uDk;|SGy&gdkB870gav=OOU8RZ~U$_w(b+0JUt5uH0eHN7gS1vKbDrM zt!G=L!rL2&n9LHcJNdzapfwUdAg9aEl7IvhYDP$r8-JH%O!T-v5#jh!OY1Z;L@)9V z|MKx72H&3?5>elVDZvIn^Hac?k9}6Go@4iFBeVXDplUQemB-8vL~hAuhZGWNX>w!C zZau?rp>>@^6J+|_mzn5)L5U=h8hrVFm_qO#CdTW&pjn3#N&4UXXUzJeosNOL@iI9? 
zJ@rbasFK(l7v=QOC5rbuNe8=|ef(nd$Uy;1m)jTT;$H}~BkPeLCA=@^)lY%yMNXWO z+-2+kmSkByUXmEc#wmXMz6Xt3SY#EWDrWk}M64VNd;4AoD9M)Gh&3fbC?tZgHgc)y6_bs6wG7TxJ zAhOBN?AM23#o$Eii4|LFer$BkWBmv4ISv4tzaSz@^I6yIW;*Q7)uo@?&iDT1aW;i zlZ?C{?JQ=)rt?2L$bPtZKa-+%4B#KJ&ZK-8@LH*L$mPaBW`?<9Mbj^>g)-o+L3!k_ zqmuh=Gtd6^76-@0)h3T0G%Ex_6bCgguYJ?gLETd5=IIxOIG=T9dp9wdiG3b!ZZ;g= zYi{0|IOEWNHeLS-D^Vx$TSpJ@l<6B`tHE?uzwOZS-+hh>^tw+)PXRVd0Q?AmhNOGm z7yc+VR-~=Q>AI^|{^a0kC0y;*GthYNX?D}@BJ*UvP7&HM*{A%bBhuv>$P2w*I>i%Y z@|`r91Y!u_4~qrC+$L)BvY2sy#(1Cpn!3RM>{`Y4WqT;sGoNjoyL|}(xID`7Y}RF8 zh^aA{#+XL|tH2s|{_JW~^Ca}#bZV7BT}^$)3jQK;ugyXqe2MVKZl{$S>&0a+PYR3X zV2y`l!v`|vUnhYGXV=AIO;9VbJJ{y@8i7DA_+5%KBGM`ZSZ=o(qKsx3-(WG~TU+jIP4wB+jsCZ)!7imaKH$I?d3CDBA(|L>10=6K$>QYsyvH}*8E3>HA>6VkffaR4^&)sR{ZmRZ{>Hymlw#cN;I3(-KOw-W5n< zTK}G+!;_mnQ5Qq=3>+((l7< zSzkEz&h`8w@3Xw-QXHaCd_GM5tZdd44P7APR8~`!L?T50Uk23-F5p zZ~W-o`O^|^j*d?LqQyl?v=RCt;hwP{6~k%{n*T@1+8(>RL7+MjV@#FERIfbrZ!S~e zPK&KuwK{V-V5mIxxSg#=5k$a0ld7kaeD`^G-P3FR_P(M0{dFzjb|C<^(uclsa ziFXm5$8_RwMUM_u#`*X&=Lj{I~t&_Plvl zcR}r}Yc4S{QMV%tPv=f>{OHpAIat}HfUU8VP+TF1k{s9hK$~HhXjp)<>%#D`ilwff zFB*Gvq!LR2)R;%VL#p0U+;iGXF!{r6T|zz5(YWr{6ZDcP^=P)&7IVpWpCXNbqV~4e zyaEG2J25w_?|5{EGX9I`fILZwnRH;-6C|icNXeRIg{r{i2$x|C_r5b@`t4z2OmS=i zi7e%6Q1ck4Bqm6BN7hxtdYic8U1p3o-*>9kufY<$!1bu(Z^n^L3Lh%ZFNT$ivHP&tp$it92&= zQSH=f|Jwl{*Tumrm9NlWWMA3k0Osr&u>?+YW38^A5FOYCxZ2P~MG$TL^?L{idV7QA zmS?(qw|0vnpTC-&GY3-n3)Azr*7g62)7pQNr_=5IRBPf&M4Qs?W_m_Z?ewLzP0tgP z4)FX^K${HGV;S#W%s4Hbth2oPBH%?cg$2iWOzUTIl_XV9t)&se^(<7+lZex1Pbk2e z8vvq#V^uk(4qW)FDx~j8INT5H@)Xwr2a3)*R3K7=r(cLK74fu+FFGr-@3o~UC4C5Q z_rR_EVR?7QN`o7kc=5Ych8ap;Fq&=hM4W!}_$MhOimvPNiqT$10{^P#jMa}GI@hC| ztwO7rtlY7VtiGhhT(S~*lk1@E1Zu~-QZLPhX6r_b(r?C;6PV>o2Iwf1Mnc z*XajtTHf$rzd~voTTmNo-j{hFKNkwIIq*#C^rsca1>Rq$EK(6+Z>2!|oE@49 zqoBc@?k69vuY4EOtrZbBq0q*ibNVY)FF3?Yf4>vv_wbJ5tT@inenR_rA?JnHXajq^ zA2)pDhHMeRnRT(cs=D*rhbRijJn@D1Zj?PNhuxp^9osuFsjl||8oNBCutt#$-yuPS zG&LZ-Ffh9JpHXS6c_`WrzDXfT^2nask09t&1A}meB=I^40W2wsr!`)DMYoArKEQAY 
zJHVe(P?g8@zgHRHE!n8S8%$vbo3i#UbwsINN<)@iYSYvvy2SS{zBo`vpKop`(G2FAxNyAx`lytmNEb%)1F;P35VB)|1-fc>f}qfT7J? z9(>#rK&=VgVkQ?baR5~1>%%$BGxs|kAJ}8{0iGg-ZRWBsMOCl!22^6uDP8Rpd2m1q z7cST|aYNWxfA=@n&_h(*F4+7zCCNd62a+BJE?`KGKa;C+~49d#bKx zYd&+PCuT;NOvK`x^U$+i=eMZ#KPkbWOQ$qo0IjsE)YRst#xdkzoHmQ*0J8xY+;8)I zGpSCue5GNNDstagW~ZH{5ucaQzBtvMlZPYiF`lyB6seDHb=5Taz6+vTCU6=AKnh4 zh3Xdt@%k`5wzEHm;6)!JzX#;L0@?r($dj-=E%jC^lJ$kq(CzVW2 z4#-_RpWccAIEhnz21e??;zdvTAf3DNDy=s{dd&v?_U&&}*IXYZtaZ+*q&AXnH&;BJ zG0IL@qv@$FnQ-et1a6kvj?C*3KcBT0xq0y-j#)as=Y`&SgPuXC$v@PUV{C6F z_c7^1hbkapLhh=?QD0WWT2vBzxD-BJ$El@Yw8wNl0wxXK29E;k533J_zheBj-~s0D z1!lBOX>H*mDKoeNSZHBypzE?j=81gYgJ=|uy$j(GC`(fVe2@)a|rql^^=IhxGB)V-JIZzK^3JET-Q`NfRU@xyWj=S zVn1tkv#&(-cs-_%e(GGn1(48xhbyVu9{o0ZN7A_OKnA#8A&s-7r=dgWx>l3e^M?}+ zM^A&zr&kT7D5a9$B;q0k8lDl#?ivq%?8N!>d9sB2qTQ#eoYBB7kp9AzA|ZjK*SGk9 zs=G5-xv!whghx%h?$`T$o|TQAOTkK5T#}ZBvMyj`AZxrnSWkz#Xh^j9I_@J(Kv2vn zJ7>6-9{k4NtER^HD>%!)LFD@jKQ8v#Y+pY1K!ci;pD)4g5n=CquLkwq8?`qd9|OqE zM>Je-n6p1lTFE<-LTj8=Wf$2(YX{arhryj^xzlbI422(Oo}NeGw9T5;lJU>~NHp%b zQ23ybkV0eKw<2dF3|nE{7bSs@e9dj}dZcz~Tdid8#oX1h{|O9sJ`3`pE!dc1;Pz|V z=bO*{CUX#5q{zfDR_?aNLWc`mZch<9nZ3^2#u==_%lmtj{jcdaxz2KOL%jmS3Qp7~gk8kx;?p+%YOxg(C z4GB-~DWo+DNzA#GXP1(vS@|*V3x0FkKch@4-pGVt9^*Zm2k8+%!nTJ{tTf^I>>47u z%{m&idF{V5JV6>H*w6jRPH73h%x}DubNnWCim?Ml8eHED<-_#^#h*-ppWr>527S)! 
z`0}8!YC*pFrvK4ySa*$;&OL%~3u;B1kCYl(Y`t$FP0GJ;_-bJamVv(&xbgNFoMIwW zb>1ss{7#TaHl~IcM*E>Vd~k1(KOanf-7ZJQ^ZZ2Ymw#44xEF0qKg1b=jV#8(nm`hM zf|QF9^3i^@t@wY{Lx+&l&eM%=ersJ`BA=6s!~~jF({H3TC@VqFX7g;MUJXdB?l|wz z_0r5ZB`$!B7*hD|kQnW4lNKb?SjPPZJ`K3xm2P*L@Tq3zgAsEdWv6cIycQb(=6(aet3g_xtu}d&^Sk)(g}V+O zW>Nb$A&xkdk(XbKI<2S^4`d_6$$_f-0Y2LWalZ#N-epACDa8Kl7g3B?#aE$^SQ<8X z8TSFD(MIy=%o0w##b+L=#IrsG8z%7XE*Hs%>uY`(79)#zI0TEQJN0leS@>kf^Ga=^e<-Vm+Ydc zdy|CI3PxecSO%7N9VA#w5B>coLwtOPV1yznPYtxxYAfETmiULZMO*>5hRT`QxW9K3 z><9=E@6L6Qo{gsXJ|eJT6HnGJF0-MN^Y`bbbqh9+wH-NH!Ux+WDO|LmnX)%v+}svL zL&tYKM0mi{Q3iMw!ZQR>tX7TIeG`$}CVcu<;FZoR!&tI0F{6!W}`%s&`^2HFs7hr))wCu`3=rQwGEAKbmTmqYuLlEbZg5jM_BE_e{R&h>{ zzrSPs0pXfqWThX>R-@AN${kFxpVpzOVEpg!W~?Bul2cU!1tQ(8!`eJY%rs6ky{K22 zq>tGoz_kEcXF&H^0FqGnRPGX7_gHunwetip2b)UkmmNc@z$x(_AmzdSk2mXB#pa`{ zGCzvyYo6NQKQ%uu=ZwcyqeC*a<>dB3?CM97{aOEqrmKvqqY1JPcX#*T zt_g0zJ$P^k7Tn$4-CYC0-7UBi+$FfX!w%o>p7Vabnd#}NuCBWGR&{D(r$R1+H7?30 z2fxuz`0Z8~XHyz~InIMr*hxf4hYF|{D0BcPDWqf(*0Mq+J1) z#t(D@VO2{}6ZIt@vd5hB4vT_ZPBK&XB60xC~@_2p6EeelV>>+CIKYk+~+aZ@E&WTT1F#bzEriC9n zeP>qjKOAx%uVAa;%zU)K4c!2MOFVO(qUs$iTV}28=0Da1fhf_MD7*RYl+<=~JHtbM8GzyCi{YN$ zdd5hHF0y)HoYOhI^8hQGlJCmL3tyWGZMecc8r`e5^)@W+)hIqhHCJsmku+A?CMS{_4Q2aC7DJwX8YRo zzQz2dFxB|Y9#9wzdR_tIy5NbG2m~y$&a;p48nwhmM)Xr*4a8D48WGU!wfYmzm=*0R zB19O6#IbDMQ}A3HYtQch>VY9`OTMn;LfDRnKeUxvt6#iW1RalS-hA0b@xk*Ra5=TNAb>2eS_(=%AN1Kd2!Hg$PqX&sMV@k%pAy@?K&!;t?=gx9}Tvb zdwpH`*CCKu`?Jf}nwCVGBU;IfmNI4p%9NqxGE8!|;d0DzkbJK{xX7}|oKnH$Fy!_v z?XE!%V&hnw_&$1aeK%`3A`5Yu%wJnzi>0@VO9M5rMjUUj&%rAc`g0vrH<@uhrXYs< zrBa{{BYiu$kVzhor1I$~?>!j99e$aZW|V>qn|G-M2=5qIq*9&6%znr+7mI8(;jnzK z$JgK@Lrv8}YW{uK0Sg%t--7h_1cbWMIeu*ttw1?Q_y@}rc?fyO3w+xn3TNE4>l+#H z%Krv>^`M4Yl9>6v+z0;DCH(46=q|BGtQ-$dl?c%lQM{g%`Taj}(^4I+gw0wC7CYyT zzYb5yGN+}9eQp}cRtH4S>g~e;*oQo?%-bE&*N8ygt6g3-$-6f1`Z97e$w>Bo=AQdr-7gKCqH=B-nxNRqo*uo zG(Z8`f*j=Q(`yXqXhB>PglSvCf9~5YwV$oIl}`tt*uU304`7zqizq0*L$qL_ZbMT) z->62{oMz$FWyj|*^FkiB>tc<`YxUDS49Hb?kmQxYs*T(JQ`G_FuD{bQ1e4k6QS?R9 
z0DKJ8KEiKjcPR&o9;bqr+i}?^F2If&dJ=V@N$#&AMBgOgM(b3%JJnF~CxblRV_@aJZ>)O_4m%~eK)mxejgOs8 z+^v68>#%g8LNMwcKn2(M1XuGH_99+URKyj+&vWS6a417jPiq@zFg6e}-WRKTIq9bb zsyx4x^f&oM=Fd=3-*`)*n|wyMps zWPQF`suaA4zr=#+2i78MN+Gm5o}0jC)3wC79X*@}Uo7^Dt~GCr8r$54zPF$G^?AEnHP!ICK<%3dE&aCX;U5y(YY}+z zQFWkBDRzOyLLAGGfLplyzNv_zFscJ=IgGVt;2Q{j3r{|7q1zx0EkqeCQ@-62B$lo^Xnu0?-bJ!7w>@Gr@D08Jo zezzmG`yEauRR;Mp??`L%3jzbb`OQIb9fBSPUaXm@-dta1Q99S}6-}xw{TotClzns2 z_Qwy9-RmHB;qjFA3YumOw1rzxqG;;G7)R4s2iV-OzJWF-%*3O&v^5K8{|a2TpPKAN zX3s+0WSXRm4)duOS!1U_T;1$z7))KC##CH!k48J9`-+H007NzQ*R8P$5e+UHL!qb? z(TcJlHFfIOn&2$llrKKG^~WhSCT zg&&kTNfNEX7+^_xxsaEt&eayca?T(fSRq__H=a?MmOVIU*M5TZwz@RQB>dTJ*4q`3 z|AP4X<=ZB^_e9Cu989AR+h~-sa`@1Q(~SBzZ>91w;V(EMbdD20ZI?r!@zTs+rbml~ z1q85ZqZQH_#y!crn@|lSY%u8PswNN;wUqo0M&`uhW&_&GjL*;xx7{NXV>B!fyPnzJ z%Z9MtN@SD&cqu||VR%oN*EhU*s8Qfnx1ma@8H(klf$otsmXe(j*I{4sR?qg7u2zc{eAS^ zTeY2Nh&b6^`N%%}lMqfm*f?p3aBwPoKN4;X0F>qC02k>_%e?Uv5R5G&Z~ejFB+H22 z%VwZUw|-{v+2QAJ?SsuQb|s$Py2*6mb$g%ew2>J>KSs~;ma=4Zq4J7*%T|9Qg-8k~ zeUuY1^5gsPZ}30f#S4*72Tw3!C>-uMGL2^fXsYr{S+Jc+8$16trLX9c0g>))@OEXS z`p@w8IxPY6H1qablXdTb3;5UY(VViVI2PSRa+?7~;5-JNl z-%Yh-5UBQDN+hb^_O6!tR|>)?9|g9I4`%d*Rk@C(*>;j#bf6W5(@QAt@C%)@AtCTf zlf-nFp}!gUaej0}4}Y`Abr1#QR!;_ZI=(EsTrSPDs(#b0wv`PwaZTZj(!?)7DJHXJ zOC$zF)B%CRyF-xr0q92;rX`lhqr!vZQ4WVyC42lx*J}&QWOgt=>)t74Bb0|H;`}BE z-(uyug1kCMs;qi2ZWYR;$FIJel;EahA5W{%x0*`DETS)0 z9z{JduZ5hh2sH_xUNFaxUQ5%58krR8UGi{}nGndBf0BB6?u^ zJSaoFzc{$Km`A)>-re9xFZdn!>T#|lxf0em>9qF_EA74DrQ%G=Wa!aG^l8*XT8J0f zIKYnv^0Oj)7$VA(+wERtKf`5{qtQ^AD2|NeCaaIV3fdFs-SPc+r!t<%j3xTovpA<7 zlSAxsNSM^^m#>$Xfc;2qVDd6tDHh70*)zRcfaY^~X!*NN+LE=@EmoseZ zoQB*~47z!3hghc+JnI{wc~UN&JUC7wId6vjS-m5U&zw2yGw?-;=b8H>QWMa7OSPPQ zkRCwmi$ONC2B$aq%2LIVZO_v9$1^SQ42?6uKhHDnJah?G`kCx^P0=rh7;X#D6#jbO zUhu>W1Ga{L3yx63*|2IWUk!f53V7olVn{@k6yVbXG2$AIQ~bl4_75Grax;vlpGI?S zfobN~mYtG=$^vuQ)YQ3NsTEsfh1tsxra!ctX(HX|Xn2i-iVKVpbr8nd2WdwFnFK;` z89Ya-9_sRf}joO*R;+UC5FOS}{%%1IFCA9Z()Eqmp&-;_%c9o~;LbcT;>*UTe< z*>sjKnbopDf3$?MUXAwIm|yT2sJ6;rdPJ}=hY5pmpB 
zwM(vAtyLZ2yzWM8u&FtpoxAq_;yBy7spEvoe&KK`9Dj?D%;CXKcg4#<(Q_xlY>`gy z)yZmk7r@aG2~m5DwIu`|SB|ARmEn1Ot2~wIs0GL^YM4;SoNidE%rtsXY2^>R4}RnI z&jhbFu1C=2QFqO_wz0E&l`|*&f{!d3^-3$FS={0kw~mP+ce(*Bbp3$p*$O;EA-muu z_=+}H=krsYW3Vy^l;$B#jFWsgx~pS!scfb?=Pj#>nYo0wNYMuX`00`*TVt}5SV3F< zBK^XlErR#IAT$3!W**WmV(i$WV>ryc{U{+*9&Vx6k4B<_gclYUuq-73_7s$2Y;^I( z12ff{`^>oUmGF{ek0ZTh!4t~C^p{kbiqd}-&P_qCNtQ8D+LxNxb(jAT`n+v=@MbYHa2dWE{BZbppj{nY zJZ`FAFyEN#^R<%RCzWrwGHMTOs!DdfUKXqP#DVDae(Q}wKp_Scd6qvX8=}WgNM}Rz z$N71jUzoqY!Q-aUTgjHeH?%@)d11`@p4?orJHD@VZn6;(m^lVh?=RrdYUqLS$44_h zxEK_BeYUV>&_blbzBVe`RtBxir6HI?sGl+^?10v(d0_>Y!r%&{iF;4Z^vD2Lej*00 z%Q1r1h;)3#qAZauV!we=O(|&YUaSDRPTSdJY@);r+Pi00yMW_=&4Qvo{qsNo%noCf z@A{DTX*NwDoregi*VW?p>qHUK3zMU>S9Dmt6r%8j%wJN#h9Pk@<-KS`aX}uY=W@2B zSr|hie6%URBlS5*$JL2RAuP3rKSSmE$&daa@Ju%J9De1}(@i2hc<4$MswN(h9Deco z&-XA#{ItK+CLvqFh~HXCK&VljFx}=YTdtz1pJu3|DIRBa7sKFw)hs5!QQn{XadI3y zugb@x^bObNdnP7xE_3U^VGfl1b1Lj=*uFpcWv6!*jYl*p6jw^3f>nZ=37*YtX$*U3 zEaHs&i1y%ebaDs-((16nkG}c=+Tw3yrjepJ#(-}^1S{mROi%e!_yJ{5v&GvaBIE~0n^yq}!MDU>V8+&ATC^3&A6g#?5 zk<8(Pt~&=msfiQ%uBin7b7d_X7eUX*9Q|D~!#TU<%;#*`4H2B=lAV%!tYQqMJ1sqt zNP2#zMBd%~X-$f%@5QzBLgUlh!>-D5!-O8rcLO|!!DMxl3F5Z?W4GCG_yem`U4@;` z)M;XS|7zR_XUJ(Td1gl}%s3kqM?8P!KKH^JvgXECPQrp&8soaln0ZH|S7&u~9G17X zb%u6$oP%FTpNrc2*~SVu!%oa5o=Dp7EDmJ~_#LDpts_oOPC}H7Wq;G_B1>Pa5mdf> z(PJ*fYDFcr{CPmodv{mOX#NH87GQ2pl}VFnS;9xZ_^$sh~0lyoD&3RITe+48_oPo^yDvi_ovyk-}K?p zjm2p$O0E9e67xe3V`-rE>FNWkhv%P_ojqVz)x&$Dx9F{n{o>BDU*Smo8R5G3^Vr!d zma+s&FEPojr*^b7d=+>OKT$g4`YR8{tIxF;tCi;iA?XWkQzi_s_yMqZ$)K6sVK0&T z)ZHzJSTQ=PsgqKvGTzl0N}B~vhtJM5>NC%UK~+NdbR(*TYE+5CL(d8C`NWYH?4nVD zB+=*4e{XBm!7y8@Sd3I^uJn#^(qQtN9>u(he{!ZeR%%zCj*i~IA-xZkWZf{*IM_;Z zl8J&X#Xcc9ly2PGwe)G(ny2C2qF85mIY%_;qqn-*ZSqds)SvAVT9|O&++A=lpeFAQ zm;C{z^yv>P<^5?eKiARI16%gWvoNG!-iAUJZ>{{;$NU}WfWS46xg7S4Ns&EThw%>_ zTuNkN&uX!4Bl36VEVaf2a)r=a9}7~}4Um!AS4Th7pl8va;$Q9Wb${noU~b&Y22vqX zryEk`5R~Sp(`*j@o0u<^RGTJNF%%fDij_9SmDvHP4`btchs;3CZ~3nlK&JL76RN3K z+nlLq4i%f2nBYY=xfi0j2gxO&o)U}&0-F_Tf}y((&HE6PQFUtY3q4xFHVHg>U+1nH 
z_VFaHx`|W0aCEdeOD}u1iV_SUE6;DQQ%U#b9FH4clFk0A_&|^%lOc2CQRJH=8SwlUrH7`e^ zxwfWpx1IPK$w>r9VO7)UPVXr;r2?^G;FB4WTz1qsuhirpU=IPz`sz%hC|SJ~Q;DT? z&W~UJ5h63nd#0kN)N+3k%zdij^2*>YD}~6vi4aDre6-}^{kO^x_Zkmlj70E)GCp#uE$r01UAVD z3l)e$mV8*7?i0W{b#cobJDhsHM?LAM1>1T@a0#5aU)wBS9Z>qZwD4PV{T9Bja)OG} zzCD1lX$g_0sY`M_Af7wxYL+u@H@A{(jUFm<5`L*3*H)~QG?~a8dAx|$Qa!cjcvBhK zp3+`64zc4zp!Ck+I^{c=gPdKf!{`>(jq+iiH?b-E1r;S(a@7@2O~_+#%&GWKByFmq zEaG0XPjsCyQ01GLdPM*_Baq61gfKG&uf`4&KCh{xTsZ8zkx}>`jBxg>ypmn+&o&e+ zT)PoIdx1PR?wyNENZA&CDyzm`@xRc`;8~BLvw=N}wS2yuwVBt(c@{w2>XA->jWeM~ zI1+n~fGnFU7kc_L{aesDNdqoXgQv?j4o%qcG?;X~Y%69Em8z6Q{m2y z#bxc$qV9bAR5!Asi5XFaV00C29E%+e)x#0?pCdsS1Ej^!`g8(N|PmFFSH;}tB}%yu4&LIs;vkG=RLqpioO zHKdDry)@|Jj^oe=R(?MZuDNIDo&`-{zz}@kx^L<2)iJ~u!8taVS;+gf&kQia1DU+< z(m!3#Ja*YWlV;9OK>|Uq`XC}iQK76eEc4!?X3mHzU|skb{O~k_TA~<+Ss8h1kY^L^ zNTDt&F6ZybM6jT#l93UK_>xXc%LdD{xZ^J?M+X z6ct6OjwxG?sNx8{G0Y`+VcBPDlNltj1?$AR4mtRn zV(di6F5@$O6JPjyKABCAA>&r_YYKoXwCArkA7t`<8!f+#V9iq@hU4Ejb^jy*67LGALGwx8U=wnP($ z21Lr+Y*dY;Dw<9Uw!2CMK_6Bism^1*Bxb7&^?bv?k3>hS?B_b?4qs&9ir-(252X`- zy32AWF$5 zzhnnFJ743P)Te7*3)&jM`&A$rIx+o9j8~>9vIF1&2P zegP&fFQ|IGDW`rJ7(%umb(FPrAgsG3Z{%H2ClNVCpRdI%Nm`t}l~d z47rk+HBoCr-+5pha?e{Q9VAur=I2Ch%>2+%d& zieGTwyl!LLAkp>2l-+!-kK;TB6`}hcd5sgBz=j^bBY_k-_LPOTVqK_)$QdJuoc$yZ z3@7KYK4n8VZ0q}%!4 z;e#`$M5Uf`(ihuqlR&!6yvPHT?)cq$J^9W@XB{bAR_|!k*ZWgPR(Plaz@n56`xosg zFui(kH^_UIN6DFoV3z|foRf?PVH7Vbv?&Z?7F9o&22yxRt3$kaqHp?6ch(SLgV%WZ zG}gVS!cJRbXupJ)+D(>XpGz8THqIgPWh@XQANRo}Z@QJ-g`(Qb{sshfy&t2K!`6fd zKyL20MCT3AD;{)hS*UVi;vj>tS2(*w$Qbh?_|K=vpwALG1&suj?q$(S^-X0^6#+2d0=0lE8gm zuMKRsH7Ld}lO)gK?_2c^2@1v0y0_#>%d;TZB}_}ZUd3;w#lXrYMtduUnVXh`Gx0?U zf!{+cgr9-swB!NWnpj6QdXs{1#6^W6J+L#Im^9ysPGI&Nmj4`wg|%DY>&0x}d{m~U zI-Zok!~%Fet&WH_sxYBousEh=blALtft})WI32>462#%zkeBR~8U`v5A#^p4m;~t` zC0V&s+C6e+Vc|8D9BhxXH)^4F`jvY+ELEtMt4Idjqo-7W4&tJ|nVQ1KPk5`EN;tfS z*wm_IfMo+s2yIegX8PwQpz=VWD*({|)KX6kf#LA}2K7{sLc1;;RXH zL0Ntj@>`xa;A?GB&tk!-PIosE9WJjBkgq0<=W`ZE@SpPNr*R|uKjTcbT`m^&D}|Pz 
z;v!%}Ts$j&7kgI;wdBw@I2lj|{Q#2%rGm3SPtiz5iiENX;X!>ivNc1@MKc?rN-Tmg zJ1j77F<@6H8dvCHSr4=1bZP}_Xmzf9426Bn`z|hQ+;1`2ibrSnR*DP(z+x6ASfqhI zK#qEAS;TVK;XOVGen{r)?57Vw4k64A0@YG)nQ?uK^Q$%mae~cBiJ;YKv^n^X=GI!~ zX?F>Jx?8=~^nLqOet-C7w(MxCY9=a3CO6T|rAareU+60&dEZ7vK=?PTf&Bdmg@-1Q zCUP5?rbilOM{whJJg3u_&J#%uziIZI5Ii?#-`~-$mbY?w@(1oJ2seA!llw~aW%@eA zH=e}F1iV(1rS%?w=PM9*`dojM-qJQt zRya4;cd_~HxuKMkvvaJkwuF;QxZK9Gr|NMw)8yhFtp1rD2LaMA$Ipzy>gbWf$zj}n zFAx42%(u4fdnFAJA@acw@d9&0E~7q?1$JT7S%z<2gfAp*zQkEY=ia%?1PSZW4^!)% zLt*<*4Ez~6dh6tomh#iSy-54FzFsK1pBgGbPUUoYWiNf$EO4l=RITovy_213Bp;ur z#`L%Z%#$|b{!p%hh50sUPrensO2~LuOX%{PJ?0yhmK@P(GrihaT-m;`Q59VTxSKY=pX(r zP9V>wm&iulZ5P-oYtDjJa(9>aF$eMUoUNL}&+pn%O|B)G@_jJf zBzDa|XKfn@=sr2)9-03b0N4-l28)YIHyVhQl;a=SKX5>HUJ5I3=-6Vi)*pShHe9fF z-G4g>YWBImfK~-Qo)a)BXv=YIj0}24iP}=pVS>74}g_T^g2b@uhcu$1Skw&@jd$l5HX z=6~xV7!Nrr=J3-RAFmX{s9c+6dc1G{ztP*0dvNqeWqYZR| zI0sgIF3wK+dAidLJ zAA}tKnC^hoba#Fyo1Ie>Y_k>t8TEWtTp4v&v>CZyNpqoAtB?)t$|En=;&G)msz>2y z=J@&L^8zdUjN0qJr-t1S_Uv%27mU`|-dArUHa8fN;1DR+Wis41q+CvFjbZv0+hygv zpt_5GZWPbW_i4;-2@5Uc{l}@cy6dQEm$R88$xUUnS5fHwOswU}CurSo1>5@)9B~P1 zR>nhiJRjFhN#&?{zi3|hD|md6li>Ab;^zHbZX*U=wuRb+-56MR83M!OFL`47E)S>ft9p6oEtVL* z8Q%SCK4yrGr&h!L9WcEsa#8|E0Y)1Wa;h3;athv9-9k(@oXa>p2O39G`2Z6Vw_A>; zvj9is92Kr-AOsIghe*X{_TeMxTI!0SUj3gW0wm&}E%aUdIqF$Fr>_Nz7$wyFIR!#Q z5cT8rk8uzmgH~5pJJcJxeRWr%&$r}mb$(BGgc#^WPIO!Yuc%y4Vl7hs2H-gXS`$+I zI9gs1hB{4SoZWU|&v03Pbgtc&hS~pCDtI;2?Uxv6Zke^Ck`97i<&WOv<>?6q!27vK zK(W_#{0;5kR9P7Y^~~VrEw{DG+jfBVLTtD=<@=H@%FvoU4xdW&w{k{bW*Pw?#1&Jo z5;a~b%k(L(9Gsi}gQ1NBrr}n%Ag7RliznCe`Igf}7A6HcL1Rkm=5jFn{#+`9kwWZ1i9&ijV-F_GEgrni|8`A*<80*mx02mx6FUo@r0;i>;!4E)>z@DY^ z^!`9P6nAz3h7Z}`@g9%FH5|O66YcA>$fONh^RtSC>#nCR7yr)2X$c<5K75T{cdhRD za3&V{(HRlgA9Wp~Z%Vm|$-)(terhYAibi4!r}6LFqxmv>lS<{5XveFF70KFKqDr}( zTm^;4*Z|OQzpIsRDd;BROL|fJ*zWAU++|~oyBp^HlpJL*J4Z0_~ z0K(FWWjdwb<}a@$yTAKL(}Ypr^>L-diF_H zq#5ckJrG%obZhKmqQ5;yY5G08W&|D-@U%H~Y^K9(Q&&fx=M7%H0q+|T_Z*biBElb4 z!2ET1V%q-0T-Oz}v<@I-U2YYyR)R}UAl0D}+-FU|&=pB$2Q24rXTAj7f 
z@S+a9i2N7MHF^_t%h7AHrTkJy_i!NiU4y0p@mftiz`7x>A4^jc-$JRY4O7Iy#G8p@ zYZnxKd+qfrhyF%cH&4$BJNgQGx&5{IL`tyfoNs?3$gwwYvoJTWBCqDMfYR0;0YVQuUohtWGGWXy(I{Q2Ip@pcuu1{F0LV&+?H8CW{B zvZ~OWP%AQ(R&$Wgs?>o9$B*aaTg$R)tF|T+@P&%98^@O^(o3HF8kZ5EVbLaP4uo08 z35`awB$zmPt|X89;PZG^6|IPI2d1?-Lr=W;dPVNBMZQKI(8k^uth6~r&1NSbKazjv z=Sdq#eiS)?mAj?>JojH>noUV)jc?SXn)Pd<1c!B(?2~> zaoJ3F+uLBRp^vk*|HY@X?19B`B~t}K*o}B17$a-j{pED&l+Ay-3EbGKmv1gry&K+d zEMLDm(68#LmF+sOVB8TIUeei?Snj;hK};esB6_VrvLC1N?$kewlC>kDW>)jMED=wUz>gX*tK#fz+F z26-|Jc`}U#uv)h9#EJD(P|_UWMZdQrEjJ|lvnwD#wM;Ay4UvcX&lEV90XnbMzX|rK zf?%1u8i?;Z4ZsX*@@;zA;0JX@Gf410`Xn1r+^xSJqi!oL*;RvYEq(}&bX#HLq@LNR z4!`DjzFWd>6S(z5hgf89j28CqMqTpRr<)G#cN9p0?RnS96jmb2 zPjavlx%sO?l**}2t`1)xxSVd^8iDR$2<$Z;$0U9is=&x%(ZSQ$OMre5Qfb+a1(O|P zGzWu~QBGK*>Wb#>gHY$-M%L*8ayTwS#il@Ia`L#KRpqJ zbQcyhkON)0{{s-9fhK-3Bys_N1V^F+f`I+X*X_{%gk@FcVD>c^4(zPqv?|2xZ}fXS za7U;<=Qp(PX2B-34rh@ksG#On5tv+9rm4*wNKE&x6H;7yN_Fe7ixgun8+kf7Q##y? zzrb7CKcj&#Dx-%hqyE7Bsw>e0762BZ_)EYEs)j2hRT>0Prl$2|hTq+14g4p_J9>i1 z6F-F&MR7k`4`9lj2@NP1EYEUep;4nxY&|KI-&WakQ0~iXVz1?3cPlDmMziDmM9#M@ zY$M$yr(S&@(oa~xC3vb!iZ^9HZOSV2N&&hODcEcL*A_`IbNv4hAaE)z@S=LaTYVlH z`nBUs8V2%mvlGlyn0Udxd_eVuUKsr0o2#I>e^&q~0DtGIrf!IdJahQw+pNKEb>kBR z@$*4idLr7MxOFL{0?-+R=}}Neexe4VbUV6*3Ds;Fek}32^FkqXiS#e(ao9iE755OX zF*rsz++X~D4c`Lc-s+b{mVrIFla}Li&=dWV-4Hng0-%*kh46A%Imjj}#oARYKgyk( z_sicZdo%wK-u%lw0lJ<~@6V}6yaQ)iHNo+06J?I!U56ET^Ioh66 zP|aH`8C_($1bjAIAhh52`WMA5AiG08{Lr*r_&$6FCtv74m1l^!F~64et@$ZX{Id&% zlP$^bI0MpLQ@=S^7PZ55tglU%PLQ`Qf}ZOVYaJ9HLs*quY50M-D;E2q`tT@={Buo(TG zO+q&Dwn&{$Sbq1|&X(H$FpU$iLmci<=xybL4ArM}ZfM0OeT?dBkLo5{cb-&0S8Pte zH)2EUMAnye#;wEv|79eh9}{eIJqZXqUvHEE?Mg7GJ<#!;R1Qdb0J(PzhCBWWXw3DC zguyeelb|QicA3covQsbYqw~$1;ktX09Wnx!lDebEBLH#X;CzR;+i>i z*qBWV%lB`W5bA_<>gMy3Fv|c)=+Ot{uY%5?uvdgo9SjX9YVbYmKbYHC@2Av3m%S>5 zaw`yK%gUL+7!&b@ceMWwXoR=)xyN!=#W?cubVPnB1}MIV_5gI0bvC-9SiY#}v#WRWV#cKJIQplqtyQF#wr_|#aqqaall0ODjFOEp0GtlJYZ6Qw+ zn==AU$Q+(1;}uQ=5;AX{tSj<{7alA3Cf`6?!g_nOh^A-}p5K6%t|^yQ5Jc1$0t7mM 
z1_R7sWGVhGgPlL@-y2XZ$n?;trA}WijOJEk`6_iVJ>&y|mH)fAX}`JY0|1{rr6=9E z9@uL&qu>`+wL3#e{9oXYuc}X2UUmUN2k(FC8$6QbOmyLtX6bvXyuU|Q03L%RxChTE ztXDkVA(%u2ZFKVGCtoNFakK*=z#=3P1##Rx(8NDG_kGAJm?~;Cm$Mw-f2f{3D-EGo zU%5b=V_KO~5A^z4AMzt?4gwENUfO`6PD&&S{?^MD_r)F-(9$aG0|Iz8<`R!)8@Q}i zXJUvj-L5?ZJcMvpjJ0`oDq!DVl!HYpV;AnN)&F44zY4(u!b%a~n+v%%P3Je&yK&t` zJ+v{C9gfXXQ4Uye)oVCB9q*l+>6uF0=VDl@FI8S z!sKvi+dV4?!1g9K3vzZGZ&W@$&$vP<^5EyjeeE#=6Y7DuFOJ=Hg_YdTUDEgO!yTZi znH+Zb(HE~)7i|`8Le_yw(N{VVTb_wlzOnLa0w8GPlR5gpcym_Do^qzz+T6hi@4ePH zYe*07%|;8nfWR`+WM6u>xtk=K9foNbne5_Y^&@*E%tp;aRmQkcbW%I!KNG?}rEXk9 zPw7n+XOoN|`?Ds~!#nQs06O6bj-0wI6o4l>EC2+u zp~!8fYdCsGE6}6|pSHHlmZ=*0*#M~p;1K}yL0S3eD0P9NVu8SF`Hce9-?;mN$(Gh; ze%rWBQ#_`Thapc&Ke%K>QE5Yh6LIqk!?u`ujUaZYNwxLgUd46*?4}-c3?cXhUF+?$ z(Z?jT1OQ3^2=sSG-stkB0t%q_74z1ZQ=vo6MWmYg`DmTvFSx8PE__hvdI@*|3emy~ zy5egcFHMm({un@O=V&C$-skk)pfRaWaV-aiPWd(K*IUBM!WKy!H%-1IDVqJj~ zP5G`cN{_5&U$aEsdV4jL3V5B1K%Oejgp<`@nMaZHSa^`xO4j&qtr;$ylbHDPAV5y1 z2T)rpfFnxk4k)eGl_4O>jwOOO8Z#p2i9tRM>;8!yn9a`$?ZLLS>X1SH_IA;> z{s61Pfv+yq4GeWt_J#uWAc^QJcC-i@Gr&}Lbt-=jXH^TVB=mmu8^isno9EGc8W8*; zRyTKYk|p(p%L2mnKFN1oSc?u*(gUMVaqj>mL8CuQ1I2>_OeQEYJ z?u2_5?FndhPGj9vj-cFI!BoGBPUvoxTBDXvemOnD)K*6=F{90F1KFz9Rw0Fb9e0Q> zYS;0cXVx{u%Br}h2s%XUv2aA7gXHt*-uW}cMd?>Uq5GNQb@i#ebzs9Yh=dwV%3xGJ zE97PW%T~;Y-#7~VyMEx4x=b%LS003Vd~3Ell;n*hvI%0ho`Ho0mDByt1VRKOhM~aN z+S4;x2a)@Dc6J2VCT0V)NO76(1dLN~M9sCY-e2ytK?3ma3bCJrwgpIAQ(HX*-HOOD zjUshYe)Ar-i`=Igi59MP?Nq6$30IUkoBZykcGN%1C3L#?CJcA|3=(W?oecP+C!&g@ zybv9ubkEg^(2JWPn3Ew7H@XdBCjba+vm-4Tw2q`}(iACgOh1A)LkkOlX`o%TIWPoY z1k2~%maPwHb@mJ%ZDbp;Cy%u09wZVsozD>K(;bp>yDpNVrS+SiO1!AghMx7F}IWWZlCC@N9Wd)wL0mr7P$VWAHV#tQ0TeOKwxW z2V3KiaqQ|nWMIF$PNMXXP^+hFEn4uKO#^c z2nl=}SMnr8@h-R5GxNBDeV|=f4@X!*?If^^QVOS#uK>L1dLuBNdC+V-?L_all*1rKTeba6b-vCXZ{KYk*uGlMZIOo zT}Rh&3XiiV^FE3_CQp3{sT~J!ucb{Fn3{bk_Jarsdbc|jy-C+DOo)w?U?fN1LJ9u+ z2kf*LvhTV3=a3;=wtEEg^{Oeq7G`AkmEPYQz;Sh~1MQT7C6m)cC}k-%mMYRRUKuhr 
z*5j=cy#HOS>~cQ|y7Ay_XZf7>>_?8a(YU(*i4r%Wj+gYmi}}PHb0CDXt=b}h)AW$lx*pmhN?)Bp3XP~48(%X1WurpR|x{a#7l_@e-kfQ zcs>7bK-@Rg$D;7^2f8!q=|I3tC@%gFdR!Ygl5ZeYk8X?vEH#k`I)g-6)y%1dQ~EMo zOBLew%j6Aky5>MCaE@VtIcUSdNfB^9UoEZ2e@TTekOPY5vZs8#{r}zt-PUd8b$~iY z(;A@po9aiAchZMKh4dCL|Njs92NWQ-N~Ds@!DgKLt^y8Cye${pl zCcOxxHxj3?M(hLmV&ZZ)?-NM^dR84U-~XlqGn{wjOuGJ0lmRXXu*O7<2XDco89=dO z#r`pqvhe*$f$}w}1bvwN4Q0^o*HM;5FlpyfSV9{rB{>yUQ{?6q!Eru`=R<`LtKjc7lrY~C~{b&kzRFY_G%=fD^y3N z(C9D%zCIA^!;%w<9QFl#pRS0$(#8h<x{rDf#X`fC?BOvorwN(2%No%fnZ0w^EY zKhRVzx6o(sw&TBH+4&fb@@~$Q)Iy$`S{s_I62QHNR%$>f8WeT+K6M#H)>KOkdPC}S zZ5Y%)U3}wCkfLQ2b-Z;KVr;)rcYT+LWW^>2#GsVEI*-u>bF=n%iy4a2(r7w$o%%jG z|IL)An@yXjQXXee6HE940L=m5x&O_DobHjJ$CMQv;HCJN+k;ndwbF;7ilYUcdDNo!&!rv4i^ zV6RqmINElTG6AU2*n3OM_GjmXh?5@ofCZelF0h6)^rz&7nv)d zicMu`88Cb&43|IxiZHSyyVIn*vt_%}vcLj|JidQSAvPwa8>VC$=sRybk!B}4W<+nh z?L-9`L=YgCORn_Q(xl=|JO&a$eLy)YE8i_VV=$OM&G)0e$`MkY?R=z@8;}A5SMk(ipjJ{lR@C5q(+VF8 z+v%jlWd;OYxJ0Ai4Rkn%m?xJtEtCQR@CGZLgWdFiaU!7BoFkQQ@<{Mybl1eS*kb?C zBk_%`n8(8UYRP*ue<#F8kCVm$=6{wwef9}i>1P)d*8J-(nB-Ymct$vXCoujFbR2)* zdC;SmspF^J&d1(rm-Yvh}fEza_>^fJHGvn`c z;4F;#(s4T=~1{@eV`A^fIw?UrgrA z%EKu80}l9vlIM7*}7=uMnHz4^<7FKWPDK$n*H0f*kF%X-CCoCcZU#Ub!5 zZ!gON&oUiWPmTy@;2sZK(Oqx9W-H!(F?s*_gA&PaTsssTeoTC0!8OsqcS)a&%dab! 
zStp*Ee-KpgHk~@ipV}Uf0UYQAj+>mnyIbF&MxzfTzD(Xhn5pL8Nm1}#zIi-xjIz@q z&hFawg@@5X!@AFD0x)6CG;Y0``wqD0O%m8tk!8Tg#TAR*w!0u)t3uu(**&9vZ!9HLAGkTa<#qO z;Ru3&D2J2)IW(L>P{HB>XpB;*PzZ$Fn|=T9cXns_?Pfy)gqG#=nBO(?zVFSOH*eld zOs0A!H&dTJCY0W$ja^Nq0Vb2l+uL^UX)=9_XPzke{tpdIrdRNR557Sa(-R;*SHCXS z;$Hke{u4<6m4PJTmVjt$z{p!A9=`p5Hnp!-<M8UR}?$oM>}mnb3r>nb3k-scl8Gz{-6uQ-*-p3UEi+k$YaTM3C=Vt-_-rx zGJb2vk2Zb=dK;wP+se}@9|P6DTSB1i=gEusgMiIrNs&t#Xg}>%70Yn=AIat48TAiy^!t39I?3Kqi_c7F$?{S*V+>C2Jw0T zut#AI9$}SH;U9HMydrjQBDAH+<1dpff|Eq93OF*7hxB#QMX2X4A%ft zKh!10Y=7wd0`;!-06ZG6Lm6Y8!^`m9d3^Id=oGF$2BqS9GKh21{jU1R_U?yn8|^+& z;Up$L!+YY=56Z@L|mcf zIOl%gxM_uVjgqe`qTNOy4&l7W{%!%k%@~GrByf^`1dRbHyFwWOx=j2JI^id_2jJJ) zUY}a(A9Wn?QwG@8Q;@xaV}vo1Q3mRnaconJp@-I#NI3LQAFXx8lGGdH(opXrFvoyA zL3=<-ww_xZV>Q+zPtL&uF*e<;>#LjgLDL17tHjSOmKS+qjHk1`j_XPt{2P66Am(t+ zA<^)A#OSlh5eM~18#CZ1-uD2VGzr^flCt=4mYXoVGw+5({N8`gPVP(Wmgeu`aLqjQZhx#n_F{)cD4HP!)fX zc+9&fot{_@?9YgA+{~NduMhl7C_iIu_OmPgMm&^-dZ#_Ho{?yWuIf{E`qD;_k#iEq z4%_7X>WQ&ol)-)BHR&+<1LD?ng1WBm2OsbVFxdORh_$+F%puATYQ3{0KKgZ6b=WUP zIhh-w4Acetvia*;2H5;8W%I!PEb;#v6jvQ~K1W?QjLB7OpY70Jy3(N$7x{87>y2Cu z?XD;01I}v+(5dk|wnP66f9IfM@}k}cKo9(eZFPF5Oj^zPQo21lhc-{_m7pY0jqwWf z5so&mv0ct9cKphgq0?ies@RA%Vt=JDcV@3+kb%!QPq1BQVxLi8jDtpeDiXQ-C+RtE zk+yl_PY1QQneO#F*klv5zgOFV@`vC(eCsI*nvy6Xv*t-m`Z)&~=r=hxHv_SSklKD`+(PcMnB~WyY(67wd4ZnGG&wWxG`P}fqjhDZEZ`m z=L57MxM zYuZoUDL(PDXfLD+&>YZxwZ*N!Lwoe4%gC?YsuDG?Ml9JT1Eb?)@W@|DWZDr2`yoCO zeGcO>`g8q^C1aeM*-n#1h0<=uUhzqo4cuzH5`QYFsm64xCGhtF5yCe49!2XaC%k5kPdm~UbNR2t>kmzS>i`mt8^p9K%xg#EP zUvP!-q_fTJ0(S4)!RuTHxLbfuYWyr2d)ng5+meI*F#HJCi|c<9?}=&R8~>cP8O~1| zL3Nfh1Kw7kQ%3w+2E7(=rQ>R!(e92`O8CaF4YL#ciO}+2AB!gexA%2hkMSNpaEJ^I&gT{jz-U-&~>H=$$EBt)M{0DP= zMt#%IFyEwNH(+LdGHTa(88Uy5^d9@FHI|80{;>?0-v>QX9q+n$4xF^1d7xa-SlQqHS$2E$|o@>Ny2Ifyd<3R3pfw87GfSYrf zM}xl?wnd!#d#np85i=k5+g!4tSIP+q+nTR(cVVkPlc>Fy#T#pAt{;ZxSY&uQ;;_g5 zM*P=+Hybn&bVq)@CcHO$58IvuvLK)K2jsn0#vLk^*qkC|-+{^dE)HG$`Gq0-EGll| zx*+aAaZ%sN%jfw-O$!3<+n!5rhN-siVCQb2G|*+xAW#;_J{Lodx1x5Qi^h8368$Xm 
zC6F}_-+0xQ$KCu6IDFwpeuzIVr&0Ri{wRoh5o>UN4$qG!oRrqUoXmAf-yIbk+n<4& z-%VV#Y1_Y{fBn!GHYSHBXG+w@ug}KryW9h~sQ;wctZOZzKRo}*@U3~~$TcT^7k{{- zE&3pXF&o~01Zq?p9(VJbz_Ifl3>@>?nZVJnO5nKUilFf+v*V9i;-T+WxNaUaA?0My zlM5y2cZ>4yT|YeUYLo9vlyx?z-mZ+m-v)F9BsR)I*_iDGJ-Mhct}_hp*7fuMb$!@W zdtjdp!fBKL{SWwjKodaEf(C=^dFG&T3x@?hz91dX4uWQa+=v)5)Fb`x(btS1XyUR) zf#Vjs2R=T(#vDP-tyTYp;FpKjEVFAUlkom3zGIoE%R~{}nzBP|rI%RLy)Hehdpy|< zZKa!(@_nAk!&cTS;646RB_2M(d-W2fPO#}xHIC=HRLs$%_A&`C(c20nq9`Slhf@g; zE_wJa#8xKSN;Jaf^=ze^t@N;!UMzt!k)_}>RfaZHr6tV6p)^M?RQKlRkwL-t=0QqR zB4%ig_4%`&)A9zCP>}tcmM3ohOFxX~7NE}<>v3vCB;6VpI%iTN`p+&HWY5Il#x#zE1;Paqsr$Amvl; zL?LVkx%1tOjSv%7AwI`9%+o3PaZTQ%L7Xd>g6`;i4C?oTe3y8x!Z{a39dJK|d28&g zX#TxXCm*D6p*#-qyImciZW7z#`a;&$u{EtbJ+Sf*(u%3-JpuFXn z0Cn1cKDD)>);Zk&B5@Cpn7A*%oI3fB{6d;wUplM*qD%?fa@HOvqJ9Zz{H^K$b;d&X z_Wdm4;Jzb?V=)$aXkuU<6zj{%G1#&U+xp#}))AAX=j`>)Yt0`}KdQDEMt&+ykvG`O zzKt~O>u`^U{FtvNG4ACUMEs@fKWg9QS^h~|Bxv19Xa2m;$5@(xvNN!>!*x?!yPUi5 z?ibj%EMYE;c{t|CnRn-Yp6-8La!(cZulOSu-6`^5$)m7ux5Z4Y0D0vq1gWKd3Vv{-oTPe`FqvdTxn)t1JGTAxP#m-DGq%Gq%=|0;DD+^)ceK+=e$Qw0FWmppXN`d*=Q@?DJdwSf<(C=AN2c59rM{FF6v>lEKg<)~AwEWXC z=|1w$T<>2XPvZ}La33(Nq0OyswRjVC zUjA0P{z~O|m`|Hx+qd>Zkx9_p80)LnO9pkbI>Su87h&qIQi?qWsWb9KCzIdzRMH#uL%Y6*)@9KWy zbLO=;=Tm-7jgf=)PQ`xFi`;Wjd99=1-BHWyszlwXY-iA(N(q@cS03)#K>{8dCvk^V zzJO44?FWm@D6ju)A$T^JE5Jgy1iR2!{7Tt zf~O|QuvJ-(x`uyHIXv*zc4yQ*bMH-BZbfd55WEw~Ut`c?UCT>* zqrCb)I_s(gE&W(}kNG=&3v>OMpuxJmTP?wt`-|H_XD}wOHcctAw0z@wMesh10r-N* z17_(0-xqNJd*&NXiT{)HY2y~~J`29jgIeAyPv`H!yV3ox6}CYvyK>0B%G`ciuH=M% za9*M_u2{l0epAUjYM&iAXOnuo7<>$nVxzqeE!}+@C)w&)3Una=iCnR zJkyYG`x>-9Xu^`l$elF8^#`Cb&>u(TtMlfez;H|B+nG;w_uzjYs2%75{SESfoq!`2 z&mPpzZl@HaRc?^kbROxH9HdhIYNI41CbAPjZh6S9@L#A(s4%I*TneZjawsG>1d|Ox z9w0AJ2q;mdBus8d0wIl&?IrR#Xnv{)<_!M0(4^6NgY0>VJJcd#fVn(pqvAE3Z9xnR zD9F=U&+@uF|H}9LJ$^=-;2dEV;~$KD)dpgCOTwqp4r6dL+E@Xq)~}2_E&=e z_-^w5eJ&4;{zg0}{%CCr@n(0N<@t6n&WQwPnblg0a~IBLScYa4wH~mub?Bv32B2EX)a?P#x*)-<@|?zXr(`tqi~M*I^zxU;F^x#BOa{8vkdrx 
z^VB>25zh)!e~bZ|%{i^=*PnasXNci9F)zS4mh&0UfDohFOS8=wYF4xZ7P&bzh7J78#80EuHWF* zLid&{%Q6E~k4wmmIovyfE-I8foX@zvQZgfM>Hgey@f-2n`@YdL9QwOSO372b*PXXS zZNMLDU_X#?K6S$NV~5rFdqSK6Z8vq7bWfOlz&B=kmAp3EAJy#j;)=n0ZF48*ZQI-( z{9%D~p0?G3Ga?y1Vy5@dxQ!+B+81pcM{Hd*cz2mO`e22GWt(Ma+85Gy((4!f5@sg* zMo({NtmAM!=3n)?ZZ>xg%Cf8-npyn)usxUa`fe+|HYho3ZI^&?eY;0at#Jf~y|^iO z*s}fG Date: Fri, 16 Jan 2026 15:39:25 +0100 Subject: [PATCH 24/56] wip update docs --- README.md | 290 +----------------------------------------- docs/cli.md | 3 + docs/get_started.md | 40 ++++++ docs/installation.md | 26 ++++ docs/use_in_python.md | 9 ++ mkdocs.yaml | 7 +- 6 files changed, 89 insertions(+), 286 deletions(-) create mode 100644 docs/cli.md create mode 100644 docs/get_started.md create mode 100644 docs/installation.md create mode 100644 docs/use_in_python.md diff --git a/README.md b/README.md index dc1f4b982..557275233 100644 --- a/README.md +++ b/README.md @@ -8,294 +8,16 @@ # bioimageio.core -Python specific core utilities for bioimage.io resources (in particular DL models). - -## Get started - -To get started we recommend installing bioimageio.core with conda together with a deep -learning framework, e.g. pytorch, and run a few `bioimageio` commands to see what -bioimage.core has to offer: - -1. install with conda (for more details on conda environments, [checkout the conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)) - - ```console - conda install -c conda-forge bioimageio.core pytorch - ``` - -1. test a model - - ```console - bioimageio test powerful-chipmunk - ``` - -
- (Click to expand output) - - ```console - - - ✔️ bioimageio validation passed - ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - source https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/powerful-chipmunk/1/files/rdf.yaml - format version model 0.4.10 - bioimageio.spec 0.5.3post4 - bioimageio.core 0.6.8 - - - - ❓ location detail - ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - ✔️ initialized ModelDescr to describe model 0.4.10 - - ✔️ bioimageio.spec format validation model 0.4.10 - 🔍 context.perform_io_checks True - 🔍 context.root https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/powerful-chipmunk/1/files - 🔍 context.known_files.weights.pt 3bd9c518c8473f1e35abb7624f82f3aa92f1015e66fb1f6a9d08444e1f2f5698 - 🔍 context.known_files.weights-torchscript.pt 4e568fd81c0ffa06ce13061327c3f673e1bac808891135badd3b0fcdacee086b - 🔍 context.warning_level error - - ✔️ Reproduce test outputs from test inputs - - ✔️ Reproduce test outputs from test inputs - ``` - -
- - or - - ```console - bioimageio test impartial-shrimp - ``` - -
(Click to expand output) - - ```console - ✔️ bioimageio validation passed - ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - source https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/impartial-shrimp/1.1/files/rdf.yaml - format version model 0.5.3 - bioimageio.spec 0.5.3.2 - bioimageio.core 0.6.9 - - - ❓ location detail - ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - ✔️ initialized ModelDescr to describe model 0.5.3 - - - ✔️ bioimageio.spec format validation model 0.5.3 - - 🔍 context.perform_io_checks False - 🔍 context.warning_level error - - ✔️ Reproduce test outputs from test inputs (pytorch_state_dict) - - - ✔️ Run pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: - - 0 - - ✔️ Run pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: - - 0 - - ✔️ Run pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: - - 1 - - ✔️ Run pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: - - 1 - - ✔️ Run pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: - - 2 - - ✔️ Run pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: - - 2 - - ✔️ Reproduce test outputs from test inputs (torchscript) - - - ✔️ Run torchscript inference for inputs with batch_size: 1 and size parameter n: 0 - - - ✔️ Run torchscript inference for inputs with batch_size: 2 and size parameter n: 0 - - - ✔️ Run torchscript inference for inputs with batch_size: 1 and size parameter n: 1 - - - ✔️ Run torchscript inference for inputs with batch_size: 2 and size parameter n: 1 - - - ✔️ Run torchscript inference for inputs with batch_size: 1 and size parameter n: 2 - - - ✔️ Run torchscript inference for inputs with batch_size: 2 and size parameter n: 2 - ``` - -
-1. run prediction on your data - -- display the `bioimageio-predict` command help to get an overview: - - ```console - bioimageio predict --help - ``` - -
- (Click to expand output) - - ```console - usage: bioimageio predict [-h] [--inputs Sequence[Union[str,Annotated[Tuple[str,...],MinLenmin_length=1]]]] - [--outputs {str,Tuple[str,...]}] [--overwrite bool] [--blockwise bool] [--stats Path] - [--preview bool] - [--weight_format {typing.Literal['keras_hdf5','onnx','pytorch_state_dict','tensorflow_js','tensorflow_saved_model_bundle','torchscript'],any}] - [--example bool] - SOURCE - - bioimageio-predict - Run inference on your data with a bioimage.io model. - - positional arguments: - SOURCE Url/path to a bioimageio.yaml/rdf.yaml file - or a bioimage.io resource identifier, e.g. 'affable-shark' - - optional arguments: - -h, --help show this help message and exit - --inputs Sequence[Union[str,Annotated[Tuple[str,...],MinLen(min_length=1)]]] - Model input sample paths (for each input tensor) - - The input paths are expected to have shape... - - (n_samples,) or (n_samples,1) for models expecting a single input tensor - - (n_samples,) containing the substring '{input_id}', or - - (n_samples, n_model_inputs) to provide each input tensor path explicitly. - - All substrings that are replaced by metadata from the model description: - - '{model_id}' - - '{input_id}' - - Example inputs to process sample 'a' and 'b' - for a model expecting a 'raw' and a 'mask' input tensor: - --inputs="[["a_raw.tif","a_mask.tif"],["b_raw.tif","b_mask.tif"]]" - (Note that JSON double quotes need to be escaped.) - - Alternatively a `bioimageio-cli.yaml` (or `bioimageio-cli.json`) file - may provide the arguments, e.g.: - ```yaml - inputs: - - [a_raw.tif, a_mask.tif] - - [b_raw.tif, b_mask.tif] - ``` - - `.npy` and any file extension supported by imageio are supported. - Aavailable formats are listed at - https://imageio.readthedocs.io/en/stable/formats/index.html#all-formats. - Some formats have additional dependencies. 
- -   (default: ('{input_id}/001.tif',)) - --outputs {str,Tuple[str,...]} - Model output path pattern (per output tensor) - - All substrings that are replaced: - - '{model_id}' (from model description) - - '{output_id}' (from model description) - - '{sample_id}' (extracted from input paths) - -   (default: outputs_{model_id}/{output_id}/{sample_id}.tif) - --overwrite bool allow overwriting existing output files (default: False) - --blockwise bool process inputs blockwise (default: False) - --stats Path path to dataset statistics - (will be written if it does not exist, - but the model requires statistical dataset measures) -   (default: dataset_statistics.json) - --preview bool preview which files would be processed - and what outputs would be generated. (default: False) - --weight_format {typing.Literal['keras_hdf5','onnx','pytorch_state_dict','tensorflow_js','tensorflow_saved_model_bundle','torchscript'],any} - The weight format to use. (default: any) - --example bool generate and run an example - - 1. downloads example model inputs - 2. creates a `{model_id}_example` folder - 3. writes input arguments to `{model_id}_example/bioimageio-cli.yaml` - 4. executes a preview dry-run - 5. executes prediction with example input - -   (default: False) - ``` - -
- -- create an example and run prediction locally! - - ```console - bioimageio predict impartial-shrimp --example=True - ``` - -
- (Click to expand output) - - ```console - 🛈 bioimageio prediction preview structure: - {'{sample_id}': {'inputs': {'{input_id}': ''}, - 'outputs': {'{output_id}': ''}}} - 🔎 bioimageio prediction preview output: - {'1': {'inputs': {'input0': 'impartial-shrimp_example/input0/001.tif'}, - 'outputs': {'output0': 'impartial-shrimp_example/outputs/output0/1.tif'}}} - predict with impartial-shrimp: 100%|███████████████████████████████████████████████████| 1/1 [00:21<00:00, 21.76s/sample] - 🎉 Sucessfully ran example prediction! - To predict the example input using the CLI example config file impartial-shrimp_example\bioimageio-cli.yaml, execute `bioimageio predict` from impartial-shrimp_example: - $ cd impartial-shrimp_example - $ bioimageio predict "impartial-shrimp" - - Alternatively run the following command in the current workind directory, not the example folder: - $ bioimageio predict --preview=False --overwrite=True --stats="impartial-shrimp_example/dataset_statistics.json" --inputs="[[\"impartial-shrimp_example/input0/001.tif\"]]" --outputs="impartial-shrimp_example/outputs/{output_id}/{sample_id}.tif" "impartial-shrimp" - (note that a local 'bioimageio-cli.json' or 'bioimageio-cli.yaml' may interfere with this) - ``` - -
- -## Installation - -### Via Conda - -The `bioimageio.core` package can be installed from conda-forge via - -```console -conda install -c conda-forge bioimageio.core -``` - -If you do not install any additional deep learning libraries, you will only be able to use general convenience -functionality, but not any functionality depending on model prediction. -To install additional deep learning libraries add `pytorch`, `onnxruntime`, `keras` or `tensorflow`. - -Deeplearning frameworks to consider installing alongside `bioimageio.core`: - -- [Pytorch/Torchscript](https://pytorch.org/get-started/locally/) -- [TensorFlow](https://www.tensorflow.org/install) -- [ONNXRuntime](https://onnxruntime.ai/docs/install/#python-installs) - -### Via pip - -The package is also available via pip -(e.g. with recommended extras `onnx` and `pytorch`): - -```console -pip install "bioimageio.core[onnx,pytorch]" -``` - -## 🐍 Use in Python - -`bioimageio.core` is a python package that implements prediction with bioimageio models +`bioimageio.core` is a python package that implements prediction with bioimage.io models including standardized pre- and postprocessing operations. -These models are described by---and can be loaded with---the bioimageio.spec package. +Such models are represented as [bioimageio.spec](https://bioimage-io.github.io/spec-bioimage-io) resource descriptions. -In addition bioimageio.core provides functionality to convert model weight formats. +In addition bioimageio.core provides functionality to convert model weight formats +and compute selected dataset statistics used for preprocessing. 
-### Documentation +## Documentation -[Here you find the bioimageio.core documentation.](https://bioimage-io.github.io/core-bioimage-io-python/bioimageio/core.html) +[Here you find the bioimageio.core documentation.](https://bioimage-io.github.io/core-bioimage-io-python) #### Presentations diff --git a/docs/cli.md b/docs/cli.md new file mode 100644 index 000000000..b39b8250d --- /dev/null +++ b/docs/cli.md @@ -0,0 +1,3 @@ +```console exec="1" +bioimageio --help +``` diff --git a/docs/get_started.md b/docs/get_started.md new file mode 100644 index 000000000..972f1fc10 --- /dev/null +++ b/docs/get_started.md @@ -0,0 +1,40 @@ +To get started we recommend installing bioimageio.core with conda together with a deep +learning framework, e.g. pytorch, and run a few `bioimageio` commands to see what +bioimage.core has to offer: + +1. Install with conda (for more details on conda environments, [checkout the conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)). +More details and pip instructions are [here](installation.md). + + ```console + conda install -c conda-forge bioimageio.core pytorch + ``` + +1. Get an overview of available commands + + ```console exec="1" + bioimageio --help + ``` + +1. Test a model + + ```console exec="1" + bioimageio test affable-shark + ``` + + To test your model replace the already published 'affabl-shark' with a local folder or path to a bioimageio.yaml file. + Check out the [bioimageio.spec documentation](https://bioimage-io.github.io/spec-bioimage-io) for more information + on the bioimage.io metadata description format. + +1. Run prediction on your data + +- Display the `bioimageio predict` command help to get an overview: + + ```console exec="1" + bioimageio predict --help + ``` + +- create an example and run prediction locally! 
+ + ```console exec="1" + bioimageio predict affable-shark --example=True + ``` diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 000000000..2194f523d --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,26 @@ +## Via Conda + +The `bioimageio.core` package can be installed from conda-forge via + +```console +conda install -c conda-forge bioimageio.core +``` + +If you do not install any additional deep learning libraries, you will only be able to use general convenience +functionality, but not any functionality depending on model prediction. +To install additional deep learning libraries add `pytorch`, `onnxruntime`, `keras` or `tensorflow`. + +Deeplearning frameworks to consider installing alongside `bioimageio.core`: + +- [Pytorch/Torchscript](https://pytorch.org/get-started/locally/) +- [TensorFlow](https://www.tensorflow.org/install) +- [ONNXRuntime](https://onnxruntime.ai/docs/install/#python-installs) + +## Via pip + +The package is also available via pip +(e.g. with recommended extras `onnx` and `pytorch`): + +```console +pip install "bioimageio.core[onnx,pytorch]" +``` diff --git a/docs/use_in_python.md b/docs/use_in_python.md new file mode 100644 index 000000000..bd102a987 --- /dev/null +++ b/docs/use_in_python.md @@ -0,0 +1,9 @@ +Here you can find recommendations for using bioimageio.core in your Python package or scripts. + +See [API reference](reference/index.html) for details beyond this brief orientation. + +## Run inference + +The [bioimageio.core.predict][] and [predict_many][bioimageio.core.predict_many] aim to provide an easy-to-use interface +to run inference with a bioimage.io model. +See [Compatibility](compatibility.md) for a list of compatible models or browser the Model Zoo at [https://bioimage.io](https://bioimage.io). 
diff --git a/mkdocs.yaml b/mkdocs.yaml index ed97e53b7..1a037e562 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -82,8 +82,7 @@ plugins: python: inventories: - https://docs.pydantic.dev/latest/objects.inv - - https://bioimage-io.github.io/spec-bioimage-io/latest/objects.inv - - https://bioimage-io.github.io/spec-bioimage-io/dev/objects.inv + - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.0/objects.inv options: annotations_path: source backlinks: tree @@ -162,6 +161,10 @@ markdown_extensions: nav: - Home: - index.md + - Get started: get_started.md + - Installation: installation.md + - bioimageio CLI: cli.md + - Use in Python: use_in_python.md - Compatibility: compatibility.md - API Reference: reference/ - Changelog: changelog.md From d999b79bee0f37b01d23e4014c8c4957a768b554 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 28 Jan 2026 16:08:11 +0100 Subject: [PATCH 25/56] remove main block in main file --- src/bioimageio/core/__main__.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/bioimageio/core/__main__.py b/src/bioimageio/core/__main__.py index 123b6a9c9..3c48e6b21 100644 --- a/src/bioimageio/core/__main__.py +++ b/src/bioimageio/core/__main__.py @@ -16,10 +16,4 @@ from .cli import Bioimageio - -def main(): - _ = CliApp.run(Bioimageio) - - -if __name__ == "__main__": - main() +_ = CliApp.run(Bioimageio) From b9a878d02f43575d6cf9b0e367b2e39bcad5a861 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 28 Jan 2026 17:11:21 +0100 Subject: [PATCH 26/56] use markdown-exec[ansi] --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9359682eb..29e6883e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,7 +82,7 @@ dev = [ ] docs = [ "markdown-callouts", - "markdown-exec", + "markdown-exec[ansi]", "markdown-pycon", "mike", "mkdocs-api-autonav", From 630bfa9edcd403a87f517f8f5792495ca38a5c94 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 29 Jan 2026 14:33:16 
+0100 Subject: [PATCH 27/56] update docs --- .gitignore | 1 + README.md | 51 +++------- docs/cli.md | 20 ++++ docs/get_started.md | 31 ++++-- docs/installation.md | 2 +- docs/use_in_python.md | 2 +- mkdocs.yaml | 11 ++- scripts/generate_api_doc_pages.py | 51 +++++----- src/bioimageio/core/__init__.py | 154 +++++++++++------------------- src/bioimageio/core/cli.py | 17 +++- src/bioimageio/core/commands.py | 5 +- 11 files changed, 169 insertions(+), 176 deletions(-) diff --git a/.gitignore b/.gitignore index 3cb4fc7f2..a4991c3cd 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ build/ cache coverage.xml dist/ +docs/api/ dogfood/ pkgs/ site/ diff --git a/README.md b/README.md index 557275233..102bcb8cc 100644 --- a/README.md +++ b/README.md @@ -19,49 +19,28 @@ and compute selected dataset statistics used for preprocessing. [Here you find the bioimageio.core documentation.](https://bioimage-io.github.io/core-bioimage-io-python) -#### Presentations - -- [Create a model from scratch](https://bioimage-io.github.io/core-bioimage-io-python/presentations/create_ambitious_sloth.slides.html) ([source](https://github.com/bioimage-io/core-bioimage-io-python/tree/main/presentations)) - #### Examples -
-
Notebooks that save and load resource descriptions and validate their format (using bioimageio.spec, a dependency of bioimageio.core)
-
load_model_and_create_your_own.ipynb +Notebooks that save and load resource descriptions and validate their format (using bioimageio.spec, a dependency of bioimageio.core) +
-
dataset_creation.ipynb + +
  • dataset_creation.ipynb Open In Colab -
  • -
    Use the described resources in Python with bioimageio.core
    -
    model_usage.ipynb - Open In Colab -
    - -## 💻 Use the Command Line Interface + + -`bioimageio.core` installs a command line interface (CLI) for testing models and other functionality. -You can list all the available commands via: - -```console -bioimageio -``` - -For examples see [Get started](#get-started). - -### CLI inputs from file +Use the described resources in Python with bioimageio.core + -For convenience the command line options (not arguments) may be given in a `bioimageio-cli.json` -or `bioimageio-cli.yaml` file, e.g.: +#### Presentations -```yaml -# bioimageio-cli.yaml -inputs: inputs/*_{tensor_id}.h5 -outputs: outputs_{model_id}/{sample_id}_{tensor_id}.h5 -overwrite: true -blockwise: true -stats: inputs/dataset_statistics.json -``` +- [Create a model from scratch](https://bioimage-io.github.io/core-bioimage-io-python/presentations/create_ambitious_sloth.slides.html) ([source](https://github.com/bioimage-io/core-bioimage-io-python/tree/main/presentations)) ## Set up Development Environment diff --git a/docs/cli.md b/docs/cli.md index b39b8250d..49012b6a5 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -1,3 +1,23 @@ +## bioimageio Command Line Interface + +`bioimageio.core` installs a command line interface (CLI) for testing models and other functionality. +You can list all the available commands via: + ```console exec="1" bioimageio --help ``` + +For concrete examples see [Get started](get-started.md). + +### CLI inputs from file + +For convenience the command line options (not arguments) may be given in a `bioimageio-cli.json` or `bioimageio-cli.yaml` file, e.g.: + +```yaml +# bioimageio-cli.yaml +inputs: inputs/*_{tensor_id}.h5 +outputs: outputs_{model_id}/{sample_id}_{tensor_id}.h5 +overwrite: true +blockwise: true +stats: inputs/dataset_statistics.json +``` diff --git a/docs/get_started.md b/docs/get_started.md index 972f1fc10..a783d7353 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -3,7 +3,8 @@ learning framework, e.g. 
pytorch, and run a few `bioimageio` commands to see wha bioimage.core has to offer: 1. Install with conda (for more details on conda environments, [checkout the conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)). -More details and pip instructions are [here](installation.md). +Recommended third party deep learning libraries to install alongside bioimageio.core and +[pip installation instructions are here](installation.md). ```console conda install -c conda-forge bioimageio.core pytorch @@ -11,30 +12,46 @@ More details and pip instructions are [here](installation.md). 1. Get an overview of available commands - ```console exec="1" + ```bash exec="1" source="console" result="ansi" bioimageio --help ``` 1. Test a model - ```console exec="1" + ```bash exec="1" source="console" result="ansi" bioimageio test affable-shark ``` - To test your model replace the already published 'affabl-shark' with a local folder or path to a bioimageio.yaml file. + To test your model replace the already published model identifier + 'affabl-shark' with a local folder or path to a bioimageio.yaml file. Check out the [bioimageio.spec documentation](https://bioimage-io.github.io/spec-bioimage-io) for more information on the bioimage.io metadata description format. + The Python equivalent would be: + + ```python exec="1" souce="console" + from bioimageio.core import test_description + + summary = test_description("affable-shark") + summary.display() + ``` + 1. Run prediction on your data - Display the `bioimageio predict` command help to get an overview: - ```console exec="1" + ```bash exec="1" source="console" result="ansi" bioimageio predict --help ``` - create an example and run prediction locally! - ```console exec="1" - bioimageio predict affable-shark --example=True + ```bash exec="1" source="console" result="ansi" + bioimageio predict affable-shark --example ``` + +1. 
For model inference from within Python these options are available:
+
+    - [bioimageio.core.predict][] to run inference on a single sample/image
+    - [bioimageio.core.predict_many][] to run inference on a set of samples
+    - [bioimageio.core.create_prediction_pipeline][] for reusing the instantiated model and more fine-grained control over the inference process; this function creates a suitable [bioimageio.core.PredictionPipeline][] for more advanced use.
diff --git a/docs/installation.md b/docs/installation.md
index 2194f523d..2cb30e563 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -7,7 +7,7 @@ conda install -c conda-forge bioimageio.core
 ```
 
 If you do not install any additional deep learning libraries, you will only be able to use general convenience
-functionality, but not any functionality depending on model prediction.
+functionality, but model inference will be unavailable.
 To install additional deep learning libraries add `pytorch`, `onnxruntime`, `keras` or `tensorflow`.
 
 Deeplearning frameworks to consider installing alongside `bioimageio.core`:
diff --git a/docs/use_in_python.md b/docs/use_in_python.md
index bd102a987..a70360ba0 100644
--- a/docs/use_in_python.md
+++ b/docs/use_in_python.md
@@ -1,6 +1,6 @@
 Here you can find recommendations for using bioimageio.core in your Python package or scripts.
 
-See [API reference](reference/index.html) for details beyond this brief orientation.
+See [API reference](api/index.html) for details beyond this brief orientation.
 
## Run inference diff --git a/mkdocs.yaml b/mkdocs.yaml index 1a037e562..85d6ba587 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -73,7 +73,11 @@ plugins: - autorefs - coverage: html_report_dir: dist/coverage - - markdown-exec + - gen-files: + scripts: + - scripts/generate_api_doc_pages.py + - markdown-exec: + ansi: required - mkdocstrings: enable_inventory: true default_handler: python @@ -107,8 +111,9 @@ plugins: show_root_full_path: false show_root_heading: true show_signature_annotations: true + show_signature_type_parameters: true show_source: true - show_submodules: true + show_submodules: false show_symbol_type_heading: true show_symbol_type_toc: true signature_crossrefs: true @@ -166,7 +171,7 @@ nav: - bioimageio CLI: cli.md - Use in Python: use_in_python.md - Compatibility: compatibility.md - - API Reference: reference/ + - API Reference: api/ - Changelog: changelog.md - Coverage report: coverage.md diff --git a/scripts/generate_api_doc_pages.py b/scripts/generate_api_doc_pages.py index 872b9b292..becf0f37e 100644 --- a/scripts/generate_api_doc_pages.py +++ b/scripts/generate_api_doc_pages.py @@ -16,8 +16,8 @@ for path in sorted(src.rglob("*.py")): module_path = path.relative_to(src).with_suffix("") - doc_path = path.relative_to(src).with_suffix(".md") - full_doc_path = Path("reference", doc_path) + nav_path = path.relative_to(src).with_suffix(".md") + full_doc_path = Path("api", nav_path) parts = tuple(module_path.parts) @@ -33,7 +33,7 @@ if parts[-1] == "__init__": parts = parts[:-1] - doc_path = doc_path.with_name("index.md") + nav_path = nav_path.with_name("index.md") full_doc_path = full_doc_path.with_name("index.md") elif parts[-1] == "__main__": continue @@ -43,26 +43,29 @@ # Build a flat nav for API Reference: one entry for bioimageio.core and # one entry per top-level submodule under bioimageio.core. No subsections. 
- if parts[0:2] == ("bioimageio", "core"): - if len(parts) == 2: - # Landing page for bioimageio.core at reference/index.md - full_doc_path = Path("reference", "index.md") - doc_path = Path("index.md") - if "bioimageio.core" not in added_nav_labels: - nav[("bioimageio.core",)] = doc_path.as_posix() - added_nav_labels.add("bioimageio.core") - else: - # Top-level submodule/package directly under bioimageio.core - top = parts[2] - if top not in added_nav_labels: - pkg_init = src / "bioimageio" / "core" / top / "__init__.py" - if pkg_init.exists(): - nav_target = Path("bioimageio") / "core" / top / "index.md" - else: - nav_target = Path("bioimageio") / "core" / f"{top}.md" - - nav[(top,)] = nav_target.as_posix() - added_nav_labels.add(top) + assert parts[0:2] == ("bioimageio", "core") + if len(parts) == 2: + # Landing page for bioimageio.core at api/index.md + full_doc_path = Path("api", "index.md") + nav_target = Path("index.md") + module_name = ".".join(parts) + if module_name not in added_nav_labels: + nav[(module_name,)] = nav_target.as_posix() + added_nav_labels.add(module_name) + + else: + # Top-level submodule/package directly under bioimageio.x + top = ".".join(parts[:3]) + + if top not in added_nav_labels: + pkg_init = src / "/".join(parts) / "__init__.py" + if pkg_init.exists(): + nav_target = Path("/".join(parts[:3])) / "index.md" + else: + nav_target = Path("/".join(parts[:2])) / f"{parts[2]}.md" + + nav[(top,)] = nav_target.as_posix() + added_nav_labels.add(top) with mkdocs_gen_files.open(full_doc_path, "w") as fd: # Reconstruct the full identifier from the original module_path @@ -74,5 +77,5 @@ mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) -with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: +with mkdocs_gen_files.open("api/SUMMARY.md", "w") as nav_file: nav_file.writelines(nav.build_literate_nav()) diff --git a/src/bioimageio/core/__init__.py b/src/bioimageio/core/__init__.py index 78f43d466..ccbbf0d4e 100644 --- 
a/src/bioimageio/core/__init__.py +++ b/src/bioimageio/core/__init__.py @@ -21,55 +21,65 @@ logger.disable("bioimageio.core") -from bioimageio.spec import ( - ValidationSummary, - build_description, - dump_description, - load_dataset_description, - load_description, - load_description_and_validate_format_only, - load_model_description, - save_bioimageio_package, - save_bioimageio_package_as_folder, - save_bioimageio_yaml_only, - validate_format, -) +import bioimageio.spec -from . import ( - axis, - block_meta, - cli, - commands, - common, - digest_spec, - io, - model_adapters, - prediction, - proc_ops, - proc_setup, - sample, - stat_calculators, - stat_measures, - tensor, +from . import axis as axis +from . import backends as backends +from . import block_meta as block_meta +from . import cli as cli +from . import commands as commands +from . import common as common +from . import digest_spec as digest_spec +from . import io as io +from . import model_adapters as model_adapters +from . import prediction as prediction +from . import proc_ops as proc_ops +from . import proc_setup as proc_setup +from . import sample as sample +from . import stat_calculators as stat_calculators +from . import stat_measures as stat_measures +from . import tensor as tensor +from . 
import weight_converters as weight_converters +from ._prediction_pipeline import PredictionPipeline as PredictionPipeline +from ._prediction_pipeline import ( + create_prediction_pipeline as create_prediction_pipeline, ) -from ._prediction_pipeline import PredictionPipeline, create_prediction_pipeline -from ._resource_tests import ( - enable_determinism, - load_description_and_test, - test_description, - test_model, +from ._resource_tests import enable_determinism as enable_determinism +from ._resource_tests import load_description_and_test as load_description_and_test +from ._resource_tests import test_description as test_description +from ._resource_tests import test_model as test_model +from ._settings import Settings as Settings +from ._settings import settings as settings + +# reexports from bioimageio.spec +build_description = bioimageio.spec.build_description +dump_description = bioimageio.spec.dump_description +load_dataset_description = bioimageio.spec.load_dataset_description +load_description = bioimageio.spec.load_description +load_description_and_validate_format_only = ( + bioimageio.spec.load_description_and_validate_format_only ) -from ._settings import settings -from .axis import Axis, AxisId -from .backends import create_model_adapter -from .block_meta import BlockMeta -from .common import MemberId -from .prediction import predict, predict_many -from .sample import Sample -from .stat_calculators import compute_dataset_measures -from .stat_measures import Stat -from .tensor import Tensor -from .weight_converters import add_weights +load_model_description = bioimageio.spec.load_model_description +save_bioimageio_package = bioimageio.spec.save_bioimageio_package +save_bioimageio_package_as_folder = bioimageio.spec.save_bioimageio_package_as_folder +save_bioimageio_yaml_only = bioimageio.spec.save_bioimageio_yaml_only +validate_format = bioimageio.spec.validate_format +ValidationSummary = bioimageio.spec.ValidationSummary + + +# reexports from 
bioimageio.core submodules +add_weights = weight_converters.add_weights +Axis = axis.Axis +AxisId = axis.AxisId +BlockMeta = block_meta.BlockMeta +compute_dataset_measures = stat_calculators.compute_dataset_measures +create_model_adapter = backends.create_model_adapter +MemberId = common.MemberId +predict = prediction.predict +predict_many = prediction.predict_many +Sample = sample.Sample +Stat = stat_measures.Stat +Tensor = tensor.Tensor # aliases test_resource = test_description @@ -78,55 +88,3 @@ """alias of `load_description`""" load_model = load_model_description """alias of `load_model_description`""" - -__all__ = [ - "__version__", - "add_weights", - "axis", - "Axis", - "AxisId", - "block_meta", - "BlockMeta", - "build_description", - "cli", - "commands", - "common", - "compute_dataset_measures", - "create_model_adapter", - "create_prediction_pipeline", - "digest_spec", - "dump_description", - "enable_determinism", - "io", - "load_dataset_description", - "load_description_and_test", - "load_description_and_validate_format_only", - "load_description", - "load_model_description", - "load_model", - "load_resource", - "MemberId", - "model_adapters", - "predict_many", - "predict", - "prediction", - "PredictionPipeline", - "proc_ops", - "proc_setup", - "sample", - "Sample", - "save_bioimageio_package_as_folder", - "save_bioimageio_package", - "save_bioimageio_yaml_only", - "settings", - "stat_calculators", - "stat_measures", - "Stat", - "tensor", - "Tensor", - "test_description", - "test_model", - "test_resource", - "validate_format", - "ValidationSummary", -] diff --git a/src/bioimageio/core/cli.py b/src/bioimageio/core/cli.py index 13aa6e21c..7aab7fb07 100644 --- a/src/bioimageio/core/cli.py +++ b/src/bioimageio/core/cli.py @@ -33,7 +33,14 @@ import rich.markdown from loguru import logger -from pydantic import AliasChoices, BaseModel, Field, PlainSerializer, model_validator +from pydantic import ( + AliasChoices, + BaseModel, + Field, + PlainSerializer, + 
WithJsonSchema, + model_validator, +) from pydantic_settings import ( BaseSettings, CliApp, @@ -452,9 +459,11 @@ class PredictCmd(CmdBase, WithSource): blockwise: bool = False """process inputs blockwise""" - stats: Annotated[Path, PlainSerializer(lambda p: p.as_posix())] = Path( - "dataset_statistics.json" - ) + stats: Annotated[ + Path, + WithJsonSchema({"type": "string"}), + PlainSerializer(lambda p: p.as_posix(), return_type=str), + ] = Path("dataset_statistics.json") """path to dataset statistics (will be written if it does not exist, but the model requires statistical dataset measures) diff --git a/src/bioimageio/core/commands.py b/src/bioimageio/core/commands.py index 1a391f177..697ec3b61 100644 --- a/src/bioimageio/core/commands.py +++ b/src/bioimageio/core/commands.py @@ -4,6 +4,8 @@ from pathlib import Path from typing import Optional, Sequence, Union +from typing_extensions import Literal + from bioimageio.spec import ( InvalidDescr, ResourceDescr, @@ -11,7 +13,6 @@ save_bioimageio_package_as_folder, ) from bioimageio.spec._internal.types import FormatVersionPlaceholder -from typing_extensions import Literal from ._resource_tests import test_description @@ -54,7 +55,7 @@ def test( ) -> int: """Test a bioimageio resource. 
- Arguments as described in `bioimageio.core.cli.TestCmd` + Arguments as described in [bioimageio.core.cli.TestCmd][] """ if isinstance(descr, InvalidDescr): test_summary = descr.validation_summary From 48c9abff3e5a749e5e70df8de86a6b81ef92f751 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 29 Jan 2026 17:17:50 +0100 Subject: [PATCH 28/56] update block illustration --- src/bioimageio/core/block_meta.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/bioimageio/core/block_meta.py b/src/bioimageio/core/block_meta.py index 688463dec..f2849e02c 100644 --- a/src/bioimageio/core/block_meta.py +++ b/src/bioimageio/core/block_meta.py @@ -51,24 +51,25 @@ class BlockMeta: The inner slice (thin) is expanded by a halo in both dimensions on both sides. The outer slice reaches from the sample member origin (0, 0) to the right halo point. - ```terminal + ``` + first block (at the sample origin) ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┐ ╷ halo(left) ╷ - ╷ ╷ + ╷ padding outside the sample ╷ ╷ (0, 0)┏━━━━━━━━━━━━━━━━━┯━━━━━━━━━┯━━━➔ ╷ ┃ │ ╷ sample member - ╷ ┃ inner │ ╷ - ╷ ┃ (and outer) │ outer ╷ - ╷ ┃ slice │ slice ╷ + ╷ ┃ inner │ outer ╷ + ╷ ┃ region │ region ╷ + ╷ ┃ /slice │ /slice ╷ ╷ ┃ │ ╷ ╷ ┣─────────────────┘ ╷ - ╷ ┃ outer slice ╷ + ╷ ┃ outer region/slice ╷ ╷ ┃ halo(right) ╷ └ ─ ─ ─ ─┃─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘ ⬇ ``` - note: + Note: - Inner and outer slices are specified in sample member coordinates. - The outer_slice of a block at the sample edge may overlap by more than the halo with the neighboring block (the inner slices will not overlap though). @@ -178,7 +179,7 @@ def tagged_shape(self) -> PerAxis[int]: return self.shape @property - def inner_slice_wo_overlap(self): + def inner_slice_wo_overlap(self) -> PerAxis[SliceInfo]: """subslice of the inner slice, such that all `inner_slice_wo_overlap` can be stiched together trivially to form the original sample. 
From 04b6a19f355dd5d340491d7022cd62a96315583f Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 29 Jan 2026 17:52:59 +0100 Subject: [PATCH 29/56] fix conda recipe --- conda-recipe/meta.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index a69a4e142..2552cf93d 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -40,7 +40,7 @@ requirements: {% endif %} {% endfor %} {% for dep in pyproject['project']['optional-dependencies']['onnx'] %} - - {{ dep.lower() }} + - {{ dep.replace(';python_version<"3.10"', '').lower().replace('_', '-') }} {% endfor %} {% for dep in pyproject['project']['optional-dependencies']['tensorflow'] %} - {{ dep.lower() }} @@ -55,7 +55,7 @@ test: requires: {% for dep in pyproject['project']['optional-dependencies']['dev'] %} {% if 'torch' not in dep %} # can't install pytorch>=2.8 from conda-forge smh - - {{ dep.lower().replace('onnx_ir!=0.1.14;python_version<"3.10"', 'onnx-ir!=0.1.14').replace('_', '-') }} + - {{ dep.replace(';python_version<"3.10"', '').lower().replace('_', '-') }} {% endif %} {% endfor %} commands: From e8d72fbb2b3fc28867b127c4ace7866d18f01958 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 30 Jan 2026 15:04:52 +0100 Subject: [PATCH 30/56] remove limited h5 support --- docs/cli.md | 4 +- pyproject.toml | 3 +- src/bioimageio/core/backends/keras_backend.py | 3 +- src/bioimageio/core/io.py | 174 +++--------------- tests/test_io.py | 2 - tests/test_prediction.py | 2 +- 6 files changed, 33 insertions(+), 155 deletions(-) diff --git a/docs/cli.md b/docs/cli.md index 49012b6a5..746eb8a85 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -15,8 +15,8 @@ For convenience the command line options (not arguments) may be given in a `bioi ```yaml # bioimageio-cli.yaml -inputs: inputs/*_{tensor_id}.h5 -outputs: outputs_{model_id}/{sample_id}_{tensor_id}.h5 +inputs: inputs/*_{tensor_id}.tiff +outputs: outputs_{model_id}/{sample_id}_{tensor_id}.tiff overwrite: 
true blockwise: true stats: inputs/dataset_statistics.json diff --git a/pyproject.toml b/pyproject.toml index 29e6883e1..0f6df02c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,6 @@ readme = "README.md" dynamic = ["version"] dependencies = [ "bioimageio.spec ==0.5.7.0", - "h5py", "imagecodecs", "imageio>=2.10", "loguru", @@ -46,7 +45,7 @@ onnx = [ 'onnx_ir!=0.1.14;python_version<"3.10"', # uses typing.Concatentate which requires py>=3.10 ] pytorch = ["torch>=1.6,<3", "torchvision>=0.21", "keras>=3.0,<4"] -tensorflow = ["tensorflow", "keras>=2.15,<4"] +tensorflow = ["tensorflow", "keras>=2.15,<4", "h5py"] partners = [ # "biapy", # pins core exactly "careamics", diff --git a/src/bioimageio/core/backends/keras_backend.py b/src/bioimageio/core/backends/keras_backend.py index b11fb7181..a2e95d3e6 100644 --- a/src/bioimageio/core/backends/keras_backend.py +++ b/src/bioimageio/core/backends/keras_backend.py @@ -4,7 +4,6 @@ from tempfile import TemporaryDirectory from typing import Any, Optional, Sequence, Union -import h5py # pyright: ignore[reportMissingTypeStubs] from keras.src.legacy.saving import ( # pyright: ignore[reportMissingTypeStubs] legacy_h5_format, ) @@ -76,6 +75,8 @@ def __init__( weight_reader = model_description.weights.keras_hdf5.get_reader() if weight_reader.suffix in (".h5", "hdf5"): + import h5py # pyright: ignore[reportMissingTypeStubs] + h5_file = h5py.File(weight_reader, mode="r") self._network = legacy_h5_format.load_model_from_hdf5(h5_file) else: diff --git a/src/bioimageio/core/io.py b/src/bioimageio/core/io.py index 55a87bdad..d0b18e8fe 100644 --- a/src/bioimageio/core/io.py +++ b/src/bioimageio/core/io.py @@ -8,17 +8,14 @@ Mapping, Optional, Sequence, - Tuple, TypeVar, Union, ) -import h5py # pyright: ignore[reportMissingTypeStubs] from imageio.v3 import imread, imwrite # type: ignore from loguru import logger from numpy.typing import NDArray from pydantic import BaseModel, ConfigDict, TypeAdapter -from typing_extensions import 
assert_never from bioimageio.spec._internal.io import get_reader, interprete_file_source from bioimageio.spec._internal.type_guards import is_ndarray @@ -32,17 +29,12 @@ ) from bioimageio.spec.utils import download, load_array, save_array -from .axis import AxisLike +from .axis import AxisId, AxisLike from .common import PerMember from .sample import Sample from .stat_measures import DatasetMeasure, MeasureValue from .tensor import Tensor -DEFAULT_H5_DATASET_PATH = "data" - - -SUFFIXES_WITH_DATAPATH = (".h5", ".hdf", ".hdf5") - def load_image( source: Union[ZipPath, PermissiveFileSource], is_volume: Optional[bool] = None @@ -62,51 +54,14 @@ def load_image( parsed_source = interprete_file_source(source) if isinstance(parsed_source, RelativeFilePath): - src = parsed_source.absolute() - else: - src = parsed_source - - if isinstance(src, Path): - file_source, suffix, subpath = _split_dataset_path(src) - elif isinstance(src, HttpUrl): - file_source, suffix, subpath = _split_dataset_path(src) - elif isinstance(src, ZipPath): - file_source, suffix, subpath = _split_dataset_path(src) - else: - assert_never(src) - - if suffix == ".npy": - if subpath is not None: - logger.warning( - "Unexpected subpath {} for .npy source {}", subpath, file_source - ) - - image = load_array(file_source) - elif suffix in SUFFIXES_WITH_DATAPATH: - if subpath is None: - dataset_path = DEFAULT_H5_DATASET_PATH - else: - dataset_path = str(subpath) - - reader = download(file_source) + parsed_source = parsed_source.absolute() - with h5py.File(reader, "r") as f: - h5_dataset = f.get( # pyright: ignore[reportUnknownVariableType] - dataset_path - ) - if not isinstance(h5_dataset, h5py.Dataset): - raise ValueError( - f"{file_source} did not load as {h5py.Dataset}, but has type " - + str( - type(h5_dataset) # pyright: ignore[reportUnknownArgumentType] - ) - ) - image: NDArray[Any] - image = h5_dataset[:] # pyright: ignore[reportUnknownVariableType] + if parsed_source.suffix == ".npy": + image = 
load_array(parsed_source) else: - reader = download(file_source) + reader = download(parsed_source) image = imread( # pyright: ignore[reportUnknownVariableType] - reader.read(), extension=suffix + reader.read(), extension=parsed_source.suffix ) assert is_ndarray(image) @@ -127,62 +82,6 @@ def load_tensor( Suffix = str -def _split_dataset_path( - source: _SourceT, -) -> Tuple[_SourceT, Suffix, Optional[PurePosixPath]]: - """Split off subpath (e.g. internal h5 dataset path) - from a file path following a file extension. - - Examples: - >>> _split_dataset_path(Path("my_file.h5/dataset")) - (...Path('my_file.h5'), '.h5', PurePosixPath('dataset')) - - >>> _split_dataset_path(Path("my_plain_file")) - (...Path('my_plain_file'), '', None) - - """ - if isinstance(source, RelativeFilePath): - src = source.absolute() - else: - src = source - - del source - - def separate_pure_path(path: PurePosixPath): - for p in path.parents: - if p.suffix in SUFFIXES_WITH_DATAPATH: - return p, p.suffix, PurePosixPath(path.relative_to(p)) - - return path, path.suffix, None - - if isinstance(src, HttpUrl): - file_path, suffix, data_path = separate_pure_path(PurePosixPath(src.path or "")) - - if data_path is None: - return src, suffix, None - - return ( - HttpUrl(str(file_path).replace(f"/{data_path}", "")), - suffix, - data_path, - ) - - if isinstance(src, ZipPath): - file_path, suffix, data_path = separate_pure_path(PurePosixPath(str(src))) - - if data_path is None: - return src, suffix, None - - return ( - ZipPath(str(file_path).replace(f"/{data_path}", "")), - suffix, - data_path, - ) - - file_path, suffix, data_path = separate_pure_path(PurePosixPath(src)) - return Path(file_path), suffix, data_path - - def save_tensor(path: Union[Path, str], tensor: Tensor) -> None: # TODO: save axis meta data @@ -190,32 +89,26 @@ def save_tensor(path: Union[Path, str], tensor: Tensor) -> None: tensor.data.to_numpy() ) assert is_ndarray(data) - file_path, suffix, subpath = _split_dataset_path(Path(path)) 
- if not suffix: + path = Path(path) + if not path.suffix: raise ValueError(f"No suffix (needed to decide file format) found in {path}") - file_path.parent.mkdir(exist_ok=True, parents=True) - if file_path.suffix == ".npy": - if subpath is not None: - raise ValueError(f"Unexpected subpath {subpath} found in .npy path {path}") - save_array(file_path, data) - elif suffix in (".h5", ".hdf", ".hdf5"): - if subpath is None: - dataset_path = DEFAULT_H5_DATASET_PATH - else: - dataset_path = str(subpath) - - with h5py.File(file_path, "a") as f: - if dataset_path in f: - del f[dataset_path] - - _ = f.create_dataset(dataset_path, data=data, chunks=True) + extension = path.suffix.lower() + path.parent.mkdir(exist_ok=True, parents=True) + if extension == ".npy": + save_array(path, data) + elif extension in (".h5", ".hdf", ".hdf5"): + raise NotImplementedError("Saving to h5 with dataset path is not implemented.") else: - # if singleton_axes := [a for a, s in tensor.tagged_shape.items() if s == 1]: - # tensor = tensor[{a: 0 for a in singleton_axes}] - # singleton_axes_msg = f"(without singleton axes {singleton_axes}) " - # else: - singleton_axes_msg = "" + if ( + extension in (".tif", ".tiff") + and tensor.tagged_shape.get(ba := AxisId("batch")) == 1 + ): + # remove singleton batch axis for saving + tensor = tensor[{ba: 0}] + singleton_axes_msg = f"(without singleton batch axes) " + else: + singleton_axes_msg = "" logger.debug( "writing tensor {} {}to {}", @@ -223,7 +116,7 @@ def save_tensor(path: Union[Path, str], tensor: Tensor) -> None: singleton_axes_msg, path, ) - imwrite(path, data) + imwrite(path, data, extension=extension) def save_sample( @@ -309,19 +202,6 @@ def ensure_unzipped( return out_path -def get_suffix(source: Union[ZipPath, FileSource]) -> str: - if isinstance(source, Path): - return source.suffix - elif isinstance(source, ZipPath): - return source.suffix - if isinstance(source, RelativeFilePath): - return source.path.suffix - elif isinstance(source, ZipPath): 
- return source.suffix - elif isinstance(source, HttpUrl): - if source.path is None: - return "" - else: - return PurePosixPath(source.path).suffix - else: - assert_never(source) +def get_suffix(source: Union[ZipPath, FileSource]) -> Suffix: + """DEPRECATED: use source.suffix instead.""" + return source.suffix diff --git a/tests/test_io.py b/tests/test_io.py index a45dfe51a..7fd7005b2 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -10,8 +10,6 @@ [ "img.png", "img.tiff", - "img.h5", - "img.h5/img", "img.npy", ], ) diff --git a/tests/test_prediction.py b/tests/test_prediction.py index 04779158d..61ef8642a 100644 --- a/tests/test_prediction.py +++ b/tests/test_prediction.py @@ -117,7 +117,7 @@ def test_predict_with_fixed_blocking(prep: Prep): def test_predict_save_output(prep: Prep, tmp_path: Path): - save_path = tmp_path / "{member_id}_{sample_id}.h5" + save_path = tmp_path / "{member_id}_{sample_id}.tiff" out = predict( model=prep.prediction_pipeline, inputs=prep.input_sample, From fcc9929d4ee427d1731ad28870ff88e42ebebac0 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 30 Jan 2026 15:45:31 +0100 Subject: [PATCH 31/56] improve console doc examples formatting --- docs/cli.md | 2 +- docs/get_started.md | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/cli.md b/docs/cli.md index 746eb8a85..3db09484d 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -3,7 +3,7 @@ `bioimageio.core` installs a command line interface (CLI) for testing models and other functionality. You can list all the available commands via: -```console exec="1" +```bash exec="1" source="console" result="ansi" width="200" bioimageio --help ``` diff --git a/docs/get_started.md b/docs/get_started.md index a783d7353..69c0ec81f 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -12,13 +12,13 @@ Recommended third party deep learning libraries to install alongside bioimageio. 1. 
Get an overview of available commands - ```bash exec="1" source="console" result="ansi" + ```bash exec="1" source="console" result="ansi" width="200" bioimageio --help ``` 1. Test a model - ```bash exec="1" source="console" result="ansi" + ```bash exec="1" source="console" result="ansi" width="200" bioimageio test affable-shark ``` @@ -29,7 +29,7 @@ Recommended third party deep learning libraries to install alongside bioimageio. The Python equivalent would be: - ```python exec="1" souce="console" + ```python exec="1" source="console" width="300" from bioimageio.core import test_description summary = test_description("affable-shark") summary.display() ``` @@ -40,13 +40,13 @@ Recommended third party deep learning libraries to install alongside bioimageio. - Display the `bioimageio predict` command help to get an overview: - ```bash exec="1" source="console" result="ansi" + ```bash exec="1" source="console" result="ansi" width="200" bioimageio predict --help ``` - create an example and run prediction locally! - ```bash exec="1" source="console" result="ansi" + ```bash exec="1" source="console" result="ansi" width="200" bioimageio predict affable-shark --example ``` From 3a21380292cc7b0b1b95d4e2026448cf96e50ed3 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 30 Jan 2026 15:54:11 +0100 Subject: [PATCH 32/56] remove unused import --- src/bioimageio/core/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bioimageio/core/io.py b/src/bioimageio/core/io.py index d0b18e8fe..6f6d8deb5 100644 --- a/src/bioimageio/core/io.py +++ b/src/bioimageio/core/io.py @@ -1,7 +1,7 @@ import collections.abc import warnings import zipfile -from pathlib import Path, PurePosixPath +from pathlib import Path from shutil import copyfileobj from typing import ( Any, From 96f09032407043aa3378ee98a50748a38381ad6c Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 3 Feb 2026 09:54:01 +0100 Subject: [PATCH 33/56] rename test --- tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/tests/test_io.py b/tests/test_io.py index 7fd7005b2..3ff166936 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -23,7 +23,7 @@ (5, 3, 4), ], ) -def test_image_io(name: str, shape: Tuple[int, ...], tmp_path: Path): +def test_tensor_io(name: str, shape: Tuple[int, ...], tmp_path: Path): from bioimageio.core import Tensor from bioimageio.core.io import load_tensor, save_tensor From eb535e49499af565ec60709b3401c7dfeb7c67ed Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 3 Feb 2026 11:14:38 +0100 Subject: [PATCH 34/56] add conda env name explicitly in test summary --- src/bioimageio/core/_resource_tests.py | 33 +++++++++++++++++++------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/src/bioimageio/core/_resource_tests.py b/src/bioimageio/core/_resource_tests.py index c581d9eed..090e554a9 100644 --- a/src/bioimageio/core/_resource_tests.py +++ b/src/bioimageio/core/_resource_tests.py @@ -260,14 +260,15 @@ def test_description( else: assert_never(runtime_env) - try: - run_command(["thiscommandshouldalwaysfail", "please"]) - except Exception: - pass - else: - raise RuntimeError( - "given run_command does not raise an exception for a failing command" - ) + if run_command is not default_run_command: + try: + run_command(["thiscommandshouldalwaysfail", "please"]) + except Exception: + pass + else: + raise RuntimeError( + "given run_command does not raise an exception for a failing command" + ) td_kwargs: Dict[str, Any] = ( dict(ignore_cleanup_errors=True) if sys.version_info >= (3, 10) else {} @@ -447,6 +448,22 @@ def _test_in_env( ) ) return + else: + descr.validation_summary.add_detail( + ValidationDetail( + name=f"Created conda environment '{env_name}'", + status="passed", + loc=test_loc, + ) + ) + else: + descr.validation_summary.add_detail( + ValidationDetail( + name=f"Found existing conda environment '{env_name}'", + status="passed", + loc=test_loc, + ) + ) working_dir.mkdir(parents=True, exist_ok=True) summary_path = working_dir / 
"summary.json" From 13c3362820efe37415e0ffb6b0383d0689f680f5 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 3 Feb 2026 15:24:53 +0100 Subject: [PATCH 35/56] add working_dir arg --- src/bioimageio/core/_resource_tests.py | 22 ++++++++++++++++------ src/bioimageio/core/cli.py | 4 ++++ src/bioimageio/core/commands.py | 2 ++ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/src/bioimageio/core/_resource_tests.py b/src/bioimageio/core/_resource_tests.py index 090e554a9..2c59ac997 100644 --- a/src/bioimageio/core/_resource_tests.py +++ b/src/bioimageio/core/_resource_tests.py @@ -4,6 +4,7 @@ import subprocess import sys import warnings +from contextlib import nullcontext from io import StringIO from itertools import product from pathlib import Path @@ -211,6 +212,7 @@ def test_description( Literal["currently-active", "as-described"], Path, BioimageioCondaEnv ] = ("currently-active"), run_command: Callable[[Sequence[str]], None] = default_run_command, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> ValidationSummary: """Test a bioimage.io resource dynamically, @@ -236,6 +238,9 @@ def test_description( run_command: (Experimental feature!) Function to execute (conda) terminal commands in a subprocess. The function should raise an exception if the command fails. **run_command** is ignored if **runtime_env** is `"currently-active"`. + working_dir: (for debugging) directory to save any temporary files + (model packages, conda environments, test summaries). + Defaults to a temporary directory. 
""" if runtime_env == "currently-active": rd = load_description_and_test( @@ -270,10 +275,15 @@ def test_description( "given run_command does not raise an exception for a failing command" ) - td_kwargs: Dict[str, Any] = ( - dict(ignore_cleanup_errors=True) if sys.version_info >= (3, 10) else {} - ) - with TemporaryDirectory(**td_kwargs) as _d: + if working_dir is None: + td_kwargs: Dict[str, Any] = ( + dict(ignore_cleanup_errors=True) if sys.version_info >= (3, 10) else {} + ) + working_dir_ctxt = TemporaryDirectory(**td_kwargs) + else: + working_dir_ctxt = nullcontext(working_dir) + + with working_dir_ctxt as _d: working_dir = Path(_d) if isinstance(source, ResourceDescrBase): @@ -392,7 +402,7 @@ def _test_in_env( test_loc = () - # remove name as we crate a name based on the env description hash value + # remove name as we create a name based on the env description hash value conda_env.name = None dumped_env = conda_env.model_dump(mode="json", exclude_none=True) @@ -517,7 +527,7 @@ def _test_in_env( # add relevant details from command summary command_summary = ValidationSummary.load_json(summary_path) for detail in command_summary.details: - if detail.loc[: len(test_loc)] == test_loc: + if detail.loc[: len(test_loc)] == test_loc or detail.status == "failed": descr.validation_summary.add_detail(detail) diff --git a/src/bioimageio/core/cli.py b/src/bioimageio/core/cli.py index 7aab7fb07..1c97db670 100644 --- a/src/bioimageio/core/cli.py +++ b/src/bioimageio/core/cli.py @@ -204,6 +204,9 @@ class TestCmd(CmdBase, WithSource, WithSummaryLogging): Note: The `bioimageio.core` dependency will be added automatically if not present. 
""" + working_dir: Optional[Path] = Field(None, alias="working-dir") + """(for debugging) Directory to save any temporary files.""" + determinism: Literal["seed_only", "full"] = "seed_only" """Modes to improve reproducibility of test outputs.""" @@ -231,6 +234,7 @@ def cli_cmd(self): runtime_env=self.runtime_env, determinism=self.determinism, format_version=self.format_version, + working_dir=self.working_dir, ) ) diff --git a/src/bioimageio/core/commands.py b/src/bioimageio/core/commands.py index 697ec3b61..025f177ba 100644 --- a/src/bioimageio/core/commands.py +++ b/src/bioimageio/core/commands.py @@ -52,6 +52,7 @@ def test( ] = "currently-active", determinism: Literal["seed_only", "full"] = "seed_only", format_version: Union[FormatVersionPlaceholder, str] = "discover", + working_dir: Optional[Path] = None, ) -> int: """Test a bioimageio resource. @@ -67,6 +68,7 @@ def test( devices=[devices] if isinstance(devices, str) else devices, runtime_env=runtime_env, determinism=determinism, + working_dir=working_dir, ) _ = test_summary.log(summary) From 30a4f0c0c76d1f569a2187d8e3d83931cb96ba91 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 3 Feb 2026 15:25:23 +0100 Subject: [PATCH 36/56] improve get-started docs --- docs/get_started.md | 145 +++++++++++++++++++++++++++++++------------ docs/installation.md | 10 ++- 2 files changed, 113 insertions(+), 42 deletions(-) diff --git a/docs/get_started.md b/docs/get_started.md index 69c0ec81f..258933409 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -1,57 +1,122 @@ -To get started we recommend installing bioimageio.core with conda together with a deep -learning framework, e.g. pytorch, and run a few `bioimageio` commands to see what -bioimage.core has to offer: +## Finding a compatible Python environment -1. Install with conda (for more details on conda environments, [checkout the conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)). 
-Recommended third party deep learning libraries to install alongside bioimageio.core and -[pip installation instructions are here](installation.md). +For model inference you need a Python environment with the `bioimageio.core` package and model (framework) specific dependencies installed. +You may choose to install `bioimageio.core` alongside (a) suitable framework(s) as optional dependencies with pip, e.g.: - ```console - conda install -c conda-forge bioimageio.core pytorch - ``` +```bash +pip install bioimageio.core[pytorch,onnx] +``` -1. Get an overview of available commands +If you are not sure which framework you want to use this model with or the model comes with custom dependencies, +you may choose to have the bioimageio Command Line Interface (CLI) create a suitable environment for a specific model, +using [mini-forge](https://github.com/conda-forge/miniforge) (or your favorite conda distribution). +For more details on conda environments, [checkout the conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). +First create/use any conda environment with `bioimageio.core>0.9.6` in it: - ```bash exec="1" source="console" result="ansi" width="200" - bioimageio --help - ``` +```bash +conda create -n bioimageio -c conda-forge "bioimageio.core>0.9.6" +conda activate bioimageio +``` -1. Test a model +Choose a model source, e.g. a bioimage.io model id like "affable-shark" or a path/url to a bioimageio.yaml (often named rdf.yaml). +Then use the bioimageio CLI (or [bioimageio.core.test_description][]) to test the model. +Use runtime-env=as-described to test each available weight format in the recommended conda environment that is installed on the fly if necessary: - ```bash exec="1" source="console" result="ansi" width="200" - bioimageio test affable-shark - ``` + - To test your model replace the already published model identifier - 'affabl-shark' with a local folder or path to a bioimageio.yaml file. 
- Check out the [bioimageio.spec documentation](https://bioimage-io.github.io/spec-bioimage-io) for more information - on the bioimage.io metadata description format. +```bash +bioimageio test affable-shark --runtime-env=as-described +``` - The Python equivalent would be: +The resulting report shows details of the tests performed in the respective conda environments. +Inspecting the report, choose a conda environment that passed all tests. +The conda environments will be named by the SHA-256 value of the generated conda environment.yaml, e.g. "95227f474ca45b024cf315edb4101e4919199d0a79ef5ff1eb474dc8ce1ec4d8". - ```python exec="1" souce="console" width="300" - from bioimageio.core import test_description +You may want to rename or clone your chosen conda environment: - summary = test_description("affable-shark") - summary.display() - ``` +```bash +conda activate base +conda rename -n 95227f474ca45b024cf315edb4101e4919199d0a79ef5ff1eb474dc8ce1ec4d8 bioimageio-affable-shark +conda activate bioimageio-affable-shark +``` -1. Run prediction on your data +## Test model+environment -- Display the `bioimageio predict` command help to get an overview: +Test a bioimageio compatible model, e.g. "affable-shark" in an active Python environment: - ```bash exec="1" source="console" result="ansi" width="200" - bioimageio predict --help - ``` +```bash exec="1" source="console" result="ansi" width="200" +bioimageio test affable-shark +``` - create an example and run prediction locally! +To test your model replace the already published model identifier 'affable-shark' with a local folder or path to a bioimageio.yaml file. +Check out the [bioimageio.spec documentation](https://bioimage-io.github.io/spec-bioimage-io) for more information on the bioimage.io metadata description format. - ```bash exec="1" source="console" result="ansi" width="200" - bioimageio predict affable-shark --example - ``` +The Python equivalent would be: -1. 
For model inference from within Python these options are available: +```python exec="1" souce="console" result="ansi" width="300" +from bioimageio.core import test_description - - [bioimageio.core.predict][] to run inference on a single sample/image - - [bioimageio.core.predict_many][] to run inference on a set of samples - - [bioimageio.core.create_prediction_pipeline][] for reusing the instatiated model and more fine-grain control over the inference process this function creates a suitable [bioimageio.core.PredictionPipeline][] for more advanced use. +summary = test_description("affable-shark") +summary.display() +``` + +## CLI: bioimageio predict + +You can use the `bioimageio` Command Line Interface (CLI) provided by the `bioimageio.core` package to run prediction with a bioimageio compatible model in a [suitable Python environment](#finding-a-compatible-python-environment). + +```bash exec="1" source="console" result="ansi" width="200" +bioimageio predict --help +``` + +Create a local example and run prediction locally: + +```bash exec="1" source="console" result="ansi" width="200" +bioimageio predict affable-shark --example +``` + +## Python: bioimageio.core.predict + +Here is a code snippet to get started deploying a model in Python using the test sample provided by the model description: + +```python +from bioimageio.core import load_model_description, predict +from bioimageio.core.digest_spec import get_test_input_sample + +model_descr = load_model_description("") +input_sample = get_test_input_sample(model_descr) +output_sample = predict(model=model_descr, inputs=input_sample) +``` + +### Python: predict your own data + +```python +from bioimageio.core.digest_spec import create_sample_for_model + +input_sample = create_sample_for_model( + model_descr, + inputs={{"raw": ""}} +) +output_sample = predict(model=model_descr, inputs=input_sample) +``` + +### Python: prediction options + +For model inference from within Python these options are available: + +- 
[bioimageio.core.predict][] to run inference on a single sample/image. +- [bioimageio.core.predict_many][] to run inference on a set of samples. +- [bioimageio.core.create_prediction_pipeline][] for reusing the instatiated model and more fine-grain control over the inference process this function creates a suitable [bioimageio.core.PredictionPipeline][] for more advanced use. + +## Other bioimageio.core functionality + +### CLI: bioimageio commands + +To get an overview of available commands: + +```bash exec="1" source="console" result="ansi" width="200" +bioimageio --help +``` + +### Python: API docs + +See [bioimageio.core][]. diff --git a/docs/installation.md b/docs/installation.md index 2cb30e563..6aff16487 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -6,7 +6,7 @@ The `bioimageio.core` package can be installed from conda-forge via conda install -c conda-forge bioimageio.core ``` -If you do not install any additional deep learning libraries, you will only be able to use general convenience +If you do not install any additional deep learning (DL) libraries, you will only be able to use general convenience functionality, but model inference will be unavailable. To install additional deep learning libraries add `pytorch`, `onnxruntime`, `keras` or `tensorflow`. @@ -16,11 +16,17 @@ Deeplearning frameworks to consider installing alongside `bioimageio.core`: - [TensorFlow](https://www.tensorflow.org/install) - [ONNXRuntime](https://onnxruntime.ai/docs/install/#python-installs) +Example for installing bioimageio.core via conda with additional DL frameworks: + +```console +conda install -c conda-forge bioimageio.core pytorch torchvision onnxruntime +``` + ## Via pip The package is also available via pip (e.g. 
with recommended extras `onnx` and `pytorch`): ```console -pip install "bioimageio.core[onnx,pytorch]" +pip install "bioimageio.core[pytorch,onnx]" ``` From a0f6cc265b484282207c5ba4377436be81b18c23 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 9 Feb 2026 14:09:08 +0100 Subject: [PATCH 37/56] unify quantile variable names and add sample quantile method --- changelog.md | 5 +- src/bioimageio/core/common.py | 24 ++++ src/bioimageio/core/proc_ops.py | 155 +++++++++++++++++++----- src/bioimageio/core/stat_calculators.py | 91 +++++++------- src/bioimageio/core/stat_measures.py | 18 +-- src/bioimageio/core/tensor.py | 9 +- tests/test_proc_ops.py | 49 +++++++- tests/test_stat_measures.py | 15 ++- 8 files changed, 277 insertions(+), 89 deletions(-) diff --git a/changelog.md b/changelog.md index 8172bb40b..6ec7cab15 100644 --- a/changelog.md +++ b/changelog.md @@ -1,7 +1,10 @@ ### 0.9.6 -- bump bioimageio.spec library version to 0.5.6.0 +- bump bioimageio.spec library version to 0.5.7.1 - increase default reprducibility tolerance +- unify quantile (vs percentile) variable names +- add quantile computation method parameter +- accept `SampleQuantile` or `DatasetQuantile` as `min`/`max` arguments to `proc_ops.Clip` ### 0.9.5 diff --git a/src/bioimageio/core/common.py b/src/bioimageio/core/common.py index 9f939061c..2d9426512 100644 --- a/src/bioimageio/core/common.py +++ b/src/bioimageio/core/common.py @@ -25,6 +25,30 @@ "torchscript", ] +QuantileMethod = Literal[ + "inverted_cdf", + # "averaged_inverted_cdf", + # "closest_observation", + # "interpolated_inverted_cdf", + # "hazen", + # "weibull", + "linear", + # "median_unbiased", + # "normal_unbiased", +] +"""Methods to use when the desired quantile lies between two data points. +See https://numpy.org/devdocs/reference/generated/numpy.quantile.html#numpy-quantile for details. 
+ +Note: + Only relevant for `SampleQuantile` measures, as `DatasetQuantile` measures computed by [bioimageio.core.stat_calculators.][] are approximations (and use the "linear" method for each sample quantiles) + +!!! warning + Limited choices to map more easily to bioimageio.spec descriptions. + Current implementations: + - [bioimageio.spec.model.v0_5.ClipKwargs][] implies "inverted_cdf" for sample quantiles and "linear" (numpy's default) for dataset quantiles. + - [bioimageio.spec.model.v0_5.ScaleRangeKwargs][] implies "linear" (numpy's default) + +""" DTypeStr = Literal[ "bool", diff --git a/src/bioimageio/core/proc_ops.py b/src/bioimageio/core/proc_ops.py index 65c4975aa..37657dd86 100644 --- a/src/bioimageio/core/proc_ops.py +++ b/src/bioimageio/core/proc_ops.py @@ -1,6 +1,7 @@ import collections.abc from abc import ABC, abstractmethod from dataclasses import InitVar, dataclass, field +from functools import partial from typing import ( Collection, Literal, @@ -32,7 +33,7 @@ from .stat_measures import ( DatasetMean, DatasetMeasure, - DatasetPercentile, + DatasetQuantile, DatasetStd, MeanMeasure, Measure, @@ -230,19 +231,73 @@ def from_proc_descr( @dataclass class Clip(_SimpleOperator): - min: Optional[float] = None + min: Optional[Union[float, SampleQuantile, DatasetQuantile]] = None """minimum value for clipping""" - max: Optional[float] = None + max: Optional[Union[float, SampleQuantile, DatasetQuantile]] = None """maximum value for clipping""" def __post_init__(self): - assert self.min is not None or self.max is not None, "missing min or max value" - assert self.min is None or self.max is None or self.min < self.max, ( - f"expected min < max, but {self.min} !< {self.max}" - ) + if self.min is None and self.max is None: + raise ValueError("missing min or max value") + + if ( + isinstance(self.min, float) + and isinstance(self.max, float) + and self.min >= self.max + ): + raise ValueError(f"expected min < max, but {self.min} >= {self.max}") + + if 
isinstance(self.min, (SampleQuantile, DatasetQuantile)) and isinstance( + self.max, (SampleQuantile, DatasetQuantile) + ): + if self.min.axes != self.max.axes: + raise NotImplementedError( + f"expected min and max quantiles with same axes, but got {self.min.axes} and {self.max.axes}" + ) + if self.min.q >= self.max.q: + raise ValueError( + f"expected min quantile < max quantile, but {self.min.q} >= {self.max.q}" + ) + + @property + def required_measures(self): + return { + arg + for arg in (self.min, self.max) + if isinstance(arg, (SampleQuantile, DatasetQuantile)) + } def _apply(self, x: Tensor, stat: Stat) -> Tensor: - return x.clip(self.min, self.max) + if isinstance(self.min, (SampleQuantile, DatasetQuantile)): + min_value = stat[self.min] + if isinstance(min_value, (int, float)): + # use clip for scalar value + min_clip_arg = min_value + else: + # clip does not support non-scalar values + x = Tensor.from_xarray( + x.data.where(x.data >= min_value.data, min_value.data) + ) + min_clip_arg = None + else: + min_clip_arg = self.min + + if isinstance(self.max, (SampleQuantile, DatasetQuantile)): + max_value = stat[self.max] + if isinstance(max_value, (int, float)): + # use clip for scalar value + max_clip_arg = max_value + else: + # clip does not support non-scalar values + x = Tensor.from_xarray(x.data.where(x.data <= max_value, max_value)) + max_clip_arg = None + else: + max_clip_arg = self.max + + if min_clip_arg is not None or max_clip_arg is not None: + x = x.clip(min_clip_arg, max_clip_arg) + + return x def get_output_shape( self, input_shape: Mapping[AxisId, int] @@ -253,11 +308,48 @@ def get_output_shape( def from_proc_descr( cls, descr: Union[v0_4.ClipDescr, v0_5.ClipDescr], member_id: MemberId ) -> Self: + if isinstance(descr, v0_5.ClipDescr): + dataset_mode, axes = _get_axes(descr.kwargs) + if dataset_mode: + Quantile = DatasetQuantile + else: + Quantile = SampleQuantile + + if descr.kwargs.min is not None: + min_arg = descr.kwargs.min + elif 
descr.kwargs.min_percentile is not None: + min_arg = Quantile( + q=descr.kwargs.min_percentile / 100, + axes=axes, + member_id=member_id, + method="inverted_cdf", + ) + else: + min_arg = None + + if descr.kwargs.max is not None: + max_arg = descr.kwargs.max + elif descr.kwargs.max_percentile is not None: + max_arg = Quantile( + q=descr.kwargs.max_percentile / 100, + axes=axes, + member_id=member_id, + method="inverted_cdf", + ) + else: + max_arg = None + + elif isinstance(descr, v0_4.ClipDescr): + min_arg = descr.kwargs.min + max_arg = descr.kwargs.max + else: + assert_never(descr) + return cls( input=member_id, output=member_id, - min=descr.kwargs.min, - max=descr.kwargs.max, + min=min_arg, + max=max_arg, ) @@ -404,6 +496,7 @@ def _get_axes( v0_5.ScaleRangeKwargs, v0_4.ScaleMeanVarianceKwargs, v0_5.ScaleMeanVarianceKwargs, + v0_5.ClipKwargs, ], ) -> Tuple[bool, Optional[Tuple[AxisId, ...]]]: if kwargs.axes is None: @@ -420,28 +513,28 @@ def _get_axes( @dataclass class ScaleRange(_SimpleOperator): - lower_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None - upper_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None - lower: Union[SampleQuantile, DatasetPercentile] = field(init=False) - upper: Union[SampleQuantile, DatasetPercentile] = field(init=False) + lower_quantile: InitVar[Optional[Union[SampleQuantile, DatasetQuantile]]] = None + upper_quantile: InitVar[Optional[Union[SampleQuantile, DatasetQuantile]]] = None + lower: Union[SampleQuantile, DatasetQuantile] = field(init=False) + upper: Union[SampleQuantile, DatasetQuantile] = field(init=False) eps: float = 1e-6 def __post_init__( self, - lower_percentile: Optional[Union[SampleQuantile, DatasetPercentile]], - upper_percentile: Optional[Union[SampleQuantile, DatasetPercentile]], + lower_quantile: Optional[Union[SampleQuantile, DatasetQuantile]], + upper_quantile: Optional[Union[SampleQuantile, DatasetQuantile]], ): - if lower_percentile is None: - tid = 
self.input if upper_percentile is None else upper_percentile.member_id - self.lower = DatasetPercentile(q=0.0, member_id=tid) + if lower_quantile is None: + tid = self.input if upper_quantile is None else upper_quantile.member_id + self.lower = DatasetQuantile(q=0.0, member_id=tid) else: - self.lower = lower_percentile + self.lower = lower_quantile - if upper_percentile is None: - self.upper = DatasetPercentile(q=1.0, member_id=self.lower.member_id) + if upper_quantile is None: + self.upper = DatasetQuantile(q=1.0, member_id=self.lower.member_id) else: - self.upper = upper_percentile + self.upper = upper_quantile assert self.lower.member_id == self.upper.member_id assert self.lower.q < self.upper.q @@ -470,18 +563,22 @@ def from_proc_descr( ) dataset_mode, axes = _get_axes(descr.kwargs) if dataset_mode: - Percentile = DatasetPercentile + Quantile = DatasetQuantile else: - Percentile = SampleQuantile + Quantile = partial(SampleQuantile, method="linear") return cls( input=member_id, output=member_id, - lower_percentile=Percentile( - q=kwargs.min_percentile / 100, axes=axes, member_id=ref_tensor + lower_quantile=Quantile( + q=kwargs.min_percentile / 100, + axes=axes, + member_id=ref_tensor, ), - upper_percentile=Percentile( - q=kwargs.max_percentile / 100, axes=axes, member_id=ref_tensor + upper_quantile=Quantile( + q=kwargs.max_percentile / 100, + axes=axes, + member_id=ref_tensor, ), ) diff --git a/src/bioimageio/core/stat_calculators.py b/src/bioimageio/core/stat_calculators.py index c9ae2d838..069cb69d3 100644 --- a/src/bioimageio/core/stat_calculators.py +++ b/src/bioimageio/core/stat_calculators.py @@ -22,19 +22,20 @@ import numpy as np import xarray as xr -from bioimageio.spec.model.v0_5 import BATCH_AXIS_ID from loguru import logger from numpy.typing import NDArray from typing_extensions import assert_never +from bioimageio.spec.model.v0_5 import BATCH_AXIS_ID + from .axis import AxisId, PerAxis -from .common import MemberId +from .common import MemberId, 
QuantileMethod from .sample import Sample from .stat_measures import ( DatasetMean, DatasetMeasure, DatasetMeasureBase, - DatasetPercentile, + DatasetQuantile, DatasetStd, DatasetVar, Measure, @@ -208,33 +209,39 @@ def finalize( } -class SamplePercentilesCalculator: - """to calculate sample percentiles""" +class SampleQuantilesCalculator: + """to calculate sample quantiles""" def __init__( self, member_id: MemberId, axes: Optional[Sequence[AxisId]], qs: Collection[float], + method: QuantileMethod = "linear", ): super().__init__() assert all(0.0 <= q <= 1.0 for q in qs) self._qs = sorted(set(qs)) self._axes = None if axes is None else tuple(axes) self._member_id = member_id + self._method = method def compute(self, sample: Sample) -> Dict[SampleQuantile, MeasureValue]: tensor = sample.members[self._member_id] - ps = tensor.quantile(self._qs, dim=self._axes) + ps = tensor.quantile(self._qs, dim=self._axes, method=self._method) return { - SampleQuantile(q=q, axes=self._axes, member_id=self._member_id): p + SampleQuantile( + q=q, axes=self._axes, member_id=self._member_id, method=self._method + ): p for q, p in zip(self._qs, ps) } -class MeanPercentilesCalculator: - """to calculate dataset percentiles heuristically by averaging across samples - **note**: the returned dataset percentiles are an estiamte and **not mathematically correct** +class MeanQuantilesCalculator: + """to calculate dataset quantiles heuristically by averaging across samples + + Note: + The returned dataset quantiles are an estiamte and **not mathematically correct** """ def __init__( @@ -253,9 +260,9 @@ def __init__( def update(self, sample: Sample): tensor = sample.members[self._member_id] - sample_estimates = tensor.quantile(self._qs, dim=self._axes).astype( - "float64", copy=False - ) + sample_estimates = tensor.quantile( + self._qs, dim=self._axes, method="linear" + ).astype("float64", copy=False) # reduced voxel count n = int(tensor.size / np.prod(sample_estimates.shape_tuple[1:])) @@ -271,7 
+278,7 @@ def update(self, sample: Sample): self._n += n - def finalize(self) -> Dict[DatasetPercentile, MeasureValue]: + def finalize(self) -> Dict[DatasetQuantile, MeasureValue]: if self._estimates is None: return {} else: @@ -279,13 +286,13 @@ def finalize(self) -> Dict[DatasetPercentile, MeasureValue]: "Computed dataset percentiles naively by averaging percentiles of samples." ) return { - DatasetPercentile(q=q, axes=self._axes, member_id=self._member_id): e + DatasetQuantile(q=q, axes=self._axes, member_id=self._member_id): e for q, e in zip(self._qs, self._estimates) } -class CrickPercentilesCalculator: - """to calculate dataset percentiles with the experimental [crick libray](https://github.com/dask/crick)""" +class CrickQuantilesCalculator: + """to calculate dataset quantiles with the experimental [crick libray](https://github.com/dask/crick)""" def __init__( self, @@ -293,12 +300,10 @@ def __init__( axes: Optional[Sequence[AxisId]], qs: Collection[float], ): - warnings.warn( - "Computing dataset percentiles with experimental 'crick' library." 
- ) + warnings.warn("Computing dataset quantiles with experimental 'crick' library.") super().__init__() assert all(0.0 <= q <= 1.0 for q in qs) - assert axes is None or "_percentiles" not in axes + assert axes is None or "_quantiles" not in axes self._qs = sorted(set(qs)) self._axes = None if axes is None else tuple(axes) self._member_id = member_id @@ -310,7 +315,7 @@ def __init__( def _initialize(self, tensor_sizes: PerAxis[int]): assert crick is not None out_sizes: OrderedDict[AxisId, int] = collections.OrderedDict( - _percentiles=len(self._qs) + _quantiles=len(self._qs) ) if self._axes is not None: for d, s in tensor_sizes.items(): @@ -329,7 +334,7 @@ def update(self, part: Sample): if isinstance(part, Sample) else part.members[self._member_id].data ) - assert "_percentiles" not in tensor.dims + assert "_quantiles" not in tensor.dims if self._digest is None: self._initialize(tensor.tagged_shape) @@ -339,7 +344,7 @@ def update(self, part: Sample): for i, idx in enumerate(self._indices): self._digest[i].update(tensor[dict(zip(self._dims[1:], idx))]) - def finalize(self) -> Dict[DatasetPercentile, MeasureValue]: + def finalize(self) -> Dict[DatasetQuantile, MeasureValue]: if self._digest is None: return {} else: @@ -350,7 +355,7 @@ def finalize(self) -> Dict[DatasetPercentile, MeasureValue]: [[d.quantile(q) for d in self._digest] for q in self._qs] ).reshape(self._shape) return { - DatasetPercentile( + DatasetQuantile( q=q, axes=self._axes, member_id=self._member_id ): Tensor(v, dims=self._dims[1:]) for q, v in zip(self._qs, vs) @@ -358,11 +363,11 @@ def finalize(self) -> Dict[DatasetPercentile, MeasureValue]: if crick is None: - DatasetPercentilesCalculator: Type[ - Union[MeanPercentilesCalculator, CrickPercentilesCalculator] - ] = MeanPercentilesCalculator + DatasetQuantilesCalculator: Type[ + Union[MeanQuantilesCalculator, CrickQuantilesCalculator] + ] = MeanQuantilesCalculator else: - DatasetPercentilesCalculator = CrickPercentilesCalculator + 
DatasetQuantilesCalculator = CrickQuantilesCalculator class NaiveSampleMeasureCalculator: @@ -380,11 +385,11 @@ def compute(self, sample: Sample) -> Dict[SampleMeasure, MeasureValue]: SampleMeasureCalculator = Union[ MeanCalculator, MeanVarStdCalculator, - SamplePercentilesCalculator, + SampleQuantilesCalculator, NaiveSampleMeasureCalculator, ] DatasetMeasureCalculator = Union[ - MeanCalculator, MeanVarStdCalculator, DatasetPercentilesCalculator + MeanCalculator, MeanVarStdCalculator, DatasetQuantilesCalculator ] @@ -493,10 +498,10 @@ def get_measure_calculators( required_dataset_mean_var_std: Set[Union[DatasetMean, DatasetVar, DatasetStd]] = ( set() ) - required_sample_percentiles: Dict[ - Tuple[MemberId, Optional[Tuple[AxisId, ...]]], Set[float] + required_sample_quantiles: Dict[ + Tuple[MemberId, Optional[Tuple[AxisId, ...]], QuantileMethod], Set[float] ] = {} - required_dataset_percentiles: Dict[ + required_dataset_quantiles: Dict[ Tuple[MemberId, Optional[Tuple[AxisId, ...]]], Set[float] ] = {} @@ -522,11 +527,11 @@ def get_measure_calculators( ) assert rm in required_dataset_mean_var_std elif isinstance(rm, SampleQuantile): - required_sample_percentiles.setdefault((rm.member_id, rm.axes), set()).add( - rm.q - ) - elif isinstance(rm, DatasetPercentile): - required_dataset_percentiles.setdefault((rm.member_id, rm.axes), set()).add( + required_sample_quantiles.setdefault( + (rm.member_id, rm.axes, rm.method), set() + ).add(rm.q) + elif isinstance(rm, DatasetQuantile): + required_dataset_quantiles.setdefault((rm.member_id, rm.axes), set()).add( rm.q ) else: @@ -556,14 +561,14 @@ def get_measure_calculators( MeanVarStdCalculator(member_id=rm.member_id, axes=rm.axes) ) - for (tid, axes), qs in required_sample_percentiles.items(): + for (tid, axes, m), qs in required_sample_quantiles.items(): sample_calculators.append( - SamplePercentilesCalculator(member_id=tid, axes=axes, qs=qs) + SampleQuantilesCalculator(member_id=tid, axes=axes, qs=qs, method=m) ) - for (tid, 
axes), qs in required_dataset_percentiles.items(): + for (tid, axes), qs in required_dataset_quantiles.items(): dataset_calculators.append( - DatasetPercentilesCalculator(member_id=tid, axes=axes, qs=qs) + DatasetQuantilesCalculator(member_id=tid, axes=axes, qs=qs) ) return sample_calculators, dataset_calculators diff --git a/src/bioimageio/core/stat_measures.py b/src/bioimageio/core/stat_measures.py index 609207897..1a96cd6bf 100644 --- a/src/bioimageio/core/stat_measures.py +++ b/src/bioimageio/core/stat_measures.py @@ -23,7 +23,7 @@ from typing_extensions import Annotated from .axis import AxisId -from .common import MemberId, PerMember +from .common import MemberId, PerMember, QuantileMethod from .tensor import Tensor @@ -157,19 +157,23 @@ def model_post_init(self, __context: Any): class SampleQuantile(_Quantile, SampleMeasureBase, frozen=True): - """The `n`th percentile of a single tensor""" + """The `q`th quantile of a single tensor""" + + method: QuantileMethod = "linear" + """Method to use when the desired quantile lies between two data points. 
+ See https://numpy.org/devdocs/reference/generated/numpy.quantile.html#numpy-quantile for details.""" def compute(self, sample: SampleLike) -> MeasureValue: tensor = sample.members[self.member_id] - return tensor.quantile(self.q, dim=self.axes) + return tensor.quantile(self.q, dim=self.axes, method=self.method) def model_post_init(self, __context: Any): super().model_post_init(__context) assert self.axes is None or AxisId("batch") not in self.axes -class DatasetPercentile(_Quantile, DatasetMeasureBase, frozen=True): - """The `n`th percentile across multiple samples""" +class DatasetQuantile(_Quantile, DatasetMeasureBase, frozen=True): + """The `q`th quantile across multiple samples""" def model_post_init(self, __context: Any): super().model_post_init(__context) @@ -180,7 +184,7 @@ def model_post_init(self, __context: Any): Union[SampleMean, SampleStd, SampleVar, SampleQuantile], Discriminator("name") ] DatasetMeasure = Annotated[ - Union[DatasetMean, DatasetStd, DatasetVar, DatasetPercentile], Discriminator("name") + Union[DatasetMean, DatasetStd, DatasetVar, DatasetQuantile], Discriminator("name") ] Measure = Annotated[Union[SampleMeasure, DatasetMeasure], Discriminator("scope")] Stat = Dict[Measure, MeasureValue] @@ -188,7 +192,7 @@ def model_post_init(self, __context: Any): MeanMeasure = Union[SampleMean, DatasetMean] StdMeasure = Union[SampleStd, DatasetStd] VarMeasure = Union[SampleVar, DatasetVar] -PercentileMeasure = Union[SampleQuantile, DatasetPercentile] +PercentileMeasure = Union[SampleQuantile, DatasetQuantile] MeanMeasureT = TypeVar("MeanMeasureT", bound=MeanMeasure) StdMeasureT = TypeVar("StdMeasureT", bound=StdMeasure) VarMeasureT = TypeVar("VarMeasureT", bound=VarMeasure) diff --git a/src/bioimageio/core/tensor.py b/src/bioimageio/core/tensor.py index c49469f7e..248a29884 100644 --- a/src/bioimageio/core/tensor.py +++ b/src/bioimageio/core/tensor.py @@ -19,11 +19,12 @@ import numpy as np import xarray as xr -from bioimageio.spec.model import v0_5 
from loguru import logger from numpy.typing import DTypeLike, NDArray from typing_extensions import Self, assert_never +from bioimageio.spec.model import v0_5 + from ._magic_tensor_ops import MagicTensorOpsMixin from .axis import AxisId, AxisInfo, AxisLike, PerAxis from .common import ( @@ -33,6 +34,7 @@ PadWhere, PadWidth, PadWidthLike, + QuantileMethod, SliceInfo, ) @@ -388,6 +390,7 @@ def quantile( self, q: Union[float, Sequence[float]], dim: Optional[Union[AxisId, Sequence[AxisId]]] = None, + method: QuantileMethod = "linear", ) -> Self: assert ( isinstance(q, (float, int)) @@ -404,7 +407,9 @@ def quantile( assert dim is None or ( (quantile_dim := AxisId("quantile")) != dim and quantile_dim not in set(dim) ) - return self.__class__.from_xarray(self._data.quantile(q, dim=dim)) + return self.__class__.from_xarray( + self._data.quantile(q, dim=dim, method=method) + ) def resize_to( self, diff --git a/tests/test_proc_ops.py b/tests/test_proc_ops.py index 7ab2eaa00..54babbf3a 100644 --- a/tests/test_proc_ops.py +++ b/tests/test_proc_ops.py @@ -189,6 +189,51 @@ def test_clip(tid: MemberId): xr.testing.assert_equal(expected, sample.members[tid].data) +def test_clip_percentiles(): + from bioimageio.core.proc_ops import Clip + from bioimageio.core.stat_measures import SampleQuantile + from bioimageio.spec.model.v0_5 import AxisId, ClipDescr, ClipKwargs + + descr = ClipDescr( + kwargs=ClipKwargs(min_percentile=20, max_percentile=60, axes=(AxisId("x"),)) + ) + op = Clip.from_proc_descr( + descr, + member_id=MemberId("data"), + ) + assert op.required_measures == { + SampleQuantile( + member_id=MemberId("data"), + scope="sample", + name="quantile", + q=0.2, + axes=(AxisId("x"),), + method="inverted_cdf", + ), + SampleQuantile( + member_id=MemberId("data"), + scope="sample", + name="quantile", + q=0.6, + axes=(AxisId("x"),), + method="inverted_cdf", + ), + } + + data = xr.DataArray(np.arange(15).reshape(3, 5), dims=("c", "x")) + sample = Sample( + members={MemberId("data"): 
Tensor.from_xarray(data)}, stat={}, id=None + ) + sample.stat = compute_measures(op.required_measures, [sample]) + + expected = xr.DataArray( + np.array([[3, 3, 3, 4, 4], [7, 7, 8, 9, 9], [12, 12, 13, 14, 14]]), + dims=("c", "x"), + ) + op(sample) + xr.testing.assert_equal(expected, sample.members[MemberId("data")].data) + + def test_combination_of_op_steps_with_dims_specified(tid: MemberId): from bioimageio.core.proc_ops import ZeroMeanUnitVariance @@ -330,10 +375,10 @@ def test_scale_range_axes(tid: MemberId): eps = 1.0e-6 lower_quantile = SampleQuantile( - member_id=tid, q=0.1, axes=(AxisId("x"), AxisId("y")) + member_id=tid, q=0.1, axes=(AxisId("x"), AxisId("y")), method="linear" ) upper_quantile = SampleQuantile( - member_id=tid, q=0.9, axes=(AxisId("x"), AxisId("y")) + member_id=tid, q=0.9, axes=(AxisId("x"), AxisId("y")), method="linear" ) op = ScaleRange(tid, tid, lower_quantile, upper_quantile, eps=eps) diff --git a/tests/test_stat_measures.py b/tests/test_stat_measures.py index 49c876098..92d1c1115 100644 --- a/tests/test_stat_measures.py +++ b/tests/test_stat_measures.py @@ -10,7 +10,7 @@ from bioimageio.core.common import MemberId from bioimageio.core.sample import Sample from bioimageio.core.stat_calculators import ( - SamplePercentilesCalculator, + SampleQuantilesCalculator, get_measure_calculators, ) from bioimageio.core.stat_measures import SampleQuantile @@ -42,23 +42,28 @@ def test_individual_normal_measure( xr.testing.assert_allclose(expected.data, actual.data) +@pytest.mark.parametrize("method", ["inverted_cdf", "linear"]) @pytest.mark.parametrize("axes", [None, (AxisId("x"), AxisId("y"))]) -def test_individual_percentile_measure(axes: Optional[Tuple[AxisId, ...]]): +def test_individual_percentile_measure( + axes: Optional[Tuple[AxisId, ...]], method: Literal["inverted_cdf", "linear"] +): qs = [0, 0.1, 0.5, 1.0] tid = MemberId("tensor") - measures = [SampleQuantile(member_id=tid, axes=axes, q=q) for q in qs] + measures = [ + 
SampleQuantile(member_id=tid, axes=axes, q=q, method=method) for q in qs + ] calcs, _ = get_measure_calculators(measures) assert len(calcs) == 1 calc = calcs[0] - assert isinstance(calc, SamplePercentilesCalculator) + assert isinstance(calc, SampleQuantilesCalculator) data = Tensor( np.random.random((5, 6, 3)), dims=(AxisId("x"), AxisId("y"), AxisId("c")) ) actual = calc.compute(Sample(members={tid: data}, stat={}, id=None)) for m in measures: - expected = data.quantile(q=m.q, dim=m.axes) + expected = data.quantile(q=m.q, dim=m.axes, method=m.method) actual_data = actual[m] if isinstance(actual_data, Tensor): actual_data = actual_data.data From bb58471f34fd4844ffe0708bc132012913958c7a Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 9 Feb 2026 14:31:06 +0100 Subject: [PATCH 38/56] exclude hidden files from pyright analysis --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 0f6df02c5..d87721d3e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,6 +104,7 @@ version = { attr = "bioimageio.core.__version__" } [tool.pyright] exclude = [ + "**/.*", "**/__pycache__", "**/node_modules", "dogfood", From 2a872998b9d7f9178e00527adbe1d5548b920ef4 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 9 Feb 2026 14:32:16 +0100 Subject: [PATCH 39/56] fix Clip._apply --- src/bioimageio/core/proc_ops.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/bioimageio/core/proc_ops.py b/src/bioimageio/core/proc_ops.py index 37657dd86..d50d7070e 100644 --- a/src/bioimageio/core/proc_ops.py +++ b/src/bioimageio/core/proc_ops.py @@ -289,7 +289,9 @@ def _apply(self, x: Tensor, stat: Stat) -> Tensor: max_clip_arg = max_value else: # clip does not support non-scalar values - x = Tensor.from_xarray(x.data.where(x.data <= max_value, max_value)) + x = Tensor.from_xarray( + x.data.where(x.data <= max_value.data, max_value.data) + ) max_clip_arg = None else: max_clip_arg = self.max @@ -313,7 +315,7 @@ def 
from_proc_descr( if dataset_mode: Quantile = DatasetQuantile else: - Quantile = SampleQuantile + Quantile = partial(SampleQuantile, method="inverted_cdf") if descr.kwargs.min is not None: min_arg = descr.kwargs.min @@ -322,7 +324,6 @@ def from_proc_descr( q=descr.kwargs.min_percentile / 100, axes=axes, member_id=member_id, - method="inverted_cdf", ) else: min_arg = None @@ -334,7 +335,6 @@ def from_proc_descr( q=descr.kwargs.max_percentile / 100, axes=axes, member_id=member_id, - method="inverted_cdf", ) else: max_arg = None From 13cdfc74657e8c66fbfb4ce56b8a546c2c93df50 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 9 Feb 2026 14:39:28 +0100 Subject: [PATCH 40/56] fix test_clip_percentiles --- tests/test_proc_ops.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_proc_ops.py b/tests/test_proc_ops.py index 54babbf3a..d3845a057 100644 --- a/tests/test_proc_ops.py +++ b/tests/test_proc_ops.py @@ -195,7 +195,7 @@ def test_clip_percentiles(): from bioimageio.spec.model.v0_5 import AxisId, ClipDescr, ClipKwargs descr = ClipDescr( - kwargs=ClipKwargs(min_percentile=20, max_percentile=60, axes=(AxisId("x"),)) + kwargs=ClipKwargs(min_percentile=30, max_percentile=70, axes=(AxisId("x"),)) ) op = Clip.from_proc_descr( descr, @@ -206,7 +206,7 @@ def test_clip_percentiles(): member_id=MemberId("data"), scope="sample", name="quantile", - q=0.2, + q=0.3, axes=(AxisId("x"),), method="inverted_cdf", ), @@ -214,7 +214,7 @@ def test_clip_percentiles(): member_id=MemberId("data"), scope="sample", name="quantile", - q=0.6, + q=0.7, axes=(AxisId("x"),), method="inverted_cdf", ), @@ -227,7 +227,7 @@ def test_clip_percentiles(): sample.stat = compute_measures(op.required_measures, [sample]) expected = xr.DataArray( - np.array([[3, 3, 3, 4, 4], [7, 7, 8, 9, 9], [12, 12, 13, 14, 14]]), + np.array([[1, 1, 2, 3, 3], [6, 6, 7, 8, 8], [12, 12, 13, 14, 14]]), dims=("c", "x"), ) op(sample) From 2e109d395880fd51055b87629157bd21dc50a1c2 Mon Sep 17 00:00:00 
2001 From: fynnbe Date: Mon, 9 Feb 2026 15:17:55 +0100 Subject: [PATCH 41/56] pytest ignore scripts/generate_api_doc_pages.py (avoids mkdocs exception) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d87721d3e..0c95c0cef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -138,7 +138,7 @@ typeCheckingMode = "strict" useLibraryCodeForTypes = true [tool.pytest.ini_options] -addopts = "--doctest-modules --failed-first --ignore dogfood --ignore src/bioimageio/core/backends --ignore src/bioimageio/core/weight_converters" +addopts = "--doctest-modules --failed-first --ignore dogfood --ignore src/bioimageio/core/backends --ignore src/bioimageio/core/weight_converters --ignore scripts/generate_api_doc_pages.py" testpaths = ["src", "tests"] [tool.ruff] From 349ccf8a2a4b11115f2887e3e5d9b3af68180632 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 10 Feb 2026 11:33:02 +0100 Subject: [PATCH 42/56] fix test_clip_percentiles --- tests/test_proc_ops.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_proc_ops.py b/tests/test_proc_ops.py index d3845a057..0e800ad51 100644 --- a/tests/test_proc_ops.py +++ b/tests/test_proc_ops.py @@ -220,15 +220,15 @@ def test_clip_percentiles(): ), } - data = xr.DataArray(np.arange(15).reshape(3, 5), dims=("c", "x")) + data = xr.DataArray(np.arange(15).reshape(3, 5), dims=("channel", "x")) sample = Sample( members={MemberId("data"): Tensor.from_xarray(data)}, stat={}, id=None ) sample.stat = compute_measures(op.required_measures, [sample]) expected = xr.DataArray( - np.array([[1, 1, 2, 3, 3], [6, 6, 7, 8, 8], [12, 12, 13, 14, 14]]), - dims=("c", "x"), + np.array([[1, 1, 2, 3, 3], [6, 6, 7, 8, 8], [11, 11, 12, 13, 13]]), + dims=("channel", "x"), ) op(sample) xr.testing.assert_equal(expected, sample.members[MemberId("data")].data) @@ -337,7 +337,7 @@ def test_scale_mean_variance_per_channel(tid: MemberId, axes_str: Optional[str]) sample.stat 
= compute_measures(op.required_measures, [sample]) op(sample) - if axes is not None and AxisId("c") not in axes: + if axes is not None and AxisId("channel") not in axes: # mean,std per channel should match exactly xr.testing.assert_allclose( ref_data, sample.members[tid].data, rtol=1e-5, atol=1e-7 From 58367266f3344ac261ba541d38b49317c662592a Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 10 Feb 2026 11:33:49 +0100 Subject: [PATCH 43/56] bump spec --- mkdocs.yaml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mkdocs.yaml b/mkdocs.yaml index 85d6ba587..e0ee0f4cb 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -86,7 +86,7 @@ plugins: python: inventories: - https://docs.pydantic.dev/latest/objects.inv - - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.0/objects.inv + - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.1/objects.inv options: annotations_path: source backlinks: tree diff --git a/pyproject.toml b/pyproject.toml index 0c95c0cef..2f99325e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ requires-python = ">=3.9" readme = "README.md" dynamic = ["version"] dependencies = [ - "bioimageio.spec ==0.5.7.0", + "bioimageio.spec ==0.5.7.1", "imagecodecs", "imageio>=2.10", "loguru", From 41f5975d12268760dbd9358fe9b5054102b30f67 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 11 Feb 2026 14:10:07 +0100 Subject: [PATCH 44/56] split off coverage comment --- .github/workflows/build.yaml | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8b16fe534..5681d2ba5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -126,7 +126,7 @@ jobs: include-hidden-files: true coverage: - needs: [test] + needs: test runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -141,13 +141,11 @@ jobs: ls -la .coverage* coverage combine coverage xml -o coverage.xml - - uses: 
orgoro/coverage@v3.2 + - uses: actions/upload-artifact@v4 with: - coverageFile: coverage.xml - token: ${{ secrets.GITHUB_TOKEN }} - thresholdAll: 0.7 - thresholdNew: 0.9 - thresholdModified: 0.6 + name: coverage.xml + path: coverage.xml + retention-days: 1 - name: generate coverage badge and html report run: | pip install genbadge[coverage] @@ -157,6 +155,23 @@ jobs: with: name: coverage-summary path: dist + retention-days: 1 + + coverage-comment: + needs: coverage + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@v4 + with: + name: coverage.xml + - uses: orgoro/coverage@v3.2 + with: + coverageFile: coverage.xml + token: ${{ secrets.GITHUB_TOKEN }} + thresholdAll: 0.7 + thresholdNew: 0.9 + thresholdModified: 0.6 conda-build: needs: [populate-cache, test] # only so we run tests even if the pinned bioimageio.spec version is not yet published on conda-forge @@ -211,7 +226,7 @@ jobs: name: dist docs: - needs: [build, conda-build, coverage, test] + needs: coverage runs-on: ubuntu-latest permissions: contents: write # required for tag creation From fa938236291356a5d2372335f1f51dc84ba8ebed Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 12 Feb 2026 10:22:48 +0100 Subject: [PATCH 45/56] fix typing --- src/bioimageio/core/stat_calculators.py | 2 +- tests/test_stat_measures.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bioimageio/core/stat_calculators.py b/src/bioimageio/core/stat_calculators.py index 069cb69d3..c85c2f175 100644 --- a/src/bioimageio/core/stat_calculators.py +++ b/src/bioimageio/core/stat_calculators.py @@ -224,7 +224,7 @@ def __init__( self._qs = sorted(set(qs)) self._axes = None if axes is None else tuple(axes) self._member_id = member_id - self._method = method + self._method: QuantileMethod = method def compute(self, sample: Sample) -> Dict[SampleQuantile, MeasureValue]: tensor = sample.members[self._member_id] diff --git a/tests/test_stat_measures.py 
b/tests/test_stat_measures.py index 92d1c1115..e4cd9d926 100644 --- a/tests/test_stat_measures.py +++ b/tests/test_stat_measures.py @@ -1,5 +1,5 @@ from itertools import product -from typing import Optional, Tuple +from typing import Literal, Optional, Tuple import numpy as np import pytest From ebdced49acdf77c4a888529acd0c554a4b234864 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 12 Feb 2026 11:37:53 +0100 Subject: [PATCH 46/56] readd main func in __main__ as entry point --- src/bioimageio/core/__main__.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/bioimageio/core/__main__.py b/src/bioimageio/core/__main__.py index 3c48e6b21..123b6a9c9 100644 --- a/src/bioimageio/core/__main__.py +++ b/src/bioimageio/core/__main__.py @@ -16,4 +16,10 @@ from .cli import Bioimageio -_ = CliApp.run(Bioimageio) + +def main(): + _ = CliApp.run(Bioimageio) + + +if __name__ == "__main__": + main() From 9d161644b3a6917bfc7777f9e057511dcec7dea8 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 12 Feb 2026 16:58:04 +0100 Subject: [PATCH 47/56] bump spec and add test from spec at main --- .github/workflows/build.yaml | 7 +++++++ changelog.md | 2 +- mkdocs.yaml | 2 +- pyproject.toml | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5681d2ba5..c0e566153 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -81,6 +81,9 @@ jobs: numpy-version: 2 - python-version: '3.12' numpy-version: 1 + - python-version: '3.12' + numpy-version: 2 + spec-from-main: true # - python-version: '3.13' # numpy-version: 2 @@ -100,6 +103,10 @@ jobs: run: | pyright --version pyright -p pyproject.toml --pythonversion ${{ matrix.python-version }} + - if: matrix.spec-from-main + run: | + pip uninstall -y bioimageio.spec + pip install git+https://github.com/bioimage-io/spec-bioimage-io.git@main - name: Restore bioimageio cache ${{needs.populate-cache.outputs.cache-key}} uses: 
actions/cache/restore@v4 with: diff --git a/changelog.md b/changelog.md index 6ec7cab15..801796c41 100644 --- a/changelog.md +++ b/changelog.md @@ -1,6 +1,6 @@ ### 0.9.6 -- bump bioimageio.spec library version to 0.5.7.1 +- bump bioimageio.spec library version to 0.5.7.2 - increase default reprducibility tolerance - unify quantile (vs percentile) variable names - add quantile computation method parameter diff --git a/mkdocs.yaml b/mkdocs.yaml index e0ee0f4cb..4f523d58c 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -86,7 +86,7 @@ plugins: python: inventories: - https://docs.pydantic.dev/latest/objects.inv - - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.1/objects.inv + - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.2/objects.inv options: annotations_path: source backlinks: tree diff --git a/pyproject.toml b/pyproject.toml index 2f99325e1..6fd78e502 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ requires-python = ">=3.9" readme = "README.md" dynamic = ["version"] dependencies = [ - "bioimageio.spec ==0.5.7.1", + "bioimageio.spec ==0.5.7.2", "imagecodecs", "imageio>=2.10", "loguru", From 8a4cdef3bae483cf89e56d59f6740c4b38898f26 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 13 Feb 2026 00:03:52 +0100 Subject: [PATCH 48/56] use weight-format arg in CLI call when testing in a separate conda env --- src/bioimageio/core/_resource_tests.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bioimageio/core/_resource_tests.py b/src/bioimageio/core/_resource_tests.py index 2c59ac997..25f3593f9 100644 --- a/src/bioimageio/core/_resource_tests.py +++ b/src/bioimageio/core/_resource_tests.py @@ -495,6 +495,7 @@ def _test_in_env( f"--{summary_path_arg_name}={summary_path.as_posix()}", f"--determinism={determinism}", ] + + ([f"--weight-format={weight_format}"] if weight_format else []) + ([f"--expected-type={expected_type}"] if expected_type else []) + (["--stop-early"] if stop_early else []) ) From 0071dcd6c8c076116e256ee57ee3a2a1f2703b0b 
Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 13 Feb 2026 10:21:21 +0100 Subject: [PATCH 49/56] move griffe libs to docs deps --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6fd78e502..3daf80f1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,9 +54,6 @@ partners = [ dev = [ "cellpose", # for model testing "crick", - "griffe-pydantic", - "griffe-inherited-docstrings", - "griffe-public-redundant-aliases", "httpx", "jupyter", "keras>=3.0,<4", @@ -80,6 +77,9 @@ dev = [ "torchvision>=0.21", ] docs = [ + "griffe-pydantic", + "griffe-inherited-docstrings", + "griffe-public-redundant-aliases", "markdown-callouts", "markdown-exec[ansi]", "markdown-pycon", From 7c659cb1116f22989c89e966f16a952692a6a694 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 16 Feb 2026 14:10:29 +0100 Subject: [PATCH 50/56] bump spec --- changelog.md | 2 +- mkdocs.yaml | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/changelog.md b/changelog.md index 801796c41..6dd4966d7 100644 --- a/changelog.md +++ b/changelog.md @@ -1,6 +1,6 @@ ### 0.9.6 -- bump bioimageio.spec library version to 0.5.7.2 +- bump bioimageio.spec library version to 0.5.7.3 - increase default reprducibility tolerance - unify quantile (vs percentile) variable names - add quantile computation method parameter diff --git a/mkdocs.yaml b/mkdocs.yaml index 4f523d58c..33ac091d4 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -86,7 +86,7 @@ plugins: python: inventories: - https://docs.pydantic.dev/latest/objects.inv - - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.2/objects.inv + - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.3/objects.inv options: annotations_path: source backlinks: tree diff --git a/pyproject.toml b/pyproject.toml index 3daf80f1c..f56e741ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ requires-python = ">=3.9" readme = "README.md" dynamic = ["version"] 
dependencies = [ - "bioimageio.spec ==0.5.7.2", + "bioimageio.spec ==0.5.7.3", "imagecodecs", "imageio>=2.10", "loguru", From 62c16b65342c27adbf39d1f88ee0170729796009 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 16 Feb 2026 14:35:06 +0100 Subject: [PATCH 51/56] update set-output syntax --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c0e566153..c4728500f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -261,7 +261,7 @@ jobs: shell: bash run: | if [[ -n '${{ github.event.pull_request.head.ref }}' ]]; then branch=gh-pages-${{ github.event.pull_request.head.ref }}; else branch=gh-pages; fi - echo "::set-output name=branch::$branch" + echo "name=branch::$branch" >> $GITHUB_OUTPUT - name: Get parent commit if: inputs.force-publish != 'true' id: get-parent-commit From 3aab02dd1c182792229d29fedd55ce4c704fab72 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 17 Feb 2026 10:02:40 +0100 Subject: [PATCH 52/56] fix branch name output --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c4728500f..429a6ec79 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -261,7 +261,7 @@ jobs: shell: bash run: | if [[ -n '${{ github.event.pull_request.head.ref }}' ]]; then branch=gh-pages-${{ github.event.pull_request.head.ref }}; else branch=gh-pages; fi - echo "name=branch::$branch" >> $GITHUB_OUTPUT + echo "branch=$branch" >> $GITHUB_OUTPUT - name: Get parent commit if: inputs.force-publish != 'true' id: get-parent-commit From 7632d6c0a0024c4e5953285311c91dae2a0b2e4b Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 17 Feb 2026 14:40:34 +0100 Subject: [PATCH 53/56] add empty-cache command --- src/bioimageio/core/cli.py | 17 ++++++++++++++++- tests/test_cli.py | 17 +++++++++++++++++ 2 files changed, 33 
insertions(+), 1 deletion(-) diff --git a/src/bioimageio/core/cli.py b/src/bioimageio/core/cli.py index 1c97db670..8fc4ff668 100644 --- a/src/bioimageio/core/cli.py +++ b/src/bioimageio/core/cli.py @@ -73,7 +73,12 @@ from bioimageio.spec.dataset import DatasetDescr from bioimageio.spec.model import ModelDescr, v0_4, v0_5 from bioimageio.spec.notebook import NotebookDescr -from bioimageio.spec.utils import ensure_description_is_model, get_reader, write_yaml +from bioimageio.spec.utils import ( + empty_cache, + ensure_description_is_model, + get_reader, + write_yaml, +) from .commands import WeightFormatArgAll, WeightFormatArgAny, package, test from .common import MemberId, SampleId, SupportedWeightsFormat @@ -803,6 +808,13 @@ def cli_cmd(self): self.log(updated_model_descr) +class EmptyCache(CmdBase): + """Empty the bioimageio cache directory.""" + + def cli_cmd(self): + empty_cache() + + JSON_FILE = "bioimageio-cli.json" YAML_FILE = "bioimageio-cli.yaml" @@ -843,6 +855,9 @@ class Bioimageio( add_weights: CliSubCommand[AddWeightsCmd] = Field(alias="add-weights") """Add additional weights to a model description by converting from available formats.""" + empty_cache: CliSubCommand[EmptyCache] = Field(alias="empty-cache") + """Empty the bioimageio cache directory.""" + @classmethod def settings_customise_sources( cls, diff --git a/tests/test_cli.py b/tests/test_cli.py index 7e9de4ef6..035977453 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -6,6 +6,8 @@ import pytest from pydantic import FilePath +from bioimageio.spec import load_description, settings + def run_subprocess( commands: Sequence[str], **kwargs: Any @@ -69,6 +71,21 @@ def test_cli( assert ret.returncode == 0, ret.stdout +def test_empty_cache(tmp_path: Path, unet2d_nuclei_broad_model: str): + from bioimageio.spec.utils import empty_cache + + origingal_cache_path = settings.cache_path + try: + settings.cache_path = tmp_path / "cache" + assert not settings.cache_path.exists() + _ = 
load_description(unet2d_nuclei_broad_model, perform_io_checks=False) + assert len(list(settings.cache_path.iterdir())) == 1 + empty_cache() + assert len(list(settings.cache_path.iterdir())) == 0 + finally: + settings.cache_path = origingal_cache_path + + @pytest.mark.parametrize("args", [["test", "stardist_wrong_shape"]]) def test_cli_fails(args: List[str], stardist_wrong_shape: FilePath): resolved_args = [ From c43d6500266387d77287c332ffc811ad326b04dc Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 17 Feb 2026 15:30:19 +0100 Subject: [PATCH 54/56] bump spec --- changelog.md | 2 +- mkdocs.yaml | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/changelog.md b/changelog.md index 6dd4966d7..e8fade6d1 100644 --- a/changelog.md +++ b/changelog.md @@ -1,6 +1,6 @@ ### 0.9.6 -- bump bioimageio.spec library version to 0.5.7.3 +- bump bioimageio.spec library version to 0.5.7.4 - increase default reprducibility tolerance - unify quantile (vs percentile) variable names - add quantile computation method parameter diff --git a/mkdocs.yaml b/mkdocs.yaml index 33ac091d4..95f8211ca 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -86,7 +86,7 @@ plugins: python: inventories: - https://docs.pydantic.dev/latest/objects.inv - - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.3/objects.inv + - https://bioimage-io.github.io/spec-bioimage-io/v0.5.7.4/objects.inv options: annotations_path: source backlinks: tree diff --git a/pyproject.toml b/pyproject.toml index f56e741ad..b9830339a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ requires-python = ">=3.9" readme = "README.md" dynamic = ["version"] dependencies = [ - "bioimageio.spec ==0.5.7.3", + "bioimageio.spec ==0.5.7.4", "imagecodecs", "imageio>=2.10", "loguru", From 539bd2ba66bebce083d092c73c0ae1fc2619beec Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 17 Feb 2026 15:32:39 +0100 Subject: [PATCH 55/56] save actual output only if working_dir is specified --- changelog.md | 1 
+ src/bioimageio/core/_resource_tests.py | 50 ++++++++++++++++++++------ 2 files changed, 41 insertions(+), 10 deletions(-) diff --git a/changelog.md b/changelog.md index e8fade6d1..605be0597 100644 --- a/changelog.md +++ b/changelog.md @@ -5,6 +5,7 @@ - unify quantile (vs percentile) variable names - add quantile computation method parameter - accept `SampleQuantile` or `DatasetQuantile` as `min`/`max` arguments to `proc_ops.Clip` +- save actual output during model testing only if an explicit working directory was specified to produce less clutter ### 0.9.5 diff --git a/src/bioimageio/core/_resource_tests.py b/src/bioimageio/core/_resource_tests.py index 25f3593f9..6a58cbbbd 100644 --- a/src/bioimageio/core/_resource_tests.py +++ b/src/bioimageio/core/_resource_tests.py @@ -252,6 +252,7 @@ def test_description( expected_type=expected_type, sha256=sha256, stop_early=stop_early, + working_dir=working_dir, **deprecated, ) return rd.validation_summary @@ -275,6 +276,7 @@ def test_description( "given run_command does not raise an exception for a failing command" ) + verbose = working_dir is not None if working_dir is None: td_kwargs: Dict[str, Any] = ( dict(ignore_cleanup_errors=True) if sys.version_info >= (3, 10) else {} @@ -318,6 +320,7 @@ def test_description( sha256=sha256, stop_early=stop_early, run_command=run_command, + verbose=verbose, **deprecated, ) @@ -337,6 +340,7 @@ def _test_in_env( stop_early: bool, expected_type: Optional[str], sha256: Optional[Sha256], + verbose: bool, **deprecated: Unpack[DeprecatedKwargs], ): """Test a bioimage.io resource in a given conda environment. 
@@ -367,6 +371,7 @@ def _test_in_env( expected_type=expected_type, sha256=sha256, stop_early=stop_early, + verbose=verbose, **deprecated, ) @@ -543,6 +548,7 @@ def load_description_and_test( expected_type: Literal["model"], sha256: Optional[Sha256] = None, stop_early: bool = True, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> Union[ModelDescr, InvalidDescr]: ... @@ -558,6 +564,7 @@ def load_description_and_test( expected_type: Literal["dataset"], sha256: Optional[Sha256] = None, stop_early: bool = True, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> Union[DatasetDescr, InvalidDescr]: ... @@ -573,6 +580,7 @@ def load_description_and_test( expected_type: Optional[str] = None, sha256: Optional[Sha256] = None, stop_early: bool = True, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> Union[LatestResourceDescr, InvalidDescr]: ... @@ -588,6 +596,7 @@ def load_description_and_test( expected_type: Literal["model"], sha256: Optional[Sha256] = None, stop_early: bool = True, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> Union[AnyModelDescr, InvalidDescr]: ... @@ -603,6 +612,7 @@ def load_description_and_test( expected_type: Literal["dataset"], sha256: Optional[Sha256] = None, stop_early: bool = True, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> Union[AnyDatasetDescr, InvalidDescr]: ... @@ -618,6 +628,7 @@ def load_description_and_test( expected_type: Optional[str] = None, sha256: Optional[Sha256] = None, stop_early: bool = True, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> Union[ResourceDescr, InvalidDescr]: ... 
@@ -632,6 +643,7 @@ def load_description_and_test( expected_type: Optional[str] = None, sha256: Optional[Sha256] = None, stop_early: bool = True, + working_dir: Optional[Union[os.PathLike[str], str]] = None, **deprecated: Unpack[DeprecatedKwargs], ) -> Union[ResourceDescr, InvalidDescr]: """Test a bioimage.io resource dynamically, @@ -704,7 +716,15 @@ def load_description_and_test( enable_determinism(determinism, weight_formats=weight_formats) for w in weight_formats: - _test_model_inference(rd, w, devices, stop_early=stop_early, **deprecated) + _test_model_inference( + rd, + w, + devices, + stop_early=stop_early, + working_dir=working_dir, + verbose=working_dir is not None, + **deprecated, + ) if stop_early and rd.validation_summary.status != "passed": break @@ -779,6 +799,9 @@ def _test_model_inference( weight_format: SupportedWeightsFormat, devices: Optional[Sequence[str]], stop_early: bool, + *, + working_dir: Optional[Union[os.PathLike[str], str]], + verbose: bool, **deprecated: Unpack[DeprecatedKwargs], ) -> None: test_name = f"Reproduce test outputs from test inputs ({weight_format})" @@ -862,15 +885,20 @@ def add_warning_entry(msg: str): if not mismatched_elements: continue - actual_output_path = Path(f"actual_output_{m}_{weight_format}.npy") - try: - save_tensor(actual_output_path, actual) - except Exception as e: - logger.error( - "Failed to save actual output tensor to {}: {}", - actual_output_path, - e, + if working_dir is not None and verbose: + actual_output_path = ( + Path(working_dir) / f"actual_output_{m}_{weight_format}.npy" ) + try: + save_tensor(actual_output_path, actual) + except Exception as e: + logger.error( + "Failed to save actual output tensor to {}: {}", + actual_output_path, + e, + ) + else: + actual_output_path = None mismatched_ppm = mismatched_elements / expected_np.size * 1e6 abs_diff[~mismatched] = 0 # ignore non-mismatched elements @@ -907,8 +935,10 @@ def add_warning_entry(msg: str): + f" at {dict(zip(dims, r_max_idx))}" + f"\n 
Max absolute difference not accounted for by relative tolerance ({rtol:.2e}): {a_max:.2e}" + rf" (= \|{a_actual:.7e} - {a_expected:.7e}\|) at {dict(zip(dims, a_max_idx))}" - + f"\n Saved actual output to {actual_output_path}." ) + if actual_output_path is not None: + msg += f"\n Saved actual output to {actual_output_path}." + if mismatched_ppm > mismatched_tol: add_error_entry(msg) if stop_early: From 4ea018fbce0f00d794d76103ce0024e932bc8d32 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 17 Feb 2026 16:21:13 +0100 Subject: [PATCH 56/56] ignore .lock files in test_empty_cache --- tests/test_cli.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/test_cli.py b/tests/test_cli.py index 035977453..a034ad9c7 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -79,9 +79,15 @@ def test_empty_cache(tmp_path: Path, unet2d_nuclei_broad_model: str): settings.cache_path = tmp_path / "cache" assert not settings.cache_path.exists() _ = load_description(unet2d_nuclei_broad_model, perform_io_checks=False) - assert len(list(settings.cache_path.iterdir())) == 1 + assert ( + len([fn for fn in settings.cache_path.iterdir() if fn.suffix != ".lock"]) + == 1 + ) empty_cache() - assert len(list(settings.cache_path.iterdir())) == 0 + assert ( + len([fn for fn in settings.cache_path.iterdir() if fn.suffix != ".lock"]) + == 0 + ) finally: settings.cache_path = origingal_cache_path