From 0ee305374f9e5f072f46920aa4dee9f38bcc12cb Mon Sep 17 00:00:00 2001 From: Frank Loesche Date: Fri, 13 Mar 2026 15:44:35 -0400 Subject: [PATCH 01/10] start optimizing --- .gitattributes | 2 + examples/all_on.py | 16 + examples/play_pat.py | 14 + examples/stream_all_patterns.py | 29 + pixi.lock | 103 +++- pyproject.toml | 5 + src/arena_interface/__init__.py | 3 +- src/arena_interface/arena_interface.py | 784 +++++++++++++++++-------- src/arena_interface/bench.py | 107 ++-- src/arena_interface/cli.py | 84 ++- 10 files changed, 829 insertions(+), 318 deletions(-) create mode 100644 .gitattributes create mode 100644 examples/all_on.py create mode 100644 examples/play_pat.py create mode 100644 examples/stream_all_patterns.py diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..997504b --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# SCM syntax highlighting & preventing 3-way merges +pixi.lock merge=binary linguist-language=YAML linguist-generated=true -diff diff --git a/examples/all_on.py b/examples/all_on.py new file mode 100644 index 0000000..5ddd4df --- /dev/null +++ b/examples/all_on.py @@ -0,0 +1,16 @@ +import time + +from arena_interface import ArenaInterface + +ai = ArenaInterface(debug=True) +ai.set_ethernet_mode(ip_address="10.103.40.45") + +start_time = time.time() +ai.all_on() +end_time = time.time() +duration = end_time - start_time + +time.sleep(5) +ai.all_off() + +print(f"Duration of all_on() call: {duration:.6f} seconds") diff --git a/examples/play_pat.py b/examples/play_pat.py new file mode 100644 index 0000000..14fde6b --- /dev/null +++ b/examples/play_pat.py @@ -0,0 +1,14 @@ +import time + +from arena_interface import ArenaInterface + +ai = ArenaInterface(debug=True) +ai.set_ethernet_mode(ip_address="10.103.40.45") + +# Measure time for play_pattern call +start_time = time.time() +ai.play_pattern(pattern_id=532, frame_rate=20, runtime_duration=10) +end_time = time.time() + +duration = end_time - start_time 
+print(f"play_pattern() duration: {duration:.6f} seconds") diff --git a/examples/stream_all_patterns.py b/examples/stream_all_patterns.py new file mode 100644 index 0000000..520c3d4 --- /dev/null +++ b/examples/stream_all_patterns.py @@ -0,0 +1,29 @@ +"""Stream each pattern file in patterns/ for 5 seconds.""" + +import os +from pathlib import Path + +from arena_interface import ArenaInterface + +PATTERNS_DIR = Path(__file__).resolve().parent.parent / "patterns" +FRAME_RATE = 0 +RUNTIME_DURATION = 50 # 50 × 100 ms = 5 s + +ip = os.environ.get("ARENA_ETH_IP", "10.103.40.45") +ai = ArenaInterface(debug=True) +ai.set_ethernet_mode(ip_address=ip) + +for pat_file in sorted(PATTERNS_DIR.glob("*.pat")): + print(f"\n--- Streaming {pat_file.name} for 5 s at {FRAME_RATE} Hz ---") + result = ai.stream_frames( + pattern_path=str(pat_file), + frame_rate=FRAME_RATE, + runtime_duration=RUNTIME_DURATION, + analog_out_waveform="constant", + analog_update_rate=1.0, + analog_frequency=0.0, + ) + print(f" frames: {result['frames']}") + print(f" elapsed: {result['elapsed_s']:.2f} s") + print(f" rate: {result['rate_hz']:.1f} Hz") + print(f" tx: {result['tx_mbps']:.2f} Mb/s") diff --git a/pixi.lock b/pixi.lock index 650e25a..11ee540 100644 --- a/pixi.lock +++ b/pixi.lock @@ -17,6 +17,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/c-compiler-1.11.0-h4d9bdce_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/click-8.3.1-pyh8f84b5b_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/conda-gcc-specs-14.3.0-he8ccf15_18.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc-14.3.0-h0dff253_18.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-hbdf3cc3_18.conda @@ -53,9 +54,11 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.1-h35e630c_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pcre2-10.47-haa7fec5_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/perl-5.32.1-7_hd590300_perl5.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyserial-3.5-pyhcf101f3_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.3-h32b2ec7_101_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/schedule-1.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sysroot_linux-64-2.28-h4ee821c_9.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda @@ -64,7 +67,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/38/9c/71336bff6934418dc8d1e8a1644176ac9088068bc571da612767619c97b3/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/6b/e7/237155ae19a9023de7e30ec64e5d99a9431a567407ac21170a046d22a5a3/cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/02/10/5da547df7a391dcde17f59520a231527b8571e6f46fc8efb02ccb370ab12/docutils-0.22.4-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/42/77/de194443bf38daed9452139e960c632b0ef9f9a5dd9ce605fdf18ca9f1b1/id-1.6.1-py3-none-any.whl @@ -84,7 +86,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/07/bc/587a445451b253b285629263eb51c2d8e9bcea4fc97826266d186f96f558/pyserial-3.5-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl @@ -108,6 +109,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/clang-19.1.7-default_h1323312_8.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/clang_impl_osx-64-19.1.7-default_ha1a018a_8.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/clang_osx-64-19.1.7-h8a78ed7_31.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/click-8.3.1-pyh8f84b5b_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/compiler-rt-19.1.7-he914875_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/compiler-rt_osx-64-19.1.7-h138dee1_1.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-64/git-2.53.0-pl5321hd1efe10_0.conda @@ -142,9 +144,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.6.1-hb6871ef_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/pcre2-10.47-h13923f0_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/perl-5.32.1-7_h10d778d_perl5.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyserial-3.5-pyhcf101f3_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/python-3.14.3-h4f44bb5_101_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.3-h68b038d_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/schedule-1.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sdkroot_env_osx-64-26.0-h62b880e_7.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/sigtool-codesign-0.1.3-hc0f2934_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/tapi-1600.0.11.8-h8d8e812_0.conda @@ -154,7 +158,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/c5/0d/84a4380f930db0010168e0aa7b7a8fed9ba1835a8fbb1472bc6d0201d529/build-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/be/0f0fd9bb4a7fa4fb5067fb7d9ac693d4e928d306f80a0d02bde43a7c4aee/charset_normalizer-3.4.5-cp314-cp314-macosx_10_15_universal2.whl - - pypi: https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/02/10/5da547df7a391dcde17f59520a231527b8571e6f46fc8efb02ccb370ab12/docutils-0.22.4-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/42/77/de194443bf38daed9452139e960c632b0ef9f9a5dd9ce605fdf18ca9f1b1/id-1.6.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl @@ -171,7 +174,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/07/bc/587a445451b253b285629263eb51c2d8e9bcea4fc97826266d186f96f558/pyserial-3.5-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl @@ -194,6 +196,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/clang-19.1.7-default_hf9bcbb7_8.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/clang_impl_osx-arm64-19.1.7-default_hc11f16d_8.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/clang_osx-arm64-19.1.7-h75f8d18_31.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/click-8.3.1-pyh8f84b5b_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/compiler-rt-19.1.7-h855ad52_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/compiler-rt_osx-arm64-19.1.7-he32a8d3_1.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/git-2.53.0-pl5321hc9deb11_0.conda @@ -228,9 +231,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.1-hd24854e_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pcre2-10.47-h30297fc_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/perl-5.32.1-7_h4614cfb_perl5.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyserial-3.5-pyhcf101f3_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.3-h4c637c5_101_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.3-h46df422_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/schedule-1.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sdkroot_env_osx-arm64-26.0-ha3f98da_7.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/sigtool-codesign-0.1.3-h98dc951_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tapi-1600.0.11.8-h997e182_0.conda @@ -240,7 +245,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/c5/0d/84a4380f930db0010168e0aa7b7a8fed9ba1835a8fbb1472bc6d0201d529/build-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/be/0f0fd9bb4a7fa4fb5067fb7d9ac693d4e928d306f80a0d02bde43a7c4aee/charset_normalizer-3.4.5-cp314-cp314-macosx_10_15_universal2.whl - - pypi: https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/02/10/5da547df7a391dcde17f59520a231527b8571e6f46fc8efb02ccb370ab12/docutils-0.22.4-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/42/77/de194443bf38daed9452139e960c632b0ef9f9a5dd9ce605fdf18ca9f1b1/id-1.6.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl @@ -257,7 +261,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/07/bc/587a445451b253b285629263eb51c2d8e9bcea4fc97826266d186f96f558/pyserial-3.5-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl @@ -271,6 +274,8 @@ environments: win-64: - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-h0ad9c76_9.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-h4c7d964_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/click-8.3.1-pyha7b4d00_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/git-2.53.0-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.7.4-hac47afa_0.conda - conda: 
https://conda.anaconda.org/conda-forge/win-64/libffi-3.5.2-h3d046cb_0.conda @@ -279,8 +284,10 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.52.0-hf5d6505_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.6.1-hf411b9b_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyserial-3.5-pyhcf101f3_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.14.3-h4b44e0e_101_cp314.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/schedule-1.2.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h6ed50ae_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.26100.0-h57928b3_0.conda @@ -291,8 +298,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/c5/0d/84a4380f930db0010168e0aa7b7a8fed9ba1835a8fbb1472bc6d0201d529/build-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/40/65/e7c6c77d7aaa4c0d7974f2e403e17f0ed2cb0fc135f77d686b916bf1eead/charset_normalizer-3.4.5-cp314-cp314-win_amd64.whl - - pypi: https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/02/10/5da547df7a391dcde17f59520a231527b8571e6f46fc8efb02ccb370ab12/docutils-0.22.4-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/42/77/de194443bf38daed9452139e960c632b0ef9f9a5dd9ce605fdf18ca9f1b1/id-1.6.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl @@ -309,7 +314,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/07/bc/587a445451b253b285629263eb51c2d8e9bcea4fc97826266d186f96f558/pyserial-3.5-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/de/3d/8161f7711c017e01ac9f008dfddd9410dff3674334c233bde66e7ba65bbf/pywin32_ctypes-0.2.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl @@ -339,7 +343,7 @@ packages: - pypi: ./ name: arena-interface version: 7.0.0 - sha256: ea2f12374a366a27937abf8e75f4accd573f8c683f8a32dd88d2d84ec2612942 + sha256: ca64cadf9210e5274c4792514a497fd37f054e51773da43a50d99602935b3ccc requires_dist: - click>=8.1 - pyserial>=3.5 @@ -754,18 +758,44 @@ packages: purls: [] size: 21135 timestamp: 1769482854554 -- pypi: https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl - name: click - version: 8.3.1 - sha256: 981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6 - requires_dist: - - colorama ; 
sys_platform == 'win32' - requires_python: '>=3.10' -- pypi: https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl - name: colorama - version: 0.4.6 - sha256: 4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 - requires_python: '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*' +- conda: https://conda.anaconda.org/conda-forge/noarch/click-8.3.1-pyh8f84b5b_1.conda + sha256: 38cfe1ee75b21a8361c8824f5544c3866f303af1762693a178266d7f198e8715 + md5: ea8a6c3256897cc31263de9f455e25d9 + depends: + - python >=3.10 + - __unix + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/click?source=hash-mapping + size: 97676 + timestamp: 1764518652276 +- conda: https://conda.anaconda.org/conda-forge/noarch/click-8.3.1-pyha7b4d00_1.conda + sha256: c3bc9a49930fa1c3383a1485948b914823290efac859a2587ca57a270a652e08 + md5: 6cd3ccc98bacfcc92b2bd7f236f01a7e + depends: + - python >=3.10 + - colorama + - __win + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/click?source=hash-mapping + size: 96620 + timestamp: 1764518654675 +- conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda + sha256: ab29d57dc70786c1269633ba3dff20288b81664d3ff8d21af995742e2bb03287 + md5: 962b9857ee8e7018c22f2776ffa0b2d7 + depends: + - python >=3.9 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/colorama?source=hash-mapping + size: 27011 + timestamp: 1733218222191 - conda: https://conda.anaconda.org/conda-forge/osx-64/compiler-rt-19.1.7-he914875_1.conda sha256: 28e5f0a6293acba68ebc54694a2fc40b1897202735e8e8cbaaa0e975ba7b235b md5: e6b9e71e5cb08f9ed0185d31d33a074b @@ -2402,12 +2432,18 @@ packages: version: 1.2.0 sha256: 9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913 requires_python: '>=3.7' -- pypi: 
https://files.pythonhosted.org/packages/07/bc/587a445451b253b285629263eb51c2d8e9bcea4fc97826266d186f96f558/pyserial-3.5-py2.py3-none-any.whl - name: pyserial - version: '3.5' - sha256: c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0 - requires_dist: - - hidapi ; extra == 'cp2110' +- conda: https://conda.anaconda.org/conda-forge/noarch/pyserial-3.5-pyhcf101f3_2.conda + sha256: 8c618e8ca376d73133c9971abe45463a48c9cfb529788d609fd19568764941ba + md5: b27a6cd32160c34682ce6053b04c12c6 + depends: + - python >=3.10 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/pyserial?source=hash-mapping + size: 73910 + timestamp: 1767289527295 - pypi: https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl name: pytest version: 9.0.2 @@ -2646,6 +2682,17 @@ packages: version: 0.15.6 sha256: 98893c4c0aadc8e448cfa315bd0cc343a5323d740fe5f28ef8a3f9e21b381f7e requires_python: '>=3.7' +- conda: https://conda.anaconda.org/conda-forge/noarch/schedule-1.2.2-pyhd8ed1ab_1.conda + sha256: c19be64e5e2b79a9910a41cce8f42a4f47e161c12f9707b04a33b9341df6c7c6 + md5: cedcd7606497aff90b91134046d44370 + depends: + - python >=3.9 + license: MIT + license_family: MIT + purls: + - pkg:pypi/schedule?source=hash-mapping + size: 17298 + timestamp: 1735043793005 - conda: https://conda.anaconda.org/conda-forge/noarch/sdkroot_env_osx-64-26.0-h62b880e_7.conda sha256: 7e7e2556978bc9bd9628c6e39138c684082320014d708fbca0c9050df98c0968 md5: 68a978f77c0ba6ca10ce55e188a21857 diff --git a/pyproject.toml b/pyproject.toml index 2d8e0c8..75b81a4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -83,6 +83,9 @@ platforms = ["linux-64", "osx-64", "osx-arm64", "win-64"] [tool.pixi.dependencies] git = "*" +pyserial = ">=3.5,<4" +click = ">=8.3.1,<9" +schedule = ">=1.2.2,<2" [tool.pixi.pypi-dependencies] "arena-interface" = { path = ".", editable = true } @@ -107,6 +110,8 @@ qtools-install = "python 
tools/quantum_leaps_tools.py qtools-install" qspy = "python tools/quantum_leaps_tools.py qspy" bench = "arena-interface bench" bench-full = "arena-interface bench --stream-path patterns/pat0004.pat" +bench-max-rate = "arena-interface bench --stream-path patterns/pat0004.pat --stream-max-rate" +bench-max-rate-smoke = "arena-interface bench --cmd-iters 250 --spf-seconds 2 --stream-path patterns/pat0004.pat --stream-seconds 2 --stream-max-rate --stream-max-rate-seconds 2" bench-smoke = "arena-interface bench --cmd-iters 250 --spf-seconds 2 --stream-path patterns/pat0004.pat --stream-seconds 2" bench-persistent = "arena-interface bench --cmd-connect-mode persistent" bench-new-connection = "arena-interface bench --cmd-connect-mode new_connection" diff --git a/src/arena_interface/__init__.py b/src/arena_interface/__init__.py index d202581..95d9f4c 100644 --- a/src/arena_interface/__init__.py +++ b/src/arena_interface/__init__.py @@ -9,10 +9,11 @@ __url__, __version__, ) -from .arena_interface import ArenaInterface +from .arena_interface import ArenaInterface, CommandTimeouts __all__ = [ "ArenaInterface", + "CommandTimeouts", "__author__", "__copyright__", "__description__", diff --git a/src/arena_interface/arena_interface.py b/src/arena_interface/arena_interface.py index cf336a3..8c5de8b 100644 --- a/src/arena_interface/arena_interface.py +++ b/src/arena_interface/arena_interface.py @@ -1,4 +1,5 @@ """Python interface and CLI for the Reiser Lab ArenaController.""" + from __future__ import annotations import atexit @@ -16,7 +17,8 @@ import sys import time from contextlib import contextmanager -from typing import Callable +from dataclasses import dataclass +from typing import Callable, cast try: import serial @@ -48,6 +50,60 @@ StatusCallback = Callable[[str], None] +@dataclass +class CommandTimeouts: + """Per-command-category response timeouts. 
+ + Inspired by PanelsController.m's ``expectResponse(..., timeout)`` pattern + where each MATLAB command passes its own timeout value. The defaults + below mirror the values chosen in the MATLAB implementation. + + All values are in **seconds**. A value of ``None`` means "use the + instance's transport-level default" (which itself may be ``None`` for + blocking-forever). + + Attributes + ---------- + fast_cmd_s: + Quick request/response commands: ``all_on``, ``display_reset``, + ``set_refresh_rate``, ``get_*``, ``update_pattern_frame``, + ``show_pattern_frame``, ``reset_perf_stats``, etc. + PanelsController.m uses 0.1 s for these. + slow_cmd_s: + Commands whose firmware-side processing takes noticeably longer: + ``all_off``, ``stop_display``. + PanelsController.m uses 0.3 s. + mode_switch_s: + Heavy mode changes such as ``switch_grayscale``. + PanelsController.m uses 2.0 s ("This takes very long"). + stream_frame_s: + Timeout for a single ``STREAM_FRAME`` (0x32) round-trip inside the + ``stream_frames`` loop. There is no direct MATLAB analog (the + ``streamFrame`` method is commented out) but 0.5 s gives comfortable + headroom at ≥200 Hz frame rates. + play_cmd_s: + Timeout for the *initial* ``play_pattern`` / ``play_pattern_analog_closed_loop`` + command exchange (before the blocking wait for completion). + PanelsController.m uses 0.1–0.2 s for comparable initial acks. 
+ """ + + fast_cmd_s: float | None = 0.1 + slow_cmd_s: float | None = 0.3 + mode_switch_s: float | None = 2.0 + stream_frame_s: float | None = 0.5 + play_cmd_s: float | None = 0.2 + + +class _Sentinel: + """Sentinel type used to distinguish 'caller did not pass timeout_s' + from 'caller explicitly passed None' (which means block indefinitely).""" + + __slots__ = () + + +_SENTINEL: _Sentinel = _Sentinel() + + class ArenaInterface: """Python interface to the Reiser lab ArenaController.""" @@ -60,11 +116,24 @@ def __init__( keepalive: bool = True, socket_timeout_s: float | None = SOCKET_TIMEOUT, serial_timeout_s: float | None = SERIAL_TIMEOUT, + command_timeouts: CommandTimeouts | None = None, ): - """Initialize an ArenaInterface instance.""" + """Initialize an ArenaInterface instance. + + Parameters + ---------- + command_timeouts: + Per-command-category response timeouts. When ``None`` (the + default) a :class:`CommandTimeouts` with sensible defaults + (mirroring PanelsController.m) is created automatically. Pass + an explicit instance to override individual categories, or pass + ``CommandTimeouts(fast_cmd_s=None, slow_cmd_s=None, ...)`` to + fall back to the transport-level ``socket_timeout_s`` for every + command. 
+ """ self._debug = bool(debug) self._serial = None - self._ethernet_ip_address = '' + self._ethernet_ip_address = "" self._ethernet_socket: socket.socket | None = None self._socket_reconnects: int = 0 self._socket_last_error: str | None = None @@ -76,6 +145,9 @@ def __init__( self._keepalive = bool(keepalive) self._socket_timeout_s = self._coerce_timeout(socket_timeout_s) self._serial_timeout_s = self._coerce_timeout(serial_timeout_s) + self.command_timeouts = ( + command_timeouts if command_timeouts is not None else CommandTimeouts() + ) atexit.register(self._exit) def __enter__(self): @@ -183,7 +255,9 @@ def _safe_all_off( except Exception as exc: message = self._format_exception(exc) self._socket_last_error = message - self._bench_emit_status(status_callback, f"[bench] {context}: ALL_OFF failed: {message}") + self._bench_emit_status( + status_callback, f"[bench] {context}: ALL_OFF failed: {message}" + ) self._close_ethernet_socket() return message @@ -271,7 +345,6 @@ def _connect_ethernet_socket(self, repeat_count: int = 10, reuse: bool = True) - raise last_exc if last_exc is not None else ConnectionRefusedError() - def _recv_exact(self, ethernet_socket: socket.socket, n: int) -> bytes: """Receive exactly n bytes from a TCP socket or raise on EOF.""" data = b"" @@ -296,7 +369,15 @@ def _read(self, transport, n: int) -> bytes: if len(data) != n: raise TimeoutError(f"serial read short: expected {n}, got {len(data)}") return data - def _send_and_receive(self, cmd, ethernet_socket=None, *, return_timings: bool = False): + + def _send_and_receive( + self, + cmd, + ethernet_socket=None, + *, + return_timings: bool = False, + timeout_s: float | None | _Sentinel = _SENTINEL, + ): """Send a command and wait for a binary response. 
If no socket is provided and we're in Ethernet mode, this reuses a @@ -308,43 +389,71 @@ def _send_and_receive(self, cmd, ethernet_socket=None, *, return_timings: bool = If True, return a tuple: (payload_bytes, send_ms, recv_ms), where send_ms is time spent in send/write calls and recv_ms is time spent waiting for and reading the response bytes. + timeout_s: + Per-call response timeout override. When set, the socket (or + serial) timeout is temporarily changed for this exchange and + then restored. ``None`` means "block indefinitely". The + special internal sentinel (the default) means "use the + instance's transport-level default and do not touch the + timeout at all". """ + use_per_call_timeout = not isinstance(timeout_s, _Sentinel) + _effective_timeout_s = cast("float | None", timeout_s) if use_per_call_timeout else None + if self._serial: - t0 = time.perf_counter_ns() - if isinstance(cmd, str): - self._serial.write(cmd.encode()) - else: - self._serial.write(cmd) - t1 = time.perf_counter_ns() - resp_len = self._serial.read(1) - if not resp_len: - raise TimeoutError("serial response length timed out") - response = resp_len + self._serial.read(int(resp_len[0])) - t2 = time.perf_counter_ns() - payload = response[3:] - if return_timings: - return payload, (t1 - t0) / 1e6, (t2 - t1) / 1e6 - return payload + prev_serial_timeout = self._serial.timeout if use_per_call_timeout else None + if use_per_call_timeout: + self._serial.timeout = self._coerce_timeout(_effective_timeout_s) + try: + t0 = time.perf_counter_ns() + if isinstance(cmd, str): + self._serial.write(cmd.encode()) + else: + self._serial.write(cmd) + t1 = time.perf_counter_ns() + resp_len = self._serial.read(1) + if not resp_len: + raise TimeoutError("serial response length timed out") + response = resp_len + self._serial.read(int(resp_len[0])) + t2 = time.perf_counter_ns() + payload = response[3:] + if return_timings: + return payload, (t1 - t0) / 1e6, (t2 - t1) / 1e6 + return payload + finally: + if 
use_per_call_timeout: + self._serial.timeout = prev_serial_timeout # Ethernet - sock = ethernet_socket if (ethernet_socket is not None) else self._connect_ethernet_socket(reuse=True) + sock = ( + ethernet_socket + if (ethernet_socket is not None) + else self._connect_ethernet_socket(reuse=True) + ) def _do_io(s: socket.socket): - t0 = time.perf_counter_ns() - if isinstance(cmd, str): - s.sendall(cmd.encode()) - else: - s.sendall(cmd) - t1 = time.perf_counter_ns() + prev_sock_timeout = s.gettimeout() if use_per_call_timeout else None + if use_per_call_timeout: + s.settimeout(self._coerce_timeout(_effective_timeout_s)) + try: + t0 = time.perf_counter_ns() + if isinstance(cmd, str): + s.sendall(cmd.encode()) + else: + s.sendall(cmd) + t1 = time.perf_counter_ns() - resp_len = self._recv_exact(s, 1) - payload = self._recv_exact(s, int(resp_len[0])) - t2 = time.perf_counter_ns() + resp_len = self._recv_exact(s, 1) + payload = self._recv_exact(s, int(resp_len[0])) + t2 = time.perf_counter_ns() - out = (resp_len + payload)[3:] - if return_timings: - return out, (t1 - t0) / 1e6, (t2 - t1) / 1e6 - return out + out = (resp_len + payload)[3:] + if return_timings: + return out, (t1 - t0) / 1e6, (t2 - t1) / 1e6 + return out + finally: + if use_per_call_timeout: + s.settimeout(prev_sock_timeout) # If we're using the persistent socket, allow one reconnect attempt. 
attempts = 1 if (ethernet_socket is not None) else 2 @@ -361,13 +470,15 @@ def _do_io(s: socket.socket): sock = self._connect_ethernet_socket(reuse=True) raise ConnectionError("failed to send/receive over Ethernet after reconnect") + def _send_and_receive_stream( - self, - stream_header: bytes, - frame_chunked: list[bytes], - ethernet_socket: socket.socket | None = None, - *, - return_timings: bool = False, + self, + stream_header: bytes, + frame_chunked: list[bytes], + ethernet_socket: socket.socket | None = None, + *, + return_timings: bool = False, + timeout_s: float | None | _Sentinel = _SENTINEL, ): """Send a stream frame (header + payload) and wait for response. @@ -380,24 +491,39 @@ def _send_and_receive_stream( instance's persistent Ethernet socket. return_timings: If True, return (payload_bytes, send_ms, recv_ms). + timeout_s: + Per-call response timeout override (see ``_send_and_receive``). """ - sock = ethernet_socket if (ethernet_socket is not None) else self._connect_ethernet_socket(reuse=True) + use_per_call_timeout = not isinstance(timeout_s, _Sentinel) + _effective_timeout_s = cast("float | None", timeout_s) if use_per_call_timeout else None + sock = ( + ethernet_socket + if (ethernet_socket is not None) + else self._connect_ethernet_socket(reuse=True) + ) def _do_io(s: socket.socket): - t0 = time.perf_counter_ns() - s.sendall(stream_header) - for chunk in frame_chunked: - s.sendall(chunk) - t1 = time.perf_counter_ns() + prev_sock_timeout = s.gettimeout() if use_per_call_timeout else None + if use_per_call_timeout: + s.settimeout(self._coerce_timeout(_effective_timeout_s)) + try: + t0 = time.perf_counter_ns() + s.sendall(stream_header) + for chunk in frame_chunked: + s.sendall(chunk) + t1 = time.perf_counter_ns() - resp_len = self._recv_exact(s, 1) - payload = self._recv_exact(s, int(resp_len[0])) - t2 = time.perf_counter_ns() + resp_len = self._recv_exact(s, 1) + payload = self._recv_exact(s, int(resp_len[0])) + t2 = time.perf_counter_ns() - out = 
(resp_len + payload)[3:] - if return_timings: - return out, (t1 - t0) / 1e6, (t2 - t1) / 1e6 - return out + out = (resp_len + payload)[3:] + if return_timings: + return out, (t1 - t0) / 1e6, (t2 - t1) / 1e6 + return out + finally: + if use_per_call_timeout: + s.settimeout(prev_sock_timeout) # If we're using the persistent socket, allow one reconnect attempt. attempts = 1 if (ethernet_socket is not None) else 2 @@ -432,7 +558,7 @@ def set_serial_mode(self, port, baudrate=SERIAL_BAUDRATE): ) self._close_ethernet_socket() - self._ethernet_ip_address = '' + self._ethernet_ip_address = "" if self._serial: self._serial.close() @@ -443,7 +569,6 @@ def set_serial_mode(self, port, baudrate=SERIAL_BAUDRATE): self._serial.open() return self._serial.is_open - def _close_ethernet_socket(self): """Close and forget the persistent Ethernet socket (if any).""" if self._ethernet_socket is not None: @@ -465,81 +590,91 @@ def close(self): def all_off(self): """Turn all panels off.""" - self._send_and_receive(b'\x01\x00') + self._send_and_receive(b"\x01\x00", timeout_s=self.command_timeouts.slow_cmd_s) def display_reset(self): """Reset arena.""" - self._send_and_receive(b'\x01\x01') + self._send_and_receive(b"\x01\x01", timeout_s=self.command_timeouts.fast_cmd_s) def switch_grayscale(self, grayscale_index): """Switches grayscale value. 
grayscale_index: 0=binary, 1=grayscale""" - cmd_bytes = struct.pack(' bytes: """Fetch a raw performance stats snapshot (binary payload).""" - return self._send_and_receive(b'\x01\x71', ethernet_socket) + return self._send_and_receive( + b"\x01\x71", ethernet_socket, timeout_s=self.command_timeouts.fast_cmd_s + ) def reset_perf_stats(self, ethernet_socket=None): """Reset performance counters on the device.""" - self._send_and_receive(b'\x01\x72', ethernet_socket) + self._send_and_receive( + b"\x01\x72", ethernet_socket, timeout_s=self.command_timeouts.fast_cmd_s + ) def all_on(self): """Turn all panels on.""" - self._send_and_receive(b'\x01\xff') + self._send_and_receive(b"\x01\xff", timeout_s=self.command_timeouts.fast_cmd_s) def stream_frame(self, path, frame_index, analog_output_value=0): """Stream frame in pattern file.""" - self._debug_print('pattern path: ', path) - with open(path, mode='rb') as f: + self._debug_print("pattern path: ", path) + with open(path, mode="rb") as f: content = f.read() - pattern_header = struct.unpack(' (frame_count - 1): frame_index = frame_count - 1 - self._debug_print('frame_index: ', frame_index) - frame_len = len(frames)//frame_count + self._debug_print("frame_index: ", frame_index) + frame_len = len(frames) // frame_count frame_start = frame_index * frame_len # self._debug_print('frame_start: ', frame_start) frame_end = frame_start + frame_len @@ -662,12 +820,12 @@ def stream_frame(self, path, frame_index, analog_output_value=0): frame = frames[frame_start:frame_end] data_len = len(frame) # self._debug_print('data_len: ', data_len) - frame_header = struct.pack('][frame bytes...] 
# @@ -730,10 +893,14 @@ def stream_frames( # Try ".pattern" if file_size >= 4: frame_size = struct.unpack(" 0 and ((file_size - 4) % frame_size == 0): + if ( + 0 < frame_size <= 65535 + and (file_size - 4) > 0 + and ((file_size - 4) % frame_size == 0) + ): num_frames = int((file_size - 4) / frame_size) frames = [ - file_bytes[4 + (i * frame_size): 4 + ((i + 1) * frame_size)] + file_bytes[4 + (i * frame_size) : 4 + ((i + 1) * frame_size)] for i in range(num_frames) ] @@ -759,30 +926,35 @@ def stream_frames( ) num_frames = int(frame_count) - frames = [blob[i * frame_size:(i + 1) * frame_size] for i in range(num_frames)] + frames = [blob[i * frame_size : (i + 1) * frame_size] for i in range(num_frames)] runtime_duration_s = float(runtime_duration) / float(RUNTIME_DURATION_PER_SECOND) frames_target = int(runtime_duration_s * float(frame_rate)) if frame_rate else 0 frame_period_ns = int((1.0 / float(frame_rate)) * 1e9) if frame_rate else 0 - analog_update_period_ns = int((1.0 / float(analog_update_rate)) * 1e9) if analog_update_rate else 0 + analog_update_period_ns = ( + int((1.0 / float(analog_update_rate)) * 1e9) if analog_update_rate else 0 + ) # Map waveform output [-1..1] into a conservative 12-bit-ish range. 
analog_amplitude = (ANALOG_OUTPUT_VALUE_MAX - ANALOG_OUTPUT_VALUE_MIN) / 2.0 analog_offset = (ANALOG_OUTPUT_VALUE_MAX + ANALOG_OUTPUT_VALUE_MIN) / 2.0 def analog_waveform_for(name: str): - if name == 'sin': + if name == "sin": return math.sin - if name == 'square': + if name == "square": return lambda x: 1.0 if math.sin(x) >= 0 else -1.0 - if name == 'sawtooth': + if name == "sawtooth": return lambda x: 2.0 * (x / (2.0 * math.pi) - math.floor(0.5 + x / (2.0 * math.pi))) - if name == 'triangle': - return lambda x: 2.0 * abs(2.0 * (x / (2.0 * math.pi) - math.floor(0.5 + x / (2.0 * math.pi)))) - 1.0 - if name == 'constant': + if name == "triangle": + return lambda x: ( + 2.0 * abs(2.0 * (x / (2.0 * math.pi) - math.floor(0.5 + x / (2.0 * math.pi)))) + - 1.0 + ) + if name == "constant": return lambda x: 0.0 - raise ValueError(f'Invalid analog output waveform: {name}') + raise ValueError(f"Invalid analog output waveform: {name}") # Ensure persistent socket is established once for the run. self._connect_ethernet_socket(reuse=True) @@ -837,7 +1009,10 @@ def analog_waveform_for(name: str): # Analog output update (optional) now_ns = time.perf_counter_ns() - if analog_update_period_ns and (now_ns - last_analog_update_ns) >= analog_update_period_ns: + if ( + analog_update_period_ns + and (now_ns - last_analog_update_ns) >= analog_update_period_ns + ): t_s = (now_ns - start_time_ns) / 1e9 analog_phase = (t_s * float(analog_frequency)) * (2.0 * math.pi) analog_output_value_f = analog_amplitude * float(wf(analog_phase)) + analog_offset @@ -853,33 +1028,44 @@ def analog_waveform_for(name: str): # Stream frame header: cmd(0x32), data_len(uint16), analog(uint16), reserved(uint16) data_len = len(frame) - stream_header = struct.pack(' 0 else 0.0 if frames_target: - self._bench_emit_status(status_callback, f'[bench] stream_frames: {frames_streamed}/{frames_target} frames ({rate_hz:.1f} Hz)') + self._bench_emit_status( + status_callback, + f"[bench] stream_frames: 
{frames_streamed}/{frames_target} frames ({rate_hz:.1f} Hz)", + ) else: - self._bench_emit_status(status_callback, f'[bench] stream_frames: {frames_streamed} frames ({rate_hz:.1f} Hz)') + self._bench_emit_status( + status_callback, + f"[bench] stream_frames: {frames_streamed} frames ({rate_hz:.1f} Hz)", + ) next_progress_ns += int(progress_interval_s * 1e9) if frame_period_ns: next_frame_deadline_ns += frame_period_ns i += 1 if stop_after: - self._send_and_receive(bytes([1, 0])) + self._send_and_receive(bytes([1, 0]), timeout_s=self.command_timeouts.slow_cmd_s) elapsed_s = (time.perf_counter_ns() - start_time_ns) / 1e9 rate_hz = frames_streamed / elapsed_s if elapsed_s > 0 else 0.0 mbps = (bytes_sent * 8) / (elapsed_s * 1e6) if elapsed_s > 0 else 0.0 - self._bench_emit_status(status_callback, f'[bench] stream_frames: frames={frames_streamed} elapsed_s={elapsed_s:.3f} rate={rate_hz:.1f} Hz tx={mbps:.2f} Mb/s') + self._bench_emit_status( + status_callback, + f"[bench] stream_frames: frames={frames_streamed} elapsed_s={elapsed_s:.3f} rate={rate_hz:.1f} Hz tx={mbps:.2f} Mb/s", + ) result = { "frames": frames_streamed, @@ -926,11 +1121,11 @@ def analog_waveform_for(name: str): def all_off_str(self): """Turn all panels off with string.""" - self._send_and_receive('ALL_OFF') + self._send_and_receive("ALL_OFF", timeout_s=self.command_timeouts.slow_cmd_s) def all_on_str(self): """Turn all panels on with string.""" - self._send_and_receive('ALL_ON') + self._send_and_receive("ALL_ON", timeout_s=self.command_timeouts.fast_cmd_s) # --------------------------------------------------------------------- # Benchmark helpers (host-side) @@ -1034,7 +1229,9 @@ def bench_metadata(self, label: str | None = None) -> dict: "package_version": pkg_version, "transport": "serial" if (self._serial is not None) else "ethernet", "ethernet_ip": self._ethernet_ip_address if self._ethernet_ip_address else None, - "serial_port": getattr(self._serial, "port", None) if self._serial is not None else 
None, + "serial_port": getattr(self._serial, "port", None) + if self._serial is not None + else None, "tcp_nodelay": self._tcp_nodelay, "tcp_quickack_requested": self._tcp_quickack_requested, "tcp_quickack_supported": self._tcp_quickack_supported, @@ -1059,7 +1256,9 @@ def bench_metadata(self, label: str | None = None) -> dict: peer_ip = meta.get("ethernet_ip") if peer_ip: try: - route_out = subprocess.check_output(["ip", "route", "get", str(peer_ip)], text=True).strip() + route_out = subprocess.check_output( + ["ip", "route", "get", str(peer_ip)], text=True + ).strip() meta["net_route_get"] = route_out m = re.search(r"\bdev\s+(\S+)", route_out) iface = m.group(1) if m else None @@ -1105,7 +1304,6 @@ def _read_sysfs(name: str) -> str | None: # Non-Linux hosts (or minimal containers) may not have `ip` or sysfs. pass - return meta def bench_connect_time(self, iters: int = 200) -> dict: @@ -1140,13 +1338,13 @@ def bench_connect_time(self, iters: int = 200) -> dict: return summary def bench_command_rtt( - self, - iters: int = 2000, - wrap_mode: bool = True, - connect_mode: str = "persistent", - warmup: int = 20, - progress_interval_s: float = 1.0, - status_callback: StatusCallback | None = None, + self, + iters: int = 2000, + wrap_mode: bool = True, + connect_mode: str = "persistent", + warmup: int = 20, + progress_interval_s: float = 1.0, + status_callback: StatusCallback | None = None, ) -> dict: """Measure host-side RTT for a small request/response command. 
@@ -1179,7 +1377,9 @@ def bench_command_rtt( reconnects_before = self.get_socket_reconnects(reset=False) cleanup_error: str | None = None - progress_step_ns = max(1, int(float(progress_interval_s) * 1e9)) if progress_interval_s > 0 else 0 + progress_step_ns = ( + max(1, int(float(progress_interval_s) * 1e9)) if progress_interval_s > 0 else 0 + ) try: if wrap_mode: @@ -1203,9 +1403,7 @@ def bench_command_rtt( bytes_rx = 0 errors = 0 measure_start_ns = time.perf_counter_ns() - next_progress_ns = ( - measure_start_ns + progress_step_ns if progress_step_ns > 0 else None - ) + next_progress_ns = measure_start_ns + progress_step_ns if progress_step_ns > 0 else None for iteration in range(int(iters)): if connect_mode == "persistent": @@ -1214,7 +1412,7 @@ def bench_command_rtt( t1 = time.perf_counter_ns() rtts_ms.append((t1 - t0) / 1e6) bytes_tx += 2 # b'q' - bytes_rx += (len(payload) + 3) # status + echo + payload (length excluded) + bytes_rx += len(payload) + 3 # status + echo + payload (length excluded) else: s = self._open_ethernet_socket() try: @@ -1223,7 +1421,7 @@ def bench_command_rtt( t1 = time.perf_counter_ns() rtts_ms.append((t1 - t0) / 1e6) bytes_tx += 2 - bytes_rx += (len(payload) + 3) + bytes_rx += len(payload) + 3 except Exception: errors += 1 finally: @@ -1270,16 +1468,16 @@ def bench_command_rtt( return summary def bench_spf_updates( - self, - rate_hz: float = 200.0, - seconds: float = 5.0, - pattern_id: int = 10, - frame_min: int = 0, - frame_max: int = 1000, - pacing: str = "target", - warmup: int = 0, - progress_interval_s: float = 1.0, - status_callback: StatusCallback | None = None, + self, + rate_hz: float = 200.0, + seconds: float = 5.0, + pattern_id: int = 10, + frame_min: int = 0, + frame_max: int = 1000, + pacing: str = "target", + warmup: int = 0, + progress_interval_s: float = 1.0, + status_callback: StatusCallback | None = None, ) -> dict: """Benchmark SHOW_PATTERN_FRAME update performance (SPF). 
@@ -1305,7 +1503,9 @@ def bench_spf_updates( reconnects_before = self.get_socket_reconnects(reset=False) cleanup_error: str | None = None - progress_step_ns = max(1, int(float(progress_interval_s) * 1e9)) if progress_interval_s > 0 else 0 + progress_step_ns = ( + max(1, int(float(progress_interval_s) * 1e9)) if progress_interval_s > 0 else 0 + ) try: self.reset_perf_stats() @@ -1423,17 +1623,17 @@ def bench_spf_updates( return summary def bench_stream_frames( - self, - pattern_path: str, - frame_rate: float = 200.0, - seconds: float = 5.0, - stream_cmd_coalesced: bool = True, - progress_interval_s: float = 1.0, - analog_out_waveform: str = "constant", - analog_update_rate: float = 1.0, - analog_frequency: float = 0.0, - collect_timings: bool = True, - status_callback: StatusCallback | None = None, + self, + pattern_path: str, + frame_rate: float = 200.0, + seconds: float = 5.0, + stream_cmd_coalesced: bool = True, + progress_interval_s: float = 1.0, + analog_out_waveform: str = "constant", + analog_update_rate: float = 1.0, + analog_frequency: float = 0.0, + collect_timings: bool = True, + status_callback: StatusCallback | None = None, ) -> dict: """Benchmark STREAM_FRAME throughput using `stream_frames()`. @@ -1488,27 +1688,120 @@ def bench_stream_frames( return stats + def bench_stream_frames_max_rate( + self, + pattern_path: str, + seconds: float = 5.0, + stream_cmd_coalesced: bool = True, + progress_interval_s: float = 1.0, + collect_timings: bool = True, + status_callback: StatusCallback | None = None, + ) -> dict: + """Benchmark STREAM_FRAME throughput with no pacing (as fast as possible). + + This is identical to :meth:`bench_stream_frames` but forces + ``frame_rate=0`` so frames are sent back-to-back with no sleep/spin + pacing. The achieved rate is therefore bounded only by the host TCP + stack, the network, and the firmware's ability to accept and process + frames. 
+ + Use this to find the **maximum sustainable throughput** of a given + pattern size across firmware builds, Ethernet stacks, switches, and + host machines. + + Parameters + ---------- + pattern_path: + Path to a ``.pattern`` or ``.pat`` file (same formats as + :meth:`stream_frames`). + seconds: + Wall-clock duration of the streaming burst. + stream_cmd_coalesced: + If True, send the stream header and frame payload in a single + ``sendall``; otherwise chunk the payload. + progress_interval_s: + How often to emit progress via *status_callback*. + collect_timings: + If True, record per-frame send/recv timing breakdowns. + status_callback: + Optional callable for progress/status messages. + + Returns + ------- + dict + The same structure as :meth:`bench_stream_frames` with an extra + ``"pacing": "max"`` key so results are easy to distinguish from + rate-limited runs. + """ + # Clear any prior socket error so results are per-run. + self._socket_last_error = None + + reconnects_before = self.get_socket_reconnects(reset=False) + cleanup_error: str | None = None + + try: + self.reset_perf_stats() + + runtime_duration = int(round(float(seconds) * float(RUNTIME_DURATION_PER_SECOND))) + stats = self.stream_frames( + str(pattern_path), + 0, # frame_rate=0 → no pacing, send as fast as possible + runtime_duration, + "constant", # analog waveform irrelevant at max rate + 0, # analog_update_rate=0 → disabled + 0.0, # analog_frequency + stream_cmd_coalesced=bool(stream_cmd_coalesced), + progress_interval_s=float(progress_interval_s), + collect_timings=bool(collect_timings), + status_callback=status_callback, + stop_after=False, + ) + stats.update( + { + "pacing": "max", + "pattern_path": str(pattern_path), + "frame_rate": 0, + "seconds": float(seconds), + "stream_cmd_coalesced": bool(stream_cmd_coalesced), + "reconnects": int(self.get_socket_reconnects(reset=False) - reconnects_before), + "last_socket_error": self._socket_last_error, + } + ) + finally: + cleanup_error = 
self._safe_all_off( + status_callback=status_callback, + context="stream_frames_max_rate cleanup", + ) + + if cleanup_error is not None: + raise RuntimeError(f"stream_frames_max_rate cleanup failed: {cleanup_error}") + + return stats + def bench_suite( - self, - label: str | None = None, - *, - include_connect: bool = False, - connect_iters: int = 200, - cmd_iters: int = 2000, - cmd_connect_mode: str = "persistent", - spf_rate: float = 200.0, - spf_seconds: float = 5.0, - spf_pattern_id: int = 10, - spf_frame_min: int = 0, - spf_frame_max: int = 1000, - spf_pacing: str = "target", - stream_path: str | None = None, - stream_rate: float = 200.0, - stream_seconds: float = 5.0, - stream_coalesced: bool = True, - progress_interval_s: float = 1.0, - bench_io_timeout_s: float | None = BENCH_IO_TIMEOUT_S, - status_callback: StatusCallback | None = None, + self, + label: str | None = None, + *, + include_connect: bool = False, + connect_iters: int = 200, + cmd_iters: int = 2000, + cmd_connect_mode: str = "persistent", + spf_rate: float = 200.0, + spf_seconds: float = 5.0, + spf_pattern_id: int = 10, + spf_frame_min: int = 0, + spf_frame_max: int = 1000, + spf_pacing: str = "target", + stream_path: str | None = None, + stream_rate: float = 200.0, + stream_seconds: float = 5.0, + stream_coalesced: bool = True, + stream_max_rate: bool = False, + stream_max_rate_seconds: float = 5.0, + stream_max_rate_coalesced: bool = True, + progress_interval_s: float = 1.0, + bench_io_timeout_s: float | None = BENCH_IO_TIMEOUT_S, + status_callback: StatusCallback | None = None, ) -> dict: """Run a repeatable benchmark suite and return structured results. 
@@ -1633,6 +1926,21 @@ def run_phase(name: str, fn, /, **kwargs) -> bool: ): return self._bench_finalize_suite_results(results) + if ( + stream_path + and stream_max_rate + and not run_phase( + "stream_frames_max_rate", + self.bench_stream_frames_max_rate, + pattern_path=str(stream_path), + seconds=float(stream_max_rate_seconds), + stream_cmd_coalesced=bool(stream_max_rate_coalesced), + progress_interval_s=float(progress_interval_s), + status_callback=status_callback, + ) + ): + return self._bench_finalize_suite_results(results) + return self._bench_finalize_suite_results(results) def _bench_finalize_suite_results(self, results: dict) -> dict: diff --git a/src/arena_interface/bench.py b/src/arena_interface/bench.py index f411644..3b543b9 100644 --- a/src/arena_interface/bench.py +++ b/src/arena_interface/bench.py @@ -7,6 +7,7 @@ - ``ai.bench_command_rtt(...)`` - ``ai.bench_spf_updates(...)`` - ``ai.bench_stream_frames(...)`` +- ``ai.bench_stream_frames_max_rate(...)`` - ``ai.bench_suite(...)`` This module keeps thin wrapper functions for backwards compatibility and for @@ -25,11 +26,11 @@ def bench_connect_time(arena_interface: ArenaInterface, iters: int = 200) -> dic def bench_command_rtt( - arena_interface: ArenaInterface, - iters: int = 2000, - wrap_mode: bool = True, - connect_mode: str = "persistent", - warmup: int = 20, + arena_interface: ArenaInterface, + iters: int = 2000, + wrap_mode: bool = True, + connect_mode: str = "persistent", + warmup: int = 20, ) -> dict[str, Any]: return arena_interface.bench_command_rtt( iters=int(iters), @@ -40,14 +41,14 @@ def bench_command_rtt( def bench_spf_updates( - arena_interface: ArenaInterface, - rate_hz: float = 200.0, - seconds: float = 5.0, - pattern_id: int = 10, - frame_min: int = 0, - frame_max: int = 1000, - pacing: str = "target", - warmup: int = 0, + arena_interface: ArenaInterface, + rate_hz: float = 200.0, + seconds: float = 5.0, + pattern_id: int = 10, + frame_min: int = 0, + frame_max: int = 1000, + pacing: 
str = "target", + warmup: int = 0, ) -> dict[str, Any]: return arena_interface.bench_spf_updates( rate_hz=float(rate_hz), @@ -61,16 +62,16 @@ def bench_spf_updates( def bench_stream_frames( - arena_interface: ArenaInterface, - pattern_path: str, - frame_rate: float = 200.0, - seconds: float = 5.0, - stream_cmd_coalesced: bool = True, - progress_interval_s: float = 1.0, - analog_out_waveform: str = "constant", - analog_update_rate: float = 1.0, - analog_frequency: float = 0.0, - collect_timings: bool = True, + arena_interface: ArenaInterface, + pattern_path: str, + frame_rate: float = 200.0, + seconds: float = 5.0, + stream_cmd_coalesced: bool = True, + progress_interval_s: float = 1.0, + analog_out_waveform: str = "constant", + analog_update_rate: float = 1.0, + analog_frequency: float = 0.0, + collect_timings: bool = True, ) -> dict[str, Any]: return arena_interface.bench_stream_frames( pattern_path=str(pattern_path), @@ -85,25 +86,46 @@ def bench_stream_frames( ) +def bench_stream_frames_max_rate( + arena_interface: ArenaInterface, + pattern_path: str, + seconds: float = 5.0, + stream_cmd_coalesced: bool = True, + progress_interval_s: float = 1.0, + collect_timings: bool = True, +) -> dict[str, Any]: + """Thin wrapper around :meth:`ArenaInterface.bench_stream_frames_max_rate`.""" + return arena_interface.bench_stream_frames_max_rate( + pattern_path=str(pattern_path), + seconds=float(seconds), + stream_cmd_coalesced=bool(stream_cmd_coalesced), + progress_interval_s=float(progress_interval_s), + collect_timings=bool(collect_timings), + ) + + def bench_suite( - arena_interface: ArenaInterface, - label: str | None = None, - *, - include_connect: bool = False, - connect_iters: int = 200, - cmd_iters: int = 2000, - cmd_connect_mode: str = "persistent", - spf_rate: float = 200.0, - spf_seconds: float = 5.0, - spf_pattern_id: int = 10, - spf_frame_min: int = 0, - spf_frame_max: int = 1000, - spf_pacing: str = "target", - stream_path: str | None = None, - stream_rate: 
float = 200.0, - stream_seconds: float = 5.0, - stream_coalesced: bool = True, - progress_interval_s: float = 1.0, + arena_interface: ArenaInterface, + label: str | None = None, + *, + include_connect: bool = False, + connect_iters: int = 200, + cmd_iters: int = 2000, + cmd_connect_mode: str = "persistent", + spf_rate: float = 200.0, + spf_seconds: float = 5.0, + spf_pattern_id: int = 10, + spf_frame_min: int = 0, + spf_frame_max: int = 1000, + spf_pacing: str = "target", + stream_path: str | None = None, + stream_rate: float = 200.0, + stream_seconds: float = 5.0, + stream_coalesced: bool = True, + stream_max_rate: bool = False, + stream_max_rate_seconds: float = 5.0, + stream_max_rate_coalesced: bool = True, + progress_interval_s: float = 1.0, ) -> dict[str, Any]: return arena_interface.bench_suite( label=label, @@ -121,6 +143,9 @@ def bench_suite( stream_rate=float(stream_rate), stream_seconds=float(stream_seconds), stream_coalesced=bool(stream_coalesced), + stream_max_rate=bool(stream_max_rate), + stream_max_rate_seconds=float(stream_max_rate_seconds), + stream_max_rate_coalesced=bool(stream_max_rate_coalesced), progress_interval_s=float(progress_interval_s), ) diff --git a/src/arena_interface/cli.py b/src/arena_interface/cli.py index 3578e5d..38b64fc 100755 --- a/src/arena_interface/cli.py +++ b/src/arena_interface/cli.py @@ -6,8 +6,7 @@ import click -from .arena_interface import ArenaInterface, BENCH_IO_TIMEOUT_S, SERIAL_BAUDRATE - +from .arena_interface import BENCH_IO_TIMEOUT_S, SERIAL_BAUDRATE, ArenaInterface pass_arena_interface = click.make_pass_decorator(ArenaInterface) @@ -82,7 +81,37 @@ def _print_suite_summary( if isinstance(st.get("cmd_rtt_ms"), dict): cmd = st.get("cmd_rtt_ms") or {} send = st.get("send_ms") if isinstance(st.get("send_ms"), dict) else {} - wait = st.get("response_wait_ms") if isinstance(st.get("response_wait_ms"), dict) else {} + wait = ( + st.get("response_wait_ms") if isinstance(st.get("response_wait_ms"), dict) else {} + ) + 
extra = " rtt_p99={p99:.3f} ms (send_p99={sp99:.3f} ms wait_p99={wp99:.3f} ms)".format( + p99=float(cmd.get("p99_ms", float("nan"))), + sp99=float(send.get("p99_ms", float("nan"))), + wp99=float(wait.get("p99_ms", float("nan"))), + ) + + click.echo( + "frames={frames} elapsed_s={elapsed_s:.3f} rate={rate_hz:.1f} Hz tx={tx_mbps:.2f} Mb/s reconnects={reconnects}{extra}".format( + frames=st.get("frames"), + elapsed_s=st.get("elapsed_s"), + rate_hz=st.get("rate_hz"), + tx_mbps=st.get("tx_mbps"), + reconnects=st.get("reconnects"), + extra=extra, + ) + ) + + if stream_requested and ("stream_frames_max_rate" in suite): + click.echo("\n-- stream_frames_max_rate (no pacing) --") + st = suite["stream_frames_max_rate"] + + extra = "" + if isinstance(st.get("cmd_rtt_ms"), dict): + cmd = st.get("cmd_rtt_ms") or {} + send = st.get("send_ms") if isinstance(st.get("send_ms"), dict) else {} + wait = ( + st.get("response_wait_ms") if isinstance(st.get("response_wait_ms"), dict) else {} + ) extra = " rtt_p99={p99:.3f} ms (send_p99={sp99:.3f} ms wait_p99={wp99:.3f} ms)".format( p99=float(cmd.get("p99_ms", float("nan"))), sp99=float(send.get("p99_ms", float("nan"))), @@ -257,8 +286,12 @@ def get_perf_stats(arena_interface: ArenaInterface): show_default=True, help="Include a TCP connect() timing test (Ethernet only).", ) -@click.option("--connect-iters", default=200, show_default=True, help="Iterations for connect() timing test") -@click.option("--cmd-iters", default=2000, show_default=True, help="Iterations for command RTT test") +@click.option( + "--connect-iters", default=200, show_default=True, help="Iterations for connect() timing test" +) +@click.option( + "--cmd-iters", default=2000, show_default=True, help="Iterations for command RTT test" +) @click.option( "--cmd-connect-mode", type=click.Choice(["persistent", "new_connection"], case_sensitive=False), @@ -266,8 +299,12 @@ def get_perf_stats(arena_interface: ArenaInterface): show_default=True, help="Use a persistent socket or 
open/close a new TCP connection per command.", ) -@click.option("--spf-rate", default=200.0, show_default=True, help="Target Hz for update_pattern_frame loop") -@click.option("--spf-seconds", default=5.0, show_default=True, help="Seconds to run update_pattern_frame loop") +@click.option( + "--spf-rate", default=200.0, show_default=True, help="Target Hz for update_pattern_frame loop" +) +@click.option( + "--spf-seconds", default=5.0, show_default=True, help="Seconds to run update_pattern_frame loop" +) @click.option("--spf-pattern-id", default=10, show_default=True) @click.option("--spf-frame-min", default=0, show_default=True) @click.option("--spf-frame-max", default=1000, show_default=True) @@ -284,10 +321,31 @@ def get_perf_stats(arena_interface: ArenaInterface): default=None, help="Optional .pattern or .pat file to stream", ) -@click.option("--stream-rate", default=200.0, show_default=True, help="Target FPS for stream_frames") -@click.option("--stream-seconds", default=5.0, show_default=True, help="Seconds to run stream_frames") +@click.option( + "--stream-rate", default=200.0, show_default=True, help="Target FPS for stream_frames" +) +@click.option( + "--stream-seconds", default=5.0, show_default=True, help="Seconds to run stream_frames" +) @click.option("--stream-coalesced/--stream-chunked", default=True, show_default=True) -@click.option("--progress-interval", default=1.0, show_default=True, help="Progress print interval (seconds)") +@click.option( + "--stream-max-rate/--no-stream-max-rate", + default=False, + show_default=True, + help="Include a max-throughput (no pacing) streaming test. 
Requires --stream-path.", +) +@click.option( + "--stream-max-rate-seconds", + default=5.0, + show_default=True, + help="Seconds to run max-rate stream", +) +@click.option( + "--stream-max-rate-coalesced/--stream-max-rate-chunked", default=True, show_default=True +) +@click.option( + "--progress-interval", default=1.0, show_default=True, help="Progress print interval (seconds)" +) @click.option( "--io-timeout", default=BENCH_IO_TIMEOUT_S, @@ -313,6 +371,9 @@ def bench( stream_rate: float, stream_seconds: float, stream_coalesced: bool, + stream_max_rate: bool, + stream_max_rate_seconds: float, + stream_max_rate_coalesced: bool, progress_interval: float, io_timeout: float, ): @@ -339,6 +400,9 @@ def bench( stream_rate=float(stream_rate), stream_seconds=float(stream_seconds), stream_coalesced=bool(stream_coalesced), + stream_max_rate=bool(stream_max_rate), + stream_max_rate_seconds=float(stream_max_rate_seconds), + stream_max_rate_coalesced=bool(stream_max_rate_coalesced), progress_interval_s=float(progress_interval), bench_io_timeout_s=float(io_timeout), status_callback=click.echo, From cae307c8968402335982f581d9bab97b3c7385d0 Mon Sep 17 00:00:00 2001 From: Frank Loesche Date: Sat, 14 Mar 2026 12:28:59 -0400 Subject: [PATCH 02/10] example mode 2 and mode 3 --- examples/mode2.py | 27 +++++++++++++++++++++++++++ examples/mode3.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 examples/mode2.py create mode 100644 examples/mode3.py diff --git a/examples/mode2.py b/examples/mode2.py new file mode 100644 index 0000000..301eaf8 --- /dev/null +++ b/examples/mode2.py @@ -0,0 +1,27 @@ +"""Play 5 patterns for 5 seconds each using play_pattern. + +Pattern 1 runs at 300 fps; the rest at random rates between 10 and 300 fps. +The 4 additional patterns are chosen randomly from IDs 2–700. 
+""" + +import os +import random + +from arena_interface import ArenaInterface + +PATTERN_IDS = [1] + random.sample(range(2, 701), 4) +RUNTIME_DURATION = 50 # 50 × 100 ms = 5 s + +ip = os.environ.get("ARENA_ETH_IP", "10.103.40.45") +ai = ArenaInterface(debug=True) +ai.set_ethernet_mode(ip_address=ip) + +for pat_id in PATTERN_IDS: + fps = 300 if pat_id == PATTERN_IDS[0] else random.randint(10, 300) + print(f"\n--- Playing pattern {pat_id} at {fps} fps for 5 s ---") + ai.play_pattern( + pattern_id=pat_id, + frame_rate=fps, + runtime_duration=RUNTIME_DURATION, + ) + print(f" Pattern {pat_id} done.") diff --git a/examples/mode3.py b/examples/mode3.py new file mode 100644 index 0000000..4c7a94d --- /dev/null +++ b/examples/mode3.py @@ -0,0 +1,44 @@ +"""Show 5 patterns for 5 seconds each using show_pattern_frame. + +For each pattern, send as many show_pattern_frame commands as possible +at a target rate of 300 Hz with random frame indices between 1 and 15. +""" + +import os +import random +import time + +from arena_interface import ArenaInterface + +PATTERN_IDS = [1] + random.sample(range(2, 701), 4) +DURATION_S = 5.0 +TARGET_RATE_HZ = 300 +FRAME_INDEX_MIN = 1 +FRAME_INDEX_MAX = 15 + +ip = os.environ.get("ARENA_ETH_IP", "10.103.40.45") +ai = ArenaInterface(debug=True) +ai.set_ethernet_mode(ip_address=ip) + +for pat_id in PATTERN_IDS: + print(f"\n--- Showing pattern {pat_id} for {DURATION_S} s at {TARGET_RATE_HZ} Hz ---") + interval = 1.0 / TARGET_RATE_HZ + count = 0 + t_start = time.perf_counter() + deadline = t_start + DURATION_S + + while True: + t_now = time.perf_counter() + if t_now >= deadline: + break + frame_idx = random.randint(FRAME_INDEX_MIN, FRAME_INDEX_MAX) + ai.show_pattern_frame(pattern_id=pat_id, frame_index=frame_idx) + count += 1 + # spin-wait until next slot + next_time = t_start + count * interval + while time.perf_counter() < next_time: + pass + + elapsed = time.perf_counter() - t_start + actual_hz = count / elapsed if elapsed > 0 else 0 + print(f" 
Pattern {pat_id}: {count} frames in {elapsed:.2f} s ({actual_hz:.1f} Hz)") From 98d767b6a148dfea8db9bd6fdf23585071df6eb3 Mon Sep 17 00:00:00 2001 From: Frank Loesche Date: Thu, 19 Mar 2026 10:36:42 -0400 Subject: [PATCH 03/10] reduce expected SD patterns to 10 --- examples/mode2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mode2.py b/examples/mode2.py index 301eaf8..c2f98a9 100644 --- a/examples/mode2.py +++ b/examples/mode2.py @@ -9,7 +9,7 @@ from arena_interface import ArenaInterface -PATTERN_IDS = [1] + random.sample(range(2, 701), 4) +PATTERN_IDS = [1] + random.sample(range(2, 10), 4) RUNTIME_DURATION = 50 # 50 × 100 ms = 5 s ip = os.environ.get("ARENA_ETH_IP", "10.103.40.45") From fe921d16343a65f2153fb70cc914e139470508fa Mon Sep 17 00:00:00 2001 From: Frank Loesche Date: Thu, 19 Mar 2026 10:37:07 -0400 Subject: [PATCH 04/10] reduce expected SD patterns to 10 --- examples/mode3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mode3.py b/examples/mode3.py index 4c7a94d..6ada4fd 100644 --- a/examples/mode3.py +++ b/examples/mode3.py @@ -10,7 +10,7 @@ from arena_interface import ArenaInterface -PATTERN_IDS = [1] + random.sample(range(2, 701), 4) +PATTERN_IDS = [1] + random.sample(range(2, 10), 4) DURATION_S = 5.0 TARGET_RATE_HZ = 300 FRAME_INDEX_MIN = 1 From 0a9e2d2d5f75725307f8fe46897224451db9a878 Mon Sep 17 00:00:00 2001 From: Peter Polidoro Date: Fri, 13 Mar 2026 11:05:23 -0400 Subject: [PATCH 05/10] Update version in publish.yml --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 242f31d..7d82700 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -3,7 +3,7 @@ name: Publish on: push: tags: - - "v*" + - "*" workflow_dispatch: jobs: From 169b4fd27d9f0cb138cc970c1491860c6ed9a757 Mon Sep 17 00:00:00 2001 From: Peter Polidoro Date: Fri, 13 Mar 2026 13:06:41 
-0400 Subject: [PATCH 06/10] docs/workflow/release polish --- .github/workflows/ci.yml | 3 ++ .github/workflows/publish.yml | 8 ++- .gitignore | 2 + CHANGELOG.md | 4 ++ README.md | 91 ++++++++++++++++++++++++++++++++++- RELEASING.md | 24 +++++++-- 6 files changed, 125 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 44fcac9..563b6a8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,9 @@ on: branches: - main +permissions: + contents: read + jobs: test: name: Tests (${{ matrix.python-version }}) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 7d82700..ce817e0 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -3,9 +3,13 @@ name: Publish on: push: tags: - - "*" + - "*.*.*" + - "v*.*.*" workflow_dispatch: +permissions: + contents: read + jobs: build: name: Build distribution @@ -40,7 +44,7 @@ jobs: publish-to-pypi: name: Publish Python distribution to PyPI - if: startsWith(github.ref, 'refs/tags/v') + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') needs: - build runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 935fb43..1b6deeb 100644 --- a/.gitignore +++ b/.gitignore @@ -219,4 +219,6 @@ flycheck_*.el # Bench outputs bench_results*.jsonl bench_matrix*.jsonl +bench_artifacts/ +qspy*.log .DS_Store diff --git a/CHANGELOG.md b/CHANGELOG.md index bb1e809..196212b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,3 +12,7 @@ while still requiring it for serial transport usage - added CI and PyPI Trusted Publishing GitHub Actions workflows - documented a reproducible release process for PyPI and conda-forge +- expanded the README with Pixi benchmark usage, JSONL capture, and QSPY log + collection guidance for performance characterization +- relaxed the publish workflow so release tags can be either `7.0.0` or + `v7.0.0` diff --git a/README.md b/README.md index fccdb34..891d1b2 100644 --- 
a/README.md +++ b/README.md @@ -159,8 +159,21 @@ pixi install pixi run help pixi run check pixi run release-check +pixi run qtools-install +pixi run qspy -c /dev/ttyACM0 -b 115200 +pixi run bench-smoke +pixi run bench-full --json-out bench_results.jsonl ``` +Pixi forwards extra arguments after the task name to the underlying command, so +`pixi run bench-full --json-out bench_results.jsonl --label "lab-a"` works +as expected and appends one JSON result object for that run. + +For the stock transport-agnostic tasks (`all-on`, `all-off`, `bench`, +`bench-smoke`, and `bench-full`), set `ARENA_ETH_IP` or +`ARENA_SERIAL_PORT` in your shell before running the task. This is the +simplest way to choose the transport without rewriting the task command. + ### Plain pip ```sh @@ -170,6 +183,81 @@ python -m build python -m twine check dist/* ``` +## Performance characterization workflow + +`bench_results.jsonl` stores the host-side benchmark results, while QSPY +captures the raw firmware QS stream so you can compare those host-side +measurements with device-side `PERF_*` records such as `PERF_UPD kind=SPF` and +`PERF_NET`. + +A convenient workflow is to keep both artifacts under a single timestamped +directory. + +### Terminal A: start QSPY and capture the raw QS log + +```sh +mkdir -p bench_artifacts/2026-03-13-eth +pixi run qtools-install +pixi run qspy -c /dev/ttyACM0 -b 115200 2>&1 | tee bench_artifacts/2026-03-13-eth/qspy.log +``` + +PowerShell: + +```powershell +New-Item -ItemType Directory -Force bench_artifacts\2026-03-13-eth | Out-Null +pixi run qtools-install +pixi run qspy -c COM3 -b 115200 2>&1 | Tee-Object -FilePath bench_artifacts\2026-03-13-eth\qspy.log +``` + +Leave QSPY running while the benchmark executes in a second terminal. Stop it +after the benchmark completes so the log contains the full run. 
+ +### Terminal B: run the host benchmark and append JSONL results + +```sh +export ARENA_ETH_IP=192.168.10.104 +pixi run bench-full \ + --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl \ + --label "fw=7.0.0 host=$(hostname) transport=ethernet" +``` + +PowerShell: + +```powershell +$env:ARENA_ETH_IP = "192.168.10.104" +pixi run bench-full --json-out bench_artifacts\2026-03-13-eth\bench_results.jsonl --label "fw=7.0.0 host=$env:COMPUTERNAME transport=ethernet" +``` + +`bench-full` runs the suite plus a streaming phase using +`patterns/pat0004.pat`. Use `pixi run bench-smoke` first when you want a short +sanity check before a longer capture. For Ethernet socket-option comparisons, +`pixi run bench-socket-matrix --ethernet 192.168.10.104 --json-out bench_matrix.jsonl` +runs the suite across the predefined TCP tuning variants. + +### Preserve the artifact bundle + +After the run, keep at least: + +- `bench_results.jsonl` for host-side timings and metadata +- `qspy.log` for the raw QS stream +- a filtered `qspy_perf.log` for quick comparison (optional) + +On POSIX you can extract just the performance lines with: + +```sh +grep 'PERF_' bench_artifacts/2026-03-13-eth/qspy.log > bench_artifacts/2026-03-13-eth/qspy_perf.log +``` + +PowerShell: + +```powershell +Select-String -Path bench_artifacts\2026-03-13-eth\qspy.log -Pattern 'PERF_' | ForEach-Object { $_.Line } | Set-Content bench_artifacts\2026-03-13-eth\qspy_perf.log +``` + +For reproducible comparisons, keep the artifact directory together with the +firmware commit or tag, host computer, transport, switch/LAN notes, and the +benchmark label you used. + ## Releasing The repository includes GitHub Actions workflows for CI and PyPI Trusted @@ -179,7 +267,8 @@ Recommended release flow: 1. Update `CHANGELOG.md`. 2. Run `pixi run release-check` or the equivalent pip commands above. -3. Commit the release changes and create a `vX.Y.Z` tag. +3. 
Commit the release changes and create a release tag such as `7.0.0` or + `v7.0.0`. 4. Push the tag to GitHub. 5. The `publish.yml` workflow builds the wheel and sdist, then publishes them to PyPI using Trusted Publishing. diff --git a/RELEASING.md b/RELEASING.md index c121458..3bd7826 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -26,15 +26,31 @@ If you use Pixi and `pyproject.toml` changed, regenerate `pixi.lock` with The repository includes `.github/workflows/publish.yml`, which is intended for GitHub Actions Trusted Publishing. +One-time setup on GitHub: + +1. In the repository settings, create a GitHub Actions environment named + `pypi`. +2. Optionally add protection rules or required reviewers if you want a manual + approval gate before publishing. + One-time setup on PyPI: 1. Create the `arena-interface` project on PyPI if it does not already exist. 2. In the PyPI project settings, add a Trusted Publisher for this GitHub - repository and workflow. -3. Push a tag such as `v7.0.0`. + repository. +3. Use workflow filename `publish.yml` and environment name `pypi`. + +Release trigger: + +1. Push a release tag such as `7.0.0` or `v7.0.0`. +2. GitHub Actions will build `dist/*` and publish to PyPI without storing a + long-lived API token in GitHub secrets. +3. `workflow_dispatch` is kept as a manual build/debug entry point; the actual + PyPI publish step only runs for tag pushes. -After the tag is pushed, GitHub Actions will build `dist/*` and publish to -PyPI without storing a long-lived API token in GitHub secrets. +The normal release path is to let GitHub Actions publish via Trusted +Publishing. Local `twine upload` is only needed if you intentionally want to +bypass that workflow. 
## Conda-forge From 9de590075e204f29cb1c52c6f02b62f43b3be318 Mon Sep 17 00:00:00 2001 From: Peter Polidoro Date: Fri, 13 Mar 2026 16:15:49 -0400 Subject: [PATCH 07/10] Final updates for 7.0.0 --- .gitignore | 1 + CHANGELOG.md | 3 + README.md | 99 +++++- pyproject.toml | 4 + scripts/bench_matrix.py | 9 +- scripts/perf_summary.py | 18 ++ src/arena_interface/arena_interface.py | 230 +++++++++++--- src/arena_interface/cli.py | 27 +- src/arena_interface/perf_summary.py | 397 ++++++++++++++++++++++++ src/arena_interface/perf_summary_cli.py | 70 +++++ tests/test_bench_cleanup.py | 158 ++++++++++ 11 files changed, 958 insertions(+), 58 deletions(-) create mode 100644 scripts/perf_summary.py create mode 100644 src/arena_interface/perf_summary.py create mode 100644 src/arena_interface/perf_summary_cli.py create mode 100644 tests/test_bench_cleanup.py diff --git a/.gitignore b/.gitignore index 1b6deeb..bb7c6f6 100644 --- a/.gitignore +++ b/.gitignore @@ -222,3 +222,4 @@ bench_matrix*.jsonl bench_artifacts/ qspy*.log .DS_Store +perf_summary*.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 196212b..242342e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,3 +16,6 @@ collection guidance for performance characterization - relaxed the publish workflow so release tags can be either `7.0.0` or `v7.0.0` +- added a performance summary tool for benchmark JSONL files and QSPY PERF logs +- added Windows-like benchmark task aliases and matrix entries for socket-tuning comparisons +- downgraded post-run ALL_OFF cleanup failures to recorded benchmark warnings so completed measurements are preserved diff --git a/README.md b/README.md index 891d1b2..8259af9 100644 --- a/README.md +++ b/README.md @@ -139,7 +139,7 @@ its packaging and tooling source of truth. 
- `src/arena_interface/`: importable package, version metadata, and CLI - `tests/`: lightweight tests that do not require hardware -- `scripts/`: developer helper scripts, including benchmark matrix helpers +- `scripts/`: developer helper scripts, including benchmark matrix and performance summary helpers - `tools/`: repository-local helper tools such as the QSPY/QTools wrapper - `patterns/`: example pattern files for streaming tests - `.github/workflows/`: CI and PyPI publishing automation @@ -165,15 +165,40 @@ pixi run bench-smoke pixi run bench-full --json-out bench_results.jsonl ``` +Useful benchmark-oriented Pixi tasks: + +```sh +pixi run bench +pixi run bench-full +pixi run bench-smoke +pixi run bench-windows-like +pixi run bench-full-windows-like +pixi run bench-socket-matrix +pixi run perf-summary --jsonl bench_results.jsonl +``` + Pixi forwards extra arguments after the task name to the underlying command, so `pixi run bench-full --json-out bench_results.jsonl --label "lab-a"` works -as expected and appends one JSON result object for that run. +as expected and appends one JSON result object for that run. For this repository +layout, use `pixi run bench-full --ethernet 192.168.10.194 ...` rather than an +extra separator before `--ethernet`. For the stock transport-agnostic tasks (`all-on`, `all-off`, `bench`, `bench-smoke`, and `bench-full`), set `ARENA_ETH_IP` or `ARENA_SERIAL_PORT` in your shell before running the task. This is the simplest way to choose the transport without rewriting the task command. +Task notes: + +- `bench-full` runs the full suite plus a streaming phase using `patterns/pat0004.pat`. +- `bench-windows-like` disables `TCP_QUICKACK` while leaving `TCP_NODELAY` on. + This is a useful approximation when comparing a Linux host with a Windows-like + Ethernet socket policy. +- `bench-full-windows-like` is the same comparison but also includes the stream phase. +- `bench-no-latency-tuning` disables both `TCP_NODELAY` and `TCP_QUICKACK`. 
+- `bench-socket-matrix` runs several socket-policy variants back-to-back and is + the fastest way to quantify host-side latency tuning effects. + ### Plain pip ```sh @@ -190,8 +215,15 @@ captures the raw firmware QS stream so you can compare those host-side measurements with device-side `PERF_*` records such as `PERF_UPD kind=SPF` and `PERF_NET`. +A few key metrics are usually enough to compare runs: + +- command RTT mean and p99 (`command_rtt`) +- SPF achieved rate and p99 update RTT (`spf_updates`) +- stream frame rate and transmit throughput (`stream_frames`) +- reconnect count plus cleanup status + A convenient workflow is to keep both artifacts under a single timestamped -directory. +directory and then generate a compact summary from them. ### Terminal A: start QSPY and capture the raw QS log @@ -214,27 +246,38 @@ after the benchmark completes so the log contains the full run. ### Terminal B: run the host benchmark and append JSONL results +Linux default socket policy: + ```sh -export ARENA_ETH_IP=192.168.10.104 -pixi run bench-full \ - --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl \ - --label "fw=7.0.0 host=$(hostname) transport=ethernet" +pixi run bench-full --ethernet 192.168.10.194 \ + --label linux-default \ + --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl +``` + +Windows-like comparison with `TCP_QUICKACK` disabled: + +```sh +pixi run bench-full-windows-like --ethernet 192.168.10.194 \ + --label windows-like \ + --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl ``` PowerShell: ```powershell -$env:ARENA_ETH_IP = "192.168.10.104" -pixi run bench-full --json-out bench_artifacts\2026-03-13-eth\bench_results.jsonl --label "fw=7.0.0 host=$env:COMPUTERNAME transport=ethernet" +pixi run bench-full --ethernet 192.168.10.194 --label "windows-host" --json-out bench_artifacts\2026-03-13-eth\bench_results.jsonl ``` -`bench-full` runs the suite plus a streaming phase using -`patterns/pat0004.pat`. 
Use `pixi run bench-smoke` first when you want a short -sanity check before a longer capture. For Ethernet socket-option comparisons, -`pixi run bench-socket-matrix --ethernet 192.168.10.104 --json-out bench_matrix.jsonl` -runs the suite across the predefined TCP tuning variants. +For a one-command socket comparison matrix, use: -### Preserve the artifact bundle +```sh +pixi run bench-socket-matrix --ethernet 192.168.10.194 \ + --stream-path patterns/pat0004.pat \ + --label host-matrix \ + --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl +``` + +### Generate a compact performance summary After the run, keep at least: @@ -254,6 +297,32 @@ PowerShell: Select-String -Path bench_artifacts\2026-03-13-eth\qspy.log -Pattern 'PERF_' | ForEach-Object { $_.Line } | Set-Content bench_artifacts\2026-03-13-eth\qspy_perf.log ``` +Generate a compact text summary and optionally save a machine-readable JSON +summary: + +```sh +pixi run perf-summary --jsonl bench_artifacts/2026-03-13-eth/bench_results.jsonl \ + --qspy-log bench_artifacts/2026-03-13-eth/qspy.log \ + --baseline linux-default \ + --json-out bench_artifacts/2026-03-13-eth/perf_summary.json +``` + +The summary tool groups host-side runs by label and reports the latest QSPY +`PERF_*` records so you can answer questions like: how much slower is the +Windows-like socket policy than the Linux default, did SPF hold 200 Hz, and did +streaming throughput change. + +### Interpreting cleanup warnings + +Post-run `ALL_OFF` cleanup is intentionally treated as a warning when the +measurement phase has already completed. If the host reports +`status=ok_cleanup_failed`, the measured host-side statistics are still valid +and are preserved in the JSONL output. 
In that case: + +- keep the QSPY log running and save it +- review the saved cleanup diagnostics (`ip route get ...` and `ip neigh show ...` on Linux) +- compare whether QSPY shows a fresh boot/link sequence or just a host/network path hiccup + For reproducible comparisons, keep the artifact directory together with the firmware commit or tag, host computer, transport, switch/LAN notes, and the benchmark label you used. diff --git a/pyproject.toml b/pyproject.toml index 75b81a4..1c0bd23 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ Firmware = "https://github.com/janelia-arduino/ArenaController" [project.scripts] arena-interface = "arena_interface.cli:cli" +arena-interface-perf-summary = "arena_interface.perf_summary_cli:main" [tool.setuptools] package-dir = { "" = "src" } @@ -116,9 +117,12 @@ bench-smoke = "arena-interface bench --cmd-iters 250 --spf-seconds 2 --stream-pa bench-persistent = "arena-interface bench --cmd-connect-mode persistent" bench-new-connection = "arena-interface bench --cmd-connect-mode new_connection" bench-no-quickack = "arena-interface --no-tcp-quickack bench" +bench-windows-like = "arena-interface --no-tcp-quickack bench" +bench-full-windows-like = "arena-interface --no-tcp-quickack bench --stream-path patterns/pat0004.pat" bench-no-nodelay = "arena-interface --no-tcp-nodelay bench" bench-no-latency-tuning = "arena-interface --no-tcp-nodelay --no-tcp-quickack bench" bench-socket-matrix = "python scripts/bench_matrix.py" +perf-summary = "arena-interface-perf-summary" [tool.pixi.target.linux-64.dependencies] make = "*" diff --git a/scripts/bench_matrix.py b/scripts/bench_matrix.py index a161021..f19e042 100644 --- a/scripts/bench_matrix.py +++ b/scripts/bench_matrix.py @@ -17,6 +17,7 @@ VARIANTS: dict[str, dict[str, bool]] = { "default": {"tcp_nodelay": True, "tcp_quickack": True}, + "windows_like": {"tcp_nodelay": True, "tcp_quickack": False}, "no_quickack": {"tcp_nodelay": True, "tcp_quickack": False}, "no_nodelay": 
{"tcp_nodelay": False, "tcp_quickack": True}, "no_latency_tuning": {"tcp_nodelay": False, "tcp_quickack": False}, @@ -37,7 +38,7 @@ def build_parser() -> argparse.ArgumentParser: "--variants", nargs="+", choices=sorted(VARIANTS), - default=["default", "no_quickack", "no_nodelay", "no_latency_tuning"], + default=["default", "windows_like", "no_nodelay", "no_latency_tuning"], help="Socket-option variants to execute", ) parser.add_argument("--include-connect", action="store_true", help="Include TCP connect timing in each run") @@ -77,7 +78,7 @@ def print_summary(variant_name: str, suite: dict) -> None: quickack = meta.get("tcp_quickack_supported") and meta.get("tcp_quickack_requested") status = suite.get("status", "unknown") - if status != "ok": + if status == "error": error = suite.get("error") or {} print( f"{variant_name:>18} | FAILED {error.get('phase')} {error.get('type')}: {error.get('message')}" @@ -89,7 +90,7 @@ def print_summary(variant_name: str, suite: dict) -> None: stream = suite.get("stream_frames") line = ( - f"{variant_name:>18} | cmd mean={cmd['mean_ms']:.3f} ms p99={cmd['p99_ms']:.3f} | " + f"{variant_name:>18} | status={status} cmd mean={cmd['mean_ms']:.3f} ms p99={cmd['p99_ms']:.3f} | " f"spf={spf['achieved_hz']:.1f} Hz | nodelay={meta.get('tcp_nodelay')} quickack={quickack}" ) if isinstance(stream, dict): @@ -147,7 +148,7 @@ def main() -> int: if args.json_out is not None: ArenaInterface.write_bench_jsonl(str(args.json_out), suite) print_summary(variant_name, suite) - if suite.get("status") != "ok": + if suite.get("status") == "error": exit_code = 1 return exit_code diff --git a/scripts/perf_summary.py b/scripts/perf_summary.py new file mode 100644 index 0000000..23ec015 --- /dev/null +++ b/scripts/perf_summary.py @@ -0,0 +1,18 @@ +"""Generate a compact performance summary from benchmark JSONL and QSPY logs.""" + +from __future__ import annotations + +import sys +from pathlib import Path + +if __package__ in {None, ""}: + repo_root = 
Path(__file__).resolve().parents[1] + src_root = repo_root / "src" + if str(src_root) not in sys.path: + sys.path.insert(0, str(src_root)) + +from arena_interface.perf_summary_cli import main + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/src/arena_interface/arena_interface.py b/src/arena_interface/arena_interface.py index 8c5de8b..5e92210 100644 --- a/src/arena_interface/arena_interface.py +++ b/src/arena_interface/arena_interface.py @@ -42,6 +42,7 @@ ANALOG_OUTPUT_VALUE_MIN = 100 ANALOG_OUTPUT_VALUE_MAX = 4095 BENCH_IO_TIMEOUT_S = 5.0 +BENCH_CLEANUP_RECONNECT_BACKOFF_S = 0.25 # Chunk size used for optional STREAM_FRAME chunked sends. # Keep this comfortably below typical MTU to avoid excessive fragmentation. @@ -241,25 +242,114 @@ def _format_exception(exc: BaseException) -> str: """Return a compact exception description for logs/results.""" return f"{type(exc).__name__}: {exc}" + @staticmethod + def _run_command_text(args: list[str]) -> str: + """Run a command and return a compact text payload for diagnostics.""" + try: + proc = subprocess.run( + args, + check=False, + capture_output=True, + text=True, + ) + except Exception as exc: + return f"" + + stdout = (proc.stdout or "").strip() + stderr = (proc.stderr or "").strip() + payload = stdout or stderr or "" + if proc.returncode != 0: + return f"[exit {proc.returncode}] {payload}" + return payload + + def _collect_linux_net_diagnostics(self, peer_ip: str | None) -> dict[str, str]: + """Collect small Linux routing and neighbor diagnostics for benchmark cleanup.""" + if not peer_ip or platform.system().lower() != "linux": + return {} + + return { + "peer_ip": str(peer_ip), + "route_get": self._run_command_text(["ip", "route", "get", str(peer_ip)]), + "neighbor": self._run_command_text(["ip", "neigh", "show", str(peer_ip)]), + } + def _safe_all_off( self, *, status_callback: StatusCallback | None = None, context: str = "bench cleanup", - ) -> str | None: - """Best-effort ALL_OFF used by 
benchmark cleanup paths.""" + ) -> dict[str, object]: + """Best-effort ALL_OFF used by benchmark cleanup paths. + + Cleanup failures after a successful measurement should be preserved as + warnings, not turned into hard benchmark failures that discard the + already-collected timing data. + """ + cleanup: dict[str, object] = { + "all_off_attempted": True, + "all_off_ok": False, + "status": "failed", + "all_off_error": None, + "retry_performed": False, + "diagnostics": {}, + "attempts": [], + } + + def record_attempt(attempt: int, ok: bool, error: str | None) -> None: + attempts = cleanup.setdefault("attempts", []) + assert isinstance(attempts, list) + attempts.append( + { + "attempt": int(attempt), + "ok": bool(ok), + "error": error, + } + ) + try: self.all_off() + record_attempt(1, True, None) + cleanup["all_off_ok"] = True + cleanup["status"] = "ok" self._bench_emit_status(status_callback, f"[bench] {context}: ALL_OFF ok") - return None + return cleanup except Exception as exc: message = self._format_exception(exc) + record_attempt(1, False, message) + cleanup["all_off_error"] = message + cleanup["diagnostics"] = self._collect_linux_net_diagnostics(self._ethernet_ip_address) self._socket_last_error = message self._bench_emit_status( status_callback, f"[bench] {context}: ALL_OFF failed: {message}" ) + + should_retry = bool(self._ethernet_ip_address) + if should_retry: + cleanup["retry_performed"] = True self._close_ethernet_socket() - return message + time.sleep(BENCH_CLEANUP_RECONNECT_BACKOFF_S) + try: + self.all_off() + record_attempt(2, True, None) + cleanup["all_off_ok"] = True + cleanup["status"] = "ok_after_retry" + self._bench_emit_status( + status_callback, + f"[bench] {context}: ALL_OFF recovered after reconnect/backoff", + ) + return cleanup + except Exception as exc: + retry_message = self._format_exception(exc) + record_attempt(2, False, retry_message) + cleanup["all_off_error"] = retry_message + self._socket_last_error = retry_message + 
self._bench_emit_status( + status_callback, + f"[bench] {context}: ALL_OFF retry failed: {retry_message}", + ) + + self._close_ethernet_socket() + return cleanup def _refresh_quickack(self, ethernet_socket: socket.socket) -> None: """Best-effort refresh of TCP_QUICKACK on Linux. @@ -1213,6 +1303,16 @@ def _bench_progress_maybe( next_progress_ns += step_ns return next_progress_ns + @staticmethod + def _cleanup_result_default() -> dict[str, object]: + """Return a normalized cleanup result placeholder.""" + return { + "all_off_attempted": False, + "all_off_ok": None, + "status": "not_attempted", + "all_off_error": None, + } + def bench_metadata(self, label: str | None = None) -> dict: """Return a small metadata blob to attach to benchmark results.""" try: @@ -1376,7 +1476,7 @@ def bench_command_rtt( self._socket_last_error = None reconnects_before = self.get_socket_reconnects(reset=False) - cleanup_error: str | None = None + cleanup: dict[str, object] | None = None progress_step_ns = ( max(1, int(float(progress_interval_s) * 1e9)) if progress_interval_s > 0 else 0 ) @@ -1457,13 +1557,12 @@ def bench_command_rtt( ) finally: if wrap_mode: - cleanup_error = self._safe_all_off( + cleanup = self._safe_all_off( status_callback=status_callback, context="command_rtt cleanup", ) - - if cleanup_error is not None: - raise RuntimeError(f"command_rtt cleanup failed: {cleanup_error}") + if "summary" in locals(): + summary["cleanup"] = cleanup return summary @@ -1502,7 +1601,7 @@ def bench_spf_updates( self._socket_last_error = None reconnects_before = self.get_socket_reconnects(reset=False) - cleanup_error: str | None = None + cleanup: dict[str, object] | None = None progress_step_ns = ( max(1, int(float(progress_interval_s) * 1e9)) if progress_interval_s > 0 else 0 ) @@ -1612,13 +1711,12 @@ def bench_spf_updates( "last_socket_error": self._socket_last_error, } finally: - cleanup_error = self._safe_all_off( + cleanup = self._safe_all_off( status_callback=status_callback, 
context="spf_updates cleanup", ) - - if cleanup_error is not None: - raise RuntimeError(f"spf_updates cleanup failed: {cleanup_error}") + if "summary" in locals(): + summary["cleanup"] = cleanup return summary @@ -1648,7 +1746,7 @@ def bench_stream_frames( self._socket_last_error = None reconnects_before = self.get_socket_reconnects(reset=False) - cleanup_error: str | None = None + cleanup: dict[str, object] | None = None try: self.reset_perf_stats() @@ -1678,13 +1776,12 @@ def bench_stream_frames( } ) finally: - cleanup_error = self._safe_all_off( + cleanup = self._safe_all_off( status_callback=status_callback, context="stream_frames cleanup", ) - - if cleanup_error is not None: - raise RuntimeError(f"stream_frames cleanup failed: {cleanup_error}") + if "stats" in locals(): + stats["cleanup"] = cleanup return stats @@ -1819,6 +1916,7 @@ def bench_suite( "status": "ok", "failed_phase": None, "error": None, + "warnings": [], "phases": [], } results["meta"]["bench_io_timeout_s"] = self._coerce_timeout(bench_io_timeout_s) @@ -1851,27 +1949,54 @@ def run_phase(name: str, fn, /, **kwargs) -> bool: status_callback, f"[bench] FAILED {name}: {self._format_exception(exc)}", ) - cleanup_error = self._safe_all_off( + cleanup = self._safe_all_off( status_callback=status_callback, context=f"{name} post-error cleanup", ) - if cleanup_error is not None: - results["cleanup"] = { - "all_off_attempted": True, - "all_off_ok": False, - "all_off_error": cleanup_error, - } - else: - results["cleanup"] = { - "all_off_attempted": True, - "all_off_ok": True, - "all_off_error": None, - } + cleanup["phase"] = name + results["cleanup"] = cleanup return False phase["status"] = "ok" phase["ended_utc"] = self._utc_now_iso() phase["elapsed_s"] = (time.perf_counter_ns() - started_ns) / 1e9 + + phase_result = results.get(name) + phase_cleanup = None + if isinstance(phase_result, dict): + cleanup_value = phase_result.get("cleanup") + if isinstance(cleanup_value, dict): + phase_cleanup = 
cleanup_value + + if phase_cleanup is not None: + phase["cleanup_status"] = phase_cleanup.get("status") + cleanup_status = str(phase_cleanup.get("status")) + if cleanup_status == "failed": + results["status"] = "ok_cleanup_failed" + results["warnings"].append( + { + "phase": name, + "type": "cleanup_failed", + "message": phase_cleanup.get("all_off_error"), + } + ) + self._bench_emit_status( + status_callback, + f"[bench] WARNING {name}: cleanup failed after measurement: {phase_cleanup.get('all_off_error')}", + ) + elif cleanup_status == "ok_after_retry": + results["warnings"].append( + { + "phase": name, + "type": "cleanup_retried", + "message": "ALL_OFF recovered after reconnect/backoff", + } + ) + self._bench_emit_status( + status_callback, + f"[bench] warning {name}: cleanup recovered after reconnect/backoff", + ) + self._bench_emit_status( status_callback, f"[bench] finished {name} in {phase['elapsed_s']:.3f} s", @@ -1954,12 +2079,45 @@ def _bench_finalize_suite_results(self, results: dict) -> dict: except OSError: pass - if "cleanup" not in results: + if results.get("status") == "error": + if "cleanup" not in results: + results["cleanup"] = self._cleanup_result_default() + return results + + phase_cleanups: dict[str, dict[str, object]] = {} + for phase in results.get("phases", []): + if not isinstance(phase, dict): + continue + name = phase.get("name") + if not isinstance(name, str): + continue + payload = results.get(name) + if not isinstance(payload, dict): + continue + cleanup = payload.get("cleanup") + if isinstance(cleanup, dict) and cleanup.get("all_off_attempted"): + phase_cleanups[name] = cleanup + + if phase_cleanups: + status_rank = {"failed": 3, "ok_after_retry": 2, "ok": 1, "not_attempted": 0} + cleanup_status = max( + (str(cleanup.get("status", "not_attempted")) for cleanup in phase_cleanups.values()), + key=lambda status: status_rank.get(status, -1), + default="not_attempted", + ) + first_error = next( + (cleanup.get("all_off_error") for cleanup 
in phase_cleanups.values() if cleanup.get("all_off_error")), + None, + ) results["cleanup"] = { - "all_off_attempted": False, - "all_off_ok": None, - "all_off_error": None, + "all_off_attempted": True, + "all_off_ok": all(bool(cleanup.get("all_off_ok")) for cleanup in phase_cleanups.values()), + "status": cleanup_status, + "all_off_error": first_error, + "phases": phase_cleanups, } + elif "cleanup" not in results: + results["cleanup"] = self._cleanup_result_default() return results diff --git a/src/arena_interface/cli.py b/src/arena_interface/cli.py index 38b64fc..4b806b2 100755 --- a/src/arena_interface/cli.py +++ b/src/arena_interface/cli.py @@ -22,6 +22,8 @@ def _print_phase_history(suite: dict) -> None: f"{phase.get('name')}: status={phase.get('status')} " f"elapsed_s={float(phase.get('elapsed_s', 0.0)):.3f}" ) + if phase.get("cleanup_status"): + line += f" cleanup={phase.get('cleanup_status')}" if phase.get("error"): line += f" error={phase.get('error_type')}: {phase.get('error')}" click.echo(line) @@ -37,7 +39,7 @@ def _print_suite_summary( click.echo( f"meta: label={meta.get('label')} host={meta.get('hostname')} " f"python={meta.get('python')} transport={meta.get('transport')} eth_ip={meta.get('ethernet_ip')} " - f"io_timeout={meta.get('bench_io_timeout_s')}" + f"io_timeout={meta.get('bench_io_timeout_s')} status={suite.get('status')}" ) if include_connect and ("connect_time" in suite): @@ -131,10 +133,23 @@ def _print_suite_summary( _print_phase_history(suite) + warnings = suite.get("warnings") or [] + if warnings: + click.echo("\n-- warnings --") + for warning in warnings: + click.echo( + "{phase}: {wtype} {message}".format( + phase=warning.get("phase"), + wtype=warning.get("type"), + message=warning.get("message"), + ) + ) + cleanup = suite.get("cleanup") or {} if cleanup.get("all_off_attempted"): click.echo( - "\ncleanup: all_off_ok={ok} error={err}".format( + "\ncleanup: status={status} all_off_ok={ok} error={err}".format( + 
status=cleanup.get("status"), ok=cleanup.get("all_off_ok"), err=cleanup.get("all_off_error"), ) @@ -418,7 +433,7 @@ def bench( ArenaInterface.write_bench_jsonl(str(json_out), suite) click.echo(f"\nappended JSONL: {json_out}") - if suite.get("status") != "ok": + if suite.get("status") == "error": error = suite.get("error") or {} raise click.ClickException( "benchmark failed in {phase}: {etype}: {message}".format( @@ -428,6 +443,12 @@ def bench( ) ) + if suite.get("status") == "ok_cleanup_failed": + click.echo( + "\nBench done with a cleanup warning. Measured results were kept; review the cleanup diagnostics and keep the QSPY log.\n" + ) + return + click.echo("\nBench done. Capture the QS PERF_* lines to compare device-side timings.\n") diff --git a/src/arena_interface/perf_summary.py b/src/arena_interface/perf_summary.py new file mode 100644 index 0000000..95e91b9 --- /dev/null +++ b/src/arena_interface/perf_summary.py @@ -0,0 +1,397 @@ +"""Helpers for summarizing benchmark JSONL runs and QSPY PERF logs.""" + +from __future__ import annotations + +import json +import math +import re +from pathlib import Path +from typing import Iterable + +_QSPY_RECORD_RE = re.compile(r"\b(?PPERF_[A-Z0-9_]+)\b(?P.*)$") +_QSPY_KV_RE = re.compile(r"(?P[A-Za-z_][A-Za-z0-9_\-/]*)=(?P[^\s]+)") +_INT_RE = re.compile(r"^[+-]?\d+$") +_FLOAT_RE = re.compile(r"^[+-]?(?:\d+\.\d*|\d*\.\d+)(?:[eE][+-]?\d+)?$") + +_HOST_DELTA_METRICS: tuple[tuple[str, str, str], ...] 
= ( + ("cmd_mean_ms", "command RTT mean", "ms"), + ("cmd_p99_ms", "command RTT p99", "ms"), + ("spf_achieved_hz", "SPF achieved", "Hz"), + ("stream_rate_hz", "stream rate", "Hz"), + ("stream_tx_mbps", "stream TX", "Mb/s"), +) + + +def _coerce_qspy_value(raw: str) -> object: + if raw.lower() in {"true", "false"}: + return raw.lower() == "true" + if raw.lower().startswith("0x"): + try: + return int(raw, 16) + except ValueError: + return raw + if _INT_RE.match(raw): + try: + return int(raw) + except ValueError: + return raw + if _FLOAT_RE.match(raw): + try: + return float(raw) + except ValueError: + return raw + return raw + + +def _is_finite_number(value: object) -> bool: + return isinstance(value, (int, float)) and math.isfinite(float(value)) + + +def load_bench_results(paths: Iterable[Path], *, label_filter: str | None = None) -> list[dict]: + """Load benchmark result objects from one or more JSONL files.""" + results: list[dict] = [] + needle = label_filter.lower() if label_filter else None + + for path in paths: + with Path(path).open("r", encoding="utf-8") as handle: + for line_number, raw in enumerate(handle, start=1): + text = raw.strip() + if not text: + continue + payload = json.loads(text) + if not isinstance(payload, dict): + continue + label = str((payload.get("meta") or {}).get("label") or "") + if needle and needle not in label.lower(): + continue + payload.setdefault("_source_jsonl", str(path)) + payload.setdefault("_source_line", int(line_number)) + results.append(payload) + return results + + +def parse_qspy_perf_records(lines: Iterable[str]) -> list[dict]: + """Parse PERF_* records from a QSPY text log.""" + records: list[dict] = [] + for line_number, raw_line in enumerate(lines, start=1): + line = raw_line.rstrip("\n") + match = _QSPY_RECORD_RE.search(line) + if match is None: + continue + + fields: dict[str, object] = { + "record": match.group("record"), + "line_no": int(line_number), + "raw": line, + } + rest = match.group("rest") or "" + for 
kv_match in _QSPY_KV_RE.finditer(rest): + key = kv_match.group("key") + value = _coerce_qspy_value(kv_match.group("value")) + fields[key] = value + records.append(fields) + return records + + +def load_qspy_perf_records(paths: Iterable[Path]) -> list[dict]: + """Load PERF_* records from one or more QSPY log files.""" + records: list[dict] = [] + for path in paths: + with Path(path).open("r", encoding="utf-8", errors="replace") as handle: + file_records = parse_qspy_perf_records(handle) + for record in file_records: + record.setdefault("source_log", str(path)) + records.extend(file_records) + return records + + +def extract_host_run_metrics(run: dict, *, default_label: str) -> dict[str, object]: + """Extract the main user-facing performance metrics from one benchmark run.""" + meta = run.get("meta") or {} + cleanup = run.get("cleanup") or {} + command_rtt = run.get("command_rtt") or {} + spf_updates = run.get("spf_updates") or {} + stream_frames = run.get("stream_frames") or {} + + quickack_supported = bool(meta.get("tcp_quickack_supported")) + quickack_requested = bool(meta.get("tcp_quickack_requested")) + quickack_active = quickack_supported and quickack_requested + + return { + "label": meta.get("label") or default_label, + "status": run.get("status") or "unknown", + "cleanup_status": cleanup.get("status") or "not_attempted", + "cleanup_error": cleanup.get("all_off_error"), + "warnings": run.get("warnings") or [], + "transport": meta.get("transport"), + "tcp_nodelay": meta.get("tcp_nodelay"), + "tcp_quickack_supported": quickack_supported, + "tcp_quickack_requested": quickack_requested, + "tcp_quickack_active": quickack_active, + "source_jsonl": run.get("_source_jsonl"), + "source_line": run.get("_source_line"), + "cmd_mean_ms": command_rtt.get("mean_ms"), + "cmd_p99_ms": command_rtt.get("p99_ms"), + "cmd_reconnects": command_rtt.get("reconnects"), + "spf_target_hz": spf_updates.get("target_hz"), + "spf_achieved_hz": spf_updates.get("achieved_hz"), + 
"spf_p99_update_ms": (spf_updates.get("update_rtt_ms") or {}).get("p99_ms"), + "stream_frames": stream_frames.get("frames"), + "stream_rate_hz": stream_frames.get("rate_hz"), + "stream_tx_mbps": stream_frames.get("tx_mbps"), + "stream_cmd_p99_ms": (stream_frames.get("cmd_rtt_ms") or {}).get("p99_ms"), + } + + +def summarize_host_runs(runs: list[dict], *, baseline_label: str | None = None) -> dict[str, object]: + """Summarize benchmark JSONL runs into a compact comparison structure.""" + metrics = [ + extract_host_run_metrics(run, default_label=f"run_{index}") + for index, run in enumerate(runs, start=1) + ] + + counts: dict[str, int] = {} + for metric in metrics: + status = str(metric.get("status") or "unknown") + counts[status] = counts.get(status, 0) + 1 + + baseline = None + if metrics: + if baseline_label: + needle = baseline_label.lower() + for metric in metrics: + label = str(metric.get("label") or "") + if needle in label.lower(): + baseline = metric + break + if baseline is None: + baseline = metrics[0] + + comparisons: list[dict[str, object]] = [] + if baseline is not None: + baseline_label_value = str(baseline.get("label")) + for metric in metrics: + if metric is baseline: + continue + metric_deltas: list[dict[str, object]] = [] + for key, description, unit in _HOST_DELTA_METRICS: + base_value = baseline.get(key) + current_value = metric.get(key) + if not (_is_finite_number(base_value) and _is_finite_number(current_value)): + continue + base = float(base_value) + current = float(current_value) + delta = current - base + pct = None + if base != 0: + pct = (delta / base) * 100.0 + metric_deltas.append( + { + "metric": key, + "description": description, + "unit": unit, + "baseline": base, + "current": current, + "delta": delta, + "pct": pct, + } + ) + comparisons.append( + { + "baseline_label": baseline_label_value, + "label": metric.get("label"), + "deltas": metric_deltas, + } + ) + + return { + "run_count": len(metrics), + "status_counts": counts, + 
"runs": metrics, + "baseline": baseline, + "comparisons": comparisons, + } + + +def summarize_qspy_records(records: list[dict]) -> dict[str, object]: + """Group PERF_* records into a compact latest-record summary.""" + grouped: dict[str, dict[str, object]] = {} + for record in records: + kind = record.get("kind") + group_name = f"{record['record']} kind={kind}" if kind else str(record["record"]) + info = grouped.setdefault(group_name, {"count": 0, "latest": None, "numeric_fields": {}}) + info["count"] = int(info.get("count", 0)) + 1 + info["latest"] = record + + numeric_fields = info.setdefault("numeric_fields", {}) + assert isinstance(numeric_fields, dict) + for key, value in record.items(): + if key in {"record", "kind", "raw", "line_no", "source_log"}: + continue + if _is_finite_number(value): + bucket = numeric_fields.setdefault(key, []) + assert isinstance(bucket, list) + bucket.append(float(value)) + + groups: list[dict[str, object]] = [] + for name, info in grouped.items(): + latest = info.get("latest") or {} + numeric_summary: dict[str, dict[str, float]] = {} + for key, values in (info.get("numeric_fields") or {}).items(): + if not values: + continue + numeric_summary[key] = { + "min": min(values), + "max": max(values), + "last": values[-1], + } + groups.append( + { + "group": name, + "count": int(info.get("count", 0)), + "latest": latest, + "numeric_summary": numeric_summary, + } + ) + + groups.sort(key=lambda item: str(item.get("group"))) + return {"record_count": len(records), "groups": groups} + + +def build_performance_summary( + *, + jsonl_paths: Iterable[Path] = (), + qspy_log_paths: Iterable[Path] = (), + label_filter: str | None = None, + baseline_label: str | None = None, +) -> dict[str, object]: + """Load artifacts and build one combined performance summary.""" + jsonl_path_list = [Path(path) for path in jsonl_paths] + qspy_path_list = [Path(path) for path in qspy_log_paths] + runs = load_bench_results(jsonl_path_list, label_filter=label_filter) 
+ qspy_records = load_qspy_perf_records(qspy_path_list) + return { + "inputs": { + "jsonl": [str(path) for path in jsonl_path_list], + "qspy_logs": [str(path) for path in qspy_path_list], + "label_filter": label_filter, + "baseline_label": baseline_label, + }, + "host": summarize_host_runs(runs, baseline_label=baseline_label), + "qspy": summarize_qspy_records(qspy_records), + } + + +def _format_number(value: object, unit: str | None = None) -> str: + if not _is_finite_number(value): + return "n/a" + number = float(value) + if unit == "Hz": + text = f"{number:.1f}" + elif unit == "Mb/s": + text = f"{number:.2f}" + else: + text = f"{number:.3f}" + return f"{text} {unit}" if unit else text + + +def _format_compact_fields(record: dict[str, object], *, limit: int = 6) -> str: + preferred: list[tuple[str, object]] = [] + fallback: list[tuple[str, object]] = [] + priority_terms = ( + "kind", + "mode", + "phase", + "frames", + "updates", + "rate", + "hz", + "fps", + "mean", + "avg", + "p50", + "p95", + "p99", + "max", + "min", + "bytes", + "drop", + "err", + ) + + for key, value in record.items(): + if key in {"record", "raw", "line_no", "source_log"}: + continue + pair = (str(key), value) + if any(term in str(key).lower() for term in priority_terms): + preferred.append(pair) + else: + fallback.append(pair) + + selected = preferred[:limit] + if len(selected) < limit: + selected.extend(fallback[: max(0, limit - len(selected))]) + return ", ".join(f"{key}={value}" for key, value in selected) + + +def render_text_summary(summary: dict[str, object]) -> str: + """Render a human-readable performance summary.""" + lines: list[str] = [] + + host = summary.get("host") or {} + runs = host.get("runs") or [] + if runs: + status_counts = host.get("status_counts") or {} + counts_text = ", ".join(f"{key}={value}" for key, value in sorted(status_counts.items())) + lines.append(f"Host benchmark runs: {host.get('run_count', 0)} ({counts_text})") + lines.append("label | status | cmd mean / 
p99 | SPF achieved | stream rate / tx | socket policy") + for run in runs: + policy = ( + f"nodelay={run.get('tcp_nodelay')} " + f"quickack={run.get('tcp_quickack_active')}" + ) + line = ( + f"{run.get('label')} | {run.get('status')}" + f" | {_format_number(run.get('cmd_mean_ms'), 'ms')} / {_format_number(run.get('cmd_p99_ms'), 'ms')}" + f" | {_format_number(run.get('spf_achieved_hz'), 'Hz')}" + f" | {_format_number(run.get('stream_rate_hz'), 'Hz')} / {_format_number(run.get('stream_tx_mbps'), 'Mb/s')}" + f" | {policy}" + ) + cleanup_status = run.get("cleanup_status") + if cleanup_status not in {None, "not_attempted", "ok"}: + line += f" | cleanup={cleanup_status}" + lines.append(line) + + baseline = host.get("baseline") + comparisons = host.get("comparisons") or [] + if baseline and comparisons: + lines.append("") + lines.append(f"Baseline: {baseline.get('label')}") + for comparison in comparisons: + delta_bits: list[str] = [] + for delta in comparison.get("deltas") or []: + pct = delta.get("pct") + pct_text = f" ({float(pct):+.1f}%)" if _is_finite_number(pct) else "" + delta_bits.append( + f"{delta.get('description')} {float(delta.get('delta')):+.3f} {delta.get('unit')}{pct_text}" + ) + if delta_bits: + lines.append(f"- {comparison.get('label')}: " + "; ".join(delta_bits)) + else: + lines.append("Host benchmark runs: none") + + qspy = summary.get("qspy") or {} + groups = qspy.get("groups") or [] + lines.append("") + lines.append(f"QSPY PERF records: {qspy.get('record_count', 0)}") + if groups: + for group in groups: + latest = group.get("latest") or {} + compact = _format_compact_fields(latest) + lines.append( + f"- {group.get('group')}: count={group.get('count')} latest(line {latest.get('line_no')}): {compact}" + ) + else: + lines.append("- none") + + return "\n".join(lines).strip() + "\n" diff --git a/src/arena_interface/perf_summary_cli.py b/src/arena_interface/perf_summary_cli.py new file mode 100644 index 0000000..ee27d75 --- /dev/null +++ 
b/src/arena_interface/perf_summary_cli.py @@ -0,0 +1,70 @@ +"""Console entry point for performance summary generation.""" + +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path + +from .perf_summary import build_performance_summary, render_text_summary + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=( + "Summarize ArenaController benchmark JSONL runs and optional QSPY PERF logs " + "into a compact host/device performance report." + ) + ) + parser.add_argument( + "--jsonl", + dest="jsonl_paths", + type=Path, + nargs="+", + default=[], + help="One or more benchmark JSONL files produced by arena-interface bench --json-out", + ) + parser.add_argument( + "--qspy-log", + dest="qspy_log_paths", + type=Path, + nargs="+", + default=[], + help="One or more saved QSPY text logs containing PERF_* records", + ) + parser.add_argument( + "--label-filter", + default=None, + help="Only include benchmark runs whose label contains this substring", + ) + parser.add_argument( + "--baseline", + default=None, + help="Label substring to use as the baseline run for host delta comparisons", + ) + parser.add_argument( + "--json-out", + type=Path, + default=None, + help="Optional path to write the machine-readable summary JSON", + ) + return parser + + +def main(argv: list[str] | None = None) -> int: + args = build_parser().parse_args(argv) + summary = build_performance_summary( + jsonl_paths=args.jsonl_paths, + qspy_log_paths=args.qspy_log_paths, + label_filter=args.label_filter, + baseline_label=args.baseline, + ) + sys.stdout.write(render_text_summary(summary)) + if args.json_out is not None: + args.json_out.write_text(json.dumps(summary, indent=2, sort_keys=True), encoding="utf-8") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tests/test_bench_cleanup.py b/tests/test_bench_cleanup.py new file mode 100644 index 0000000..bcb611f --- /dev/null +++ 
b/tests/test_bench_cleanup.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import json + +import arena_interface.arena_interface as arena_mod +from arena_interface import ArenaInterface +from arena_interface.perf_summary import build_performance_summary, render_text_summary + + +def test_safe_all_off_recovers_after_retry(monkeypatch) -> None: + ai = ArenaInterface() + ai.set_ethernet_mode("192.0.2.10") + + attempts = {"count": 0} + + def fake_all_off() -> None: + attempts["count"] += 1 + if attempts["count"] == 1: + raise TimeoutError("cleanup timeout") + + monkeypatch.setattr(ai, "all_off", fake_all_off) + monkeypatch.setattr( + ai, + "_collect_linux_net_diagnostics", + lambda peer_ip: {"peer_ip": str(peer_ip), "route_get": "dev eth0", "neighbor": "reachable"}, + ) + monkeypatch.setattr(arena_mod.time, "sleep", lambda _: None) + + cleanup = ai._safe_all_off(context="unit-test cleanup") + + assert cleanup["status"] == "ok_after_retry" + assert cleanup["all_off_ok"] is True + assert cleanup["retry_performed"] is True + assert cleanup["diagnostics"] == { + "peer_ip": "192.0.2.10", + "route_get": "dev eth0", + "neighbor": "reachable", + } + assert len(cleanup["attempts"]) == 2 + assert cleanup["attempts"][0]["ok"] is False + assert cleanup["attempts"][1]["ok"] is True + + +def test_bench_suite_keeps_successful_measurements_on_cleanup_warning(monkeypatch) -> None: + ai = ArenaInterface(tcp_quickack=False) + + monkeypatch.setattr( + ai, + "bench_command_rtt", + lambda **kwargs: { + "mean_ms": 0.50, + "p99_ms": 0.80, + "reconnects": 0, + "cleanup": { + "all_off_attempted": True, + "all_off_ok": False, + "status": "failed", + "all_off_error": "TimeoutError: cleanup timeout", + "retry_performed": True, + "diagnostics": {"route_get": "dev eth0"}, + "attempts": [ + {"attempt": 1, "ok": False, "error": "TimeoutError: cleanup timeout"}, + {"attempt": 2, "ok": False, "error": "TimeoutError: cleanup timeout"}, + ], + }, + }, + ) + monkeypatch.setattr( + ai, + 
"bench_spf_updates", + lambda **kwargs: { + "updates": 100, + "elapsed_s": 0.5, + "target_hz": 200.0, + "achieved_hz": 200.0, + "update_rtt_ms": {"p99_ms": 0.70}, + "reconnects": 0, + "cleanup": { + "all_off_attempted": True, + "all_off_ok": True, + "status": "ok", + "all_off_error": None, + "retry_performed": False, + "diagnostics": {}, + "attempts": [{"attempt": 1, "ok": True, "error": None}], + }, + }, + ) + + suite = ai.bench_suite(label="unit-test") + + assert suite["status"] == "ok_cleanup_failed" + assert suite["failed_phase"] is None + assert suite["error"] is None + assert suite["command_rtt"]["mean_ms"] == 0.50 + assert suite["cleanup"]["status"] == "failed" + assert suite["cleanup"]["all_off_ok"] is False + assert suite["warnings"][0]["type"] == "cleanup_failed" + + +def test_performance_summary_reports_host_and_qspy_metrics(tmp_path) -> None: + bench_path = tmp_path / "bench_results.jsonl" + qspy_path = tmp_path / "qspy.log" + + run_default = { + "meta": { + "label": "linux-default", + "transport": "ethernet", + "tcp_nodelay": True, + "tcp_quickack_requested": True, + "tcp_quickack_supported": True, + }, + "status": "ok", + "cleanup": {"status": "ok", "all_off_error": None}, + "warnings": [], + "command_rtt": {"mean_ms": 0.45, "p99_ms": 0.70, "reconnects": 0}, + "spf_updates": {"achieved_hz": 200.0, "target_hz": 200.0, "update_rtt_ms": {"p99_ms": 0.80}}, + "stream_frames": {"frames": 1000, "rate_hz": 199.8, "tx_mbps": 22.5, "cmd_rtt_ms": {"p99_ms": 1.1}}, + } + run_windows_like = { + "meta": { + "label": "windows-like", + "transport": "ethernet", + "tcp_nodelay": True, + "tcp_quickack_requested": False, + "tcp_quickack_supported": True, + }, + "status": "ok", + "cleanup": {"status": "ok", "all_off_error": None}, + "warnings": [], + "command_rtt": {"mean_ms": 0.60, "p99_ms": 0.95, "reconnects": 0}, + "spf_updates": {"achieved_hz": 198.5, "target_hz": 200.0, "update_rtt_ms": {"p99_ms": 0.95}}, + "stream_frames": {"frames": 990, "rate_hz": 197.9, 
"tx_mbps": 22.0, "cmd_rtt_ms": {"p99_ms": 1.4}}, + } + + bench_path.write_text( + json.dumps(run_default) + "\n" + json.dumps(run_windows_like) + "\n", + encoding="utf-8", + ) + qspy_path.write_text( + "000000 PERF_UPD kind=SPF frames=1000 rate_hz=199.8 p99_us=450\n" + "000001 PERF_NET bytes_tx=123456 bytes_rx=123400 drops=0\n", + encoding="utf-8", + ) + + summary = build_performance_summary( + jsonl_paths=[bench_path], + qspy_log_paths=[qspy_path], + baseline_label="linux-default", + ) + text = render_text_summary(summary) + + assert summary["host"]["run_count"] == 2 + assert "Host benchmark runs: 2" in text + assert "Baseline: linux-default" in text + assert "windows-like" in text + assert "QSPY PERF records: 2" in text + assert "PERF_UPD kind=SPF" in text From e26edc70f9eb0b00695bd61a2310611822e61bba Mon Sep 17 00:00:00 2001 From: Peter Polidoro Date: Tue, 17 Mar 2026 10:25:48 -0400 Subject: [PATCH 08/10] Add more bench tests --- .github/workflows/ci.yml | 2 +- .github/workflows/publish.yml | 4 +- CHANGELOG.md | 3 + README.md | 104 +++++++++++------ RELEASING.md | 9 ++ pyproject.toml | 15 +++ scripts/bench_matrix.py | 145 ++++++++++++++++++++++-- src/arena_interface/arena_interface.py | 5 +- src/arena_interface/perf_summary_cli.py | 1 + tests/test_output_paths.py | 81 +++++++++++++ 10 files changed, 322 insertions(+), 47 deletions(-) create mode 100644 tests/test_output_paths.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 563b6a8..a9299f1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,7 +59,7 @@ jobs: python -m twine check dist/* - name: Store the distribution packages - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v7 with: name: python-package-distributions path: dist/ diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index ce817e0..9cc4b0d 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -37,7 +37,7 @@ jobs: run: python -m build 
- name: Store the distribution packages - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v7 with: name: python-package-distributions path: dist/ @@ -56,7 +56,7 @@ jobs: steps: - name: Download all the dists - uses: actions/download-artifact@v6 + uses: actions/download-artifact@v8 with: name: python-package-distributions path: dist/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 242342e..9bce9fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,3 +19,6 @@ - added a performance summary tool for benchmark JSONL files and QSPY PERF logs - added Windows-like benchmark task aliases and matrix entries for socket-tuning comparisons - downgraded post-run ALL_OFF cleanup failures to recorded benchmark warnings so completed measurements are preserved +- added stream-enabled Pixi tasks and matrix helpers for comparing Linux-default, Windows-like, and no-tuning host socket policies +- made benchmark JSONL and performance summary commands create parent output directories automatically +- updated GitHub Actions artifact steps to current major versions to avoid the Node.js 20 deprecation warnings on GitHub-hosted runners diff --git a/README.md b/README.md index 8259af9..4cc10e6 100644 --- a/README.md +++ b/README.md @@ -173,31 +173,47 @@ pixi run bench-full pixi run bench-smoke pixi run bench-windows-like pixi run bench-full-windows-like -pixi run bench-socket-matrix +pixi run bench-full-no-nodelay +pixi run bench-full-no-latency-tuning +pixi run bench-frame-rate-compare +pixi run bench-frame-rate-matrix --ethernet 192.168.10.194 pixi run perf-summary --jsonl bench_results.jsonl +pixi run perf-summary-frame-rate ``` -Pixi forwards extra arguments after the task name to the underlying command, so -`pixi run bench-full --json-out bench_results.jsonl --label "lab-a"` works -as expected and appends one JSON result object for that run. 
For this repository -layout, use `pixi run bench-full --ethernet 192.168.10.194 ...` rather than an -extra separator before `--ethernet`. +Pixi forwards extra arguments after the task name to the underlying command. +That works well for `bench` subcommand options already baked into the task, so +`pixi run bench-full --json-out bench_results.jsonl --label "lab-a"` appends +one JSON result object for that run as expected. -For the stock transport-agnostic tasks (`all-on`, `all-off`, `bench`, -`bench-smoke`, and `bench-full`), set `ARENA_ETH_IP` or -`ARENA_SERIAL_PORT` in your shell before running the task. This is the -simplest way to choose the transport without rewriting the task command. +For tasks that wrap `arena-interface bench`, transport selection is easier via +`ARENA_ETH_IP` or `ARENA_SERIAL_PORT` because those are top-level CLI options. +For example: + +```sh +export ARENA_ETH_IP=192.168.10.194 +pixi run bench-full --label linux-default --json-out bench_results.jsonl +``` + +Script-backed tasks such as `bench-socket-matrix` and `bench-frame-rate-matrix` +parse their own command-line options, so passing `--ethernet` or `--serial` +after the task name is fine there. Task notes: - `bench-full` runs the full suite plus a streaming phase using `patterns/pat0004.pat`. -- `bench-windows-like` disables `TCP_QUICKACK` while leaving `TCP_NODELAY` on. +- `bench-full-windows-like` disables `TCP_QUICKACK` while leaving `TCP_NODELAY` on. This is a useful approximation when comparing a Linux host with a Windows-like Ethernet socket policy. -- `bench-full-windows-like` is the same comparison but also includes the stream phase. -- `bench-no-latency-tuning` disables both `TCP_NODELAY` and `TCP_QUICKACK`. -- `bench-socket-matrix` runs several socket-policy variants back-to-back and is - the fastest way to quantify host-side latency tuning effects. +- `bench-full-no-nodelay` isolates the effect of disabling `TCP_NODELAY` while + leaving `TCP_QUICKACK` enabled. 
+- `bench-full-no-latency-tuning` disables both `TCP_NODELAY` and `TCP_QUICKACK`. +- `bench-frame-rate-compare` runs four stream-enabled variants back-to-back and + appends labeled results to `bench_artifacts/frame_rate_results.jsonl`. +- `bench-frame-rate-matrix` runs the same comparison through the dedicated + matrix helper and prints a delta summary relative to the default socket policy. +- `perf-summary-frame-rate` renders a compact host-side comparison from the + collected frame-rate JSONL file and writes `bench_artifacts/frame_rate_summary.json`. ### Plain pip @@ -222,8 +238,36 @@ A few key metrics are usually enough to compare runs: - stream frame rate and transmit throughput (`stream_frames`) - reconnect count plus cleanup status -A convenient workflow is to keep both artifacts under a single timestamped -directory and then generate a compact summary from them. +### Quick frame-rate comparison of host socket tuning + +For a repeatable “how much are the Linux socket optimizations helping?” pass, +set the transport once and run the dedicated comparison task: + +```sh +export ARENA_ETH_IP=192.168.10.194 +pixi run bench-frame-rate-compare +pixi run perf-summary-frame-rate +``` + +This captures four labeled runs in `bench_artifacts/frame_rate_results.jsonl`: + +- `linux-default` +- `windows-like` +- `no-nodelay` +- `no-latency-tuning` + +The summary reports the deltas relative to `linux-default`, including stream +rate, stream throughput, SPF achieved rate, and command RTT. Parent directories +for `--json-out` are created automatically, so `bench_artifacts/` does not need +to exist ahead of time. 
+ +If you prefer a one-command matrix that also prints a relative delta summary to +stdout, use the script-backed task instead: + +```sh +pixi run bench-frame-rate-matrix --ethernet 192.168.10.194 +pixi run perf-summary-frame-rate-matrix +``` ### Terminal A: start QSPY and capture the raw QS log @@ -246,34 +290,30 @@ after the benchmark completes so the log contains the full run. ### Terminal B: run the host benchmark and append JSONL results -Linux default socket policy: - -```sh -pixi run bench-full --ethernet 192.168.10.194 \ - --label linux-default \ - --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl -``` - -Windows-like comparison with `TCP_QUICKACK` disabled: +Set the transport once, then run whichever socket-policy variants you want to +compare: ```sh -pixi run bench-full-windows-like --ethernet 192.168.10.194 \ - --label windows-like \ - --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl +export ARENA_ETH_IP=192.168.10.194 +pixi run bench-full --label linux-default --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl +pixi run bench-full-windows-like --label windows-like --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl +pixi run bench-full-no-nodelay --label no-nodelay --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl +pixi run bench-full-no-latency-tuning --label no-latency-tuning --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl ``` PowerShell: ```powershell -pixi run bench-full --ethernet 192.168.10.194 --label "windows-host" --json-out bench_artifacts\2026-03-13-eth\bench_results.jsonl +$env:ARENA_ETH_IP = "192.168.10.194" +pixi run bench-full --label "linux-default" --json-out bench_artifacts\2026-03-13-eth\bench_results.jsonl +pixi run bench-full-windows-like --label "windows-like" --json-out bench_artifacts\2026-03-13-eth\bench_results.jsonl ``` -For a one-command socket comparison matrix, use: +For a one-command socket comparison matrix with custom output paths, use: ```sh pixi 
run bench-socket-matrix --ethernet 192.168.10.194 \ --stream-path patterns/pat0004.pat \ - --label host-matrix \ --json-out bench_artifacts/2026-03-13-eth/bench_results.jsonl ``` diff --git a/RELEASING.md b/RELEASING.md index 3bd7826..2f066bd 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -52,6 +52,15 @@ The normal release path is to let GitHub Actions publish via Trusted Publishing. Local `twine upload` is only needed if you intentionally want to bypass that workflow. +Notes: + +- The workflows use current `actions/upload-artifact` and + `actions/download-artifact` major versions to stay ahead of the GitHub-hosted + runner migration away from Node.js 20. +- In the PyPI publish job, `Generating and uploading digital attestations` is + expected with Trusted Publishing and `pypa/gh-action-pypi-publish`; it is not + a separate failure condition. + ## Conda-forge Conda-forge packages are maintained in a separate feedstock repository, so the diff --git a/pyproject.toml b/pyproject.toml index 1c0bd23..a098290 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -120,9 +120,24 @@ bench-no-quickack = "arena-interface --no-tcp-quickack bench" bench-windows-like = "arena-interface --no-tcp-quickack bench" bench-full-windows-like = "arena-interface --no-tcp-quickack bench --stream-path patterns/pat0004.pat" bench-no-nodelay = "arena-interface --no-tcp-nodelay bench" +bench-full-no-nodelay = "arena-interface --no-tcp-nodelay bench --stream-path patterns/pat0004.pat" bench-no-latency-tuning = "arena-interface --no-tcp-nodelay --no-tcp-quickack bench" +bench-full-no-latency-tuning = "arena-interface --no-tcp-nodelay --no-tcp-quickack bench --stream-path patterns/pat0004.pat" +bench-frame-default = "arena-interface bench --stream-path patterns/pat0004.pat --label linux-default --json-out bench_artifacts/frame_rate_results.jsonl" +bench-frame-windows-like = "arena-interface --no-tcp-quickack bench --stream-path patterns/pat0004.pat --label windows-like --json-out 
bench_artifacts/frame_rate_results.jsonl" +bench-frame-no-nodelay = "arena-interface --no-tcp-nodelay bench --stream-path patterns/pat0004.pat --label no-nodelay --json-out bench_artifacts/frame_rate_results.jsonl" +bench-frame-no-latency-tuning = "arena-interface --no-tcp-nodelay --no-tcp-quickack bench --stream-path patterns/pat0004.pat --label no-latency-tuning --json-out bench_artifacts/frame_rate_results.jsonl" +bench-frame-rate-compare = [ + { task = "bench-frame-default" }, + { task = "bench-frame-windows-like" }, + { task = "bench-frame-no-nodelay" }, + { task = "bench-frame-no-latency-tuning" }, +] bench-socket-matrix = "python scripts/bench_matrix.py" +bench-frame-rate-matrix = "python scripts/bench_matrix.py --stream-path patterns/pat0004.pat --json-out bench_artifacts/frame_rate_matrix.jsonl" perf-summary = "arena-interface-perf-summary" +perf-summary-frame-rate = "arena-interface-perf-summary --jsonl bench_artifacts/frame_rate_results.jsonl --baseline linux-default --json-out bench_artifacts/frame_rate_summary.json" +perf-summary-frame-rate-matrix = "arena-interface-perf-summary --jsonl bench_artifacts/frame_rate_matrix.jsonl --baseline default --json-out bench_artifacts/frame_rate_matrix_summary.json" [tool.pixi.target.linux-64.dependencies] make = "*" diff --git a/scripts/bench_matrix.py b/scripts/bench_matrix.py index f19e042..cb1a290 100644 --- a/scripts/bench_matrix.py +++ b/scripts/bench_matrix.py @@ -3,6 +3,8 @@ from __future__ import annotations import argparse +import math +import os import sys from pathlib import Path @@ -23,17 +25,56 @@ "no_latency_tuning": {"tcp_nodelay": False, "tcp_quickack": False}, } +COMPARISON_METRICS: tuple[tuple[str, str, str], ...] 
= ( + ("stream_rate_hz", "stream rate", "Hz"), + ("stream_tx_mbps", "stream TX", "Mb/s"), + ("spf_achieved_hz", "SPF achieved", "Hz"), + ("cmd_mean_ms", "command RTT mean", "ms"), + ("cmd_p99_ms", "command RTT p99", "ms"), +) + + +def _env_int(name: str, default: int) -> int: + raw = os.environ.get(name) + if raw is None or not raw.strip(): + return default + try: + return int(raw) + except ValueError: + return default + def build_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( - description="Run the ArenaController host benchmark suite across multiple TCP socket variants." + description=( + "Run the ArenaController host benchmark suite across multiple " + "TCP socket variants." + ) + ) + parser.add_argument( + "--ethernet", + default=os.environ.get("ARENA_ETH_IP"), + help="Firmware Ethernet IP address (defaults to ARENA_ETH_IP)", + ) + parser.add_argument( + "--serial", + default=os.environ.get("ARENA_SERIAL_PORT"), + help="Serial port path (defaults to ARENA_SERIAL_PORT)", + ) + parser.add_argument( + "--baudrate", + type=int, + default=_env_int("ARENA_SERIAL_BAUDRATE", SERIAL_BAUDRATE), + help="Serial baudrate (defaults to ARENA_SERIAL_BAUDRATE or 115200)", ) - parser.add_argument("--ethernet", default=None, help="Firmware Ethernet IP address") - parser.add_argument("--serial", default=None, help="Serial port path") - parser.add_argument("--baudrate", type=int, default=SERIAL_BAUDRATE, help="Serial baudrate") parser.add_argument("--debug", action="store_true", help="Enable debug prints") parser.add_argument("--label", default=None, help="Base label added to each run") - parser.add_argument("--json-out", type=Path, default=None, help="Append each result object to this JSONL file") + parser.add_argument( + "--json-out", + type=Path, + default=None, + help="Append each result object to this JSONL file", + ) parser.add_argument( "--variants", nargs="+", @@ -41,7 +82,11 @@ def build_parser() -> argparse.ArgumentParser: default=["default", 
"windows_like", "no_nodelay", "no_latency_tuning"], help="Socket-option variants to execute", ) - parser.add_argument("--include-connect", action="store_true", help="Include TCP connect timing in each run") + parser.add_argument( + "--include-connect", + action="store_true", + help="Include TCP connect timing in each run", + ) parser.add_argument("--connect-iters", type=int, default=200) parser.add_argument("--cmd-iters", type=int, default=2000) parser.add_argument( @@ -69,19 +114,86 @@ def build_parser() -> argparse.ArgumentParser: return parser +def _display_variant_name(variant_name: str) -> str: + return variant_name.replace("_", "-") + + def variant_label(base_label: str | None, variant_name: str) -> str: - return f"{base_label} [{variant_name}]" if base_label else variant_name + pretty_name = _display_variant_name(variant_name) + return f"{base_label} [{pretty_name}]" if base_label else pretty_name + + +def _is_finite_number(value: object) -> bool: + return isinstance(value, (int, float)) and math.isfinite(float(value)) + + +def _suite_metric(suite: dict, metric_name: str) -> float | None: + if metric_name == "cmd_mean_ms": + value = (suite.get("command_rtt") or {}).get("mean_ms") + elif metric_name == "cmd_p99_ms": + value = (suite.get("command_rtt") or {}).get("p99_ms") + elif metric_name == "spf_achieved_hz": + value = (suite.get("spf_updates") or {}).get("achieved_hz") + elif metric_name == "stream_rate_hz": + value = (suite.get("stream_frames") or {}).get("rate_hz") + elif metric_name == "stream_tx_mbps": + value = (suite.get("stream_frames") or {}).get("tx_mbps") + else: # pragma: no cover - defensive guard + raise KeyError(metric_name) + if not _is_finite_number(value): + return None + return float(value) + + +def _format_metric_delta(delta: float, pct: float | None, unit: str) -> str: + magnitude = f"{delta:+.3f} {unit}" + if unit == "Hz": + magnitude = f"{delta:+.1f} {unit}" + elif unit == "Mb/s": + magnitude = f"{delta:+.2f} {unit}" + if pct is 
None: + return magnitude + return f"{magnitude} ({pct:+.1f}%)" + + +def print_delta_summary(successful_suites: list[tuple[str, dict]]) -> None: + if len(successful_suites) < 2: + return + + baseline_name, baseline_suite = next( + ((name, suite) for name, suite in successful_suites if name == "default"), + successful_suites[0], + ) + + print("") + print(f"relative to baseline: {_display_variant_name(baseline_name)}") + for variant_name, suite in successful_suites: + if variant_name == baseline_name: + continue + bits: list[str] = [] + for metric_name, label, unit in COMPARISON_METRICS: + baseline_value = _suite_metric(baseline_suite, metric_name) + current_value = _suite_metric(suite, metric_name) + if baseline_value is None or current_value is None: + continue + delta = current_value - baseline_value + pct = None if baseline_value == 0 else (delta / baseline_value) * 100.0 + bits.append(f"{label} {_format_metric_delta(delta, pct, unit)}") + if bits: + print(f"- {_display_variant_name(variant_name)}: " + "; ".join(bits)) def print_summary(variant_name: str, suite: dict) -> None: meta = suite.get("meta", {}) quickack = meta.get("tcp_quickack_supported") and meta.get("tcp_quickack_requested") status = suite.get("status", "unknown") + variant_display = _display_variant_name(variant_name) if status == "error": error = suite.get("error") or {} print( - f"{variant_name:>18} | FAILED {error.get('phase')} {error.get('type')}: {error.get('message')}" + f"{variant_display:>18} | FAILED {error.get('phase')} " + f"{error.get('type')}: {error.get('message')}" ) return @@ -90,7 +202,8 @@ def print_summary(variant_name: str, suite: dict) -> None: stream = suite.get("stream_frames") line = ( - f"{variant_name:>18} | status={status} cmd mean={cmd['mean_ms']:.3f} ms p99={cmd['p99_ms']:.3f} | " + f"{variant_display:>18} | status={status} cmd mean={cmd['mean_ms']:.3f} ms " + f"p99={cmd['p99_ms']:.3f} | " f"spf={spf['achieved_hz']:.1f} Hz | nodelay={meta.get('tcp_nodelay')} 
quickack={quickack}" ) if isinstance(stream, dict): @@ -107,15 +220,22 @@ def configure_transport(ai: ArenaInterface, args: argparse.Namespace) -> None: if args.serial: ai.set_serial_mode(args.serial, baudrate=args.baudrate) return - raise SystemExit("No transport selected. Provide --ethernet IP or --serial PORT.") + raise SystemExit( + "No transport selected. Provide --ethernet IP or --serial PORT, " + "or set ARENA_ETH_IP / ARENA_SERIAL_PORT." + ) def main() -> int: args = build_parser().parse_args() exit_code = 0 + successful_suites: list[tuple[str, dict]] = [] print("variant | command RTT | SPF | socket policy") - print("----------------------+---------------------------+------------+-------------------------------") + print( + "----------------------+---------------------------+------------+" + "-------------------------------" + ) for variant_name in args.variants: variant = VARIANTS[variant_name] @@ -150,7 +270,10 @@ def main() -> int: print_summary(variant_name, suite) if suite.get("status") == "error": exit_code = 1 + else: + successful_suites.append((variant_name, suite)) + print_delta_summary(successful_suites) return exit_code diff --git a/src/arena_interface/arena_interface.py b/src/arena_interface/arena_interface.py index 5e92210..b7fbdd2 100644 --- a/src/arena_interface/arena_interface.py +++ b/src/arena_interface/arena_interface.py @@ -18,6 +18,7 @@ import time from contextlib import contextmanager from dataclasses import dataclass +from pathlib import Path from typing import Callable, cast try: @@ -2124,6 +2125,8 @@ def _bench_finalize_suite_results(self, results: dict) -> dict: @staticmethod def write_bench_jsonl(path: str, result: dict) -> None: """Append one benchmark result object to a JSONL file.""" - with open(path, "a", encoding="utf-8") as f: + output_path = Path(path) + output_path.parent.mkdir(parents=True, exist_ok=True) + with output_path.open("a", encoding="utf-8") as f: f.write(json.dumps(result, sort_keys=True)) f.write("\n") diff 
--git a/src/arena_interface/perf_summary_cli.py b/src/arena_interface/perf_summary_cli.py index ee27d75..76889f1 100644 --- a/src/arena_interface/perf_summary_cli.py +++ b/src/arena_interface/perf_summary_cli.py @@ -62,6 +62,7 @@ def main(argv: list[str] | None = None) -> int: ) sys.stdout.write(render_text_summary(summary)) if args.json_out is not None: + args.json_out.parent.mkdir(parents=True, exist_ok=True) args.json_out.write_text(json.dumps(summary, indent=2, sort_keys=True), encoding="utf-8") return 0 diff --git a/tests/test_output_paths.py b/tests/test_output_paths.py new file mode 100644 index 0000000..e937657 --- /dev/null +++ b/tests/test_output_paths.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import importlib.util +import json +from pathlib import Path + +from arena_interface import ArenaInterface +from arena_interface.perf_summary_cli import main as perf_summary_main + + +def _load_bench_matrix_module(): + script_path = Path(__file__).resolve().parents[1] / "scripts" / "bench_matrix.py" + spec = importlib.util.spec_from_file_location("_bench_matrix", script_path) + assert spec is not None and spec.loader is not None + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def test_write_bench_jsonl_creates_parent_directory(tmp_path) -> None: + jsonl_path = tmp_path / "nested" / "bench" / "results.jsonl" + + ArenaInterface.write_bench_jsonl( + str(jsonl_path), + {"status": "ok", "meta": {"label": "unit-test"}}, + ) + + assert jsonl_path.is_file() + payload = json.loads(jsonl_path.read_text(encoding="utf-8").strip()) + assert payload["status"] == "ok" + assert payload["meta"]["label"] == "unit-test" + + +def test_perf_summary_cli_creates_parent_directory(tmp_path) -> None: + bench_path = tmp_path / "inputs" / "bench_results.jsonl" + bench_path.parent.mkdir(parents=True, exist_ok=True) + bench_path.write_text( + json.dumps( + { + "meta": { + "label": "linux-default", + "tcp_nodelay": True, + 
"tcp_quickack_requested": True, + }, + "status": "ok", + "cleanup": {"status": "ok", "all_off_error": None}, + "command_rtt": {"mean_ms": 0.5, "p99_ms": 0.8, "reconnects": 0}, + "spf_updates": { + "achieved_hz": 200.0, + "target_hz": 200.0, + "update_rtt_ms": {"p99_ms": 0.9}, + }, + "stream_frames": { + "frames": 1000, + "rate_hz": 199.5, + "tx_mbps": 22.0, + "cmd_rtt_ms": {"p99_ms": 1.1}, + }, + } + ) + + "\n", + encoding="utf-8", + ) + summary_path = tmp_path / "nested" / "perf" / "summary.json" + + rc = perf_summary_main(["--jsonl", str(bench_path), "--json-out", str(summary_path)]) + + assert rc == 0 + assert summary_path.is_file() + summary = json.loads(summary_path.read_text(encoding="utf-8")) + assert summary["host"]["run_count"] == 1 + + +def test_bench_matrix_parser_uses_environment_transport_defaults(monkeypatch) -> None: + monkeypatch.setenv("ARENA_ETH_IP", "192.0.2.25") + module = _load_bench_matrix_module() + + args = module.build_parser().parse_args([]) + + assert args.ethernet == "192.0.2.25" + assert args.serial is None From aaa1918b17cf121e35e7e8b73cafaf0f5164db9e Mon Sep 17 00:00:00 2001 From: Peter Polidoro Date: Tue, 17 Mar 2026 11:18:06 -0400 Subject: [PATCH 09/10] Add pixi version tasks --- CHANGELOG.md | 9 ++ README.md | 30 +++- RELEASING.md | 15 +- codemeta.json | 4 +- pyproject.toml | 4 +- scripts/bump_version.py | 241 +++++++++++++++++++++++++++++++ src/arena_interface/__about__.py | 2 +- tests/test_packaging_metadata.py | 88 +++++++++++ 8 files changed, 380 insertions(+), 13 deletions(-) create mode 100644 scripts/bump_version.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bce9fd..27742b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## Unreleased + +- TBD + +## 7.0.1 - 2026-03-17 + +- added a Pixi version helper task and script so package metadata and release + notes can be bumped consistently before the next tag + ## 7.0.0 - 2026-03-13 - moved package versioning to a single source of truth in diff --git 
a/README.md b/README.md index 4cc10e6..d4b2e7f 100644 --- a/README.md +++ b/README.md @@ -157,6 +157,7 @@ metadata stay aligned. ```sh pixi install pixi run help +pixi run version-show pixi run check pixi run release-check pixi run qtools-install @@ -224,6 +225,22 @@ python -m build python -m twine check dist/* ``` +### Version updates + +Use the Pixi helper to update the repository version consistently before a +release: + +```sh +pixi run version-show +pixi run version-bump 7.0.1 +pixi install +``` + +`version-bump` updates the package metadata files and turns the current +`## Unreleased` changelog section into a dated release entry. Run `pixi install` +immediately afterward so `pixi.lock` is regenerated from the updated metadata +before you commit or tag the release. + ## Performance characterization workflow `bench_results.jsonl` stores the host-side benchmark results, while QSPY @@ -374,12 +391,13 @@ Publishing. Recommended release flow: -1. Update `CHANGELOG.md`. -2. Run `pixi run release-check` or the equivalent pip commands above. -3. Commit the release changes and create a release tag such as `7.0.0` or - `v7.0.0`. -4. Push the tag to GitHub. -5. The `publish.yml` workflow builds the wheel and sdist, then publishes them +1. Add release notes under `## Unreleased` in `CHANGELOG.md`. +2. Run `pixi run version-bump <version>` and then `pixi install`. +3. Run `pixi run release-check` or the equivalent pip commands above. +4. Commit the release changes and create a release tag such as `<version>` or + `v<version>`. +5. Push the tag to GitHub. +6. The `publish.yml` workflow builds the wheel and sdist, then publishes them to PyPI using Trusted Publishing. For conda-forge guidance, see `RELEASING.md`.
diff --git a/RELEASING.md b/RELEASING.md index 2f066bd..3fa44b6 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -15,9 +15,18 @@ Using Pixi: ```sh pixi install +pixi run version-show pixi run release-check ``` +If you are preparing a release, bump the version first and then refresh the +lock file: + +```sh +pixi run version-bump 7.0.1 +pixi install +``` + If you use Pixi and `pyproject.toml` changed, regenerate `pixi.lock` with `pixi install` and commit the updated lock file. @@ -42,7 +51,7 @@ One-time setup on PyPI: Release trigger: -1. Push a release tag such as `7.0.0` or `v7.0.0`. +1. Push a release tag such as `<version>` or `v<version>`. 2. GitHub Actions will build `dist/*` and publish to PyPI without storing a long-lived API token in GitHub secrets. 3. `workflow_dispatch` is kept as a manual build/debug entry point; the actual @@ -68,11 +77,11 @@ upstream project does not need to vendor the feedstock into this repository. Recommended flow after a PyPI release: -1. Wait for the `7.0.0` sdist to be available on PyPI. +1. Wait for the `<version>` sdist to be available on PyPI. 2. Generate or update a conda-forge v1 recipe: ```sh - grayskull pypi --use-v1-format --strict-conda-forge arena-interface==7.0.0 + grayskull pypi --use-v1-format --strict-conda-forge arena-interface==<version> ``` 3.
Submit the generated `recipe.yaml` to `conda-forge/staged-recipes` for the diff --git a/codemeta.json b/codemeta.json index b793089..65993ae 100644 --- a/codemeta.json +++ b/codemeta.json @@ -4,9 +4,9 @@ "license": "https://spdx.org/licenses/BSD-3-Clause", "codeRepository": "https://github.com/janelia-python/arena_interface_python", "dateCreated": "2023-10-17", - "dateModified": "2026-03-13", + "dateModified": "2026-03-17", "name": "arena-interface", - "version": "7.0.0", + "version": "7.0.1", "description": "Python interface and CLI for the Reiser Lab ArenaController.", "programmingLanguage": [ "Python 3" diff --git a/pyproject.toml b/pyproject.toml index a098290..c5aeab7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,7 +76,7 @@ select = ["E", "F", "I", "B", "UP"] [tool.pixi.workspace] name = "arena-interface-python" -version = "7.0.0" +version = "7.0.1" description = "Python interface and benchmark tooling for ArenaController." authors = ["Peter Polidoro "] channels = ["conda-forge"] @@ -97,6 +97,8 @@ default = { features = ["dev"], solve-group = "default" } [tool.pixi.tasks] python = "python" help = "arena-interface --help" +version-show = "python scripts/bump_version.py --current" +version-bump = "python scripts/bump_version.py" all-off = "arena-interface all-off" all-on = "arena-interface all-on" format = "ruff format ." 
diff --git a/scripts/bump_version.py b/scripts/bump_version.py new file mode 100644 index 0000000..6582510 --- /dev/null +++ b/scripts/bump_version.py @@ -0,0 +1,241 @@ +from __future__ import annotations + +import argparse +import json +import re +import sys +from dataclasses import dataclass +from datetime import date +from pathlib import Path + + +ABOUT_RE = re.compile(r'(?m)^__version__\s*=\s*"([^"]+)"\s*$') +PIXI_WORKSPACE_VERSION_RE = re.compile( + r'(?ms)(^\[tool\.pixi\.workspace\]\n.*?^version = ")([^"]+)(")' +) +CODEMETA_VERSION_RE = re.compile(r'(?m)^(\s*"version"\s*:\s*")([^"]+)(",?\s*)$') +CODEMETA_DATE_MODIFIED_RE = re.compile( + r'(?m)^(\s*"dateModified"\s*:\s*")([^"]+)(",?\s*)$' +) +UNRELEASED_RE = re.compile(r'(?ms)^## Unreleased\s*\n(.*?)(?=^## |\Z)') + + +@dataclass(frozen=True) +class RepoPaths: + root: Path + about: Path + pyproject: Path + codemeta: Path + changelog: Path + + @classmethod + def from_root(cls, root: Path) -> "RepoPaths": + return cls( + root=root, + about=root / "src" / "arena_interface" / "__about__.py", + pyproject=root / "pyproject.toml", + codemeta=root / "codemeta.json", + changelog=root / "CHANGELOG.md", + ) + + +@dataclass(frozen=True) +class UpdatedFile: + path: Path + changed: bool + + +def read_text(path: Path) -> str: + return path.read_text(encoding="utf-8") + + +def write_text(path: Path, text: str) -> None: + path.write_text(text, encoding="utf-8") + + +def normalize_version(value: str) -> str: + normalized = value.strip() + if normalized.startswith("v"): + normalized = normalized[1:] + if not normalized or any(char.isspace() for char in normalized): + raise ValueError("version must be a non-empty string without whitespace") + return normalized + + +def require_substitution(path: Path, text: str, pattern: re.Pattern[str], replacement: str) -> str: + updated, count = pattern.subn(replacement, text, count=1) + if count != 1: + raise RuntimeError(f"expected exactly one replacement in {path}") + return updated + + +def 
current_version(paths: RepoPaths) -> str: + match = ABOUT_RE.search(read_text(paths.about)) + if match is None: + raise RuntimeError(f"could not find __version__ assignment in {paths.about}") + return match.group(1) + + +def update_about(paths: RepoPaths, new_version: str) -> UpdatedFile: + original = read_text(paths.about) + updated = require_substitution( + paths.about, + original, + ABOUT_RE, + rf'__version__ = "{new_version}"', + ) + if updated != original: + write_text(paths.about, updated) + return UpdatedFile(paths.about, True) + return UpdatedFile(paths.about, False) + + +def update_pyproject(paths: RepoPaths, new_version: str) -> UpdatedFile: + original = read_text(paths.pyproject) + updated = require_substitution( + paths.pyproject, + original, + PIXI_WORKSPACE_VERSION_RE, + rf'\g<1>{new_version}\g<3>', + ) + if updated != original: + write_text(paths.pyproject, updated) + return UpdatedFile(paths.pyproject, True) + return UpdatedFile(paths.pyproject, False) + + +def update_codemeta(paths: RepoPaths, new_version: str, release_date: str) -> UpdatedFile: + original = read_text(paths.codemeta) + updated = require_substitution( + paths.codemeta, + original, + CODEMETA_VERSION_RE, + rf'\g<1>{new_version}\g<3>', + ) + updated = require_substitution( + paths.codemeta, + updated, + CODEMETA_DATE_MODIFIED_RE, + rf'\g<1>{release_date}\g<3>', + ) + if updated != original: + parsed = json.loads(updated) + if parsed["version"] != new_version: + raise RuntimeError("codemeta version update verification failed") + if parsed["dateModified"] != release_date: + raise RuntimeError("codemeta dateModified update verification failed") + write_text(paths.codemeta, updated) + return UpdatedFile(paths.codemeta, True) + return UpdatedFile(paths.codemeta, False) + + +def ensure_unreleased(text: str) -> str: + if re.search(r'(?m)^## Unreleased\s*$', text): + return text + + heading = "# Changelog\n\n" + if heading not in text: + raise RuntimeError("CHANGELOG.md must start with '# 
Changelog'") + + return text.replace(heading, heading + "## Unreleased\n\n- TBD\n\n", 1) + + +def update_changelog(paths: RepoPaths, new_version: str, release_date: str) -> UpdatedFile: + original = read_text(paths.changelog) + + if re.search( + rf'(?m)^## {re.escape(new_version)}\s*-\s*\d{{4}}-\d{{2}}-\d{{2}}\s*$', + original, + ): + raise RuntimeError(f"CHANGELOG.md already contains a section for {new_version}") + + text = ensure_unreleased(original) + match = UNRELEASED_RE.search(text) + if match is None: + raise RuntimeError("could not find an '## Unreleased' section in CHANGELOG.md") + + unreleased_body = match.group(1).strip() or "- TBD" + replacement = ( + "## Unreleased\n\n" + "- TBD\n\n" + f"## {new_version} - {release_date}\n\n" + f"{unreleased_body}\n\n" + ) + updated = text[: match.start()] + replacement + text[match.end() :].lstrip("\n") + if updated != original: + write_text(paths.changelog, updated) + return UpdatedFile(paths.changelog, True) + return UpdatedFile(paths.changelog, False) + + +def parse_args(argv: list[str]) -> argparse.Namespace: + parser = argparse.ArgumentParser( + description=( + "Update the repository version in package metadata and roll " + "CHANGELOG.md from 'Unreleased' into a dated release section." 
+ ) + ) + parser.add_argument( + "new_version", + nargs="?", + help="new package version such as 7.0.1", + ) + parser.add_argument( + "--current", + action="store_true", + help="print the current package version and exit", + ) + parser.add_argument( + "--date", + default=date.today().isoformat(), + help="release date to record in CHANGELOG.md (default: today)", + ) + parser.add_argument( + "--root", + type=Path, + default=Path(__file__).resolve().parents[1], + help=argparse.SUPPRESS, + ) + return parser.parse_args(argv) + + +def main(argv: list[str] | None = None) -> int: + args = parse_args(argv or sys.argv[1:]) + paths = RepoPaths.from_root(args.root.resolve()) + + if args.current: + print(current_version(paths)) + return 0 + + if not args.new_version: + print("error: NEW_VERSION is required unless --current is used", file=sys.stderr) + return 2 + + try: + new_version = normalize_version(args.new_version) + except ValueError as exc: + print(f"error: {exc}", file=sys.stderr) + return 2 + + old_version = current_version(paths) + if new_version == old_version: + print(f"version is already {new_version}") + return 0 + + changed = [ + update_about(paths, new_version), + update_pyproject(paths, new_version), + update_codemeta(paths, new_version, args.date), + update_changelog(paths, new_version, args.date), + ] + + updated_paths = [item.path.relative_to(paths.root).as_posix() for item in changed if item.changed] + print(f"updated version: {old_version} -> {new_version}") + for relative_path in updated_paths: + print(f" - {relative_path}") + print("next: run 'pixi install' to refresh pixi.lock, then review and commit the changes") + return 0 + + +if __name__ == "__main__": # pragma: no cover + raise SystemExit(main()) diff --git a/src/arena_interface/__about__.py b/src/arena_interface/__about__.py index 2f239f7..04d1086 100644 --- a/src/arena_interface/__about__.py +++ b/src/arena_interface/__about__.py @@ -1,6 +1,6 @@ """Project metadata for arena_interface.""" 
-__version__ = "7.0.0" +__version__ = "7.0.1" __description__ = "Python interface and CLI for the Reiser Lab ArenaController." __license__ = "BSD-3-Clause" __url__ = "https://github.com/janelia-python/arena_interface_python" diff --git a/tests/test_packaging_metadata.py b/tests/test_packaging_metadata.py index 8e96ede..5a474b7 100644 --- a/tests/test_packaging_metadata.py +++ b/tests/test_packaging_metadata.py @@ -1,6 +1,9 @@ from __future__ import annotations import json +import shutil +import subprocess +import sys import tomllib from pathlib import Path @@ -22,3 +25,88 @@ def test_version_metadata_is_consistent() -> None: ) assert pyproject["tool"]["pixi"]["workspace"]["version"] == __version__ assert codemeta["version"] == __version__ + + + +def bump_target(version: str) -> str: + parts = version.split(".") + if parts and all(part.isdigit() for part in parts): + parts[-1] = str(int(parts[-1]) + 1) + return ".".join(parts) + return version + ".post1" + + +def test_version_bump_helper_reports_current_version() -> None: + result = subprocess.run( + [sys.executable, "scripts/bump_version.py", "--current"], + check=True, + capture_output=True, + cwd=ROOT, + text=True, + ) + + assert result.stdout.strip() == __version__ + + +def test_version_bump_helper_updates_repo_copy(tmp_path: Path) -> None: + repo_copy = tmp_path / "repo" + repo_copy.mkdir() + + for relative in [ + "CHANGELOG.md", + "codemeta.json", + "pyproject.toml", + "src/arena_interface/__about__.py", + ]: + source = ROOT / relative + target = repo_copy / relative + target.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(source, target) + + new_version = bump_target(__version__) + release_date = "2026-03-17" + result = subprocess.run( + [ + sys.executable, + str(ROOT / "scripts" / "bump_version.py"), + "--root", + str(repo_copy), + "--date", + release_date, + f"v{new_version}", + ], + check=True, + capture_output=True, + text=True, + ) + + assert f"updated version: {__version__} -> {new_version}" in 
result.stdout + + about_text = (repo_copy / "src/arena_interface/__about__.py").read_text(encoding="utf-8") + assert f'__version__ = "{new_version}"' in about_text + + with (repo_copy / "pyproject.toml").open("rb") as f: + pyproject = tomllib.load(f) + assert pyproject["tool"]["pixi"]["workspace"]["version"] == new_version + + codemeta = json.loads((repo_copy / "codemeta.json").read_text(encoding="utf-8")) + assert codemeta["version"] == new_version + assert codemeta["dateModified"] == release_date + + changelog_text = (repo_copy / "CHANGELOG.md").read_text(encoding="utf-8") + assert changelog_text.startswith( + "# Changelog\n\n" + "## Unreleased\n\n" + "- TBD\n\n" + f"## {new_version} - {release_date}\n\n" + ) + + +def test_pixi_version_tasks_are_declared() -> None: + with (ROOT / "pyproject.toml").open("rb") as f: + pyproject = tomllib.load(f) + + tasks = pyproject["tool"]["pixi"]["tasks"] + + assert tasks["version-show"] == "python scripts/bump_version.py --current" + assert tasks["version-bump"] == "python scripts/bump_version.py" From ddde976990ad00eb2c727420eb24e33e93e7a273 Mon Sep 17 00:00:00 2001 From: Frank Loesche Date: Thu, 19 Mar 2026 10:41:16 -0400 Subject: [PATCH 10/10] update dependencies from upstream --- pixi.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pixi.lock b/pixi.lock index 11ee540..fa874b0 100644 --- a/pixi.lock +++ b/pixi.lock @@ -342,8 +342,8 @@ packages: timestamp: 1770939786096 - pypi: ./ name: arena-interface - version: 7.0.0 - sha256: ca64cadf9210e5274c4792514a497fd37f054e51773da43a50d99602935b3ccc + version: 7.0.1 + sha256: 89ef0ffd3e97cdcb998c1c70ade786ee46a5d3b94552e2c9845cf8f62fe9b48a requires_dist: - click>=8.1 - pyserial>=3.5