Skip to content

pypi updates and upgrade ci versions #733

pypi updates and upgrade ci versions

pypi updates and upgrade ci versions #733

Workflow file for this run

---
name: Test and Coverage

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  # Manual trigger; skip_dependencies lets build-docs run without its
  # upstream jobs when testing locally with `act`.
  workflow_dispatch:
    inputs:
      skip_dependencies:
        description: "Skip job dependencies (for local act testing)"
        required: false
        default: "false"
        type: choice
        options:
          - "true"
          - "false"

# Cancel any in-flight run for the same workflow + ref when a new one starts.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
test:
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: read
strategy:
matrix:
python-version: ["3.10", "3.12"]
steps:
- uses: actions/checkout@v6
with:
submodules: recursive
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: "pip"
cache-dependency-path: "pyproject.toml"
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential cmake ninja-build pkg-config libdbus-1-dev libglib2.0-dev libudev-dev libbluetooth-dev python3-dev
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install -e .[dev,test,examples]
- name: Run tests with coverage
run: |
python -m pytest tests/ -n auto --ignore=tests/benchmarks/ --junitxml=test-results.xml --cov=src/bluetooth_sig --cov-report=html --cov-report=xml --cov-report=term-missing --cov-fail-under=85
- name: Publish JUnit test report
if: always()
uses: dorny/test-reporter@v2
with:
name: Pytest
path: test-results.xml
reporter: java-junit
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Extract coverage percentage and create badge
if: matrix.python-version == '3.12'
run: |
python scripts/extract_coverage_badge.py
- name: Upload test results
if: always()
uses: actions/upload-artifact@v6
with:
name: test-results-${{ matrix.python-version }}
path: |
test-results.xml
retention-days: 30
- name: Upload coverage artifacts
if: matrix.python-version == '3.12'
uses: actions/upload-artifact@v6
with:
name: coverage-report
path: htmlcov
retention-days: 30
benchmark:
name: Performance Benchmarks
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: write
steps:
- uses: actions/checkout@v6
with:
submodules: recursive
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.11"
cache: "pip"
cache-dependency-path: "pyproject.toml"
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential cmake ninja-build pkg-config libdbus-1-dev libglib2.0-dev libudev-dev libbluetooth-dev python3-dev
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -e ".[test]"
- name: Set consistent Python hash seed
run: echo "PYTHONHASHSEED=0" >> $GITHUB_ENV
- name: Run benchmarks
run: |
export PYTHONPATH=$GITHUB_WORKSPACE/src:$PYTHONPATH
python -m pytest tests/benchmarks/ \
--benchmark-only \
--benchmark-json=benchmark.json \
--benchmark-columns=min,max,mean,stddev \
--benchmark-sort=name
- name: Store benchmark baseline (push)
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
uses: benchmark-action/github-action-benchmark@v1.20.7
with:
name: "Python Benchmarks"
tool: "pytest"
output-file-path: benchmark.json
external-data-json-path: ./cache/benchmark-data.json
save-data-file: true
auto-push: false
# don't require a token when not pushing pages
- name: Upload benchmark baseline cache
uses: actions/cache@v5
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
with:
path: ./cache
key: ${{ runner.os }}-benchmark
- name: Download previous benchmark data
uses: actions/cache@v5
if: github.event_name == 'pull_request'
with:
path: ./cache
key: ${{ runner.os }}-benchmark
- name: Check for previous baseline
if: github.event_name == 'pull_request'
id: baseline_check
run: |
if [ -f ./cache/benchmark-data.json ]; then
echo "found=true" >> $GITHUB_OUTPUT
else
echo "found=false" >> $GITHUB_OUTPUT
fi
- name: Debug benchmark JSON files (print summaries)
if: github.event_name == 'pull_request' && steps.baseline_check.outputs.found == 'true'
run: |
echo "--- current benchmark.json ---"
python - <<'PY'
import json, sys
try:
d = json.load(open('benchmark.json'))
print('type:', type(d).__name__)
if isinstance(d, dict):
print('keys:', list(d.keys())[:20])
elif isinstance(d, list):
print('len:', len(d))
if len(d):
print('sample keys:', list(d[0].keys()))
except Exception as e:
print('error reading benchmark.json', e)
PY
echo "--- cached ./cache/benchmark-data.json ---"
python - <<'PY'
import json, sys
try:
d = json.load(open('./cache/benchmark-data.json'))
print('type:', type(d).__name__)
if isinstance(d, dict):
print('top-level keys:', list(d.keys())[:20])
# if action format stores benchmarks under names, show those
for k in list(d.keys())[:5]:
v = d[k]
print(k, '-> type', type(v).__name__, ('len', len(v)) if hasattr(v, '__len__') else '')
elif isinstance(d, list):
print('len list:', len(d))
if len(d):
print('sample keys:', list(d[0].keys()))
except Exception as e:
print('error reading cache file', e)
PY
- name: Compare with baseline
if: github.event_name == 'pull_request' && steps.baseline_check.outputs.found == 'true'
uses: benchmark-action/github-action-benchmark@v1.20.7
with:
name: "Python Benchmarks"
tool: "pytest"
output-file-path: benchmark.json
github-token: ${{ secrets.GITHUB_TOKEN }}
external-data-json-path: ./cache/benchmark-data.json
alert-threshold: "200%"
comment-on-alert: true
fail-on-alert: true
summary-always: true
- name: Download previous benchmark history
uses: dawidd6/action-download-artifact@v16
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
continue-on-error: true
with:
name: benchmark-history
workflow: test-coverage.yml
branch: main
path: benchmark-history
- name: Update benchmark history
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
run: |
python scripts/update_benchmark_history.py \
benchmark.json \
benchmark-history/history.json
- name: Upload benchmark results
uses: actions/upload-artifact@v6
with:
name: benchmark-results
path: benchmark.json
retention-days: 90
- name: Upload benchmark history
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
uses: actions/upload-artifact@v6
with:
name: benchmark-history
path: benchmark-history/history.json
retention-days: 90
build-docs:
name: Build Documentation
needs: [test, benchmark]
# Allow running independently via workflow_dispatch for local act testing
if: ${{ !cancelled() && (success() || github.event.inputs.skip_dependencies == 'true') }}
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0 # Full history needed for git describe versioning
submodules: recursive
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.12"
cache: "pip"
cache-dependency-path: "pyproject.toml"
- name: Install documentation dependencies
run: |
python -m pip install --upgrade pip
pip install -e ".[docs]"
- name: Install system packages required for UML generation
run: |
# PlantUML requires Java; pydeps uses Graphviz (dot) to render
# dependency graphs. Install both so docs generation is reliable.
sudo apt-get update
sudo apt-get install -y graphviz plantuml openjdk-11-jre-headless
- name: Download coverage artifacts
uses: actions/download-artifact@v7
with:
name: coverage-report
path: htmlcov
continue-on-error: true
- name: Download benchmark artifacts
uses: actions/download-artifact@v7
with:
name: benchmark-results
path: benchmarks
continue-on-error: true
- name: Download benchmark history
uses: dawidd6/action-download-artifact@v16
continue-on-error: true
with:
name: benchmark-history
workflow: test-coverage.yml
branch: main
path: benchmarks
- name: Link benchmarks into docs directory
run: |
if [ -f "benchmarks/benchmark.json" ]; then
echo "✅ Benchmark results found, copying to docs/source/_static/performance/"
mkdir -p docs/source/_static/performance
cp benchmarks/benchmark.json docs/source/_static/performance/
if [ -f "benchmarks/history.json" ]; then
echo "✅ Benchmark history found, copying to docs/source/_static/performance/"
cp benchmarks/history.json docs/source/_static/performance/
else
echo "⚠️ No benchmark history found yet (first run creates it)"
fi
ls -la docs/source/_static/performance/
else
echo "⚠️ No benchmark results found, docs will build without benchmarks"
fi
- name: Build documentation
run: |
sphinx-build -b html -j auto docs/source docs/build/html
- name: Copy coverage into built docs
run: |
if [ -d "htmlcov" ]; then
echo "✅ Copying coverage reports into docs/build/html/"
cp -r htmlcov docs/build/html/coverage
ls -la docs/build/html/coverage/coverage-badge.json
else
echo "⚠️ No coverage reports to copy"
fi
- name: Upload built documentation artifact
uses: actions/upload-artifact@v6
with:
name: ${{ github.ref == 'refs/heads/main' && 'site' || 'built-docs' }}
path: docs/build/html/
retention-days: 30
test-documentation:
name: Test Documentation with Playwright
needs: build-docs
# Only run on PRs - documentation tests don't contribute to coverage
if: ${{ github.event_name == 'pull_request' && !cancelled() && (success() || github.event.inputs.skip_dependencies == 'true') }}
runs-on: ubuntu-latest
timeout-minutes: 120
permissions:
contents: read
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0 # Full history needed for git diff
submodules: recursive
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.12"
cache: "pip"
cache-dependency-path: "pyproject.toml"
- name: Download built documentation artifact
uses: actions/download-artifact@v7
with:
name: ${{ github.ref == 'refs/heads/main' && 'site' || 'built-docs' }}
path: docs/build/html
- name: Install test dependencies
run: |
python -m pip install --upgrade pip
pip install -e ".[test]"
- name: Cache Playwright browsers
uses: actions/cache@v5
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: playwright-${{ runner.os }}-${{ hashFiles('**/pyproject.toml') }}
restore-keys: |
playwright-${{ runner.os }}-
- name: Install Playwright browsers
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: playwright install chromium --with-deps
- name: Install Playwright system dependencies (if cached)
if: steps.playwright-cache.outputs.cache-hit == 'true'
run: playwright install-deps chromium
- name: Detect changed documentation files
id: changed-docs
run: |
# Detect which documentation files changed in this PR/push
if [ "${{ github.event_name }}" = "pull_request" ]; then
BASE_REF="${{ github.event.pull_request.base.sha }}"
HEAD_REF="${{ github.event.pull_request.head.sha }}"
else
# For push events, compare with previous commit
BASE_REF="${{ github.event.before }}"
HEAD_REF="${{ github.sha }}"
fi
echo "Comparing $BASE_REF...$HEAD_REF"
CHANGED_FILES=$(python scripts/detect_changed_docs.py --base "$BASE_REF" --head "$HEAD_REF" --verbose)
echo "changed_files=$CHANGED_FILES" >> $GITHUB_OUTPUT
# Log for debugging
echo "Files to test: $CHANGED_FILES"
- name: Run Playwright tests (selective)
env:
DOCS_TEST_FILES: ${{ steps.changed-docs.outputs.changed_files }}
run: |
echo "Testing files: $DOCS_TEST_FILES"
# Skip tests if no documentation changes detected
if [ "$DOCS_TEST_FILES" = "[]" ]; then
echo "ℹ️ No documentation changes detected, skipping Playwright tests"
mkdir -p test-results
echo '<?xml version="1.0" encoding="utf-8"?><testsuites><testsuite name="playwright" tests="0" skipped="0" failures="0" errors="0"></testsuite></testsuites>' > test-results/playwright-results.xml
exit 0
fi
# Dynamic worker calculation via pytest hook (pytest_xdist_auto_num_workers)
# Workers scale based on file count: few files = 1-2 workers, many files = 4-8 workers
pytest tests/docs/playwright_tests/ \
-m "built_docs and playwright" \
-n auto \
-v \
--junit-xml=test-results/playwright-results.xml
- name: Upload test results
if: always()
uses: actions/upload-artifact@v6
with:
name: playwright-test-results
path: test-results/
retention-days: 7
deploy:
name: Deploy to GitHub Pages
needs: build-docs
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
timeout-minutes: 10
permissions:
pages: write
id-token: write
environment:
name: github-pages
steps:
- name: Download site artifacts
uses: actions/download-artifact@v7
with:
name: site
path: site
- name: Upload to GitHub Pages
uses: actions/upload-pages-artifact@v4
with:
path: site
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4