Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
101 changes: 101 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,107 @@ jobs:
name: mpe-${{ matrix.goos }}-${{ matrix.goarch }}
path: target/mpe-${{ matrix.goos }}-${{ matrix.goarch }}${{ matrix.goos == 'windows' && '.exe' || '' }}

integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
needs: [go-build]
steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Download Linux AMD64 binary
uses: actions/download-artifact@v4
with:
name: mpe-linux-amd64
path: target/

- name: Make binary executable
run: chmod +x target/mpe-linux-amd64

- name: Create symlink for tests
run: |
mkdir -p target
ln -sf mpe-linux-amd64 target/mpe

- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
cache: 'pip'
cache-dependency-path: requirements-test.txt

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements-test.txt

- name: Run pytest
run: |
pytest -v --tb=short --color=yes --junitxml=pytest-report.xml --alluredir=allure-results
continue-on-error: false

- name: Install Allure
if: always()
run: |
curl -o allure-2.24.0.tgz -L https://github.com/allure-framework/allure2/releases/download/2.24.0/allure-2.24.0.tgz
tar -zxvf allure-2.24.0.tgz -C /opt/
sudo ln -s /opt/allure-2.24.0/bin/allure /usr/bin/allure

- name: Generate Allure Report
if: always()
run: |
allure generate allure-results -o allure-report --clean

- name: Upload Allure Report
if: always()
uses: actions/upload-artifact@v4
with:
name: allure-report
path: allure-report/

- name: Generate test summary
if: always()
run: |
echo "## 🧪 Python Integration Test Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

if [ -f pytest-report.xml ]; then
# Extract test counts from JUnit XML
TESTS=$(grep -o 'tests="[0-9]*"' pytest-report.xml | head -1 | grep -o '[0-9]*')
FAILURES=$(grep -o 'failures="[0-9]*"' pytest-report.xml | head -1 | grep -o '[0-9]*')
ERRORS=$(grep -o 'errors="[0-9]*"' pytest-report.xml | head -1 | grep -o '[0-9]*')

PASSED=$((TESTS - FAILURES - ERRORS))

echo "- **Total Tests:** ${TESTS}" >> $GITHUB_STEP_SUMMARY
echo "- **Passed:** ✅ ${PASSED}" >> $GITHUB_STEP_SUMMARY

if [ "${FAILURES}" != "0" ] || [ "${ERRORS}" != "0" ]; then
echo "- **Failed:** ❌ $((FAILURES + ERRORS))" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "❌ Some integration tests failed." >> $GITHUB_STEP_SUMMARY
else
echo "" >> $GITHUB_STEP_SUMMARY
echo "✅ All ${TESTS} integration tests passed!" >> $GITHUB_STEP_SUMMARY
fi

echo "" >> $GITHUB_STEP_SUMMARY
echo "📊 **Allure Report:** Check artifacts for detailed test report" >> $GITHUB_STEP_SUMMARY
else
echo "❌ No test results found" >> $GITHUB_STEP_SUMMARY
fi

- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
name: pytest-results
path: |
pytest-report.xml
allure-results/
.pytest_cache/
htmlcov/

example-validation:
name: Validate Example PolicyDomains
runs-on: ubuntu-latest
Expand Down
13 changes: 13 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,16 @@ target/

# Generated during artifact builds
NOTICES
# Python
__pycache__/
*.py[cod]
*.class

# Test tooling artifacts (pytest cache, local test virtualenv)
.pytest_cache/
.venv-test/

# Coverage output (htmlcov/.coverage from pytest-cov; coverage.out from Go)
htmlcov/
.coverage
coverage.out

# Test reports (JUnit XML and Allure results/reports)
pytest-report.xml
allure-results/
allure-report/
allure-history/
38 changes: 38 additions & 0 deletions pytest.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
[pytest]
# Pytest configuration for PolicyEngine

# Test discovery patterns
python_files = test_*.py *_test.py
python_classes = Test* *Tests
python_functions = test_*

# Test paths
testpaths = tests

# Output options
addopts =
    -v
    --strict-markers
    --tb=short
    --color=yes
    -ra

# Markers for organizing tests
markers =
    unit: Unit tests
    integration: Integration tests requiring PolicyEngine service
    smoke: Quick smoke tests
    api: HTTP API tests
    grpc: gRPC service tests
    policy: PolicyDomain validation tests
    slow: Tests that take significant time

# Logging
log_cli = false
log_cli_level = INFO
log_cli_format = %(asctime)s [%(levelname)8s] %(message)s
log_cli_date_format = %Y-%m-%d %H:%M:%S

# Coverage options (requires pytest-cov). NOTE: `[tool:pytest]` is the section
# header for setup.cfg, not pytest.ini — in this file the options live under
# the single [pytest] section. To enable coverage, extend `addopts` above with:
#   --cov=tests --cov-report=html --cov-report=term
25 changes: 25 additions & 0 deletions requirements-test.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Python testing dependencies for PolicyEngine
# Install with: pip install -r requirements-test.txt
# (scripts/run-tests.sh installs these into .venv-test automatically)

# Core testing framework
pytest>=7.4.0
pytest-asyncio>=0.21.0
pytest-timeout>=2.1.0
pytest-xdist>=3.3.0  # Parallel test execution

# Coverage reporting
pytest-cov>=4.1.0
allure-pytest>=2.13.0

# HTTP client for API testing
requests>=2.31.0
httpx>=0.24.0  # Async HTTP client

# gRPC client (if testing gRPC endpoints)
grpcio>=1.56.0
grpcio-tools>=1.56.0

# YAML parsing (for PolicyDomain files)
PyYAML>=6.0

# Utilities
python-dotenv>=1.0.0  # Environment variable management
100 changes: 100 additions & 0 deletions scripts/run-tests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
#!/usr/bin/env bash
#
# Run PolicyEngine Python integration tests.
#
# Usage: scripts/run-tests.sh [--integration|-i] [--smoke|-s] [--coverage|-c] [pytest args...]
#   --integration  include tests marked `integration` (skipped by default)
#   --smoke        run only tests marked `smoke`
#   --coverage     enable pytest-cov HTML + terminal reports
# Any other arguments are passed through to pytest verbatim.
#

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Resolve the repo root relative to this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

echo -e "${GREEN}PolicyEngine Test Runner${NC}"
echo "================================"

# Check if Python 3 is available
if ! command -v python3 &> /dev/null; then
    echo -e "${RED}Error: python3 is not installed${NC}"
    exit 1
fi

# Check if virtual environment exists, create if not
VENV_DIR="${PROJECT_ROOT}/.venv-test"
if [ ! -d "$VENV_DIR" ]; then
    echo -e "${YELLOW}Creating Python virtual environment...${NC}"
    python3 -m venv "$VENV_DIR"
fi

# Activate virtual environment
echo -e "${YELLOW}Activating virtual environment...${NC}"
# shellcheck disable=SC1091  # generated at runtime, not visible to linters
source "${VENV_DIR}/bin/activate"

# Install/upgrade dependencies
echo -e "${YELLOW}Installing test dependencies...${NC}"
pip install -q --upgrade pip
pip install -q -r "${PROJECT_ROOT}/requirements-test.txt"

# Build the mpe binary if it doesn't exist
if [ ! -f "${PROJECT_ROOT}/target/mpe" ]; then
    echo -e "${YELLOW}Building mpe binary...${NC}"
    cd "$PROJECT_ROOT"
    make build
fi

# Parse arguments
PYTEST_ARGS=()
RUN_INTEGRATION=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --integration|-i)
            RUN_INTEGRATION=true
            shift
            ;;
        --smoke|-s)
            PYTEST_ARGS+=("-m" "smoke")
            shift
            ;;
        --coverage|-c)
            PYTEST_ARGS+=("--cov=tests" "--cov-report=html" "--cov-report=term")
            shift
            ;;
        *)
            PYTEST_ARGS+=("$1")
            shift
            ;;
    esac
done

# Default: skip integration tests unless explicitly requested
if [ "$RUN_INTEGRATION" = false ]; then
    PYTEST_ARGS+=("-m" "not integration")
fi

# Run pytest
echo -e "${GREEN}Running tests...${NC}"
# ${PYTEST_ARGS[*]-} / ${PYTEST_ARGS[@]+...} guard against "unbound variable"
# under `set -u` on bash < 4.4 when the array is empty (possible when
# --integration is given with no other arguments).
echo "Command: pytest ${PYTEST_ARGS[*]-}"
echo ""

cd "$PROJECT_ROOT"
# BUG FIX: with `set -e`, a bare `pytest ...; TEST_EXIT_CODE=$?` aborts the
# script on test failure BEFORE the exit code is captured, skipping both
# `deactivate` and the result banner. Running pytest as an `if` condition
# suppresses errexit so the failure path below actually executes.
if pytest ${PYTEST_ARGS[@]+"${PYTEST_ARGS[@]}"}; then
    TEST_EXIT_CODE=0
else
    TEST_EXIT_CODE=$?
fi

# Deactivate virtual environment
deactivate

# Print results
echo ""
if [ $TEST_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}✅ All tests passed!${NC}"
else
    echo -e "${RED}❌ Tests failed with exit code ${TEST_EXIT_CODE}${NC}"
fi

exit $TEST_EXIT_CODE
12 changes: 12 additions & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# PolicyEngine Python Integration Tests
"""
Integration tests for the Manetu PolicyEngine.

This test suite provides Python-based integration testing for:
- HTTP API endpoints
- gRPC service interfaces
- PolicyDomain validation
- End-to-end policy evaluation workflows

Tests are discovered by pytest from this package (see pytest.ini); markers
such as ``integration`` and ``smoke`` select subsets of the suite.
"""

# Version of the test suite itself, independent of the engine's version.
__version__ = "0.1.0"
Loading
Loading