-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_tests.py
More file actions
executable file
·129 lines (106 loc) · 3.95 KB
/
run_tests.py
File metadata and controls
executable file
·129 lines (106 loc) · 3.95 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
#!/usr/bin/env python3
import os
import sys
import time
import json
import argparse
import subprocess
from pathlib import Path
from datetime import datetime
def run_tests(args):
    """Run the pytest suite with the options carried on *args*.

    Args:
        args: argparse.Namespace with boolean flags ``unit``, ``integration``,
            ``load``, ``hardware``, ``performance``, ``coverage`` and
            ``benchmark``, plus a string ``report_dir`` naming an existing
            directory where reports are written.

    Returns:
        int: the pytest process exit code (0 means all tests passed).
    """
    start_time = time.time()

    # Invoke pytest through the current interpreter so the run uses the same
    # environment this script was launched with; a bare "pytest" on PATH may
    # belong to a different Python installation.
    cmd = [sys.executable, "-m", "pytest"]

    # Map each selection flag to its test directory; fall back to the whole
    # tests/ tree when no selection flag was given.
    selections = [
        (args.unit, "tests/unit"),
        (args.integration, "tests/integration"),
        (args.load, "tests/load"),
        (args.hardware, "tests/hardware"),
        (args.performance, "tests/performance"),
    ]
    chosen = [path for flag, path in selections if flag]
    cmd.extend(chosen if chosen else ["tests/"])

    # Coverage reporting (pytest-cov options).
    if args.coverage:
        cmd.extend([
            "--cov=.",
            "--cov-report=term-missing",
            f"--cov-report=html:{args.report_dir}/coverage",
        ])

    # Benchmarking (pytest-benchmark options).
    if args.benchmark:
        cmd.extend([
            "--benchmark-only",
            "--benchmark-autosave",
            f"--benchmark-storage={args.report_dir}/benchmarks",
        ])

    print(f"\nRunning tests: {' '.join(cmd)}")
    result = subprocess.run(cmd, capture_output=True, text=True)

    # Persist the raw pytest output; explicit UTF-8 so non-ASCII test output
    # cannot crash the write on platforms with a narrow default encoding.
    output_file = Path(args.report_dir) / "test_output.txt"
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(result.stdout)
        if result.stderr:
            f.write("\n\nErrors:\n")
            f.write(result.stderr)

    # Machine-readable summary of this run.
    elapsed = time.time() - start_time
    summary = {
        "timestamp": datetime.now().isoformat(),
        "duration": elapsed,
        "exit_code": result.returncode,
        "command": " ".join(cmd),
        "test_types": {
            "unit": args.unit,
            "integration": args.integration,
            "load": args.load,
            "hardware": args.hardware,
            "performance": args.performance
        }
    }
    summary_file = Path(args.report_dir) / "test_summary.json"
    with open(summary_file, "w", encoding="utf-8") as f:
        json.dump(summary, f, indent=2)

    # Human-readable recap on stdout.
    print("\nTest Summary:")
    print("-" * 40)
    print(f"Duration: {elapsed:.2f} seconds")
    print(f"Exit Code: {result.returncode}")
    print(f"Report Directory: {args.report_dir}")

    return result.returncode
def setup_reports_dir(base_dir: str) -> str:
    """Create a timestamped report directory under *base_dir* and return it.

    The directory is named after the current local time (``YYYYMMDD_HHMMSS``)
    and is pre-populated with ``coverage/`` and ``benchmarks/`` subdirectories.
    """
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    target = Path(base_dir) / stamp
    # parents=True creates the timestamped directory itself on the first
    # subdirectory call; exist_ok keeps repeated runs in the same second safe.
    for sub in ("coverage", "benchmarks"):
        (target / sub).mkdir(parents=True, exist_ok=True)
    return str(target)
def main():
    """Parse CLI options, prepare the report directory, and run the suite."""
    parser = argparse.ArgumentParser(description="Run test suite and generate reports")

    # Boolean switches: test-type selection first, then run options.
    for flag, desc in (
        ("--unit", "Run unit tests"),
        ("--integration", "Run integration tests"),
        ("--load", "Run load tests"),
        ("--hardware", "Run hardware simulation tests"),
        ("--performance", "Run performance tests"),
        ("--coverage", "Generate coverage reports"),
        ("--benchmark", "Run benchmarks"),
    ):
        parser.add_argument(flag, action="store_true", help=desc)
    parser.add_argument("--report-dir", type=str, default="test_reports",
                        help="Base directory for test reports")
    args = parser.parse_args()

    # Swap the base path for the concrete timestamped directory before running.
    args.report_dir = setup_reports_dir(args.report_dir)

    sys.exit(run_tests(args))


if __name__ == "__main__":
    main()