-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconftest.py
More file actions
285 lines (220 loc) · 9.65 KB
/
conftest.py
File metadata and controls
285 lines (220 loc) · 9.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
"""
conftest.py
Pytest configuration file that sets up Playwright fixtures, reporting hooks, and test metadata.
Most report-related logic has been moved to html_reporter/report_handler.py.
"""
import os
from pathlib import Path
import pytest
from _pytest.runner import CallInfo
from playwright.sync_api import Playwright, sync_playwright, Browser, BrowserContext, Page
from html_reporter.report_handler import generate_html_report
from utils.soft_assert import SoftAssertContextManager
# Constants
REPORT_DIR = Path("reports")
REPORT_DIR.mkdir(exist_ok=True)
# Pytest Configuration
def pytest_addoption(parser):
    """Register the suite's custom command-line options.

    Args:
        parser: The pytest option parser to extend.
    """
    custom_options = (
        ("--headless", "false", "Run tests in headless mode (true/false)"),
        ("--html-report", "reports/test_report.html", "Path to HTML report file"),
        ("--report-title", "Test Automation Report", "Title for the HTML report"),
    )
    for name, default, help_text in custom_options:
        parser.addoption(name, action="store", default=default, help=help_text)
@pytest.hookimpl
def pytest_configure(config):
    """Apply session-wide settings once the command line is parsed.

    Initializes a screenshot counter on the config object (used to cap how
    many screenshots get attached to reports) and mirrors the --headless
    option into the HEADLESS environment variable so fixtures can read it
    without needing access to the config.

    Args:
        config: The pytest config object.
    """
    config.screenshots_amount = 0
    os.environ["HEADLESS"] = config.getoption("headless")
# Playwright Fixtures
@pytest.fixture(scope="session")
def playwright_instance() -> Playwright:
    """Provide the session-scoped Playwright driver.

    Yields:
        Playwright: The running Playwright instance. The context manager
        shuts it down automatically once the test session ends.
    """
    with sync_playwright() as pw:
        yield pw
        # Teardown is handled by the sync_playwright context manager.
@pytest.fixture(scope="session")
def browser(playwright_instance) -> Browser:
    """Launch a Chromium browser for the whole test session.

    Args:
        playwright_instance: The Playwright instance from the
            playwright_instance fixture.

    Yields:
        Browser: The launched Chromium browser; closed on session teardown.

    Environment Variables:
        HEADLESS: When 'true', runs the browser without a visible UI.
        GITHUB_RUN: When set (CI environments), forces headless mode.
    """
    headless = os.getenv('HEADLESS', 'false') == 'true' or os.getenv('GITHUB_RUN') is not None
    if headless:
        # Headless mode: no visible browser window.
        browser = playwright_instance.chromium.launch(headless=True)
    else:
        # Headed mode with a maximized window. HEADLESS is necessarily not
        # 'true' in this branch, so headless is simply False here (the
        # original re-evaluated the HEADLESS check, which was always False).
        browser = playwright_instance.chromium.launch(headless=False,
                                                      args=["--start-maximized"])
    yield browser
    # Ensure the browser is closed after all tests complete.
    browser.close()
@pytest.fixture(scope="session")
def browser_context(browser) -> BrowserContext:
    """Create an isolated browser context for the test session.

    A context carries its own cookies, sessions, and storage, keeping test
    state from leaking between unrelated runs.

    Args:
        browser: The Browser instance from the browser fixture.

    Yields:
        BrowserContext: The isolated context; closed on session teardown.

    Environment Variables:
        HEADLESS: When 'true', a fixed 1920x1080 viewport is used for
            consistent rendering in headless mode.
    """
    if os.getenv('HEADLESS', 'false') == 'true':
        # Fixed dimensions so headless rendering is deterministic.
        dimensions = {"width": 1920, "height": 1080}
        context = browser.new_context(viewport=dimensions, screen=dimensions)
    else:
        # Headed browsers start maximized; use the native window size
        # rather than imposing a fixed viewport.
        context = browser.new_context(no_viewport=True)
    yield context
    # Release the context's resources after the session finishes.
    context.close()
@pytest.fixture(scope="session")
def page(request, browser_context) -> Page:
    """Open a page in the shared browser context for test automation.

    The page is attached to ``request.node.page`` so reporting hooks and
    other fixtures can reach it (e.g. to capture screenshots).

    Args:
        request: The pytest request object for the current test.
        browser_context: The BrowserContext from the browser_context fixture.

    Yields:
        Page: The page under test; closed on session teardown.
    """
    new_page = browser_context.new_page()
    # Expose the page on the pytest item for hooks/fixtures downstream.
    request.node.page = new_page
    yield new_page
    # Free the page's resources once the session is done with it.
    new_page.close()
# Pytest Hooks
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call: CallInfo) -> None:
    """Enrich test reports for every phase (setup, call, teardown).

    Wraps pytest's report creation and hands the finished report to
    ResultHandler, which attaches screenshots, logs, and exception
    details for the HTML report.

    Args:
        item: The pytest test item being run.
        call: Information about the test function call.
    """
    # Deferred import to avoid a circular dependency with the reporter.
    from html_reporter.result_handler import ResultHandler

    # Let pytest build the TestReport first, then post-process it.
    outcome = yield
    ResultHandler(item.config).process_test_result(item, call, outcome.get_result())
@pytest.hookimpl
def pytest_sessionfinish(session):
    """Finalize the run: reap stray browsers and build the HTML report.

    Runs once after all tests complete:
      1. Kills orphaned Playwright browser processes spawned by this worker.
      2. On the master node only, generates the consolidated HTML report.
      3. Deletes the per-test JSON result files once the report is written.

    Args:
        session: The pytest session object containing test information.
    """
    import psutil

    own_pid = os.getpid()
    # Only reap direct children of this process that look like Playwright
    # browsers, so parallel runs on the same machine are left alone.
    for proc in psutil.process_iter():
        try:
            if proc.ppid() == own_pid and 'playwright' in proc.name().lower():
                proc.kill()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Process vanished or is off-limits; nothing to do.
            pass

    # xdist worker nodes carry 'workerinput'; only the master reports.
    if hasattr(session.config, "workerinput"):
        return

    generate_html_report(session, REPORT_DIR)

    # Remove intermediate JSON results only after the report succeeded.
    for json_file in REPORT_DIR.glob("*.json"):
        json_file.unlink(missing_ok=True)
# Test logging helper
@pytest.fixture
def test_logger(request):
    """Provide a callable that records messages into the test's report log.

    Args:
        request: The pytest request object.

    Returns:
        callable: Accepts a string and appends it to
        ``request.node.test_logs`` (creating the list on first use).
    """
    def _append(message: str):
        logs = getattr(request.node, "test_logs", None)
        if logs is None:
            logs = []
            request.node.test_logs = logs
        logs.append(message)
    return _append
@pytest.fixture
def soft_assert(request):
    """Provide a soft-assertion collector for the current test.

    Failures recorded through the returned context manager do not stop
    test execution; they accumulate and are attached to the pytest item
    (``_soft_assert``) so result processing can report them all together.

    Args:
        request: The pytest request object.

    Returns:
        SoftAssertContextManager: Collector shared with report processing.
    """
    manager = SoftAssertContextManager()
    # Stash on the item so the report hook can inspect collected failures.
    request.node._soft_assert = manager
    return manager
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
    """Track the currently executing test item globally.

    Publishes the running item as ``pytest.current_item`` so utilities
    that never receive the item directly can still discover which test
    is active, then clears it when the protocol finishes.

    Args:
        item: The current test item being executed.
        nextitem: The next test item to be executed (unused here).
    """
    pytest.current_item = item
    yield
    pytest.current_item = None
@pytest.hookimpl(tryfirst=True)
def pytest_configure_node(node):
    """Log that an xdist worker node has been configured.

    Gives visibility into test distribution and parallel startup.

    Args:
        node: The worker node being configured.
    """
    worker_id = node.gateway.id
    node.log.info(f"Worker {worker_id} is configured and starting")
@pytest.hookimpl(tryfirst=True)
def pytest_testnodedown(node, error):
    """Log the shutdown status of an xdist worker node.

    Emits an error entry with details when the node failed, otherwise a
    success entry.

    Args:
        node: The worker node that has finished.
        error: Error information if the node failed, None otherwise.
    """
    worker_id = node.gateway.id
    if not error:
        node.log.info(f"Worker {worker_id} finished successfully")
    else:
        node.log.error(f"Worker {worker_id} failed: {error}")