From 713134c2d7a14b8acb43aa437881ceb18f22d1be Mon Sep 17 00:00:00 2001
From: Ikeoluwa Oladele
Date: Mon, 24 Mar 2025 12:45:37 -0400
Subject: [PATCH] The tests I failed to include in the last commit

---
 .../source_credibility_metric_service.py      |  17 +--
 backend/mainService/test/conftest.py          |   4 +-
 .../mainService/test/test_citation_service.py |  33 ++++--
 .../test_source_credibility_metric_service.py | 112 +++++++-----------
 .../metricsService/tests/utils/test_cache.py  |  60 +++++++++-
 5 files changed, 132 insertions(+), 94 deletions(-)

diff --git a/backend/mainService/src/services/source_credibility_metric_service.py b/backend/mainService/src/services/source_credibility_metric_service.py
index 012b0f5..43b7c71 100644
--- a/backend/mainService/src/services/source_credibility_metric_service.py
+++ b/backend/mainService/src/services/source_credibility_metric_service.py
@@ -46,25 +46,18 @@ def _calculate_source_score(metric: Dict, source: Dict,
 async def get_credibility_metrics(sources: List[Dict]) -> List[Dict]:
     """
     Call the credibility API to get metrics for sources.
-    Uses connection pooling and timeout handling for better performance.
-
-    Args:
-        sources (List[Dict]): List of source metadata
-
-    Returns:
-        List[Dict]: Credibility metrics for each source
+    Uses timeout handling for better reliability.
     """
     credibility_metrics_api = os.getenv('CREDIBILITY_API_URL','')
     if not credibility_metrics_api:
         logger.error("CREDIBILITY_API_URL is not set")
         return []

-    # Configure timeout and connection settings
-    timeout = aiohttp.ClientTimeout(total=10)  # 10 seconds total timeout
-    connector = aiohttp.TCPConnector(limit=10)  # Limit concurrent connections
+    # Configure timeout
+    timeout = aiohttp.ClientTimeout(total=10)

     try:
-        async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
+        async with aiohttp.ClientSession(timeout=timeout) as session:
             async with session.post(
                 credibility_metrics_api,
                 json={'sources': sources},
@@ -81,8 +74,6 @@ async def get_credibility_metrics(sources: List[Dict]) -> List[Dict]:
     except Exception:
         logger.exception("Error calling credibility API")
         return []
-    finally:
-        connector.close()

 async def calculate_overall_score(credibility_metrics: List[Dict], sources_with_scores: List[Dict],
                                   rerank_weight: float = 0.6, credibility_weight: float = 0.4) -> Dict[str, Any]:
diff --git a/backend/mainService/test/conftest.py b/backend/mainService/test/conftest.py
index 8ecfb2d..ef656ae 100644
--- a/backend/mainService/test/conftest.py
+++ b/backend/mainService/test/conftest.py
@@ -45,4 +45,6 @@ def mock_scraper():
 def mock_playwright_driver():
     mock_driver = AsyncMock()
     mock_driver.quit = AsyncMock()
-    return mock_driver
\ No newline at end of file
+    return mock_driver
+
+pytest_plugins = ['pytest_asyncio']
\ No newline at end of file
diff --git a/backend/mainService/test/test_citation_service.py b/backend/mainService/test/test_citation_service.py
index fe40ea2..1080487 100644
--- a/backend/mainService/test/test_citation_service.py
+++ b/backend/mainService/test/test_citation_service.py
@@ -186,7 +186,10 @@ async def test_process_citation_auto_success(

     # Mock credibility metrics
     mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}]
-    mock_calculate_overall_score.return_value = 0.8
+    mock_calculate_overall_score.return_value = {
+        "overall_score": 84.00,
+        "source_scores": [84.00]
+    }

     # Mock document processing
     mock_doc = MagicMock()
@@ -210,7 +213,7 @@ async def test_process_citation_auto_success(
     assert "overall_score" in result
in result assert "sources" in result assert result["result"] == ["Test Citation"] - assert result["overall_score"] == 0.8 + assert result["overall_score"] == 84.00 assert len(result["sources"]) == 1 @pytest.mark.asyncio @@ -299,7 +302,10 @@ async def test_process_citation_web_success( # Mock credibility metrics mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}] - mock_calculate_overall_score.return_value = 0.8 + mock_calculate_overall_score.return_value = { + "overall_score": 84.00, + "source_scores": [84.00] + } # Mock document processing mock_doc = MagicMock() @@ -325,7 +331,7 @@ async def test_process_citation_web_success( assert "overall_score" in result assert "sources" in result assert result["result"] == ["Test Citation"] - assert result["overall_score"] == 0.8 + assert result["overall_score"] == 84.00 assert len(result["sources"]) == 1 @pytest.mark.asyncio @@ -407,7 +413,10 @@ async def test_process_citation_source_success( # Mock credibility metrics mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}] - mock_calculate_overall_score.return_value = 0.8 + mock_calculate_overall_score.return_value = { + "overall_score": 84.00, + "source_scores": [84.00] + } # Mock document processing mock_doc = MagicMock() @@ -432,7 +441,7 @@ async def test_process_citation_source_success( assert "overall_score" in result assert "sources" in result assert result["result"] == ["Test Citation"] - assert result["overall_score"] == 0.8 + assert result["overall_score"] == 84.00 assert len(result["sources"]) == 1 @pytest.mark.asyncio @@ -506,7 +515,10 @@ async def test_process_citation_existing_index( # Mock credibility metrics mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}] - mock_calculate_overall_score.return_value = 0.8 + mock_calculate_overall_score.return_value = { + "overall_score": 84.00, + "source_scores": [84.00] + } # Mock document processing mock_doc = MagicMock() @@ -530,7 +542,7 @@ async def test_process_citation_existing_index( assert "overall_score" in result assert "sources" in result assert result["result"] == ["Test Citation"] - assert result["overall_score"] == 0.8 + assert result["overall_score"] == 84.00 assert len(result["sources"]) == 1 @pytest.mark.asyncio @@ -638,7 +650,10 @@ async def test_process_citation_mla_style( # Mock credibility metrics mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}] - mock_calculate_overall_score.return_value = 0.8 + mock_calculate_overall_score.return_value = { + "overall_score": 84.00, + "source_scores": [84.00] + } # Mock document processing mock_doc = MagicMock() diff --git a/backend/mainService/test/test_source_credibility_metric_service.py b/backend/mainService/test/test_source_credibility_metric_service.py index 2ba76e3..5cb8257 100644 --- a/backend/mainService/test/test_source_credibility_metric_service.py +++ b/backend/mainService/test/test_source_credibility_metric_service.py @@ -72,89 +72,69 @@ async def test_get_credibility_metrics_exception(): # Assert assert result == [] -def test_calculate_overall_score_success(): - # Arrange +@pytest.mark.asyncio +async def test_calculate_overall_score_success(): + # Test data credibility_metrics = [ { "status": "success", - "data": { - "credibility_score": 0.85 - } - }, + "data": {"credibility_score": 0.8} + } + ] + sources_with_scores = [ { - "status": "success", - "data": { - "credibility_score": 
-            }
+            "rerank_score": 0.9
         }
     ]

-    # Act
-    result = calculate_overall_score(credibility_metrics)
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    assert isinstance(result, dict)
+    assert "overall_score" in result
+    assert "source_scores" in result
+    assert result["overall_score"] == 86.00  # (0.9 * 0.6 + 0.8 * 0.4) * 100

-    # Assert
-    assert result == 0.80  # (0.85 + 0.75) / 2
-
-def test_calculate_overall_score_empty():
-    # Arrange
-    credibility_metrics = []
-
-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.0
+@pytest.mark.asyncio
+async def test_calculate_overall_score_empty():
+    result = await calculate_overall_score([], [])
+    assert result["overall_score"] == 0.00
+    assert result["source_scores"] == []

-def test_calculate_overall_score_mixed_status():
-    # Arrange
+@pytest.mark.asyncio
+async def test_calculate_overall_score_mixed_status():
     credibility_metrics = [
-        {
-            "status": "success",
-            "data": {
-                "credibility_score": 0.85
-            }
-        },
-        {
-            "status": "error",
-            "data": {
-                "credibility_score": 0.75
-            }
-        }
+        {"status": "success", "data": {"credibility_score": 0.8}},
+        {"status": "failed", "data": {"credibility_score": 0.5}}
+    ]
+    sources_with_scores = [
+        {"rerank_score": 0.9},
+        {"rerank_score": 0.7}
     ]

-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.85  # Only considers successful responses
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    print(result)
+    assert len(result["source_scores"]) == 2
+    assert result["source_scores"][0] == 86.00

-def test_calculate_overall_score_missing_data():
-    # Arrange
+@pytest.mark.asyncio
+async def test_calculate_overall_score_missing_data():
     credibility_metrics = [
-        {
-            "status": "success"
-        }
+        {"status": "success", "data": {}}
+    ]
+    sources_with_scores = [
+        {"rerank_score": 0.9}
     ]

-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.0
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    assert result["overall_score"] == 0.00

-def test_calculate_overall_score_exception():
-    # Arrange
+@pytest.mark.asyncio
+async def test_calculate_overall_score_exception():
     credibility_metrics = [
-        {
-            "status": "success",
-            "data": {
-                "credibility_score": "invalid"  # Invalid score type
-            }
-        }
+        {"status": "success", "data": None}
+    ]
+    sources_with_scores = [
+        {"rerank_score": 0.9}
     ]

-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.0
\ No newline at end of file
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    assert result["overall_score"] == 0.00
\ No newline at end of file
diff --git a/backend/metricsService/tests/utils/test_cache.py b/backend/metricsService/tests/utils/test_cache.py
index b52b707..25ffb90 100644
--- a/backend/metricsService/tests/utils/test_cache.py
+++ b/backend/metricsService/tests/utils/test_cache.py
@@ -5,18 +5,14 @@
 @pytest.mark.asyncio
 async def test_get_cache_miss():
     """Test get_cache when cache miss occurs"""
-    from src.utils.cache import get_cache
-
     # Test with a non-existent key
     result = await get_cache("non_existent_key")
     assert result is None

 @pytest.mark.asyncio
-@pytest.mark.skip(reason="Not implemented yet")
 async def test_set_get_cache():
     """Test setting and getting cache values"""
-    from src.utils.cache import get_cache, set_cache
-
+    # Test data
     test_key = "test_key"
     test_value = {"data": "test_value"}

"test_value"} @@ -26,4 +22,58 @@ async def test_set_get_cache(): # Get the cached value result = await get_cache(test_key) + # Verify the result + assert result is not None assert result == test_value + assert isinstance(result, dict) + assert result["data"] == "test_value" + +@pytest.mark.asyncio +async def test_set_get_cache_with_expiry(): + """Test setting and getting cache values with expiration""" + test_key = "test_key_expiry" + test_value = {"data": "test_value"} + expiry = 60 # 60 seconds + + # Set the cache value with expiry + await set_cache(test_key, test_value, expiry) + + # Get the cached value + result = await get_cache(test_key) + + # Verify the result + assert result is not None + assert result == test_value + +@pytest.mark.asyncio +async def test_set_cache_invalid_value(): + """Test setting cache with invalid value""" + test_key = "test_key_invalid" + test_value = None + + # Set the cache value + await set_cache(test_key, test_value) + + # Get the cached value + result = await get_cache(test_key) + + # Verify the result + assert result is None + +@pytest.mark.asyncio +async def test_set_get_cache_multiple_values(): + """Test setting and getting multiple cache values""" + test_data = [ + ("key1", {"data": "value1"}), + ("key2", {"data": "value2"}), + ("key3", {"data": "value3"}) + ] + + # Set multiple cache values + for key, value in test_data: + await set_cache(key, value) + + # Get and verify each cached value + for key, expected_value in test_data: + result = await get_cache(key) + assert result == expected_value