From 6d23ac76a264818877f20697e62d1cc84fe2f923 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 19 Feb 2026 02:47:51 +0000 Subject: [PATCH 1/4] Fix 685+ linting errors across the codebase - Auto-fix F401 (unused imports), F541 (f-string without placeholders), F841 (unused variables), F811 (redefined names), E712 (True/False comparisons), E713 (not-in membership tests) via ruff --fix - Fix E722 (bare except) by specifying Exception type - Fix E741 (ambiguous variable names) by renaming l/O/I to clear names - Fix E402 (import ordering) with noqa comments where sys.path manipulation requires imports after setup code - Fix F821 (undefined names), F601 (implicit string concat), E721 (type comparison) manually - Work in progress: ~78 errors remaining, mostly E402 in test files https://claude.ai/code/session_01JAbMU7GmooaRYsKsyPRdrc --- demos/demo_batch_dda_processing.py | 13 +- demos/demo_config.py | 1 - demos/demo_intelligent_chat.py | 1 - demos/demo_medical_data_linking.py | 6 +- demos/demo_patient_memory.py | 4 +- demos/demo_pdf_ingestion.py | 16 +- demos/demo_presentation.py | 16 +- demos/interactive_chat.py | 7 +- demos/live_api_demo.py | 2 - demos/migrate_medical_entities_to_neo4j.py | 6 +- demos/multi_agent_dda_demo.py | 4 +- demos/run_full_demo.py | 5 +- scripts/analyze_token_costs.py | 14 +- scripts/batch/enrich_all_domains.py | 4 +- scripts/batch/generate_dda_documents.py | 1 - scripts/batch/process_all_ddas.py | 8 +- scripts/batch/process_all_metadata.py | 6 +- scripts/batch/run_neo4j_queries.py | 3 +- scripts/cleanup_patient_memories.py | 2 +- scripts/dev/explore_graphiti.py | 1 - scripts/ingest_pdfs_for_rag.py | 6 +- scripts/inspection/check_graphs.py | 6 +- scripts/inspection/inspect_falkordb.py | 2 +- scripts/inspection/verify_hybrid_ontology.py | 21 +-- .../inspection/verify_metadata_enhancement.py | 5 +- .../verify_relationship_densification.py | 5 +- scripts/migrate_odin_schema.py | 2 +- scripts/migration/migrate_layers.py | 2 +- 
scripts/ontology_batch_remediation.py | 2 +- scripts/sync_data_to_postgres.py | 2 +- scripts/test_integration.py | 15 +- .../agents/data_architect/agent.py | 1 - .../agents/data_architect/dda_parser.py | 1 - .../agents/data_architect/domain_modeler.py | 6 +- .../handlers/modeling_feedback_handler.py | 11 +- .../data_architect/modeling_workflow.py | 2 - src/application/agents/data_engineer/agent.py | 1 - .../agents/data_engineer/handlers/build_kg.py | 20 +-- .../data_engineer/metadata_graph_builder.py | 4 +- .../agents/data_engineer/metadata_workflow.py | 2 +- .../agents/data_engineer/type_inference.py | 1 - src/application/agents/echo_agent.py | 1 - .../knowledge_manager/conflict_resolver.py | 2 +- .../agents/knowledge_manager/llm_reasoner.py | 2 +- .../knowledge_manager/ontology_mapper.py | 1 - .../knowledge_manager/reasoning_engine.py | 6 +- .../agents/knowledge_manager/server.py | 6 +- .../knowledge_manager/validation_engine.py | 11 +- src/application/api/crystallization_router.py | 2 - src/application/api/dependencies.py | 8 +- src/application/api/document_router.py | 6 +- src/application/api/evaluation_auth.py | 4 +- src/application/api/evaluation_router.py | 11 +- src/application/api/kg_router.py | 6 +- src/application/api/main.py | 34 ++--- src/application/api/remediation_router.py | 2 +- .../commands/knowledge_commands.py | 2 +- .../commands/knowledge_handlers.py | 3 +- src/application/jobs/promotion_scanner.py | 2 +- src/application/knowledge_management.py | 1 - src/application/rules/medical_rules.py | 3 +- .../services/automatic_layer_transition.py | 8 +- .../services/chat_history_service.py | 5 +- .../services/chunk_impact_analyzer.py | 6 +- .../services/confidence_framework.py | 5 +- .../services/conversation_graph.py | 6 - .../services/conversation_nodes.py | 27 ++-- .../services/conversational_intent_service.py | 2 +- .../services/cross_graph_query_builder.py | 2 +- .../services/crystallization_service.py | 8 - src/application/services/dikw_router.py 
| 7 +- .../services/document_quality_service.py | 3 - src/application/services/document_service.py | 5 +- src/application/services/document_tracker.py | 2 +- src/application/services/entity_extractor.py | 4 +- src/application/services/entity_resolver.py | 1 - .../services/episodic_memory_service.py | 3 +- .../services/extended_kg_audit_service.py | 3 +- .../services/extraction_audit_service.py | 8 +- .../services/feedback_integrator.py | 2 +- src/application/services/feedback_tracer.py | 2 +- .../services/hypergraph_bridge_service.py | 13 +- .../services/intelligent_chat_service.py | 4 +- .../services/knowledge_enricher.py | 2 - .../services/langgraph_chat_service.py | 2 - .../services/medical_data_linker.py | 2 +- .../services/memory_context_builder.py | 4 +- .../services/neo4j_pdf_ingestion.py | 11 +- .../services/neurosymbolic_query_service.py | 13 +- .../services/ontology_cleanup_service.py | 2 +- .../services/ontology_quality_service.py | 3 +- .../services/patient_memory_service.py | 4 +- .../services/pdf_ingestion_service.py | 1 - src/application/services/promotion_gate.py | 2 +- .../services/quality_scanner_job.py | 2 +- src/application/services/rag_service.py | 2 +- .../services/response_modulator.py | 7 +- .../services/rlhf_data_extractor.py | 4 +- .../services/semantic_grounding.py | 4 +- .../services/simple_pdf_ingestion.py | 1 - src/application/services/temporal_scoring.py | 2 +- src/composition_root.py | 72 ++++----- src/domain/confidence_models.py | 10 +- src/domain/goal_templates.py | 2 +- src/domain/hypergraph_models.py | 2 +- src/domain/knowledge_layers.py | 2 +- src/domain/medical_rules_models.py | 2 +- src/domain/metadata/workflow.py | 2 +- src/domain/ontologies/registry.py | 7 +- src/domain/ontology_quality_models.py | 2 +- src/domain/query_intent_models.py | 2 +- src/domain/temporal_models.py | 4 +- .../architecture_graph_writer.py | 2 +- src/infrastructure/database/config.py | 1 - src/infrastructure/database/models.py | 3 +- 
src/infrastructure/database/repositories.py | 9 +- src/infrastructure/graphiti_backend.py | 2 +- src/infrastructure/neo4j_backend.py | 3 +- src/infrastructure/parsers/markdown_parser.py | 8 +- src/infrastructure/redis_session_cache.py | 2 +- src/interfaces/cli.py | 34 ++--- src/interfaces/kg_operations_api.py | 8 +- src/multi_agent_system.egg-info/SOURCES.txt | 143 ++++++++++++++++-- src/services/governance_engine.py | 3 +- src/services/metadata_enrichment.py | 2 - .../test_automatic_layer_transition.py | 6 +- .../application/test_confidence_framework.py | 5 +- ...test_data_engineer_metadata_integration.py | 3 +- .../application/test_deduplication_service.py | 2 - .../test_document_quality_service.py | 2 - tests/application/test_entity_resolver.py | 4 +- tests/application/test_extended_kg_audit.py | 4 +- tests/application/test_feedback_tracer.py | 7 +- tests/application/test_hypergraph_bridge.py | 4 +- .../test_knowledge_manager_agent.py | 4 - tests/application/test_layer_transition.py | 2 +- .../test_neurosymbolic_query_service.py | 5 +- .../test_ontology_quality_service.py | 4 +- tests/application/test_quality_models.py | 7 +- tests/application/test_quality_scanner_job.py | 2 +- tests/application/test_remediation_router.py | 3 +- tests/application/test_remediation_service.py | 2 +- tests/application/test_semantic_grounding.py | 2 +- tests/application/test_semantic_normalizer.py | 2 +- tests/application/test_type_inference.py | 4 +- tests/conftest.py | 2 - tests/domain/metadata/conftest.py | 3 +- tests/domain/metadata/test_database.py | 1 - tests/domain/metadata/test_integration.py | 7 +- .../domain/metadata/test_metadata_objects.py | 1 - tests/domain/metadata/test_relationships.py | 2 +- tests/domain/metadata/test_workflow.py | 2 +- .../ontologies/test_ontology_registry.py | 1 - tests/domain/test_canonical_concepts.py | 3 +- tests/eval/conftest.py | 2 +- tests/eval/pytest_plugin.py | 8 +- tests/eval/runner/evaluators/deterministic.py | 6 +- 
tests/eval/runner/evaluators/llm_judge.py | 7 +- .../runner/evaluators/test_deterministic.py | 16 -- .../eval/runner/evaluators/test_llm_judge.py | 3 - tests/eval/runner/memory_inspector.py | 3 - tests/eval/runner/models.py | 2 +- tests/eval/runner/reporting.py | 6 +- tests/eval/runner/scenario_loader.py | 5 +- tests/eval/runner/scenario_models.py | 3 +- tests/eval/runner/scenario_orchestrator.py | 6 +- tests/eval/runner/test_memory_inspector.py | 10 +- tests/eval/runner/test_orchestrator_unit.py | 14 +- tests/eval/runner/test_scenario_loader.py | 11 -- .../infrastructure/test_config_validation.py | 2 - tests/integration/test_crystallization_api.py | 2 +- .../test_crystallization_integration.py | 7 +- .../integration/test_modeling_integration.py | 8 +- .../test_patient_memory_integration.py | 6 +- tests/integration/test_phase3_integration.py | 3 +- .../integration/test_quality_api_endpoints.py | 3 +- tests/interfaces/test_agent_servers.py | 2 - tests/manual/test_direct_writer.py | 11 +- tests/manual/test_e2e_flow.py | 10 +- tests/manual/test_falkor_connection.py | 1 - tests/manual/test_falkor_query.py | 1 - tests/manual/test_falkor_simple.py | 4 +- tests/manual/test_metadata_debug.py | 2 +- .../performance/test_modeling_performance.py | 7 - tests/test_automatic_promotion.py | 3 +- tests/test_chat_history_service.py | 3 +- tests/test_conversation_graph.py | 6 +- tests/test_conversational_layer.py | 9 +- tests/test_crystallization_pipeline.py | 5 +- tests/test_dikw_router.py | 1 - tests/test_evaluation_framework.py | 3 - tests/test_event_bus.py | 1 - tests/test_generate_data_map.py | 4 +- tests/test_improved_reasoning.py | 8 +- tests/test_in_memory_backend.py | 1 - tests/test_intelligent_chat_integration.py | 2 +- tests/test_kg_operations_api.py | 3 - tests/test_knowledge_manager.py | 1 - tests/test_medical_rules.py | 2 - tests/test_neo4j_ingestion.py | 2 +- tests/test_ontology_mapping.py | 1 - tests/test_phase2a_infrastructure.py | 6 +- 
tests/test_rabbitmq_event_bus.py | 2 - tests/test_reasoning_demo.py | 2 +- tests/test_shacl_validation.py | 6 +- tests/test_temporal_scoring.py | 2 - 206 files changed, 524 insertions(+), 687 deletions(-) diff --git a/demos/demo_batch_dda_processing.py b/demos/demo_batch_dda_processing.py index e9a056b1a..fca73349d 100755 --- a/demos/demo_batch_dda_processing.py +++ b/demos/demo_batch_dda_processing.py @@ -32,7 +32,6 @@ from dotenv import load_dotenv from composition_root import ( - bootstrap_command_bus, bootstrap_knowledge_management, create_modeling_command_handler, create_generate_metadata_command_handler @@ -125,14 +124,14 @@ def print_summary_statistics(results: List[Dict[str, Any]]): metadata_created = sum(1 for r in successful if r.get("metadata_created", False)) total_time = sum(r.get("processing_time_seconds", 0) for r in successful) - print(f"\n Knowledge Graph Created:") + print("\n Knowledge Graph Created:") print(f" Architecture Graphs: {architecture_created}") print(f" Metadata Graphs: {metadata_created}") print(f" Total Entities: {total_entities:,}") print(f" Total Relationships: {total_relationships:,}") print(f" Business Concepts Extracted: {total_business_concepts:,}") - print(f"\n Performance:") + print("\n Performance:") print(f" Total Time: {total_time:.2f}s") print(f" Avg Time per DDA: {total_time / len(successful):.2f}s") @@ -142,7 +141,7 @@ def print_summary_statistics(results: List[Dict[str, Any]]): print(f" Avg Relationships per DDA: {total_relationships / len(successful):.1f}") if failed: - print(f"\n Failed DDAs:") + print("\n Failed DDAs:") for result in failed: print(f" - {result['dda']}: {result['error']}") @@ -276,9 +275,9 @@ async def main(): if not args.auto_confirm: print_section("Confirmation") print(f"\n This will process {len(dda_files)} DDAs through the neurosymbolic pipeline:") - print(f" 1. Data Architect: Create architecture graphs") - print(f" 2. Data Engineer: Generate metadata with LLM enrichment") - print(f" 3. 
Knowledge Graph: Populate with multi-layer entities") + print(" 1. Data Architect: Create architecture graphs") + print(" 2. Data Engineer: Generate metadata with LLM enrichment") + print(" 3. Knowledge Graph: Populate with multi-layer entities") confirm = input("\n Proceed? (y/n): ").strip().lower() if confirm != 'y': diff --git a/demos/demo_config.py b/demos/demo_config.py index da743e92a..899a99175 100644 --- a/demos/demo_config.py +++ b/demos/demo_config.py @@ -2,7 +2,6 @@ # Customize the SynapseFlow Multi-Agent DDA Demo import os -from typing import Dict, Any class DemoConfig: """Configuration for the SynapseFlow Multi-Agent DDA Demo.""" diff --git a/demos/demo_intelligent_chat.py b/demos/demo_intelligent_chat.py index a4b299c70..31d66231f 100644 --- a/demos/demo_intelligent_chat.py +++ b/demos/demo_intelligent_chat.py @@ -25,7 +25,6 @@ import sys import asyncio from pathlib import Path -from datetime import datetime import logging # Add src to path diff --git a/demos/demo_medical_data_linking.py b/demos/demo_medical_data_linking.py index 857512678..19db3258a 100755 --- a/demos/demo_medical_data_linking.py +++ b/demos/demo_medical_data_linking.py @@ -61,7 +61,7 @@ def print_linking_results(result): print(f" ✗ Skipped: {result.skipped_count}") if result.links_created > 0: - print(f"\n Sample Links (first 10):") + print("\n Sample Links (first 10):") for i, link in enumerate(result.links[:10], 1): print(f"\n {i}. 
{link.medical_entity_name} ({link.medical_entity_type})") print(f" → {link.data_entity_name} ({link.data_entity_type})") @@ -120,8 +120,8 @@ async def main(): print_section("Configuration") print(f"\n Confidence Threshold: {args.confidence_threshold}") - print(f" Neo4j: bolt://localhost:7687") - print(f" Backend: Unified Neo4j (medical KG + DDA metadata)") + print(" Neo4j: bolt://localhost:7687") + print(" Backend: Unified Neo4j (medical KG + DDA metadata)") # Confirm execution if not args.auto_confirm: diff --git a/demos/demo_patient_memory.py b/demos/demo_patient_memory.py index 17c2a8896..4d985fd1c 100755 --- a/demos/demo_patient_memory.py +++ b/demos/demo_patient_memory.py @@ -219,7 +219,7 @@ async def main(): print("\n⚡ Checking Redis session cache...") session_data = await redis.get_session(session_id) if session_data: - print(f" ✅ Session active") + print(" ✅ Session active") print(f" - Device: {session_data.get('device')}") print(f" - Conversation count: {session_data.get('conversation_count')}") print(f" - Last activity: {session_data.get('last_activity')}") @@ -227,7 +227,7 @@ async def main(): # Check patient context print("\n🩺 Checking complete patient context...") context = await memory_service.get_patient_context(patient_id) - print(f" ✅ Patient context retrieved:") + print(" ✅ Patient context retrieved:") print(f" - Diagnoses: {len(context.diagnoses)}") for dx in context.diagnoses: print(f" • {dx['condition']} ({dx.get('icd10_code', 'N/A')})") diff --git a/demos/demo_pdf_ingestion.py b/demos/demo_pdf_ingestion.py index faa1bea96..8e52ea9d4 100644 --- a/demos/demo_pdf_ingestion.py +++ b/demos/demo_pdf_ingestion.py @@ -112,12 +112,12 @@ def print_summary_statistics(results: list): total_time = sum(r.get("total_time_seconds", 0) for r in successful) total_words = sum(r.get("markdown_words", 0) for r in successful) - print(f"\n Knowledge Extracted:") + print("\n Knowledge Extracted:") print(f" Entities: {total_entities:,}") print(f" Relationships: 
{total_relationships:,}") print(f" Total Words Processed: {total_words:,}") - print(f"\n Performance:") + print("\n Performance:") print(f" Total Time: {total_time:.2f}s") print(f" Avg Time per Document: {total_time / len(successful):.2f}s") @@ -127,7 +127,7 @@ def print_summary_statistics(results: list): print(f" Avg Relationships per Document: {total_relationships / len(successful):.1f}") if failed: - print(f"\n Failed Documents:") + print("\n Failed Documents:") for result in failed: print(f" - {result['document']}: {result['error']}") @@ -175,14 +175,14 @@ async def main(): return 1 print_header("PDF Knowledge Ingestion Demo") - print(f"\nConfiguration:") + print("\nConfiguration:") print(f" PDF Directory: {args.pdf_dir}") print(f" Max Documents: {args.max_docs or 'All'}") print(f" Save Markdown: {args.save_markdown}") if args.save_markdown: print(f" Markdown Directory: {args.markdown_dir}") - print(f" Graph Database: FalkorDB (localhost:6379)") - print(f" Graph Name: medical_knowledge") + print(" Graph Database: FalkorDB (localhost:6379)") + print(" Graph Name: medical_knowledge") # Initialize service print_section("Initializing Service") @@ -280,8 +280,8 @@ async def main(): print_summary_statistics(results) print_header("Ingestion Complete") - print(f"\nKnowledge graph 'medical_knowledge' updated in FalkorDB") - print(f"View at: http://localhost:3000") + print("\nKnowledge graph 'medical_knowledge' updated in FalkorDB") + print("View at: http://localhost:3000") return 0 diff --git a/demos/demo_presentation.py b/demos/demo_presentation.py index 2c3d18edf..d6cbf4144 100644 --- a/demos/demo_presentation.py +++ b/demos/demo_presentation.py @@ -13,12 +13,8 @@ Usage: python demo_presentation.py """ -import asyncio -import json import time -import subprocess import sys -import os from pathlib import Path # Add src to path for imports @@ -26,8 +22,6 @@ from infrastructure.in_memory_backend import InMemoryGraphBackend from application.event_bus import EventBus -from 
domain.event import KnowledgeEvent -from domain.roles import Role from interfaces.kg_operations_api import app, initialize_api from fastapi.testclient import TestClient @@ -134,7 +128,7 @@ def demo_1_basic_operations(self): if response.status_code == 200: result = response.json() - print(f"✅ Query executed successfully") + print("✅ Query executed successfully") print(f" Results: {result['result_count']} items found") print(f" Execution time: {result['execution_time']:.3f}s") else: @@ -149,7 +143,7 @@ def demo_2_event_driven_architecture(self): print("✅ Event bus initialized successfully") print(f" Type: {type(self.event_bus).__name__}") - print(f" Status: Operational") + print(" Status: Operational") # Step 2: Simulate event publishing self.print_step(2, "Simulating Event Publishing") @@ -218,7 +212,7 @@ def demo_3_batch_operations(self): if response.status_code == 200: result = response.json() - print(f"✅ Batch executed successfully") + print("✅ Batch executed successfully") print(f" Total operations: {result['total_operations']}") print(f" Successful: {result['successful']}") print(f" Failed: {result['failed']}") @@ -247,7 +241,7 @@ def demo_4_api_functionality(self): response = self.api_client.get("/stats") if response.status_code == 200: result = response.json() - print(f"✅ Statistics retrieved") + print("✅ Statistics retrieved") print(f" Entities: {result['entity_count']}") print(f" Relationships: {result['relationship_count']}") print(f" Total nodes: {result['total_nodes']}") @@ -298,7 +292,7 @@ def demo_5_advanced_features(self): response = self.api_client.post("/query", json=complex_query) if response.status_code == 200: result = response.json() - print(f"✅ Complex query executed") + print("✅ Complex query executed") print(f" Results: {result['result_count']} items") print(f" Execution time: {result['execution_time']:.3f}s") else: diff --git a/demos/interactive_chat.py b/demos/interactive_chat.py index 531b8aa74..fe7d2b36c 100755 --- 
a/demos/interactive_chat.py +++ b/demos/interactive_chat.py @@ -10,7 +10,6 @@ """ import sys -import os from pathlib import Path from typing import List, Dict, Any import re @@ -35,7 +34,7 @@ def __init__(self, graph_name: str = "medical_knowledge"): self.graph_name = graph_name print(f"\n{'='*70}") - print(f" Medical Knowledge Graph - Interactive Chat") + print(" Medical Knowledge Graph - Interactive Chat") print(f"{'='*70}\n") print(f"Connected to graph: {graph_name}") @@ -53,12 +52,12 @@ def _print_statistics(self): 'MATCH (n) RETURN DISTINCT n.type as type, count(*) as count ORDER BY count DESC LIMIT 5' ) - print(f"\nGraph Statistics:") + print("\nGraph Statistics:") print(f" Total Entities: {node_count:,}") print(f" Total Relationships: {edge_count:,}") if type_result.result_set: - print(f"\n Top Entity Types:") + print("\n Top Entity Types:") for row in type_result.result_set[:5]: entity_type = row[0] if row[0] else "Unknown" count = row[1] diff --git a/demos/live_api_demo.py b/demos/live_api_demo.py index c8396f961..5716c55ec 100644 --- a/demos/live_api_demo.py +++ b/demos/live_api_demo.py @@ -10,10 +10,8 @@ """ import requests -import json import time import sys -from typing import Dict, Any class LiveAPIDemo: diff --git a/demos/migrate_medical_entities_to_neo4j.py b/demos/migrate_medical_entities_to_neo4j.py index 225bacc71..a151480d5 100755 --- a/demos/migrate_medical_entities_to_neo4j.py +++ b/demos/migrate_medical_entities_to_neo4j.py @@ -10,11 +10,9 @@ """ import sys -import asyncio from pathlib import Path from datetime import datetime import logging -from typing import List, Dict, Any # Add src to path sys.path.insert(0, str(Path(__file__).parent.parent / "src")) @@ -208,7 +206,7 @@ def migrate_entities_and_relationships(): result = session.run("MATCH (n) WHERE n:Table OR n:Column RETURN count(n) as count") data_count = result.single()["count"] - print(f"\n Neo4j Status:") + print("\n Neo4j Status:") print(f" Medical Entities: {medical_count}") 
print(f" Medical Relationships: {rel_count}") print(f" Data Entities (DDAs): {data_count}") @@ -223,4 +221,4 @@ def migrate_entities_and_relationships(): if __name__ == "__main__": entities, relationships = migrate_entities_and_relationships() print(f"\n✅ Successfully migrated {entities} entities and {relationships} relationships") - print(f"✅ Neo4j is now the unified backend for medical KG + DDA metadata") + print("✅ Neo4j is now the unified backend for medical KG + DDA metadata") diff --git a/demos/multi_agent_dda_demo.py b/demos/multi_agent_dda_demo.py index 9be8a79eb..6181b743d 100644 --- a/demos/multi_agent_dda_demo.py +++ b/demos/multi_agent_dda_demo.py @@ -14,10 +14,8 @@ """ import asyncio -import json import time import sys -import os from pathlib import Path from typing import Dict, Any, List, Optional @@ -657,7 +655,7 @@ async def event_handler(event): print("✅ Event bus initialized successfully") print(f" Type: {type(self.event_bus).__name__}") - print(f" Status: Operational") + print(" Status: Operational") print(" - Asynchronous event processing") print(" - Role-based access control") print(" - Event validation and routing") diff --git a/demos/run_full_demo.py b/demos/run_full_demo.py index 6eda49e65..759336c0d 100644 --- a/demos/run_full_demo.py +++ b/demos/run_full_demo.py @@ -3,7 +3,6 @@ """Run full demo with Advanced Knowledge Representation features.""" import asyncio -import os from pathlib import Path from dotenv import load_dotenv from unittest.mock import MagicMock, AsyncMock @@ -14,7 +13,7 @@ # Mock dependencies if not available try: - import pyshacl + import pyshacl # noqa: F401 except ImportError: sys.modules["pyshacl"] = MagicMock() sys.modules["rdflib"] = MagicMock() @@ -23,7 +22,6 @@ async def run_demo(): print("🚀 Starting Advanced Knowledge Graph Demo...") # 1. 
Bootstrap Components - from src.composition_root import bootstrap_knowledge_management from src.application.commands.metadata_command import GenerateMetadataCommand from src.infrastructure.in_memory_backend import InMemoryGraphBackend @@ -36,7 +34,6 @@ async def run_demo(): # Force InMemory Backend for demo to ensure it runs kg_backend = InMemoryGraphBackend() - event_bus = None # Not needed for this demo script flow # We need a Graphiti instance for the enricher # We can mock it or use a real one if configured diff --git a/scripts/analyze_token_costs.py b/scripts/analyze_token_costs.py index ce1ede2ca..69c03cef0 100644 --- a/scripts/analyze_token_costs.py +++ b/scripts/analyze_token_costs.py @@ -24,7 +24,7 @@ # For all 18 PDFs print("\n For 18 PDFs:") -print(f" → Total: 18 * $0.00087 = $0.016 (one-time cost)") +print(" → Total: 18 * $0.00087 = $0.016 (one-time cost)") # Chat Queries print("\n2. Chat Query (per question):") @@ -39,16 +39,16 @@ # High volume scenarios print("\n3. Scale Analysis:") print("\n 1,000 queries/day:") -print(f" Daily: 1,000 * $0.00072 = $0.72") -print(f" Monthly: $0.72 * 30 = $21.60") +print(" Daily: 1,000 * $0.00072 = $0.72") +print(" Monthly: $0.72 * 30 = $21.60") print("\n 10,000 queries/day:") -print(f" Daily: 10,000 * $0.00072 = $7.20") -print(f" Monthly: $7.20 * 30 = $216") +print(" Daily: 10,000 * $0.00072 = $7.20") +print(" Monthly: $7.20 * 30 = $216") print("\n 100,000 queries/day:") -print(f" Daily: 100,000 * $0.00072 = $72") -print(f" Monthly: $72 * 30 = $2,160") +print(" Daily: 100,000 * $0.00072 = $72") +print(" Monthly: $72 * 30 = $2,160") # Breakdown by operation print("\n4. 
Cost Breakdown by Operation:") diff --git a/scripts/batch/enrich_all_domains.py b/scripts/batch/enrich_all_domains.py index 129dbe706..d432f695f 100644 --- a/scripts/batch/enrich_all_domains.py +++ b/scripts/batch/enrich_all_domains.py @@ -63,7 +63,7 @@ async def enrich_all_domains(): if result["success"]: successful += 1 - print(f" ✅ Success!") + print(" ✅ Success!") results.append({ "file": dda_file.name, "domain": domain_name, @@ -100,7 +100,7 @@ async def enrich_all_domains(): # Show failed files if any if failed > 0: - print(f"\n⚠️ Failed files:") + print("\n⚠️ Failed files:") for r in results: if r['status'] != 'success': print(f" - {r['file']}: {r.get('error', 'Unknown error')}") diff --git a/scripts/batch/generate_dda_documents.py b/scripts/batch/generate_dda_documents.py index 82e9ec904..94c803df2 100644 --- a/scripts/batch/generate_dda_documents.py +++ b/scripts/batch/generate_dda_documents.py @@ -5,7 +5,6 @@ """ import os -from datetime import datetime # Define the DDA domains and their specific content DDA_DOMAINS = [ diff --git a/scripts/batch/process_all_ddas.py b/scripts/batch/process_all_ddas.py index 977110efa..ff0900056 100644 --- a/scripts/batch/process_all_ddas.py +++ b/scripts/batch/process_all_ddas.py @@ -18,7 +18,7 @@ async def process_all_ddas(): from src.composition_root import bootstrap_graphiti, create_modeling_command_handler from src.application.commands.modeling_command import ModelingCommand print("🔧 Initializing Graphiti...") - graph = await bootstrap_graphiti("batch-modeling") + await bootstrap_graphiti("batch-modeling") # Create modeling handler with Neo4j credentials (architecture graph writer) print("🔧 Creating modeling handler with Neo4j credentials...\n") @@ -61,7 +61,7 @@ async def process_all_ddas(): if result["success"]: successful += 1 graph_doc = result['graph_document'] - print(f" ✅ Success!") + print(" ✅ Success!") print(f" Entities: {graph_doc.get('entities_count', 0)}") print(f" Nodes: {graph_doc.get('nodes_created', 
0)}") print(f" Edges: {graph_doc.get('edges_created', 0)}") @@ -110,14 +110,14 @@ async def process_all_ddas(): total_nodes = sum(r.get('nodes', 0) for r in results if r['status'] == 'success') total_edges = sum(r.get('edges', 0) for r in results if r['status'] == 'success') - print(f"\n📈 Total Graph Statistics:") + print("\n📈 Total Graph Statistics:") print(f" Entities defined: {total_entities}") print(f" Nodes created: {total_nodes}") print(f" Edges created: {total_edges}") # Show failed files if any if failed > 0: - print(f"\n⚠️ Failed files:") + print("\n⚠️ Failed files:") for r in results: if r['status'] != 'success': print(f" - {r['file']}: {r.get('error', 'Unknown error')}") diff --git a/scripts/batch/process_all_metadata.py b/scripts/batch/process_all_metadata.py index ee171df4e..b7f4d1817 100644 --- a/scripts/batch/process_all_metadata.py +++ b/scripts/batch/process_all_metadata.py @@ -73,7 +73,7 @@ async def process_all_metadata(): if result["success"]: successful += 1 metadata_summary = result['metadata_graph'] - print(f" ✅ Success!") + print(" ✅ Success!") print(f" Tables: {metadata_summary.get('tables_created', 0)}") print(f" Columns: {metadata_summary.get('columns_created', 0)}") @@ -118,13 +118,13 @@ async def process_all_metadata(): total_tables = sum(r.get('tables', 0) for r in results if r['status'] == 'success') total_columns = sum(r.get('columns', 0) for r in results if r['status'] == 'success') - print(f"\n📈 Total Metadata Statistics:") + print("\n📈 Total Metadata Statistics:") print(f" Tables created: {total_tables}") print(f" Columns created: {total_columns}") # Show failed files if any if failed > 0: - print(f"\n⚠️ Failed files:") + print("\n⚠️ Failed files:") for r in results: if r['status'] != 'success': print(f" - {r['file']}: {r.get('error', 'Unknown error')}") diff --git a/scripts/batch/run_neo4j_queries.py b/scripts/batch/run_neo4j_queries.py index 1cbb48eae..8935d5345 100644 --- a/scripts/batch/run_neo4j_queries.py +++ 
b/scripts/batch/run_neo4j_queries.py @@ -7,9 +7,8 @@ """ import asyncio -import os from pathlib import Path -from typing import Dict, Any, List +from typing import Dict, Any # Add src to path import sys diff --git a/scripts/cleanup_patient_memories.py b/scripts/cleanup_patient_memories.py index 463b4cc30..a812e6ac3 100644 --- a/scripts/cleanup_patient_memories.py +++ b/scripts/cleanup_patient_memories.py @@ -71,7 +71,7 @@ def show_status(): shared_collections.append((name, count)) print(f" ⚠️ SHARED: {name}") print(f" Points: {count}") - print(f" WARNING: This is the old shared collection that can leak data!") + print(" WARNING: This is the old shared collection that can leak data!") elif name.startswith("patient_mem_"): patient_collections.append((name, count)) patient_id = name.replace("patient_mem_", "").replace("_", ":") diff --git a/scripts/dev/explore_graphiti.py b/scripts/dev/explore_graphiti.py index 1d146742f..632c66d6c 100644 --- a/scripts/dev/explore_graphiti.py +++ b/scripts/dev/explore_graphiti.py @@ -1,6 +1,5 @@ import asyncio -import os from graphiti_core import Graphiti async def explore_graphiti(): diff --git a/scripts/ingest_pdfs_for_rag.py b/scripts/ingest_pdfs_for_rag.py index 020965d61..463ba1715 100644 --- a/scripts/ingest_pdfs_for_rag.py +++ b/scripts/ingest_pdfs_for_rag.py @@ -122,7 +122,7 @@ async def main(): metadata={"category": pdf_path.parent.name} ) - print(f" ✓ Success!") + print(" ✓ Success!") print(f" - Document ID: {doc.id}") print(f" - Chunks created: {doc.chunk_count}") successful += 1 @@ -138,13 +138,13 @@ async def main(): print(f" Failed: {failed}/{len(pdfs)}") # Show FAISS index stats - print(f"\n📊 FAISS Index Statistics:") + print("\n📊 FAISS Index Statistics:") print(f" - Total chunks: {len(doc_service._chunk_store)}") print(f" - Index path: {doc_service.faiss_index_path}.index") # Test search if doc_service._faiss_index is not None: - print(f"\n🔍 Testing search...") + print("\n🔍 Testing search...") try: results = await 
doc_service.search_similar("Crohn's disease", top_k=3) print(f" ✓ Search working! Found {len(results)} results") diff --git a/scripts/inspection/check_graphs.py b/scripts/inspection/check_graphs.py index 080823e99..ece825624 100644 --- a/scripts/inspection/check_graphs.py +++ b/scripts/inspection/check_graphs.py @@ -29,7 +29,7 @@ async def check_neo4j(): # Get sample entities result = session.run("MATCH (n:Entity) RETURN n.name as name LIMIT 5") - print(f"\n📝 Sample Entities:") + print("\n📝 Sample Entities:") for record in result: print(f" - {record['name']}") @@ -52,14 +52,14 @@ def check_falkordb(): # Count by label result = graph.query("MATCH (n) RETURN labels(n)[0] as label, count(n) as count") if result.result_set: - print(f"\n📝 Nodes by Label:") + print("\n📝 Nodes by Label:") for row in result.result_set: print(f" - {row[0]}: {row[1]}") # Sample nodes result = graph.query("MATCH (n) RETURN n LIMIT 5") if result.result_set: - print(f"\n🔍 Sample Nodes:") + print("\n🔍 Sample Nodes:") for row in result.result_set: node = row[0] print(f" - {node}") diff --git a/scripts/inspection/inspect_falkordb.py b/scripts/inspection/inspect_falkordb.py index e3b46bcf1..796c3f1c8 100644 --- a/scripts/inspection/inspect_falkordb.py +++ b/scripts/inspection/inspect_falkordb.py @@ -12,7 +12,7 @@ # Show labels result = graph.query("MATCH (n) RETURN labels(n)[0] as label, count(n) as cnt") -print(f"\nNode labels:") +print("\nNode labels:") for row in result.result_set: print(f" {row[0]}: {row[1]}") diff --git a/scripts/inspection/verify_hybrid_ontology.py b/scripts/inspection/verify_hybrid_ontology.py index c1716de69..266a241f4 100644 --- a/scripts/inspection/verify_hybrid_ontology.py +++ b/scripts/inspection/verify_hybrid_ontology.py @@ -3,7 +3,6 @@ import sys import os from unittest.mock import MagicMock, AsyncMock -from datetime import datetime sys.path.append(os.getcwd()) @@ -13,18 +12,14 @@ # We need pyshacl for validation, but if it's not installed, the engine skips it. 
# We'll assume it's installed or handled gracefully. -from src.application.agents.data_engineer.metadata_workflow import MetadataGenerationWorkflow -from src.application.agents.data_engineer.metadata_graph_builder import MetadataGraphBuilder -from src.application.agents.data_architect.dda_parser import DDAParserFactory -from src.application.agents.knowledge_manager.reasoning_engine import ReasoningEngine -from src.application.agents.knowledge_manager.validation_engine import ValidationEngine -from src.application.services.knowledge_enricher import KnowledgeEnricher -from src.infrastructure.in_memory_backend import InMemoryGraphBackend -from domain.dda_models import DDADocument, DataEntity -from domain.ontologies.odin import ODIN -from domain.ontologies.schema_org import SCHEMA -from domain.event import KnowledgeEvent -from domain.roles import Role +from src.application.agents.knowledge_manager.reasoning_engine import ReasoningEngine # noqa: E402 +from src.application.agents.knowledge_manager.validation_engine import ValidationEngine # noqa: E402 +from src.application.services.knowledge_enricher import KnowledgeEnricher # noqa: E402 +from src.infrastructure.in_memory_backend import InMemoryGraphBackend # noqa: E402 +from domain.ontologies.odin import ODIN # noqa: E402 +from domain.ontologies.schema_org import SCHEMA # noqa: E402 +from domain.event import KnowledgeEvent # noqa: E402 +from domain.roles import Role # noqa: E402 async def verify_hybrid_ontology(): print("🚀 Verifying Advanced Knowledge Representation...") diff --git a/scripts/inspection/verify_metadata_enhancement.py b/scripts/inspection/verify_metadata_enhancement.py index 425e9a275..08f327712 100644 --- a/scripts/inspection/verify_metadata_enhancement.py +++ b/scripts/inspection/verify_metadata_enhancement.py @@ -1,13 +1,10 @@ """Verification script for Metadata Model Enhancements.""" import asyncio -import os from datetime import datetime from infrastructure.in_memory_backend import 
InMemoryGraphBackend from application.agents.data_engineer.metadata_graph_builder import MetadataGraphBuilder -from application.agents.data_engineer.type_inference import TypeInferenceService -from infrastructure.graphiti import get_graphiti from domain.dda_models import DDADocument, DataEntity # Mock DDA Document @@ -40,7 +37,7 @@ async def verify_enhancements(): # We'll mock the type inference service to avoid needing real LLM/Graphiti here class MockTypeInference: async def infer_data_type(self, *args): - from domain.odin_models import DataTypeEntity, DataType + from domain.odin_models import DataTypeEntity return DataTypeEntity(name="VARCHAR", base_type="STRING") async def infer_precision(self, *args): return 50 async def infer_scale(self, *args): return None diff --git a/scripts/inspection/verify_relationship_densification.py b/scripts/inspection/verify_relationship_densification.py index 3a6f9d266..b441a443e 100644 --- a/scripts/inspection/verify_relationship_densification.py +++ b/scripts/inspection/verify_relationship_densification.py @@ -1,16 +1,13 @@ """Verification script for Relationship Densification.""" import asyncio -import os from datetime import datetime -from typing import Dict, Any, List from infrastructure.in_memory_backend import InMemoryGraphBackend from application.agents.data_engineer.metadata_graph_builder import MetadataGraphBuilder from application.agents.knowledge_manager.reasoning_engine import ReasoningEngine from domain.dda_models import DDADocument, DataEntity from domain.event import KnowledgeEvent -from src.domain.knowledge_layers import KnowledgeLayer from domain.roles import Role # Mock Graphiti/LLM @@ -144,7 +141,7 @@ async def infer_scale(self, *args): return None for suggestion in result_transitive.get("suggestions", []): if suggestion.get("relationship_type") == "is_a" and suggestion.get("source") == "A" and suggestion.get("target") == "C": closure_found = True - print(f" ✅ Found transitive closure suggestion: A -> C") + 
print(" ✅ Found transitive closure suggestion: A -> C") break if not closure_found: diff --git a/scripts/migrate_odin_schema.py b/scripts/migrate_odin_schema.py index e61f5b63d..84f016459 100644 --- a/scripts/migrate_odin_schema.py +++ b/scripts/migrate_odin_schema.py @@ -10,7 +10,7 @@ import os from src.infrastructure.falkor_backend import FalkorBackend -from src.domain.odin_models import Table, Column, Policy, PolicyType +from src.domain.odin_models import Policy, PolicyType def ensure_governance_fields(backend: FalkorBackend): diff --git a/scripts/migration/migrate_layers.py b/scripts/migration/migrate_layers.py index cecd5e47b..6e953006e 100644 --- a/scripts/migration/migrate_layers.py +++ b/scripts/migration/migrate_layers.py @@ -14,7 +14,7 @@ import os import logging from datetime import datetime -from typing import Dict, List, Tuple +from typing import Dict from neo4j import AsyncGraphDatabase diff --git a/scripts/ontology_batch_remediation.py b/scripts/ontology_batch_remediation.py index 7bdace0a6..2e01acf5c 100644 --- a/scripts/ontology_batch_remediation.py +++ b/scripts/ontology_batch_remediation.py @@ -89,7 +89,7 @@ def print_report(results: Dict[str, Any], is_dry_run: bool = False): print(f" [{status}] {step['name']}: {step['updated']:,}") print(f"\n## Total Updated: {results.get('total_updated', 0):,}") - print(f"\n## Coverage Improvement") + print("\n## Coverage Improvement") print(f" Before: {results.get('coverage_before', 0)}%") print(f" After: {results.get('coverage_after', 0)}%") diff --git a/scripts/sync_data_to_postgres.py b/scripts/sync_data_to_postgres.py index 284bbcd2b..8818741b1 100644 --- a/scripts/sync_data_to_postgres.py +++ b/scripts/sync_data_to_postgres.py @@ -15,7 +15,7 @@ import logging import sys from datetime import datetime -from typing import Dict, List, Optional, Any +from typing import Dict, Any from uuid import UUID import os diff --git a/scripts/test_integration.py b/scripts/test_integration.py index 6c510a485..2f2228e06 
100644 --- a/scripts/test_integration.py +++ b/scripts/test_integration.py @@ -25,7 +25,7 @@ import sys import os from datetime import datetime -from typing import Dict, List, Any, Optional +from typing import Dict, List, Any from dataclasses import dataclass, field # Add src to path @@ -221,7 +221,6 @@ async def test_feature_flag_read(): """Test reading feature flags.""" from application.services.feature_flag_service import ( get_feature_flag_service, - is_flag_enabled, MIGRATION_FLAGS, ) @@ -303,7 +302,7 @@ async def test_session_dual_write(): status="active", extra_data={"test": True} ) - created = await repo.create(pg_session) + await repo.create(pg_session) await session.commit() # Verify it was created @@ -329,7 +328,7 @@ async def test_session_dual_write(): try: from application.services.feature_flag_service import get_feature_flag_service get_feature_flag_service().clear_cache() - except: + except Exception: pass @@ -362,7 +361,7 @@ async def test_feedback_dual_write(): query_text="Test query", response_text="Test response", ) - created = await repo.create(pg_feedback) + await repo.create(pg_feedback) await session.commit() # Verify @@ -387,7 +386,7 @@ async def test_feedback_dual_write(): try: from application.services.feature_flag_service import get_feature_flag_service get_feature_flag_service().clear_cache() - except: + except Exception: pass @@ -420,7 +419,7 @@ async def test_document_dual_write(): chunk_count=5, entity_count=10, ) - created = await repo.create(pg_doc) + await repo.create(pg_doc) await session.commit() # Verify @@ -445,7 +444,7 @@ async def test_document_dual_write(): try: from application.services.feature_flag_service import get_feature_flag_service get_feature_flag_service().clear_cache() - except: + except Exception: pass diff --git a/src/application/agents/data_architect/agent.py b/src/application/agents/data_architect/agent.py index 938e837d7..24a77c0bf 100644 --- a/src/application/agents/data_architect/agent.py +++ 
b/src/application/agents/data_architect/agent.py @@ -1,7 +1,6 @@ from domain.agent import Agent from application.commands.agent_commands import StartProjectCommand from graphiti_core import Graphiti -from langchain_core.documents import Document from typing import Optional, Dict, Any, List from domain.communication import CommunicationChannel from domain.command_bus import CommandBus diff --git a/src/application/agents/data_architect/dda_parser.py b/src/application/agents/data_architect/dda_parser.py index ba4c0ba2c..3fef5b4df 100644 --- a/src/application/agents/data_architect/dda_parser.py +++ b/src/application/agents/data_architect/dda_parser.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod from typing import List from domain.dda_models import DDADocument -import os class DDAParser(ABC): diff --git a/src/application/agents/data_architect/domain_modeler.py b/src/application/agents/data_architect/domain_modeler.py index 2099619aa..41c7b6010 100644 --- a/src/application/agents/data_architect/domain_modeler.py +++ b/src/application/agents/data_architect/domain_modeler.py @@ -2,10 +2,8 @@ from domain.dda_models import DDADocument, DataEntity, Relationship from graphiti_core import Graphiti from graphiti_core.nodes import EpisodeType -from datetime import datetime import json import hashlib -from functools import lru_cache class DomainModeler: @@ -321,12 +319,12 @@ def _create_update_episode_content(self, dda_document: DDADocument, merged_entit # Update header content_parts.append(f"DOMAIN UPDATE: {dda_document.domain}") - content_parts.append(f"Update Type: Merge with existing domain") + content_parts.append("Update Type: Merge with existing domain") content_parts.append(f"Existing Domain UUID: {existing_domain.get('uuid', 'Unknown')}") content_parts.append(f"Update Date: {dda_document.effective_date.strftime('%Y-%m-%d')}") # Domain information (if changed) - content_parts.append(f"\nDomain Information:") + content_parts.append("\nDomain Information:") 
content_parts.append(f"Business Context: {dda_document.business_context}") content_parts.append(f"Data Owner: {dda_document.data_owner}") content_parts.append(f"Stakeholders: {', '.join(dda_document.stakeholders)}") diff --git a/src/application/agents/data_architect/handlers/modeling_feedback_handler.py b/src/application/agents/data_architect/handlers/modeling_feedback_handler.py index e7b144d1f..b0b6a294a 100644 --- a/src/application/agents/data_architect/handlers/modeling_feedback_handler.py +++ b/src/application/agents/data_architect/handlers/modeling_feedback_handler.py @@ -2,7 +2,6 @@ from application.commands.base import CommandHandler from application.commands.collaboration_commands import ModelingFeedbackCommand from graphiti_core import Graphiti -import json class ModelingFeedbackCommandHandler(CommandHandler): @@ -74,31 +73,31 @@ def _create_feedback_episode_content(self, command: ModelingFeedbackCommand) -> content_parts = [] # Feedback header - content_parts.append(f"DOMAIN MODELING FEEDBACK") + content_parts.append("DOMAIN MODELING FEEDBACK") content_parts.append(f"Domain: {command.domain}") content_parts.append(f"Original Episode UUID: {command.episode_uuid}") content_parts.append(f"Feedback Type: {command.feedback_type}") content_parts.append(f"Rating: {command.rating}/5" if command.rating else "No rating provided") # Main feedback content - content_parts.append(f"\nFEEDBACK CONTENT:") + content_parts.append("\nFEEDBACK CONTENT:") content_parts.append(command.feedback_content) # Entity-specific feedback if command.entity_feedback: - content_parts.append(f"\nENTITY-SPECIFIC FEEDBACK:") + content_parts.append("\nENTITY-SPECIFIC FEEDBACK:") for entity_name, feedback in command.entity_feedback.items(): content_parts.append(f"- {entity_name}: {feedback}") # Relationship-specific feedback if command.relationship_feedback: - content_parts.append(f"\nRELATIONSHIP-SPECIFIC FEEDBACK:") + content_parts.append("\nRELATIONSHIP-SPECIFIC FEEDBACK:") for rel_name, 
feedback in command.relationship_feedback.items(): content_parts.append(f"- {rel_name}: {feedback}") # Suggestions if command.suggestions: - content_parts.append(f"\nSUGGESTIONS:") + content_parts.append("\nSUGGESTIONS:") for i, suggestion in enumerate(command.suggestions, 1): content_parts.append(f"{i}. {suggestion}") diff --git a/src/application/agents/data_architect/modeling_workflow.py b/src/application/agents/data_architect/modeling_workflow.py index 5a0e29e2c..9b3339786 100644 --- a/src/application/agents/data_architect/modeling_workflow.py +++ b/src/application/agents/data_architect/modeling_workflow.py @@ -2,7 +2,6 @@ from typing import List, Dict, Any, Optional from pydantic import BaseModel from domain.dda_models import DDADocument -from domain.communication import CommunicationChannel, Message from application.commands.modeling_command import ModelingCommand from application.commands.metadata_command import GenerateMetadataCommand from application.agents.data_architect.dda_parser import DDAParserFactory @@ -10,7 +9,6 @@ import json import os from datetime import datetime -import shutil class ValidationResult(BaseModel): diff --git a/src/application/agents/data_engineer/agent.py b/src/application/agents/data_engineer/agent.py index f62e7afd1..0657b39fc 100644 --- a/src/application/agents/data_engineer/agent.py +++ b/src/application/agents/data_engineer/agent.py @@ -3,7 +3,6 @@ from domain.command_bus import CommandBus from graphiti_core import Graphiti from typing import Optional, Dict, Any, List -from domain.event import KnowledgeEvent from domain.roles import Role from domain.kg_backends import KnowledgeGraphBackend from application.event_bus import EventBus diff --git a/src/application/agents/data_engineer/handlers/build_kg.py b/src/application/agents/data_engineer/handlers/build_kg.py index 6e46f2910..203ba52b3 100644 --- a/src/application/agents/data_engineer/handlers/build_kg.py +++ b/src/application/agents/data_engineer/handlers/build_kg.py @@ 
-85,7 +85,7 @@ async def _build_knowledge_graph(self, source_data: Dict[str, Any], domain_model episode_results = await self.graph.add_episode( name=f"KG Build - {source_data.get('domain', 'Unknown')}", episode_body=episode_content, - source_description=f"Knowledge Graph built using domain models", + source_description="Knowledge Graph built using domain models", reference_time=source_data.get('timestamp', '2024-01-01'), source="message", group_id=f"kg_{source_data.get('domain', 'unknown').lower().replace(' ', '_')}", @@ -105,15 +105,15 @@ def _create_contextual_episode_content(self, source_data: Dict[str, Any], domain content_parts = [] # Header with domain model context - content_parts.append(f"KNOWLEDGE GRAPH BUILD") + content_parts.append("KNOWLEDGE GRAPH BUILD") content_parts.append(f"Domain: {source_data.get('domain', 'Unknown')}") content_parts.append(f"Source Data Type: {source_data.get('type', 'Unknown')}") content_parts.append(f"Domain Models Available: {len(domain_models)}") # Domain model context if domain_models: - content_parts.append(f"\nDOMAIN MODEL CONTEXT:") - content_parts.append(f"The following domain models should be used as reference:") + content_parts.append("\nDOMAIN MODEL CONTEXT:") + content_parts.append("The following domain models should be used as reference:") entities = [m for m in domain_models if m.get('type') == 'entity'] relationships = [m for m in domain_models if m.get('type') == 'relationship'] @@ -129,18 +129,18 @@ def _create_contextual_episode_content(self, source_data: Dict[str, Any], domain content_parts.append(f"- {rel.get('name', 'Unknown')}: {rel.get('attributes', {}).get('description', 'No description')}") # Source data content - content_parts.append(f"\nSOURCE DATA:") + content_parts.append("\nSOURCE DATA:") content_parts.append(f"Content: {source_data.get('content', 'No content provided')}") if source_data.get('metadata'): content_parts.append(f"Metadata: {source_data.get('metadata')}") # Instructions for graph 
building - content_parts.append(f"\nBUILDING INSTRUCTIONS:") - content_parts.append(f"1. Use the domain models above as reference for entity and relationship structure") - content_parts.append(f"2. Extract entities and relationships from the source data") - content_parts.append(f"3. Ensure consistency with existing domain model patterns") - content_parts.append(f"4. Create nodes and edges that align with the domain architecture") + content_parts.append("\nBUILDING INSTRUCTIONS:") + content_parts.append("1. Use the domain models above as reference for entity and relationship structure") + content_parts.append("2. Extract entities and relationships from the source data") + content_parts.append("3. Ensure consistency with existing domain model patterns") + content_parts.append("4. Create nodes and edges that align with the domain architecture") return "\n".join(content_parts) diff --git a/src/application/agents/data_engineer/metadata_graph_builder.py b/src/application/agents/data_engineer/metadata_graph_builder.py index 6487b472a..6a5a4fd2b 100644 --- a/src/application/agents/data_engineer/metadata_graph_builder.py +++ b/src/application/agents/data_engineer/metadata_graph_builder.py @@ -1,10 +1,10 @@ """Metadata graph builder for creating ODIN-compliant metadata graphs from DDA documents.""" from typing import Dict, Any, List, Optional -from domain.dda_models import DDADocument, DataEntity, Relationship +from domain.dda_models import DDADocument, DataEntity from domain.odin_models import ( Catalog, Schema, Table, Column, DataTypeEntity, TypeAssignment, - Constraint, ConstraintType, LineageNode, LineageRelationship, LineageType, + Constraint, ConstraintType, LineageNode, LineageType, DataQualityRule, DataQualityScore, UsageStats ) from domain.kg_backends import KnowledgeGraphBackend diff --git a/src/application/agents/data_engineer/metadata_workflow.py b/src/application/agents/data_engineer/metadata_workflow.py index 86fad23b6..645d73636 100644 --- 
a/src/application/agents/data_engineer/metadata_workflow.py +++ b/src/application/agents/data_engineer/metadata_workflow.py @@ -135,7 +135,7 @@ async def _read_architecture_graph(self, graph_ref: str) -> Optional[Dict[str, A # Try to search for nodes in the architecture graph # graph_ref could be a group_id like "dda_customer_analytics" search_results = await self.graph.search( - query=f"domain architecture entities relationships", + query="domain architecture entities relationships", group_ids=[graph_ref], num_results=50 ) diff --git a/src/application/agents/data_engineer/type_inference.py b/src/application/agents/data_engineer/type_inference.py index 45466c354..78b3caa25 100644 --- a/src/application/agents/data_engineer/type_inference.py +++ b/src/application/agents/data_engineer/type_inference.py @@ -3,7 +3,6 @@ from typing import Dict, Any, Optional from graphiti_core import Graphiti from domain.odin_models import DataType, DataTypeEntity -import json import re diff --git a/src/application/agents/echo_agent.py b/src/application/agents/echo_agent.py index 635b581f6..8bbe911ed 100644 --- a/src/application/agents/echo_agent.py +++ b/src/application/agents/echo_agent.py @@ -1,7 +1,6 @@ from domain.agent import Agent from domain.communication import CommunicationChannel from domain.command_bus import CommandBus -from typing import Optional class EchoAgent(Agent): diff --git a/src/application/agents/knowledge_manager/conflict_resolver.py b/src/application/agents/knowledge_manager/conflict_resolver.py index e717611b9..5ec4aa16a 100644 --- a/src/application/agents/knowledge_manager/conflict_resolver.py +++ b/src/application/agents/knowledge_manager/conflict_resolver.py @@ -1,6 +1,6 @@ """Conflict detection and resolution for knowledge graph operations.""" -from typing import Dict, Any, List, Optional +from typing import Dict, Any, List from domain.kg_backends import KnowledgeGraphBackend from domain.event import KnowledgeEvent diff --git 
a/src/application/agents/knowledge_manager/llm_reasoner.py b/src/application/agents/knowledge_manager/llm_reasoner.py index 83ff00ea5..4589c6080 100644 --- a/src/application/agents/knowledge_manager/llm_reasoner.py +++ b/src/application/agents/knowledge_manager/llm_reasoner.py @@ -1,6 +1,6 @@ """LLM-based reasoner for semantic inference.""" -from typing import Dict, Any, List, Optional +from typing import Dict, Any, List from graphiti_core import Graphiti from domain.event import KnowledgeEvent diff --git a/src/application/agents/knowledge_manager/ontology_mapper.py b/src/application/agents/knowledge_manager/ontology_mapper.py index 2c4b5027b..5a6ff17a3 100644 --- a/src/application/agents/knowledge_manager/ontology_mapper.py +++ b/src/application/agents/knowledge_manager/ontology_mapper.py @@ -21,7 +21,6 @@ get_hierarchy_path, get_auto_relationships, suggest_type_mapping, - OntologyDomain, ) diff --git a/src/application/agents/knowledge_manager/reasoning_engine.py b/src/application/agents/knowledge_manager/reasoning_engine.py index b01fbf8e7..651ac88f1 100644 --- a/src/application/agents/knowledge_manager/reasoning_engine.py +++ b/src/application/agents/knowledge_manager/reasoning_engine.py @@ -4,8 +4,6 @@ from domain.kg_backends import KnowledgeGraphBackend from domain.event import KnowledgeEvent from domain.confidence_models import ( - Confidence, - ConfidenceSource, symbolic_confidence, neural_confidence ) @@ -567,7 +565,7 @@ async def _classify_entity(self, event: KnowledgeEvent) -> Optional[Dict[str, An async def _suggest_relationships(self, event: KnowledgeEvent) -> Optional[Dict[str, Any]]: """Suggest potential relationships for the entity.""" - entity_id = event.data.get("id", "") + event.data.get("id", "") properties = event.data.get("properties", {}) suggestions = [] @@ -1247,7 +1245,7 @@ async def _perception_to_semantic_reasoning( # Infer domain from column names if entity_type == "Column": column_name = properties.get("name", "").lower() - data_type = 
properties.get("data_type", "") + properties.get("data_type", "") domain_keywords = { "financial": ["amount", "price", "cost", "balance", "payment", "revenue"], diff --git a/src/application/agents/knowledge_manager/server.py b/src/application/agents/knowledge_manager/server.py index efd802560..db4736098 100644 --- a/src/application/agents/knowledge_manager/server.py +++ b/src/application/agents/knowledge_manager/server.py @@ -1,13 +1,11 @@ """HTTP server for the Knowledge Manager Agent.""" -import asyncio -import json import logging from typing import Dict, Any, List -from fastapi import FastAPI, HTTPException, BackgroundTasks +from fastapi import FastAPI, HTTPException from pydantic import BaseModel from domain.communication import Message -from .agent import KnowledgeManagerAgent, KGUpdateRequest, KGUpdateType, KGUpdateResult +from .agent import KnowledgeManagerAgent, KGUpdateRequest, KGUpdateType from graphiti_core import Graphiti logger = logging.getLogger(__name__) diff --git a/src/application/agents/knowledge_manager/validation_engine.py b/src/application/agents/knowledge_manager/validation_engine.py index db3e6400b..82aa316ad 100644 --- a/src/application/agents/knowledge_manager/validation_engine.py +++ b/src/application/agents/knowledge_manager/validation_engine.py @@ -329,7 +329,7 @@ async def _validate_shacl(self, event: KnowledgeEvent) -> Dict[str, Any]: """Validate event data against SHACL shapes.""" try: import pyshacl - from rdflib import Graph, Literal, RDF, URIRef, Namespace + from rdflib import Graph, Literal, RDF, URIRef, Namespace # noqa: F401 except ImportError: return {"is_valid": True, "warnings": ["pyshacl not installed, skipping SHACL validation"]} @@ -435,7 +435,7 @@ async def _validate_layer_assignment(self, event: KnowledgeEvent) -> Dict[str, A return {"is_valid": False, "errors": errors} # Validate layer value - valid_layers = [l.value for l in KnowledgeLayer] + valid_layers = [layer_enum.value for layer_enum in KnowledgeLayer] if layer 
not in valid_layers: errors.append( f"Invalid layer '{layer}'. Must be one of: {', '.join(valid_layers)}" @@ -564,13 +564,6 @@ async def _validate_layer_relationship_hierarchy( # Check for reverse relationships (higher layer pointing to lower layer) if source_order > target_order and source_order > 0 and target_order > 0: # Allow certain reverse relationships - allowed_reverse_types = [ - "derived_from", - "based_on", - "references", - "uses", - "reads_from" - ] # This would need relationship type from event, which we don't have in this context # So we just issue a warning diff --git a/src/application/api/crystallization_router.py b/src/application/api/crystallization_router.py index 5cc7b702e..9610962ac 100644 --- a/src/application/api/crystallization_router.py +++ b/src/application/api/crystallization_router.py @@ -14,8 +14,6 @@ import logging from .dependencies import ( - get_kg_backend, - get_event_bus, get_crystallization_service, get_promotion_gate, get_entity_resolver, diff --git a/src/application/api/dependencies.py b/src/application/api/dependencies.py index a51afe206..73b473553 100644 --- a/src/application/api/dependencies.py +++ b/src/application/api/dependencies.py @@ -1,10 +1,8 @@ """Dependency injection for FastAPI application.""" -from functools import lru_cache -from typing import AsyncGenerator, Tuple, Optional +from typing import AsyncGenerator, Tuple import os -from fastapi import Depends from graphiti_core import Graphiti from domain.kg_backends import KnowledgeGraphBackend from application.event_bus import EventBus @@ -85,7 +83,7 @@ async def get_episodic_memory(): global _episodic_memory_instance, _event_bus_instance # Check if episodic memory is enabled - if not os.getenv("ENABLE_EPISODIC_MEMORY", "").lower() in ("true", "1", "yes"): + if os.getenv("ENABLE_EPISODIC_MEMORY", "").lower() not in ("true", "1", "yes"): return None if _episodic_memory_instance is None: @@ -431,7 +429,7 @@ async def get_crystallization_service(): global 
_crystallization_service, _promotion_gate, _entity_resolver # Check if crystallization is enabled - if not os.getenv("ENABLE_CRYSTALLIZATION", "").lower() in ("true", "1", "yes"): + if os.getenv("ENABLE_CRYSTALLIZATION", "").lower() not in ("true", "1", "yes"): return None if _crystallization_service is None: diff --git a/src/application/api/document_router.py b/src/application/api/document_router.py index e5c57810c..909e1a5a2 100644 --- a/src/application/api/document_router.py +++ b/src/application/api/document_router.py @@ -3,7 +3,7 @@ Provides endpoints for managing PDF documents and their ingestion. """ -from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Query, BackgroundTasks +from fastapi import APIRouter, HTTPException, UploadFile, File, Query, BackgroundTasks from pathlib import Path from typing import List, Optional import asyncio @@ -417,7 +417,7 @@ async def get_document_entities( labels = record["labels"] # Filter out base labels to get the specific entity type entity_type_label = next( - (l for l in labels if l not in ["Entity", "ExtractedEntity"]), + (label for label in labels if label not in ["Entity", "ExtractedEntity"]), labels[0] if labels else "Unknown" ) @@ -504,7 +504,7 @@ async def get_document_graph( if node_id and node_id not in seen_nodes: seen_nodes.add(node_id) entity_type = next( - (l for l in labels if l not in ["Entity", "ExtractedEntity"]), + (label for label in labels if label not in ["Entity", "ExtractedEntity"]), labels[0] if labels else "Unknown" ) nodes.append({ diff --git a/src/application/api/evaluation_auth.py b/src/application/api/evaluation_auth.py index a7809019d..8c292aaac 100644 --- a/src/application/api/evaluation_auth.py +++ b/src/application/api/evaluation_auth.py @@ -10,7 +10,7 @@ import logging from typing import Optional -from fastapi import HTTPException, Header, Depends +from fastapi import HTTPException, Depends from fastapi.security import APIKeyHeader logger = logging.getLogger(__name__) 
@@ -101,7 +101,7 @@ async def __call__( # Verificar que la API key es correcta if api_key != self.expected_key: - logger.warning(f"Invalid eval API key attempt") + logger.warning("Invalid eval API key attempt") raise HTTPException( status_code=403, detail="Invalid evaluation API key" diff --git a/src/application/api/evaluation_router.py b/src/application/api/evaluation_router.py index 1a68f049d..7a51f4ebc 100644 --- a/src/application/api/evaluation_router.py +++ b/src/application/api/evaluation_router.py @@ -17,7 +17,7 @@ import logging import uuid from datetime import datetime, UTC -from typing import Any, Dict, List, Optional +from typing import Any, Optional from fastapi import APIRouter, Depends, HTTPException, Query @@ -33,7 +33,6 @@ Neo4jDIKWLayerSnapshot, SeedStateRequest, SeedStateResponse, - SeedEntityRequest, ResetPatientResponse, FlushPipelinesResponse, PipelineStatus, @@ -103,14 +102,14 @@ async def evaluation_health( try: crystallization = await get_crystallization_service() services["crystallization"] = crystallization is not None - except Exception as e: + except Exception: services["crystallization"] = False # Check episodic memory (optional) try: episodic = await get_episodic_memory() services["episodic_memory"] = episodic is not None - except Exception as e: + except Exception: services["episodic_memory"] = False all_critical_ok = all([ @@ -621,7 +620,7 @@ async def _reset_patient_memory( if session_id: await patient_memory.redis.delete_session(session_id) layers_cleared.append("redis") - logger.debug(f"Redis: cleared sessions for patient") + logger.debug("Redis: cleared sessions for patient") except Exception as e: logger.error(f"Failed to clear Redis: {e}") errors.append(f"Redis: {str(e)}") @@ -633,7 +632,7 @@ async def _reset_patient_memory( # Graphiti doesn't have a simple delete-by-patient API # This would need custom implementation layers_cleared.append("graphiti") - logger.debug(f"Graphiti: marked for clearing (may need manual cleanup)") + 
logger.debug("Graphiti: marked for clearing (may need manual cleanup)") except Exception as e: logger.error(f"Failed to clear Graphiti: {e}") errors.append(f"Graphiti: {str(e)}") diff --git a/src/application/api/kg_router.py b/src/application/api/kg_router.py index 710a163b9..acc8414a9 100644 --- a/src/application/api/kg_router.py +++ b/src/application/api/kg_router.py @@ -1,7 +1,7 @@ """FastAPI router for Knowledge Graph operations.""" -from typing import Dict, Any, List, Optional -from fastapi import APIRouter, Depends, HTTPException, Body +from typing import Dict, Any, Optional +from fastapi import APIRouter, Depends, HTTPException from pydantic import BaseModel from domain.kg_backends import KnowledgeGraphBackend @@ -86,7 +86,7 @@ async def ask_graph( Generate ONLY the Cypher query to answer this question. Do not include markdown formatting. """ - episode = await llm.add_episode( + await llm.add_episode( name=f"text_to_cypher_{datetime.now().timestamp()}", episode_body=prompt, source_description="Text-to-Cypher API", diff --git a/src/application/api/main.py b/src/application/api/main.py index 39277146b..3c4a3f4fc 100644 --- a/src/application/api/main.py +++ b/src/application/api/main.py @@ -2,7 +2,6 @@ from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, UploadFile, File, Body, Query from fastapi.middleware.cors import CORSMiddleware -from fastapi.staticfiles import StaticFiles from fastapi.responses import FileResponse from pathlib import Path import logging @@ -339,7 +338,7 @@ async def chat_websocket_endpoint( if new_title: break if attempt == 0: - logger.info(f"Title generation attempt 1 failed, retrying...") + logger.info("Title generation attempt 1 failed, retrying...") await asyncio.sleep(0.5) if new_title: @@ -626,55 +625,55 @@ async def get_graph_data( # Build Cypher query based on filters if layer: - query = f""" + query = """ MATCH (n) WHERE toLower(n.layer) = $layer WITH n LIMIT $limit OPTIONAL MATCH (n)-[r]->(m) 
RETURN - collect(DISTINCT {{ + collect(DISTINCT { id: elementId(n), label: coalesce(n.name, elementId(n)), type: head(labels(n)), layer: n.layer, properties: properties(n) - }}) as nodes, - collect(DISTINCT {{ + }) as nodes, + collect(DISTINCT { id: elementId(r), source: elementId(n), target: elementId(m), label: type(r), type: type(r) - }}) as edges + }) as edges """ params = {"layer": layer, "limit": limit} else: - query = f""" + query = """ MATCH (n) WITH n LIMIT $limit OPTIONAL MATCH (n)-[r]->(m) WHERE m IS NOT NULL RETURN - collect(DISTINCT {{ + collect(DISTINCT { id: elementId(n), label: coalesce(n.name, elementId(n)), type: head(labels(n)), layer: coalesce(n.layer, 'perception'), properties: properties(n) - }}) + collect(DISTINCT {{ + }) + collect(DISTINCT { id: elementId(m), label: coalesce(m.name, elementId(m)), type: head(labels(m)), layer: coalesce(m.layer, 'perception'), properties: properties(m) - }}) as nodes, - collect(DISTINCT {{ + }) as nodes, + collect(DISTINCT { id: elementId(r), source: elementId(n), target: elementId(m), label: type(r), type: type(r) - }}) as edges + }) as edges """ params = {"limit": limit} @@ -1391,7 +1390,7 @@ async def upload_dda( c.created_at = COALESCE(c.created_at, datetime()) RETURN elementId(c) as catalog_id """ - catalog_result = await kg_backend.query_raw(catalog_query, { + await kg_backend.query_raw(catalog_query, { "domain": result.domain, "data_owner": result.data_owner, "business_context": result.business_context @@ -2236,7 +2235,6 @@ async def export_training_data( from application.formatters.training_data_formatter import ( TrainingDataFormatter, FormatterConfig, - OutputFormat, ) backend = await get_kg_backend() @@ -2763,7 +2761,7 @@ async def search_sessions( # ======================================== @app.get("/api/graph/layer-stats") -async def get_layer_statistics(kg_backend = Depends(get_kg_backend)): +async def get_graph_layer_statistics(kg_backend = Depends(get_kg_backend)): """ Get knowledge graph 
layer statistics. @@ -3240,7 +3238,6 @@ async def trigger_quality_scan( scan_type: Type of scan to run ('document', 'ontology', or 'both') """ from application.services.quality_scanner_job import ( - get_quality_scanner, initialize_quality_scanner, ) from application.services.document_tracker import DocumentTracker @@ -3294,7 +3291,6 @@ async def get_document_quality_trends( """ try: from infrastructure.database.session import db_session - from infrastructure.database.repositories import DocumentQualityRepository from sqlalchemy import select, func from infrastructure.database.models import DocumentQuality from datetime import datetime, timedelta @@ -3419,7 +3415,7 @@ async def get_promotion_status(): @app.post("/api/promotion/scan") -async def trigger_promotion_scan(): +async def trigger_layer_promotion_scan(): """ Manually trigger a promotion scan. diff --git a/src/application/api/remediation_router.py b/src/application/api/remediation_router.py index 6f0ddd80b..3a15d160d 100644 --- a/src/application/api/remediation_router.py +++ b/src/application/api/remediation_router.py @@ -11,7 +11,7 @@ """ import logging -from typing import List, Optional +from typing import List from fastapi import APIRouter, Depends, HTTPException, Query from pydantic import BaseModel diff --git a/src/application/commands/knowledge_commands.py b/src/application/commands/knowledge_commands.py index 8b550ea41..75632c6b9 100644 --- a/src/application/commands/knowledge_commands.py +++ b/src/application/commands/knowledge_commands.py @@ -1,6 +1,6 @@ """Commands for knowledge management operations.""" -from typing import Dict, Any, List, Optional +from typing import Dict, Any, List from domain.commands import Command from pydantic import BaseModel from datetime import datetime diff --git a/src/application/commands/knowledge_handlers.py b/src/application/commands/knowledge_handlers.py index bd3f22367..9047af294 100644 --- a/src/application/commands/knowledge_handlers.py +++ 
b/src/application/commands/knowledge_handlers.py @@ -1,6 +1,6 @@ """Command handlers for knowledge management operations.""" -from typing import Dict, Any, List +from typing import Dict from domain.command_bus import CommandHandler from application.commands.knowledge_commands import ( EscalateKGUpdateCommand, @@ -13,7 +13,6 @@ AuditLogResult ) from application.agents.knowledge_manager.agent import KnowledgeManagerAgent, KGUpdateRequest, KGUpdateType -from graphiti_core import Graphiti import logging logger = logging.getLogger(__name__) diff --git a/src/application/jobs/promotion_scanner.py b/src/application/jobs/promotion_scanner.py index c3f49e149..b11cdb4b3 100644 --- a/src/application/jobs/promotion_scanner.py +++ b/src/application/jobs/promotion_scanner.py @@ -215,7 +215,7 @@ async def _scan_layer(self, layer: str) -> Dict[str, Any]: entity_data=entity_data, from_layer=Layer(actual_layer), to_layer=Layer(to_layer), - reason=f"Background scan promotion" + reason="Background scan promotion" ) if record and record.status.value == "completed": diff --git a/src/application/knowledge_management.py b/src/application/knowledge_management.py index 32060f69b..7078972bc 100644 --- a/src/application/knowledge_management.py +++ b/src/application/knowledge_management.py @@ -7,7 +7,6 @@ escalation logic can be layered on top of this service in the future. 
""" -from typing import Dict, Any from domain.event import KnowledgeEvent from domain.kg_backends import KnowledgeGraphBackend diff --git a/src/application/rules/medical_rules.py b/src/application/rules/medical_rules.py index efbcd1b62..c15772cd5 100644 --- a/src/application/rules/medical_rules.py +++ b/src/application/rules/medical_rules.py @@ -12,8 +12,7 @@ import logging from dataclasses import dataclass -from datetime import datetime -from typing import Dict, List, Optional, Any, Set +from typing import Dict, List, Optional, Any from domain.medical_rules_models import ( MedicalRule, diff --git a/src/application/services/automatic_layer_transition.py b/src/application/services/automatic_layer_transition.py index 8a6801572..10a2d56fb 100644 --- a/src/application/services/automatic_layer_transition.py +++ b/src/application/services/automatic_layer_transition.py @@ -17,11 +17,9 @@ """ from typing import Dict, Any, Optional, List -from dataclasses import dataclass, field +from dataclasses import dataclass from datetime import datetime, timedelta -from enum import Enum import logging -import uuid from domain.event import KnowledgeEvent from domain.roles import Role @@ -282,7 +280,7 @@ async def _check_perception_promotion(self, entity_data: Dict[str, Any]) -> bool has_ontology = bool(ontology_codes or snomed_code or umls_cui or icd10_code) if has_ontology: - logger.info(f"Entity has ontology match") + logger.info("Entity has ontology match") return True return False @@ -659,7 +657,7 @@ async def run_promotion_scan(self) -> Dict[str, Any]: entity_data=entity_data, from_layer=from_layer, to_layer=to_layer, - reason=f"Batch promotion scan" + reason="Batch promotion scan" ) if record and record.status == TransitionStatus.COMPLETED: diff --git a/src/application/services/chat_history_service.py b/src/application/services/chat_history_service.py index 7f59f6bae..049755417 100644 --- a/src/application/services/chat_history_service.py +++ 
b/src/application/services/chat_history_service.py @@ -264,8 +264,6 @@ async def _create_session_postgres( self, session_uuid: uuid.UUID, patient_id: str, title: str, device: str, neo4j_id: str ) -> None: try: - from infrastructure.database.models import Session as PgSession - from infrastructure.database.repositories import SessionRepository from sqlalchemy import text async with self._db_session() as session: @@ -470,9 +468,8 @@ async def search_sessions( async def _search_sessions_postgres( self, patient_id: str, query: str, limit: int ) -> List[SessionMetadata]: - from sqlalchemy import select, or_, distinct + from sqlalchemy import select, or_ from infrastructure.database.models import Session as PgSession, Message as PgMessage - from infrastructure.database.repositories import SessionRepository async with self._db_session() as session: # Find sessions where title or message content matches diff --git a/src/application/services/chunk_impact_analyzer.py b/src/application/services/chunk_impact_analyzer.py index eacd4d321..05e9df2db 100644 --- a/src/application/services/chunk_impact_analyzer.py +++ b/src/application/services/chunk_impact_analyzer.py @@ -21,7 +21,7 @@ from dataclasses import dataclass, field from datetime import datetime from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple +from typing import Dict, List, Tuple from domain.chunk_separation_models import ( ChunkSeparationOption, @@ -213,7 +213,7 @@ async def _collect_live_metrics(self, report: ChunkImpactReport) -> None: if result: row = result[0] - chunk_count = row.get("chunk_count", 0) + row.get("chunk_count", 0) total_bytes = row.get("total_bytes", 0) report.data_volume_mb = total_bytes / (1024 * 1024) if total_bytes else 0 @@ -701,7 +701,7 @@ async def run_impact_analysis( async def main(): report = await run_impact_analysis(output_path=args.output) - print(f"\n========== IMPACT ANALYSIS SUMMARY ==========") + print("\n========== IMPACT ANALYSIS SUMMARY ==========") if 
report.recommendation: print(f"Recommended Option: {report.recommendation.recommended_option.value}") diff --git a/src/application/services/confidence_framework.py b/src/application/services/confidence_framework.py index 15a6a5af5..1e7ca96fe 100644 --- a/src/application/services/confidence_framework.py +++ b/src/application/services/confidence_framework.py @@ -18,10 +18,7 @@ ConfidenceSource, ConfidenceCombination, AggregationStrategy, - ConfidencePropagation, - ConfidenceTracker, - neural_confidence, - symbolic_confidence + ConfidencePropagation ) logger = logging.getLogger(__name__) diff --git a/src/application/services/conversation_graph.py b/src/application/services/conversation_graph.py index 86ed34da3..2faeb35db 100644 --- a/src/application/services/conversation_graph.py +++ b/src/application/services/conversation_graph.py @@ -23,21 +23,16 @@ import json from langgraph.graph import StateGraph, END -from langgraph.graph.message import add_messages from langgraph.checkpoint.memory import MemorySaver from langchain_core.messages import HumanMessage, AIMessage, SystemMessage from domain.conversation_state import ( ConversationState, - ConversationMode, create_initial_state, - PatientContext, ) from application.services.conversation_nodes import ConversationNodes from application.services.conversation_router import ( route_by_mode, - should_skip_synthesizer, - should_persist_memory, ) logger = logging.getLogger(__name__) @@ -288,7 +283,6 @@ async def reset_conversation(self, thread_id: str) -> bool: Returns: True if reset successful """ - config = {"configurable": {"thread_id": thread_id}} try: # For MemorySaver, we need to clear the storage directly diff --git a/src/application/services/conversation_nodes.py b/src/application/services/conversation_nodes.py index 9b01e217f..f1ef311cc 100644 --- a/src/application/services/conversation_nodes.py +++ b/src/application/services/conversation_nodes.py @@ -10,13 +10,13 @@ """ import logging -from typing import Dict, Any, 
Optional, List, Tuple +from typing import Dict, Any, Optional, List from datetime import datetime import json import os from openai import AsyncOpenAI -from langchain_core.messages import HumanMessage, AIMessage, SystemMessage +from langchain_core.messages import HumanMessage, AIMessage from domain.conversation_state import ( ConversationState, @@ -24,14 +24,12 @@ UrgencyLevel, EmotionalTone, AssistantAction, - PatientContext, ActiveGoal, serialize_goal, deserialize_goal, ) from domain.goal_templates import ( GoalType, - GOAL_TEMPLATES, create_goal_from_template, get_slot_question, get_completion_prompt, @@ -108,7 +106,7 @@ async def entry_node(self, state: ConversationState) -> Dict[str, Any]: print(f"[ENTRY_NODE] thread_id={thread_id}") print(f"[ENTRY_NODE] Turn {previous_turn_count} -> {turn_count}, Messages in state: {len(messages)}") if previous_turn_count == 0: - print(f"[ENTRY_NODE] *** NEW CONVERSATION (turn_count was 0) ***") + print("[ENTRY_NODE] *** NEW CONVERSATION (turn_count was 0) ***") else: print(f"[ENTRY_NODE] *** CONTINUING CONVERSATION (turn_count was {previous_turn_count}) ***") @@ -601,7 +599,7 @@ def _build_greeting_prompt( recently_resolved = patient_context.get("recently_resolved", []) patient_name = patient_context.get("patient_name") medications = patient_context.get("current_medications", []) - conversation_summary = patient_context.get("conversation_summary", "") + patient_context.get("conversation_summary", "") mem0_memories = patient_context.get("mem0_memories", []) # Check if user mentioned Matucha by name - they know her @@ -672,9 +670,9 @@ def _build_greeting_prompt( # Different intro styles for new patients intro_styles = [ f"{time_greeting}! I'm Matucha, and I'll be your medical assistant.", - f"Hi there! My name's Matucha - I'm here to help with any health questions.", + "Hi there! My name's Matucha - I'm here to help with any health questions.", f"{time_greeting}! I'm Matucha, your friendly medical assistant.", - f"Hello! 
I'm Matucha. I'm here to help you with health-related questions.", + "Hello! I'm Matucha. I'm here to help you with health-related questions.", ] selected_intro = random.choice(intro_styles) @@ -1012,13 +1010,13 @@ async def goal_driven_node(self, state: ConversationState) -> Dict[str, Any]: context_lower = refined_context.lower() if any(kw in context_lower for kw in ["desk", "work", "office", "sitting", "computer", "while working"]): goal.fill_slot("exercise_context", "desk_exercises") - print(f"[GOAL_DRIVEN] Updated exercise_context to 'desk_exercises' based on refinement") + print("[GOAL_DRIVEN] Updated exercise_context to 'desk_exercises' based on refinement") elif any(kw in context_lower for kw in ["quick", "short", "5 minute", "10 minute", "brief", "break"]): goal.fill_slot("exercise_context", "quick_breaks") - print(f"[GOAL_DRIVEN] Updated exercise_context to 'quick_breaks' based on refinement") + print("[GOAL_DRIVEN] Updated exercise_context to 'quick_breaks' based on refinement") elif any(kw in context_lower for kw in ["travel", "hotel", "trip", "on the go"]): goal.fill_slot("exercise_context", "travel") - print(f"[GOAL_DRIVEN] Updated exercise_context to 'travel' based on refinement") + print("[GOAL_DRIVEN] Updated exercise_context to 'travel' based on refinement") # Acknowledge the refinement and continue with updated context acknowledgment = await self._generate_refinement_acknowledgment( @@ -1466,7 +1464,7 @@ async def _generate_conversational_slot_question( # Build context-aware prompt for generating natural question goal_type_friendly = goal.goal_type.value.replace("_", " ") - progress_info = f"{len(filled_slots)}/{len([s for s in goal.slots.values() if s.required])}" + f"{len(filled_slots)}/{len([s for s in goal.slots.values() if s.required])}" prompt = f"""You are Matucha, helping a patient with {goal_type_friendly}. 
@@ -1621,10 +1619,9 @@ async def closing_node(self, state: ConversationState) -> Dict[str, Any]: """ messages = state.get("messages", []) explored_topics = state.get("explored_topics", []) - patient_context = state.get("patient_context", {}) + state.get("patient_context", {}) - last_message = messages[-1] if messages else None - user_text = last_message.content if last_message else "" + messages[-1] if messages else None # Build closing message based on conversation content if explored_topics: diff --git a/src/application/services/conversational_intent_service.py b/src/application/services/conversational_intent_service.py index a8ba228e6..ee05d28c6 100644 --- a/src/application/services/conversational_intent_service.py +++ b/src/application/services/conversational_intent_service.py @@ -7,7 +7,7 @@ import re import logging -from typing import Optional, Dict, List +from typing import Optional from openai import AsyncOpenAI import os diff --git a/src/application/services/cross_graph_query_builder.py b/src/application/services/cross_graph_query_builder.py index e64934953..02bfaa94e 100644 --- a/src/application/services/cross_graph_query_builder.py +++ b/src/application/services/cross_graph_query_builder.py @@ -379,7 +379,7 @@ def find_related_entities( params = {"entity_name": entity_name, "max_results": max_results} if relationship_types: - rel_filter = f"AND type(r) IN $relationship_types" + rel_filter = "AND type(r) IN $relationship_types" params["relationship_types"] = relationship_types query = f""" diff --git a/src/application/services/crystallization_service.py b/src/application/services/crystallization_service.py index e1e9a95dc..f9552d468 100644 --- a/src/application/services/crystallization_service.py +++ b/src/application/services/crystallization_service.py @@ -521,14 +521,6 @@ async def crystallize_from_graphiti( # Query FalkorDB for recent entities via Graphiti # The actual query depends on Graphiti's API # This is a simplified version - query = """ - MATCH 
(e:Entity) - WHERE e.created_at > $since_timestamp - RETURN e.uuid as id, e.name as name, e.entity_type as entity_type, - e.summary as summary, e.created_at as created_at - ORDER BY e.created_at DESC - LIMIT $limit - """ # Note: This would need to be adapted to Graphiti's actual query method # For now, we'll use a placeholder that works with the Graphiti client diff --git a/src/application/services/dikw_router.py b/src/application/services/dikw_router.py index d7ee7d290..5331a6c8a 100644 --- a/src/application/services/dikw_router.py +++ b/src/application/services/dikw_router.py @@ -18,7 +18,6 @@ import re import logging from dataclasses import dataclass -from datetime import datetime from typing import Dict, List, Optional, Any, Tuple from domain.query_intent_models import ( @@ -240,7 +239,7 @@ def route_query(self, query: str) -> RoutingDecision: logger.debug( f"Routed query: intent={intent.primary_intent.value}, " - f"confidence={intent.confidence:.2f}, layers={[l.value for l in decision.layers]}" + f"confidence={intent.confidence:.2f}, layers={[layer.value for layer in decision.layers]}" ) return decision @@ -453,10 +452,10 @@ def explain_routing(self, query: str) -> Dict[str, Any]: "matched_patterns": decision.intent.matched_patterns, }, "routing": { - "layers": [l.value for l in decision.layers], + "layers": [layer.value for layer in decision.layers], "strategy": decision.strategy, "fallback_enabled": decision.fallback_enabled, - "fallback_layers": [l.value for l in decision.get_fallback_layers()], + "fallback_layers": [layer.value for layer in decision.get_fallback_layers()], }, "context": self.get_query_context(decision), "requires_inference": decision.intent.requires_inference, diff --git a/src/application/services/document_quality_service.py b/src/application/services/document_quality_service.py index f43ec13f7..3f0053212 100644 --- a/src/application/services/document_quality_service.py +++ b/src/application/services/document_quality_service.py @@ -10,8 
+10,6 @@ """ import re -import os -import json import time import hashlib from typing import List, Dict, Any, Optional, Tuple @@ -26,7 +24,6 @@ StructuralClarityScore, EntityDensityScore, ChunkingQualityScore, - QualityLevel, ) from application.services.text_chunker import TextChunk diff --git a/src/application/services/document_service.py b/src/application/services/document_service.py index 0ec09cc50..3ffa8614b 100644 --- a/src/application/services/document_service.py +++ b/src/application/services/document_service.py @@ -15,7 +15,7 @@ from application.services.text_chunker import TextChunker, TextChunk from application.services.entity_extractor import EntityExtractor, ExtractedEntity from application.services.document_quality_service import DocumentQualityService -from domain.quality_models import DocumentQualityReport, QualityLevel +from domain.quality_models import DocumentQualityReport @dataclass @@ -304,7 +304,6 @@ async def _get_embedding(self, text: str) -> Optional[List[float]]: def _simple_embedding(self, text: str, dim: int = 256) -> List[float]: """Simple fallback embedding using hashing.""" import hashlib - import struct # Create a deterministic pseudo-embedding hash_bytes = hashlib.sha256(text.encode()).digest() @@ -539,7 +538,7 @@ async def _dual_write_to_postgres( assessed_at=quality_report.assessed_at, ) session.add(quality_entry) - print(f" 📊 Stored quality metrics in PostgreSQL") + print(" 📊 Stored quality metrics in PostgreSQL") await session.commit() return True diff --git a/src/application/services/document_tracker.py b/src/application/services/document_tracker.py index 531387fca..c32cebfa3 100644 --- a/src/application/services/document_tracker.py +++ b/src/application/services/document_tracker.py @@ -6,7 +6,7 @@ import json import hashlib -from dataclasses import dataclass, field, asdict +from dataclasses import dataclass, asdict from pathlib import Path from typing import Dict, List, Optional from datetime import datetime diff --git 
a/src/application/services/entity_extractor.py b/src/application/services/entity_extractor.py index f1fed7f6d..c283eb32a 100644 --- a/src/application/services/entity_extractor.py +++ b/src/application/services/entity_extractor.py @@ -9,7 +9,7 @@ import logging from typing import List, Dict, Any, Optional -from dataclasses import dataclass, field +from dataclasses import dataclass import os import json import re @@ -427,7 +427,7 @@ def _heuristic_extraction(self, prompt: str) -> Dict[str, Any]: entities.append({ "name": word, "type": entity_type, - "context": f"Found in text", + "context": "Found in text", "confidence": 0.5 }) diff --git a/src/application/services/entity_resolver.py b/src/application/services/entity_resolver.py index b572d9ba6..bce53a6f8 100644 --- a/src/application/services/entity_resolver.py +++ b/src/application/services/entity_resolver.py @@ -304,7 +304,6 @@ async def _embedding_match( matches = [] try: - import numpy as np from sklearn.metrics.pairwise import cosine_similarity # Generate embedding for query entity diff --git a/src/application/services/episodic_memory_service.py b/src/application/services/episodic_memory_service.py index c0242da17..b83a23a74 100644 --- a/src/application/services/episodic_memory_service.py +++ b/src/application/services/episodic_memory_service.py @@ -30,8 +30,7 @@ from application.event_bus import EventBus from graphiti_core import Graphiti -from graphiti_core.nodes import EpisodeType, EpisodicNode, EntityNode -from graphiti_core.edges import EntityEdge +from graphiti_core.nodes import EpisodeType, EpisodicNode from graphiti_core.driver.falkordb_driver import FalkorDriver from graphiti_core.search.search import search from graphiti_core.search.search_config import SearchResults diff --git a/src/application/services/extended_kg_audit_service.py b/src/application/services/extended_kg_audit_service.py index 78fc18d37..92c4a650d 100644 --- a/src/application/services/extended_kg_audit_service.py +++ 
b/src/application/services/extended_kg_audit_service.py @@ -20,7 +20,6 @@ import uuid from datetime import datetime from pathlib import Path -from typing import Any, Dict, List, Optional from domain.chunk_separation_models import ( SubgraphType, @@ -801,7 +800,7 @@ async def run_extended_audit( async def main(): report = await run_extended_audit(output_path=args.output) - print(f"\n========== EXTENDED AUDIT SUMMARY ==========") + print("\n========== EXTENDED AUDIT SUMMARY ==========") print(f"Separation Readiness: {report.separation_readiness.upper()}") print(f"Total Entities: {report.total_entities:,}") diff --git a/src/application/services/extraction_audit_service.py b/src/application/services/extraction_audit_service.py index cbaa0a3f3..915a776f9 100644 --- a/src/application/services/extraction_audit_service.py +++ b/src/application/services/extraction_audit_service.py @@ -18,7 +18,7 @@ import logging from datetime import datetime from dataclasses import dataclass, field -from typing import Dict, List, Any, Optional, Tuple +from typing import Dict, List, Any from pathlib import Path from enum import Enum @@ -651,8 +651,8 @@ def _format_markdown_report(self, report: ExtractionAuditReport) -> str: "", "## Executive Summary", "", - f"| Metric | Value | Status |", - f"|--------|-------|--------|", + "| Metric | Value | Status |", + "|--------|-------|--------|", f"| Total Entities | {report.total_entities:,} | — |", f"| Total Orphans | {report.total_orphans:,} | {severity_emoji[report.severity]} {report.severity.value.upper()} |", f"| Orphan Rate | {report.orphan_rate:.1%} | — |", @@ -831,7 +831,7 @@ async def main(): neo4j_uri=args.neo4j_uri, output_path=args.output, ) - print(f"\nAudit Summary:") + print("\nAudit Summary:") print(f" Severity: {report.severity.value.upper()}") print(f" Entities: {report.total_entities:,}") print(f" Orphans: {report.total_orphans:,} ({report.orphan_rate:.1%})") diff --git a/src/application/services/feedback_integrator.py 
b/src/application/services/feedback_integrator.py index de19cb7c1..a64c58d97 100644 --- a/src/application/services/feedback_integrator.py +++ b/src/application/services/feedback_integrator.py @@ -7,7 +7,7 @@ - Generates calibration reports """ -from typing import Dict, List, Any, Optional, Tuple +from typing import Dict, List, Any, Optional from dataclasses import dataclass, field from datetime import datetime, timedelta from collections import defaultdict diff --git a/src/application/services/feedback_tracer.py b/src/application/services/feedback_tracer.py index 1f60d8b2c..ecd8a64f4 100644 --- a/src/application/services/feedback_tracer.py +++ b/src/application/services/feedback_tracer.py @@ -9,7 +9,7 @@ - Feedback propagation to entity confidence """ -from typing import Dict, Any, List, Optional, Tuple +from typing import Dict, Any, List, Optional from dataclasses import dataclass, field from datetime import datetime from enum import Enum diff --git a/src/application/services/hypergraph_bridge_service.py b/src/application/services/hypergraph_bridge_service.py index 78bcbdccf..0259df0a7 100644 --- a/src/application/services/hypergraph_bridge_service.py +++ b/src/application/services/hypergraph_bridge_service.py @@ -39,25 +39,16 @@ import asyncio import logging from datetime import datetime -from typing import Any, Dict, List, Optional, Set, Tuple +from typing import Any, Dict, List from domain.hypergraph_models import ( FactUnit, - FactType, - HyperEdge, EntityMention, - ConfidenceScore, - ConfidenceSource, CoOccurrenceContext, - NeurosymbolicLink, BridgeStatistics, ) from domain.ontologies.registry import ( - get_ontology_config, is_known_type, - resolve_entity_type, - get_domain_for_type, - get_layer_for_type, ) logger = logging.getLogger(__name__) @@ -666,7 +657,7 @@ async def run_bridge_builder( args = parser.parse_args() async def main(): - stats = await run_bridge_builder() + await run_bridge_builder() if args.propagate: from infrastructure.neo4j_backend 
import Neo4jBackend diff --git a/src/application/services/intelligent_chat_service.py b/src/application/services/intelligent_chat_service.py index b62a79c24..70ad8f5a4 100644 --- a/src/application/services/intelligent_chat_service.py +++ b/src/application/services/intelligent_chat_service.py @@ -39,8 +39,6 @@ from application.agents.knowledge_manager.reasoning_engine import ReasoningEngine from application.agents.knowledge_manager.validation_engine import ValidationEngine from infrastructure.neo4j_backend import Neo4jBackend -from domain.event import KnowledgeEvent -from domain.roles import Role from domain.confidence_models import CrossLayerConfidencePropagation # Conversational layer imports (Phase 6) @@ -889,7 +887,7 @@ async def _apply_reasoning( ) # Add layer traversal information - layers_str = " → ".join([l.value for l in trace.layers_traversed]) + layers_str = " → ".join([layer.value for layer in trace.layers_traversed]) formatted_provenance.append( f"Layers Traversed: {layers_str}" ) diff --git a/src/application/services/knowledge_enricher.py b/src/application/services/knowledge_enricher.py index d7b76c5f2..88ffd8c26 100644 --- a/src/application/services/knowledge_enricher.py +++ b/src/application/services/knowledge_enricher.py @@ -7,9 +7,7 @@ """ from typing import List, Dict, Any, Optional -from datetime import datetime from graphiti_core import Graphiti -from domain.event import KnowledgeEvent from domain.ontologies.odin import ODIN from domain.knowledge_layers import KnowledgeLayer from application.services.entity_resolver import EntityResolver, ResolutionStrategy diff --git a/src/application/services/langgraph_chat_service.py b/src/application/services/langgraph_chat_service.py index 62d5f0c5d..bdc25b2cd 100644 --- a/src/application/services/langgraph_chat_service.py +++ b/src/application/services/langgraph_chat_service.py @@ -10,9 +10,7 @@ import logging from typing import List, Dict, Any, Optional -from dataclasses import dataclass, field from 
datetime import datetime -import uuid from application.services.conversation_graph import ConversationGraph from application.services.intelligent_chat_service import Message, ChatResponse diff --git a/src/application/services/medical_data_linker.py b/src/application/services/medical_data_linker.py index ed7f4954b..4f7d63663 100644 --- a/src/application/services/medical_data_linker.py +++ b/src/application/services/medical_data_linker.py @@ -350,7 +350,7 @@ def _create_link( try: # Use MERGE to avoid duplicates # Escape single quotes in strings - reasoning_escaped = link.reasoning.replace("'", "\\'") + link.reasoning.replace("'", "\\'") query = f""" MATCH (m), (d) diff --git a/src/application/services/memory_context_builder.py b/src/application/services/memory_context_builder.py index 2642fb653..33643c702 100644 --- a/src/application/services/memory_context_builder.py +++ b/src/application/services/memory_context_builder.py @@ -10,8 +10,8 @@ """ import logging -from typing import List, Optional, Dict, Any -from datetime import datetime, timedelta +from typing import List, Optional, Any +from datetime import datetime from collections import Counter from mem0 import Memory diff --git a/src/application/services/neo4j_pdf_ingestion.py b/src/application/services/neo4j_pdf_ingestion.py index b15e7b250..2fb89a4ed 100644 --- a/src/application/services/neo4j_pdf_ingestion.py +++ b/src/application/services/neo4j_pdf_ingestion.py @@ -12,10 +12,9 @@ import re import json from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional from dataclasses import dataclass from datetime import datetime -import asyncio import logging try: @@ -395,11 +394,11 @@ async def persist_to_neo4j( continue try: - rel_type_sanitized = self._sanitize_label(rel_type) + self._sanitize_label(rel_type) session.run( - f""" - MATCH (source:ExtractedEntity {{id: $source_id}}) - MATCH (target:ExtractedEntity {{id: $target_id}}) + """ + MATCH 
(source:ExtractedEntity {id: $source_id}) + MATCH (target:ExtractedEntity {id: $target_id}) MERGE (source)-[r:LINKS_TO]->(target) SET r.type = $rel_type, r.description = $description, diff --git a/src/application/services/neurosymbolic_query_service.py b/src/application/services/neurosymbolic_query_service.py index 33d14553d..25d5ac7fb 100644 --- a/src/application/services/neurosymbolic_query_service.py +++ b/src/application/services/neurosymbolic_query_service.py @@ -26,12 +26,11 @@ CrossLayerConfidencePropagation, create_confidence, symbolic_confidence, - neural_confidence, ) from domain.event import KnowledgeEvent from domain.roles import Role -from domain.temporal_models import TemporalQueryContext, TemporalWindow -from domain.query_intent_models import QueryIntent, DIKWLayer, RoutingDecision +from domain.temporal_models import TemporalQueryContext +from domain.query_intent_models import QueryIntent if TYPE_CHECKING: from application.services.temporal_scoring import TemporalScoringService @@ -183,7 +182,7 @@ def __init__( "temporal_adjustments": 0, "intent_routed": 0, "by_strategy": {s.value: 0 for s in QueryStrategy}, - "by_layer": {l.value: 0 for l in KnowledgeLayer}, + "by_layer": {layer.value: 0 for layer in KnowledgeLayer}, "by_intent": {intent.value: 0 for intent in QueryIntent}, } @@ -282,7 +281,7 @@ async def execute_query( # Add trace info to result result["query_id"] = query_id result["strategy"] = strategy.value - result["layers_traversed"] = [l.value for l in trace.layers_traversed] + result["layers_traversed"] = [layer.value for layer in trace.layers_traversed] result["confidence"] = trace.final_confidence.score result["execution_time_ms"] = total_time @@ -299,7 +298,7 @@ async def execute_query( result["routing"] = { "intent": routing_decision.intent.primary_intent.value, "intent_confidence": routing_decision.intent.confidence, - "recommended_layers": [l.value for l in routing_decision.layers], + "recommended_layers": [layer.value for layer in 
routing_decision.layers], "matched_patterns": routing_decision.intent.matched_patterns[:3], "requires_inference": routing_decision.intent.requires_inference, } @@ -314,7 +313,7 @@ async def execute_query( logger.info( f"Query {query_id} completed: confidence={trace.final_confidence.score:.2f}, " - f"layers={[l.value for l in trace.layers_traversed]}, time={total_time:.0f}ms, " + f"layers={[layer.value for layer in trace.layers_traversed]}, time={total_time:.0f}ms, " f"entities={len(entity_ids)}" ) diff --git a/src/application/services/ontology_cleanup_service.py b/src/application/services/ontology_cleanup_service.py index 777ff60a4..1812a37be 100644 --- a/src/application/services/ontology_cleanup_service.py +++ b/src/application/services/ontology_cleanup_service.py @@ -12,7 +12,7 @@ """ import logging -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List from datetime import datetime logger = logging.getLogger(__name__) diff --git a/src/application/services/ontology_quality_service.py b/src/application/services/ontology_quality_service.py index 16e153d98..27e307d44 100644 --- a/src/application/services/ontology_quality_service.py +++ b/src/application/services/ontology_quality_service.py @@ -25,7 +25,6 @@ NormalizationQualityScore, CrossReferenceValidityScore, InteroperabilityScore, - OntologyQualityLevel, ODIN_SCHEMAS, SCHEMA_ORG_MAPPINGS, ) @@ -431,7 +430,7 @@ async def _assess_taxonomy( # Detect orphans — prefer remediation metadata when available # Build lookup for entities by id - entity_by_id = {e.get("id"): e for e in entities if e.get("id")} + {e.get("id"): e for e in entities if e.get("id")} # Check if remediation orphan metadata exists entities_with_orphan_flag = [ diff --git a/src/application/services/patient_memory_service.py b/src/application/services/patient_memory_service.py index fbd8e6aba..100697687 100644 --- a/src/application/services/patient_memory_service.py +++ b/src/application/services/patient_memory_service.py @@ 
-1091,7 +1091,7 @@ async def add_procedure( ) # Store in Mem0 for conversational recall - status_text = f"is scheduled for" if status == "scheduled" else f"had" + status_text = "is scheduled for" if status == "scheduled" else "had" self.mem0.add( f"Patient {status_text} a {name} ({procedure_type})" + (f" on {scheduled_date}" if scheduled_date else ""), @@ -1755,7 +1755,7 @@ async def check_consent( result = await self.neo4j.query_raw(query, {"patient_id": patient_id}) if result: - consent = result[0]["consent"] == True + consent = result[0]["consent"] logger.debug(f"Consent check for patient {patient_id}: {consent}") return consent diff --git a/src/application/services/pdf_ingestion_service.py b/src/application/services/pdf_ingestion_service.py index 315a3b5b8..fcb48b013 100644 --- a/src/application/services/pdf_ingestion_service.py +++ b/src/application/services/pdf_ingestion_service.py @@ -15,7 +15,6 @@ from typing import Any, Dict, List, Optional, Tuple from dataclasses import dataclass from datetime import datetime -import asyncio import logging try: diff --git a/src/application/services/promotion_gate.py b/src/application/services/promotion_gate.py index dadf4ce5c..aeac65405 100644 --- a/src/application/services/promotion_gate.py +++ b/src/application/services/promotion_gate.py @@ -18,7 +18,7 @@ """ import logging -from datetime import datetime, timedelta +from datetime import datetime from typing import Any, Dict, List, Optional from dataclasses import dataclass, field diff --git a/src/application/services/quality_scanner_job.py b/src/application/services/quality_scanner_job.py index f3a796a0a..06c4824ef 100644 --- a/src/application/services/quality_scanner_job.py +++ b/src/application/services/quality_scanner_job.py @@ -14,7 +14,7 @@ import asyncio import os import logging -from datetime import datetime, timedelta +from datetime import datetime from pathlib import Path from typing import Optional, List, Dict, Any from dataclasses import dataclass, field diff 
--git a/src/application/services/rag_service.py b/src/application/services/rag_service.py index 499077e4a..452d7602f 100644 --- a/src/application/services/rag_service.py +++ b/src/application/services/rag_service.py @@ -6,7 +6,7 @@ 3. LLM for generating answers """ -from typing import List, Dict, Any, Optional +from typing import List, Dict, Any from dataclasses import dataclass import os diff --git a/src/application/services/response_modulator.py b/src/application/services/response_modulator.py index 61ecbfb14..33ecfbd09 100644 --- a/src/application/services/response_modulator.py +++ b/src/application/services/response_modulator.py @@ -10,7 +10,7 @@ """ import logging -from typing import Optional, Dict, Any +from typing import Optional, Any from openai import AsyncOpenAI import os @@ -18,8 +18,7 @@ IntentType, IntentResult, MemoryContext, - AgentPersona, - RESPONSE_TEMPLATES + AgentPersona ) logger = logging.getLogger(__name__) @@ -545,7 +544,7 @@ def _build_system_prompt( # Add memory context if available if is_returning: - prompt += f"- This is a RETURNING patient" + prompt += "- This is a RETURNING patient" if memory_context.days_since_last_session: prompt += f" (last session {memory_context.days_since_last_session} days ago)" prompt += "\n" diff --git a/src/application/services/rlhf_data_extractor.py b/src/application/services/rlhf_data_extractor.py index d062c89c0..07a292d31 100644 --- a/src/application/services/rlhf_data_extractor.py +++ b/src/application/services/rlhf_data_extractor.py @@ -9,9 +9,9 @@ - Multiple output format support (DPO, SFT, Alpaca) """ -from typing import Dict, Any, List, Optional, Tuple +from typing import Dict, Any, List, Optional from dataclasses import dataclass, field -from datetime import datetime, timedelta +from datetime import datetime from enum import Enum import logging import hashlib diff --git a/src/application/services/semantic_grounding.py b/src/application/services/semantic_grounding.py index 7b53dae9d..389dc6908 100644 
--- a/src/application/services/semantic_grounding.py +++ b/src/application/services/semantic_grounding.py @@ -151,7 +151,7 @@ async def ground_entity( """ # Query entity from graph try: - query = f""" + query = """ MATCH (e) WHERE e.id = $entity_id OR id(e) = $entity_id OPTIONAL MATCH (e)-[r]-(n) @@ -447,7 +447,7 @@ async def _fallback_search( graph_score=score, combined_score=score, properties=dict(r["properties"]), - explanation=f"Name-based match (embeddings unavailable)" + explanation="Name-based match (embeddings unavailable)" )) return fallback_results diff --git a/src/application/services/simple_pdf_ingestion.py b/src/application/services/simple_pdf_ingestion.py index 4fffa3281..7c359f133 100644 --- a/src/application/services/simple_pdf_ingestion.py +++ b/src/application/services/simple_pdf_ingestion.py @@ -17,7 +17,6 @@ from typing import Any, Dict, List, Optional, Tuple from dataclasses import dataclass from datetime import datetime -import asyncio import logging try: diff --git a/src/application/services/temporal_scoring.py b/src/application/services/temporal_scoring.py index 5ff4da227..4ad1b318c 100644 --- a/src/application/services/temporal_scoring.py +++ b/src/application/services/temporal_scoring.py @@ -14,7 +14,7 @@ import math import re from datetime import datetime, timedelta -from typing import Dict, List, Optional, Tuple, Any +from typing import Dict, List, Optional, Any from dataclasses import dataclass from domain.temporal_models import ( diff --git a/src/composition_root.py b/src/composition_root.py index 6f32072c4..84712489d 100644 --- a/src/composition_root.py +++ b/src/composition_root.py @@ -5,49 +5,44 @@ validate_config() -from typing import Dict, Callable, Tuple, Optional, TYPE_CHECKING -from application.agents.data_architect.agent import DataArchitectAgent -from application.agents.data_engineer.agent import DataEngineerAgent -from application.agents.knowledge_manager.agent import KnowledgeManagerAgent -from 
application.agents.data_engineer.handlers.build_kg import BuildKGCommandHandler -from application.agents.echo_agent import EchoAgent +from typing import Dict, Callable, Tuple, Optional, TYPE_CHECKING # noqa: E402 +from application.agents.data_architect.agent import DataArchitectAgent # noqa: E402 +from application.agents.data_engineer.agent import DataEngineerAgent # noqa: E402 +from application.agents.knowledge_manager.agent import KnowledgeManagerAgent # noqa: E402 +from application.agents.echo_agent import EchoAgent # noqa: E402 # Lazy import for MedicalAssistantAgent to avoid loading mem0 for agents that don't need it if TYPE_CHECKING: - from application.agents.medical_assistant.agent import MedicalAssistantAgent -from application.commands.base import CommandBus -from application.commands.collaboration_commands import BuildKGCommand -from application.commands.echo_command import EchoCommand, EchoCommandHandler -from application.commands.file_commands import ( + pass +from application.commands.base import CommandBus # noqa: E402 +from application.commands.echo_command import EchoCommand, EchoCommandHandler # noqa: E402 +from application.commands.file_commands import ( # noqa: E402 CreateFileCommand, CreateFileCommandHandler, ReadFileCommand, ReadFileCommandHandler, ) -from application.commands.shell_commands import ( +from application.commands.shell_commands import ( # noqa: E402 ExecuteShellCommand, ExecuteShellCommandHandler, ) -from application.commands.modeling_command import ModelingCommand -from application.commands.modeling_handler import ModelingCommandHandler -from application.commands.metadata_command import GenerateMetadataCommand -from application.agents.data_engineer.handlers.generate_metadata import GenerateMetadataCommandHandler -from application.agents.data_architect.modeling_workflow import ModelingWorkflow -from application.agents.data_architect.dda_parser import DDAParserFactory -from application.agents.data_architect.domain_modeler import 
DomainModeler -from application.agents.data_engineer.metadata_workflow import MetadataGenerationWorkflow -from application.agents.data_engineer.metadata_graph_builder import MetadataGraphBuilder -from application.agents.data_engineer.type_inference import TypeInferenceService -from infrastructure.parsers.markdown_parser import MarkdownDDAParser -from domain.agent import Agent -from domain.communication import CommunicationChannel -from infrastructure.graphiti import get_graphiti -from graphiti_core import Graphiti -from infrastructure.in_memory_backend import InMemoryGraphBackend -from domain.kg_backends import KnowledgeGraphBackend -from application.event_bus import EventBus -from config.agent_config import get_agent_config, AgentInfraConfig -from infrastructure.agent_infrastructure_builder import AgentInfrastructureBuilder +from application.commands.modeling_handler import ModelingCommandHandler # noqa: E402 +from application.agents.data_engineer.handlers.generate_metadata import GenerateMetadataCommandHandler # noqa: E402 +from application.agents.data_architect.modeling_workflow import ModelingWorkflow # noqa: E402 +from application.agents.data_architect.dda_parser import DDAParserFactory # noqa: E402 +from application.agents.data_engineer.metadata_workflow import MetadataGenerationWorkflow # noqa: E402 +from application.agents.data_engineer.metadata_graph_builder import MetadataGraphBuilder # noqa: E402 +from application.agents.data_engineer.type_inference import TypeInferenceService # noqa: E402 +from infrastructure.parsers.markdown_parser import MarkdownDDAParser # noqa: E402 +from domain.agent import Agent # noqa: E402 +from domain.communication import CommunicationChannel # noqa: E402 +from infrastructure.graphiti import get_graphiti # noqa: E402 +from graphiti_core import Graphiti # noqa: E402 +from infrastructure.in_memory_backend import InMemoryGraphBackend # noqa: E402 +from domain.kg_backends import KnowledgeGraphBackend # noqa: E402 +from 
application.event_bus import EventBus # noqa: E402 +from config.agent_config import AgentInfraConfig # noqa: E402 +from infrastructure.agent_infrastructure_builder import AgentInfrastructureBuilder # noqa: E402 # --- Agent Factory Functions --- @@ -206,7 +201,6 @@ def create_generate_metadata_command_handler( "knowledge_manager": create_knowledge_manager_agent, "medical_assistant": create_medical_assistant_agent, "echo": create_echo_agent, - "knowledge_manager": create_knowledge_manager_agent, } @@ -333,11 +327,10 @@ async def bootstrap_episodic_memory(event_bus: Optional[EventBus] = None): EpisodicMemoryService or None if initialization fails """ import os - from application.services.feature_flag_service import is_flag_enabled # Check if episodic memory is enabled via feature flag # For now, we'll make it opt-in via environment variable since the feature flag might not exist yet - if not os.getenv("ENABLE_EPISODIC_MEMORY", "").lower() in ("true", "1", "yes"): + if os.getenv("ENABLE_EPISODIC_MEMORY", "").lower() not in ("true", "1", "yes"): print("ℹ️ Episodic memory not enabled (set ENABLE_EPISODIC_MEMORY=true to enable)") return None @@ -397,10 +390,9 @@ async def bootstrap_crystallization_pipeline( Tuple of (CrystallizationService, PromotionGate, EntityResolver) or (None, None, None) """ import os - from application.services.feature_flag_service import is_flag_enabled # Check if crystallization is enabled - if not os.getenv("ENABLE_CRYSTALLIZATION", "").lower() in ("true", "1", "yes"): + if os.getenv("ENABLE_CRYSTALLIZATION", "").lower() not in ("true", "1", "yes"): print("ℹ️ Crystallization pipeline not enabled (set ENABLE_CRYSTALLIZATION=true to enable)") return None, None, None @@ -505,7 +497,7 @@ async def bootstrap_knowledge_management() -> Tuple[KnowledgeGraphBackend, Event "password": os.environ["NEO4J_PASSWORD"], }) kg_backend = GraphitiBackend(graphiti_client) - print(f"✅ Using Graphiti backend") + print("✅ Using Graphiti backend") elif backend_type == 
"neo4j": kg_backend = await create_neo4j_backend() @@ -586,11 +578,11 @@ async def bootstrap_agent_infrastructure( """ from typing import TYPE_CHECKING if TYPE_CHECKING: - from application.services.agent_discovery import AgentDiscoveryService + pass builder = AgentInfrastructureBuilder(config) - print(f"🔄 Bootstrapping agent infrastructure...") + print("🔄 Bootstrapping agent infrastructure...") print(f" Deployment mode: {builder.config.deployment_mode.value}") print(f" Event bus: {builder.config.event_bus.type.value}") print(f" Channel: {builder.config.communication_channel.type.value}") diff --git a/src/domain/confidence_models.py b/src/domain/confidence_models.py index fb74cf6e3..76ebdcef2 100644 --- a/src/domain/confidence_models.py +++ b/src/domain/confidence_models.py @@ -484,9 +484,9 @@ def propagate_cross_layer( evidence=evidence, reasoning=f"Cross-layer aggregation using {strategy.value}", properties={ - "layers_involved": [l.value for l in confidences.keys()], + "layers_involved": [layer.value for layer in confidences.keys()], "strategy": strategy.value, - "individual_scores": {l.value: c.score for l, c in confidences.items()} + "individual_scores": {layer.value: c.score for layer, c in confidences.items()} } ) @@ -525,10 +525,10 @@ def resolve_conflict( # If one layer is clearly higher if weight1 > weight2: higher_layer, higher_conf = layer1, confidence1 - lower_layer, lower_conf = layer2, confidence2 + _lower_layer, _lower_conf = layer2, confidence2 else: higher_layer, higher_conf = layer2, confidence2 - lower_layer, lower_conf = layer1, confidence1 + _lower_layer, _lower_conf = layer1, confidence1 # Check if confidence gap overrides layer priority if gap > self.conflict_threshold: @@ -635,7 +635,7 @@ def needs_human_review( return False, "Single source, no conflict" # Check for significant disagreements - scores = [(l, c.score) for l, c in confidences.items()] + scores = [(layer, c.score) for layer, c in confidences.items()] scores.sort(key=lambda x: 
x[1], reverse=True) highest = scores[0] diff --git a/src/domain/goal_templates.py b/src/domain/goal_templates.py index a3f4f5449..a2b6f720f 100644 --- a/src/domain/goal_templates.py +++ b/src/domain/goal_templates.py @@ -5,7 +5,7 @@ information needs to be collected before the goal can be completed. """ -from typing import Dict, List, Callable, Any +from typing import Dict, List, Any from dataclasses import dataclass from domain.conversation_state import GoalType, GoalSlot, ActiveGoal diff --git a/src/domain/hypergraph_models.py b/src/domain/hypergraph_models.py index f7001dd93..84f237a1d 100644 --- a/src/domain/hypergraph_models.py +++ b/src/domain/hypergraph_models.py @@ -16,7 +16,7 @@ from dataclasses import dataclass, field from datetime import datetime from enum import Enum -from typing import Any, Dict, List, Optional, Set, Tuple +from typing import Any, Dict, List, Optional, Tuple import hashlib diff --git a/src/domain/knowledge_layers.py b/src/domain/knowledge_layers.py index 81315da5a..4c85c3fb6 100644 --- a/src/domain/knowledge_layers.py +++ b/src/domain/knowledge_layers.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Dict, Any, List, Optional +from typing import Dict, Any from pydantic import BaseModel, Field class KnowledgeLayer(str, Enum): diff --git a/src/domain/medical_rules_models.py b/src/domain/medical_rules_models.py index 62e874db5..50996909e 100644 --- a/src/domain/medical_rules_models.py +++ b/src/domain/medical_rules_models.py @@ -8,7 +8,7 @@ from dataclasses import dataclass, field from datetime import datetime from enum import Enum -from typing import Dict, List, Optional, Set, Any +from typing import Dict, List, Any class RuleSeverity(str, Enum): diff --git a/src/domain/metadata/workflow.py b/src/domain/metadata/workflow.py index 19f9ac4f3..797188f6d 100644 --- a/src/domain/metadata/workflow.py +++ b/src/domain/metadata/workflow.py @@ -6,7 +6,7 @@ """ from pydantic import Field -from typing import List, Optional, Dict, Any 
+from typing import List, Optional from datetime import datetime from .base import MetadataEntity diff --git a/src/domain/ontologies/registry.py b/src/domain/ontologies/registry.py index 0f15796a8..9373108fd 100644 --- a/src/domain/ontologies/registry.py +++ b/src/domain/ontologies/registry.py @@ -18,7 +18,7 @@ config = get_ontology_config("table") # -> data architecture (with alias) """ -from typing import Dict, List, Any, Optional, Set +from typing import Dict, List, Any, Optional from enum import Enum # Import core ODIN @@ -26,13 +26,8 @@ # Import medical extension from domain.ontologies.odin_medical import ( - ODINMedical, MEDICAL_ONTOLOGY_REGISTRY, MEDICAL_TYPE_ALIASES, - MedicalOntologySystem, - resolve_medical_type, - get_medical_ontology_config, - is_medical_type, ) diff --git a/src/domain/ontology_quality_models.py b/src/domain/ontology_quality_models.py index ae6720557..b8741c47d 100644 --- a/src/domain/ontology_quality_models.py +++ b/src/domain/ontology_quality_models.py @@ -314,7 +314,7 @@ def generate_recommendations(self) -> List[str]: # Use orphan breakdown for context-aware recommendations knowledge_orphans = self.taxonomy.orphan_breakdown.get("knowledge", 0) - episodic_orphans = self.taxonomy.orphan_breakdown.get("episodic", 0) + self.taxonomy.orphan_breakdown.get("episodic", 0) if knowledge_orphans > 0: recommendations.append( f"Connect {knowledge_orphans} knowledge orphan nodes to the hierarchy" diff --git a/src/domain/query_intent_models.py b/src/domain/query_intent_models.py index dc6a8b9aa..9db56a4ea 100644 --- a/src/domain/query_intent_models.py +++ b/src/domain/query_intent_models.py @@ -6,7 +6,7 @@ from dataclasses import dataclass, field from datetime import datetime from enum import Enum -from typing import Dict, List, Optional, Set +from typing import Dict, List class QueryIntent(str, Enum): diff --git a/src/domain/temporal_models.py b/src/domain/temporal_models.py index 77b39f69b..70a02d9e0 100644 --- a/src/domain/temporal_models.py 
+++ b/src/domain/temporal_models.py @@ -5,9 +5,9 @@ """ from dataclasses import dataclass, field -from datetime import datetime, timedelta +from datetime import datetime from enum import Enum -from typing import Dict, Optional, List +from typing import Dict, Optional import math diff --git a/src/infrastructure/architecture_graph_writer.py b/src/infrastructure/architecture_graph_writer.py index ae26db68f..62c0c903d 100644 --- a/src/infrastructure/architecture_graph_writer.py +++ b/src/infrastructure/architecture_graph_writer.py @@ -4,7 +4,7 @@ a structured graph from the parsed DDA document. """ -from typing import Dict, Any, List +from typing import Dict, Any from neo4j import GraphDatabase from domain.dda_models import DDADocument, DataEntity, Relationship diff --git a/src/infrastructure/database/config.py b/src/infrastructure/database/config.py index 2f51d0920..bc7091304 100644 --- a/src/infrastructure/database/config.py +++ b/src/infrastructure/database/config.py @@ -5,7 +5,6 @@ import os from dataclasses import dataclass -from typing import Optional @dataclass diff --git a/src/infrastructure/database/models.py b/src/infrastructure/database/models.py index 510648b9b..66ec1242e 100644 --- a/src/infrastructure/database/models.py +++ b/src/infrastructure/database/models.py @@ -9,8 +9,7 @@ - Feature Flags """ -from datetime import datetime -from typing import Optional, List, Dict, Any +from typing import Dict, Any from uuid import uuid4 from sqlalchemy import ( diff --git a/src/infrastructure/database/repositories.py b/src/infrastructure/database/repositories.py index 8984ab258..18e9924fa 100644 --- a/src/infrastructure/database/repositories.py +++ b/src/infrastructure/database/repositories.py @@ -9,7 +9,7 @@ from typing import List, Optional, Dict, Any, TypeVar, Generic from uuid import UUID -from sqlalchemy import select, update, delete, func, and_, or_ +from sqlalchemy import select, update, delete, func, and_ from sqlalchemy.ext.asyncio import AsyncSession from 
.models import ( @@ -21,7 +21,6 @@ DocumentQuality, OntologyQuality, AuditLog, - QueryAnalytics, FeatureFlag, ) @@ -235,12 +234,12 @@ async def get_statistics(self) -> Dict[str, Any]: # Positive/negative counts positive_result = await self.session.execute( - select(func.count()).select_from(Feedback).where(Feedback.thumbs_up == True) + select(func.count()).select_from(Feedback).where(Feedback.thumbs_up) ) positive = positive_result.scalar() or 0 negative_result = await self.session.execute( - select(func.count()).select_from(Feedback).where(Feedback.thumbs_up == False) + select(func.count()).select_from(Feedback).where(~Feedback.thumbs_up) ) negative = negative_result.scalar() or 0 @@ -310,7 +309,7 @@ async def get_for_training( """Get feedback not yet used for training.""" result = await self.session.execute( select(Feedback) - .where(Feedback.used_for_training == False) + .where(~Feedback.used_for_training) .order_by(Feedback.created_at.asc()) .limit(limit) ) diff --git a/src/infrastructure/graphiti_backend.py b/src/infrastructure/graphiti_backend.py index 80c746cde..9714e97c8 100644 --- a/src/infrastructure/graphiti_backend.py +++ b/src/infrastructure/graphiti_backend.py @@ -5,7 +5,7 @@ specific data structures (EntityNode, EntityEdge) and API.
""" -from typing import Any, Dict, Optional +from typing import Any, Dict from datetime import datetime import json diff --git a/src/infrastructure/neo4j_backend.py b/src/infrastructure/neo4j_backend.py index e789f9641..9e0c2e4e3 100644 --- a/src/infrastructure/neo4j_backend.py +++ b/src/infrastructure/neo4j_backend.py @@ -11,10 +11,9 @@ - APPLICATION: Query patterns and cached results """ -import asyncio import logging from datetime import datetime -from typing import Any, Dict, List, Tuple, Optional +from typing import Any, Dict, List, Optional from enum import Enum from neo4j import AsyncGraphDatabase from domain.kg_backends import KnowledgeGraphBackend diff --git a/src/infrastructure/parsers/markdown_parser.py b/src/infrastructure/parsers/markdown_parser.py index 5ad12dd16..701b88187 100644 --- a/src/infrastructure/parsers/markdown_parser.py +++ b/src/infrastructure/parsers/markdown_parser.py @@ -1,5 +1,6 @@ +import os import re -from typing import List, Dict, Any, Optional +from typing import List, Optional from datetime import datetime from application.agents.data_architect.dda_parser import DDAParser from domain.dda_models import DDADocument, DataEntity, Relationship, DataQualityRequirement, AccessPattern, Governance @@ -253,7 +254,7 @@ def _parse_relationship_block(self, block: str) -> Optional[Relationship]: return None # Extract relationship name from first line - relationship_name = lines[0].strip() + lines[0].strip() source_entity = "" target_entity = "" @@ -324,6 +325,3 @@ def _extract_governance(self, content: str) -> Governance: # you'd want more sophisticated parsing return Governance() - -# Add missing import -import os \ No newline at end of file diff --git a/src/infrastructure/redis_session_cache.py b/src/infrastructure/redis_session_cache.py index 70628d3f6..075701c51 100644 --- a/src/infrastructure/redis_session_cache.py +++ b/src/infrastructure/redis_session_cache.py @@ -155,7 +155,7 @@ async def list_patient_sessions( list[str]: List of 
active session IDs for the patient """ try: - pattern = f"session:*" + pattern = "session:*" sessions = [] async for key in self.redis.scan_iter(match=pattern): diff --git a/src/interfaces/cli.py b/src/interfaces/cli.py index d01aaf0e8..2d781e346 100644 --- a/src/interfaces/cli.py +++ b/src/interfaces/cli.py @@ -11,7 +11,6 @@ bootstrap_graphiti, bootstrap_knowledge_management, AGENT_REGISTRY, - create_modeling_command_handler, create_generate_metadata_command_handler, ) from application.commands.agent_commands import ( @@ -22,7 +21,6 @@ from application.commands.echo_command import EchoCommand from application.commands.file_commands import CreateFileCommand, ReadFileCommand from application.commands.shell_commands import ExecuteShellCommand -from application.commands.modeling_command import ModelingCommand from application.commands.metadata_command import GenerateMetadataCommand from application.agent_runner import AgentRunner from domain.communication import Message @@ -106,7 +104,7 @@ async def run_modeling(): result = asyncio.run(run_modeling()) if result["success"]: - typer.echo(f"✅ Metadata Generation & Enrichment completed successfully!") + typer.echo("✅ Metadata Generation & Enrichment completed successfully!") typer.echo(f" Domain: {result.get('domain', 'Unknown')}") # The result from MetadataGenerationWorkflow is a simple dict @@ -117,10 +115,10 @@ async def run_modeling(): typer.echo(f" Nodes: {stats.get('nodes', 0)}") typer.echo(f" Relationships: {stats.get('relationships', 0)}") - typer.echo(f"\n💡 Knowledge Graph is now populated with enriched data.") + typer.echo("\n💡 Knowledge Graph is now populated with enriched data.") else: - typer.echo(f"❌ Modeling failed:") + typer.echo("❌ Modeling failed:") for error in result.get('errors', []): typer.echo(f" - {error}") @@ -158,12 +156,12 @@ async def run_ingestion(): try: document = asyncio.run(run_ingestion()) - typer.echo(f"\n✅ Document Ingested Successfully!") + typer.echo("\n✅ Document Ingested 
Successfully!") typer.echo(f" ID: {document.id}") typer.echo(f" Name: {document.name}") typer.echo(f" Chunks: {document.chunk_count}") typer.echo(f" Hash: {document.content_hash}") - typer.echo(f"\n💡 View in Neo4j:") + typer.echo("\n💡 View in Neo4j:") typer.echo(f" MATCH (d:Document {{id: '{document.id}'}})-[:HAS_CHUNK]->(c) RETURN d, c") except Exception as e: @@ -482,7 +480,6 @@ def create_template( file_path = f"examples/{name.lower().replace(' ', '_')}_dda.md" # Create directory if it doesn't exist - import os os.makedirs(os.path.dirname(file_path), exist_ok=True) # Write template to file @@ -491,7 +488,7 @@ def create_template( typer.echo(f"✅ DDA template created: {file_path}") typer.echo(f"📝 Template name: {name}") - typer.echo(f"🔧 Next steps: Edit the template with domain-specific information") + typer.echo("🔧 Next steps: Edit the template with domain-specific information") @app.command("export-rlhf") @@ -521,8 +518,8 @@ def export_rlhf( from pathlib import Path async def run_export(): - from application.services.rlhf_data_extractor import RLHFDataExtractor, TrainingDataFormat - from application.formatters import TrainingDataFormatter, OutputFormat, export_to_file, FormatterConfig + from application.services.rlhf_data_extractor import RLHFDataExtractor + from application.formatters import TrainingDataFormatter, FormatterConfig # Initialize backend kg_backend, _ = await bootstrap_knowledge_management() @@ -534,7 +531,7 @@ async def run_export(): ) # Extract data - typer.echo(f"📊 Extracting RLHF data...") + typer.echo("📊 Extracting RLHF data...") result = await extractor.extract_all(layer_filter=layer) # Configure formatter @@ -587,17 +584,16 @@ async def run_export(): result, formatted_pairs, formatted_sft, output_format = asyncio.run(run_export()) # Display statistics - typer.echo(f"\n📈 Extraction Statistics:") + typer.echo("\n📈 Extraction Statistics:") typer.echo(f" Preference pairs: {len(result.preference_pairs)}") typer.echo(f" SFT examples: 
{len(result.sft_examples)}") if result.layer_analysis: - typer.echo(f"\n📊 Layer Analysis:") + typer.echo("\n📊 Layer Analysis:") for layer_name, analysis in result.layer_analysis.items(): typer.echo(f" {layer_name}: avg_rating={analysis.average_rating:.2f}, negative_rate={analysis.negative_rate:.1%}") # Export files - from pathlib import Path from application.formatters import export_to_file, OutputFormat output_path = Path(output) @@ -638,7 +634,7 @@ async def run_export(): # Show suggestions if result.layer_analysis: - typer.echo(f"\n💡 Improvement Suggestions:") + typer.echo("\n💡 Improvement Suggestions:") for layer_name, analysis in result.layer_analysis.items(): if analysis.improvement_suggestions: typer.echo(f" {layer_name}:") @@ -668,17 +664,17 @@ async def get_stats(): typer.echo(f"Total feedbacks: {stats.total_feedbacks}") typer.echo(f"Average rating: {stats.average_rating:.2f}") - typer.echo(f"\nRating Distribution:") + typer.echo("\nRating Distribution:") for rating, count in sorted(stats.rating_distribution.items()): bar = "█" * count typer.echo(f" {rating}★: {bar} ({count})") - typer.echo(f"\nFeedback Types:") + typer.echo("\nFeedback Types:") for ftype, count in stats.feedback_type_distribution.items(): typer.echo(f" {ftype}: {count}") if stats.layer_performance: - typer.echo(f"\nLayer Performance:") + typer.echo("\nLayer Performance:") for layer, perf in stats.layer_performance.items(): typer.echo(f" {layer}: avg={perf['avg_rating']:.2f}, negative_rate={perf['negative_rate']:.1%}") diff --git a/src/interfaces/kg_operations_api.py b/src/interfaces/kg_operations_api.py index cfbd47f41..9e015a4b2 100644 --- a/src/interfaces/kg_operations_api.py +++ b/src/interfaces/kg_operations_api.py @@ -8,14 +8,12 @@ - Health monitoring """ -import asyncio import logging -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional from datetime import datetime from fastapi import FastAPI, HTTPException, Depends, BackgroundTasks, 
Query from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import JSONResponse from pydantic import BaseModel, Field, validator from domain.event import KnowledgeEvent @@ -667,14 +665,14 @@ async def get_statistics( try: entity_result = await kg_backend.query("MATCH (n) RETURN n") entity_count = len(entity_result.get("nodes", {})) if isinstance(entity_result, dict) else 0 - except: + except Exception: entity_count = 0 # Get relationship count - use a simple query that should work try: rel_result = await kg_backend.query("MATCH ()-[r]->() RETURN r") rel_count = len(rel_result.get("edges", {})) if isinstance(rel_result, dict) else 0 - except: + except Exception: rel_count = 0 return { diff --git a/src/multi_agent_system.egg-info/SOURCES.txt b/src/multi_agent_system.egg-info/SOURCES.txt index 612a16753..20edeeced 100644 --- a/src/multi_agent_system.egg-info/SOURCES.txt +++ b/src/multi_agent_system.egg-info/SOURCES.txt @@ -1,5 +1,6 @@ .DS_Store .coverage +.env.example .gitignore .pre-commit-config.yaml CLAUDE.md @@ -37,6 +38,30 @@ PDFs/lupus/Systemic-Lupus-Erythematosus.pdf PDFs/lupus/article.pdf PDFs/lupus/comprehensive-review-on-systemic-lupus-erythematosus.pdf PDFs/lupus/s41392-025-02168-0.pdf +audit/queries/dangling_relationships.cypher +audit/queries/duplicate_entities.cypher +audit/queries/invalid_layers.cypher +audit/queries/label_distribution.cypher +audit/queries/layer_confidence_issues.cypher +audit/queries/layer_distribution.cypher +audit/queries/nodes_without_type.cypher +audit/queries/null_layers_with_type.cypher +audit/queries/ontology_mapped_stats.cypher +audit/queries/orphan_nodes.cypher +audit/queries/orphan_source_stats.cypher +audit/queries/rel_type_distribution.cypher +audit/queries/type_distribution.cypher +audit/reports/01_structural_integrity.md +audit/reports/02_backend_consistency.md +audit/reports/03_feature_completeness.md +audit/reports/04_ontology_coverage.md +audit/reports/05_remediation_pipeline.md 
+audit/reports/06_test_coverage_gaps.md +audit/reports/07_operational_readiness.md +audit/reports/08_consolidated_recommendations.md +audit/reports/09_post_remediation_stats.md +audit/reports/pre_remediation_snapshot.csv +audit/reports/structural_integrity_raw.json config/agents.distributed.yaml config/agents.yaml data/document_tracking.json @@ -17434,7 +17459,6 @@ frontend/node_modules/zwitch/readme.md frontend/src/env.d.ts frontend/src/components/Navigation.astro frontend/src/components/Navigation.tsx -frontend/src/components/admin/AgentMonitor.tsx frontend/src/components/admin/DocumentDetailView.tsx frontend/src/components/admin/DocumentManagement.tsx frontend/src/components/admin/GDPRTools.tsx @@ -17443,12 +17467,23 @@ frontend/src/components/admin/JobNotificationToast.tsx frontend/src/components/admin/JobQueuePanel.tsx frontend/src/components/admin/PatientManagement.tsx frontend/src/components/admin/QualityDashboard.tsx -frontend/src/components/admin/SystemStats.tsx frontend/src/components/admin/document-detail/DocumentEntitiesTab.tsx frontend/src/components/admin/document-detail/DocumentGraphTab.tsx frontend/src/components/admin/document-detail/DocumentOverviewTab.tsx frontend/src/components/admin/document-detail/DocumentPreviewTab.tsx frontend/src/components/admin/document-detail/DocumentQualityTab.tsx +frontend/src/components/admin/ops-dashboard/AgentsPanel.tsx +frontend/src/components/admin/ops-dashboard/CrystallizationPanel.tsx +frontend/src/components/admin/ops-dashboard/DataSyncPanel.tsx +frontend/src/components/admin/ops-dashboard/FeedbackPanel.tsx +frontend/src/components/admin/ops-dashboard/HealthBar.tsx +frontend/src/components/admin/ops-dashboard/KnowledgeGraphPanel.tsx +frontend/src/components/admin/ops-dashboard/OntologyQualityPanel.tsx +frontend/src/components/admin/ops-dashboard/OperationsDashboard.tsx +frontend/src/components/admin/ops-dashboard/PanelWrapper.tsx +frontend/src/components/admin/ops-dashboard/PromotionGatePanel.tsx 
+frontend/src/components/admin/ops-dashboard/types.ts +frontend/src/components/admin/ops-dashboard/usePanelQuery.ts frontend/src/components/charts/MetricRadarChart.tsx frontend/src/components/charts/QualityTrendChart.tsx frontend/src/components/charts/SparklineChart.tsx @@ -17524,15 +17559,96 @@ markdown_output/jrcollphyslond146952-0082.md markdown_output/s1.full.md markdown_output/s41392-025-02168-0.md markdown_output/the-autoimmune-diseases-6nbsped-0128121025-9780128121023_compress.md -openspec/changes/ontology-type-mapping-remediation/.openspec.yaml -openspec/changes/ontology-type-mapping-remediation/design.md -openspec/changes/ontology-type-mapping-remediation/proposal.md -openspec/changes/ontology-type-mapping-remediation/tasks.md -openspec/changes/ontology-type-mapping-remediation/specs/kg-remediation-api/spec.md -openspec/changes/ontology-type-mapping-remediation/specs/medical-schema-compliance/spec.md -openspec/changes/ontology-type-mapping-remediation/specs/ontology-type-completeness/spec.md +openspec/config.yaml +openspec/changes/archive/2026-02-12-reclassify-conversation-nodes/.openspec.yaml +openspec/changes/archive/2026-02-12-reclassify-conversation-nodes/design.md +openspec/changes/archive/2026-02-12-reclassify-conversation-nodes/proposal.md +openspec/changes/archive/2026-02-12-reclassify-conversation-nodes/tasks.md +openspec/changes/archive/2026-02-12-reclassify-conversation-nodes/specs/conversation-node-classification/spec.md +openspec/changes/archive/2026-02-12-reclassify-conversation-nodes/specs/kg-remediation-api/spec.md +openspec/changes/archive/2026-02-13-kg-quality-improvement/.openspec.yaml +openspec/changes/archive/2026-02-13-kg-quality-improvement/design.md +openspec/changes/archive/2026-02-13-kg-quality-improvement/proposal.md +openspec/changes/archive/2026-02-13-kg-quality-improvement/tasks.md +openspec/changes/archive/2026-02-13-kg-quality-improvement/specs/kg-remediation-api/spec.md 
+openspec/changes/archive/2026-02-13-kg-quality-improvement/specs/medical-schema-compliance/spec.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/.openspec.yaml +openspec/changes/archive/2026-02-17-kg-audit-remediation/design.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/proposal.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/tasks.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/specs/cypher-injection-fix/spec.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/specs/entity-deduplication/spec.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/specs/kg-remediation-api/spec.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/specs/ontology-type-completeness/spec.md +openspec/changes/archive/2026-02-17-kg-audit-remediation/specs/secrets-management/spec.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/.openspec.yaml +openspec/changes/archive/2026-02-17-knowledge-graph-audit/design.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/proposal.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/tasks.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/specs/backend-consistency/spec.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/specs/feature-completeness-inventory/spec.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/specs/kg-remediation-api/spec.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/specs/kg-structural-integrity/spec.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/specs/ontology-type-completeness/spec.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/specs/operational-readiness/spec.md +openspec/changes/archive/2026-02-17-knowledge-graph-audit/specs/test-coverage-gaps/spec.md +openspec/changes/archive/2026-02-17-ontology-type-mapping-remediation/.openspec.yaml +openspec/changes/archive/2026-02-17-ontology-type-mapping-remediation/design.md 
+openspec/changes/archive/2026-02-17-ontology-type-mapping-remediation/proposal.md +openspec/changes/archive/2026-02-17-ontology-type-mapping-remediation/tasks.md +openspec/changes/archive/2026-02-17-ontology-type-mapping-remediation/specs/kg-remediation-api/spec.md +openspec/changes/archive/2026-02-17-ontology-type-mapping-remediation/specs/medical-schema-compliance/spec.md +openspec/changes/archive/2026-02-17-ontology-type-mapping-remediation/specs/ontology-type-completeness/spec.md +openspec/changes/archive/2026-02-17-upgrade-graphiti-core/.openspec.yaml +openspec/changes/archive/2026-02-17-upgrade-graphiti-core/GRAPHITI_BUG_REPORT.md +openspec/changes/archive/2026-02-17-upgrade-graphiti-core/design.md +openspec/changes/archive/2026-02-17-upgrade-graphiti-core/proposal.md +openspec/changes/archive/2026-02-17-upgrade-graphiti-core/tasks.md +openspec/changes/archive/2026-02-17-upgrade-graphiti-core/specs/episodic-group-id/spec.md +openspec/changes/archive/2026-02-18-admin-ops-dashboard/.openspec.yaml +openspec/changes/archive/2026-02-18-admin-ops-dashboard/design.md +openspec/changes/archive/2026-02-18-admin-ops-dashboard/proposal.md +openspec/changes/archive/2026-02-18-admin-ops-dashboard/tasks.md +openspec/changes/archive/2026-02-18-admin-ops-dashboard/specs/ops-dashboard-panels/spec.md +openspec/changes/archive/2026-02-18-admin-ops-dashboard/specs/real-agent-monitoring/spec.md +openspec/changes/archive/2026-02-18-entity-duplicate-review/.openspec.yaml +openspec/changes/archive/2026-02-18-entity-duplicate-review/design.md +openspec/changes/archive/2026-02-18-entity-duplicate-review/proposal.md +openspec/changes/archive/2026-02-18-entity-duplicate-review/tasks.md +openspec/changes/archive/2026-02-18-entity-duplicate-review/specs/duplicate-review-workflow/spec.md +openspec/changes/archive/2026-02-18-entity-duplicate-review/specs/entity-deduplication/spec.md +openspec/changes/archive/2026-02-18-kg-assessment-sprint2/.openspec.yaml 
+openspec/changes/archive/2026-02-18-kg-assessment-sprint2/design.md +openspec/changes/archive/2026-02-18-kg-assessment-sprint2/proposal.md +openspec/changes/archive/2026-02-18-kg-assessment-sprint2/tasks.md +openspec/changes/archive/2026-02-18-kg-assessment-sprint2/specs/ontology-type-completeness/spec.md +openspec/changes/archive/2026-02-18-kg-assessment-sprint2/specs/quality-assessment-accuracy/spec.md +openspec/changes/archive/2026-02-18-postgres-conversations/.openspec.yaml +openspec/changes/archive/2026-02-18-postgres-conversations/design.md +openspec/changes/archive/2026-02-18-postgres-conversations/proposal.md +openspec/changes/archive/2026-02-18-postgres-conversations/tasks.md +openspec/changes/archive/2026-02-18-postgres-conversations/specs/conversation-data-sync/spec.md +openspec/changes/archive/2026-02-18-postgres-conversations/specs/ops-dashboard-panels/spec.md +openspec/changes/archive/2026-02-18-postgres-conversations/specs/postgres-session-storage/spec.md +openspec/specs/backend-consistency/spec.md +openspec/specs/conversation-data-sync/spec.md +openspec/specs/conversation-node-classification/spec.md +openspec/specs/cypher-injection-fix/spec.md +openspec/specs/duplicate-review-workflow/spec.md +openspec/specs/entity-deduplication/spec.md +openspec/specs/episodic-group-id/spec.md +openspec/specs/feature-completeness-inventory/spec.md openspec/specs/kg-remediation-api/spec.md +openspec/specs/kg-structural-integrity/spec.md openspec/specs/medical-schema-compliance/spec.md +openspec/specs/ontology-type-completeness/spec.md +openspec/specs/operational-readiness/spec.md +openspec/specs/ops-dashboard-panels/spec.md +openspec/specs/postgres-session-storage/spec.md +openspec/specs/quality-assessment-accuracy/spec.md +openspec/specs/real-agent-monitoring/spec.md +openspec/specs/secrets-management/spec.md +openspec/specs/test-coverage-gaps/spec.md scripts/README.md scripts/__init__.py scripts/analyze_token_costs.py @@ -17621,6 +17737,7 @@ 
src/application/api/evaluation_models.py src/application/api/evaluation_router.py src/application/api/kg_router.py src/application/api/main.py +src/application/api/remediation_router.py src/application/commands/__init__.py src/application/commands/agent_commands.py src/application/commands/base.py @@ -17656,6 +17773,7 @@ src/application/services/conversation_router.py src/application/services/conversational_intent_service.py src/application/services/cross_graph_query_builder.py src/application/services/crystallization_service.py +src/application/services/deduplication_service.py src/application/services/dikw_router.py src/application/services/document_quality_service.py src/application/services/document_service.py @@ -17752,6 +17870,8 @@ src/infrastructure/__init__.py src/infrastructure/agent_infrastructure_builder.py src/infrastructure/architecture_graph_writer.py src/infrastructure/communication.py +src/infrastructure/config_validation.py +src/infrastructure/cypher_utils.py src/infrastructure/falkor_backend.py src/infrastructure/graphiti.py src/infrastructure/graphiti_backend.py @@ -17825,6 +17945,7 @@ tests/application/test_automatic_layer_transition.py tests/application/test_command_bus.py tests/application/test_confidence_framework.py tests/application/test_data_engineer_metadata_integration.py +tests/application/test_deduplication_service.py tests/application/test_document_quality_service.py tests/application/test_entity_resolver.py tests/application/test_extended_kg_audit.py @@ -17843,6 +17964,8 @@ tests/application/test_neurosymbolic_query_service.py tests/application/test_ontology_quality_service.py tests/application/test_quality_models.py tests/application/test_quality_scanner_job.py +tests/application/test_remediation_router.py +tests/application/test_remediation_service.py tests/application/test_semantic_grounding.py tests/application/test_semantic_normalizer.py tests/application/test_shell_commands.py @@ -17899,6 +18022,8 @@ 
tests/eval/scenarios/temporal_reasoning/past_medication.yaml tests/fixtures/crohn_dda.pdf tests/infrastructure/test_a2a_channel.py tests/infrastructure/test_communication.py +tests/infrastructure/test_config_validation.py +tests/infrastructure/test_cypher_utils.py tests/infrastructure/test_falkor_backend.py tests/infrastructure/__pycache__/test_communication.cpython-311-pytest-8.4.0.pyc tests/infrastructure/__pycache__/test_llm_transformer.cpython-311-pytest-8.4.0.pyc diff --git a/src/services/governance_engine.py b/src/services/governance_engine.py index ec60c3db1..9bc74aac9 100644 --- a/src/services/governance_engine.py +++ b/src/services/governance_engine.py @@ -16,6 +16,7 @@ in a safe sandbox, and records any violations. """ +import asyncio import os import yaml from typing import List, Dict, Any @@ -37,8 +38,6 @@ def load_rules(path: str = RULES_PATH) -> List[Dict[str, Any]]: return data or [] -import asyncio - def evaluate_node(node: Dict[str, Any], condition: str) -> bool: """Safely evaluate a condition expression against a node. 
diff --git a/src/services/metadata_enrichment.py b/src/services/metadata_enrichment.py index 3997ce011..cc01289ae 100644 --- a/src/services/metadata_enrichment.py +++ b/src/services/metadata_enrichment.py @@ -8,7 +8,6 @@ python -m src.services.metadata_enrichment """ -import os import sys import json from pathlib import Path @@ -16,7 +15,6 @@ # Attempt to import Graphiti if available; otherwise fall back to heuristic parser try: - from graphiti_core import Graphiti # type: ignore _graphiti_available = True except Exception: _graphiti_available = False diff --git a/tests/application/test_automatic_layer_transition.py b/tests/application/test_automatic_layer_transition.py index 0019ab6cf..c0f68c9f4 100644 --- a/tests/application/test_automatic_layer_transition.py +++ b/tests/application/test_automatic_layer_transition.py @@ -2,7 +2,7 @@ import pytest from datetime import datetime, timedelta -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, MagicMock from application.services.automatic_layer_transition import ( AutomaticLayerTransitionService, @@ -11,8 +11,6 @@ ) from application.services.layer_transition import ( Layer, - TransitionStatus, - LayerTransitionRecord, ) from domain.event import KnowledgeEvent from domain.roles import Role @@ -130,7 +128,7 @@ def test_initialization_custom_thresholds(self, mock_backend, mock_event_bus): def test_event_subscriptions(self, mock_backend, mock_event_bus): """Test that service subscribes to required events.""" - service = AutomaticLayerTransitionService( + AutomaticLayerTransitionService( backend=mock_backend, event_bus=mock_event_bus, ) diff --git a/tests/application/test_confidence_framework.py b/tests/application/test_confidence_framework.py index d6fb7c112..abc691f4b 100644 --- a/tests/application/test_confidence_framework.py +++ b/tests/application/test_confidence_framework.py @@ -1,14 +1,11 @@ """Unit tests for ConfidenceFrameworkService.""" import pytest -from datetime import 
datetime from application.services.confidence_framework import ( ConfidenceFrameworkService, - ConfidenceConfig, - WorkflowStep + ConfidenceConfig ) from domain.confidence_models import ( - Confidence, ConfidenceSource, AggregationStrategy, neural_confidence, diff --git a/tests/application/test_data_engineer_metadata_integration.py b/tests/application/test_data_engineer_metadata_integration.py index ba8cc7dfc..ca1702cc0 100644 --- a/tests/application/test_data_engineer_metadata_integration.py +++ b/tests/application/test_data_engineer_metadata_integration.py @@ -1,14 +1,13 @@ """Integration tests for Data Engineer metadata command registration.""" import unittest -from unittest.mock import Mock, AsyncMock, patch +from unittest.mock import Mock, AsyncMock import tempfile import os from application.commands.base import CommandBus from application.commands.metadata_command import GenerateMetadataCommand from application.agents.data_engineer.handlers.generate_metadata import GenerateMetadataCommandHandler from composition_root import create_generate_metadata_command_handler, bootstrap_knowledge_management -from infrastructure.in_memory_backend import InMemoryGraphBackend from graphiti_core import Graphiti diff --git a/tests/application/test_deduplication_service.py b/tests/application/test_deduplication_service.py index 7c1eea4c9..9e44ae21c 100644 --- a/tests/application/test_deduplication_service.py +++ b/tests/application/test_deduplication_service.py @@ -9,10 +9,8 @@ import pytest from unittest.mock import AsyncMock, MagicMock from application.services.deduplication_service import ( - CrossTypeDuplicateGroup, DeduplicationService, DuplicatePair, - MergePlan, ) diff --git a/tests/application/test_document_quality_service.py b/tests/application/test_document_quality_service.py index d624b1246..eb53d9b19 100644 --- a/tests/application/test_document_quality_service.py +++ b/tests/application/test_document_quality_service.py @@ -4,7 +4,6 @@ """ import pytest -from 
unittest.mock import AsyncMock, MagicMock, patch from dataclasses import dataclass from typing import Dict, Any @@ -13,7 +12,6 @@ QualityConfig, quick_quality_check, ) -from src.application.services.text_chunker import TextChunk from domain.quality_models import QualityLevel diff --git a/tests/application/test_entity_resolver.py b/tests/application/test_entity_resolver.py index 0502cd3b7..bfcd047ca 100644 --- a/tests/application/test_entity_resolver.py +++ b/tests/application/test_entity_resolver.py @@ -11,7 +11,7 @@ # Check if rapidfuzz is available try: - import rapidfuzz + import rapidfuzz # noqa: F401 RAPIDFUZZ_AVAILABLE = True except ImportError: RAPIDFUZZ_AVAILABLE = False @@ -205,7 +205,7 @@ async def test_embedding_match_similar(self, resolver, mock_backend, sample_enti with patch('sklearn.metrics.pairwise.cosine_similarity') as mock_cosine: mock_cosine.return_value = [[0.95]] - result = await resolver.resolve_entity( + await resolver.resolve_entity( "Client", "BusinessConcept", strategy=ResolutionStrategy.EMBEDDING_SIMILARITY diff --git a/tests/application/test_extended_kg_audit.py b/tests/application/test_extended_kg_audit.py index 60ff4871f..fd908e5c5 100644 --- a/tests/application/test_extended_kg_audit.py +++ b/tests/application/test_extended_kg_audit.py @@ -5,8 +5,7 @@ """ import pytest -from datetime import datetime -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock from domain.chunk_separation_models import ( SubgraphType, @@ -24,7 +23,6 @@ from application.services.extended_kg_audit_service import ExtendedKGAuditService from application.services.chunk_impact_analyzer import ( ChunkImpactAnalyzer, - ChunkDependency, CHUNK_DEPENDENCIES, ) diff --git a/tests/application/test_feedback_tracer.py b/tests/application/test_feedback_tracer.py index 2171d3281..71e670392 100644 --- a/tests/application/test_feedback_tracer.py +++ b/tests/application/test_feedback_tracer.py @@ -1,9 +1,8 @@ """Unit tests for Feedback 
Tracer Service.""" import pytest -from datetime import datetime, timedelta -from unittest.mock import AsyncMock, MagicMock, patch -import uuid +from datetime import datetime +from unittest.mock import AsyncMock, MagicMock from application.services.feedback_tracer import ( FeedbackTracerService, @@ -193,7 +192,7 @@ async def test_submit_feedback_basic(self, service): @pytest.mark.asyncio async def test_submit_feedback_negative(self, service): """Test negative feedback submission.""" - feedback = await service.submit_feedback( + await service.submit_feedback( response_id="response-123", patient_id="patient-456", session_id="session-789", diff --git a/tests/application/test_hypergraph_bridge.py b/tests/application/test_hypergraph_bridge.py index cf7a5ceaf..5bc8e3a32 100644 --- a/tests/application/test_hypergraph_bridge.py +++ b/tests/application/test_hypergraph_bridge.py @@ -4,13 +4,11 @@ """ import pytest -from datetime import datetime -from unittest.mock import AsyncMock, MagicMock +from unittest.mock import AsyncMock from domain.hypergraph_models import ( FactUnit, FactType, - HyperEdge, EntityMention, ConfidenceScore, ConfidenceSource, diff --git a/tests/application/test_knowledge_manager_agent.py b/tests/application/test_knowledge_manager_agent.py index ec40f0d78..da5a36c07 100644 --- a/tests/application/test_knowledge_manager_agent.py +++ b/tests/application/test_knowledge_manager_agent.py @@ -1,17 +1,13 @@ """Tests for the Knowledge Manager Agent.""" import pytest -import asyncio from unittest.mock import Mock, AsyncMock, patch -from datetime import datetime, timezone from src.application.agents.knowledge_manager.agent import ( KnowledgeManagerAgent, KGUpdateRequest, KGUpdateType, KGUpdateResult ) -from src.domain.communication import Message -from src.domain.agent import Agent class TestKnowledgeManagerAgent: diff --git a/tests/application/test_layer_transition.py b/tests/application/test_layer_transition.py index 60092bc73..365d0ffbe 100644 --- 
a/tests/application/test_layer_transition.py +++ b/tests/application/test_layer_transition.py @@ -2,7 +2,7 @@ import pytest from datetime import datetime, timedelta -from unittest.mock import AsyncMock, MagicMock +from unittest.mock import MagicMock from application.services.layer_transition import ( LayerTransitionService, LayerTransitionRequest, diff --git a/tests/application/test_neurosymbolic_query_service.py b/tests/application/test_neurosymbolic_query_service.py index 0cc0174da..80be4ed0b 100644 --- a/tests/application/test_neurosymbolic_query_service.py +++ b/tests/application/test_neurosymbolic_query_service.py @@ -1,8 +1,8 @@ """Unit tests for Neurosymbolic Query Service.""" import pytest -from datetime import datetime, timedelta -from unittest.mock import AsyncMock, MagicMock, patch +from datetime import datetime +from unittest.mock import AsyncMock, MagicMock from application.services.neurosymbolic_query_service import ( NeurosymbolicQueryService, @@ -12,7 +12,6 @@ QueryTrace, ) from domain.confidence_models import ( - Confidence, ConfidenceSource, KnowledgeLayer, CrossLayerConfidencePropagation, diff --git a/tests/application/test_ontology_quality_service.py b/tests/application/test_ontology_quality_service.py index cab90838e..a7662900a 100644 --- a/tests/application/test_ontology_quality_service.py +++ b/tests/application/test_ontology_quality_service.py @@ -4,8 +4,8 @@ """ import pytest -from unittest.mock import AsyncMock, MagicMock, patch -from typing import Dict, Any, List, Set +from unittest.mock import AsyncMock, patch +from typing import Dict, Any, List from domain.ontology_quality_models import OntologyQualityLevel diff --git a/tests/application/test_quality_models.py b/tests/application/test_quality_models.py index e67f0cd7d..e97ccbf10 100644 --- a/tests/application/test_quality_models.py +++ b/tests/application/test_quality_models.py @@ -4,7 +4,6 @@ """ import pytest -from datetime import datetime from src.domain.quality_models import ( 
QualityLevel, ContextualRelevancyScore, @@ -617,7 +616,7 @@ def test_interoperability_score_defaults(self): assert score.schema_org_types == 0 assert score.schema_org_properties == 0 assert score.schema_org_coverage == 0.0 - assert score.linked_data_ready == False - assert score.sparql_compatible == False - assert score.rdf_exportable == False + assert not score.linked_data_ready + assert not score.sparql_compatible + assert not score.rdf_exportable assert score.exchange_readiness == 0.0 diff --git a/tests/application/test_quality_scanner_job.py b/tests/application/test_quality_scanner_job.py index b9a627f26..1478b88fe 100644 --- a/tests/application/test_quality_scanner_job.py +++ b/tests/application/test_quality_scanner_job.py @@ -4,7 +4,7 @@ """ import pytest -from unittest.mock import AsyncMock, MagicMock, patch, PropertyMock +from unittest.mock import AsyncMock, MagicMock, patch from datetime import datetime from pathlib import Path from dataclasses import dataclass diff --git a/tests/application/test_remediation_router.py b/tests/application/test_remediation_router.py index 930b7ee7b..2bcfea781 100644 --- a/tests/application/test_remediation_router.py +++ b/tests/application/test_remediation_router.py @@ -8,14 +8,13 @@ """ import pytest -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, MagicMock from fastapi.testclient import TestClient from application.api.remediation_router import ( router, set_remediation_service, set_deduplication_service, - _remediation_service, ) from fastapi import FastAPI diff --git a/tests/application/test_remediation_service.py b/tests/application/test_remediation_service.py index fae9e7bcc..d351af38f 100644 --- a/tests/application/test_remediation_service.py +++ b/tests/application/test_remediation_service.py @@ -8,7 +8,7 @@ """ import pytest -from unittest.mock import AsyncMock, MagicMock +from unittest.mock import MagicMock from application.services.remediation_service import ( 
RemediationService, diff --git a/tests/application/test_semantic_grounding.py b/tests/application/test_semantic_grounding.py index 33659e72b..941834bbd 100644 --- a/tests/application/test_semantic_grounding.py +++ b/tests/application/test_semantic_grounding.py @@ -12,7 +12,7 @@ # Check if sentence-transformers is available try: - import sentence_transformers + import sentence_transformers # noqa: F401 SENTENCE_TRANSFORMERS_AVAILABLE = True except ImportError: SENTENCE_TRANSFORMERS_AVAILABLE = False diff --git a/tests/application/test_semantic_normalizer.py b/tests/application/test_semantic_normalizer.py index 2f1d33b90..15e350474 100644 --- a/tests/application/test_semantic_normalizer.py +++ b/tests/application/test_semantic_normalizer.py @@ -154,7 +154,7 @@ def test_add_custom_rule(self, normalizer): def test_domain_specific_rules(self): """Test domain-specific rules.""" normalizer1 = SemanticNormalizer(domain="healthcare") - normalizer2 = SemanticNormalizer(domain="finance") + SemanticNormalizer(domain="finance") # Add domain-specific rule rule = NormalizationRule( diff --git a/tests/application/test_type_inference.py b/tests/application/test_type_inference.py index 2af488f15..0385d45ed 100644 --- a/tests/application/test_type_inference.py +++ b/tests/application/test_type_inference.py @@ -1,7 +1,7 @@ """Tests for TypeInferenceService.""" import unittest -from unittest.mock import Mock, AsyncMock, patch +from unittest.mock import Mock from application.agents.data_engineer.type_inference import TypeInferenceService from domain.odin_models import DataType, DataTypeEntity from graphiti_core import Graphiti @@ -376,7 +376,7 @@ async def test_infer_data_type_with_context_description(self): self.mock_llm.process = Mock(return_value=mock_doc) context = {"description": "A long description that might contain useful information"} - result = await self.service.infer_data_type("test_field", context) + await self.service.infer_data_type("test_field", context) # Verify 
description was included in prompt call_args = self.mock_llm.process.call_args[0][0] diff --git a/tests/conftest.py b/tests/conftest.py index c8ce32660..ce8e71dc8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,4 @@ -import pytest import os -from typing import AsyncGenerator from dotenv import load_dotenv load_dotenv() diff --git a/tests/domain/metadata/conftest.py b/tests/domain/metadata/conftest.py index bf5efcc1c..ad465b3c7 100644 --- a/tests/domain/metadata/conftest.py +++ b/tests/domain/metadata/conftest.py @@ -14,7 +14,6 @@ @pytest.fixture def sample_metadata_entities(): """Provide sample metadata entities for testing.""" - from domain.metadata.base import MetadataEntity from domain.metadata.database import Cluster, Database from domain.metadata.schema import Schema from domain.metadata.table import Table, Column @@ -96,7 +95,7 @@ def sample_metadata_entities(): @pytest.fixture def sample_relationships(): """Provide sample relationships for testing.""" - from domain.metadata.relationships import MetadataRelationship, RelationshipTypes + from domain.metadata.relationships import MetadataRelationship # Create sample relationships db_schema_rel = MetadataRelationship( diff --git a/tests/domain/metadata/test_database.py b/tests/domain/metadata/test_database.py index 8083a7728..8a72cc730 100644 --- a/tests/domain/metadata/test_database.py +++ b/tests/domain/metadata/test_database.py @@ -3,7 +3,6 @@ import unittest import os import sys -from datetime import datetime # Ensure the src directory is on the import path _TEST_DIR = os.path.dirname(__file__) diff --git a/tests/domain/metadata/test_integration.py b/tests/domain/metadata/test_integration.py index 027038071..b976bfe4c 100644 --- a/tests/domain/metadata/test_integration.py +++ b/tests/domain/metadata/test_integration.py @@ -1,14 +1,13 @@ """Integration tests for the complete metadata system.""" import unittest -from datetime import datetime, timezone -from domain.metadata.base import 
MetadataEntity +from datetime import datetime from domain.metadata.database import Cluster, Database from domain.metadata.schema import Schema from domain.metadata.table import Table, Column, ColumnStats from domain.metadata.metadata_objects import Tag, Watermark, Description from domain.metadata.workflow import User, AirflowDag -from domain.metadata.relationships import MetadataRelationship, RelationshipTypes +from domain.metadata.relationships import MetadataRelationship class TestMetadataSystemIntegration(unittest.TestCase): @@ -332,7 +331,7 @@ def test_metadata_system_performance(self): # Create multiple entities for i in range(100): - tag = Tag( + Tag( name=f"Test Tag {i}", tag_name=f"Test Tag {i}", tag_type="test" diff --git a/tests/domain/metadata/test_metadata_objects.py b/tests/domain/metadata/test_metadata_objects.py index 160547747..1b57fb694 100644 --- a/tests/domain/metadata/test_metadata_objects.py +++ b/tests/domain/metadata/test_metadata_objects.py @@ -1,7 +1,6 @@ """Tests for Tag, Watermark and Description metadata models.""" import unittest -from datetime import datetime, timezone from domain.metadata.metadata_objects import Tag, Watermark, Description from domain.metadata.table import Table from domain.metadata.schema import Schema diff --git a/tests/domain/metadata/test_relationships.py b/tests/domain/metadata/test_relationships.py index d0301dc4c..7ae4bd5fb 100644 --- a/tests/domain/metadata/test_relationships.py +++ b/tests/domain/metadata/test_relationships.py @@ -1,7 +1,7 @@ """Tests for metadata relationships.""" import unittest -from datetime import datetime, timezone +from datetime import datetime from domain.metadata.relationships import MetadataRelationship, RelationshipTypes diff --git a/tests/domain/metadata/test_workflow.py b/tests/domain/metadata/test_workflow.py index a436c5af4..3c766a6a7 100644 --- a/tests/domain/metadata/test_workflow.py +++ b/tests/domain/metadata/test_workflow.py @@ -1,7 +1,7 @@ """Tests for workflow metadata 
models.""" import unittest -from datetime import datetime, timezone +from datetime import datetime from domain.metadata.workflow import User, AirflowDag diff --git a/tests/domain/ontologies/test_ontology_registry.py b/tests/domain/ontologies/test_ontology_registry.py index b21c46d7c..2b5e666af 100644 --- a/tests/domain/ontologies/test_ontology_registry.py +++ b/tests/domain/ontologies/test_ontology_registry.py @@ -3,7 +3,6 @@ Tests the medical ontology extension and unified registry functionality. """ -import pytest from domain.ontologies.registry import ( get_ontology_config, is_known_type, diff --git a/tests/domain/test_canonical_concepts.py b/tests/domain/test_canonical_concepts.py index 41e7b3f29..cab3071fb 100644 --- a/tests/domain/test_canonical_concepts.py +++ b/tests/domain/test_canonical_concepts.py @@ -7,8 +7,7 @@ ConceptAlias, ConceptRelationship, ConceptRegistry, - ConceptStatus, - ConceptConfidenceSource + ConceptStatus ) diff --git a/tests/eval/conftest.py b/tests/eval/conftest.py index ecfc12f28..6398cf406 100644 --- a/tests/eval/conftest.py +++ b/tests/eval/conftest.py @@ -14,7 +14,7 @@ import os import pytest from pathlib import Path -from typing import Dict, List, Optional +from typing import List, Optional from tests.eval.runner import ( MemoryInspector, diff --git a/tests/eval/pytest_plugin.py b/tests/eval/pytest_plugin.py index a01d1cb16..20be93190 100644 --- a/tests/eval/pytest_plugin.py +++ b/tests/eval/pytest_plugin.py @@ -22,7 +22,7 @@ import os import pytest from pathlib import Path -from typing import List, Optional +from typing import List from tests.eval.runner import ( ScenarioLoader, @@ -31,8 +31,8 @@ Scenario, EvalResult, ) -from tests.eval.runner.evaluators import create_mock_judge, LLMJudgeEvaluator -from tests.eval.runner.reporting import ReportManager, SuiteReport +from tests.eval.runner.evaluators import create_mock_judge +from tests.eval.runner.reporting import ReportManager # ======================================== @@ -267,7 
+267,7 @@ def pytest_generate_tests(metafunc): severity=severity, tag=tag, ) - except Exception as e: + except Exception: # No scenarios found, skip test generation scenarios = [] diff --git a/tests/eval/runner/evaluators/deterministic.py b/tests/eval/runner/evaluators/deterministic.py index bd2cb4a78..8a66072a0 100644 --- a/tests/eval/runner/evaluators/deterministic.py +++ b/tests/eval/runner/evaluators/deterministic.py @@ -10,7 +10,7 @@ import re from abc import ABC, abstractmethod from difflib import SequenceMatcher -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Callable, Dict, List, Optional from ..models import AssertionResult, AssertionSeverity from ..scenario_models import DeterministicAssertion @@ -430,7 +430,7 @@ def evaluate( details = f"Similarity: {similarity:.2%} (threshold: {threshold:.0%})" if not passed: - details += f" - Below threshold" + details += " - Below threshold" return AssertionResult( assertion_type=self.assertion_type, @@ -508,7 +508,7 @@ def evaluate( details = f"Semantic similarity ({method}): {similarity:.2%} (threshold: {threshold:.0%})" if not passed: - details += f" - Below threshold" + details += " - Below threshold" return AssertionResult( assertion_type=self.assertion_type, diff --git a/tests/eval/runner/evaluators/llm_judge.py b/tests/eval/runner/evaluators/llm_judge.py index 222bc13b1..fca7f5aaf 100644 --- a/tests/eval/runner/evaluators/llm_judge.py +++ b/tests/eval/runner/evaluators/llm_judge.py @@ -8,10 +8,9 @@ import json import logging import re -from abc import ABC, abstractmethod -from dataclasses import dataclass, field +from dataclasses import dataclass from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional from ..models import AssertionResult, AssertionSeverity from ..scenario_models import JudgeAssertion @@ -504,7 +503,7 @@ def _build_prompt( if context: context_items = [f"- {k}: 
{v}" for k, v in context.items() if v] if context_items: - context_str = f"\n\nAdditional Context:\n" + "\n".join(context_items) + context_str = "\n\nAdditional Context:\n" + "\n".join(context_items) prompt = f"""You are an expert medical AI evaluator. Your task is to evaluate an AI medical assistant's response to a patient. diff --git a/tests/eval/runner/evaluators/test_deterministic.py b/tests/eval/runner/evaluators/test_deterministic.py index 0d7877aa6..9b9a97dec 100644 --- a/tests/eval/runner/evaluators/test_deterministic.py +++ b/tests/eval/runner/evaluators/test_deterministic.py @@ -7,22 +7,6 @@ from tests.eval.runner.evaluators.deterministic import ( DeterministicEvaluator, - MustContainEvaluator, - MustNotContainEvaluator, - MustContainOneOfEvaluator, - RegexMatchEvaluator, - RegexNotMatchEvaluator, - NotEmptyEvaluator, - MaxLengthEvaluator, - MinLengthEvaluator, - SimilarityEvaluator, - SemanticSimilarityEvaluator, - JsonSchemaEvaluator, - IntentMatchEvaluator, - StartsWithEvaluator, - EndsWithEvaluator, - ContainsQuestionEvaluator, - WordCountEvaluator, create_evaluator, evaluate_assertion, ) diff --git a/tests/eval/runner/evaluators/test_llm_judge.py b/tests/eval/runner/evaluators/test_llm_judge.py index 3d66aa627..3b9fd26c7 100644 --- a/tests/eval/runner/evaluators/test_llm_judge.py +++ b/tests/eval/runner/evaluators/test_llm_judge.py @@ -4,15 +4,12 @@ import json import pytest -from unittest.mock import MagicMock, AsyncMock -from typing import List from tests.eval.runner.evaluators.llm_judge import ( LLMJudgeEvaluator, JudgeResponse, EvaluationCriterion, DEFAULT_RUBRICS, - JudgeError, create_mock_judge, ) from tests.eval.runner.scenario_models import JudgeAssertion diff --git a/tests/eval/runner/memory_inspector.py b/tests/eval/runner/memory_inspector.py index 725576cb3..ced15dcaf 100644 --- a/tests/eval/runner/memory_inspector.py +++ b/tests/eval/runner/memory_inspector.py @@ -29,9 +29,6 @@ Neo4jDIKWLayerSnapshot, MemoryLayer, DIKWLayer, - 
normalize_text, - entities_match, - relationships_match, ) logger = logging.getLogger(__name__) diff --git a/tests/eval/runner/models.py b/tests/eval/runner/models.py index 5cfe6249a..d184266fa 100644 --- a/tests/eval/runner/models.py +++ b/tests/eval/runner/models.py @@ -6,7 +6,7 @@ """ from datetime import datetime, UTC -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional from enum import Enum from pydantic import BaseModel, Field import unicodedata diff --git a/tests/eval/runner/reporting.py b/tests/eval/runner/reporting.py index b278a4087..314bba1c7 100644 --- a/tests/eval/runner/reporting.py +++ b/tests/eval/runner/reporting.py @@ -12,7 +12,7 @@ from dataclasses import dataclass, field from datetime import datetime, UTC from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List from .models import AssertionResult, EvalResult, TurnResult @@ -582,7 +582,7 @@ def print_summary(self, suite: SuiteReport) -> None: print() # Scenarios - print(f" Scenarios:") + print(" Scenarios:") print(f" Total: {suite.total_scenarios}") print(f" Passed: {self._color(str(suite.passed_scenarios), 'green')}") print(f" Failed: {self._color(str(suite.failed_scenarios), 'red') if suite.failed_scenarios else '0'}") @@ -590,7 +590,7 @@ def print_summary(self, suite: SuiteReport) -> None: print() # Assertions - print(f" Assertions:") + print(" Assertions:") print(f" Total: {suite.total_assertions}") print(f" Passed: {self._color(str(suite.passed_assertions), 'green')}") print(f" Failed: {self._color(str(suite.failed_assertions), 'red') if suite.failed_assertions else '0'}") diff --git a/tests/eval/runner/scenario_loader.py b/tests/eval/runner/scenario_loader.py index 61b73c4dc..aa2896b5d 100644 --- a/tests/eval/runner/scenario_loader.py +++ b/tests/eval/runner/scenario_loader.py @@ -7,7 +7,6 @@ """ import logging -import os from pathlib import Path from typing import Dict, List, Optional, Any @@ 
-426,8 +425,8 @@ def _parse_turn(self, data: Dict[str, Any]) -> ScenarioTurn: for p in sa_data.get("entity_property_check", []) ] dikw_layer_check = [ - LayerAssertion(**l) - for l in sa_data.get("dikw_layer_check", []) + LayerAssertion(**layer_data) + for layer_data in sa_data.get("dikw_layer_check", []) ] memory_diff_check = None diff --git a/tests/eval/runner/scenario_models.py b/tests/eval/runner/scenario_models.py index fefeae961..8cd9e6ce1 100644 --- a/tests/eval/runner/scenario_models.py +++ b/tests/eval/runner/scenario_models.py @@ -5,8 +5,7 @@ de evaluación definidos en archivos YAML. """ -from datetime import datetime -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional from enum import Enum from pydantic import BaseModel, ConfigDict, Field, field_validator diff --git a/tests/eval/runner/scenario_orchestrator.py b/tests/eval/runner/scenario_orchestrator.py index 93eeb6461..f45c3d0ff 100644 --- a/tests/eval/runner/scenario_orchestrator.py +++ b/tests/eval/runner/scenario_orchestrator.py @@ -10,7 +10,7 @@ import logging import time from datetime import datetime, UTC -from typing import Any, Callable, Dict, List, Optional, Tuple +from typing import Any, Callable, List, Optional from .models import ( AssertionResult, @@ -22,11 +22,9 @@ ) from .memory_inspector import MemoryInspector from .scenario_models import ( - DeterministicAssertion, DiffAssertion, EntityAssertion, InitialState, - JudgeAssertion, LayerAssertion, PropertyAssertion, RelationshipAssertion, @@ -701,7 +699,7 @@ def _check_relationship_exists( assertion_type="relationship_must_exist", passed=found, reason=assertion.reason, - details="" if found else f"Relationship not found", + details="" if found else "Relationship not found", severity=AssertionSeverity.MEDIUM, ) diff --git a/tests/eval/runner/test_memory_inspector.py b/tests/eval/runner/test_memory_inspector.py index d22bb560e..d5641f199 100644 --- a/tests/eval/runner/test_memory_inspector.py +++ 
b/tests/eval/runner/test_memory_inspector.py @@ -9,8 +9,8 @@ """ import pytest -from datetime import datetime, timedelta, UTC -from unittest.mock import AsyncMock, MagicMock, patch +from datetime import datetime, UTC +from unittest.mock import AsyncMock, MagicMock from tests.eval.runner.models import ( MemorySnapshot, @@ -18,21 +18,15 @@ MemoryEntity, MemoryRelationship, Mem0Memory, - EntityChange, - RedisLayerSnapshot, Mem0LayerSnapshot, GraphitiLayerSnapshot, Neo4jDIKWLayerSnapshot, - MemoryLayer, - DIKWLayer, normalize_text, entities_match, relationships_match, ) from tests.eval.runner.memory_inspector import ( MemoryInspector, - MemoryInspectorError, - QuiescenceTimeoutError, ) diff --git a/tests/eval/runner/test_orchestrator_unit.py b/tests/eval/runner/test_orchestrator_unit.py index 78710e9ae..38821fe1b 100644 --- a/tests/eval/runner/test_orchestrator_unit.py +++ b/tests/eval/runner/test_orchestrator_unit.py @@ -6,28 +6,16 @@ import pytest from datetime import datetime, UTC -from unittest.mock import AsyncMock, MagicMock, patch -from typing import List +from unittest.mock import AsyncMock, MagicMock from tests.eval.runner.scenario_orchestrator import ( ScenarioOrchestrator, - OrchestratorError, - SetupError, - TurnExecutionError, ) from tests.eval.runner.models import ( - AssertionResult, - AssertionSeverity, - EvalResult, MemoryDiff, MemorySnapshot, MemoryEntity, - DIKWLayer, - TurnResult, - RedisLayerSnapshot, - Mem0LayerSnapshot, GraphitiLayerSnapshot, - Neo4jDIKWLayerSnapshot, ) from tests.eval.runner.scenario_models import ( Scenario, diff --git a/tests/eval/runner/test_scenario_loader.py b/tests/eval/runner/test_scenario_loader.py index e6a945985..f67d82e41 100644 --- a/tests/eval/runner/test_scenario_loader.py +++ b/tests/eval/runner/test_scenario_loader.py @@ -6,9 +6,7 @@ import pytest from pathlib import Path -from unittest.mock import patch, mock_open import tempfile -import os from tests.eval.runner.scenario_loader import ( ScenarioLoader, @@ 
-16,15 +14,6 @@ ScenarioValidationError, FixtureNotFoundError, ) -from tests.eval.runner.scenario_models import ( - Scenario, - ScenarioTurn, - DeterministicAssertion, - JudgeAssertion, - EntityAssertion, - InitialStateEntity, - InitialStateRelationship, -) # ======================================== diff --git a/tests/infrastructure/test_config_validation.py b/tests/infrastructure/test_config_validation.py index 7f80e64f7..258ca23be 100644 --- a/tests/infrastructure/test_config_validation.py +++ b/tests/infrastructure/test_config_validation.py @@ -1,8 +1,6 @@ """Tests for startup configuration validation.""" import logging -import os -from unittest.mock import patch import pytest from infrastructure.config_validation import validate_config, REQUIRED_VARS, OPTIONAL_VARS diff --git a/tests/integration/test_crystallization_api.py b/tests/integration/test_crystallization_api.py index 5e92cd206..7f715b4fd 100644 --- a/tests/integration/test_crystallization_api.py +++ b/tests/integration/test_crystallization_api.py @@ -226,7 +226,7 @@ async def test_promotion_evaluate_nonexistent_entity(async_client): if response.status_code == 200: data = response.json() - assert data.get("approved") == False + assert not data.get("approved") assert "not found" in data.get("reason", "").lower() print("✅ Nonexistent entity correctly rejected") diff --git a/tests/integration/test_crystallization_integration.py b/tests/integration/test_crystallization_integration.py index 34772bc1c..774cc0a3e 100644 --- a/tests/integration/test_crystallization_integration.py +++ b/tests/integration/test_crystallization_integration.py @@ -6,9 +6,7 @@ import pytest import os -import asyncio from datetime import datetime -from typing import Optional # Set test environment variables before imports os.environ.setdefault("NEO4J_URI", "bolt://localhost:7687") @@ -26,7 +24,6 @@ from domain.promotion_models import RiskLevel, PromotionStatus from application.event_bus import EventBus from domain.event import 
KnowledgeEvent -from domain.roles import Role # ======================================== @@ -218,7 +215,7 @@ async def test_crystallization_batch_processing(crystallization_service, neo4j_b assert result.entities_created >= 0 # May be 0 if entities already exist assert len(result.errors) == 0 - print(f"✅ Batch crystallization complete:") + print("✅ Batch crystallization complete:") print(f" Processed: {result.entities_processed}") print(f" Created: {result.entities_created}") print(f" Merged: {result.entities_merged}") @@ -366,7 +363,7 @@ async def test_full_pipeline_flow(neo4j_backend, entity_resolver, promotion_gate target_layer="SEMANTIC", ) - print(f"✅ Step 3: Promotion evaluation:") + print("✅ Step 3: Promotion evaluation:") print(f" Status: {decision.status.value}") print(f" Risk Level: {decision.risk_level.value}") print(f" Criteria Met: {decision.all_criteria_met}") diff --git a/tests/integration/test_modeling_integration.py b/tests/integration/test_modeling_integration.py index b9664d49a..22382c3e2 100644 --- a/tests/integration/test_modeling_integration.py +++ b/tests/integration/test_modeling_integration.py @@ -1,9 +1,6 @@ import pytest -import asyncio import tempfile import os -from pathlib import Path -from unittest.mock import AsyncMock, patch from src.application.commands.modeling_command import ModelingCommand from src.application.commands.modeling_handler import ModelingCommandHandler @@ -11,9 +8,6 @@ from src.application.agents.data_architect.domain_modeler import DomainModeler from src.application.agents.data_architect.dda_parser import DDAParserFactory from src.infrastructure.parsers.markdown_parser import MarkdownDDAParser -from src.domain.dda_models import DDADocument, DataEntity, Relationship, DataQualityRequirement, AccessPattern, Governance -from datetime import datetime -from graphiti_core import Graphiti class TestModelingIntegration: @@ -232,7 +226,7 @@ async def test_iterative_modeling_update(self, graphiti_instance, temp_dda_file) 
result1 = await workflow.execute(command1) assert result1["success"] is True - initial_episode_uuid = result1["graph_document"]["episode_uuid"] + result1["graph_document"]["episode_uuid"] # 2. Update existing graph command2 = ModelingCommand( diff --git a/tests/integration/test_patient_memory_integration.py b/tests/integration/test_patient_memory_integration.py index 46c12d53a..50ff6943b 100644 --- a/tests/integration/test_patient_memory_integration.py +++ b/tests/integration/test_patient_memory_integration.py @@ -107,7 +107,7 @@ async def test_patient_creation_and_retrieval(patient_memory_service): # Verify consent consent = await patient_memory_service.check_consent(TEST_PATIENT_ID) - assert consent == True + assert consent # ======================================== @@ -364,7 +364,7 @@ async def test_medication_adherence(chat_service, patient_memory_service): # Verify adherence concern detected assert response is not None - adherence_concern_found = any( + any( "adherence" in trail.lower() or "missed" in trail.lower() for trail in response.reasoning_trail ) @@ -444,7 +444,7 @@ async def test_gdpr_data_deletion(patient_memory_service, mem0_client, redis_cac # Delete all patient data success = await patient_memory_service.delete_patient_data(test_patient) - assert success == True + assert success # Verify data deleted from Neo4j context_after = await patient_memory_service.get_patient_context(test_patient) diff --git a/tests/integration/test_phase3_integration.py b/tests/integration/test_phase3_integration.py index d44965b55..f3de63714 100644 --- a/tests/integration/test_phase3_integration.py +++ b/tests/integration/test_phase3_integration.py @@ -8,8 +8,7 @@ """ import pytest -from unittest.mock import AsyncMock, MagicMock, patch -from datetime import datetime +from unittest.mock import MagicMock import sys from pathlib import Path diff --git a/tests/integration/test_quality_api_endpoints.py b/tests/integration/test_quality_api_endpoints.py index 
9141ff1f7..ae89f3632 100644 --- a/tests/integration/test_quality_api_endpoints.py +++ b/tests/integration/test_quality_api_endpoints.py @@ -6,8 +6,7 @@ import pytest from unittest.mock import AsyncMock, MagicMock, patch from datetime import datetime -from pathlib import Path -from dataclasses import dataclass, field +from dataclasses import dataclass from typing import Optional from fastapi.testclient import TestClient diff --git a/tests/interfaces/test_agent_servers.py b/tests/interfaces/test_agent_servers.py index fceb033b1..7420b6a24 100644 --- a/tests/interfaces/test_agent_servers.py +++ b/tests/interfaces/test_agent_servers.py @@ -4,8 +4,6 @@ from src.application.agents.data_engineer.server import create_app as create_data_engineer_app from src.application.agents.data_architect.server import create_app as create_data_architect_app from src.application.commands.base import CommandBus -from src.application.commands.collaboration_commands import BuildKGCommand -from src.application.agents.data_engineer.handlers.build_kg import BuildKGCommandHandler from src.domain.agent_definition import AgentDefinition diff --git a/tests/manual/test_direct_writer.py b/tests/manual/test_direct_writer.py index a94d8a08d..a5adaa3b2 100644 --- a/tests/manual/test_direct_writer.py +++ b/tests/manual/test_direct_writer.py @@ -4,7 +4,6 @@ import asyncio import os from dotenv import load_dotenv -from pathlib import Path # Load environment variables load_dotenv() @@ -31,13 +30,13 @@ async def test_direct_writer(): parser = parser_factory.get_parser(dda_path) dda_document = await parser.parse(dda_path) - print(f"✅ Parsed DDA:") + print("✅ Parsed DDA:") print(f" Domain: {dda_document.domain}") print(f" Entities: {len(dda_document.entities)}") print(f" Relationships: {len(dda_document.relationships)}") # Create architecture graph directly - print(f"\n🔧 Creating architecture graph (direct write)...") + print("\n🔧 Creating architecture graph (direct write)...") writer = 
ArchitectureGraphWriter( uri="bolt://localhost:7687", @@ -48,7 +47,7 @@ async def test_direct_writer(): try: result = writer.create_architecture_graph(dda_document) - print(f"\n✅ Architecture graph created!") + print("\n✅ Architecture graph created!") print(f" Domain: {result['domain']}") print(f" Entities: {result['entities_count']}") print(f" Relationships: {result['relationships_count']}") @@ -56,7 +55,7 @@ async def test_direct_writer(): print(f" Edges created: {result['edges_created']}") # Show entity details - print(f"\n📊 Entity Details:") + print("\n📊 Entity Details:") for entity in dda_document.entities: print(f"\n {entity.name}:") print(f" Description: {entity.description[:80]}...") @@ -66,7 +65,7 @@ async def test_direct_writer(): print(f" Foreign Keys: {', '.join(entity.foreign_keys)}") # Show relationships - print(f"\n🔗 Relationships:") + print("\n🔗 Relationships:") for rel in dda_document.relationships: print(f" {rel.source_entity} --[{rel.relationship_type}]--> {rel.target_entity}") print(f" {rel.description[:80]}...") diff --git a/tests/manual/test_e2e_flow.py b/tests/manual/test_e2e_flow.py index 76c36363c..1c8e1cdb5 100644 --- a/tests/manual/test_e2e_flow.py +++ b/tests/manual/test_e2e_flow.py @@ -54,7 +54,7 @@ async def test_end_to_end(): modeling_result = await modeling_handler.handle(modeling_command) if modeling_result["success"]: - print(f" ✅ Architecture graph created!") + print(" ✅ Architecture graph created!") print(f" Domain: {modeling_result['graph_document']['domain']}") print(f" Entities: {modeling_result['graph_document']['entities_count']}") print(f" Nodes: {modeling_result['graph_document']['nodes_created']}") @@ -92,7 +92,7 @@ async def test_end_to_end(): metadata_result = await metadata_handler.handle(metadata_command) if metadata_result["success"]: - print(f" ✅ Metadata graph created!") + print(" ✅ Metadata graph created!") metadata_summary = metadata_result['metadata_graph'] print(f" Catalog: 
{metadata_summary.get('catalog_name')}") print(f" Schema: {metadata_summary.get('schema_name')}") @@ -119,7 +119,7 @@ async def test_end_to_end(): with neo4j_driver.session() as session: result = session.run("MATCH (n:DataEntity) RETURN count(n) as count") entity_count = result.single()["count"] - print(f" Neo4j (Architecture):") + print(" Neo4j (Architecture):") print(f" DataEntities: {entity_count}") neo4j_driver.close() @@ -130,12 +130,12 @@ async def test_end_to_end(): falkor_graph = falkor_client.select_graph("knowledge_graph") result = falkor_graph.query("MATCH (n) RETURN labels(n)[0] as label, count(n) as count") - print(f"\n FalkorDB (Metadata):") + print("\n FalkorDB (Metadata):") if result.result_set: for row in result.result_set: print(f" {row[0]}: {row[1]}") else: - print(f" No nodes found") + print(" No nodes found") print("\n" + "=" * 70) print("✅ END-TO-END TEST COMPLETE") diff --git a/tests/manual/test_falkor_connection.py b/tests/manual/test_falkor_connection.py index f1637bb1f..f6149816d 100644 --- a/tests/manual/test_falkor_connection.py +++ b/tests/manual/test_falkor_connection.py @@ -121,7 +121,6 @@ def test_openai_key(): async def test_falkor_backend_integration(): """Test FalkorDB backend integration with our codebase.""" try: - import asyncio from infrastructure.falkor_backend import FalkorBackend print("\n--- Testing FalkorBackend Integration ---") diff --git a/tests/manual/test_falkor_query.py b/tests/manual/test_falkor_query.py index 8c9df05c6..ad0a11d82 100644 --- a/tests/manual/test_falkor_query.py +++ b/tests/manual/test_falkor_query.py @@ -1,5 +1,4 @@ import asyncio -import os from src.infrastructure.falkor_backend import FalkorBackend async def test_query(): diff --git a/tests/manual/test_falkor_simple.py b/tests/manual/test_falkor_simple.py index 1e5f5b1d6..3f87fd331 100644 --- a/tests/manual/test_falkor_simple.py +++ b/tests/manual/test_falkor_simple.py @@ -11,7 +11,7 @@ async def test_simple_metadata(): from 
src.infrastructure.falkor_backend import FalkorBackend - from domain.odin_models import Catalog, Schema, Table, Column + from domain.odin_models import Catalog, Schema print("🔧 Testing FalkorDB backend...") @@ -22,7 +22,7 @@ async def test_simple_metadata(): try: backend.graph.delete() print("✅ Cleared test graph") - except: + except Exception: pass # Test 1: Create Catalog diff --git a/tests/manual/test_metadata_debug.py b/tests/manual/test_metadata_debug.py index cb4277629..85c2b1790 100644 --- a/tests/manual/test_metadata_debug.py +++ b/tests/manual/test_metadata_debug.py @@ -43,7 +43,7 @@ async def test_metadata_workflow(): try: backend.graph.delete() print("✅ Cleared test graph") - except: + except Exception: pass # Initialize type inference diff --git a/tests/performance/test_modeling_performance.py b/tests/performance/test_modeling_performance.py index 6e8f23d99..a585aebbd 100644 --- a/tests/performance/test_modeling_performance.py +++ b/tests/performance/test_modeling_performance.py @@ -3,19 +3,12 @@ import tempfile import os import time -import statistics -from pathlib import Path -from unittest.mock import AsyncMock, patch from src.application.commands.modeling_command import ModelingCommand -from src.application.commands.modeling_handler import ModelingCommandHandler from src.application.agents.data_architect.modeling_workflow import ModelingWorkflow from src.application.agents.data_architect.domain_modeler import DomainModeler from src.application.agents.data_architect.dda_parser import DDAParserFactory from src.infrastructure.parsers.markdown_parser import MarkdownDDAParser -from src.domain.dda_models import DDADocument, DataEntity, Relationship, DataQualityRequirement, AccessPattern, Governance -from datetime import datetime -from graphiti_core import Graphiti class TestModelingPerformance: diff --git a/tests/test_automatic_promotion.py b/tests/test_automatic_promotion.py index 07ca64cb1..7c567fce3 100644 --- a/tests/test_automatic_promotion.py +++ 
b/tests/test_automatic_promotion.py @@ -8,7 +8,7 @@ import pytest from datetime import datetime, timedelta -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, MagicMock from application.services.automatic_layer_transition import ( AutomaticLayerTransitionService, @@ -18,7 +18,6 @@ from application.services.layer_transition import ( Layer, TransitionStatus, - LayerTransitionService, ) from application.jobs.promotion_scanner import PromotionScannerJob from application.event_bus import EventBus diff --git a/tests/test_chat_history_service.py b/tests/test_chat_history_service.py index 966f24236..7dc2f0620 100644 --- a/tests/test_chat_history_service.py +++ b/tests/test_chat_history_service.py @@ -6,9 +6,8 @@ """ import pytest -import asyncio from datetime import datetime, timedelta -from unittest.mock import Mock, AsyncMock, MagicMock +from unittest.mock import AsyncMock # Add src to path import sys diff --git a/tests/test_conversation_graph.py b/tests/test_conversation_graph.py index 7429edd41..bcf79a35c 100644 --- a/tests/test_conversation_graph.py +++ b/tests/test_conversation_graph.py @@ -9,17 +9,14 @@ """ import pytest -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import patch from datetime import datetime from domain.conversation_state import ( - ConversationState, ConversationMode, UrgencyLevel, EmotionalTone, GoalType, - ActiveGoal, - GoalSlot, PatientContext, create_initial_state, serialize_goal, @@ -469,7 +466,6 @@ def test_patient_context_has_temporal_fields(self): def test_patient_context_dataclass_defaults(self): """Test that PatientContext temporal fields have correct defaults.""" from application.services.patient_memory_service import PatientContext - from datetime import datetime context = PatientContext( patient_id="test-patient", diff --git a/tests/test_conversational_layer.py b/tests/test_conversational_layer.py index 2d3fe0c28..130a9e52e 100644 --- 
a/tests/test_conversational_layer.py +++ b/tests/test_conversational_layer.py @@ -16,7 +16,6 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src')) from application.services.conversational_intent_service import ConversationalIntentService -from application.services.memory_context_builder import MemoryContextBuilder from application.services.response_modulator import ResponseModulator from domain.conversation_models import IntentType, MemoryContext from config.persona_config import get_persona @@ -71,7 +70,7 @@ async def test_memory_context_mock(): conversation_turn_count=0 ) - print(f"✅ Mock memory context created:") + print("✅ Mock memory context created:") print(f" Patient: {mock_context.patient_name}") print(f" Recent topics: {', '.join(mock_context.recent_topics)}") print(f" Days since last session: {mock_context.days_since_last_session}") @@ -106,7 +105,7 @@ async def test_response_modulation(mock_context): intent=intent, memory_context=mock_context ) - print(f"✅ Generated greeting:") + print("✅ Generated greeting:") print(f" {response}") except Exception as e: print(f"❌ Error: {e}") @@ -125,7 +124,7 @@ async def test_response_modulation(mock_context): intent=intent, memory_context=new_user_context ) - print(f"✅ Generated greeting:") + print("✅ Generated greeting:") print(f" {response}") except Exception as e: print(f"❌ Error: {e}") @@ -141,7 +140,7 @@ async def test_response_modulation(mock_context): memory_context=mock_context, medical_response=medical_response ) - print(f"✅ Wrapped medical response:") + print("✅ Wrapped medical response:") print(f" {response}") except Exception as e: print(f"❌ Error: {e}") diff --git a/tests/test_crystallization_pipeline.py b/tests/test_crystallization_pipeline.py index 252af965c..97ea56f14 100644 --- a/tests/test_crystallization_pipeline.py +++ b/tests/test_crystallization_pipeline.py @@ -5,7 +5,7 @@ import pytest from datetime import datetime -from unittest.mock import AsyncMock, MagicMock, patch 
+from unittest.mock import AsyncMock, MagicMock from application.services.entity_resolver import ( EntityResolver, @@ -16,14 +16,11 @@ CrystallizationService, CrystallizationConfig, CrystallizationMode, - CrystallizationResult, ) from application.services.promotion_gate import ( PromotionGate, - PromotionGateConfig, ) from domain.promotion_models import ( - PromotionDecision, PromotionStatus, RiskLevel, EntityCategory, diff --git a/tests/test_dikw_router.py b/tests/test_dikw_router.py index 2e14e84a4..f8d1e4e20 100644 --- a/tests/test_dikw_router.py +++ b/tests/test_dikw_router.py @@ -4,7 +4,6 @@ """ import pytest -from datetime import datetime from domain.query_intent_models import ( QueryIntent, diff --git a/tests/test_evaluation_framework.py b/tests/test_evaluation_framework.py index 65bf8f0f0..1a6e9c46b 100644 --- a/tests/test_evaluation_framework.py +++ b/tests/test_evaluation_framework.py @@ -9,7 +9,6 @@ import os from datetime import datetime from unittest.mock import AsyncMock, MagicMock, patch -from fastapi.testclient import TestClient from httpx import AsyncClient, ASGITransport # Set eval mode before importing the app @@ -30,7 +29,6 @@ def test_is_eval_mode_enabled(self): def test_is_eval_mode_disabled_by_default(self): """Verifica que eval mode está deshabilitado por defecto.""" with patch.dict(os.environ, {"SYNAPSEFLOW_EVAL_MODE": "false"}): - from src.application.api.evaluation_auth import is_eval_mode_enabled # Need to reload to pick up env change assert os.getenv("SYNAPSEFLOW_EVAL_MODE") == "false" @@ -105,7 +103,6 @@ def test_memory_snapshot_all_entities(self): MemorySnapshot, MemoryEntityModel, Neo4jDIKWLayerSnapshot, - DIKWLayer, ) snapshot = MemorySnapshot( diff --git a/tests/test_event_bus.py b/tests/test_event_bus.py index cdf4460ec..6c631589a 100644 --- a/tests/test_event_bus.py +++ b/tests/test_event_bus.py @@ -1,6 +1,5 @@ """Tests for the application event bus.""" -import asyncio import unittest import os import sys diff --git 
a/tests/test_generate_data_map.py b/tests/test_generate_data_map.py index c5e110889..62b68c441 100644 --- a/tests/test_generate_data_map.py +++ b/tests/test_generate_data_map.py @@ -4,8 +4,8 @@ def test_generate_data_map(): # Ensure FalkorDB is reachable (use env vars) - host = os.getenv("FALKORDB_HOST", "localhost") - port = os.getenv("FALKORDB_PORT", "6379") + os.getenv("FALKORDB_HOST", "localhost") + os.getenv("FALKORDB_PORT", "6379") # Run the script result = subprocess.run([sys.executable, "scripts/generate_data_map.py"], capture_output=True, text=True) assert result.returncode == 0, f"Script failed: {result.stderr}" diff --git a/tests/test_improved_reasoning.py b/tests/test_improved_reasoning.py index 702e9fa8a..88cb1f94b 100644 --- a/tests/test_improved_reasoning.py +++ b/tests/test_improved_reasoning.py @@ -86,7 +86,7 @@ async def test_reasoning_improvements(): response = await chat_service.query(test["question"]) # Display results - print(f"\n📝 Answer Preview:") + print("\n📝 Answer Preview:") print(f" {response.answer[:200]}...") print(f"\n🎯 Confidence: {response.confidence:.2f}") @@ -110,7 +110,7 @@ async def test_reasoning_improvements(): expected = test["expected_reasoning"] reasoning_text = " ".join(response.reasoning_trail) - print(f"\n✅ Reasoning Validation:") + print("\n✅ Reasoning Validation:") for rule in expected: if rule in reasoning_text: print(f" ✓ {rule} - APPLIED") @@ -128,8 +128,8 @@ async def test_reasoning_improvements(): print("\n📊 Summary:") print(f" - Tested {len(test_queries)} queries") - print(f" - All queries completed successfully") - print(f" - Reasoning engine working with chat_query action") + print(" - All queries completed successfully") + print(" - Reasoning engine working with chat_query action") return 0 diff --git a/tests/test_in_memory_backend.py b/tests/test_in_memory_backend.py index 597926475..335ecd6f8 100644 --- a/tests/test_in_memory_backend.py +++ b/tests/test_in_memory_backend.py @@ -1,6 +1,5 @@ """Tests for the 
in‑memory knowledge graph backend.""" -import asyncio import unittest import os import sys diff --git a/tests/test_intelligent_chat_integration.py b/tests/test_intelligent_chat_integration.py index 56479596c..e83e7d93e 100644 --- a/tests/test_intelligent_chat_integration.py +++ b/tests/test_intelligent_chat_integration.py @@ -301,7 +301,7 @@ async def test_neurosymbolic_service_with_patient_context( ) # Act - response = await service.query( + await service.query( "Can I take ibuprofen?", patient_id="patient_123" ) diff --git a/tests/test_kg_operations_api.py b/tests/test_kg_operations_api.py index 135cf03ee..13bae4791 100644 --- a/tests/test_kg_operations_api.py +++ b/tests/test_kg_operations_api.py @@ -1,9 +1,6 @@ """Tests for the Knowledge Graph Operations API.""" -import asyncio -import json import unittest -from unittest.mock import Mock, patch, AsyncMock import os import sys diff --git a/tests/test_knowledge_manager.py b/tests/test_knowledge_manager.py index 8769a923d..268f31c13 100644 --- a/tests/test_knowledge_manager.py +++ b/tests/test_knowledge_manager.py @@ -1,6 +1,5 @@ """Tests for the knowledge manager service.""" -import asyncio import unittest import os import sys diff --git a/tests/test_medical_rules.py b/tests/test_medical_rules.py index 27ef80206..1edab6ccc 100644 --- a/tests/test_medical_rules.py +++ b/tests/test_medical_rules.py @@ -5,7 +5,6 @@ """ import pytest -from datetime import datetime from domain.medical_rules_models import ( MedicalRule, @@ -17,7 +16,6 @@ RuleEvaluationSummary, RuleSeverity, RuleCategory, - InteractionType, ) from application.rules.medical_rules import ( MedicalRulesEngine, diff --git a/tests/test_neo4j_ingestion.py b/tests/test_neo4j_ingestion.py index 8b98be01d..55f8437cf 100644 --- a/tests/test_neo4j_ingestion.py +++ b/tests/test_neo4j_ingestion.py @@ -110,7 +110,7 @@ async def test_ingestion(): try: persist_result = await service.persist_to_neo4j(extraction_result) - print(f"✓ Persistence complete") + print("✓ 
Persistence complete") print(f" Entities added: {persist_result['entities_added']}") print(f" Relationships added: {persist_result['relationships_added']}") print(f" Relationships skipped: {persist_result['relationships_skipped']}") diff --git a/tests/test_ontology_mapping.py b/tests/test_ontology_mapping.py index 8cc7bed88..91548a9af 100644 --- a/tests/test_ontology_mapping.py +++ b/tests/test_ontology_mapping.py @@ -3,7 +3,6 @@ import os sys.path.append(os.getcwd()) -import pytest from src.application.agents.knowledge_manager.ontology_mapper import OntologyMapper from domain.ontologies.odin import ODIN from domain.ontologies.schema_org import SCHEMA diff --git a/tests/test_phase2a_infrastructure.py b/tests/test_phase2a_infrastructure.py index 8f99c6278..8c4cf266b 100644 --- a/tests/test_phase2a_infrastructure.py +++ b/tests/test_phase2a_infrastructure.py @@ -100,7 +100,7 @@ async def test_mem0(): ) assert result is not None, "Failed to add memory" - print(f" ✅ Mem0 add memory successful") + print(" ✅ Mem0 add memory successful") # Test retrieve memories memories = mem0.get_all(user_id="test_patient_123", limit=5) @@ -230,7 +230,7 @@ async def test_patient_memory_service(): # Test consent check consent = await memory_service.check_consent(patient_id) - assert consent == True, "Consent check failed" + assert consent, "Consent check failed" print(" ✅ Consent check passed") # Test audit logging @@ -244,7 +244,7 @@ async def test_patient_memory_service(): # Cleanup success = await memory_service.delete_patient_data(patient_id) - assert success == True, "Patient data deletion failed" + assert success, "Patient data deletion failed" print(" ✅ Patient data deleted (GDPR right to be forgotten)") await redis.close() diff --git a/tests/test_rabbitmq_event_bus.py b/tests/test_rabbitmq_event_bus.py index 434128162..f69cfa792 100644 --- a/tests/test_rabbitmq_event_bus.py +++ b/tests/test_rabbitmq_event_bus.py @@ -1,9 +1,7 @@ """Tests for the RabbitMQ event bus.""" import 
pytest -import asyncio from unittest.mock import Mock, AsyncMock, patch -from datetime import datetime, timezone from src.infrastructure.event_bus.rabbitmq_event_bus import RabbitMQEventBus from src.domain.event import KnowledgeEvent diff --git a/tests/test_reasoning_demo.py b/tests/test_reasoning_demo.py index 1fcfb9255..aaef58e41 100644 --- a/tests/test_reasoning_demo.py +++ b/tests/test_reasoning_demo.py @@ -55,7 +55,7 @@ async def main(): for source in response.sources[:5]: print(f" - {source.get('type')}: {source.get('name')}") - print(f"\n💡 Related Concepts:") + print("\n💡 Related Concepts:") for concept in response.related_concepts[:5]: print(f" - {concept}") diff --git a/tests/test_shacl_validation.py b/tests/test_shacl_validation.py index 9f3b53043..af3ad0626 100644 --- a/tests/test_shacl_validation.py +++ b/tests/test_shacl_validation.py @@ -8,8 +8,8 @@ # Mock pyshacl and rdflib if not installed try: - import pyshacl - import rdflib + import pyshacl # noqa: F401 + import rdflib # noqa: F401 except ImportError: # Create mocks sys.modules["pyshacl"] = MagicMock() @@ -49,7 +49,7 @@ async def test_shacl_validation_pass(): ) result = await engine.validate_event(event) - assert result["is_valid"] == True + assert result["is_valid"] @pytest.mark.asyncio async def test_shacl_validation_fail(): diff --git a/tests/test_temporal_scoring.py b/tests/test_temporal_scoring.py index d754bd060..3b52929e0 100644 --- a/tests/test_temporal_scoring.py +++ b/tests/test_temporal_scoring.py @@ -14,8 +14,6 @@ TemporalScore, TemporalQueryContext, TemporalWindow, - TEMPORAL_KEYWORDS, - WINDOW_DURATIONS, ) from application.services.temporal_scoring import ( TemporalScoringService, From 4c81314f6eac2c342a2e6d9a7d62107cd59afce5 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 19 Feb 2026 02:50:36 +0000 Subject: [PATCH 2/4] Fix remaining lint errors and pyproject.toml deprecation warning - Fix E402 (import ordering) with noqa comments in test/config files that require sys.path 
manipulation before imports - Fix E741 (ambiguous variable names) in knowledge manager and services - Fix remaining F401/F811/F821/F601/E721 issues - Migrate per-file-ignores to lint.per-file-ignores in pyproject.toml to resolve ruff deprecation warning All 763 original lint errors are now resolved. `ruff check .` passes cleanly. https://claude.ai/code/session_01JAbMU7GmooaRYsKsyPRdrc --- pyproject.toml | 2 +- .../agents/knowledge_manager/agent.py | 5 ++++- .../knowledge_manager/conflict_resolver.py | 2 +- src/application/services/entity_resolver.py | 5 ++++- src/application/services/medication_validator.py | 3 +-- src/composition_root.py | 2 +- tests/domain/metadata/test_base.py | 2 +- tests/domain/metadata/test_database.py | 2 +- tests/domain/metadata/test_schema_table.py | 4 ++-- tests/eval/conftest.py | 10 +++++----- tests/eval/runner/evaluators/deterministic.py | 2 +- tests/eval/runner/test_memory_inspector.py | 3 +-- tests/integration/test_phase3_integration.py | 10 +++++----- tests/test_api.py | 4 ++-- tests/test_event_bus.py | 6 +++--- tests/test_in_memory_backend.py | 2 +- tests/test_kg_operations_api.py | 8 ++++---- tests/test_knowledge_manager.py | 8 ++++---- tests/test_knowledge_manager_agent.py | 16 ++++++++-------- 19 files changed, 50 insertions(+), 46 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 67c378ae2..a74469133 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,7 +95,7 @@ line-length = 101 # Ignore specific lint rules for problematic test files. 
- [tool.ruff.per-file-ignores] + [tool.ruff.lint.per-file-ignores] "tests/performance/test_modeling_performance.py" = ["F541", "F841"] "tests/test_neo4j_connection.py" = ["F401"] diff --git a/src/application/agents/knowledge_manager/agent.py b/src/application/agents/knowledge_manager/agent.py index c8cef47a2..9e8809496 100644 --- a/src/application/agents/knowledge_manager/agent.py +++ b/src/application/agents/knowledge_manager/agent.py @@ -4,7 +4,7 @@ and conflict resolution that are beyond the scope of simple agent updates. """ -from typing import Dict, Any, List, Optional +from typing import Dict, Any, List, Optional, TYPE_CHECKING from datetime import datetime from enum import Enum from domain.agent import Agent @@ -19,6 +19,9 @@ from .validation_engine import ValidationEngine from .reasoning_engine import ReasoningEngine +if TYPE_CHECKING: + from graphiti_core import Graphiti + class KGUpdateType(str, Enum): """Types of KG updates that can be escalated.""" diff --git a/src/application/agents/knowledge_manager/conflict_resolver.py b/src/application/agents/knowledge_manager/conflict_resolver.py index 5ec4aa16a..6b96b9a9c 100644 --- a/src/application/agents/knowledge_manager/conflict_resolver.py +++ b/src/application/agents/knowledge_manager/conflict_resolver.py @@ -104,7 +104,7 @@ async def _detect_relationship_conflicts(self, event: KnowledgeEvent) -> List[Di "type": "duplicate_relationship", "source": source, "target": target, - "type": rel_type, + "rel_type": rel_type, "severity": "low", "description": f"Relationship {source}-[{rel_type}]->{target} already exists" }) diff --git a/src/application/services/entity_resolver.py b/src/application/services/entity_resolver.py index bce53a6f8..1c92ec7d7 100644 --- a/src/application/services/entity_resolver.py +++ b/src/application/services/entity_resolver.py @@ -16,12 +16,15 @@ - Observation count tracking for entity merging """ -from typing import List, Dict, Any, Optional, Tuple +from typing import List, Dict, Any, 
Optional, Tuple, TYPE_CHECKING from dataclasses import dataclass, field from datetime import datetime from enum import Enum import logging +if TYPE_CHECKING: + from domain.kg_backends import KnowledgeGraphBackend + logger = logging.getLogger(__name__) diff --git a/src/application/services/medication_validator.py b/src/application/services/medication_validator.py index d25a8c573..e59eba705 100644 --- a/src/application/services/medication_validator.py +++ b/src/application/services/medication_validator.py @@ -62,7 +62,7 @@ class MedicationValidationResult: "ranitidine": ["zantac"], "mesalamine": ["asacol", "pentasa", "lialda", "apriso", "delzicol"], "sulfasalazine": ["azulfidine"], - "budesonide": ["entocort", "uceris"], + "budesonide": ["entocort", "uceris", "pulmicort", "rhinocort"], "prednisone": ["deltasone", "rayos"], "loperamide": ["imodium"], "ondansetron": ["zofran"], @@ -131,7 +131,6 @@ class MedicationValidationResult: "albuterol": ["proventil", "ventolin", "proair"], "fluticasone": ["flovent", "flonase"], "montelukast": ["singulair"], - "budesonide": ["pulmicort", "rhinocort"], "tiotropium": ["spiriva"], "benzonatate": ["tessalon"], "guaifenesin": ["mucinex"], diff --git a/src/composition_root.py b/src/composition_root.py index 84712489d..3f150fd9c 100644 --- a/src/composition_root.py +++ b/src/composition_root.py @@ -13,7 +13,7 @@ # Lazy import for MedicalAssistantAgent to avoid loading mem0 for agents that don't need it if TYPE_CHECKING: - pass + from application.services.agent_discovery import AgentDiscoveryService from application.commands.base import CommandBus # noqa: E402 from application.commands.echo_command import EchoCommand, EchoCommandHandler # noqa: E402 from application.commands.file_commands import ( # noqa: E402 diff --git a/tests/domain/metadata/test_base.py b/tests/domain/metadata/test_base.py index a4c842ca4..606e9efa1 100644 --- a/tests/domain/metadata/test_base.py +++ b/tests/domain/metadata/test_base.py @@ -11,7 +11,7 @@ if _SRC_PATH 
not in sys.path: sys.path.insert(0, _SRC_PATH) -from domain.metadata.base import MetadataEntity +from domain.metadata.base import MetadataEntity # noqa: E402 class TestMetadataEntity(unittest.TestCase): diff --git a/tests/domain/metadata/test_database.py b/tests/domain/metadata/test_database.py index 8a72cc730..03c05194f 100644 --- a/tests/domain/metadata/test_database.py +++ b/tests/domain/metadata/test_database.py @@ -10,7 +10,7 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from domain.metadata.database import Cluster, Database +from domain.metadata.database import Cluster, Database # noqa: E402 class TestCluster(unittest.TestCase): diff --git a/tests/domain/metadata/test_schema_table.py b/tests/domain/metadata/test_schema_table.py index 0e6a30a5b..aa4247158 100644 --- a/tests/domain/metadata/test_schema_table.py +++ b/tests/domain/metadata/test_schema_table.py @@ -11,8 +11,8 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from domain.metadata.schema import Schema -from domain.metadata.table import Table, Column, ColumnStats +from domain.metadata.schema import Schema # noqa: E402 +from domain.metadata.table import Table, Column, ColumnStats # noqa: E402 class TestSchema(unittest.TestCase): diff --git a/tests/eval/conftest.py b/tests/eval/conftest.py index 6398cf406..590a3c742 100644 --- a/tests/eval/conftest.py +++ b/tests/eval/conftest.py @@ -11,12 +11,12 @@ # Register the pytest plugin for eval command-line options pytest_plugins = ["tests.eval.pytest_plugin"] -import os -import pytest -from pathlib import Path -from typing import List, Optional +import os # noqa: E402 +import pytest # noqa: E402 +from pathlib import Path # noqa: E402 +from typing import List, Optional # noqa: E402 -from tests.eval.runner import ( +from tests.eval.runner import ( # noqa: E402 MemoryInspector, ScenarioLoader, ScenarioOrchestrator, diff --git a/tests/eval/runner/evaluators/deterministic.py b/tests/eval/runner/evaluators/deterministic.py index 
8a66072a0..cbc22b60b 100644 --- a/tests/eval/runner/evaluators/deterministic.py +++ b/tests/eval/runner/evaluators/deterministic.py @@ -606,7 +606,7 @@ def _validate_structure(self, actual: Any, expected: Any) -> bool: return True else: # For primitives, just check type matches - return type(actual) == type(expected) + return isinstance(actual, type(expected)) class IntentMatchEvaluator(AssertionEvaluator): diff --git a/tests/eval/runner/test_memory_inspector.py b/tests/eval/runner/test_memory_inspector.py index d5641f199..df7a97e0a 100644 --- a/tests/eval/runner/test_memory_inspector.py +++ b/tests/eval/runner/test_memory_inspector.py @@ -642,5 +642,4 @@ def test_total_assertions(self): assert result.pass_rate == 2 / 3 -# Import EvalResult at module level -from tests.eval.runner.models import EvalResult +from tests.eval.runner.models import EvalResult # noqa: E402 diff --git a/tests/integration/test_phase3_integration.py b/tests/integration/test_phase3_integration.py index f3de63714..574a0ec26 100644 --- a/tests/integration/test_phase3_integration.py +++ b/tests/integration/test_phase3_integration.py @@ -15,20 +15,20 @@ # Add src to path sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src")) -from application.services.layer_transition import ( +from application.services.layer_transition import ( # noqa: E402 LayerTransitionService, LayerTransitionRequest, Layer, TransitionStatus ) -from domain.event import KnowledgeEvent -from domain.roles import Role +from domain.event import KnowledgeEvent # noqa: E402 +from domain.roles import Role # noqa: E402 # Mock graphiti_core to avoid import errors sys.modules['graphiti_core'] = MagicMock() -from application.agents.knowledge_manager.reasoning_engine import ReasoningEngine -from application.agents.knowledge_manager.validation_engine import ValidationEngine +from application.agents.knowledge_manager.reasoning_engine import ReasoningEngine # noqa: E402 +from 
application.agents.knowledge_manager.validation_engine import ValidationEngine # noqa: E402 class TestLayerTransitionWorkflow: diff --git a/tests/test_api.py b/tests/test_api.py index 99996ad21..aaa37d760 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -10,9 +10,9 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from fastapi.testclient import TestClient +from fastapi.testclient import TestClient # noqa: E402 -from interfaces.kg_api import app, get_backend +from interfaces.kg_api import app, get_backend # noqa: E402 class TestAPI(unittest.TestCase): diff --git a/tests/test_event_bus.py b/tests/test_event_bus.py index 6c631589a..d53dfe4b6 100644 --- a/tests/test_event_bus.py +++ b/tests/test_event_bus.py @@ -10,9 +10,9 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from application.event_bus import EventBus -from domain.event import KnowledgeEvent -from domain.roles import Role +from application.event_bus import EventBus # noqa: E402 +from domain.event import KnowledgeEvent # noqa: E402 +from domain.roles import Role # noqa: E402 class TestEventBus(unittest.IsolatedAsyncioTestCase): diff --git a/tests/test_in_memory_backend.py b/tests/test_in_memory_backend.py index 335ecd6f8..711cfa61d 100644 --- a/tests/test_in_memory_backend.py +++ b/tests/test_in_memory_backend.py @@ -10,7 +10,7 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from infrastructure.in_memory_backend import InMemoryGraphBackend +from infrastructure.in_memory_backend import InMemoryGraphBackend # noqa: E402 class TestInMemoryGraphBackend(unittest.IsolatedAsyncioTestCase): diff --git a/tests/test_kg_operations_api.py b/tests/test_kg_operations_api.py index 13bae4791..bbb9957ae 100644 --- a/tests/test_kg_operations_api.py +++ b/tests/test_kg_operations_api.py @@ -10,10 +10,10 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from fastapi.testclient import TestClient -from interfaces.kg_operations_api import app, initialize_api -from 
infrastructure.in_memory_backend import InMemoryGraphBackend -from application.event_bus import EventBus +from fastapi.testclient import TestClient # noqa: E402 +from interfaces.kg_operations_api import app, initialize_api # noqa: E402 +from infrastructure.in_memory_backend import InMemoryGraphBackend # noqa: E402 +from application.event_bus import EventBus # noqa: E402 class TestKGOperationsAPI(unittest.TestCase): diff --git a/tests/test_knowledge_manager.py b/tests/test_knowledge_manager.py index 268f31c13..3e17911d2 100644 --- a/tests/test_knowledge_manager.py +++ b/tests/test_knowledge_manager.py @@ -10,10 +10,10 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from application.knowledge_management import KnowledgeManagerService -from domain.event import KnowledgeEvent -from domain.roles import Role -from infrastructure.in_memory_backend import InMemoryGraphBackend +from application.knowledge_management import KnowledgeManagerService # noqa: E402 +from domain.event import KnowledgeEvent # noqa: E402 +from domain.roles import Role # noqa: E402 +from infrastructure.in_memory_backend import InMemoryGraphBackend # noqa: E402 class TestKnowledgeManagerService(unittest.IsolatedAsyncioTestCase): diff --git a/tests/test_knowledge_manager_agent.py b/tests/test_knowledge_manager_agent.py index 2de8a16be..44174f242 100644 --- a/tests/test_knowledge_manager_agent.py +++ b/tests/test_knowledge_manager_agent.py @@ -11,14 +11,14 @@ if _SRC_PATH not in sys.path: sys.path.insert(0, _SRC_PATH) -from application.agents.knowledge_manager.agent import KnowledgeManagerAgent -from domain.event import KnowledgeEvent -from domain.roles import Role -from domain.communication import Message -from infrastructure.in_memory_backend import InMemoryGraphBackend -from application.event_bus import EventBus -from application.commands.base import CommandBus -from infrastructure.communication.memory_channel import InMemoryCommunicationChannel +from 
application.agents.knowledge_manager.agent import KnowledgeManagerAgent # noqa: E402 +from domain.event import KnowledgeEvent # noqa: E402 +from domain.roles import Role # noqa: E402 +from domain.communication import Message # noqa: E402 +from infrastructure.in_memory_backend import InMemoryGraphBackend # noqa: E402 +from application.event_bus import EventBus # noqa: E402 +from application.commands.base import CommandBus # noqa: E402 +from infrastructure.communication.memory_channel import InMemoryCommunicationChannel # noqa: E402 class TestKnowledgeManagerAgent(unittest.IsolatedAsyncioTestCase): From 721a611b8534f13290a0b92b0afa7481619db60b Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 19 Feb 2026 02:51:27 +0000 Subject: [PATCH 3/4] Add .venv/ to .gitignore Virtual environment files should not be tracked in version control. https://claude.ai/code/session_01JAbMU7GmooaRYsKsyPRdrc --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 08643f310..e33c7b276 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,10 @@ tests/__pycache__/ *.swp *.swo +# Virtual environment +.venv/ +venv/ + # Python *.egg-info/ .eggs/ From 60aea8026a28c671566a3b8d698808dc1f8165de Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 19 Feb 2026 02:51:51 +0000 Subject: [PATCH 4/4] Remove .venv/ from git tracking These files are now properly gitignored. 
https://claude.ai/code/session_01JAbMU7GmooaRYsKsyPRdrc --- .venv/.gitignore | 1 - .venv/CACHEDIR.TAG | 1 - .venv/bin/activate | 130 ------------------------------------- .venv/bin/activate.bat | 71 -------------------- .venv/bin/activate.csh | 76 ---------------------- .venv/bin/activate.fish | 124 ----------------------------------- .venv/bin/activate.nu | 117 --------------------------------- .venv/bin/activate.ps1 | 82 ----------------------- .venv/bin/activate_this.py | 59 ----------------- .venv/bin/deactivate.bat | 39 ----------- .venv/bin/dotenv | 10 --- .venv/bin/f2py | 10 --- .venv/bin/normalizer | 10 --- .venv/bin/numpy-config | 10 --- .venv/bin/pydoc.bat | 22 ------- .venv/bin/pygmentize | 10 --- .venv/bin/python | 1 - .venv/bin/python3 | 1 - .venv/pyvenv.cfg | 6 -- 19 files changed, 780 deletions(-) delete mode 100644 .venv/.gitignore delete mode 100644 .venv/CACHEDIR.TAG delete mode 100644 .venv/bin/activate delete mode 100644 .venv/bin/activate.bat delete mode 100644 .venv/bin/activate.csh delete mode 100644 .venv/bin/activate.fish delete mode 100644 .venv/bin/activate.nu delete mode 100644 .venv/bin/activate.ps1 delete mode 100644 .venv/bin/activate_this.py delete mode 100644 .venv/bin/deactivate.bat delete mode 100755 .venv/bin/dotenv delete mode 100755 .venv/bin/f2py delete mode 100755 .venv/bin/normalizer delete mode 100755 .venv/bin/numpy-config delete mode 100644 .venv/bin/pydoc.bat delete mode 100755 .venv/bin/pygmentize delete mode 120000 .venv/bin/python delete mode 120000 .venv/bin/python3 delete mode 100644 .venv/pyvenv.cfg diff --git a/.venv/.gitignore b/.venv/.gitignore deleted file mode 100644 index f59ec20aa..000000000 --- a/.venv/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* \ No newline at end of file diff --git a/.venv/CACHEDIR.TAG b/.venv/CACHEDIR.TAG deleted file mode 100644 index bc1ecb967..000000000 --- a/.venv/CACHEDIR.TAG +++ /dev/null @@ -1 +0,0 @@ -Signature: 8a477f597d28d172789f06886806bc55 \ No newline at end of file diff 
--git a/.venv/bin/activate b/.venv/bin/activate deleted file mode 100644 index c1986a22c..000000000 --- a/.venv/bin/activate +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 2020-202x The virtualenv developers -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -# This file must be used with "source bin/activate" *from bash* -# you cannot run it directly - -if ! [ -z "${SCRIPT_PATH+_}" ] ; then - _OLD_SCRIPT_PATH="$SCRIPT_PATH" -fi - -# Get script path (only used if environment is relocatable). -if [ -n "${BASH_VERSION:+x}" ] ; then - SCRIPT_PATH="${BASH_SOURCE[0]}" - if [ "$SCRIPT_PATH" = "$0" ]; then - # Only bash has a reasonably robust check for source'dness. 
- echo "You must source this script: \$ source $0" >&2 - exit 33 - fi -elif [ -n "${ZSH_VERSION:+x}" ] ; then - SCRIPT_PATH="${(%):-%x}" -elif [ -n "${KSH_VERSION:+x}" ] ; then - SCRIPT_PATH="${.sh.file}" -fi - -deactivate () { - unset -f pydoc >/dev/null 2>&1 || true - - # reset old environment variables - # ! [ -z ${VAR+_} ] returns true if VAR is declared at all - if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then - PATH="$_OLD_VIRTUAL_PATH" - export PATH - unset _OLD_VIRTUAL_PATH - fi - if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then - PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME" - export PYTHONHOME - unset _OLD_VIRTUAL_PYTHONHOME - fi - - # The hash command must be called to get it to forget past - # commands. Without forgetting past commands the $PATH changes - # we made may not be respected - hash -r 2>/dev/null - - if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then - PS1="$_OLD_VIRTUAL_PS1" - export PS1 - unset _OLD_VIRTUAL_PS1 - fi - - unset VIRTUAL_ENV - unset VIRTUAL_ENV_PROMPT - if [ ! "${1-}" = "nondestructive" ] ; then - # Self destruct! - unset -f deactivate - fi -} - -# unset irrelevant variables -deactivate nondestructive - -VIRTUAL_ENV='/Users/pformoso/Documents/code/Notebooks/.venv' -if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then - VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV") -fi -export VIRTUAL_ENV - -# Unset the `SCRIPT_PATH` variable, now that the `VIRTUAL_ENV` variable -# has been set. This is important for relocatable environments. -if ! [ -z "${_OLD_SCRIPT_PATH+_}" ] ; then - SCRIPT_PATH="$_OLD_SCRIPT_PATH" - export SCRIPT_PATH - unset _OLD_SCRIPT_PATH -else - unset SCRIPT_PATH -fi - -_OLD_VIRTUAL_PATH="$PATH" -PATH="$VIRTUAL_ENV/bin:$PATH" -export PATH - -if [ "xmulti-agent-system" != x ] ; then - VIRTUAL_ENV_PROMPT="multi-agent-system" -else - VIRTUAL_ENV_PROMPT=$(basename "$VIRTUAL_ENV") -fi -export VIRTUAL_ENV_PROMPT - -# unset PYTHONHOME if set -if ! 
[ -z "${PYTHONHOME+_}" ] ; then - _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME" - unset PYTHONHOME -fi - -if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then - _OLD_VIRTUAL_PS1="${PS1-}" - PS1="(${VIRTUAL_ENV_PROMPT}) ${PS1-}" - export PS1 -fi - -# Make sure to unalias pydoc if it's already there -alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true - -pydoc () { - python -m pydoc "$@" -} - -# The hash command must be called to get it to forget past -# commands. Without forgetting past commands the $PATH changes -# we made may not be respected -hash -r 2>/dev/null diff --git a/.venv/bin/activate.bat b/.venv/bin/activate.bat deleted file mode 100644 index e042c586c..000000000 --- a/.venv/bin/activate.bat +++ /dev/null @@ -1,71 +0,0 @@ -@REM Copyright (c) 2020-202x The virtualenv developers -@REM -@REM Permission is hereby granted, free of charge, to any person obtaining -@REM a copy of this software and associated documentation files (the -@REM "Software"), to deal in the Software without restriction, including -@REM without limitation the rights to use, copy, modify, merge, publish, -@REM distribute, sublicense, and/or sell copies of the Software, and to -@REM permit persons to whom the Software is furnished to do so, subject to -@REM the following conditions: -@REM -@REM The above copyright notice and this permission notice shall be -@REM included in all copies or substantial portions of the Software. -@REM -@REM THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -@REM EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -@REM MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -@REM NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -@REM LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -@REM OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -@REM WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -@REM This file is UTF-8 encoded, so we need to update the current code page while executing it -@for /f "tokens=2 delims=:." %%a in ('"%SystemRoot%\System32\chcp.com"') do @set _OLD_CODEPAGE=%%a -@if defined _OLD_CODEPAGE ( - @"%SystemRoot%\System32\chcp.com" 65001 > nul -) - -@for %%i in ("/Users/pformoso/Documents/code/Notebooks/.venv") do @set "VIRTUAL_ENV=%%~fi" - -@set "VIRTUAL_ENV_PROMPT=multi-agent-system" -@if NOT DEFINED VIRTUAL_ENV_PROMPT ( - @for %%d in ("%VIRTUAL_ENV%") do @set "VIRTUAL_ENV_PROMPT=%%~nxd" -) - -@if defined _OLD_VIRTUAL_PROMPT ( - @set "PROMPT=%_OLD_VIRTUAL_PROMPT%" -) else ( - @if not defined PROMPT ( - @set "PROMPT=$P$G" - ) - @if not defined VIRTUAL_ENV_DISABLE_PROMPT ( - @set "_OLD_VIRTUAL_PROMPT=%PROMPT%" - ) -) -@if not defined VIRTUAL_ENV_DISABLE_PROMPT ( - @set "PROMPT=(%VIRTUAL_ENV_PROMPT%) %PROMPT%" -) - -@REM Don't use () to avoid problems with them in %PATH% -@if defined _OLD_VIRTUAL_PYTHONHOME @goto ENDIFVHOME - @set "_OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME%" -:ENDIFVHOME - -@set PYTHONHOME= - -@REM if defined _OLD_VIRTUAL_PATH ( -@if not defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH1 - @set "PATH=%_OLD_VIRTUAL_PATH%" -:ENDIFVPATH1 -@REM ) else ( -@if defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH2 - @set "_OLD_VIRTUAL_PATH=%PATH%" -:ENDIFVPATH2 - -@set "PATH=%VIRTUAL_ENV%\bin;%PATH%" - -:END -@if defined _OLD_CODEPAGE ( - @"%SystemRoot%\System32\chcp.com" %_OLD_CODEPAGE% > nul - @set _OLD_CODEPAGE= -) diff --git a/.venv/bin/activate.csh b/.venv/bin/activate.csh deleted file mode 100644 index 548f5b41d..000000000 --- a/.venv/bin/activate.csh +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2020-202x The virtualenv developers -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, 
and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -# This file must be used with "source bin/activate.csh" *from csh*. -# You cannot run it directly. -# Created by Davide Di Blasi . - -set newline='\ -' - -alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc' - -# Unset irrelevant variables. -deactivate nondestructive - -setenv VIRTUAL_ENV '/Users/pformoso/Documents/code/Notebooks/.venv' - -set _OLD_VIRTUAL_PATH="$PATH:q" -setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q" - - - -if ('multi-agent-system' != "") then - setenv VIRTUAL_ENV_PROMPT 'multi-agent-system' -else - setenv VIRTUAL_ENV_PROMPT "$VIRTUAL_ENV:t:q" -endif - -if ( $?VIRTUAL_ENV_DISABLE_PROMPT ) then - if ( $VIRTUAL_ENV_DISABLE_PROMPT == "" ) then - set do_prompt = "1" - else - set do_prompt = "0" - endif -else - set do_prompt = "1" -endif - -if ( $do_prompt == "1" ) then - # Could be in a non-interactive environment, - # in which case, $prompt is undefined and we wouldn't - # care about the prompt anyway. 
- if ( $?prompt ) then - set _OLD_VIRTUAL_PROMPT="$prompt:q" - if ( "$prompt:q" =~ *"$newline:q"* ) then - : - else - set prompt = '('"$VIRTUAL_ENV_PROMPT:q"') '"$prompt:q" - endif - endif -endif - -unset env_name -unset do_prompt - -alias pydoc python -m pydoc - -rehash diff --git a/.venv/bin/activate.fish b/.venv/bin/activate.fish deleted file mode 100644 index b8049bc5b..000000000 --- a/.venv/bin/activate.fish +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2020-202x The virtualenv developers -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*. -# Do not run it directly. 
- -function _bashify_path -d "Converts a fish path to something bash can recognize" - set fishy_path $argv - set bashy_path $fishy_path[1] - for path_part in $fishy_path[2..-1] - set bashy_path "$bashy_path:$path_part" - end - echo $bashy_path -end - -function _fishify_path -d "Converts a bash path to something fish can recognize" - echo $argv | tr ':' '\n' -end - -function deactivate -d 'Exit virtualenv mode and return to the normal environment.' - # reset old environment variables - if test -n "$_OLD_VIRTUAL_PATH" - # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling - if test (echo $FISH_VERSION | head -c 1) -lt 3 - set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH") - else - set -gx PATH $_OLD_VIRTUAL_PATH - end - set -e _OLD_VIRTUAL_PATH - end - - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME" - set -e _OLD_VIRTUAL_PYTHONHOME - end - - if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - and functions -q _old_fish_prompt - # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`. - set -l fish_function_path - - # Erase virtualenv's `fish_prompt` and restore the original. - functions -e fish_prompt - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt - set -e _OLD_FISH_PROMPT_OVERRIDE - end - - set -e VIRTUAL_ENV - set -e VIRTUAL_ENV_PROMPT - - if test "$argv[1]" != 'nondestructive' - # Self-destruct! - functions -e pydoc - functions -e deactivate - functions -e _bashify_path - functions -e _fishify_path - end -end - -# Unset irrelevant variables. -deactivate nondestructive - -set -gx VIRTUAL_ENV '/Users/pformoso/Documents/code/Notebooks/.venv' - -# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling -if test (echo $FISH_VERSION | head -c 1) -lt 3 - set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH) -else - set -gx _OLD_VIRTUAL_PATH $PATH -end -set -gx PATH "$VIRTUAL_ENV"'/bin' $PATH - -# Prompt override provided? 
-# If not, just use the environment name. -if test -n 'multi-agent-system' - set -gx VIRTUAL_ENV_PROMPT 'multi-agent-system' -else - set -gx VIRTUAL_ENV_PROMPT (basename "$VIRTUAL_ENV") -end - -# Unset `$PYTHONHOME` if set. -if set -q PYTHONHOME - set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME - set -e PYTHONHOME -end - -function pydoc - python -m pydoc $argv -end - -if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # Copy the current `fish_prompt` function as `_old_fish_prompt`. - functions -c fish_prompt _old_fish_prompt - - function fish_prompt - # Run the user's prompt first; it might depend on (pipe)status. - set -l prompt (_old_fish_prompt) - - printf '(%s) ' $VIRTUAL_ENV_PROMPT - - string join -- \n $prompt # handle multi-line prompts - end - - set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" -end diff --git a/.venv/bin/activate.nu b/.venv/bin/activate.nu deleted file mode 100644 index 7dbda2f79..000000000 --- a/.venv/bin/activate.nu +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2020-202x The virtualenv developers -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -# virtualenv activation module -# Activate with `overlay use activate.nu` -# Deactivate with `deactivate`, as usual -# -# To customize the overlay name, you can call `overlay use activate.nu as foo`, -# but then simply `deactivate` won't work because it is just an alias to hide -# the "activate" overlay. You'd need to call `overlay hide foo` manually. - -export-env { - def is-string [x] { - ($x | describe) == 'string' - } - - def has-env [...names] { - $names | each {|n| - $n in $env - } | all {|i| $i == true} - } - - # Emulates a `test -z`, but better as it handles e.g 'false' - def is-env-true [name: string] { - if (has-env $name) { - # Try to parse 'true', '0', '1', and fail if not convertible - let parsed = (do -i { $env | get $name | into bool }) - if ($parsed | describe) == 'bool' { - $parsed - } else { - not ($env | get -i $name | is-empty) - } - } else { - false - } - } - - let virtual_env = '/Users/pformoso/Documents/code/Notebooks/.venv' - let bin = 'bin' - - let is_windows = ($nu.os-info.family) == 'windows' - let path_name = (if (has-env 'Path') { - 'Path' - } else { - 'PATH' - } - ) - - let venv_path = ([$virtual_env $bin] | path join) - let new_path = ($env | get $path_name | prepend $venv_path) - - # If there is no default prompt, then use the env name instead - let virtual_env_prompt = (if ('multi-agent-system' | is-empty) { - ($virtual_env | path basename) - } else { - 'multi-agent-system' - }) - - let new_env = { - $path_name : $new_path - VIRTUAL_ENV : $virtual_env - VIRTUAL_ENV_PROMPT : $virtual_env_prompt - } - - let new_env = (if (is-env-true 'VIRTUAL_ENV_DISABLE_PROMPT') { - $new_env - } else { - # Creating the new prompt for the session - let virtual_prefix = $'(char 
lparen)($virtual_env_prompt)(char rparen) ' - - # Back up the old prompt builder - let old_prompt_command = (if (has-env 'PROMPT_COMMAND') { - $env.PROMPT_COMMAND - } else { - '' - }) - - let new_prompt = (if (has-env 'PROMPT_COMMAND') { - if 'closure' in ($old_prompt_command | describe) { - {|| $'($virtual_prefix)(do $old_prompt_command)' } - } else { - {|| $'($virtual_prefix)($old_prompt_command)' } - } - } else { - {|| $'($virtual_prefix)' } - }) - - $new_env | merge { - PROMPT_COMMAND : $new_prompt - VIRTUAL_PREFIX : $virtual_prefix - } - }) - - # Environment variables that will be loaded as the virtual env - load-env $new_env -} - -export alias pydoc = python -m pydoc -export alias deactivate = overlay hide activate diff --git a/.venv/bin/activate.ps1 b/.venv/bin/activate.ps1 deleted file mode 100644 index e83b9a055..000000000 --- a/.venv/bin/activate.ps1 +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2020-202x The virtualenv developers -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -$script:THIS_PATH = $myinvocation.mycommand.path -$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent - -function global:deactivate([switch] $NonDestructive) { - if (Test-Path variable:_OLD_VIRTUAL_PATH) { - $env:PATH = $variable:_OLD_VIRTUAL_PATH - Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global - } - - if (Test-Path function:_old_virtual_prompt) { - $function:prompt = $function:_old_virtual_prompt - Remove-Item function:\_old_virtual_prompt - } - - if ($env:VIRTUAL_ENV) { - Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue - } - - if ($env:VIRTUAL_ENV_PROMPT) { - Remove-Item env:VIRTUAL_ENV_PROMPT -ErrorAction SilentlyContinue - } - - if (!$NonDestructive) { - # Self destruct! - Remove-Item function:deactivate - Remove-Item function:pydoc - } -} - -function global:pydoc { - python -m pydoc $args -} - -# unset irrelevant variables -deactivate -nondestructive - -$VIRTUAL_ENV = $BASE_DIR -$env:VIRTUAL_ENV = $VIRTUAL_ENV - -if ("multi-agent-system" -ne "") { - $env:VIRTUAL_ENV_PROMPT = "multi-agent-system" -} -else { - $env:VIRTUAL_ENV_PROMPT = $( Split-Path $env:VIRTUAL_ENV -Leaf ) -} - -New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH - -$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH -if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) { - function global:_old_virtual_prompt { - "" - } - $function:_old_virtual_prompt = $function:prompt - - function global:prompt { - # Add the custom prefix to the existing prompt - $previous_prompt_value = & $function:_old_virtual_prompt - ("(" + $env:VIRTUAL_ENV_PROMPT + ") " + $previous_prompt_value) - } -} diff --git a/.venv/bin/activate_this.py b/.venv/bin/activate_this.py deleted file mode 100644 index 95a00435d..000000000 --- 
a/.venv/bin/activate_this.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2020-202x The virtualenv developers -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -""" -Activate virtualenv for current interpreter: - -import runpy -runpy.run_path(this_file) - -This can be used when you must use an existing Python interpreter, not the virtualenv bin/python. 
-""" # noqa: D415 - -from __future__ import annotations - -import os -import site -import sys - -try: - abs_file = os.path.abspath(__file__) -except NameError as exc: - msg = "You must use import runpy; runpy.run_path(this_file)" - raise AssertionError(msg) from exc - -bin_dir = os.path.dirname(abs_file) -base = bin_dir[: -len("bin") - 1] # strip away the bin part from the __file__, plus the path separator - -# prepend bin to PATH (this file is inside the bin directory) -os.environ["PATH"] = os.pathsep.join([bin_dir, *os.environ.get("PATH", "").split(os.pathsep)]) -os.environ["VIRTUAL_ENV"] = base # virtual env is right above bin directory -os.environ["VIRTUAL_ENV_PROMPT"] = "multi-agent-system" or os.path.basename(base) # noqa: SIM222 - -# add the virtual environments libraries to the host python import mechanism -prev_length = len(sys.path) -for lib in "../lib/python3.13/site-packages".split(os.pathsep): - path = os.path.realpath(os.path.join(bin_dir, lib)) - site.addsitedir(path) -sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length] - -sys.real_prefix = sys.prefix -sys.prefix = base diff --git a/.venv/bin/deactivate.bat b/.venv/bin/deactivate.bat deleted file mode 100644 index 07041bc45..000000000 --- a/.venv/bin/deactivate.bat +++ /dev/null @@ -1,39 +0,0 @@ -@REM Copyright (c) 2020-202x The virtualenv developers -@REM -@REM Permission is hereby granted, free of charge, to any person obtaining -@REM a copy of this software and associated documentation files (the -@REM "Software"), to deal in the Software without restriction, including -@REM without limitation the rights to use, copy, modify, merge, publish, -@REM distribute, sublicense, and/or sell copies of the Software, and to -@REM permit persons to whom the Software is furnished to do so, subject to -@REM the following conditions: -@REM -@REM The above copyright notice and this permission notice shall be -@REM included in all copies or substantial portions of the Software. 
-@REM -@REM THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -@REM EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -@REM MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -@REM NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -@REM LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -@REM OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -@REM WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -@set VIRTUAL_ENV= -@set VIRTUAL_ENV_PROMPT= - -@REM Don't use () to avoid problems with them in %PATH% -@if not defined _OLD_VIRTUAL_PROMPT @goto ENDIFVPROMPT - @set "PROMPT=%_OLD_VIRTUAL_PROMPT%" - @set _OLD_VIRTUAL_PROMPT= -:ENDIFVPROMPT - -@if not defined _OLD_VIRTUAL_PYTHONHOME @goto ENDIFVHOME - @set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%" - @set _OLD_VIRTUAL_PYTHONHOME= -:ENDIFVHOME - -@if not defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH - @set "PATH=%_OLD_VIRTUAL_PATH%" - @set _OLD_VIRTUAL_PATH= -:ENDIFVPATH \ No newline at end of file diff --git a/.venv/bin/dotenv b/.venv/bin/dotenv deleted file mode 100755 index cf0abf584..000000000 --- a/.venv/bin/dotenv +++ /dev/null @@ -1,10 +0,0 @@ -#!/Users/pformoso/Documents/code/Notebooks/.venv/bin/python -# -*- coding: utf-8 -*- -import sys -from dotenv.__main__ import cli -if __name__ == "__main__": - if sys.argv[0].endswith("-script.pyw"): - sys.argv[0] = sys.argv[0][:-11] - elif sys.argv[0].endswith(".exe"): - sys.argv[0] = sys.argv[0][:-4] - sys.exit(cli()) diff --git a/.venv/bin/f2py b/.venv/bin/f2py deleted file mode 100755 index e2cfbdf90..000000000 --- a/.venv/bin/f2py +++ /dev/null @@ -1,10 +0,0 @@ -#!/Users/pformoso/Documents/code/Notebooks/.venv/bin/python -# -*- coding: utf-8 -*- -import sys -from numpy.f2py.f2py2e import main -if __name__ == "__main__": - if sys.argv[0].endswith("-script.pyw"): - sys.argv[0] = sys.argv[0][:-11] - elif sys.argv[0].endswith(".exe"): - sys.argv[0] = sys.argv[0][:-4] - 
sys.exit(main()) diff --git a/.venv/bin/normalizer b/.venv/bin/normalizer deleted file mode 100755 index 0ac9309c2..000000000 --- a/.venv/bin/normalizer +++ /dev/null @@ -1,10 +0,0 @@ -#!/Users/pformoso/Documents/code/Notebooks/.venv/bin/python -# -*- coding: utf-8 -*- -import sys -from charset_normalizer import cli -if __name__ == "__main__": - if sys.argv[0].endswith("-script.pyw"): - sys.argv[0] = sys.argv[0][:-11] - elif sys.argv[0].endswith(".exe"): - sys.argv[0] = sys.argv[0][:-4] - sys.exit(cli.cli_detect()) diff --git a/.venv/bin/numpy-config b/.venv/bin/numpy-config deleted file mode 100755 index 8387a9945..000000000 --- a/.venv/bin/numpy-config +++ /dev/null @@ -1,10 +0,0 @@ -#!/Users/pformoso/Documents/code/Notebooks/.venv/bin/python -# -*- coding: utf-8 -*- -import sys -from numpy._configtool import main -if __name__ == "__main__": - if sys.argv[0].endswith("-script.pyw"): - sys.argv[0] = sys.argv[0][:-11] - elif sys.argv[0].endswith(".exe"): - sys.argv[0] = sys.argv[0][:-4] - sys.exit(main()) diff --git a/.venv/bin/pydoc.bat b/.venv/bin/pydoc.bat deleted file mode 100644 index daa20590b..000000000 --- a/.venv/bin/pydoc.bat +++ /dev/null @@ -1,22 +0,0 @@ -@REM Copyright (c) 2020-202x The virtualenv developers -@REM -@REM Permission is hereby granted, free of charge, to any person obtaining -@REM a copy of this software and associated documentation files (the -@REM "Software"), to deal in the Software without restriction, including -@REM without limitation the rights to use, copy, modify, merge, publish, -@REM distribute, sublicense, and/or sell copies of the Software, and to -@REM permit persons to whom the Software is furnished to do so, subject to -@REM the following conditions: -@REM -@REM The above copyright notice and this permission notice shall be -@REM included in all copies or substantial portions of the Software. 
-@REM -@REM THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -@REM EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -@REM MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -@REM NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -@REM LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -@REM OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -@REM WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -python.exe -m pydoc %* \ No newline at end of file diff --git a/.venv/bin/pygmentize b/.venv/bin/pygmentize deleted file mode 100755 index 5bc6c09cd..000000000 --- a/.venv/bin/pygmentize +++ /dev/null @@ -1,10 +0,0 @@ -#!/Users/pformoso/Documents/code/Notebooks/.venv/bin/python -# -*- coding: utf-8 -*- -import sys -from pygments.cmdline import main -if __name__ == "__main__": - if sys.argv[0].endswith("-script.pyw"): - sys.argv[0] = sys.argv[0][:-11] - elif sys.argv[0].endswith(".exe"): - sys.argv[0] = sys.argv[0][:-4] - sys.exit(main()) diff --git a/.venv/bin/python b/.venv/bin/python deleted file mode 120000 index 52c4bba06..000000000 --- a/.venv/bin/python +++ /dev/null @@ -1 +0,0 @@ -/Users/pformoso/.local/share/uv/python/cpython-3.13.5-macos-aarch64-none/bin/python3.13 \ No newline at end of file diff --git a/.venv/bin/python3 b/.venv/bin/python3 deleted file mode 120000 index d8654aa0e..000000000 --- a/.venv/bin/python3 +++ /dev/null @@ -1 +0,0 @@ -python \ No newline at end of file diff --git a/.venv/pyvenv.cfg b/.venv/pyvenv.cfg deleted file mode 100644 index 7d08189fb..000000000 --- a/.venv/pyvenv.cfg +++ /dev/null @@ -1,6 +0,0 @@ -home = /Users/pformoso/.local/share/uv/python/cpython-3.13.5-macos-aarch64-none/bin -implementation = CPython -uv = 0.7.21 -version_info = 3.13.5 -include-system-site-packages = false -prompt = multi-agent-system