diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c04c345..81a47a3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,12 +35,10 @@ jobs: - uses: actions/setup-python@v5 with: python-version: "3.12" - cache: "pip" - cache-dependency-path: memorylayer-core-python/pyproject.toml - - name: Install package with dev and local extras + - name: Install package with dev and context extras working-directory: memorylayer-core-python - run: pip install -e ".[dev,local]" + run: pip install -e ".[dev,context]" - name: Lint with ruff working-directory: memorylayer-core-python @@ -52,7 +50,7 @@ jobs: - name: Run tests working-directory: memorylayer-core-python - run: pytest tests/ -m "not slow and not integration and not llm and not llm_quality" + run: pytest tests/ -m "not slow and not integration and not llm and not llm_quality" -x # ────────────────────────────────────────────────────────────────── # Python SDK (memorylayer-sdk-python) @@ -66,8 +64,6 @@ jobs: - uses: actions/setup-python@v5 with: python-version: "3.12" - cache: "pip" - cache-dependency-path: memorylayer-sdk-python/pyproject.toml - name: Install package with dev extras working-directory: memorylayer-sdk-python @@ -83,13 +79,39 @@ jobs: - name: Run tests working-directory: memorylayer-sdk-python - run: pytest tests/ + run: pytest tests/ -x # ────────────────────────────────────────────────────────────────── # TypeScript packages # ────────────────────────────────────────────────────────────────── + build-typescript-sdk: + name: "TypeScript: memorylayer-sdk (build)" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-node@v4 + with: + node-version: "22" + + - name: Install dependencies + working-directory: memorylayer-sdk-typescript + run: npm ci + + - name: Build + working-directory: memorylayer-sdk-typescript + run: npm run build + + - name: Upload SDK build artifacts + uses: actions/upload-artifact@v4 + with: + name: 
memorylayer-sdk-dist + path: memorylayer-sdk-typescript/dist/ + retention-days: 1 + test-typescript: name: "TypeScript: ${{ matrix.package.name }}" + needs: build-typescript-sdk runs-on: ubuntu-latest strategy: fail-fast: false @@ -102,13 +124,17 @@ jobs: - uses: actions/setup-node@v4 with: - node-version: "20" - cache: "npm" - cache-dependency-path: ${{ matrix.package.dir }}/package-lock.json + node-version: "22" + + - name: Download SDK build artifacts + uses: actions/download-artifact@v4 + with: + name: memorylayer-sdk-dist + path: memorylayer-sdk-typescript/dist/ - name: Install dependencies working-directory: ${{ matrix.package.dir }} - run: npm install + run: npm ci - name: Build working-directory: ${{ matrix.package.dir }} diff --git a/.gitignore b/.gitignore index 9fd88ce..db5bba5 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,4 @@ node-modules/ .venv/ __pycache__/ *.pyc -uv.lock -package-lock.json \ No newline at end of file +uv.lock \ No newline at end of file diff --git a/memorylayer-cc-plugin/package-lock.json b/memorylayer-cc-plugin/package-lock.json new file mode 100644 index 0000000..dfc9519 --- /dev/null +++ b/memorylayer-cc-plugin/package-lock.json @@ -0,0 +1,81 @@ +{ + "name": "@scitrera/memorylayer-cc-plugin", + "version": "0.0.3", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@scitrera/memorylayer-cc-plugin", + "version": "0.0.3", + "license": "Apache-2.0", + "dependencies": { + "@scitrera/memorylayer-mcp-server": "file:../memorylayer-mcp-typescript" + }, + "bin": { + "memorylayer-hook": "dist/bin/memorylayer-hook.js" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "typescript": "^5.3.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "../memorylayer-mcp-typescript": { + "name": "@scitrera/memorylayer-mcp-server", + "version": "0.0.4", + "license": "Apache-2.0", + "dependencies": { + "@modelcontextprotocol/sdk": "^1.26.0", + "@scitrera/memorylayer-sdk": "file:../memorylayer-sdk-typescript" + }, 
+ "bin": { + "memorylayer-mcp": "dist/bin/memorylayer-mcp.js" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "typescript": "^5.9.3", + "vitest": "^4.0.18" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@scitrera/memorylayer-mcp-server": { + "resolved": "../memorylayer-mcp-typescript", + "link": true + }, + "node_modules/@types/node": { + "version": "20.19.33", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.33.tgz", + "integrity": "sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + } + } +} diff --git a/memorylayer-core-python/pyproject.toml b/memorylayer-core-python/pyproject.toml index 799793d..d673f6a 100644 --- a/memorylayer-core-python/pyproject.toml +++ b/memorylayer-core-python/pyproject.toml @@ -112,6 +112,15 @@ target-version = "py312" [tool.ruff.lint] select = ["E", "F", "I", "N", "W", "UP"] +ignore = [ + "E402", # module-level imports not at top — lazy imports used to avoid circular deps + "E501", # line too long — handled by formatter, some long strings intentional + "UP042", # str+Enum inheritance — StrEnum requires Python 3.11+ migration + "UP031", # printf-style formatting — used intentionally 
in logging + "F841", # unused variables — some intentional in batch/pipeline patterns + "F821", # undefined name — forward references and dynamic imports + "N806", # variable in function should be lowercase — established naming conventions +] [tool.pytest.ini_options] asyncio_mode = "auto" diff --git a/memorylayer-core-python/src/memorylayer_server/api/__init__.py b/memorylayer-core-python/src/memorylayer_server/api/__init__.py index 656461c..52cabf9 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/api/__init__.py @@ -1,3 +1,3 @@ """API routers for MemoryLayer.ai.""" -EXT_MULTI_API_ROUTERS = 'memorylayer-server-api-routers' +EXT_MULTI_API_ROUTERS = "memorylayer-server-api-routers" diff --git a/memorylayer-core-python/src/memorylayer_server/api/health.py b/memorylayer-core-python/src/memorylayer_server/api/health.py index d52105e..8898c76 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/health.py +++ b/memorylayer-core-python/src/memorylayer_server/api/health.py @@ -1,20 +1,19 @@ """Health check endpoints for MemoryLayer.ai API.""" -import logging -from typing import Dict +import logging -from fastapi import APIRouter, status, Depends +from fastapi import APIRouter, Depends, status from fastapi.responses import JSONResponse from scitrera_app_framework import Plugin, Variables from ..lifecycle.fastapi import get_logger from . import EXT_MULTI_API_ROUTERS -router = APIRouter(tags=['health']) +router = APIRouter(tags=["health"]) @router.get("/health") -async def health_check() -> Dict[str, str]: +async def health_check() -> dict[str, str]: """ Basic health check endpoint. 
@@ -25,7 +24,9 @@ async def health_check() -> Dict[str, str]: @router.get("/health/ready") -async def readiness_check(logger: logging.Logger = Depends(get_logger), ) -> JSONResponse: +async def readiness_check( + logger: logging.Logger = Depends(get_logger), +) -> JSONResponse: """ Readiness check endpoint verifying database and cache connectivity. @@ -40,6 +41,7 @@ async def readiness_check(logger: logging.Logger = Depends(get_logger), ) -> JSO # Check database connectivity try: from memorylayer_server.services.storage import get_storage_backend + storage = get_storage_backend() is_healthy = await storage.health_check() checks["services"]["database"] = "connected" if is_healthy else "disconnected" @@ -53,11 +55,7 @@ async def readiness_check(logger: logging.Logger = Depends(get_logger), ) -> JSO # Cache is optional and not yet configured via plugin checks["services"]["cache"] = "not_configured" - status_code = ( - status.HTTP_200_OK - if checks["status"] == "ready" - else status.HTTP_503_SERVICE_UNAVAILABLE - ) + status_code = status.HTTP_200_OK if checks["status"] == "ready" else status.HTTP_503_SERVICE_UNAVAILABLE return JSONResponse(content=checks, status_code=status_code) diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/associations.py b/memorylayer-core-python/src/memorylayer_server/api/v1/associations.py index dca6cd2..dc2a88c 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/associations.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/associations.py @@ -6,36 +6,38 @@ - GET /v1/memories/{memory_id}/associations - Get associations - POST /v1/memories/{memory_id}/traverse - Graph traversal from memory """ + import logging -from fastapi import APIRouter, HTTPException, Depends, Request, status +from fastapi import APIRouter, Depends, HTTPException, Request, status from scitrera_app_framework import Plugin, Variables -from .. 
import EXT_MULTI_API_ROUTERS +from memorylayer_server.lifecycle.fastapi import get_logger, get_variables_dep from ...models.association import AssociateInput -from memorylayer_server.lifecycle.fastapi import get_logger, get_variables_dep from ...services.association import AssociationService +from ...services.audit import AuditEvent, AuditService from ...services.authentication import AuthenticationService from ...services.authorization import AuthorizationService +from .. import EXT_MULTI_API_ROUTERS +from .deps import get_audit_service, get_auth_service, get_authz_service from .schemas import ( AssociationCreateRequest, - MemoryTraverseRequest, - AssociationResponse, AssociationListResponse, - GraphQueryResult, + AssociationResponse, ErrorResponse, + GraphQueryResult, + MemoryTraverseRequest, ) -from .deps import get_auth_service, get_authz_service, get_audit_service -from ...services.audit import AuditService, AuditEvent -router = APIRouter(prefix='/v1', tags=["associations"]) +router = APIRouter(prefix="/v1", tags=["associations"]) # Dependencies for services async def get_association_service(v: Variables = Depends(get_variables_dep)) -> AssociationService: """Get association service instance from dependency injection.""" from ...services.association import get_association_service as _get_association_service + return _get_association_service(v) @@ -52,14 +54,14 @@ async def get_association_service(v: Variables = Depends(get_variables_dep)) -> }, ) async def create_association( - http_request: Request, - memory_id: str, - request: AssociationCreateRequest, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - association_service: AssociationService = Depends(get_association_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + memory_id: str, + request: AssociationCreateRequest, + 
auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + association_service: AssociationService = Depends(get_association_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> AssociationResponse: """ Create a typed relationship between two memories. @@ -80,16 +82,9 @@ async def create_association( try: # Build request context and check authorization ctx = await auth_service.build_context(http_request, request) - await authz_service.require_authorization( - ctx, "associations", "create", workspace_id=ctx.workspace_id - ) + await authz_service.require_authorization(ctx, "associations", "create", workspace_id=ctx.workspace_id) - logger.info( - "Creating association: %s -[%s]-> %s", - memory_id, - request.relationship, - request.target_id - ) + logger.info("Creating association: %s -[%s]-> %s", memory_id, request.relationship, request.target_id) # Convert request to domain input associate_input = AssociateInput( @@ -108,38 +103,31 @@ async def create_association( logger.info("Created association: %s", association.id) try: - await audit_service.record(AuditEvent( - event_type="association", - action="create", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="association", - resource_id=association.id, - )) + await audit_service.record( + AuditEvent( + event_type="association", + action="create", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="association", + resource_id=association.id, + ) + ) except Exception: logger.debug("Audit record failed for association create") return AssociationResponse(association=association) except ValueError as e: logger.warning("Invalid association request: %s", e) - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=str(e) - ) + raise 
HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) except Exception as e: # Check if it's a "not found" error if "not found" in str(e).lower(): logger.warning("Association source or target not found: %s -> %s: %s", memory_id, request.target_id, e) - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Memory not found: {memory_id} or {request.target_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Memory not found: {memory_id} or {request.target_id}") logger.error("Failed to create association: %s", e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to create association" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create association") @router.get( @@ -153,15 +141,15 @@ async def create_association( }, ) async def get_associations( - http_request: Request, - memory_id: str, - relationships: str | None = None, - direction: str = "both", - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - association_service: AssociationService = Depends(get_association_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + memory_id: str, + relationships: str | None = None, + direction: str = "both", + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + association_service: AssociationService = Depends(get_association_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> AssociationListResponse: """ Get all associations for a memory. 
@@ -183,24 +171,14 @@ async def get_associations( try: # Build request context and check authorization ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "associations", "read", workspace_id=ctx.workspace_id - ) + await authz_service.require_authorization(ctx, "associations", "read", workspace_id=ctx.workspace_id) - logger.debug( - "Getting associations for memory: %s, direction: %s", - memory_id, - direction - ) + logger.debug("Getting associations for memory: %s, direction: %s", memory_id, direction) # Parse relationship types if provided relationship_types = None if relationships: - relationship_types = [ - rel.strip().upper() - for rel in relationships.split(",") - if rel.strip() - ] + relationship_types = [rel.strip().upper() for rel in relationships.split(",") if rel.strip()] # Get associations associations = await association_service.get_related( @@ -212,41 +190,29 @@ async def get_associations( logger.debug("Found %d associations for memory: %s", len(associations), memory_id) try: - await audit_service.record(AuditEvent( - event_type="association", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="association", - resource_id=memory_id, - )) + await audit_service.record( + AuditEvent( + event_type="association", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="association", + resource_id=memory_id, + ) + ) except Exception: logger.debug("Audit record failed for association read") - return AssociationListResponse( - associations=associations, - total_count=len(associations) - ) + return AssociationListResponse(associations=associations, total_count=len(associations)) except HTTPException: raise except ValueError as e: logger.warning("Invalid association query: %s", e) - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=str(e) - ) + raise 
HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) except Exception as e: - logger.error( - "Failed to get associations for memory %s: %s", - memory_id, - e, - exc_info=True - ) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to retrieve associations" - ) + logger.error("Failed to get associations for memory %s: %s", memory_id, e, exc_info=True) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve associations") @router.post( @@ -261,14 +227,14 @@ async def get_associations( }, ) async def traverse_from_memory( - http_request: Request, - memory_id: str, - request: MemoryTraverseRequest, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - association_service: AssociationService = Depends(get_association_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + memory_id: str, + request: MemoryTraverseRequest, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + association_service: AssociationService = Depends(get_association_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> GraphQueryResult: """ Traverse memory graph starting from a specific memory. 
@@ -294,16 +260,9 @@ async def traverse_from_memory( try: # Build request context and check authorization ctx = await auth_service.build_context(http_request, request) - await authz_service.require_authorization( - ctx, "associations", "read", workspace_id=ctx.workspace_id - ) + await authz_service.require_authorization(ctx, "associations", "read", workspace_id=ctx.workspace_id) - logger.info( - "Traversing graph from memory: %s, max_depth: %d, direction: %s", - memory_id, - request.max_depth, - request.direction - ) + logger.info("Traversing graph from memory: %s, max_depth: %d, direction: %s", memory_id, request.max_depth, request.direction) # Perform traversal via storage backend result = await association_service.storage.traverse_graph( @@ -314,44 +273,33 @@ async def traverse_from_memory( direction=request.direction, ) - logger.info( - "Graph traversal found %d paths, %d unique nodes", - result.total_paths, - len(result.unique_nodes) - ) + logger.info("Graph traversal found %d paths, %d unique nodes", result.total_paths, len(result.unique_nodes)) try: - await audit_service.record(AuditEvent( - event_type="association", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="association", - resource_id=memory_id, - )) + await audit_service.record( + AuditEvent( + event_type="association", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="association", + resource_id=memory_id, + ) + ) except Exception: logger.debug("Audit record failed for association traverse") return result except ValueError as e: logger.warning("Invalid graph traversal request: %s", e) - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=str(e) - ) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) except Exception as e: # Check if it's a "not found" error if "not found" in str(e).lower(): - raise HTTPException( - 
status_code=status.HTTP_404_NOT_FOUND, - detail=f"Memory not found: {memory_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Memory not found: {memory_id}") logger.error("Failed to traverse graph from memory %s: %s", memory_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to traverse graph" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to traverse graph") class AssociationsAPIPlugin(Plugin): diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/audit.py b/memorylayer-core-python/src/memorylayer_server/api/v1/audit.py index e9dd4b0..3824c95 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/audit.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/audit.py @@ -8,19 +8,17 @@ import logging from datetime import datetime -from typing import Optional -from fastapi import APIRouter, HTTPException, Depends, Request, Query, status +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status from pydantic import BaseModel from scitrera_app_framework import Plugin, Variables -from .. import EXT_MULTI_API_ROUTERS from ...lifecycle.fastapi import get_logger from ...services.audit import AuditService -from ...services.authentication import AuthenticationService, AuthenticationError +from ...services.authentication import AuthenticationError, AuthenticationService from ...services.authorization import AuthorizationService - -from .deps import get_auth_service, get_authz_service, get_audit_service +from .. 
import EXT_MULTI_API_ROUTERS +from .deps import get_audit_service, get_auth_service, get_authz_service from .schemas import ErrorResponse router = APIRouter(prefix="/v1/audit", tags=["audit"]) @@ -30,6 +28,7 @@ # Response schemas # --------------------------------------------------------------------------- + class AuditEventResponse(BaseModel): """Response schema for a single audit event.""" @@ -37,10 +36,10 @@ class AuditEventResponse(BaseModel): event_type: str action: str tenant_id: str - workspace_id: Optional[str] = None - user_id: Optional[str] = None - resource_type: Optional[str] = None - resource_id: Optional[str] = None + workspace_id: str | None = None + user_id: str | None = None + resource_type: str | None = None + resource_id: str | None = None metadata: dict = {} timestamp: datetime @@ -56,6 +55,7 @@ class AuditEventsListResponse(BaseModel): # Endpoints # --------------------------------------------------------------------------- + @router.get( "/events", response_model=AuditEventsListResponse, @@ -68,9 +68,9 @@ class AuditEventsListResponse(BaseModel): ) async def query_audit_events( http_request: Request, - workspace_id: Optional[str] = Query(None, description="Filter by workspace ID"), - event_type: Optional[str] = Query(None, description="Filter by event type"), - since: Optional[str] = Query(None, description="Return events at or after this ISO datetime"), + workspace_id: str | None = Query(None, description="Filter by workspace ID"), + event_type: str | None = Query(None, description="Filter by event type"), + since: str | None = Query(None, description="Return events at or after this ISO datetime"), limit: int = Query(100, ge=1, le=1000, description="Maximum events to return"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), @@ -102,7 +102,7 @@ async def query_audit_events( ctx = await auth_service.build_context(http_request, None) await 
authz_service.require_authorization(ctx, "admin", "read") - since_dt: Optional[datetime] = None + since_dt: datetime | None = None if since is not None: try: since_dt = datetime.fromisoformat(since) @@ -114,7 +114,11 @@ async def query_audit_events( logger.info( "Querying audit events: tenant=%s, workspace=%s, event_type=%s, since=%s, limit=%d", - ctx.tenant_id, workspace_id, event_type, since_dt, limit, + ctx.tenant_id, + workspace_id, + event_type, + since_dt, + limit, ) events = await audit_service.query( @@ -170,8 +174,8 @@ async def query_audit_events( ) async def audit_events_summary( http_request: Request, - workspace_id: Optional[str] = Query(None, description="Filter by workspace ID"), - since: Optional[str] = Query(None, description="Return events at or after this ISO datetime"), + workspace_id: str | None = Query(None, description="Filter by workspace ID"), + since: str | None = Query(None, description="Return events at or after this ISO datetime"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), audit_service: AuditService = Depends(get_audit_service), @@ -200,7 +204,7 @@ async def audit_events_summary( ctx = await auth_service.build_context(http_request, None) await authz_service.require_authorization(ctx, "admin", "read") - since_dt: Optional[datetime] = None + since_dt: datetime | None = None if since is not None: try: since_dt = datetime.fromisoformat(since) @@ -212,7 +216,9 @@ async def audit_events_summary( logger.info( "Querying audit summary: tenant=%s, workspace=%s, since=%s", - ctx.tenant_id, workspace_id, since_dt, + ctx.tenant_id, + workspace_id, + since_dt, ) # Fetch up to 10 000 events to build the summary; this is an admin-only @@ -250,6 +256,7 @@ async def audit_events_summary( # Plugin registration # --------------------------------------------------------------------------- + class AuditAPIPlugin(Plugin): """Plugin to register audit query routes.""" diff 
--git a/memorylayer-core-python/src/memorylayer_server/api/v1/chat.py b/memorylayer-core-python/src/memorylayer_server/api/v1/chat.py index 8364b5b..dbfe512 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/chat.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/chat.py @@ -12,37 +12,38 @@ - GET /v1/threads/{id}/messages - Get messages (paginated) - POST /v1/threads/{id}/decompose - Trigger memory decomposition """ + import logging -from typing import Optional -from fastapi import APIRouter, HTTPException, Depends, Request, Query, status +from fastapi import APIRouter, Depends, HTTPException, Query, Request, status from scitrera_app_framework import Plugin, Variables -from .. import EXT_MULTI_API_ROUTERS from memorylayer_server.lifecycle.fastapi import get_logger -from .schemas import ( - ThreadCreateRequest, - ThreadUpdateRequest, - ThreadResponse, - ThreadListResponse, - MessagesAppendRequest, - MessagesAppendResponse, - MessageListResponse, - ThreadWithMessagesResponse, - ThreadDecomposeResponse, - ErrorResponse, -) + from ...models.chat import ( - CreateThreadInput, AppendMessagesInput, - MessageInput, ChatMessageContent, + CreateThreadInput, + MessageInput, ) -from ...services.chat import ChatService +from ...services.audit import AuditEvent, AuditService from ...services.authentication import AuthenticationService from ...services.authorization import AuthorizationService -from .deps import get_auth_service, get_authz_service, get_chat_service, get_audit_service -from ...services.audit import AuditService, AuditEvent +from ...services.chat import ChatService +from .. 
import EXT_MULTI_API_ROUTERS +from .deps import get_audit_service, get_auth_service, get_authz_service, get_chat_service +from .schemas import ( + ErrorResponse, + MessageListResponse, + MessagesAppendRequest, + MessagesAppendResponse, + ThreadCreateRequest, + ThreadDecomposeResponse, + ThreadListResponse, + ThreadResponse, + ThreadUpdateRequest, + ThreadWithMessagesResponse, +) router = APIRouter(prefix="/v1/threads", tags=["chat"]) @@ -58,21 +59,19 @@ }, ) async def create_thread( - http_request: Request, - request: ThreadCreateRequest, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + request: ThreadCreateRequest, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ThreadResponse: """Create a new chat thread.""" try: ctx = await auth_service.build_context(http_request, request) workspace_id = request.workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "write", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "write", workspace_id=workspace_id) input_data = CreateThreadInput( thread_id=request.thread_id, @@ -92,15 +91,17 @@ async def create_thread( ) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="create", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="thread", - resource_id=thread.thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="create", + 
tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="thread", + resource_id=thread.thread_id, + ) + ) except Exception: logger.debug("Audit record failed for thread create") return ThreadResponse(thread=thread) @@ -124,24 +125,22 @@ async def create_thread( }, ) async def list_threads( - http_request: Request, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - user_id: Optional[str] = Query(None, description="User filter"), - limit: int = Query(50, ge=1, le=200, description="Max threads to return"), - offset: int = Query(0, ge=0, description="Pagination offset"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str | None = Query(None, description="Workspace filter"), + user_id: str | None = Query(None, description="User filter"), + limit: int = Query(50, ge=1, le=200, description="Max threads to return"), + offset: int = Query(0, ge=0, description="Pagination offset"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ThreadListResponse: """List chat threads, optionally filtered by workspace and user.""" try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "read", workspace_id=workspace_id) threads = await chat_service.list_threads( 
workspace_id=workspace_id, @@ -151,14 +150,16 @@ async def list_threads( ) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="thread", - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="thread", + ) + ) except Exception: logger.debug("Audit record failed for thread list") return ThreadListResponse(threads=threads, total_count=len(threads)) @@ -181,22 +182,20 @@ async def list_threads( }, ) async def get_thread( - http_request: Request, - thread_id: str, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + thread_id: str, + workspace_id: str | None = Query(None, description="Workspace filter"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ThreadResponse: """Get thread metadata by ID.""" try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "read", workspace_id=workspace_id) thread = await chat_service.get_thread(workspace_id, thread_id) if not thread: @@ -206,15 +205,17 @@ async def get_thread( ) 
try: - await audit_service.record(AuditEvent( - event_type="chat", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="thread", - resource_id=thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="thread", + resource_id=thread_id, + ) + ) except Exception: logger.debug("Audit record failed for thread read") return ThreadResponse(thread=thread) @@ -239,23 +240,21 @@ async def get_thread( }, ) async def update_thread( - http_request: Request, - thread_id: str, - request: ThreadUpdateRequest, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + thread_id: str, + request: ThreadUpdateRequest, + workspace_id: str | None = Query(None, description="Workspace filter"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ThreadResponse: """Update a thread (e.g. 
rename).""" try: ctx = await auth_service.build_context(http_request, request) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "write", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "write", workspace_id=workspace_id) updates = request.model_dump(exclude_none=True) if not updates: @@ -276,15 +275,17 @@ async def update_thread( ) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="update", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="thread", - resource_id=thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="update", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="thread", + resource_id=thread_id, + ) + ) except Exception: logger.debug("Audit record failed for thread update") return ThreadResponse(thread=thread) @@ -307,25 +308,23 @@ async def update_thread( }, ) async def get_thread_full( - http_request: Request, - thread_id: str, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - limit: int = Query(100, ge=1, le=1000, description="Max messages to return"), - offset: int = Query(0, ge=0, description="Message pagination offset"), - order: str = Query("asc", pattern="^(asc|desc)$", description="Message order"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + thread_id: str, + workspace_id: str | None = Query(None, description="Workspace filter"), + limit: int = Query(100, ge=1, le=1000, description="Max messages to return"), + offset: int = Query(0, ge=0, description="Message pagination offset"), + 
order: str = Query("asc", pattern="^(asc|desc)$", description="Message order"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ThreadWithMessagesResponse: """Get thread with all messages inlined (paginated).""" try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "read", workspace_id=workspace_id) result = await chat_service.get_thread_with_messages( workspace_id=workspace_id, @@ -341,15 +340,17 @@ async def get_thread_full( ) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="thread", - resource_id=thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="thread", + resource_id=thread_id, + ) + ) except Exception: logger.debug("Audit record failed for thread full read") return ThreadWithMessagesResponse( @@ -376,22 +377,20 @@ async def get_thread_full( }, ) async def delete_thread( - http_request: Request, - thread_id: str, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + thread_id: str, 
+ workspace_id: str | None = Query(None, description="Workspace filter"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ): """Delete a thread and all its messages.""" try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "write", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "write", workspace_id=workspace_id) deleted = await chat_service.delete_thread(workspace_id, thread_id) if not deleted: @@ -401,15 +400,17 @@ async def delete_thread( ) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="delete", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="thread", - resource_id=thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="delete", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="thread", + resource_id=thread_id, + ) + ) except Exception: logger.debug("Audit record failed for thread delete") @@ -432,38 +433,35 @@ async def delete_thread( }, ) async def append_messages( - http_request: Request, - thread_id: str, - request: MessagesAppendRequest, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + thread_id: str, + request: MessagesAppendRequest, + 
workspace_id: str | None = Query(None, description="Workspace filter"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> MessagesAppendResponse: """Append messages to a chat thread.""" try: ctx = await auth_service.build_context(http_request, request) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "write", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "write", workspace_id=workspace_id) # Convert API schema to domain input msg_inputs = [] for msg in request.messages: content = msg.content if isinstance(content, list): - content = [ - ChatMessageContent(**block) if isinstance(block, dict) else block - for block in content - ] - msg_inputs.append(MessageInput( - role=msg.role, - content=content, - metadata=msg.metadata, - )) + content = [ChatMessageContent(**block) if isinstance(block, dict) else block for block in content] + msg_inputs.append( + MessageInput( + role=msg.role, + content=content, + metadata=msg.metadata, + ) + ) input_data = AppendMessagesInput(messages=msg_inputs) @@ -478,15 +476,17 @@ async def append_messages( new_count = thread.message_count if thread else len(messages) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="create", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="message", - resource_id=thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="create", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="message", + resource_id=thread_id, + ) + ) except Exception: logger.debug("Audit record failed for message append") return 
MessagesAppendResponse( @@ -515,26 +515,24 @@ async def append_messages( }, ) async def get_messages( - http_request: Request, - thread_id: str, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - limit: int = Query(100, ge=1, le=1000, description="Max messages to return"), - offset: int = Query(0, ge=0, description="Pagination offset"), - after_index: Optional[int] = Query(None, ge=0, description="Get messages after this index"), - order: str = Query("asc", pattern="^(asc|desc)$", description="Message order"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + thread_id: str, + workspace_id: str | None = Query(None, description="Workspace filter"), + limit: int = Query(100, ge=1, le=1000, description="Max messages to return"), + offset: int = Query(0, ge=0, description="Pagination offset"), + after_index: int | None = Query(None, ge=0, description="Get messages after this index"), + order: str = Query("asc", pattern="^(asc|desc)$", description="Message order"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> MessageListResponse: """Get messages from a chat thread with pagination.""" try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "read", workspace_id=workspace_id) # Verify thread exists thread 
= await chat_service.get_thread(workspace_id, thread_id) @@ -554,15 +552,17 @@ async def get_messages( ) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="message", - resource_id=thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="message", + resource_id=thread_id, + ) + ) except Exception: logger.debug("Audit record failed for message read") return MessageListResponse( @@ -589,35 +589,35 @@ async def get_messages( }, ) async def decompose_thread( - http_request: Request, - thread_id: str, - workspace_id: Optional[str] = Query(None, description="Workspace filter"), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - chat_service: ChatService = Depends(get_chat_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + thread_id: str, + workspace_id: str | None = Query(None, description="Workspace filter"), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + chat_service: ChatService = Depends(get_chat_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ThreadDecomposeResponse: """Trigger on-demand memory decomposition for unprocessed messages.""" try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "threads", "write", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "threads", "write", workspace_id=workspace_id) result = await 
chat_service.trigger_decomposition(workspace_id, thread_id) try: - await audit_service.record(AuditEvent( - event_type="chat", - action="create", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="thread", - resource_id=thread_id, - )) + await audit_service.record( + AuditEvent( + event_type="chat", + action="create", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="thread", + resource_id=thread_id, + ) + ) except Exception: logger.debug("Audit record failed for thread decompose") return ThreadDecomposeResponse( diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/context_environment.py b/memorylayer-core-python/src/memorylayer_server/api/v1/context_environment.py index 3347535..609d815 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/context_environment.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/context_environment.py @@ -11,22 +11,34 @@ - GET /v1/context/status - Get sandbox status - DELETE /v1/context/cleanup - Clean up sandbox """ + import logging -from typing import Optional -from fastapi import APIRouter, HTTPException, Depends, Header, Request, status +from fastapi import APIRouter, Depends, Header, HTTPException, Request, status from scitrera_app_framework import Plugin, Variables -from .. import EXT_MULTI_API_ROUTERS from memorylayer_server.lifecycle.fastapi import get_logger, get_variables_dep + +from ...services.audit import AuditEvent, AuditService +from ...services.authentication import AuthenticationError, AuthenticationService +from ...services.authorization import AuthorizationService +from ...services.context_environment import ( + ContextEnvironmentService, +) +from ...services.context_environment import ( + get_context_environment_service as _get_ctx_env_service, +) +from ...services.session import SessionService +from .. 
import EXT_MULTI_API_ROUTERS +from .deps import get_audit_service, get_auth_service, get_authz_service, get_session_service from .schemas import ( ContextExecuteRequest, ContextExecuteResponse, + ContextInjectRequest, + ContextInjectResponse, ContextInspectResponse, ContextLoadRequest, ContextLoadResponse, - ContextInjectRequest, - ContextInjectResponse, ContextQueryRequest, ContextQueryResponse, ContextRLMRequest, @@ -34,15 +46,6 @@ ContextStatusResponse, ErrorResponse, ) -from ...services.context_environment import ( - get_context_environment_service as _get_ctx_env_service, - ContextEnvironmentService, -) -from ...services.session import SessionService -from ...services.authentication import AuthenticationService, AuthenticationError -from ...services.authorization import AuthorizationService -from .deps import get_auth_service, get_authz_service, get_session_service, get_audit_service -from ...services.audit import AuditService, AuditEvent router = APIRouter(prefix="/v1/context", tags=["context-environment"]) @@ -53,7 +56,7 @@ def get_context_env_service(v: Variables = Depends(get_variables_dep)) -> Contex async def _resolve_session_id( - x_session_id: Optional[str], + x_session_id: str | None, session_service: SessionService, logger: logging.Logger, ) -> str: @@ -85,7 +88,7 @@ async def _resolve_session_id( async def execute_code( http_request: Request, request: ContextExecuteRequest, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -111,15 +114,17 @@ async def execute_code( ) try: - await audit_service.record(AuditEvent( - event_type="context", - action="execute", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - 
resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="execute", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context execute") return ContextExecuteResponse(**result) @@ -148,9 +153,9 @@ async def execute_code( ) async def inspect_state( http_request: Request, - variable: Optional[str] = None, + variable: str | None = None, preview_chars: int = 200, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -174,15 +179,17 @@ async def inspect_state( ) try: - await audit_service.record(AuditEvent( - event_type="context", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context inspect") return ContextInspectResponse(**result) @@ -212,7 +219,7 @@ async def inspect_state( async def load_memories( http_request: Request, request: ContextLoadRequest, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = 
Depends(get_context_env_service), @@ -241,15 +248,17 @@ async def load_memories( ) try: - await audit_service.record(AuditEvent( - event_type="context", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context load") return ContextLoadResponse(**result) @@ -279,7 +288,7 @@ async def load_memories( async def inject_value( http_request: Request, request: ContextInjectRequest, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -304,15 +313,17 @@ async def inject_value( ) try: - await audit_service.record(AuditEvent( - event_type="context", - action="write", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="write", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context inject") return ContextInjectResponse(**result) @@ -342,7 +353,7 @@ async def inject_value( async def query_llm( http_request: Request, request: ContextQueryRequest, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: 
AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -368,15 +379,17 @@ async def query_llm( ) try: - await audit_service.record(AuditEvent( - event_type="context", - action="execute", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="execute", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context query") return ContextQueryResponse(**result) @@ -406,7 +419,7 @@ async def query_llm( async def run_rlm( http_request: Request, request: ContextRLMRequest, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -435,15 +448,17 @@ async def run_rlm( ) try: - await audit_service.record(AuditEvent( - event_type="context", - action="execute", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="execute", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context rlm") return ContextRLMResponse(**result) @@ -472,7 +487,7 @@ async def run_rlm( ) async def get_status( http_request: Request, - x_session_id: Optional[str] = 
Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -490,15 +505,17 @@ async def get_status( result = await ctx_env_service.status(session_id) try: - await audit_service.record(AuditEvent( - event_type="context", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context status") return ContextStatusResponse(**result) @@ -527,7 +544,7 @@ async def get_status( ) async def checkpoint_environment( http_request: Request, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -544,15 +561,17 @@ async def checkpoint_environment( logger.info("Context checkpoint for session: %s", session_id) await ctx_env_service.checkpoint(session_id) try: - await audit_service.record(AuditEvent( - event_type="context", - action="write", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="write", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + 
resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context checkpoint") except AuthenticationError as e: @@ -579,7 +598,7 @@ async def checkpoint_environment( ) async def cleanup_environment( http_request: Request, - x_session_id: Optional[str] = Header(None, alias="X-Session-ID"), + x_session_id: str | None = Header(None, alias="X-Session-ID"), auth_service: AuthenticationService = Depends(get_auth_service), authz_service: AuthorizationService = Depends(get_authz_service), ctx_env_service: ContextEnvironmentService = Depends(get_context_env_service), @@ -599,15 +618,17 @@ async def cleanup_environment( await ctx_env_service.cleanup_environment(session_id) try: - await audit_service.record(AuditEvent( - event_type="context", - action="write", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="context", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="context", + action="write", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="context", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for context cleanup") diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/contradictions.py b/memorylayer-core-python/src/memorylayer_server/api/v1/contradictions.py index 96920d6..35fa98d 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/contradictions.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/contradictions.py @@ -5,16 +5,20 @@ - GET /v1/workspaces/{workspace_id}/contradictions - List unresolved contradictions - POST /v1/contradictions/{contradiction_id}/resolve - Resolve a contradiction """ + import logging from fastapi import APIRouter, Depends, HTTPException, Request, status from scitrera_app_framework import Plugin, Variables -from .. 
import EXT_MULTI_API_ROUTERS +from memorylayer_server.lifecycle.fastapi import get_logger, get_variables_dep + +from ...services.audit import AuditEvent, AuditService from ...services.authentication import AuthenticationError, AuthenticationService from ...services.authorization import AuthorizationService from ...services.contradiction import ContradictionService, get_contradiction_service -from memorylayer_server.lifecycle.fastapi import get_logger, get_variables_dep +from .. import EXT_MULTI_API_ROUTERS +from .deps import get_audit_service, get_auth_service, get_authz_service from .schemas import ( ContradictionListResponse, ContradictionResolveRequest, @@ -23,10 +27,8 @@ ContradictionScanResponse, ErrorResponse, ) -from .deps import get_auth_service, get_authz_service, get_audit_service -from ...services.audit import AuditService, AuditEvent -router = APIRouter(prefix='/v1', tags=["contradictions"]) +router = APIRouter(prefix="/v1", tags=["contradictions"]) # Dependencies for services @@ -45,22 +47,20 @@ async def get_contradiction_svc(v: Variables = Depends(get_variables_dep)) -> Co }, ) async def list_contradictions( - http_request: Request, - workspace_id: str, - limit: int = 10, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - contradiction_service: ContradictionService = Depends(get_contradiction_svc), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str, + limit: int = 10, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + contradiction_service: ContradictionService = Depends(get_contradiction_svc), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ContradictionListResponse: """List unresolved contradictions for a 
workspace.""" try: # Build request context and check authorization ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "contradictions", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "contradictions", "read", workspace_id=workspace_id) except AuthenticationError as e: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e)) except HTTPException: @@ -87,23 +87,22 @@ async def list_contradictions( for r in records ] try: - await audit_service.record(AuditEvent( - event_type="contradiction", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="contradiction", - )) + await audit_service.record( + AuditEvent( + event_type="contradiction", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="contradiction", + ) + ) except Exception: logger.debug("Audit record failed for contradiction list") return ContradictionListResponse(contradictions=contradictions, count=len(contradictions)) except Exception as e: logger.error("Failed to list contradictions: %s", e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to list contradictions" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to list contradictions") @router.post( @@ -118,23 +117,21 @@ async def list_contradictions( }, ) async def resolve_contradiction( - http_request: Request, - contradiction_id: str, - request: ContradictionResolveRequest, - workspace_id: str | None = None, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - contradiction_service: ContradictionService = Depends(get_contradiction_svc), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = 
Depends(get_logger), + http_request: Request, + contradiction_id: str, + request: ContradictionResolveRequest, + workspace_id: str | None = None, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + contradiction_service: ContradictionService = Depends(get_contradiction_svc), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ContradictionResponse: """Resolve a contradiction with a chosen strategy.""" try: # Build request context and check authorization ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "contradictions", "write", workspace_id=workspace_id or ctx.workspace_id - ) + await authz_service.require_authorization(ctx, "contradictions", "write", workspace_id=workspace_id or ctx.workspace_id) except AuthenticationError as e: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e)) except HTTPException: @@ -147,15 +144,11 @@ async def resolve_contradiction( valid_resolutions = {"keep_a", "keep_b", "keep_both", "merge"} if request.resolution not in valid_resolutions: raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=f"Invalid resolution. Must be one of: {', '.join(sorted(valid_resolutions))}" + status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid resolution. 
Must be one of: {', '.join(sorted(valid_resolutions))}" ) if request.resolution == "merge" and not request.merged_content: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="merged_content is required when resolution is 'merge'" - ) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="merged_content is required when resolution is 'merge'") try: effective_workspace_id = workspace_id or ctx.workspace_id @@ -167,21 +160,20 @@ async def resolve_contradiction( ) if not record: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Contradiction {contradiction_id} not found" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Contradiction {contradiction_id} not found") try: - await audit_service.record(AuditEvent( - event_type="contradiction", - action="resolve", - tenant_id=ctx.tenant_id, - workspace_id=effective_workspace_id, - user_id=ctx.user_id, - resource_type="contradiction", - resource_id=contradiction_id, - )) + await audit_service.record( + AuditEvent( + event_type="contradiction", + action="resolve", + tenant_id=ctx.tenant_id, + workspace_id=effective_workspace_id, + user_id=ctx.user_id, + resource_type="contradiction", + resource_id=contradiction_id, + ) + ) except Exception: logger.debug("Audit record failed for contradiction resolve") return ContradictionResponse( @@ -200,10 +192,7 @@ async def resolve_contradiction( raise except Exception as e: logger.error("Failed to resolve contradiction %s: %s", contradiction_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to resolve contradiction" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to resolve contradiction") @router.post( @@ -216,21 +205,19 @@ async def resolve_contradiction( }, ) async def scan_workspace_contradictions( - http_request: Request, - workspace_id: str, - request: ContradictionScanRequest = None, - 
auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - contradiction_service: ContradictionService = Depends(get_contradiction_svc), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str, + request: ContradictionScanRequest = None, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + contradiction_service: ContradictionService = Depends(get_contradiction_svc), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> ContradictionScanResponse: """Scan all memories in a workspace for contradictions.""" try: ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "contradictions", "write", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "contradictions", "write", workspace_id=workspace_id) except AuthenticationError as e: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e)) except HTTPException: @@ -242,7 +229,7 @@ async def scan_workspace_contradictions( try: kwargs = {} if request and request.batch_size is not None: - kwargs['batch_size'] = request.batch_size + kwargs["batch_size"] = request.batch_size records = await contradiction_service.scan_workspace(workspace_id, **kwargs) contradictions = [ @@ -261,14 +248,16 @@ async def scan_workspace_contradictions( for r in records ] try: - await audit_service.record(AuditEvent( - event_type="contradiction", - action="scan", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="contradiction", - )) + await audit_service.record( + AuditEvent( + event_type="contradiction", + action="scan", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + 
user_id=ctx.user_id, + resource_type="contradiction", + ) + ) except Exception: logger.debug("Audit record failed for contradiction scan") return ContradictionScanResponse( @@ -278,10 +267,7 @@ async def scan_workspace_contradictions( ) except Exception as e: logger.error("Failed to scan contradictions for workspace %s: %s", workspace_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to scan workspace contradictions" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to scan workspace contradictions") class ContradictionsAPIPlugin(Plugin): diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/deps.py b/memorylayer-core-python/src/memorylayer_server/api/v1/deps.py index 1aea71b..473bd25 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/deps.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/deps.py @@ -6,18 +6,18 @@ from scitrera_app_framework import Variables, get_extension from ...lifecycle.fastapi import get_logger, get_variables_dep -from ...services.tasks import TaskService, EXT_TASK_SERVICE -from ...services.authentication import AuthenticationService, EXT_AUTHENTICATION_SERVICE -from ...services.authorization import AuthorizationService, EXT_AUTHORIZATION_SERVICE -from ...services.session import SessionService, EXT_SESSION_SERVICE -from ...services.workspace import WorkspaceService, EXT_WORKSPACE_SERVICE -from ...services.memory import MemoryService, EXT_MEMORY_SERVICE -from ...services.inference import DefaultInferenceService, EXT_INFERENCE_SERVICE +from ...services.audit import EXT_AUDIT_SERVICE, AuditService +from ...services.authentication import EXT_AUTHENTICATION_SERVICE, AuthenticationService +from ...services.authorization import EXT_AUTHORIZATION_SERVICE, AuthorizationService +from ...services.cache import EXT_CACHE_SERVICE, CacheService +from ...services.chat import EXT_CHAT_SERVICE, ChatService +from 
...services.inference import EXT_INFERENCE_SERVICE, DefaultInferenceService +from ...services.memory import EXT_MEMORY_SERVICE, MemoryService +from ...services.metrics import EXT_METRICS_SERVICE, MetricsService from ...services.reflect import EXT_REFLECT_SERVICE -from ...services.cache import CacheService, EXT_CACHE_SERVICE -from ...services.chat import ChatService, EXT_CHAT_SERVICE -from ...services.audit import AuditService, EXT_AUDIT_SERVICE -from ...services.metrics import MetricsService, EXT_METRICS_SERVICE +from ...services.session import EXT_SESSION_SERVICE, SessionService +from ...services.tasks import EXT_TASK_SERVICE, TaskService +from ...services.workspace import EXT_WORKSPACE_SERVICE, WorkspaceService from ...tasks.session_touch_handler import SESSION_TOUCH_HANDLER_TASK @@ -26,9 +26,9 @@ async def get_task_service(v: Variables = Depends(get_variables_dep)) -> TaskSer async def get_active_session( - http_request: Request, - task_service: TaskService = Depends(get_task_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + task_service: TaskService = Depends(get_task_service), + logger: logging.Logger = Depends(get_logger), ) -> str | None: session_id = http_request.headers.get("X-Session-ID") if not session_id: diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/entities.py b/memorylayer-core-python/src/memorylayer_server/api/v1/entities.py index 04289e5..2597ab9 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/entities.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/entities.py @@ -6,36 +6,38 @@ - GET /v1/entities/{entity_id}/card - Get cached entity profile card - GET /v1/entities/{entity_id}/insights - Get derived insights for an entity """ + import logging -from datetime import datetime, timezone -from typing import Optional +from datetime import UTC, datetime -from fastapi import APIRouter, HTTPException, Depends, Request, status +from fastapi import APIRouter, Depends, 
HTTPException, Request, status from scitrera_app_framework import Plugin, Variables -from .. import EXT_MULTI_API_ROUTERS from memorylayer_server.lifecycle.fastapi import get_logger +from ...models import DetailLevel, ReflectInput +from ...services.audit import AuditEvent, AuditService +from ...services.authentication import AuthenticationService +from ...services.authorization import AuthorizationService +from ...services.cache import CacheService +from ...services.inference import DefaultInferenceService +from ...services.reflect import ReflectService +from .. import EXT_MULTI_API_ROUTERS +from .deps import ( + get_audit_service, + get_auth_service, + get_authz_service, + get_cache_service, + get_inference_service, + get_reflect_service, +) from .schemas import ( + EntityCardResponse, EntityDeriveRequest, EntityDeriveResponse, - EntityCardResponse, EntityInsightsResponse, ErrorResponse, ) -from ...models import ReflectInput, DetailLevel -from ...services.inference import DefaultInferenceService -from ...services.reflect import ReflectService -from ...services.cache import CacheService -from ...services.authentication import AuthenticationService -from ...services.authorization import AuthorizationService -from ...config import DEFAULT_TENANT_ID -from .deps import ( - get_auth_service, get_authz_service, - get_inference_service, get_reflect_service, get_cache_service, - get_audit_service, -) -from ...services.audit import AuditService, AuditEvent router = APIRouter(prefix="/v1/entities", tags=["entities"]) @@ -58,15 +60,15 @@ def _card_cache_key(workspace_id: str, entity_id: str) -> str: }, ) async def derive_entity_insights( - http_request: Request, - entity_id: str, - request: EntityDeriveRequest, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - inference_service: DefaultInferenceService = Depends(get_inference_service), - cache_service: CacheService = 
Depends(get_cache_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + entity_id: str, + request: EntityDeriveRequest, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + inference_service: DefaultInferenceService = Depends(get_inference_service), + cache_service: CacheService = Depends(get_cache_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> EntityDeriveResponse: """ Trigger inference derivation for an entity. @@ -77,14 +79,9 @@ async def derive_entity_insights( try: ctx = await auth_service.build_context(http_request, request) workspace_id = request.workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "entities", "write", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "entities", "write", workspace_id=workspace_id) - logger.info( - "Deriving insights for entity: %s in workspace: %s", - entity_id, workspace_id - ) + logger.info("Deriving insights for entity: %s in workspace: %s", entity_id, workspace_id) result = await inference_service.derive_insights( workspace_id=workspace_id, @@ -98,15 +95,17 @@ async def derive_entity_insights( await cache_service.delete(card_key) try: - await audit_service.record(AuditEvent( - event_type="entity", - action="update", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="entity", - resource_id=entity_id, - )) + await audit_service.record( + AuditEvent( + event_type="entity", + action="update", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="entity", + resource_id=entity_id, + ) + ) except Exception: logger.debug("Audit record failed for entity derive") return EntityDeriveResponse( @@ -122,10 +121,7 @@ async def 
derive_entity_insights( raise except Exception as e: logger.error("Failed to derive insights for entity %s: %s", entity_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to derive entity insights" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to derive entity insights") @router.get( @@ -138,17 +134,17 @@ async def derive_entity_insights( }, ) async def get_entity_card( - http_request: Request, - entity_id: str, - workspace_id: Optional[str] = None, - force_refresh: bool = False, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - reflect_service: ReflectService = Depends(get_reflect_service), - inference_service: DefaultInferenceService = Depends(get_inference_service), - cache_service: CacheService = Depends(get_cache_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + entity_id: str, + workspace_id: str | None = None, + force_refresh: bool = False, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + reflect_service: ReflectService = Depends(get_reflect_service), + inference_service: DefaultInferenceService = Depends(get_inference_service), + cache_service: CacheService = Depends(get_cache_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> EntityCardResponse: """ Get a cached entity profile card. 
@@ -160,9 +156,7 @@ async def get_entity_card( try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "entities", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "entities", "read", workspace_id=workspace_id) card_key = _card_cache_key(workspace_id, entity_id) @@ -197,7 +191,7 @@ async def get_entity_card( limit=20, ) - now = datetime.now(timezone.utc).isoformat() + now = datetime.now(UTC).isoformat() card_data = { "entity_id": entity_id, "workspace_id": workspace_id, @@ -213,15 +207,17 @@ async def get_entity_card( await cache_service.set(card_key, card_data, ttl_seconds=ENTITY_CARD_CACHE_TTL) try: - await audit_service.record(AuditEvent( - event_type="entity", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="entity", - resource_id=entity_id, - )) + await audit_service.record( + AuditEvent( + event_type="entity", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="entity", + resource_id=entity_id, + ) + ) except Exception: logger.debug("Audit record failed for entity card read") return EntityCardResponse(**card_data) @@ -230,10 +226,7 @@ async def get_entity_card( raise except Exception as e: logger.error("Failed to get entity card for %s: %s", entity_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to generate entity card" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to generate entity card") @router.get( @@ -246,16 +239,16 @@ async def get_entity_card( }, ) async def get_entity_insights( - http_request: Request, - entity_id: str, - workspace_id: Optional[str] = None, - observer_id: Optional[str] = None, - limit: int = 20, - auth_service: AuthenticationService = 
Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - inference_service: DefaultInferenceService = Depends(get_inference_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + entity_id: str, + workspace_id: str | None = None, + observer_id: str | None = None, + limit: int = 20, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + inference_service: DefaultInferenceService = Depends(get_inference_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> EntityInsightsResponse: """ Get derived insights for an entity. @@ -266,9 +259,7 @@ async def get_entity_insights( try: ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "entities", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "entities", "read", workspace_id=workspace_id) logger.debug("Getting insights for entity: %s", entity_id) @@ -280,15 +271,17 @@ async def get_entity_insights( ) try: - await audit_service.record(AuditEvent( - event_type="entity", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="entity", - resource_id=entity_id, - )) + await audit_service.record( + AuditEvent( + event_type="entity", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="entity", + resource_id=entity_id, + ) + ) except Exception: logger.debug("Audit record failed for entity insights read") return EntityInsightsResponse( @@ -302,10 +295,7 @@ async def get_entity_insights( raise except Exception as e: logger.error("Failed to get insights for entity %s: %s", entity_id, e, exc_info=True) 
- raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to retrieve entity insights" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve entity insights") class EntitiesAPIPlugin(Plugin): diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/memories.py b/memorylayer-core-python/src/memorylayer_server/api/v1/memories.py index 6c8f3ff..e3b4c54 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/memories.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/memories.py @@ -15,38 +15,36 @@ import logging import time as _time -from fastapi import APIRouter, HTTPException, Depends, Request, status +from fastapi import APIRouter, Depends, HTTPException, Request, status from scitrera_app_framework import Plugin, Variables, get_extension -from .. import EXT_MULTI_API_ROUTERS from ...lifecycle.fastapi import get_logger, get_variables_dep -from ...models.memory import RememberInput, RecallInput, ReflectInput -from ...models.auth import RequestContext -from ...services.memory import MemoryService -from ...services.reflect import ReflectService, EXT_REFLECT_SERVICE -from ...services.authentication import AuthenticationService, AuthenticationError +from ...models.memory import RecallInput, ReflectInput, RememberInput +from ...services.audit import AuditEvent, AuditService +from ...services.authentication import AuthenticationError, AuthenticationService from ...services.authorization import AuthorizationService - +from ...services.memory import MemoryService +from ...services.metrics import MetricsService +from ...services.reflect import EXT_REFLECT_SERVICE, ReflectService +from .. 
import EXT_MULTI_API_ROUTERS +from .deps import get_active_session, get_audit_service, get_auth_service, get_authz_service, get_memory_service, get_metrics_service from .schemas import ( + BatchCreateOp, + BatchDeleteOp, + BatchOperationResponse, + BatchOperationResult, + BatchUpdateOp, + ErrorResponse, + MemoryBatchRequest, MemoryCreateRequest, - MemoryUpdateRequest, + MemoryDecayRequest, MemoryRecallRequest, MemoryReflectRequest, - MemoryDecayRequest, - MemoryBatchRequest, - BatchCreateOp, - BatchUpdateOp, - BatchDeleteOp, MemoryResponse, + MemoryUpdateRequest, RecallResult, ReflectResult, - ErrorResponse, - BatchOperationResponse, - BatchOperationResult, ) -from .deps import get_active_session, get_auth_service, get_authz_service, get_memory_service, get_audit_service, get_metrics_service -from ...services.audit import AuditService, AuditEvent -from ...services.metrics import MetricsService router = APIRouter(prefix="/v1/memories", tags=["memories"]) @@ -135,19 +133,23 @@ async def create_memory( logger.info("Created memory: %s", memory.id) try: metrics_service.counter("memorylayer_remember_total", labels={"workspace": ctx.workspace_id}) - metrics_service.histogram("memorylayer_remember_duration_seconds", _time.monotonic() - _t0, labels={"workspace": ctx.workspace_id}) + metrics_service.histogram( + "memorylayer_remember_duration_seconds", _time.monotonic() - _t0, labels={"workspace": ctx.workspace_id} + ) except Exception: logger.debug("Metrics recording failed for memory create") try: - await audit_service.record(AuditEvent( - event_type="memory", - action="create", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="memory", - resource_id=memory.id, - )) + await audit_service.record( + AuditEvent( + event_type="memory", + action="create", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="memory", + resource_id=memory.id, + ) + ) except Exception: 
logger.debug("Audit record failed for memory create") return MemoryResponse(memory=memory) @@ -212,15 +214,17 @@ async def get_memory( await authz_service.require_authorization(ctx, "memories", "read", resource_id=memory_id, workspace_id=memory.workspace_id) try: - await audit_service.record(AuditEvent( - event_type="memory", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=memory.workspace_id, - user_id=ctx.user_id, - resource_type="memory", - resource_id=memory_id, - )) + await audit_service.record( + AuditEvent( + event_type="memory", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=memory.workspace_id, + user_id=ctx.user_id, + resource_type="memory", + resource_id=memory_id, + ) + ) except Exception: logger.debug("Audit record failed for memory read") return MemoryResponse(memory=memory) @@ -280,7 +284,9 @@ async def update_memory( if not existing_memory: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Memory not found: {memory_id}") - await authz_service.require_authorization(ctx, "memories", "write", resource_id=memory_id, workspace_id=existing_memory.workspace_id) + await authz_service.require_authorization( + ctx, "memories", "write", resource_id=memory_id, workspace_id=existing_memory.workspace_id + ) # Build update kwargs from non-None fields update_kwargs = {} @@ -307,15 +313,17 @@ async def update_memory( logger.info("Updated memory: %s", memory_id) try: - await audit_service.record(AuditEvent( - event_type="memory", - action="update", - tenant_id=ctx.tenant_id, - workspace_id=existing_memory.workspace_id, - user_id=ctx.user_id, - resource_type="memory", - resource_id=memory_id, - )) + await audit_service.record( + AuditEvent( + event_type="memory", + action="update", + tenant_id=ctx.tenant_id, + workspace_id=existing_memory.workspace_id, + user_id=ctx.user_id, + resource_type="memory", + resource_id=memory_id, + ) + ) except Exception: logger.debug("Audit record failed for memory update") return 
MemoryResponse(memory=updated_memory) @@ -386,15 +394,17 @@ async def delete_memory( except Exception: logger.debug("Metrics recording failed for memory delete") try: - await audit_service.record(AuditEvent( - event_type="memory", - action="delete", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="memory", - resource_id=memory_id, - )) + await audit_service.record( + AuditEvent( + event_type="memory", + action="delete", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="memory", + resource_id=memory_id, + ) + ) except Exception: logger.debug("Audit record failed for memory delete") @@ -490,20 +500,28 @@ async def recall_memories( try: metrics_service.counter("memorylayer_recall_total", labels={"workspace": ctx.workspace_id, "mode": request.mode or "default"}) - metrics_service.histogram("memorylayer_recall_duration_seconds", _time.monotonic() - _t0, labels={"workspace": ctx.workspace_id}) - metrics_service.histogram("memorylayer_recall_result_count", len(result.memories) if hasattr(result, "memories") else 0, labels={"workspace": ctx.workspace_id}) + metrics_service.histogram( + "memorylayer_recall_duration_seconds", _time.monotonic() - _t0, labels={"workspace": ctx.workspace_id} + ) + metrics_service.histogram( + "memorylayer_recall_result_count", + len(result.memories) if hasattr(result, "memories") else 0, + labels={"workspace": ctx.workspace_id}, + ) except Exception: logger.debug("Metrics recording failed for memory recall") try: - await audit_service.record(AuditEvent( - event_type="memory", - action="recall", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="memory", - metadata={"query_length": len(request.query), "mode": request.mode}, - )) + await audit_service.record( + AuditEvent( + event_type="memory", + action="recall", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + 
resource_type="memory", + metadata={"query_length": len(request.query), "mode": request.mode}, + ) + ) except Exception: logger.debug("Audit record failed for memory recall") return result @@ -592,14 +610,16 @@ async def reflect_memories( logger.info("Reflected on %d source memories, generated %d tokens", len(result.source_memories), result.tokens_processed) try: - await audit_service.record(AuditEvent( - event_type="memory", - action="reflect", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="memory", - )) + await audit_service.record( + AuditEvent( + event_type="memory", + action="reflect", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="memory", + ) + ) except Exception: logger.debug("Audit record failed for memory reflect") return result @@ -789,9 +809,7 @@ async def batch_operations( update_kwargs["pinned"] = 1 if operation.pinned else 0 # Update memory via service layer - updated = await memory_service.update( - workspace_id=ctx.workspace_id, memory_id=memory_id, **update_kwargs - ) + updated = await memory_service.update(workspace_id=ctx.workspace_id, memory_id=memory_id, **update_kwargs) results.append( BatchOperationResult( @@ -842,15 +860,17 @@ async def batch_operations( logger.info("Completed batch operations: %d successful, %d failed", successful, failed) try: - await audit_service.record(AuditEvent( - event_type="memory", - action="batch", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="memory", - metadata={"operation_count": len(request.operations)}, - )) + await audit_service.record( + AuditEvent( + event_type="memory", + action="batch", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="memory", + metadata={"operation_count": len(request.operations)}, + ) + ) except Exception: logger.debug("Audit record failed for memory batch") return 
BatchOperationResponse( diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/schemas.py b/memorylayer-core-python/src/memorylayer_server/api/v1/schemas.py index a578a6b..c3d7299 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/schemas.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/schemas.py @@ -3,78 +3,88 @@ These schemas define the HTTP API interface separate from core domain models. """ + from datetime import datetime -from typing import Annotated, Any, Literal, Optional, Union +from typing import Annotated, Any, Literal from pydantic import BaseModel, Field -from memorylayer_server.models.memory import ( - MemoryStatus, MemoryType, MemorySubtype, RecallMode, SearchTolerance, - Memory, RecallResult, ReflectResult -) from memorylayer_server.models.association import ( + Association, + GraphPath, # noqa: F401 — re-exported for associations.py + GraphQueryResult, # noqa: F401 — re-exported for associations.py RelationshipCategory, - Association, GraphQueryResult, GraphPath +) +from memorylayer_server.models.memory import ( + Memory, + MemorySubtype, + MemoryType, + RecallMode, + RecallResult, # noqa: F401 — re-exported for memories.py + ReflectResult, # noqa: F401 — re-exported for memories.py + SearchTolerance, ) from memorylayer_server.models.session import Session, SessionBriefing from memorylayer_server.models.workspace import Workspace + # Memory API Schemas class MemoryCreateRequest(BaseModel): """Request schema for creating a memory.""" content: str = Field(..., description="Memory content to store", min_length=1) - workspace_id: Optional[str] = Field(None, description="Workspace override (defaults to session workspace or _default)") - type: Optional[MemoryType] = Field(None, description="Cognitive type (auto-classified if omitted)") - subtype: Optional[MemorySubtype] = Field(None, description="Domain-specific classification") + workspace_id: str | None = Field(None, description="Workspace override (defaults to 
session workspace or _default)") + type: MemoryType | None = Field(None, description="Cognitive type (auto-classified if omitted)") + subtype: MemorySubtype | None = Field(None, description="Domain-specific classification") importance: float = Field(0.5, ge=0.0, le=1.0, description="Memory importance (0.0-1.0)") tags: list[str] = Field(default_factory=list, description="Tags for categorization") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") associations: list[str] = Field(default_factory=list, description="Memory IDs to associate with") - context_id: Optional[str] = Field(None, description="Target memory context") - observer_id: Optional[str] = Field(None, description="Entity doing the observing/remembering (agent ID, user ID, etc.)") - subject_id: Optional[str] = Field(None, description="Entity this memory is about") - user_id: Optional[str] = Field(None, description="User scope for this memory") + context_id: str | None = Field(None, description="Target memory context") + observer_id: str | None = Field(None, description="Entity doing the observing/remembering (agent ID, user ID, etc.)") + subject_id: str | None = Field(None, description="Entity this memory is about") + user_id: str | None = Field(None, description="User scope for this memory") class MemoryUpdateRequest(BaseModel): """Request schema for updating a memory.""" - content: Optional[str] = Field(None, description="Updated content", min_length=1) - type: Optional[MemoryType] = Field(None, description="Updated cognitive type") - subtype: Optional[MemorySubtype] = Field(None, description="Updated domain classification") - importance: Optional[float] = Field(None, ge=0.0, le=1.0, description="Updated importance") - tags: Optional[list[str]] = Field(None, description="Updated tags") - metadata: Optional[dict[str, Any]] = Field(None, description="Updated metadata") - pinned: Optional[bool] = Field(None, description="Pin/unpin memory (pinned memories are exempt from 
decay)") + content: str | None = Field(None, description="Updated content", min_length=1) + type: MemoryType | None = Field(None, description="Updated cognitive type") + subtype: MemorySubtype | None = Field(None, description="Updated domain classification") + importance: float | None = Field(None, ge=0.0, le=1.0, description="Updated importance") + tags: list[str] | None = Field(None, description="Updated tags") + metadata: dict[str, Any] | None = Field(None, description="Updated metadata") + pinned: bool | None = Field(None, description="Pin/unpin memory (pinned memories are exempt from decay)") class MemoryRecallRequest(BaseModel): """Request schema for querying memories.""" query: str = Field(..., description="Natural language query", min_length=1) - workspace_id: Optional[str] = Field(None, description="Workspace override (defaults to session workspace or _default)") + workspace_id: str | None = Field(None, description="Workspace override (defaults to session workspace or _default)") types: list[MemoryType] = Field(default_factory=list, description="Filter by cognitive types") subtypes: list[MemorySubtype] = Field(default_factory=list, description="Filter by domain subtypes") tags: list[str] = Field(default_factory=list, description="Filter by tags (AND logic)") - context_id: Optional[str] = Field(None, description="Filter by memory context") - observer_id: Optional[str] = Field(None, description="Filter by observer entity") - subject_id: Optional[str] = Field(None, description="Filter by subject entity") - user_id: Optional[str] = Field(None, description="Filter by user") - mode: Optional[RecallMode] = Field(None, description="Retrieval strategy (None = server default)") - tolerance: Optional[SearchTolerance] = Field(None, description="Search precision (None = server default)") + context_id: str | None = Field(None, description="Filter by memory context") + observer_id: str | None = Field(None, description="Filter by observer entity") + subject_id: str | None 
= Field(None, description="Filter by subject entity") + user_id: str | None = Field(None, description="Filter by user") + mode: RecallMode | None = Field(None, description="Retrieval strategy (None = server default)") + tolerance: SearchTolerance | None = Field(None, description="Search precision (None = server default)") limit: int = Field(10, ge=1, le=100, description="Maximum memories to return") - min_relevance: Optional[float] = Field(None, ge=0.0, le=1.0, description="Minimum relevance score (None = server default)") - recency_weight: Optional[float] = Field(None, ge=0.0, le=1.0, - description="Weight for recency boosting (0.0=disabled, 1.0=full). None = server default.") - include_associations: Optional[bool] = Field(None, description="Include linked memories (None = server default)") - traverse_depth: Optional[int] = Field(None, ge=0, le=5, description="Multi-hop graph traversal depth (None = server default)") - max_expansion: Optional[int] = Field(None, ge=1, le=500, description="Max memories discovered via graph expansion (None = server default)") - created_after: Optional[datetime] = Field(None, description="Filter memories created after this time") - created_before: Optional[datetime] = Field(None, description="Filter memories created before this time") + min_relevance: float | None = Field(None, ge=0.0, le=1.0, description="Minimum relevance score (None = server default)") + recency_weight: float | None = Field( + None, ge=0.0, le=1.0, description="Weight for recency boosting (0.0=disabled, 1.0=full). None = server default." 
+ ) + include_associations: bool | None = Field(None, description="Include linked memories (None = server default)") + traverse_depth: int | None = Field(None, ge=0, le=5, description="Multi-hop graph traversal depth (None = server default)") + max_expansion: int | None = Field(None, ge=1, le=500, description="Max memories discovered via graph expansion (None = server default)") + created_after: datetime | None = Field(None, description="Filter memories created after this time") + created_before: datetime | None = Field(None, description="Filter memories created before this time") context: list[dict[str, str]] = Field(default_factory=list, description="Recent conversation context") rag_threshold: float = Field(0.8, ge=0.0, le=1.0, description="Use LLM if RAG confidence < threshold") - detail_level: Optional[str] = Field(None, description="Detail level: abstract, overview, or full (None = server default)") + detail_level: str | None = Field(None, description="Detail level: abstract, overview, or full (None = server default)") include_archived: bool = Field(False, description="Include archived memories in recall results") exclude_ids: list[str] = Field(default_factory=list, description="Memory IDs to exclude from results (already shown to user)") @@ -83,16 +93,16 @@ class MemoryReflectRequest(BaseModel): """Request schema for synthesizing memories.""" query: str = Field(..., description="What to reflect on", min_length=1) - workspace_id: Optional[str] = Field(None, description="Workspace override (defaults to session workspace or _default)") - detail_level: Optional[str] = Field(None, description="Level of detail: abstract, overview, full (None = server default)") + workspace_id: str | None = Field(None, description="Workspace override (defaults to session workspace or _default)") + detail_level: str | None = Field(None, description="Level of detail: abstract, overview, full (None = server default)") include_sources: bool = Field(True, description="Include source 
memory references") depth: int = Field(2, ge=1, le=5, description="Association traversal depth") types: list[MemoryType] = Field(default_factory=list, description="Filter by types") subtypes: list[MemorySubtype] = Field(default_factory=list, description="Filter by subtypes") tags: list[str] = Field(default_factory=list, description="Filter by tags") - context_id: Optional[str] = Field(None, description="Filter by memory context") - observer_id: Optional[str] = Field(None, description="Filter by observer entity") - subject_id: Optional[str] = Field(None, description="Filter by subject entity") + context_id: str | None = Field(None, description="Filter by memory context") + observer_id: str | None = Field(None, description="Filter by observer entity") + subject_id: str | None = Field(None, description="Filter by subject entity") class MemoryDecayRequest(BaseModel): @@ -106,13 +116,13 @@ class BatchCreateOp(BaseModel): op: Literal["create"] = Field(..., description="Operation type") content: str = Field(..., description="Memory content", min_length=1) - type: Optional[MemoryType] = Field(None, description="Cognitive type") - subtype: Optional[MemorySubtype] = Field(None, description="Domain classification") + type: MemoryType | None = Field(None, description="Cognitive type") + subtype: MemorySubtype | None = Field(None, description="Domain classification") importance: float = Field(0.5, ge=0.0, le=1.0, description="Importance") tags: list[str] = Field(default_factory=list, description="Tags") metadata: dict[str, Any] = Field(default_factory=dict, description="Metadata") - observer_id: Optional[str] = Field(None, description="Observer entity") - subject_id: Optional[str] = Field(None, description="Subject entity") + observer_id: str | None = Field(None, description="Observer entity") + subject_id: str | None = Field(None, description="Subject entity") class BatchUpdateOp(BaseModel): @@ -120,13 +130,13 @@ class BatchUpdateOp(BaseModel): op: Literal["update"] = 
Field(..., description="Operation type") memory_id: str = Field(..., description="Memory ID to update") - content: Optional[str] = Field(None, description="Updated content", min_length=1) - type: Optional[MemoryType] = Field(None, description="Updated type") - subtype: Optional[MemorySubtype] = Field(None, description="Updated subtype") - importance: Optional[float] = Field(None, ge=0.0, le=1.0, description="Updated importance") - tags: Optional[list[str]] = Field(None, description="Updated tags") - metadata: Optional[dict[str, Any]] = Field(None, description="Updated metadata") - pinned: Optional[bool] = Field(None, description="Pin/unpin memory") + content: str | None = Field(None, description="Updated content", min_length=1) + type: MemoryType | None = Field(None, description="Updated type") + subtype: MemorySubtype | None = Field(None, description="Updated subtype") + importance: float | None = Field(None, ge=0.0, le=1.0, description="Updated importance") + tags: list[str] | None = Field(None, description="Updated tags") + metadata: dict[str, Any] | None = Field(None, description="Updated metadata") + pinned: bool | None = Field(None, description="Pin/unpin memory") class BatchDeleteOp(BaseModel): @@ -138,7 +148,7 @@ class BatchDeleteOp(BaseModel): BatchOperation = Annotated[ - Union[BatchCreateOp, BatchUpdateOp, BatchDeleteOp], + BatchCreateOp | BatchUpdateOp | BatchDeleteOp, Field(discriminator="op"), ] @@ -146,10 +156,7 @@ class BatchDeleteOp(BaseModel): class MemoryBatchRequest(BaseModel): """Request schema for batch memory operations.""" - operations: list[BatchOperation] = Field( - ..., - description="List of typed batch operations (create, update, delete)" - ) + operations: list[BatchOperation] = Field(..., description="List of typed batch operations (create, update, delete)") class MemoryResponse(BaseModel): @@ -171,8 +178,8 @@ class BatchOperationResult(BaseModel): index: int = Field(..., description="Operation index in batch") type: str = Field(..., 
description="Operation type") status: str = Field(..., description="success or error") - memory_id: Optional[str] = Field(None, description="Memory ID for create/update operations") - error: Optional[str] = Field(None, description="Error message if failed") + memory_id: str | None = Field(None, description="Memory ID for create/update operations") + error: str | None = Field(None, description="Error message if failed") class BatchOperationResponse(BaseModel): @@ -192,7 +199,7 @@ class AssociationCreateRequest(BaseModel): relationship: str = Field(..., description="Relationship type (e.g., SIMILAR_TO, CAUSES, SOLVES)") strength: float = Field(0.5, ge=0.0, le=1.0, description="Relationship strength") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") - workspace_id: Optional[str] = Field(None, description="Workspace override (defaults to session workspace or _default)") + workspace_id: str | None = Field(None, description="Workspace override (defaults to session workspace or _default)") class AssociationCreateFullRequest(BaseModel): @@ -208,31 +215,17 @@ class AssociationCreateFullRequest(BaseModel): class AssociationListRequest(BaseModel): """Request schema for listing associations.""" - relationships: Optional[list[str]] = Field( - None, - description="Filter by relationship types (e.g., SIMILAR_TO, CAUSES)" - ) - direction: str = Field( - "both", - pattern="^(outgoing|incoming|both)$", - description="Association direction" - ) + relationships: list[str] | None = Field(None, description="Filter by relationship types (e.g., SIMILAR_TO, CAUSES)") + direction: str = Field("both", pattern="^(outgoing|incoming|both)$", description="Association direction") class MemoryTraverseRequest(BaseModel): """Request schema for traversing from a specific memory.""" - workspace_id: Optional[str] = Field(None, description="Workspace override (defaults to session workspace or _default)") + workspace_id: str | None = Field(None, 
description="Workspace override (defaults to session workspace or _default)") max_depth: int = Field(2, ge=1, le=5, description="Maximum traversal depth") - relationship_types: list[str] = Field( - default_factory=list, - description="Filter by specific relationship types (empty = all)" - ) - direction: str = Field( - "both", - pattern="^(outgoing|incoming|both)$", - description="Traversal direction: outgoing, incoming, both" - ) + relationship_types: list[str] = Field(default_factory=list, description="Filter by specific relationship types (empty = all)") + direction: str = Field("both", pattern="^(outgoing|incoming|both)$", description="Traversal direction: outgoing, incoming, both") min_strength: float = Field(0.0, ge=0.0, le=1.0, description="Minimum edge strength") @@ -240,20 +233,10 @@ class GraphTraverseRequest(BaseModel): """Request schema for graph traversal.""" start_memory_id: str = Field(..., description="Starting memory for traversal") - relationship_types: list[str] = Field( - default_factory=list, - description="Filter by specific relationship types" - ) - relationship_categories: list[RelationshipCategory] = Field( - default_factory=list, - description="Filter by relationship categories" - ) + relationship_types: list[str] = Field(default_factory=list, description="Filter by specific relationship types") + relationship_categories: list[RelationshipCategory] = Field(default_factory=list, description="Filter by relationship categories") max_depth: int = Field(3, ge=1, le=5, description="Maximum traversal depth") - direction: str = Field( - "both", - pattern="^(outgoing|incoming|both)$", - description="Traversal direction" - ) + direction: str = Field("both", pattern="^(outgoing|incoming|both)$", description="Traversal direction") min_strength: float = Field(0.0, ge=0.0, le=1.0, description="Minimum edge strength") max_paths: int = Field(100, ge=1, le=1000, description="Maximum paths to return") max_nodes: int = Field(50, ge=1, le=500, 
description="Maximum nodes in result") @@ -276,14 +259,14 @@ class AssociationListResponse(BaseModel): class SessionCreateRequest(BaseModel): """Request schema for creating a session.""" - session_id: Optional[str] = Field(None, description="Client-provided session ID (generated if omitted)") - workspace_id: Optional[str] = Field(None, description="Workspace ID (auto-created if doesn't exist, defaults to _default)") + session_id: str | None = Field(None, description="Client-provided session ID (generated if omitted)") + workspace_id: str | None = Field(None, description="Workspace ID (auto-created if doesn't exist, defaults to _default)") ttl_seconds: int = Field(3600, ge=60, le=86400, description="Session TTL in seconds") metadata: dict[str, Any] = Field(default_factory=dict, description="Session metadata") - context_id: Optional[str] = Field(None, description="Context to bind session to (defaults to _default, auto-created if needed)") - working_memory: Optional[dict[str, Any]] = Field(None, description="Initial working memory key-value pairs") + context_id: str | None = Field(None, description="Context to bind session to (defaults to _default, auto-created if needed)") + working_memory: dict[str, Any] | None = Field(None, description="Initial working memory key-value pairs") briefing: bool = Field(False, description="Include briefing with relevant memories") - briefing_options: Optional[dict] = Field(None, description="Briefing options: lookback_hours, detail_level, limit") + briefing_options: dict | None = Field(None, description="Briefing options: lookback_hours, detail_level, limit") class CommitOptions(BaseModel): @@ -291,7 +274,7 @@ class CommitOptions(BaseModel): min_importance: float = Field(0.5, ge=0.0, le=1.0, description="Minimum importance threshold") deduplicate: bool = Field(True, description="Enable deduplication of extracted memories") - categories: Optional[list[str]] = Field(None, description="Category names or None for all") + categories: 
list[str] | None = Field(None, description="Category names or None for all") max_memories: int = Field(50, ge=1, le=500, description="Maximum memories to extract") @@ -317,7 +300,7 @@ class WorkingMemorySetRequest(BaseModel): key: str = Field(..., description="Working memory key", min_length=1) value: Any = Field(..., description="Working memory value (JSON-serializable)") - ttl_seconds: Optional[int] = Field(None, description="Optional TTL override") + ttl_seconds: int | None = Field(None, description="Optional TTL override") class SessionListResponse(BaseModel): @@ -337,7 +320,7 @@ class SessionStartResponse(BaseModel): """Response schema for session creation with optional briefing.""" session: Session - briefing: Optional[SessionBriefing] = None + briefing: SessionBriefing | None = None class WorkingMemoryResponse(BaseModel): @@ -345,7 +328,7 @@ class WorkingMemoryResponse(BaseModel): key: str value: Any - ttl_seconds: Optional[int] = None + ttl_seconds: int | None = None created_at: datetime updated_at: datetime @@ -361,17 +344,14 @@ class WorkspaceCreateRequest(BaseModel): """Request schema for creating a workspace.""" name: str = Field(..., description="Workspace name", min_length=1) - settings: dict[str, Any] = Field( - default_factory=dict, - description="Workspace-level settings" - ) + settings: dict[str, Any] = Field(default_factory=dict, description="Workspace-level settings") class WorkspaceUpdateRequest(BaseModel): """Request schema for updating a workspace.""" - name: Optional[str] = Field(None, description="Updated workspace name", min_length=1) - settings: Optional[dict[str, Any]] = Field(None, description="Updated settings") + name: str | None = Field(None, description="Updated workspace name", min_length=1) + settings: dict[str, Any] | None = Field(None, description="Updated settings") class WorkspaceResponse(BaseModel): @@ -400,7 +380,7 @@ class ErrorResponse(BaseModel): error: str = Field(..., description="Error type") message: str = Field(..., 
description="Human-readable error message") - details: Optional[dict[str, Any]] = Field(None, description="Additional error details") + details: dict[str, Any] | None = Field(None, description="Additional error details") # Contradiction API Schemas @@ -411,12 +391,12 @@ class ContradictionResponse(BaseModel): workspace_id: str = Field(..., description="Workspace ID") memory_a_id: str = Field(..., description="First memory ID in the contradiction") memory_b_id: str = Field(..., description="Second memory ID in the contradiction") - contradiction_type: Optional[str] = Field(None, description="Type of contradiction (e.g., 'negation', 'value_conflict')") - confidence: Optional[float] = Field(None, description="Detection confidence score (0.0-1.0)") - detection_method: Optional[str] = Field(None, description="Method used to detect the contradiction") - detected_at: Optional[datetime] = Field(None, description="When the contradiction was detected") - resolved_at: Optional[datetime] = Field(None, description="When the contradiction was resolved") - resolution: Optional[str] = Field(None, description="Resolution strategy applied") + contradiction_type: str | None = Field(None, description="Type of contradiction (e.g., 'negation', 'value_conflict')") + confidence: float | None = Field(None, description="Detection confidence score (0.0-1.0)") + detection_method: str | None = Field(None, description="Method used to detect the contradiction") + detected_at: datetime | None = Field(None, description="When the contradiction was detected") + resolved_at: datetime | None = Field(None, description="When the contradiction was resolved") + resolution: str | None = Field(None, description="Resolution strategy applied") class ContradictionListResponse(BaseModel): @@ -430,13 +410,13 @@ class ContradictionResolveRequest(BaseModel): """Request model for resolving a contradiction.""" resolution: str = Field(..., description="Resolution strategy: 'keep_a', 'keep_b', 'keep_both', or 
'merge'") - merged_content: Optional[str] = Field(None, description="Merged content (required when resolution is 'merge')") + merged_content: str | None = Field(None, description="Merged content (required when resolution is 'merge')") class ContradictionScanRequest(BaseModel): """Request model for triggering a workspace contradiction scan.""" - batch_size: Optional[int] = Field(None, ge=1, le=500, description="Number of memories to process per batch (server default if omitted)") + batch_size: int | None = Field(None, ge=1, le=500, description="Number of memories to process per batch (server default if omitted)") class ContradictionScanResponse(BaseModel): @@ -451,11 +431,12 @@ class ContradictionScanResponse(BaseModel): # Context Environment API Schemas # ============================================ + class ContextExecuteRequest(BaseModel): """Request to execute code in a session's sandbox environment.""" code: str = Field(..., description="Python code to execute", min_length=1) - result_var: Optional[str] = Field(None, description="Store expression result in this variable") + result_var: str | None = Field(None, description="Store expression result in this variable") return_result: bool = Field(True, description="Include result value in response") max_return_chars: int = Field(10_000, ge=100, le=100_000, description="Maximum chars for result serialization") @@ -463,23 +444,23 @@ class ContextExecuteRequest(BaseModel): class ContextExecuteResponse(BaseModel): """Response from code execution.""" - output: str = Field('', description="Captured stdout output") - result: Optional[str] = Field(None, description="Expression result (string preview)") - error: Optional[str] = Field(None, description="Error message if execution failed") + output: str = Field("", description="Captured stdout output") + result: str | None = Field(None, description="Expression result (string preview)") + error: str | None = Field(None, description="Error message if execution failed") 
variables_changed: list[str] = Field(default_factory=list, description="Variables created or modified") class ContextInspectResponse(BaseModel): """Response from inspecting sandbox state.""" - variable: Optional[str] = Field(None, description="Specific variable name if inspecting one") - type: Optional[str] = Field(None, description="Variable type name") - preview: Optional[str] = Field(None, description="Value preview string") - size_bytes: Optional[int] = Field(None, description="Estimated size in bytes") - variable_count: Optional[int] = Field(None, description="Total variable count (all-vars mode)") - variables: Optional[dict[str, Any]] = Field(None, description="Variable info map (all-vars mode)") - total_size_bytes: Optional[int] = Field(None, description="Total size across all variables") - error: Optional[str] = Field(None, description="Error message if inspection failed") + variable: str | None = Field(None, description="Specific variable name if inspecting one") + type: str | None = Field(None, description="Variable type name") + preview: str | None = Field(None, description="Value preview string") + size_bytes: int | None = Field(None, description="Estimated size in bytes") + variable_count: int | None = Field(None, description="Total variable count (all-vars mode)") + variables: dict[str, Any] | None = Field(None, description="Variable info map (all-vars mode)") + total_size_bytes: int | None = Field(None, description="Total size across all variables") + error: str | None = Field(None, description="Error message if inspection failed") class ContextLoadRequest(BaseModel): @@ -488,9 +469,9 @@ class ContextLoadRequest(BaseModel): var: str = Field(..., description="Variable name to store results in", min_length=1) query: str = Field(..., description="Memory recall query", min_length=1) limit: int = Field(50, ge=1, le=500, description="Maximum memories to recall") - types: Optional[list[str]] = Field(None, description="Filter by memory types") - tags: 
Optional[list[str]] = Field(None, description="Filter by tags") - min_relevance: Optional[float] = Field(None, ge=0.0, le=1.0, description="Minimum relevance score") + types: list[str] | None = Field(None, description="Filter by memory types") + tags: list[str] | None = Field(None, description="Filter by tags") + min_relevance: float | None = Field(None, ge=0.0, le=1.0, description="Minimum relevance score") include_embeddings: bool = Field(False, description="Include embedding vectors") @@ -498,10 +479,10 @@ class ContextLoadResponse(BaseModel): """Response from loading memories.""" count: int = Field(0, description="Number of memories loaded") - variable: Optional[str] = Field(None, description="Variable name where memories were stored") - query: Optional[str] = Field(None, description="The recall query used") - total_available: Optional[int] = Field(None, description="Total matching memories available") - error: Optional[str] = Field(None, description="Error message if load failed") + variable: str | None = Field(None, description="Variable name where memories were stored") + query: str | None = Field(None, description="The recall query used") + total_available: int | None = Field(None, description="Total matching memories available") + error: str | None = Field(None, description="Error message if load failed") class ContextInjectRequest(BaseModel): @@ -515,10 +496,10 @@ class ContextInjectRequest(BaseModel): class ContextInjectResponse(BaseModel): """Response from injecting a value.""" - variable: Optional[str] = Field(None, description="Variable name") - type: Optional[str] = Field(None, description="Value type name") - preview: Optional[str] = Field(None, description="Value preview string") - error: Optional[str] = Field(None, description="Error message if injection failed") + variable: str | None = Field(None, description="Variable name") + type: str | None = Field(None, description="Value type name") + preview: str | None = Field(None, description="Value 
preview string") + error: str | None = Field(None, description="Error message if injection failed") class ContextQueryRequest(BaseModel): @@ -526,38 +507,38 @@ class ContextQueryRequest(BaseModel): prompt: str = Field(..., description="Prompt for the LLM", min_length=1) variables: list[str] = Field(default_factory=list, description="Variable names to include as context") - max_context_chars: Optional[int] = Field(None, ge=100, le=500_000, description="Maximum chars for variable context") - result_var: Optional[str] = Field(None, description="Store LLM response in this variable") + max_context_chars: int | None = Field(None, ge=100, le=500_000, description="Maximum chars for variable context") + result_var: str | None = Field(None, description="Store LLM response in this variable") class ContextQueryResponse(BaseModel): """Response from LLM query.""" - response: Optional[str] = Field(None, description="LLM response text") + response: str | None = Field(None, description="LLM response text") variables_used: list[str] = Field(default_factory=list, description="Variables included in context") - result_var: Optional[str] = Field(None, description="Variable where response was stored") - error: Optional[str] = Field(None, description="Error message if query failed") + result_var: str | None = Field(None, description="Variable where response was stored") + error: str | None = Field(None, description="Error message if query failed") class ContextRLMRequest(BaseModel): """Request to run a Recursive Language Model (RLM) loop.""" goal: str = Field(..., description="Natural language description of the goal", min_length=1) - memory_query: Optional[str] = Field(None, description="Optional memory query to load initial data") + memory_query: str | None = Field(None, description="Optional memory query to load initial data") memory_limit: int = Field(100, ge=1, le=500, description="Maximum memories to load") max_iterations: int = Field(10, ge=1, le=50, description="Maximum reasoning 
iterations") - variables: Optional[list[str]] = Field(None, description="Variable names to include in context") - result_var: Optional[str] = Field(None, description="Store final result in this variable") + variables: list[str] | None = Field(None, description="Variable names to include in context") + result_var: str | None = Field(None, description="Store final result in this variable") detail_level: str = Field("standard", description="Detail level: minimal, standard, verbose") class ContextRLMResponse(BaseModel): """Response from RLM execution.""" - result: Optional[str] = Field(None, description="Final result") + result: str | None = Field(None, description="Final result") iterations: int = Field(0, description="Number of iterations performed") trace: list[dict[str, Any]] = Field(default_factory=list, description="Execution trace per iteration") - error: Optional[str] = Field(None, description="Error message if RLM failed") + error: str | None = Field(None, description="Error message if RLM failed") goal_achieved: bool = Field(False, description="Whether the goal was achieved") @@ -566,9 +547,9 @@ class ContextStatusResponse(BaseModel): exists: bool = Field(False, description="Whether the environment exists") variable_count: int = Field(0, description="Number of variables in sandbox") - variables: Optional[list[str]] = Field(None, description="Variable names") + variables: list[str] | None = Field(None, description="Variable names") total_size_bytes: int = Field(0, description="Total size of all variables") - memory_limit_bytes: Optional[int] = Field(None, description="Memory limit in bytes") + memory_limit_bytes: int | None = Field(None, description="Memory limit in bytes") metadata: dict[str, Any] = Field(default_factory=dict, description="Environment metadata") @@ -576,27 +557,30 @@ class ContextStatusResponse(BaseModel): # Workspace Export/Import API Schemas # ============================================ + class MemoryExportItem(BaseModel): """Serialized 
memory for export.""" + id: str content: str content_hash: str type: str - subtype: Optional[str] = None + subtype: str | None = None importance: float = 0.5 tags: list[str] = Field(default_factory=list) metadata: dict[str, Any] = Field(default_factory=dict) - abstract: Optional[str] = None - overview: Optional[str] = None - session_id: Optional[str] = None - observer_id: Optional[str] = None - subject_id: Optional[str] = None - created_at: Optional[str] = None - updated_at: Optional[str] = None + abstract: str | None = None + overview: str | None = None + session_id: str | None = None + observer_id: str | None = None + subject_id: str | None = None + created_at: str | None = None + updated_at: str | None = None class AssociationExportItem(BaseModel): """Serialized association for export.""" + source_id: str target_id: str relationship_type: str @@ -606,6 +590,7 @@ class AssociationExportItem(BaseModel): class WorkspaceExportData(BaseModel): """Export envelope for workspace data.""" + version: str = "1.0" exported_at: str workspace_id: str @@ -619,11 +604,13 @@ class WorkspaceExportData(BaseModel): class WorkspaceImportRequest(BaseModel): """Import request body.""" + data: WorkspaceExportData class WorkspaceImportResult(BaseModel): """Import operation results.""" + imported: int = 0 skipped_duplicates: int = 0 errors: int = 0 @@ -634,11 +621,12 @@ class WorkspaceImportResult(BaseModel): # Entity API Schemas # ============================================ + class EntityDeriveRequest(BaseModel): """Request to trigger inference derivation for an entity.""" - workspace_id: Optional[str] = Field(None, description="Workspace override (defaults to _default)") - observer_id: Optional[str] = Field(None, description="Optional observer perspective filter") + workspace_id: str | None = Field(None, description="Workspace override (defaults to _default)") + observer_id: str | None = Field(None, description="Optional observer perspective filter") force: bool = Field(False, 
description="Force re-derivation even if recent insights exist") @@ -663,7 +651,7 @@ class EntityCardResponse(BaseModel): source_memories: list[str] = Field(default_factory=list, description="Source memory IDs") confidence: float = Field(0.0, description="Confidence in the profile") cached: bool = Field(False, description="Whether this was served from cache") - generated_at: Optional[str] = Field(None, description="When this card was generated/last refreshed") + generated_at: str | None = Field(None, description="When this card was generated/last refreshed") class EntityInsightsResponse(BaseModel): @@ -680,29 +668,30 @@ class EntityInsightsResponse(BaseModel): # ============================================ from memorylayer_server.models.chat import ( - ChatThread, ChatMessage, ChatMessageContent, ChatThreadWithMessages, + ChatMessage, + ChatThread, ) class ThreadCreateRequest(BaseModel): """Request schema for creating a chat thread.""" - thread_id: Optional[str] = Field(None, description="Client-provided thread ID (auto-generated if omitted)") - workspace_id: Optional[str] = Field(None, description="Workspace override (defaults to session workspace or _default)") - user_id: Optional[str] = Field(None, description="User scope for this thread") - context_id: Optional[str] = Field(None, description="Context within workspace (defaults to _default)") - observer_id: Optional[str] = Field(None, description="Observer entity ID (typically the AI agent)") - subject_id: Optional[str] = Field(None, description="Subject entity ID (typically the human user)") - title: Optional[str] = Field(None, description="Optional display title") + thread_id: str | None = Field(None, description="Client-provided thread ID (auto-generated if omitted)") + workspace_id: str | None = Field(None, description="Workspace override (defaults to session workspace or _default)") + user_id: str | None = Field(None, description="User scope for this thread") + context_id: str | None = Field(None, 
description="Context within workspace (defaults to _default)") + observer_id: str | None = Field(None, description="Observer entity ID (typically the AI agent)") + subject_id: str | None = Field(None, description="Subject entity ID (typically the human user)") + title: str | None = Field(None, description="Optional display title") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") - expires_at: Optional[datetime] = Field(None, description="Optional expiration (None = permanent)") + expires_at: datetime | None = Field(None, description="Optional expiration (None = permanent)") class ThreadUpdateRequest(BaseModel): """Request schema for updating a chat thread.""" - title: Optional[str] = Field(None, description="Updated display title") - metadata: Optional[dict[str, Any]] = Field(None, description="Updated metadata") + title: str | None = Field(None, description="Updated display title") + metadata: dict[str, Any] | None = Field(None, description="Updated metadata") class ThreadResponse(BaseModel): diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/sessions.py b/memorylayer-core-python/src/memorylayer_server/api/v1/sessions.py index ae322a8..d4f81dc 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/sessions.py +++ b/memorylayer-core-python/src/memorylayer_server/api/v1/sessions.py @@ -12,36 +12,44 @@ - POST /v1/sessions/{session_id}/touch - Update session expiration - GET /v1/sessions/briefing - Session briefing """ + import logging -from typing import Optional from uuid import uuid4 -from fastapi import APIRouter, HTTPException, Depends, Request, status +from fastapi import APIRouter, Depends, HTTPException, Request, status from scitrera_app_framework import Plugin, Variables -from .. 
import EXT_MULTI_API_ROUTERS from memorylayer_server.lifecycle.fastapi import get_logger +from ...config import DEFAULT_CONTEXT_ID, DEFAULT_TENANT_ID +from ...services.audit import AuditEvent, AuditService +from ...services.authentication import AuthenticationService +from ...services.authorization import AuthorizationService +from ...services.metrics import MetricsService +from ...services.session import SessionService +from ...services.workspace import WorkspaceService +from .. import EXT_MULTI_API_ROUTERS +from .deps import ( + get_active_session, + get_audit_service, + get_auth_service, + get_authz_service, + get_metrics_service, + get_session_service, + get_workspace_service, +) from .schemas import ( + CommitOptions, + CommitResponse, + ErrorResponse, + SessionBriefingResponse, SessionCreateRequest, - WorkingMemorySetRequest, SessionListResponse, SessionResponse, SessionStartResponse, WorkingMemoryResponse, - SessionBriefingResponse, - CommitOptions, - CommitResponse, - ErrorResponse, + WorkingMemorySetRequest, ) -from ...services.session import SessionService -from ...services.workspace import WorkspaceService -from ...services.authentication import AuthenticationService -from ...services.authorization import AuthorizationService -from ...config import DEFAULT_TENANT_ID, DEFAULT_CONTEXT_ID -from .deps import get_auth_service, get_authz_service, get_session_service, get_workspace_service, get_active_session, get_audit_service, get_metrics_service -from ...services.audit import AuditService, AuditEvent -from ...services.metrics import MetricsService router = APIRouter(prefix="/v1/sessions", tags=["sessions"]) @@ -58,15 +66,15 @@ }, ) async def create_session( - http_request: Request, - request: SessionCreateRequest, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - workspace_service: WorkspaceService = 
Depends(get_workspace_service), - audit_service: AuditService = Depends(get_audit_service), - metrics_service: MetricsService = Depends(get_metrics_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + request: SessionCreateRequest, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + workspace_service: WorkspaceService = Depends(get_workspace_service), + audit_service: AuditService = Depends(get_audit_service), + metrics_service: MetricsService = Depends(get_metrics_service), + logger: logging.Logger = Depends(get_logger), ) -> SessionStartResponse: """ Create a new working memory session. @@ -90,9 +98,7 @@ async def create_session( try: # Build request context and check authorization ctx = await auth_service.build_context(http_request, request) - await authz_service.require_authorization( - ctx, "sessions", "create", workspace_id=ctx.workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "create", workspace_id=ctx.workspace_id) # Generate session ID if not provided session_id = request.session_id or f"sess_{uuid4().hex}" @@ -104,11 +110,7 @@ async def create_session( context_id = request.context_id or DEFAULT_CONTEXT_ID logger.info( - "Creating session: %s in workspace: %s, ttl: %d, context: %s", - session_id, - workspace_id, - request.ttl_seconds, - context_id + "Creating session: %s in workspace: %s, ttl: %d, context: %s", session_id, workspace_id, request.ttl_seconds, context_id ) # Auto-create workspace if it doesn't exist (OSS "just works" pattern) @@ -125,6 +127,7 @@ async def create_session( # Create session with context_id from ...models.session import Session + session = Session.create_with_ttl( session_id=session_id, workspace_id=workspace_id, @@ -141,12 +144,7 @@ async def create_session( if request.working_memory: logger.info("Setting initial working 
memory: %d keys", len(request.working_memory)) for key, value in request.working_memory.items(): - await session_service.set_working_memory( - workspace_id=workspace_id, - session_id=session_id, - key=key, - value=value - ) + await session_service.set_working_memory(workspace_id=workspace_id, session_id=session_id, key=key, value=value) # Generate briefing if requested briefing = None @@ -167,31 +165,27 @@ async def create_session( except Exception: logger.debug("Metrics recording failed for session create") try: - await audit_service.record(AuditEvent( - event_type="session", - action="create", - tenant_id=ctx.tenant_id, - workspace_id=ctx.workspace_id, - user_id=ctx.user_id, - resource_type="session", - resource_id=session.id, - )) + await audit_service.record( + AuditEvent( + event_type="session", + action="create", + tenant_id=ctx.tenant_id, + workspace_id=ctx.workspace_id, + user_id=ctx.user_id, + resource_type="session", + resource_id=session.id, + ) + ) except Exception: logger.debug("Audit record failed for session create") return SessionStartResponse(session=session, briefing=briefing) except ValueError as e: logger.warning("Invalid session creation request: %s", e) - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=str(e) - ) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) except Exception as e: logger.error("Failed to create session: %s", e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to create session" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create session") # NOTE: list and /briefing must be defined BEFORE /{session_id} to avoid route collision @@ -205,14 +199,14 @@ async def create_session( }, ) async def list_sessions( - http_request: Request, - workspace_id: Optional[str] = None, - context_id: Optional[str] = None, - include_expired: bool = False, - auth_service: 
AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str | None = None, + context_id: str | None = None, + include_expired: bool = False, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + logger: logging.Logger = Depends(get_logger), ) -> SessionListResponse: """ List sessions in a workspace. @@ -236,14 +230,9 @@ async def list_sessions( # Build context and check authorization ctx = await auth_service.build_context(http_request, None) workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "sessions", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "read", workspace_id=workspace_id) - logger.debug( - "Listing sessions for workspace: %s, context: %s, include_expired: %s", - workspace_id, context_id, include_expired - ) + logger.debug("Listing sessions for workspace: %s, context: %s, include_expired: %s", workspace_id, context_id, include_expired) sessions = await session_service.list_sessions( workspace_id, @@ -260,10 +249,7 @@ async def list_sessions( raise except Exception as e: logger.error("Failed to list sessions: %s", e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to list sessions" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to list sessions") @router.get( @@ -276,17 +262,17 @@ async def list_sessions( }, ) async def get_briefing( - http_request: Request, - workspace_id: Optional[str] = None, - lookback_minutes: int = 60, - detail_level: str = "abstract", - limit: int = 10, - 
include_memories: bool = True, - include_contradictions: bool = True, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str | None = None, + lookback_minutes: int = 60, + detail_level: str = "abstract", + limit: int = 10, + include_memories: bool = True, + include_contradictions: bool = True, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + logger: logging.Logger = Depends(get_logger), ) -> SessionBriefingResponse: """ Get a briefing of recent workspace activity and context. @@ -314,9 +300,7 @@ async def get_briefing( ctx = await auth_service.build_context(http_request, None) # Use explicit workspace_id if provided, otherwise fall back to context workspace_id = workspace_id or ctx.workspace_id - await authz_service.require_authorization( - ctx, "sessions", "read", workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "read", workspace_id=workspace_id) logger.info("Generating briefing for workspace: %s", workspace_id) @@ -334,10 +318,7 @@ async def get_briefing( raise except Exception as e: logger.error("Failed to generate briefing: %s", e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to generate briefing" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to generate briefing") @router.get( @@ -351,14 +332,14 @@ async def get_briefing( }, ) async def get_session( - http_request: Request, - session_id: str, - _active_session: str | None = Depends(get_active_session), - auth_service: AuthenticationService = Depends(get_auth_service), - 
authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + session_id: str, + _active_session: str | None = Depends(get_active_session), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> SessionResponse: """ Retrieve a session by ID. @@ -382,27 +363,23 @@ async def get_session( # Session service get() doesn't require workspace_id session = await session_service.get(session_id) if session is None: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found or expired: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found or expired: {session_id}") # Check authorization for the session's workspace - await authz_service.require_authorization( - ctx, "sessions", "read", - resource_id=session_id, workspace_id=session.workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "read", resource_id=session_id, workspace_id=session.workspace_id) try: - await audit_service.record(AuditEvent( - event_type="session", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=session.workspace_id, - user_id=ctx.user_id, - resource_type="session", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="session", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=session.workspace_id, + user_id=ctx.user_id, + resource_type="session", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for session read") return SessionResponse(session=session) 
@@ -411,10 +388,7 @@ async def get_session( raise except Exception as e: logger.error("Failed to get session %s: %s", session_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to retrieve session" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve session") @router.delete( @@ -428,14 +402,14 @@ async def get_session( }, ) async def delete_session( - http_request: Request, - session_id: str, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - audit_service: AuditService = Depends(get_audit_service), - metrics_service: MetricsService = Depends(get_metrics_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + session_id: str, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + audit_service: AuditService = Depends(get_audit_service), + metrics_service: MetricsService = Depends(get_metrics_service), + logger: logging.Logger = Depends(get_logger), ) -> None: """ Delete a session and all its context data. 
@@ -456,38 +430,31 @@ async def delete_session( # Get session to find its workspace session = await session_service.get(session_id) if not session: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found: {session_id}") # Check authorization - await authz_service.require_authorization( - ctx, "sessions", "delete", - resource_id=session_id, workspace_id=session.workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "delete", resource_id=session_id, workspace_id=session.workspace_id) success = await session_service.delete_session(session.workspace_id, session_id) if not success: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found: {session_id}") try: metrics_service.counter("memorylayer_session_close_total", labels={"workspace": session.workspace_id}) except Exception: logger.debug("Metrics recording failed for session close") try: - await audit_service.record(AuditEvent( - event_type="session", - action="close", - tenant_id=ctx.tenant_id, - workspace_id=session.workspace_id, - user_id=ctx.user_id, - resource_type="session", - resource_id=session_id, - )) + await audit_service.record( + AuditEvent( + event_type="session", + action="close", + tenant_id=ctx.tenant_id, + workspace_id=session.workspace_id, + user_id=ctx.user_id, + resource_type="session", + resource_id=session_id, + ) + ) except Exception: logger.debug("Audit record failed for session close") @@ -495,10 +462,7 @@ async def delete_session( raise except Exception as e: logger.error("Failed to delete session %s: %s", session_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to delete session" - ) + raise 
HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to delete session") @router.post( @@ -514,14 +478,14 @@ async def delete_session( }, ) async def set_working_memory( - http_request: Request, - session_id: str, - request: WorkingMemorySetRequest, - _active_session: str | None = Depends(get_active_session), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + session_id: str, + request: WorkingMemorySetRequest, + _active_session: str | None = Depends(get_active_session), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + logger: logging.Logger = Depends(get_logger), ) -> WorkingMemoryResponse: """ Set a key-value working memory entry in a session. 
@@ -544,22 +508,12 @@ async def set_working_memory( # Get session to find its workspace session = await session_service.get(session_id) if not session: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found: {session_id}") # Check authorization - await authz_service.require_authorization( - ctx, "sessions", "write", - resource_id=session_id, workspace_id=session.workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "write", resource_id=session_id, workspace_id=session.workspace_id) - logger.info( - "Setting working memory in session: %s, key: %s", - session_id, - request.key - ) + logger.info("Setting working memory in session: %s, key: %s", session_id, request.key) memory = await session_service.set_working_memory( workspace_id=session.workspace_id, @@ -569,32 +523,17 @@ async def set_working_memory( ttl_seconds=request.ttl_seconds, ) return WorkingMemoryResponse( - key=memory.key, - value=memory.value, - ttl_seconds=memory.ttl_seconds, - created_at=memory.created_at, - updated_at=memory.updated_at + key=memory.key, value=memory.value, ttl_seconds=memory.ttl_seconds, created_at=memory.created_at, updated_at=memory.updated_at ) except ValueError as e: logger.warning("Invalid working memory set request: %s", e) - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=str(e) - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) except HTTPException: raise except Exception as e: - logger.error( - "Failed to set working memory in session %s: %s", - session_id, - e, - exc_info=True - ) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to set working memory" - ) + logger.error("Failed to set working memory in session %s: %s", session_id, e, exc_info=True) + raise 
HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to set working memory") @router.get( @@ -608,14 +547,14 @@ async def set_working_memory( }, ) async def get_working_memory( - http_request: Request, - session_id: str, - key: str | None = None, - _active_session: str | None = Depends(get_active_session), - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + session_id: str, + key: str | None = None, + _active_session: str | None = Depends(get_active_session), + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + logger: logging.Logger = Depends(get_logger), ) -> dict: """ Get session working memory data. 
@@ -638,26 +577,17 @@ async def get_working_memory( # Get session to find its workspace session = await session_service.get(session_id) if not session: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found: {session_id}") # Check authorization - await authz_service.require_authorization( - ctx, "sessions", "read", - resource_id=session_id, workspace_id=session.workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "read", resource_id=session_id, workspace_id=session.workspace_id) logger.debug("Getting working memory from session: %s, key: %s", session_id, key) if key: memory = await session_service.get_working_memory(session.workspace_id, session_id, key) if not memory: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Working memory key not found: {key}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Working memory key not found: {key}") return {key: memory.value} else: memories = await session_service.get_all_working_memory(session.workspace_id, session_id) @@ -666,16 +596,8 @@ async def get_working_memory( except HTTPException: raise except Exception as e: - logger.error( - "Failed to get working memory from session %s: %s", - session_id, - e, - exc_info=True - ) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to retrieve working memory" - ) + logger.error("Failed to get working memory from session %s: %s", session_id, e, exc_info=True) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve working memory") @router.post( @@ -690,13 +612,13 @@ async def get_working_memory( }, ) async def commit_session( - http_request: Request, - session_id: str, - options: Optional[CommitOptions] = None, - auth_service: AuthenticationService = Depends(get_auth_service), - 
authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + session_id: str, + options: CommitOptions | None = None, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + logger: logging.Logger = Depends(get_logger), ) -> CommitResponse: """ Commit session and finalize working memory. @@ -719,35 +641,23 @@ async def commit_session( # Get session to find its workspace session = await session_service.get(session_id) if not session: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found: {session_id}") # Check authorization - await authz_service.require_authorization( - ctx, "sessions", "write", - resource_id=session_id, workspace_id=session.workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "write", resource_id=session_id, workspace_id=session.workspace_id) logger.info("Committing session: %s with options: %s", session_id, options) # Convert Pydantic model to service CommitOptions from ...services.session.base import CommitOptions as ServiceCommitOptions + service_options = None if options: service_options = ServiceCommitOptions( - include_working_memory=True, - importance_threshold=options.min_importance, - delete_after_commit=False, - tags=[] + include_working_memory=True, importance_threshold=options.min_importance, delete_after_commit=False, tags=[] ) - result = await session_service.commit_session( - session.workspace_id, - session_id, - options=service_options - ) + result = await session_service.commit_session(session.workspace_id, session_id, options=service_options) # Build response from 
CommitResult fields return CommitResponse( @@ -755,24 +665,18 @@ async def commit_session( memories_extracted=result.memories_extracted, memories_deduplicated=result.memories_deduplicated, memories_created=result.memories_committed, - breakdown=result.extraction_summary.get('breakdown', {}), - extraction_time_ms=result.extraction_summary.get('extraction_time_ms', 0) + breakdown=result.extraction_summary.get("breakdown", {}), + extraction_time_ms=result.extraction_summary.get("extraction_time_ms", 0), ) except ValueError as e: logger.warning("Session commit failed: %s", e) - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=str(e) - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) except HTTPException: raise except Exception as e: logger.error("Failed to commit session %s: %s", session_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to commit session" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to commit session") @router.post( @@ -786,13 +690,13 @@ async def commit_session( }, ) async def touch_session( - http_request: Request, - session_id: str, - extend_seconds: Optional[int] = None, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - session_service: SessionService = Depends(get_session_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + session_id: str, + extend_seconds: int | None = None, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + session_service: SessionService = Depends(get_session_service), + logger: logging.Logger = Depends(get_logger), ) -> dict: """ Update session expiration (extend TTL) using sliding window. 
@@ -818,37 +722,23 @@ async def touch_session( # Get session to find its workspace session = await session_service.get(session_id) if not session: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found: {session_id}") # Check authorization - await authz_service.require_authorization( - ctx, "sessions", "write", - resource_id=session_id, workspace_id=session.workspace_id - ) + await authz_service.require_authorization(ctx, "sessions", "write", resource_id=session_id, workspace_id=session.workspace_id) logger.debug("Touching session: %s with extend_seconds=%s", session_id, extend_seconds) - updated_session = await session_service.touch_session( - session.workspace_id, session_id, extend_seconds=extend_seconds - ) + updated_session = await session_service.touch_session(session.workspace_id, session_id, extend_seconds=extend_seconds) if not updated_session: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Session not found: {session_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Session not found: {session_id}") return {"expires_at": updated_session.expires_at.isoformat()} except HTTPException: raise except Exception as e: logger.error("Failed to touch session %s: %s", session_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to touch session" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to touch session") class SessionsAPIPlugin(Plugin): diff --git a/memorylayer-core-python/src/memorylayer_server/api/v1/workspaces.py b/memorylayer-core-python/src/memorylayer_server/api/v1/workspaces.py index 86b9a9d..25e7a7c 100644 --- a/memorylayer-core-python/src/memorylayer_server/api/v1/workspaces.py +++ 
b/memorylayer-core-python/src/memorylayer_server/api/v1/workspaces.py @@ -8,36 +8,37 @@ - PUT /v1/workspaces/{workspace_id} - Update workspace - DELETE /v1/workspaces/{workspace_id} - Delete workspace """ + import json import logging +from datetime import UTC from uuid import uuid4 -from fastapi import APIRouter, HTTPException, Depends, Request, status +from fastapi import APIRouter, Depends, HTTPException, Request, status from fastapi.responses import StreamingResponse from scitrera_app_framework import Plugin, Variables -from .. import EXT_MULTI_API_ROUTERS from memorylayer_server.lifecycle.fastapi import get_logger +from ...services.audit import AuditEvent, AuditService +from ...services.authentication import AuthenticationService +from ...services.authorization import AuthorizationService +from ...services.memory import MemoryService +from ...services.ontology import get_ontology_service as _get_ontology_service +from ...services.workspace import WorkspaceService +from .. import EXT_MULTI_API_ROUTERS +from .deps import get_audit_service, get_auth_service, get_authz_service, get_memory_service, get_workspace_service from .schemas import ( - WorkspaceCreateRequest, - WorkspaceUpdateRequest, - WorkspaceResponse, - WorkspaceListResponse, + AssociationExportItem, ErrorResponse, MemoryExportItem, - AssociationExportItem, - WorkspaceExportData, + WorkspaceCreateRequest, WorkspaceImportRequest, WorkspaceImportResult, + WorkspaceListResponse, + WorkspaceResponse, + WorkspaceUpdateRequest, ) -from ...services.workspace import WorkspaceService -from ...services.ontology import get_ontology_service as _get_ontology_service -from ...services.memory import MemoryService -from ...services.authentication import AuthenticationService -from ...services.authorization import AuthorizationService -from .deps import get_auth_service, get_authz_service, get_workspace_service, get_memory_service, get_audit_service -from ...services.audit import AuditService, AuditEvent router = 
APIRouter(prefix="/v1/workspaces", tags=["workspaces"]) @@ -54,13 +55,13 @@ }, ) async def create_workspace( - http_request: Request, - request: WorkspaceCreateRequest, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - workspace_service: WorkspaceService = Depends(get_workspace_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + request: WorkspaceCreateRequest, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + workspace_service: WorkspaceService = Depends(get_workspace_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> WorkspaceResponse: """ Create a new workspace. @@ -88,15 +89,11 @@ async def create_workspace( # Generate workspace ID workspace_id = f"ws_{uuid4().hex[:16]}" - logger.info( - "Creating workspace: %s for tenant: %s, name: %s", - workspace_id, - ctx.tenant_id, - request.name - ) + logger.info("Creating workspace: %s for tenant: %s, name: %s", workspace_id, ctx.tenant_id, request.name) # Create workspace from ...models.workspace import Workspace + workspace = Workspace( id=workspace_id, tenant_id=ctx.tenant_id, @@ -109,31 +106,27 @@ async def create_workspace( logger.info("Created workspace: %s", workspace_id) try: - await audit_service.record(AuditEvent( - event_type="workspace", - action="create", - tenant_id=ctx.tenant_id, - workspace_id=workspace.id, - user_id=ctx.user_id, - resource_type="workspace", - resource_id=workspace.id, - )) + await audit_service.record( + AuditEvent( + event_type="workspace", + action="create", + tenant_id=ctx.tenant_id, + workspace_id=workspace.id, + user_id=ctx.user_id, + resource_type="workspace", + resource_id=workspace.id, + ) + ) except Exception: logger.debug("Audit record failed 
for workspace create") return WorkspaceResponse(workspace=workspace) except ValueError as e: logger.warning("Invalid workspace creation request: %s", e) - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=str(e) - ) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) except Exception as e: logger.error("Failed to create workspace: %s", e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to create workspace" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create workspace") @router.get( @@ -146,11 +139,11 @@ async def create_workspace( }, ) async def list_workspaces( - http_request: Request, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - workspace_service: WorkspaceService = Depends(get_workspace_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + workspace_service: WorkspaceService = Depends(get_workspace_service), + logger: logging.Logger = Depends(get_logger), ) -> WorkspaceListResponse: """ List all workspaces. 
@@ -171,10 +164,7 @@ async def list_workspaces( raise except Exception as e: logger.error("Failed to list workspaces: %s", e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to list workspaces" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to list workspaces") @router.get( @@ -188,13 +178,13 @@ async def list_workspaces( }, ) async def get_workspace( - http_request: Request, - workspace_id: str, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - workspace_service: WorkspaceService = Depends(get_workspace_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + workspace_service: WorkspaceService = Depends(get_workspace_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ) -> WorkspaceResponse: """ Retrieve a workspace by ID. 
@@ -215,31 +205,27 @@ async def get_workspace( try: # Build auth context and check authorization ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "workspaces", "read", - resource_id=workspace_id, workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "workspaces", "read", resource_id=workspace_id, workspace_id=workspace_id) logger.debug("Getting workspace: %s", workspace_id) # Get workspace via workspace service workspace = await workspace_service.get_workspace(workspace_id) if not workspace: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Workspace not found: {workspace_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Workspace not found: {workspace_id}") try: - await audit_service.record(AuditEvent( - event_type="workspace", - action="read", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="workspace", - resource_id=workspace_id, - )) + await audit_service.record( + AuditEvent( + event_type="workspace", + action="read", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + user_id=ctx.user_id, + resource_type="workspace", + resource_id=workspace_id, + ) + ) except Exception: logger.debug("Audit record failed for workspace read") return WorkspaceResponse(workspace=workspace) @@ -248,10 +234,7 @@ async def get_workspace( raise except Exception as e: logger.error("Failed to get workspace %s: %s", workspace_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to retrieve workspace" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve workspace") @router.put( @@ -266,13 +249,13 @@ async def get_workspace( }, ) async def update_workspace( - http_request: Request, - workspace_id: str, - request: WorkspaceUpdateRequest, - auth_service: AuthenticationService = 
Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - workspace_service: WorkspaceService = Depends(get_workspace_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str, + request: WorkspaceUpdateRequest, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + workspace_service: WorkspaceService = Depends(get_workspace_service), + logger: logging.Logger = Depends(get_logger), ) -> WorkspaceResponse: """ Update an existing workspace. @@ -294,20 +277,14 @@ async def update_workspace( try: # Build auth context and check authorization ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "workspaces", "write", - resource_id=workspace_id, workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "workspaces", "write", resource_id=workspace_id, workspace_id=workspace_id) logger.info("Updating workspace: %s", workspace_id) # Get existing workspace workspace = await workspace_service.get_workspace(workspace_id) if not workspace: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Workspace not found: {workspace_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Workspace not found: {workspace_id}") # Update fields if request.name is not None: @@ -321,16 +298,10 @@ async def update_workspace( raise except ValueError as e: logger.warning("Invalid workspace update request: %s", e) - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=str(e) - ) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) except Exception as e: logger.error("Failed to update workspace %s: %s", workspace_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to update workspace" - ) + raise 
HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to update workspace") @router.delete( @@ -344,39 +315,35 @@ async def update_workspace( }, ) async def delete_workspace( - http_request: Request, - workspace_id: str, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - workspace_service: WorkspaceService = Depends(get_workspace_service), - audit_service: AuditService = Depends(get_audit_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + workspace_service: WorkspaceService = Depends(get_workspace_service), + audit_service: AuditService = Depends(get_audit_service), + logger: logging.Logger = Depends(get_logger), ): """Delete a workspace and all associated data.""" try: ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "workspaces", "delete", - resource_id=workspace_id, workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "workspaces", "delete", resource_id=workspace_id, workspace_id=workspace_id) deleted = await workspace_service.delete_workspace(workspace_id) if not deleted: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Workspace not found: {workspace_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Workspace not found: {workspace_id}") try: - await audit_service.record(AuditEvent( - event_type="workspace", - action="delete", - tenant_id=ctx.tenant_id, - workspace_id=workspace_id, - user_id=ctx.user_id, - resource_type="workspace", - resource_id=workspace_id, - )) + await audit_service.record( + AuditEvent( + event_type="workspace", + action="delete", + tenant_id=ctx.tenant_id, + workspace_id=workspace_id, + 
user_id=ctx.user_id, + resource_type="workspace", + resource_id=workspace_id, + ) + ) except Exception: logger.debug("Audit record failed for workspace delete") @@ -384,10 +351,7 @@ async def delete_workspace( raise except Exception as e: logger.error("Failed to delete workspace %s: %s", workspace_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to delete workspace" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to delete workspace") @router.get( @@ -401,12 +365,12 @@ async def delete_workspace( }, ) async def get_workspace_schema( - http_request: Request, - workspace_id: str, - auth_service: AuthenticationService = Depends(get_auth_service), - authz_service: AuthorizationService = Depends(get_authz_service), - workspace_service: WorkspaceService = Depends(get_workspace_service), - logger: logging.Logger = Depends(get_logger), + http_request: Request, + workspace_id: str, + auth_service: AuthenticationService = Depends(get_auth_service), + authz_service: AuthorizationService = Depends(get_authz_service), + workspace_service: WorkspaceService = Depends(get_workspace_service), + logger: logging.Logger = Depends(get_logger), ) -> dict: """ Get workspace schema including relationship types and memory subtypes. 
@@ -427,32 +391,24 @@ async def get_workspace_schema( try: # Build auth context and check authorization ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "workspaces", "read", - resource_id=workspace_id, workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "workspaces", "read", resource_id=workspace_id, workspace_id=workspace_id) logger.debug("Getting schema for workspace: %s", workspace_id) # Verify workspace exists workspace = await workspace_service.get_workspace(workspace_id) if not workspace: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Workspace not found: {workspace_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Workspace not found: {workspace_id}") # Get ontology service ontology_service = _get_ontology_service() # Get relationship types from ontology - relationship_types = ontology_service.list_relationship_types( - tenant_id=ctx.tenant_id, - workspace_id=workspace_id - ) + relationship_types = ontology_service.list_relationship_types(tenant_id=ctx.tenant_id, workspace_id=workspace_id) # Get memory subtypes from model from ...models.memory import MemorySubtype + memory_subtypes = [subtype.value for subtype in MemorySubtype] return { @@ -464,16 +420,8 @@ async def get_workspace_schema( except HTTPException: raise except Exception as e: - logger.error( - "Failed to get schema for workspace %s: %s", - workspace_id, - e, - exc_info=True - ) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to retrieve workspace schema" - ) + logger.error("Failed to get schema for workspace %s: %s", workspace_id, e, exc_info=True) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve workspace schema") def _serialize_memory(m: dict) -> dict: @@ -505,7 +453,7 @@ async def _generate_export_ndjson( logger: logging.Logger, ): """Generate NDJSON 
export stream.""" - from datetime import datetime, timezone + from datetime import datetime # Get workspace stats for counts stats = await storage.get_workspace_stats(workspace_id) @@ -517,7 +465,7 @@ async def _generate_export_ndjson( "type": "header", "version": "1.0", "workspace_id": workspace_id, - "exported_at": datetime.now(timezone.utc).isoformat(), + "exported_at": datetime.now(UTC).isoformat(), "total_memories": total_memories, "total_associations": total_associations, "offset": offset, @@ -545,7 +493,7 @@ async def _generate_export_ndjson( # Fetch batch batch = await storage.get_recent_memories( workspace_id, - created_after=datetime(2000, 1, 1, tzinfo=timezone.utc), + created_after=datetime(2000, 1, 1, tzinfo=UTC), limit=batch_limit, offset=batch_offset, detail_level="full", @@ -556,7 +504,7 @@ async def _generate_export_ndjson( # Stream each memory (convert Memory objects to dicts) for m_obj in batch: - m = m_obj.model_dump() if hasattr(m_obj, 'model_dump') else m_obj + m = m_obj.model_dump() if hasattr(m_obj, "model_dump") else m_obj memory_ids.append(m["id"]) memory_line = { "type": "memory", @@ -645,18 +593,12 @@ async def export_workspace( """ try: ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "workspaces", "read", - resource_id=workspace_id, workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "workspaces", "read", resource_id=workspace_id, workspace_id=workspace_id) # Verify workspace exists workspace = await workspace_service.get_workspace(workspace_id) if not workspace: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Workspace not found: {workspace_id}" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Workspace not found: {workspace_id}") logger.info("Exporting workspace: %s (offset=%d, limit=%d)", workspace_id, offset, limit) @@ -676,16 +618,14 @@ async def export_workspace( raise except Exception as e: 
logger.error("Failed to export workspace %s: %s", workspace_id, e, exc_info=True) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to export workspace" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to export workspace") def _process_memory_import(item: MemoryExportItem, workspace_id: str, tenant_id: str, id_mapping: dict) -> tuple[bool, str, str]: """Process a single memory import. Returns (success, new_id, error_msg).""" from uuid import uuid4 - from ...models.memory import Memory, MemoryType, MemorySubtype + + from ...models.memory import Memory, MemorySubtype, MemoryType try: # Parse type/subtype @@ -733,6 +673,7 @@ def _process_association_import(assoc: AssociationExportItem, id_mapping: dict) return False, {}, "Source or target memory not found in mapping" from ...models.association import AssociateInput + assoc_input = AssociateInput( source_id=new_source, target_id=new_target, @@ -768,10 +709,7 @@ async def import_workspace( """Import memories and associations from JSON or NDJSON export.""" try: ctx = await auth_service.build_context(http_request, None) - await authz_service.require_authorization( - ctx, "workspaces", "write", - resource_id=workspace_id, workspace_id=workspace_id - ) + await authz_service.require_authorization(ctx, "workspaces", "write", resource_id=workspace_id, workspace_id=workspace_id) workspace = await workspace_service.get_workspace(workspace_id) if not workspace: @@ -786,7 +724,7 @@ async def import_workspace( if "application/x-ndjson" in content_type: # NDJSON format body = await http_request.body() - lines = body.decode().strip().split('\n') + lines = body.decode().strip().split("\n") for line in lines: if not line.strip(): @@ -862,10 +800,7 @@ async def import_workspace( if assoc_imported > 0: details.append(f"Imported {assoc_imported} associations") - logger.info( - "Import complete for workspace %s: imported=%d, skipped=%d, errors=%d", - 
workspace_id, imported, skipped, errors - ) + logger.info("Import complete for workspace %s: imported=%d, skipped=%d, errors=%d", workspace_id, imported, skipped, errors) return WorkspaceImportResult( imported=imported, diff --git a/memorylayer-core-python/src/memorylayer_server/cli.py b/memorylayer-core-python/src/memorylayer_server/cli.py index aad091b..2812429 100644 --- a/memorylayer-core-python/src/memorylayer_server/cli.py +++ b/memorylayer-core-python/src/memorylayer_server/cli.py @@ -2,8 +2,9 @@ import json import logging -import click +from datetime import UTC +import click from scitrera_app_framework import get_variables logger = logging.getLogger(__name__) @@ -24,8 +25,12 @@ def cli(verbose: bool): def serve(host: str, port: int): """Start the HTTP REST API server.""" import uvicorn + from memorylayer_server.config import ( - MEMORYLAYER_SERVER_HOST, MEMORYLAYER_SERVER_PORT, DEFAULT_MEMORYLAYER_SERVER_HOST, DEFAULT_MEMORYLAYER_SERVER_PORT + DEFAULT_MEMORYLAYER_SERVER_HOST, + DEFAULT_MEMORYLAYER_SERVER_PORT, + MEMORYLAYER_SERVER_HOST, + MEMORYLAYER_SERVER_PORT, ) from memorylayer_server.dependencies import preconfigure from memorylayer_server.lifecycle.fastapi import fastapi_app_factory @@ -53,28 +58,26 @@ def serve(host: str, port: int): def version(): """Show version information.""" from memorylayer_server import __version__ + click.echo(f"memorylayer.ai v{__version__}") @cli.command() -@click.option('--workspace', '-w', required=True, help='Workspace ID to export') -@click.option('--output', '-o', default=None, help='Output file (default: stdout)') -@click.option('--offset', default=0, type=int, help='Skip first N memories (default: 0)') -@click.option('--limit', default=0, type=int, help='Export at most N memories (default: 0 = unlimited)') -@click.option('--include-associations/--no-associations', default=True, help='Include associations') -@click.option('--server-url', default='http://localhost:61001', help='MemoryLayer server URL') 
-@click.option('--api-key', default=None, help='API key for authentication') +@click.option("--workspace", "-w", required=True, help="Workspace ID to export") +@click.option("--output", "-o", default=None, help="Output file (default: stdout)") +@click.option("--offset", default=0, type=int, help="Skip first N memories (default: 0)") +@click.option("--limit", default=0, type=int, help="Export at most N memories (default: 0 = unlimited)") +@click.option("--include-associations/--no-associations", default=True, help="Include associations") +@click.option("--server-url", default="http://localhost:61001", help="MemoryLayer server URL") +@click.option("--api-key", default=None, help="API key for authentication") def export(workspace, output, offset, limit, include_associations, server_url, api_key): """Export workspace memories to NDJSON (streaming).""" - import httpx import json + import httpx + url = f"{server_url}/v1/workspaces/{workspace}/export" - params = { - "offset": offset, - "limit": limit, - "include_associations": str(include_associations).lower() - } + params = {"offset": offset, "limit": limit, "include_associations": str(include_associations).lower()} headers = {} if api_key: headers["Authorization"] = f"Bearer {api_key}" @@ -88,11 +91,11 @@ def export(workspace, output, offset, limit, include_associations, server_url, a associations_count = 0 if output: - with open(output, 'w') as f: + with open(output, "w") as f: for line in response.iter_lines(): if not line.strip(): continue - f.write(line + '\n') + f.write(line + "\n") # Parse footer to get counts try: @@ -113,19 +116,20 @@ def export(workspace, output, offset, limit, include_associations, server_url, a raise SystemExit(1) -@cli.command(name='import') -@click.argument('file', type=click.Path(exists=True)) -@click.option('--workspace', '-w', required=True, help='Target workspace ID') -@click.option('--dry-run', is_flag=True, help='Show what would be imported without writing') 
-@click.option('--server-url', default='http://localhost:61001', help='MemoryLayer server URL') -@click.option('--api-key', default=None, help='API key for authentication') +@cli.command(name="import") +@click.argument("file", type=click.Path(exists=True)) +@click.option("--workspace", "-w", required=True, help="Target workspace ID") +@click.option("--dry-run", is_flag=True, help="Show what would be imported without writing") +@click.option("--server-url", default="http://localhost:61001", help="MemoryLayer server URL") +@click.option("--api-key", default=None, help="API key for authentication") def import_cmd(file, workspace, dry_run, server_url, api_key): """Import memories from JSON or NDJSON file into workspace.""" - import httpx import json + import httpx + # Auto-detect format by reading first line - with open(file, 'r') as f: + with open(file) as f: first_line = f.readline().strip() is_ndjson = False @@ -140,7 +144,7 @@ def import_cmd(file, workspace, dry_run, server_url, api_key): # NDJSON format memories = [] associations = [] - with open(file, 'r') as f: + with open(file) as f: for line in f: if not line.strip(): continue @@ -155,10 +159,10 @@ def import_cmd(file, workspace, dry_run, server_url, api_key): logger.debug("Skipped item during processing: %s", e) else: # JSON format - with open(file, 'r') as f: + with open(file) as f: data = json.load(f) - memories = data.get('memories', []) - associations = data.get('associations', []) + memories = data.get("memories", []) + associations = data.get("associations", []) if dry_run: click.echo(f"Would import {len(memories)} memories and {len(associations)} associations into workspace {workspace}") @@ -178,7 +182,7 @@ def import_cmd(file, workspace, dry_run, server_url, api_key): if is_ndjson: # Send as NDJSON headers["Content-Type"] = "application/x-ndjson" - with open(file, 'rb') as f: + with open(file, "rb") as f: response = client.post(url, content=f.read(), headers=headers) else: # Send as JSON @@ -191,11 
+195,11 @@ def import_cmd(file, workspace, dry_run, server_url, api_key): click.echo(f"Error: Failed to import: {e}", err=True) raise SystemExit(1) - click.echo(f"Import complete:") + click.echo("Import complete:") click.echo(f" Imported: {result.get('imported', 0)}") click.echo(f" Skipped (duplicates): {result.get('skipped_duplicates', 0)}") click.echo(f" Errors: {result.get('errors', 0)}") - for detail in result.get('details', []): + for detail in result.get("details", []): click.echo(f" {detail}") @@ -205,9 +209,11 @@ def import_cmd(file, workspace, dry_run, server_url, api_key): def info(output_format: str, reveal_secrets: bool): """Show system information and configuration.""" + from datetime import datetime + from memorylayer_server import __version__ from memorylayer_server.dependencies import _initialize_sync - from datetime import datetime, timezone + v = get_variables() v.set("LOGGING_LEVEL", "ERROR") # suppress logs during info output v = _initialize_sync(v) @@ -216,21 +222,35 @@ def info(output_format: str, reveal_secrets: bool): # TODO: move redaction log to scitrera_app_framework and share with log_framework_variables def _redacted(k, val): - return '(redacted)' if any( - (not 'max_tokens' in k.lower()) and x in k.lower() for x in ('password', 'secret', 'credentials', 'token', 'key',)) else val - - settings = ({k: _redacted(k, v) for (k, v) in sorted(v.export_all_variables().items(), key=lambda kv: kv[0]) - if k.startswith('MEMORYLAYER')} if redact_keys else - {k: v for k, v in sorted(v.export_all_variables().items(), key=lambda kv: kv[0]) if k.startswith('MEMORYLAYER')}) + return ( + "(redacted)" + if any( + ("max_tokens" not in k.lower()) and x in k.lower() + for x in ( + "password", + "secret", + "credentials", + "token", + "key", + ) + ) + else val + ) + + settings = ( + {k: _redacted(k, v) for (k, v) in sorted(v.export_all_variables().items(), key=lambda kv: kv[0]) if k.startswith("MEMORYLAYER")} + if redact_keys + else {k: v for k, v in 
sorted(v.export_all_variables().items(), key=lambda kv: kv[0]) if k.startswith("MEMORYLAYER")} + ) if output_format == "json": - click.echo(json.dumps({k.removeprefix('MEMORYLAYER_').lower(): v for k, v in settings.items()}, indent=2)) + click.echo(json.dumps({k.removeprefix("MEMORYLAYER_").lower(): v for k, v in settings.items()}, indent=2)) else: - click.echo('# ' + "=" * 50) + click.echo("# " + "=" * 50) click.echo("# MemoryLayer.ai Configuration") - click.echo(f"# exported at {datetime.now(tz=timezone.utc).isoformat()}") + click.echo(f"# exported at {datetime.now(tz=UTC).isoformat()}") click.echo(f"# version = v{__version__}") - click.echo('# ' + "=" * 50) + click.echo("# " + "=" * 50) for k, v in settings.items(): click.echo(f"{k}={v}") click.echo("") diff --git a/memorylayer-core-python/src/memorylayer_server/config.py b/memorylayer-core-python/src/memorylayer_server/config.py index 2375c3a..53ea717 100644 --- a/memorylayer-core-python/src/memorylayer_server/config.py +++ b/memorylayer-core-python/src/memorylayer_server/config.py @@ -1,19 +1,18 @@ """Configuration management for MemoryLayer.ai using Pydantic Settings.""" from enum import Enum -from pathlib import Path # ============================================ # Data Home Directory # ============================================ -MEMORYLAYER_DATA_DIR = 'MEMORYLAYER_DATA_DIR' +MEMORYLAYER_DATA_DIR = "MEMORYLAYER_DATA_DIR" # ============================================ # Server Configuration # ============================================ -MEMORYLAYER_SERVER_HOST = 'MEMORYLAYER_SERVER_HOST' -DEFAULT_MEMORYLAYER_SERVER_HOST = '127.0.0.1' -MEMORYLAYER_SERVER_PORT = 'MEMORYLAYER_SERVER_PORT' +MEMORYLAYER_SERVER_HOST = "MEMORYLAYER_SERVER_HOST" +DEFAULT_MEMORYLAYER_SERVER_HOST = "127.0.0.1" +MEMORYLAYER_SERVER_PORT = "MEMORYLAYER_SERVER_PORT" DEFAULT_MEMORYLAYER_SERVER_PORT = 61001 @@ -29,88 +28,88 @@ class EmbeddingProviderType(str, Enum): MOCK = "mock" # Mock provider for testing only (deterministic hash-based) 
-MEMORYLAYER_EMBEDDING_PROVIDER = 'MEMORYLAYER_EMBEDDING_PROVIDER' +MEMORYLAYER_EMBEDDING_PROVIDER = "MEMORYLAYER_EMBEDDING_PROVIDER" DEFAULT_MEMORYLAYER_EMBEDDING_PROVIDER = EmbeddingProviderType.LOCAL -MEMORYLAYER_EMBEDDING_MODEL = 'MEMORYLAYER_EMBEDDING_MODEL' -MEMORYLAYER_EMBEDDING_DIMENSIONS = 'MEMORYLAYER_EMBEDDING_DIMENSIONS' -MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED = 'MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED' +MEMORYLAYER_EMBEDDING_MODEL = "MEMORYLAYER_EMBEDDING_MODEL" +MEMORYLAYER_EMBEDDING_DIMENSIONS = "MEMORYLAYER_EMBEDDING_DIMENSIONS" +MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED = "MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED" DEFAULT_MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED = True # ============================================ # Embedding Service # ============================================ -MEMORYLAYER_EMBEDDING_SERVICE = 'MEMORYLAYER_EMBEDDING_SERVICE' -DEFAULT_MEMORYLAYER_EMBEDDING_SERVICE = 'default' +MEMORYLAYER_EMBEDDING_SERVICE = "MEMORYLAYER_EMBEDDING_SERVICE" +DEFAULT_MEMORYLAYER_EMBEDDING_SERVICE = "default" # ============================================ # Storage Backend # ============================================ -MEMORYLAYER_STORAGE_BACKEND = 'MEMORYLAYER_STORAGE_BACKEND' -DEFAULT_MEMORYLAYER_STORAGE_BACKEND = 'sqlite' +MEMORYLAYER_STORAGE_BACKEND = "MEMORYLAYER_STORAGE_BACKEND" +DEFAULT_MEMORYLAYER_STORAGE_BACKEND = "sqlite" -MEMORYLAYER_SQLITE_STORAGE_PATH = 'MEMORYLAYER_SQLITE_STORAGE_PATH' +MEMORYLAYER_SQLITE_STORAGE_PATH = "MEMORYLAYER_SQLITE_STORAGE_PATH" DEFAULT_MEMORYLAYER_SQLITE_STORAGE_PATH = "memorylayer.db" # ============================================ # Memory Service # ============================================ -MEMORYLAYER_MEMORY_SERVICE = 'MEMORYLAYER_MEMORY_SERVICE' -DEFAULT_MEMORYLAYER_MEMORY_SERVICE = 'default' +MEMORYLAYER_MEMORY_SERVICE = "MEMORYLAYER_MEMORY_SERVICE" +DEFAULT_MEMORYLAYER_MEMORY_SERVICE = "default" # ============================================ # Reflection Service # ============================================ 
-MEMORYLAYER_REFLECT_SERVICE = 'MEMORYLAYER_REFLECT_SERVICE' -DEFAULT_MEMORYLAYER_REFLECT_SERVICE = 'default' +MEMORYLAYER_REFLECT_SERVICE = "MEMORYLAYER_REFLECT_SERVICE" +DEFAULT_MEMORYLAYER_REFLECT_SERVICE = "default" # ============================================ # Session Service # ============================================ -MEMORYLAYER_SESSION_SERVICE = 'MEMORYLAYER_SESSION_SERVICE' -DEFAULT_MEMORYLAYER_SESSION_SERVICE = 'in-memory' +MEMORYLAYER_SESSION_SERVICE = "MEMORYLAYER_SESSION_SERVICE" +DEFAULT_MEMORYLAYER_SESSION_SERVICE = "in-memory" -MEMORYLAYER_SESSION_IMPLICIT_CREATE = 'MEMORYLAYER_SESSION_IMPLICIT_CREATE' +MEMORYLAYER_SESSION_IMPLICIT_CREATE = "MEMORYLAYER_SESSION_IMPLICIT_CREATE" DEFAULT_MEMORYLAYER_SESSION_IMPLICIT_CREATE = True -MEMORYLAYER_SESSION_TOUCH_TTL = 'MEMORYLAYER_SESSION_TOUCH_TTL' +MEMORYLAYER_SESSION_TOUCH_TTL = "MEMORYLAYER_SESSION_TOUCH_TTL" DEFAULT_MEMORYLAYER_SESSION_TOUCH_TTL = 3600 # Token-budget-aware extraction thresholds -MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL = 'MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL' +MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL = "MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL" DEFAULT_MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL = 12000 -MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT = 'MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT' +MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT = "MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT" DEFAULT_MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT = 10000 -MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH = 'MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH' +MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH = "MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH" DEFAULT_MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH = 5000 # ============================================ # Workspace Service # ============================================ -MEMORYLAYER_WORKSPACE_SERVICE = 'MEMORYLAYER_WORKSPACE_SERVICE' -DEFAULT_MEMORYLAYER_WORKSPACE_SERVICE = 'default' +MEMORYLAYER_WORKSPACE_SERVICE = "MEMORYLAYER_WORKSPACE_SERVICE" +DEFAULT_MEMORYLAYER_WORKSPACE_SERVICE = "default" # 
============================================ # Association Service # ============================================ -MEMORYLAYER_ASSOCIATION_SERVICE = 'MEMORYLAYER_ASSOCIATION_SERVICE' -DEFAULT_MEMORYLAYER_ASSOCIATION_SERVICE = 'default' +MEMORYLAYER_ASSOCIATION_SERVICE = "MEMORYLAYER_ASSOCIATION_SERVICE" +DEFAULT_MEMORYLAYER_ASSOCIATION_SERVICE = "default" -MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD = 'MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD' +MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD = "MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD" DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD = 0.85 # ============================================ # Authentication Service # ============================================ -MEMORYLAYER_AUTHENTICATION_SERVICE = 'MEMORYLAYER_AUTHENTICATION_SERVICE' -DEFAULT_MEMORYLAYER_AUTHENTICATION_SERVICE = 'default' # Open authentication (allow all) +MEMORYLAYER_AUTHENTICATION_SERVICE = "MEMORYLAYER_AUTHENTICATION_SERVICE" +DEFAULT_MEMORYLAYER_AUTHENTICATION_SERVICE = "default" # Open authentication (allow all) # ============================================ # Authorization Service # ============================================ -MEMORYLAYER_AUTHORIZATION_SERVICE = 'MEMORYLAYER_AUTHORIZATION_SERVICE' -DEFAULT_MEMORYLAYER_AUTHORIZATION_SERVICE = 'default' # Open permissions (allow all) +MEMORYLAYER_AUTHORIZATION_SERVICE = "MEMORYLAYER_AUTHORIZATION_SERVICE" +DEFAULT_MEMORYLAYER_AUTHORIZATION_SERVICE = "default" # Open permissions (allow all) # ============================================ @@ -118,6 +117,7 @@ class EmbeddingProviderType(str, Enum): # ============================================ class RerankerProviderType(str, Enum): """Available reranker provider types.""" + LLM = "llm" # Use LLM service for reranking HYDE = "hyde" # Hypothetical Document Embeddings (LLM + embedding) RRF = "rrf" # Reciprocal Rank Fusion (embedding-only multi-query) (default) @@ -125,20 +125,20 @@ class RerankerProviderType(str, Enum): NONE = "none" # 
Disabled (no reranking) -MEMORYLAYER_RERANKER_PROVIDER = 'MEMORYLAYER_RERANKER_PROVIDER' -DEFAULT_MEMORYLAYER_RERANKER_PROVIDER = 'rrf' +MEMORYLAYER_RERANKER_PROVIDER = "MEMORYLAYER_RERANKER_PROVIDER" +DEFAULT_MEMORYLAYER_RERANKER_PROVIDER = "rrf" -MEMORYLAYER_RERANKER_SERVICE = 'MEMORYLAYER_RERANKER_SERVICE' -DEFAULT_MEMORYLAYER_RERANKER_SERVICE = 'default' +MEMORYLAYER_RERANKER_SERVICE = "MEMORYLAYER_RERANKER_SERVICE" +DEFAULT_MEMORYLAYER_RERANKER_SERVICE = "default" -MEMORYLAYER_RERANKER_PRELOAD_ENABLED = 'MEMORYLAYER_RERANKER_PRELOAD_ENABLED' +MEMORYLAYER_RERANKER_PRELOAD_ENABLED = "MEMORYLAYER_RERANKER_PRELOAD_ENABLED" DEFAULT_MEMORYLAYER_RERANKER_PRELOAD_ENABLED = True # ============================================ # Cache Service # ============================================ -MEMORYLAYER_CACHE_SERVICE = 'MEMORYLAYER_CACHE_SERVICE' -DEFAULT_MEMORYLAYER_CACHE_SERVICE = 'lru' +MEMORYLAYER_CACHE_SERVICE = "MEMORYLAYER_CACHE_SERVICE" +DEFAULT_MEMORYLAYER_CACHE_SERVICE = "lru" # Default tenant and workspace constants # Use underscore prefix for all reserved/system entities @@ -154,41 +154,41 @@ class RerankerProviderType(str, Enum): # ============================================ # Semantic Tiering Service # ============================================ -MEMORYLAYER_SEMANTIC_TIERING_SERVICE = 'MEMORYLAYER_SEMANTIC_TIERING_SERVICE' -DEFAULT_MEMORYLAYER_SEMANTIC_TIERING_SERVICE = 'default' +MEMORYLAYER_SEMANTIC_TIERING_SERVICE = "MEMORYLAYER_SEMANTIC_TIERING_SERVICE" +DEFAULT_MEMORYLAYER_SEMANTIC_TIERING_SERVICE = "default" -MEMORYLAYER_SEMANTIC_TIERING_ENABLED = 'MEMORYLAYER_SEMANTIC_TIERING_ENABLED' +MEMORYLAYER_SEMANTIC_TIERING_ENABLED = "MEMORYLAYER_SEMANTIC_TIERING_ENABLED" DEFAULT_MEMORYLAYER_SEMANTIC_TIERING_ENABLED = True # ============================================ # Deduplication Service # ============================================ -MEMORYLAYER_DEDUPLICATION_SERVICE = 'MEMORYLAYER_DEDUPLICATION_SERVICE' -DEFAULT_MEMORYLAYER_DEDUPLICATION_SERVICE = 
'default' +MEMORYLAYER_DEDUPLICATION_SERVICE = "MEMORYLAYER_DEDUPLICATION_SERVICE" +DEFAULT_MEMORYLAYER_DEDUPLICATION_SERVICE = "default" # ============================================ # Ontology Service # ============================================ -MEMORYLAYER_ONTOLOGY_SERVICE = 'MEMORYLAYER_ONTOLOGY_SERVICE' -DEFAULT_MEMORYLAYER_ONTOLOGY_SERVICE = 'default' +MEMORYLAYER_ONTOLOGY_SERVICE = "MEMORYLAYER_ONTOLOGY_SERVICE" +DEFAULT_MEMORYLAYER_ONTOLOGY_SERVICE = "default" # ============================================ # Extraction Service # ============================================ -MEMORYLAYER_EXTRACTION_SERVICE = 'MEMORYLAYER_EXTRACTION_SERVICE' -DEFAULT_MEMORYLAYER_EXTRACTION_SERVICE = 'default' +MEMORYLAYER_EXTRACTION_SERVICE = "MEMORYLAYER_EXTRACTION_SERVICE" +DEFAULT_MEMORYLAYER_EXTRACTION_SERVICE = "default" # ============================================ # Inference Service (entity insight derivation) # ============================================ -MEMORYLAYER_INFERENCE_SERVICE = 'MEMORYLAYER_INFERENCE_SERVICE' -DEFAULT_MEMORYLAYER_INFERENCE_SERVICE = 'default' +MEMORYLAYER_INFERENCE_SERVICE = "MEMORYLAYER_INFERENCE_SERVICE" +DEFAULT_MEMORYLAYER_INFERENCE_SERVICE = "default" # ============================================ # Task Service # ============================================ -MEMORYLAYER_TASK_PROVIDER = 'MEMORYLAYER_TASK_PROVIDER' -DEFAULT_MEMORYLAYER_TASK_PROVIDER = 'asyncio' +MEMORYLAYER_TASK_PROVIDER = "MEMORYLAYER_TASK_PROVIDER" +DEFAULT_MEMORYLAYER_TASK_PROVIDER = "asyncio" # ============================================ # Recall Scoring: Recency Boost @@ -199,135 +199,135 @@ class RerankerProviderType(str, Enum): # ============================================ # Recall Scoring: Freshness Annotation # ============================================ -MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS = 'MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS' +MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS = "MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS" DEFAULT_MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS 
= 7.0 # ============================================ # Recall Scoring: Scope Boosts # ============================================ -MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT = 'MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT' +MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT = "MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT" DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT = 1.5 -MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE = 'MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE' +MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE = "MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE" DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE = 1.2 # ============================================ # Decay Service # ============================================ -MEMORYLAYER_DECAY_PROVIDER = 'MEMORYLAYER_DECAY_PROVIDER' -DEFAULT_MEMORYLAYER_DECAY_PROVIDER = 'default' +MEMORYLAYER_DECAY_PROVIDER = "MEMORYLAYER_DECAY_PROVIDER" +DEFAULT_MEMORYLAYER_DECAY_PROVIDER = "default" # ============================================ # Contradiction Service # ============================================ -MEMORYLAYER_CONTRADICTION_PROVIDER = 'MEMORYLAYER_CONTRADICTION_PROVIDER' -DEFAULT_MEMORYLAYER_CONTRADICTION_PROVIDER = 'default' +MEMORYLAYER_CONTRADICTION_PROVIDER = "MEMORYLAYER_CONTRADICTION_PROVIDER" +DEFAULT_MEMORYLAYER_CONTRADICTION_PROVIDER = "default" # ============================================ # Fact Decomposition # ============================================ -MEMORYLAYER_FACT_DECOMPOSITION_ENABLED = 'MEMORYLAYER_FACT_DECOMPOSITION_ENABLED' +MEMORYLAYER_FACT_DECOMPOSITION_ENABLED = "MEMORYLAYER_FACT_DECOMPOSITION_ENABLED" DEFAULT_MEMORYLAYER_FACT_DECOMPOSITION_ENABLED = True -MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH = 'MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH' +MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH = "MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH" DEFAULT_MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH = 80 # ============================================ # Context Environment Service # ============================================ -MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE = 
'MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE' -DEFAULT_MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE = 'default' +MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE = "MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE" +DEFAULT_MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE = "default" -MEMORYLAYER_CONTEXT_EXECUTOR = 'MEMORYLAYER_CONTEXT_EXECUTOR' -DEFAULT_MEMORYLAYER_CONTEXT_EXECUTOR = 'smolagents' +MEMORYLAYER_CONTEXT_EXECUTOR = "MEMORYLAYER_CONTEXT_EXECUTOR" +DEFAULT_MEMORYLAYER_CONTEXT_EXECUTOR = "smolagents" -MEMORYLAYER_CONTEXT_MAX_OPERATIONS = 'MEMORYLAYER_CONTEXT_MAX_OPERATIONS' +MEMORYLAYER_CONTEXT_MAX_OPERATIONS = "MEMORYLAYER_CONTEXT_MAX_OPERATIONS" DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS = 1_000_000 -MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS = 'MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS' +MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS = "MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS" DEFAULT_MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS = 30 -MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS = 'MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS' +MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS = "MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS" DEFAULT_MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS = 50_000 -MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS = 'MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS' +MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS = "MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS" DEFAULT_MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS = 4096 -MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES = 'MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES' +MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES = "MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES" DEFAULT_MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES = 256 * 1024 * 1024 # 256 MB -MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS = 'MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS' +MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS = "MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS" DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS = 10 -MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS = 'MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS' +MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS = "MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS" DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS = 120 
-MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP = 'MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP' +MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP = "MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP" DEFAULT_MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP = 0 -MEMORYLAYER_CONTEXT_EXEC_HARD_CAP = 'MEMORYLAYER_CONTEXT_EXEC_HARD_CAP' +MEMORYLAYER_CONTEXT_EXEC_HARD_CAP = "MEMORYLAYER_CONTEXT_EXEC_HARD_CAP" DEFAULT_MEMORYLAYER_CONTEXT_EXEC_HARD_CAP = 0 # ============================================ # Chat History Service # ============================================ -MEMORYLAYER_CHAT_SERVICE = 'MEMORYLAYER_CHAT_SERVICE' -DEFAULT_MEMORYLAYER_CHAT_SERVICE = 'default' +MEMORYLAYER_CHAT_SERVICE = "MEMORYLAYER_CHAT_SERVICE" +DEFAULT_MEMORYLAYER_CHAT_SERVICE = "default" -MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD = 'MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD' +MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD = "MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD" DEFAULT_MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD = 10 -MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL = 'MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL' +MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL = "MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL" DEFAULT_MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL = 300 # seconds -MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE = 'MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE' +MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE = "MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE" DEFAULT_MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE = 20 -MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP = 'MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP' +MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP = "MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP" DEFAULT_MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP = 5 # ============================================ # Audit Service # ============================================ -MEMORYLAYER_AUDIT_SERVICE = 'MEMORYLAYER_AUDIT_SERVICE' -DEFAULT_MEMORYLAYER_AUDIT_SERVICE = 'noop' +MEMORYLAYER_AUDIT_SERVICE = "MEMORYLAYER_AUDIT_SERVICE" +DEFAULT_MEMORYLAYER_AUDIT_SERVICE = "noop" # ============================================ # Rate Limiting Service # 
============================================ -MEMORYLAYER_RATE_LIMIT_SERVICE = 'MEMORYLAYER_RATE_LIMIT_SERVICE' -DEFAULT_MEMORYLAYER_RATE_LIMIT_SERVICE = 'noop' +MEMORYLAYER_RATE_LIMIT_SERVICE = "MEMORYLAYER_RATE_LIMIT_SERVICE" +DEFAULT_MEMORYLAYER_RATE_LIMIT_SERVICE = "noop" # Rate limit defaults (requests per window) -MEMORYLAYER_RATE_LIMIT_REQUESTS = 'MEMORYLAYER_RATE_LIMIT_REQUESTS' +MEMORYLAYER_RATE_LIMIT_REQUESTS = "MEMORYLAYER_RATE_LIMIT_REQUESTS" DEFAULT_MEMORYLAYER_RATE_LIMIT_REQUESTS = 100 -MEMORYLAYER_RATE_LIMIT_WINDOW_SECONDS = 'MEMORYLAYER_RATE_LIMIT_WINDOW_SECONDS' +MEMORYLAYER_RATE_LIMIT_WINDOW_SECONDS = "MEMORYLAYER_RATE_LIMIT_WINDOW_SECONDS" DEFAULT_MEMORYLAYER_RATE_LIMIT_WINDOW_SECONDS = 60 # ============================================ # Metrics / Observability Service # ============================================ -MEMORYLAYER_METRICS_SERVICE = 'MEMORYLAYER_METRICS_SERVICE' -DEFAULT_MEMORYLAYER_METRICS_SERVICE = 'noop' +MEMORYLAYER_METRICS_SERVICE = "MEMORYLAYER_METRICS_SERVICE" +DEFAULT_MEMORYLAYER_METRICS_SERVICE = "noop" # ============================================ # LLM Query Rewriting # ============================================ -MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED = 'MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED' +MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED = "MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED" DEFAULT_MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED = True # ============================================ # Memory Consolidation # ============================================ -MEMORYLAYER_CONSOLIDATION_ENABLED = 'MEMORYLAYER_CONSOLIDATION_ENABLED' +MEMORYLAYER_CONSOLIDATION_ENABLED = "MEMORYLAYER_CONSOLIDATION_ENABLED" DEFAULT_MEMORYLAYER_CONSOLIDATION_ENABLED = False -MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE = 'MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE' +MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE = "MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE" DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE = 3 -MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE = 
'MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE' +MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE = "MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE" DEFAULT_MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE = 0.3 -MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY = 'MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY' +MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY = "MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY" DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY = 0.85 diff --git a/memorylayer-core-python/src/memorylayer_server/dependencies.py b/memorylayer-core-python/src/memorylayer_server/dependencies.py index d48eccb..370f025 100644 --- a/memorylayer-core-python/src/memorylayer_server/dependencies.py +++ b/memorylayer-core-python/src/memorylayer_server/dependencies.py @@ -3,14 +3,13 @@ Uses scitrera-app-framework plugin pattern for service initialization. Services are lazily initialized on first access via get_extension(). """ + import logging +from collections.abc import Callable from logging import Logger -from typing import Callable -from scitrera_app_framework import ( - Variables, get_variables, get_logger, init_framework_desktop, - async_plugins_ready, async_plugins_stopping -) +from scitrera_app_framework import Variables, async_plugins_ready, async_plugins_stopping, get_logger, get_variables, init_framework_desktop + from .config import MEMORYLAYER_DATA_DIR # global preconfigure hooks (not specific to variables instance) @@ -19,74 +18,79 @@ # noinspection PyTypeHints def preconfigure(v: Variables = None, test_mode: bool = False, test_logger: Logger = None) -> (Variables, dict): - """ Pre-configure the framework """ + """Pre-configure the framework""" from scitrera_app_framework import register_package_plugins - from . import api, services, lifecycle, tasks, middleware # noqa: F401 + + from . 
import api, lifecycle, middleware, services, tasks # noqa: F401 # handle test mode - additional_kwargs = {} if not test_mode else { - 'fault_handler': False, - 'fixed_logger': test_logger, - 'pyroscope': False, - 'shutdown_hooks': False, - } + additional_kwargs = ( + {} + if not test_mode + else { + "fault_handler": False, + "fixed_logger": test_logger, + "pyroscope": False, + "shutdown_hooks": False, + } + ) # init framework (has internal protection against multiple invocations) v: Variables = init_framework_desktop( - 'memorylayer-server', + "memorylayer-server", base_plugins=False, # disable base plugins (we don't need them) stateful_chdir=True, # change working directory to stateful root stateful_root_env_key=MEMORYLAYER_DATA_DIR, # use MEMORYLAYER_DATA_DIR for stateful root async_auto_enabled=False, # manage async plugin lifecycle hooks manually v=v, # allow variables instance pass-through - **additional_kwargs + **additional_kwargs, ) # do some custom logging tweaks (TODO: upstream mechanism for logging tweaks to scitrera-app-framework) - logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING) - logging.getLogger('aiosqlite').setLevel(logging.WARNING) - logging.getLogger('httpcore.http11').setLevel(logging.WARNING) - logging.getLogger('httpcore.connection').setLevel(logging.WARNING) - logging.getLogger('httpx').setLevel(logging.WARNING) - logging.getLogger('openai._base_client').setLevel(logging.WARNING) - logging.getLogger('google_genai.models').setLevel(logging.WARNING) + logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING) + logging.getLogger("aiosqlite").setLevel(logging.WARNING) + logging.getLogger("httpcore.http11").setLevel(logging.WARNING) + logging.getLogger("httpcore.connection").setLevel(logging.WARNING) + logging.getLogger("httpx").setLevel(logging.WARNING) + logging.getLogger("openai._base_client").setLevel(logging.WARNING) + logging.getLogger("google_genai.models").setLevel(logging.WARNING) # avoid duplicate 
invocations of preconfigure() - if v.get('__preconfigure_complete__', default=False): + if v.get("__preconfigure_complete__", default=False): return v, services logger = get_logger(v) # register plugins - logger.debug('Registering core services') + logger.debug("Registering core services") register_package_plugins(services.__package__, v, recursive=True) - logger.debug('Registering lifecycle components') + logger.debug("Registering lifecycle components") register_package_plugins(lifecycle.__package__, v, recursive=True) - logger.debug('Registering API Routes') + logger.debug("Registering API Routes") register_package_plugins(api.__package__, v, recursive=True) - logger.debug('Registering Task Handlers') + logger.debug("Registering Task Handlers") register_package_plugins(tasks.__package__, v, recursive=True) - logger.debug('Registering Middleware') + logger.debug("Registering Middleware") register_package_plugins(middleware.__package__, v, recursive=True) # handle preconfiguration hooks - logger.debug('Evaluating preconfigure hooks') + logger.debug("Evaluating preconfigure hooks") global _preconfigure_hooks - if v.get('__preconfigure_hooks_installed__', default=0) == (lph := len(_preconfigure_hooks)): + if v.get("__preconfigure_hooks_installed__", default=0) == (lph := len(_preconfigure_hooks)): return v, services # run through preconfigure hooks (allows for registering additional plugins before initialization) for hook in _preconfigure_hooks: hook(v) - v.set('__preconfigure_hooks_installed__', lph) - logger.debug('Installed preconfiguration hooks') + v.set("__preconfigure_hooks_installed__", lph) + logger.debug("Installed preconfiguration hooks") - v.set('__preconfigure_complete__', True) + v.set("__preconfigure_complete__", True) return v, services @@ -97,6 +101,7 @@ def _initialize_sync(v: Variables = None) -> Variables: logger.debug("Initializing services") from scitrera_app_framework.core.plugins import init_all_plugins + init_all_plugins(v, 
async_enabled=False) # handle sync part return v @@ -121,4 +126,5 @@ async def shutdown_services(v: Variables = None) -> None: await async_plugins_stopping(v) from scitrera_app_framework.core.plugins import shutdown_all_plugins + shutdown_all_plugins(v) diff --git a/memorylayer-core-python/src/memorylayer_server/lifecycle/cors.py b/memorylayer-core-python/src/memorylayer_server/lifecycle/cors.py index 3a18c46..8a7b7b9 100644 --- a/memorylayer-core-python/src/memorylayer_server/lifecycle/cors.py +++ b/memorylayer-core-python/src/memorylayer_server/lifecycle/cors.py @@ -1,4 +1,4 @@ -from typing import Iterable +from collections.abc import Iterable from fastapi.middleware.cors import CORSMiddleware from scitrera_app_framework import Variables as Variables @@ -6,17 +6,17 @@ from .fastapi import EXT_FASTAPI_SERVER -MEMORYLAYER_SERVER_CORS_ALLOW_ORIGINS = 'MEMORYLAYER_SERVER_CORS_ALLOW_ORIGINS' -MEMORYLAYER_SERVER_CORS_ALLOW_CREDENTIALS = 'MEMORYLAYER_SERVER_CORS_ALLOW_CREDENTIALS' -MEMORYLAYER_SERVER_CORS_ALLOW_METHODS = 'MEMORYLAYER_SERVER_CORS_ALLOW_METHODS' -MEMORYLAYER_SERVER_CORS_ALLOW_HEADERS = 'MEMORYLAYER_SERVER_CORS_ALLOW_HEADERS' +MEMORYLAYER_SERVER_CORS_ALLOW_ORIGINS = "MEMORYLAYER_SERVER_CORS_ALLOW_ORIGINS" +MEMORYLAYER_SERVER_CORS_ALLOW_CREDENTIALS = "MEMORYLAYER_SERVER_CORS_ALLOW_CREDENTIALS" +MEMORYLAYER_SERVER_CORS_ALLOW_METHODS = "MEMORYLAYER_SERVER_CORS_ALLOW_METHODS" +MEMORYLAYER_SERVER_CORS_ALLOW_HEADERS = "MEMORYLAYER_SERVER_CORS_ALLOW_HEADERS" -DEFAULT_CORS_ALLOW_ORIGINS = ['http://localhost:3000', 'http://localhost:5173', 'http://127.0.0.1:3000'] +DEFAULT_CORS_ALLOW_ORIGINS = ["http://localhost:3000", "http://localhost:5173", "http://127.0.0.1:3000"] DEFAULT_CORS_ALLOW_CREDENTIALS = False -DEFAULT_CORS_ALLOW_METHODS = ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS'] -DEFAULT_CORS_ALLOW_HEADERS = ['Authorization', 'Content-Type', 'X-API-Key', 'X-Session-ID', 'X-Workspace-ID'] +DEFAULT_CORS_ALLOW_METHODS = ["GET", "POST", "PUT", "DELETE", "OPTIONS"] 
+DEFAULT_CORS_ALLOW_HEADERS = ["Authorization", "Content-Type", "X-API-Key", "X-Session-ID", "X-Workspace-ID"] -EXT_CORS = 'memorylayer-server-fastapi-middleware-cors' +EXT_CORS = "memorylayer-server-fastapi-middleware-cors" class CORSMiddlewarePlugin(Plugin): @@ -30,14 +30,12 @@ def extension_point_name(self, v: Variables) -> str: def initialize(self, v, logger) -> object | None: app = self.get_extension(EXT_FASTAPI_SERVER, v) - allow_origins = v.environ(MEMORYLAYER_SERVER_CORS_ALLOW_ORIGINS, - default=DEFAULT_CORS_ALLOW_ORIGINS, type_fn=ext_parse_csv) - allow_credentials = v.environ(MEMORYLAYER_SERVER_CORS_ALLOW_CREDENTIALS, - default=DEFAULT_CORS_ALLOW_CREDENTIALS, type_fn=ext_parse_bool) - allow_methods = v.environ(MEMORYLAYER_SERVER_CORS_ALLOW_METHODS, - default=DEFAULT_CORS_ALLOW_METHODS, type_fn=ext_parse_csv) - allow_headers = v.environ(MEMORYLAYER_SERVER_CORS_ALLOW_HEADERS, - default=DEFAULT_CORS_ALLOW_HEADERS, type_fn=ext_parse_csv) + allow_origins = v.environ(MEMORYLAYER_SERVER_CORS_ALLOW_ORIGINS, default=DEFAULT_CORS_ALLOW_ORIGINS, type_fn=ext_parse_csv) + allow_credentials = v.environ( + MEMORYLAYER_SERVER_CORS_ALLOW_CREDENTIALS, default=DEFAULT_CORS_ALLOW_CREDENTIALS, type_fn=ext_parse_bool + ) + allow_methods = v.environ(MEMORYLAYER_SERVER_CORS_ALLOW_METHODS, default=DEFAULT_CORS_ALLOW_METHODS, type_fn=ext_parse_csv) + allow_headers = v.environ(MEMORYLAYER_SERVER_CORS_ALLOW_HEADERS, default=DEFAULT_CORS_ALLOW_HEADERS, type_fn=ext_parse_csv) app.add_middleware( CORSMiddleware, diff --git a/memorylayer-core-python/src/memorylayer_server/lifecycle/fastapi.py b/memorylayer-core-python/src/memorylayer_server/lifecycle/fastapi.py index f12118a..5ac38ba 100644 --- a/memorylayer-core-python/src/memorylayer_server/lifecycle/fastapi.py +++ b/memorylayer-core-python/src/memorylayer_server/lifecycle/fastapi.py @@ -1,16 +1,17 @@ +from collections.abc import AsyncGenerator from contextlib import asynccontextmanager -from typing import AsyncGenerator from logging 
import Logger from fastapi import FastAPI, Request -from scitrera_app_framework import ( - Plugin, Variables, get_logger as _saf_get_logger, get_variables as _saf_get_variables, get_extension as _saf_get_extension -) +from scitrera_app_framework import Plugin, Variables +from scitrera_app_framework import get_extension as _saf_get_extension +from scitrera_app_framework import get_logger as _saf_get_logger +from scitrera_app_framework import get_variables as _saf_get_variables from scitrera_app_framework.core.plugins import init_all_plugins as _saf_init_all_plugins from .. import __version__ -EXT_FASTAPI_SERVER = 'memorylayer-server-fastapi-server' +EXT_FASTAPI_SERVER = "memorylayer-server-fastapi-server" async def get_variables_dep(request: Request) -> Variables: @@ -33,7 +34,7 @@ def extension_point_name(self, v: Variables) -> str: return EXT_FASTAPI_SERVER def initialize(self, v, logger) -> object | None: - logger.info('Initializing FastAPI App') + logger.info("Initializing FastAPI App") # noinspection PyShadowingNames @asynccontextmanager @@ -45,7 +46,7 @@ async def lifespan_context(app: FastAPI) -> AsyncGenerator[None, None]: await initialize_services(v) # store app in variables for access in services/plugins - v.set('app', app) + v.set("app", app) # store variables in app state app.state.v = v diff --git a/memorylayer-core-python/src/memorylayer_server/lifecycle/otel.py b/memorylayer-core-python/src/memorylayer_server/lifecycle/otel.py index d6e31b6..cc42085 100644 --- a/memorylayer-core-python/src/memorylayer_server/lifecycle/otel.py +++ b/memorylayer-core-python/src/memorylayer_server/lifecycle/otel.py @@ -1,25 +1,26 @@ """OpenTelemetry SDK initialization — configures TracerProvider and exporter.""" + import logging -from typing import Iterable +from collections.abc import Iterable from scitrera_app_framework import Plugin, Variables, ext_parse_bool # Config constants -MEMORYLAYER_OTEL_ENABLED = 'MEMORYLAYER_OTEL_ENABLED' -MEMORYLAYER_OTEL_EXPORTER = 
'MEMORYLAYER_OTEL_EXPORTER' # 'otlp', 'console', 'none' -MEMORYLAYER_OTEL_ENDPOINT = 'MEMORYLAYER_OTEL_ENDPOINT' # e.g., 'http://localhost:4317' -MEMORYLAYER_OTEL_SERVICE_NAME = 'MEMORYLAYER_OTEL_SERVICE_NAME' +MEMORYLAYER_OTEL_ENABLED = "MEMORYLAYER_OTEL_ENABLED" +MEMORYLAYER_OTEL_EXPORTER = "MEMORYLAYER_OTEL_EXPORTER" # 'otlp', 'console', 'none' +MEMORYLAYER_OTEL_ENDPOINT = "MEMORYLAYER_OTEL_ENDPOINT" # e.g., 'http://localhost:4317' +MEMORYLAYER_OTEL_SERVICE_NAME = "MEMORYLAYER_OTEL_SERVICE_NAME" -DEFAULT_MEMORYLAYER_OTEL_SERVICE_NAME = 'memorylayer' -DEFAULT_MEMORYLAYER_OTEL_ENDPOINT = 'http://localhost:4317' +DEFAULT_MEMORYLAYER_OTEL_SERVICE_NAME = "memorylayer" +DEFAULT_MEMORYLAYER_OTEL_ENDPOINT = "http://localhost:4317" -EXT_OTEL_INIT = 'memorylayer-server-otel-init' +EXT_OTEL_INIT = "memorylayer-server-otel-init" try: from opentelemetry import trace + from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter - from opentelemetry.sdk.resources import Resource, SERVICE_NAME HAS_OTEL_SDK = True except ImportError: @@ -58,20 +59,19 @@ def initialize(self, v: Variables, logger: logging.Logger) -> object | None: MEMORYLAYER_OTEL_SERVICE_NAME, default=DEFAULT_MEMORYLAYER_OTEL_SERVICE_NAME, ) - exporter_type = v.environ(MEMORYLAYER_OTEL_EXPORTER, default='none').lower() + exporter_type = v.environ(MEMORYLAYER_OTEL_EXPORTER, default="none").lower() resource = Resource(attributes={SERVICE_NAME: service_name}) provider = TracerProvider(resource=resource) - if exporter_type == 'console': + if exporter_type == "console": exporter = ConsoleSpanExporter() provider.add_span_processor(BatchSpanProcessor(exporter)) logger.info("OTel initialized: exporter=%s", exporter_type) - elif exporter_type == 'otlp': + elif exporter_type == "otlp": if not HAS_OTLP: logger.warning( - "OTel exporter=otlp requested but 
opentelemetry-exporter-otlp-proto-grpc " - "is not installed; spans will not be exported" + "OTel exporter=otlp requested but opentelemetry-exporter-otlp-proto-grpc is not installed; spans will not be exported" ) else: endpoint = v.environ( diff --git a/memorylayer-core-python/src/memorylayer_server/lifecycle/routes.py b/memorylayer-core-python/src/memorylayer_server/lifecycle/routes.py index 2f55b73..1413357 100644 --- a/memorylayer-core-python/src/memorylayer_server/lifecycle/routes.py +++ b/memorylayer-core-python/src/memorylayer_server/lifecycle/routes.py @@ -1,13 +1,15 @@ -from typing import Iterable +from collections.abc import Iterable from fastapi import APIRouter, Request from fastapi.responses import JSONResponse -from scitrera_app_framework import get_extensions, Plugin, Variables as Variables +from scitrera_app_framework import Plugin, get_extensions +from scitrera_app_framework import Variables as Variables + from ..api import EXT_MULTI_API_ROUTERS -from .fastapi import EXT_FASTAPI_SERVER from .cors import EXT_CORS +from .fastapi import EXT_FASTAPI_SERVER -EXT_ROUTES = 'memorylayer-server-fastapi-routes' +EXT_ROUTES = "memorylayer-server-fastapi-routes" # Prefixes that are enterprise-only. When no enterprise plugin registers # the real router the fallback below will respond with 501 (Not Implemented) @@ -24,15 +26,15 @@ def extension_point_name(self, v: Variables) -> str: return EXT_ROUTES def initialize(self, v, logger) -> object | None: - logger.info('Initializing Routes') + logger.info("Initializing Routes") app = self.get_extension(EXT_FASTAPI_SERVER, v) # Register API routers -- requires that we run after all API router plugins are registered! 
registered_prefixes: set[str] = set() for ext_name, router in get_extensions(EXT_MULTI_API_ROUTERS, v).items(): - logger.debug('Adding API router from extension: %s', ext_name) + logger.debug("Adding API router from extension: %s", ext_name) app.include_router(router) - if hasattr(router, 'prefix'): + if hasattr(router, "prefix"): registered_prefixes.add(router.prefix) # Register 501 fallback routes for enterprise prefixes that have no @@ -41,7 +43,8 @@ def initialize(self, v, logger) -> object | None: for prefix in _ENTERPRISE_PREFIXES: if prefix not in registered_prefixes: logger.debug( - 'Registering 501 fallback for enterprise prefix: %s', prefix, + "Registering 501 fallback for enterprise prefix: %s", + prefix, ) fallback = APIRouter() diff --git a/memorylayer-core-python/src/memorylayer_server/main.py b/memorylayer-core-python/src/memorylayer_server/main.py index 3048178..9e6044e 100644 --- a/memorylayer-core-python/src/memorylayer_server/main.py +++ b/memorylayer-core-python/src/memorylayer_server/main.py @@ -9,5 +9,7 @@ app = fastapi_app_factory(v=None) __all__ = ( - 'app', 'get_logger', 'get_variables_dep', + "app", + "get_logger", + "get_variables_dep", ) diff --git a/memorylayer-core-python/src/memorylayer_server/middleware/__init__.py b/memorylayer-core-python/src/memorylayer_server/middleware/__init__.py index 222ca91..a346c91 100644 --- a/memorylayer-core-python/src/memorylayer_server/middleware/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/middleware/__init__.py @@ -3,12 +3,13 @@ Provides FastAPI middleware components for cross-cutting concerns such as rate limiting and distributed tracing. 
""" + from .rate_limit import RateLimitMiddleware, RateLimitMiddlewarePlugin from .tracing import TracingMiddleware, TracingMiddlewarePlugin __all__ = ( - 'RateLimitMiddleware', - 'RateLimitMiddlewarePlugin', - 'TracingMiddleware', - 'TracingMiddlewarePlugin', + "RateLimitMiddleware", + "RateLimitMiddlewarePlugin", + "TracingMiddleware", + "TracingMiddlewarePlugin", ) diff --git a/memorylayer-core-python/src/memorylayer_server/middleware/rate_limit.py b/memorylayer-core-python/src/memorylayer_server/middleware/rate_limit.py index 742f1e5..39235ff 100644 --- a/memorylayer-core-python/src/memorylayer_server/middleware/rate_limit.py +++ b/memorylayer-core-python/src/memorylayer_server/middleware/rate_limit.py @@ -1,6 +1,7 @@ """Rate limiting middleware for MemoryLayer FastAPI server.""" + import time -from typing import Iterable +from collections.abc import Iterable from fastapi import Request from fastapi.responses import JSONResponse @@ -16,7 +17,7 @@ # Health-check paths that bypass rate limiting _HEALTH_PATHS = frozenset({"/health", "/healthz"}) -EXT_RATE_LIMIT_MIDDLEWARE = 'memorylayer-server-fastapi-middleware-rate-limit' +EXT_RATE_LIMIT_MIDDLEWARE = "memorylayer-server-fastapi-middleware-rate-limit" class RateLimitMiddleware(BaseHTTPMiddleware): diff --git a/memorylayer-core-python/src/memorylayer_server/middleware/tracing.py b/memorylayer-core-python/src/memorylayer_server/middleware/tracing.py index 5cc1aab..e6900ea 100644 --- a/memorylayer-core-python/src/memorylayer_server/middleware/tracing.py +++ b/memorylayer-core-python/src/memorylayer_server/middleware/tracing.py @@ -1,23 +1,25 @@ """OpenTelemetry tracing middleware — active only when opentelemetry-api is installed.""" -from typing import Iterable + +from collections.abc import Iterable from fastapi import Request +from scitrera_app_framework import Variables +from scitrera_app_framework.api import Plugin from starlette.middleware.base import BaseHTTPMiddleware from starlette.types import ASGIApp -from 
scitrera_app_framework import Variables, get_logger -from scitrera_app_framework.api import Plugin from ..lifecycle.fastapi import EXT_FASTAPI_SERVER try: from opentelemetry import trace - from opentelemetry.trace import StatusCode from opentelemetry.propagate import extract, inject + from opentelemetry.trace import StatusCode + HAS_OTEL = True except ImportError: HAS_OTEL = False -EXT_TRACING_MIDDLEWARE = 'memorylayer-server-fastapi-middleware-tracing' +EXT_TRACING_MIDDLEWARE = "memorylayer-server-fastapi-middleware-tracing" class TracingMiddleware(BaseHTTPMiddleware): diff --git a/memorylayer-core-python/src/memorylayer_server/models/__init__.py b/memorylayer-core-python/src/memorylayer_server/models/__init__.py index c80b99f..2d51a55 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/models/__init__.py @@ -3,16 +3,28 @@ Exports all Pydantic models for memory, associations, workspaces, and sessions. """ + from .association import ( - Association, + KNOWN_RELATIONSHIP_TYPES, AssociateInput, + Association, GraphPath, GraphQueryInput, GraphQueryResult, - KNOWN_RELATIONSHIP_TYPES, RelationshipCategory, get_relationship_category, ) +from .auth import AuthIdentity, RequestContext +from .chat import ( + AppendMessagesInput, + ChatMessage, + ChatMessageContent, + ChatThread, + ChatThreadWithMessages, + CreateThreadInput, + DecompositionResult, + MessageInput, +) from .memory import ( DetailLevel, Memory, @@ -37,17 +49,6 @@ WorkingMemory, WorkspaceSummary, ) -from .auth import AuthIdentity, RequestContext -from .chat import ( - ChatMessage, - ChatMessageContent, - ChatThread, - ChatThreadWithMessages, - CreateThreadInput, - AppendMessagesInput, - MessageInput, - DecompositionResult, -) from .workspace import ( Context, ContextSettings, diff --git a/memorylayer-core-python/src/memorylayer_server/models/association.py b/memorylayer-core-python/src/memorylayer_server/models/association.py index 
69efb71..685b796 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/association.py +++ b/memorylayer-core-python/src/memorylayer_server/models/association.py @@ -5,9 +5,10 @@ Relationship types are plain strings validated against the unified ontology in ``memorylayer_server.services.ontology.base.BASE_ONTOLOGY``. """ -from datetime import datetime, timezone + +from datetime import UTC, datetime from enum import Enum -from typing import Any, Optional +from typing import Any from pydantic import BaseModel, Field @@ -31,46 +32,93 @@ class RelationshipCategory(str, Enum): # All known relationship type strings, kept as a convenience constant. # The authoritative source of truth is BASE_ONTOLOGY in # memorylayer_server.services.ontology.base -KNOWN_RELATIONSHIP_TYPES: frozenset[str] = frozenset({ - # Hierarchical - "parent_of", "child_of", "part_of", "has_part", "instance_of", "type_of", - # Causal - "causes", "caused_by", "enables", "enabled_by", - "triggers", "triggered_by", "leads_to", "led_to_by", - "prevents", "prevented_by", - # Temporal - "before", "after", "during", - # Similarity - "similar_to", "duplicate_of", "related_to", "variant_of", - # Learning - "contradicts", "supports", "supported_by", - "builds_on", "built_upon_by", "confirms", - "supersedes", "superseded_by", - # Refinement - "refines", "refined_by", "replaces", "replaced_by", - # Reference - "references", "referenced_by", - # Solution - "solves", "solved_by", "addresses", "addressed_by", - "alternative_to", "improves", "improved_by", - # Context - "occurs_in", "contains_occurrence", "applies_to", "has_applicable", - "works_with", "requires", "required_by", - # Workflow - "follows", "followed_by", "depends_on", "depended_on_by", - "blocks", "blocked_by", - # Quality - "effective_for", "has_effective", "preferred_over", "less_preferred_than", - "deprecated_by", "deprecates", -}) - - -def get_relationship_category(relationship: str) -> Optional[str]: +KNOWN_RELATIONSHIP_TYPES: 
frozenset[str] = frozenset( + { + # Hierarchical + "parent_of", + "child_of", + "part_of", + "has_part", + "instance_of", + "type_of", + # Causal + "causes", + "caused_by", + "enables", + "enabled_by", + "triggers", + "triggered_by", + "leads_to", + "led_to_by", + "prevents", + "prevented_by", + # Temporal + "before", + "after", + "during", + # Similarity + "similar_to", + "duplicate_of", + "related_to", + "variant_of", + # Learning + "contradicts", + "supports", + "supported_by", + "builds_on", + "built_upon_by", + "confirms", + "supersedes", + "superseded_by", + # Refinement + "refines", + "refined_by", + "replaces", + "replaced_by", + # Reference + "references", + "referenced_by", + # Solution + "solves", + "solved_by", + "addresses", + "addressed_by", + "alternative_to", + "improves", + "improved_by", + # Context + "occurs_in", + "contains_occurrence", + "applies_to", + "has_applicable", + "works_with", + "requires", + "required_by", + # Workflow + "follows", + "followed_by", + "depends_on", + "depended_on_by", + "blocks", + "blocked_by", + # Quality + "effective_for", + "has_effective", + "preferred_over", + "less_preferred_than", + "deprecated_by", + "deprecates", + } +) + + +def get_relationship_category(relationship: str) -> str | None: """Get the category for a relationship type from the ontology. Returns None if the relationship is not in the known set. 
""" from ..services.ontology.base import BASE_ONTOLOGY + info = BASE_ONTOLOGY.get(relationship) return info.get("category") if info else None @@ -93,19 +141,14 @@ class Association(BaseModel): ) # Edge metadata - strength: float = Field( - 0.5, - ge=0.0, - le=1.0, - description="Relationship strength (0.0-1.0)" - ) + strength: float = Field(0.5, ge=0.0, le=1.0, description="Relationship strength (0.0-1.0)") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") # Timestamps - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), description="Creation timestamp") + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Creation timestamp") @property - def category(self) -> Optional[str]: + def category(self) -> str | None: """Get the category of this relationship.""" return get_relationship_category(self.relationship) @@ -129,22 +172,14 @@ class GraphQueryInput(BaseModel): start_memory_id: str = Field(..., description="Starting memory for traversal") # Filters - relationship_types: list[str] = Field( - default_factory=list, - description="Filter by specific relationship types (empty = all)" - ) + relationship_types: list[str] = Field(default_factory=list, description="Filter by specific relationship types (empty = all)") relationship_categories: list[RelationshipCategory] = Field( - default_factory=list, - description="Filter by relationship categories (empty = all)" + default_factory=list, description="Filter by relationship categories (empty = all)" ) # Traversal settings max_depth: int = Field(3, ge=1, le=5, description="Maximum traversal depth") - direction: str = Field( - "both", - pattern="^(outgoing|incoming|both)$", - description="Traversal direction: outgoing, incoming, both" - ) + direction: str = Field("both", pattern="^(outgoing|incoming|both)$", description="Traversal direction: outgoing, incoming, both") min_strength: float = Field(0.0, ge=0.0, le=1.0, 
description="Minimum edge strength") # Result limits diff --git a/memorylayer-core-python/src/memorylayer_server/models/auth.py b/memorylayer-core-python/src/memorylayer_server/models/auth.py index 268b1fc..942caa2 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/auth.py +++ b/memorylayer-core-python/src/memorylayer_server/models/auth.py @@ -3,8 +3,9 @@ These models represent the resolved identity and context for API requests. """ + from dataclasses import dataclass, field -from typing import Any, Optional +from typing import Any from .session import Session @@ -17,9 +18,10 @@ class AuthIdentity: In OSS, this always returns default tenant with no user. In Enterprise, this is populated from API key or JWT verification. """ + tenant_id: str - user_id: Optional[str] = None - api_key_id: Optional[str] = None # For audit/tracking + user_id: str | None = None + api_key_id: str | None = None # For audit/tracking @dataclass @@ -38,18 +40,19 @@ class RequestContext: The metadata dict carries extension-specific data (e.g., gateway-injected access levels) without coupling the core model to any particular auth scheme. 
""" + tenant_id: str workspace_id: str - user_id: Optional[str] = None - session: Optional[Session] = None + user_id: str | None = None + session: Session | None = None metadata: dict[str, Any] = field(default_factory=dict) @property - def session_id(self) -> Optional[str]: + def session_id(self) -> str | None: """Convenience property to get session ID if session exists.""" return self.session.id if self.session else None @property - def context_id(self) -> Optional[str]: + def context_id(self) -> str | None: """Get context_id from session if available.""" return self.session.context_id if self.session else None diff --git a/memorylayer-core-python/src/memorylayer_server/models/authz.py b/memorylayer-core-python/src/memorylayer_server/models/authz.py index c65ab78..7dec6cb 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/authz.py +++ b/memorylayer-core-python/src/memorylayer_server/models/authz.py @@ -1,11 +1,11 @@ from enum import Enum -from typing import Optional from pydantic import BaseModel, Field class AuthorizationDecision(str, Enum): """Authorization decision result.""" + ALLOW = "allow" DENY = "deny" ABSTAIN = "abstain" # Let next handler decide (for chain-of-responsibility patterns) @@ -16,12 +16,13 @@ class AuthorizationContext(BaseModel): Contains all information needed to make an authorization decision. 
""" + model_config = {"frozen": True} - tenant_id: Optional[str] = Field(None, description="Tenant identifier") - workspace_id: Optional[str] = Field(None, description="Workspace identifier") - user_id: Optional[str] = Field(None, description="User identifier") + tenant_id: str | None = Field(None, description="Tenant identifier") + workspace_id: str | None = Field(None, description="Workspace identifier") + user_id: str | None = Field(None, description="User identifier") resource: str = Field("", description="Resource type (e.g., 'memories', 'workspaces')") action: str = Field("", description="Action type (e.g., 'read', 'write', 'delete')") - resource_id: Optional[str] = Field(None, description="Specific resource ID") + resource_id: str | None = Field(None, description="Specific resource ID") metadata: dict = Field(default_factory=dict, description="Additional context") diff --git a/memorylayer-core-python/src/memorylayer_server/models/chat.py b/memorylayer-core-python/src/memorylayer_server/models/chat.py index 58533ad..bcec02b 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/chat.py +++ b/memorylayer-core-python/src/memorylayer_server/models/chat.py @@ -5,8 +5,9 @@ workspace / user / thread. Messages accumulate over time and are periodically decomposed into long-term memories via background tasks. 
""" -from datetime import datetime, timezone -from typing import Any, Optional, Union + +from datetime import UTC, datetime +from typing import Any from pydantic import BaseModel, Field, field_validator @@ -15,8 +16,8 @@ class ChatMessageContent(BaseModel): """Structured content block within a chat message (tool calls, images, etc.).""" type: str = Field(..., description="Content block type: text, tool_call, tool_result, image, etc.") - text: Optional[str] = Field(None, description="Text content (for type=text)") - data: Optional[dict[str, Any]] = Field(None, description="Structured data (tool args, image ref, etc.)") + text: str | None = Field(None, description="Text content (for type=text)") + data: dict[str, Any] | None = Field(None, description="Structured data (tool args, image ref, etc.)") class ChatMessage(BaseModel): @@ -28,12 +29,10 @@ class ChatMessage(BaseModel): thread_id: str = Field(..., description="Parent thread ID") message_index: int = Field(..., description="Sequential index within the thread (0-based)") role: str = Field(..., description="Message role: user, assistant, system, tool") - content: Union[str, list[ChatMessageContent]] = Field( - ..., description="Message content — plain string or structured blocks" - ) + content: str | list[ChatMessageContent] = Field(..., description="Message content — plain string or structured blocks") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") created_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), + default_factory=lambda: datetime.now(UTC), description="Creation timestamp", ) @@ -55,30 +54,30 @@ class ChatThread(BaseModel): id: str = Field(..., description="Thread ID (client-provided or auto-generated)") workspace_id: str = Field(..., description="Workspace boundary") tenant_id: str = Field("_default", description="Tenant") - user_id: Optional[str] = Field(None, description="User who owns this conversation") + user_id: str | None = 
Field(None, description="User who owns this conversation") context_id: str = Field("_default", description="Context within the workspace") # Entity attribution (for persona tracking / inference) - observer_id: Optional[str] = Field(None, description="Entity doing the observing (typically the AI agent)") - subject_id: Optional[str] = Field(None, description="Entity being observed (typically the human user)") + observer_id: str | None = Field(None, description="Entity doing the observing (typically the AI agent)") + subject_id: str | None = Field(None, description="Entity being observed (typically the human user)") # Display - title: Optional[str] = Field(None, description="Optional display title") + title: str | None = Field(None, description="Optional display title") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") # Counters and watermarks message_count: int = Field(0, description="Total messages in thread") - last_decomposed_at: Optional[datetime] = Field(None, description="When decomposition last ran") + last_decomposed_at: datetime | None = Field(None, description="When decomposition last ran") last_decomposed_index: int = Field(0, description="Message index watermark for decomposition") # Lifecycle - expires_at: Optional[datetime] = Field(None, description="Optional expiration (None = permanent)") + expires_at: datetime | None = Field(None, description="Optional expiration (None = permanent)") created_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), + default_factory=lambda: datetime.now(UTC), description="Creation timestamp", ) updated_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), + default_factory=lambda: datetime.now(UTC), description="Last update timestamp", ) @@ -86,7 +85,7 @@ class ChatThread(BaseModel): def is_expired(self) -> bool: if self.expires_at is None: return False - return datetime.now(timezone.utc) > self.expires_at + return datetime.now(UTC) > 
self.expires_at @property def unprocessed_count(self) -> int: @@ -103,17 +102,18 @@ class ChatThreadWithMessages(BaseModel): # Input models (for service layer — no IDs, no timestamps) + class CreateThreadInput(BaseModel): """Input for creating a new chat thread.""" - thread_id: Optional[str] = Field(None, description="Client-provided thread ID (auto-generated if omitted)") - user_id: Optional[str] = Field(None, description="User scope") + thread_id: str | None = Field(None, description="Client-provided thread ID (auto-generated if omitted)") + user_id: str | None = Field(None, description="User scope") context_id: str = Field("_default", description="Context within workspace") - observer_id: Optional[str] = Field(None, description="Observer entity ID") - subject_id: Optional[str] = Field(None, description="Subject entity ID") - title: Optional[str] = Field(None, description="Display title") + observer_id: str | None = Field(None, description="Observer entity ID") + subject_id: str | None = Field(None, description="Subject entity ID") + title: str | None = Field(None, description="Display title") metadata: dict[str, Any] = Field(default_factory=dict, description="Metadata") - expires_at: Optional[datetime] = Field(None, description="Optional expiration") + expires_at: datetime | None = Field(None, description="Optional expiration") class AppendMessagesInput(BaseModel): @@ -126,9 +126,7 @@ class MessageInput(BaseModel): """A single message to append (no ID or index — assigned by the service).""" role: str = Field(..., description="Message role: user, assistant, system, tool") - content: Union[str, list[ChatMessageContent]] = Field( - ..., description="Message content" - ) + content: str | list[ChatMessageContent] = Field(..., description="Message content") metadata: dict[str, Any] = Field(default_factory=dict, description="Metadata") @field_validator("role") diff --git a/memorylayer-core-python/src/memorylayer_server/models/llm.py 
b/memorylayer-core-python/src/memorylayer_server/models/llm.py index 74be297..7002cb5 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/llm.py +++ b/memorylayer-core-python/src/memorylayer_server/models/llm.py @@ -1,10 +1,10 @@ from dataclasses import dataclass from enum import Enum -from typing import List, Optional class LLMRole(str, Enum): """Message role in conversation.""" + SYSTEM = "system" USER = "user" ASSISTANT = "assistant" @@ -13,6 +13,7 @@ class LLMRole(str, Enum): @dataclass class LLMMessage: """Single message in conversation.""" + role: LLMRole content: str @@ -28,18 +29,20 @@ class LLMRequest: ``max_tokens`` resolution: explicit value wins, else ``provider.default_max_tokens``. """ - messages: List[LLMMessage] - model: Optional[str] = None - max_tokens: Optional[int] = None - temperature: Optional[float] = None - temperature_factor: Optional[float] = None - stop: Optional[List[str]] = None + + messages: list[LLMMessage] + model: str | None = None + max_tokens: int | None = None + temperature: float | None = None + temperature_factor: float | None = None + stop: list[str] | None = None stream: bool = False @dataclass class LLMResponse: """Response from LLM provider.""" + content: str model: str prompt_tokens: int @@ -51,6 +54,7 @@ class LLMResponse: @dataclass class LLMStreamChunk: """Streaming response chunk.""" + content: str is_final: bool = False - finish_reason: Optional[str] = None + finish_reason: str | None = None diff --git a/memorylayer-core-python/src/memorylayer_server/models/memory.py b/memorylayer-core-python/src/memorylayer_server/models/memory.py index 0d822c2..e14dfe8 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/memory.py +++ b/memorylayer-core-python/src/memorylayer_server/models/memory.py @@ -3,11 +3,12 @@ Defines cognitive types, domain subtypes, and core memory data structures. 
""" -from datetime import datetime, timezone + +from datetime import UTC, datetime from enum import Enum -from typing import Any, Optional +from typing import Any -from pydantic import BaseModel, Field, computed_field, field_validator, model_validator +from pydantic import BaseModel, Field, computed_field, field_validator class MemoryType(str, Enum): @@ -76,12 +77,13 @@ class MemoryStatus(str, Enum): class SourceType(str, Enum): """Types of sources that can produce memories.""" - MEMORY = "memory" # Fact decomposition - SESSION = "session" # Working memory commit - DOCUMENT = "document" # Document ingestion - PAGE = "page" # Document page - THREAD = "thread" # Chat history decomposition - DATASET = "dataset" # Dataset profiling/summarization + + MEMORY = "memory" # Fact decomposition + SESSION = "session" # Working memory commit + DOCUMENT = "document" # Document ingestion + PAGE = "page" # Document page + THREAD = "thread" # Chat history decomposition + DATASET = "dataset" # Dataset profiling/summarization class Memory(BaseModel): @@ -94,11 +96,11 @@ class Memory(BaseModel): workspace_id: str = Field(..., description="Workspace this memory belongs to") tenant_id: str = Field(..., description="Tenant this memory belongs to") context_id: str = Field("_default", description="Context for logical grouping (default: _default)") - user_id: Optional[str] = Field(None, description="Optional user scope") + user_id: str | None = Field(None, description="Optional user scope") # Entity attribution (v3) - "who remembers what about whom" - observer_id: Optional[str] = Field(None, description="Entity doing the observing/remembering (agent ID, user ID, etc.)") - subject_id: Optional[str] = Field(None, description="Entity the memory is about") + observer_id: str | None = Field(None, description="Entity doing the observing/remembering (agent ID, user ID, etc.)") + subject_id: str | None = Field(None, description="Entity the memory is about") # Content content: str = Field(..., 
description="The memory content") @@ -106,57 +108,52 @@ class Memory(BaseModel): # Classification type: MemoryType = Field(..., description="Cognitive type of memory") - subtype: Optional[MemorySubtype] = Field(None, description="Domain-specific classification") - importance: float = Field( - 0.5, - ge=0.0, - le=1.0, - description="Memory importance (0.0-1.0, affects retention/ranking)" - ) + subtype: MemorySubtype | None = Field(None, description="Domain-specific classification") + importance: float = Field(0.5, ge=0.0, le=1.0, description="Memory importance (0.0-1.0, affects retention/ranking)") tags: list[str] = Field(default_factory=list, description="Tags for categorization") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") # v2 additions for hierarchical memory - abstract: Optional[str] = Field(None, description="Brief summary/abstract of memory content") - overview: Optional[str] = Field(None, description="High-level overview (tier 3)") - session_id: Optional[str] = Field(None, description="Associated session ID") - source_memory_id: Optional[str] = Field(None, description="Parent memory this fact was decomposed from") + abstract: str | None = Field(None, description="Brief summary/abstract of memory content") + overview: str | None = Field(None, description="High-level overview (tier 3)") + session_id: str | None = Field(None, description="Associated session ID") + source_memory_id: str | None = Field(None, description="Parent memory this fact was decomposed from") # Document provenance - traces memory back to source document/page - source_document_id: Optional[str] = Field(None, description="Document this memory was derived from") - source_page_id: Optional[str] = Field(None, description="Document page this memory was extracted from") - source_dataset_id: Optional[str] = Field(None, description="Dataset this memory was derived from") - source_thread_id: Optional[str] = Field(None, description="Chat thread this memory 
was decomposed from") + source_document_id: str | None = Field(None, description="Document this memory was derived from") + source_page_id: str | None = Field(None, description="Document page this memory was extracted from") + source_dataset_id: str | None = Field(None, description="Dataset this memory was derived from") + source_thread_id: str | None = Field(None, description="Chat thread this memory was decomposed from") - category: Optional[str] = Field(None, description="User-defined category") + category: str | None = Field(None, description="User-defined category") # Vector embedding (optional - computed async or stored separately) - embedding: Optional[list[float]] = Field(None, description="Vector embedding for similarity search") + embedding: list[float] | None = Field(None, description="Vector embedding for similarity search") # Lifecycle & access tracking access_count: int = Field(0, ge=0, description="Number of times memory was accessed") - last_accessed_at: Optional[datetime] = Field(None, description="Last access timestamp") + last_accessed_at: datetime | None = Field(None, description="Last access timestamp") decay_factor: float = Field(1.0, ge=0.0, le=1.0, description="Memory decay over time") status: MemoryStatus = Field(MemoryStatus.ACTIVE, description="Memory lifecycle status") pinned: bool = Field(False, description="Pinned memories are exempt from decay and archival") # Locality-aware ranking metadata (populated during recall) - source_scope: Optional[str] = Field(None, description="Scope of memory source (same_context, same_workspace, global_workspace, other)") - relevance_score: Optional[float] = Field(None, description="Base relevance score from vector similarity") - boosted_score: Optional[float] = Field(None, description="Relevance score after locality boost applied") + source_scope: str | None = Field(None, description="Scope of memory source (same_context, same_workspace, global_workspace, other)") + relevance_score: float | None = 
Field(None, description="Base relevance score from vector similarity") + boosted_score: float | None = Field(None, description="Relevance score after locality boost applied") # Trust scoring (populated during recall) - trust_score: Optional[float] = Field(None, ge=0.0, le=1.0, description="Composite trust score (0.0-1.0)") - trust_signals: Optional[dict] = Field(None, description="Component trust scores used to compute trust_score") + trust_score: float | None = Field(None, ge=0.0, le=1.0, description="Composite trust score (0.0-1.0)") + trust_signals: dict | None = Field(None, description="Component trust scores used to compute trust_score") # Freshness metadata (populated during recall) - freshness_score: Optional[float] = Field(None, description="Exponential freshness score (1.0=new, 0.0=very old)") - staleness_warning: Optional[str] = Field(None, description="Staleness tier: none, mild, moderate, severe") - age_days: Optional[float] = Field(None, description="Age of memory in days since creation") + freshness_score: float | None = Field(None, description="Exponential freshness score (1.0=new, 0.0=very old)") + staleness_warning: str | None = Field(None, description="Staleness tier: none, mild, moderate, severe") + age_days: float | None = Field(None, description="Age of memory in days since creation") # Timestamps - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), description="Creation timestamp") - updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), description="Last update timestamp") + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Creation timestamp") + updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Last update timestamp") @field_validator("content") @classmethod @@ -177,31 +174,26 @@ class RememberInput(BaseModel): """Request model for creating a new memory.""" content: str = Field(..., description="The memory content to 
store") - type: Optional[MemoryType] = Field(None, description="Cognitive type (auto-classified if omitted)") - subtype: Optional[MemorySubtype] = Field(None, description="Domain-specific classification") - importance: float = Field( - 0.5, - ge=0.0, - le=1.0, - description="Memory importance (0.0-1.0)" - ) + type: MemoryType | None = Field(None, description="Cognitive type (auto-classified if omitted)") + subtype: MemorySubtype | None = Field(None, description="Domain-specific classification") + importance: float = Field(0.5, ge=0.0, le=1.0, description="Memory importance (0.0-1.0)") tags: list[str] = Field(default_factory=list, description="Tags for categorization") metadata: dict[str, Any] = Field(default_factory=dict, description="Arbitrary metadata") associations: list[str] = Field(default_factory=list, description="Memory IDs to associate with") # Optional overrides (usually auto-computed) - context_id: Optional[str] = Field(None, description="Target context (default: _default)") - user_id: Optional[str] = Field(None, description="User scope override") + context_id: str | None = Field(None, description="Target context (default: _default)") + user_id: str | None = Field(None, description="User scope override") # Entity attribution (v3) - observer_id: Optional[str] = Field(None, description="Entity doing the observing/remembering") - subject_id: Optional[str] = Field(None, description="Entity this memory is about") + observer_id: str | None = Field(None, description="Entity doing the observing/remembering") + subject_id: str | None = Field(None, description="Entity this memory is about") # Document provenance - source_document_id: Optional[str] = Field(None, description="Source document ID for provenance tracking") - source_page_id: Optional[str] = Field(None, description="Source page ID for provenance tracking") - source_dataset_id: Optional[str] = Field(None, description="Source dataset ID for provenance tracking") - source_thread_id: Optional[str] = 
Field(None, description="Source thread ID for provenance tracking") + source_document_id: str | None = Field(None, description="Source document ID for provenance tracking") + source_page_id: str | None = Field(None, description="Source page ID for provenance tracking") + source_dataset_id: str | None = Field(None, description="Source dataset ID for provenance tracking") + source_thread_id: str | None = Field(None, description="Source thread ID for provenance tracking") class RecallInput(BaseModel): @@ -213,45 +205,37 @@ class RecallInput(BaseModel): types: list[MemoryType] = Field(default_factory=list, description="Filter by cognitive types") subtypes: list[MemorySubtype] = Field(default_factory=list, description="Filter by domain subtypes") tags: list[str] = Field(default_factory=list, description="Filter by tags (AND logic)") - context_id: Optional[str] = Field(None, description="Filter by context") - user_id: Optional[str] = Field(None, description="Filter by user") - observer_id: Optional[str] = Field(None, description="Filter by observer entity") - subject_id: Optional[str] = Field(None, description="Filter by subject entity") + context_id: str | None = Field(None, description="Filter by context") + user_id: str | None = Field(None, description="Filter by user") + observer_id: str | None = Field(None, description="Filter by observer entity") + subject_id: str | None = Field(None, description="Filter by subject entity") include_global: bool = Field(True, description="Include _global workspace in search") # Retrieval settings - mode: Optional[RecallMode] = Field(None, description="Retrieval strategy (None = server default)") - tolerance: Optional[SearchTolerance] = Field(None, description="Search precision (None = server default)") + mode: RecallMode | None = Field(None, description="Retrieval strategy (None = server default)") + tolerance: SearchTolerance | None = Field(None, description="Search precision (None = server default)") limit: int = Field(10, ge=1, 
le=100, description="Maximum memories to return") offset: int = Field(0, ge=0, description="Number of results to skip for pagination") - min_relevance: Optional[float] = Field(None, ge=0.0, le=1.0, description="Minimum relevance score (None = server default)") - recency_weight: Optional[float] = Field(None, ge=0.0, le=1.0, - description="Weight for recency boosting (0.0=disabled, 1.0=full). None = server default.") - detail_level: Optional[DetailLevel] = Field(None, description="Level of detail to return (None = server default)") + min_relevance: float | None = Field(None, ge=0.0, le=1.0, description="Minimum relevance score (None = server default)") + recency_weight: float | None = Field( + None, ge=0.0, le=1.0, description="Weight for recency boosting (0.0=disabled, 1.0=full). None = server default." + ) + detail_level: DetailLevel | None = Field(None, description="Level of detail to return (None = server default)") # Graph traversal (None = use server default from env config) - include_associations: Optional[bool] = Field(None, description="Include linked memories (None = server default)") - traverse_depth: Optional[int] = Field(None, ge=0, le=5, description="Multi-hop graph traversal depth (None = server default)") - max_expansion: Optional[int] = Field(None, ge=1, le=500, - description="Max memories discovered via graph expansion (None = server default)") + include_associations: bool | None = Field(None, description="Include linked memories (None = server default)") + traverse_depth: int | None = Field(None, ge=0, le=5, description="Multi-hop graph traversal depth (None = server default)") + max_expansion: int | None = Field(None, ge=1, le=500, description="Max memories discovered via graph expansion (None = server default)") # Time range filters - created_after: Optional[datetime] = Field(None, description="Filter memories created after this time") - created_before: Optional[datetime] = Field(None, description="Filter memories created before this time") + 
created_after: datetime | None = Field(None, description="Filter memories created after this time") + created_before: datetime | None = Field(None, description="Filter memories created before this time") # LLM mode options - context: list[dict[str, str]] = Field( - default_factory=list, - description="Recent conversation context for query rewriting (LLM mode)" - ) + context: list[dict[str, str]] = Field(default_factory=list, description="Recent conversation context for query rewriting (LLM mode)") # Hybrid mode options - rag_threshold: float = Field( - 0.8, - ge=0.0, - le=1.0, - description="Use LLM if RAG confidence < threshold (hybrid mode)" - ) + rag_threshold: float = Field(0.8, ge=0.0, le=1.0, description="Use LLM if RAG confidence < threshold (hybrid mode)") # Status filtering include_archived: bool = Field(False, description="Include archived memories in recall results") @@ -273,31 +257,31 @@ class RecallResult(BaseModel): mode_used: RecallMode = Field(..., description="Actual retrieval mode used") # LLM mode metadata - query_rewritten: Optional[str] = Field(None, description="Rewritten query (LLM mode)") - sufficiency_reached: Optional[bool] = Field(None, description="Whether search stopped early (LLM mode)") + query_rewritten: str | None = Field(None, description="Rewritten query (LLM mode)") + sufficiency_reached: bool | None = Field(None, description="Whether search stopped early (LLM mode)") # Locality-aware ranking metadata - source_scope: Optional[str] = Field(None, description="Scope of memory source (same_context, same_workspace, global_workspace, other)") - boosted_score: Optional[float] = Field(None, description="Relevance score after locality boost applied") + source_scope: str | None = Field(None, description="Scope of memory source (same_context, same_workspace, global_workspace, other)") + boosted_score: float | None = Field(None, description="Relevance score after locality boost applied") # Token efficiency metadata (for detail_level support) 
- token_summary: Optional[dict[str, Any]] = Field(None, description="Token usage summary when using detail_level") + token_summary: dict[str, Any] | None = Field(None, description="Token usage summary when using detail_level") # Trajectory tracing - trajectory: Optional[dict] = Field(None, description="Trajectory data if trace=True") + trajectory: dict | None = Field(None, description="Trajectory data if trace=True") # Trust scoring - drift_caveat: Optional[str] = Field(None, description="Warning when one or more recalled memories have low trust scores") + drift_caveat: str | None = Field(None, description="Warning when one or more recalled memories have low trust scores") # Freshness metadata - freshness_metadata: Optional[dict] = Field(None, description="Aggregate freshness statistics for returned memories") + freshness_metadata: dict | None = Field(None, description="Aggregate freshness statistics for returned memories") class ReflectInput(BaseModel): """Request model for synthesizing memories.""" query: str = Field(..., description="What to reflect on") - detail_level: Optional[DetailLevel] = Field(None, description="Level of detail for reflection output (None = server default)") + detail_level: DetailLevel | None = Field(None, description="Level of detail for reflection output (None = server default)") include_sources: bool = Field(True, description="Include source memory references") depth: int = Field(2, ge=1, le=5, description="Association traversal depth") @@ -305,10 +289,10 @@ class ReflectInput(BaseModel): types: list[MemoryType] = Field(default_factory=list) subtypes: list[MemorySubtype] = Field(default_factory=list) tags: list[str] = Field(default_factory=list) - context_id: Optional[str] = None - user_id: Optional[str] = None - observer_id: Optional[str] = None - subject_id: Optional[str] = None + context_id: str | None = None + user_id: str | None = None + observer_id: str | None = None + subject_id: str | None = None class ReflectResult(BaseModel): 
@@ -360,11 +344,7 @@ class SessionMemorySections(BaseModel): @property def total_tokens(self) -> int: """Estimated total tokens across all sections (len(content) / 4).""" - total_chars = sum( - len(entry) - for entries in self.sections.values() - for entry in entries - ) + total_chars = sum(len(entry) for entries in self.sections.values() for entry in entries) return total_chars // 4 def add_entry(self, section: str, entry: str) -> bool: diff --git a/memorylayer-core-python/src/memorylayer_server/models/session.py b/memorylayer-core-python/src/memorylayer_server/models/session.py index b383ebd..b3942c1 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/session.py +++ b/memorylayer-core-python/src/memorylayer_server/models/session.py @@ -3,8 +3,9 @@ Sessions provide temporary, TTL-based context storage (working memory tier). """ -from datetime import datetime, timedelta, timezone -from typing import Any, Optional + +from datetime import UTC, datetime, timedelta +from typing import Any from pydantic import BaseModel, Field, field_validator @@ -19,45 +20,28 @@ class Session(BaseModel): workspace_id: str = Field(..., description="Workspace boundary") tenant_id: str = Field(..., description="Tenant this session belongs to") context_id: str = Field("_default", description="Context for this session (default: _default)") - user_id: Optional[str] = Field(None, description="Optional user scope") + user_id: str | None = Field(None, description="Optional user scope") # Metadata - metadata: dict[str, Any] = Field( - default_factory=dict, - description="Session metadata (client info, etc.)" - ) + metadata: dict[str, Any] = Field(default_factory=dict, description="Session metadata (client info, etc.)") # v2 additions for session lifecycle auto_commit: bool = Field(True, description="Auto-commit working memory on session end") - committed_at: Optional[datetime] = Field(None, description="When session was committed to long-term memory") + committed_at: datetime | 
None = Field(None, description="When session was committed to long-term memory") # Lifecycle expires_at: datetime = Field(..., description="Session expiration timestamp") - created_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), - description="Creation timestamp" - ) + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Creation timestamp") @classmethod - def create_with_ttl( - cls, - session_id: str, - workspace_id: str, - ttl_seconds: int = 3600, - **kwargs - ) -> "Session": + def create_with_ttl(cls, session_id: str, workspace_id: str, ttl_seconds: int = 3600, **kwargs) -> "Session": """Create a session with TTL in seconds.""" - return cls( - id=session_id, - workspace_id=workspace_id, - expires_at=datetime.now(timezone.utc) + timedelta(seconds=ttl_seconds), - **kwargs - ) + return cls(id=session_id, workspace_id=workspace_id, expires_at=datetime.now(UTC) + timedelta(seconds=ttl_seconds), **kwargs) @property def is_expired(self) -> bool: """Check if session has expired.""" - return datetime.now(timezone.utc) > self.expires_at + return datetime.now(UTC) > self.expires_at class WorkingMemory(BaseModel): @@ -73,18 +57,9 @@ class WorkingMemory(BaseModel): value: Any = Field(..., description="Context value (JSON-serializable)") # Lifecycle - ttl_seconds: Optional[int] = Field( - None, - description="Optional TTL override (inherits session TTL if None)" - ) - created_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), - description="Creation timestamp" - ) - updated_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), - description="Last update timestamp" - ) + ttl_seconds: int | None = Field(None, description="Optional TTL override (inherits session TTL if None)") + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Creation timestamp") + updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Last 
update timestamp") @field_validator("key") @classmethod @@ -95,31 +70,14 @@ def key_not_empty(cls, v: str) -> str: return v.strip() - - class SessionBriefing(BaseModel): """Session briefing summarizing recent activity and context.""" - workspace_summary: dict[str, Any] = Field( - ..., - description="Workspace-level summary (total memories, recent activity, etc.)" - ) - recent_activity: list[dict[str, Any]] = Field( - default_factory=list, - description="Recent sessions/activity summaries" - ) - open_threads: list[dict[str, Any]] = Field( - default_factory=list, - description="Ongoing topics/threads" - ) - contradictions_detected: list[dict[str, Any]] = Field( - default_factory=list, - description="Memories with contradictory relationships" - ) - memories: list[dict[str, Any]] = Field( - default_factory=list, - description="Relevant memories for this session (v2 addition)" - ) + workspace_summary: dict[str, Any] = Field(..., description="Workspace-level summary (total memories, recent activity, etc.)") + recent_activity: list[dict[str, Any]] = Field(default_factory=list, description="Recent sessions/activity summaries") + open_threads: list[dict[str, Any]] = Field(default_factory=list, description="Ongoing topics/threads") + contradictions_detected: list[dict[str, Any]] = Field(default_factory=list, description="Memories with contradictory relationships") + memories: list[dict[str, Any]] = Field(default_factory=list, description="Relevant memories for this session (v2 addition)") class WorkspaceSummary(BaseModel): @@ -157,7 +115,4 @@ class Contradiction(BaseModel): memory_b: str = Field(..., description="Second memory ID") relationship: str = Field(..., description="contradicts") needs_resolution: bool = Field(True, description="Whether this needs user attention") - detected_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), - description="Detection timestamp" - ) + detected_at: datetime = Field(default_factory=lambda: datetime.now(UTC), 
description="Detection timestamp") diff --git a/memorylayer-core-python/src/memorylayer_server/models/workspace.py b/memorylayer-core-python/src/memorylayer_server/models/workspace.py index c631a52..5871c72 100644 --- a/memorylayer-core-python/src/memorylayer_server/models/workspace.py +++ b/memorylayer-core-python/src/memorylayer_server/models/workspace.py @@ -5,8 +5,9 @@ Hierarchy: Tenant -> Workspace -> Context -> Session -> WorkingMemory """ -from datetime import datetime, timezone -from typing import Any, Optional + +from datetime import UTC, datetime +from typing import Any from pydantic import BaseModel, Field, field_validator @@ -30,16 +31,8 @@ class WorkspaceSettings(BaseModel): # Auto-remember auto_remember_enabled: bool = Field(False, description="Auto-capture significant interactions") - auto_remember_min_importance: float = Field( - 0.6, - ge=0.0, - le=1.0, - description="Minimum importance for auto-capture" - ) - auto_remember_exclude_patterns: list[str] = Field( - default_factory=list, - description="Patterns to exclude from auto-capture" - ) + auto_remember_min_importance: float = Field(0.6, ge=0.0, le=1.0, description="Minimum importance for auto-capture") + auto_remember_exclude_patterns: list[str] = Field(default_factory=list, description="Patterns to exclude from auto-capture") # Embeddings embedding_model: str = Field("text-embedding-3-small", description="Embedding model to use") @@ -68,14 +61,11 @@ class Workspace(BaseModel): name: str = Field(..., description="Human-readable workspace name") # Configuration - settings: dict[str, Any] = Field( - default_factory=dict, - description="Workspace-level settings (retention, auto-remember, etc.)" - ) + settings: dict[str, Any] = Field(default_factory=dict, description="Workspace-level settings (retention, auto-remember, etc.)") # Timestamps - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), description="Creation timestamp") - updated_at: datetime = 
Field(default_factory=lambda: datetime.now(timezone.utc), description="Last update timestamp") + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Creation timestamp") + updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Last update timestamp") @field_validator("name") @classmethod @@ -93,12 +83,12 @@ class ContextSettings(BaseModel): inherit_workspace_settings: bool = Field(True, description="Inherit workspace settings") # Overrides (only apply if inherit_workspace_settings=False) - auto_remember_enabled: Optional[bool] = None - decay_enabled: Optional[bool] = None + auto_remember_enabled: bool | None = None + decay_enabled: bool | None = None # Context-specific settings (v2) - default_importance: Optional[float] = Field(None, ge=0.0, le=1.0, description="Override default importance") - session_auto_commit: Optional[bool] = Field(None, description="Override session auto-commit") + default_importance: float | None = Field(None, ge=0.0, le=1.0, description="Override default importance") + session_auto_commit: bool | None = Field(None, description="Override session auto-commit") class Context(BaseModel): @@ -114,16 +104,13 @@ class Context(BaseModel): id: str = Field(..., description="Unique context identifier") workspace_id: str = Field(..., description="Parent workspace ID") name: str = Field(..., description="Context name (unique within workspace)") - description: Optional[str] = Field(None, description="Context description") + description: str | None = Field(None, description="Context description") # Configuration - settings: dict[str, Any] = Field( - default_factory=dict, - description="Context-level settings (overrides workspace defaults)" - ) + settings: dict[str, Any] = Field(default_factory=dict, description="Context-level settings (overrides workspace defaults)") # Timestamps - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), description="Creation 
timestamp") + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description="Creation timestamp") @field_validator("name") @classmethod @@ -132,5 +119,3 @@ def name_not_empty(cls, v: str) -> str: if not v or not v.strip(): raise ValueError("Context name cannot be empty") return v.strip() - - diff --git a/memorylayer-core-python/src/memorylayer_server/services/_constants.py b/memorylayer-core-python/src/memorylayer_server/services/_constants.py index 2ea9311..85170db 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/_constants.py +++ b/memorylayer-core-python/src/memorylayer_server/services/_constants.py @@ -9,125 +9,124 @@ # ============================================ # Storage # ============================================ -EXT_STORAGE_BACKEND = 'memorylayer-primary-storage' +EXT_STORAGE_BACKEND = "memorylayer-primary-storage" # ============================================ # Authentication & Authorization # ============================================ EXT_AUTHENTICATION_SERVICE = "memorylayer-authentication-service" -EXT_AUTHORIZATION_SERVICE = 'memorylayer-authorization-service' +EXT_AUTHORIZATION_SERVICE = "memorylayer-authorization-service" # ============================================ # Session # ============================================ -EXT_SESSION_SERVICE = 'memorylayer-session-service' +EXT_SESSION_SERVICE = "memorylayer-session-service" # ============================================ # Workspace # ============================================ -EXT_WORKSPACE_SERVICE = 'memorylayer-workspace-service' +EXT_WORKSPACE_SERVICE = "memorylayer-workspace-service" # ============================================ # Cache # ============================================ -EXT_CACHE_SERVICE = 'memorylayer-cache-service' +EXT_CACHE_SERVICE = "memorylayer-cache-service" # ============================================ # Embedding # ============================================ -EXT_EMBEDDING_PROVIDER = 'embedding-provider' 
-EXT_EMBEDDING_SERVICE = 'embedding-service' +EXT_EMBEDDING_PROVIDER = "embedding-provider" +EXT_EMBEDDING_SERVICE = "embedding-service" # ============================================ # LLM # ============================================ -EXT_LLM_PROVIDER = 'memorylayer-llm-provider' -EXT_LLM_SERVICE = 'memorylayer-llm-service' -EXT_LLM_REGISTRY = 'memorylayer-llm-registry' +EXT_LLM_SERVICE = "memorylayer-llm-service" +EXT_LLM_REGISTRY = "memorylayer-llm-registry" # ============================================ # Reranker # ============================================ -EXT_RERANKER_PROVIDER = 'reranker-provider' -EXT_RERANKER_SERVICE = 'reranker-service' +EXT_RERANKER_PROVIDER = "reranker-provider" +EXT_RERANKER_SERVICE = "reranker-service" # ============================================ # Memory # ============================================ -EXT_MEMORY_SERVICE = 'memorylayer-memory-service' +EXT_MEMORY_SERVICE = "memorylayer-memory-service" # ============================================ # Extraction # ============================================ -EXT_EXTRACTION_SERVICE = 'memorylayer-extraction-service' +EXT_EXTRACTION_SERVICE = "memorylayer-extraction-service" # ============================================ # Deduplication # ============================================ -EXT_DEDUPLICATION_SERVICE = 'memorylayer-deduplication-service' +EXT_DEDUPLICATION_SERVICE = "memorylayer-deduplication-service" # ============================================ # Contradiction # ============================================ -EXT_CONTRADICTION_SERVICE = 'memorylayer-contradiction-service' +EXT_CONTRADICTION_SERVICE = "memorylayer-contradiction-service" # ============================================ # Decay # ============================================ -EXT_DECAY_SERVICE = 'memorylayer-decay-service' +EXT_DECAY_SERVICE = "memorylayer-decay-service" # ============================================ # Semantic Tiering # ============================================ 
-EXT_SEMANTIC_TIERING_SERVICE = 'memorylayer-tier-generation-service' +EXT_SEMANTIC_TIERING_SERVICE = "memorylayer-tier-generation-service" # ============================================ # Association # ============================================ -EXT_ASSOCIATION_SERVICE = 'memorylayer-association-service' +EXT_ASSOCIATION_SERVICE = "memorylayer-association-service" # ============================================ # Ontology # ============================================ -EXT_ONTOLOGY_SERVICE = 'memorylayer-ontology-service' +EXT_ONTOLOGY_SERVICE = "memorylayer-ontology-service" # ============================================ # Reflect # ============================================ -EXT_REFLECT_SERVICE = 'memorylayer-reflect-service' +EXT_REFLECT_SERVICE = "memorylayer-reflect-service" # ============================================ # Inference (entity insight derivation) # ============================================ -EXT_INFERENCE_SERVICE = 'memorylayer-inference-service' +EXT_INFERENCE_SERVICE = "memorylayer-inference-service" # ============================================ # Context Environment # ============================================ -EXT_CONTEXT_ENVIRONMENT_SERVICE = 'memorylayer-context-environment-service' +EXT_CONTEXT_ENVIRONMENT_SERVICE = "memorylayer-context-environment-service" # ============================================ # Tasks # ============================================ -EXT_TASK_SERVICE = 'memorylayer-task-service' -EXT_MULTI_TASK_HANDLERS = 'memorylayer-multi-task-handlers' +EXT_TASK_SERVICE = "memorylayer-task-service" +EXT_MULTI_TASK_HANDLERS = "memorylayer-multi-task-handlers" # ============================================ # Chat History # ============================================ -EXT_CHAT_SERVICE = 'memorylayer-chat-service' +EXT_CHAT_SERVICE = "memorylayer-chat-service" # ============================================ # Audit # ============================================ -EXT_AUDIT_SERVICE = 'memorylayer-audit-service' 
+EXT_AUDIT_SERVICE = "memorylayer-audit-service" # ============================================ # Rate Limiting # ============================================ -EXT_RATE_LIMIT_SERVICE = 'memorylayer-rate-limit-service' +EXT_RATE_LIMIT_SERVICE = "memorylayer-rate-limit-service" # ============================================ # Metrics / Observability # ============================================ -EXT_METRICS_SERVICE = 'memorylayer-metrics-service' \ No newline at end of file +EXT_METRICS_SERVICE = "memorylayer-metrics-service" diff --git a/memorylayer-core-python/src/memorylayer_server/services/_plugin_factory.py b/memorylayer-core-python/src/memorylayer_server/services/_plugin_factory.py index e9289c0..778a548 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/_plugin_factory.py +++ b/memorylayer-core-python/src/memorylayer_server/services/_plugin_factory.py @@ -1,15 +1,16 @@ """Factory for generating service plugin base classes with common boilerplate.""" + from scitrera_app_framework import Plugin, Variables from scitrera_app_framework.api import enabled_option_pattern def make_service_plugin_base( - *, - ext_name: str, - config_key: str, - default_value: str, - dependencies: tuple[str, ...] = (), - extra_defaults: dict | None = None, + *, + ext_name: str, + config_key: str, + default_value: str, + dependencies: tuple[str, ...] = (), + extra_defaults: dict | None = None, ) -> type[Plugin]: """Create a PluginBase class with standard service plugin boilerplate. 
@@ -46,7 +47,7 @@ def extension_point_name(self, v: Variables) -> str: return ext_name def is_enabled(self, v: Variables) -> bool: - return enabled_option_pattern(self, v, config_key, self_attr='PROVIDER_NAME') + return enabled_option_pattern(self, v, config_key, self_attr="PROVIDER_NAME") def on_registration(self, v: Variables) -> None: v.set_default_value(config_key, default_value) diff --git a/memorylayer-core-python/src/memorylayer_server/services/association/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/association/__init__.py index 85cad1b..4ea6c13 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/association/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/association/__init__.py @@ -1,14 +1,15 @@ """Association service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - AssociationServicePluginBase, + DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, EXT_ASSOCIATION_SERVICE, MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, - DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, + AssociationServicePluginBase, ) from .default import AssociationService -from scitrera_app_framework import Variables, get_extension - def get_association_service(v: Variables = None) -> AssociationService: """Get the association service instance.""" @@ -16,10 +17,10 @@ def get_association_service(v: Variables = None) -> AssociationService: __all__ = ( - 'AssociationService', - 'AssociationServicePluginBase', - 'get_association_service', - 'EXT_ASSOCIATION_SERVICE', - 'MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD', - 'DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD', + "AssociationService", + "AssociationServicePluginBase", + "get_association_service", + "EXT_ASSOCIATION_SERVICE", + "MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD", + "DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD", ) diff --git 
a/memorylayer-core-python/src/memorylayer_server/services/association/base.py b/memorylayer-core-python/src/memorylayer_server/services/association/base.py index c64b282..c831c38 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/association/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/association/base.py @@ -1,4 +1,4 @@ -from ...config import MEMORYLAYER_ASSOCIATION_SERVICE, DEFAULT_MEMORYLAYER_ASSOCIATION_SERVICE +from ...config import DEFAULT_MEMORYLAYER_ASSOCIATION_SERVICE, MEMORYLAYER_ASSOCIATION_SERVICE from .._constants import EXT_ASSOCIATION_SERVICE, EXT_ONTOLOGY_SERVICE, EXT_STORAGE_BACKEND from .._plugin_factory import make_service_plugin_base @@ -6,7 +6,7 @@ # Association Configuration # ============================================ # Threshold for auto-associating similar memories -MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD = 'MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD' +MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD = "MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD" DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD = 0.85 diff --git a/memorylayer-core-python/src/memorylayer_server/services/association/default.py b/memorylayer-core-python/src/memorylayer_server/services/association/default.py index f30ffda..299ad4d 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/association/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/association/default.py @@ -9,48 +9,48 @@ """ from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables +from ...models import ( + KNOWN_RELATIONSHIP_TYPES, + AssociateInput, + Association, + GraphQueryInput, + GraphQueryResult, + RelationshipCategory, + get_relationship_category, +) +from ..ontology import EXT_ONTOLOGY_SERVICE, OntologyService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend from .base import ( - AssociationServicePluginBase, + 
DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, - DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD + AssociationServicePluginBase, ) -from ..storage import StorageBackend, EXT_STORAGE_BACKEND -from ...models import AssociateInput, Association, GraphQueryInput, GraphQueryResult, RelationshipCategory, KNOWN_RELATIONSHIP_TYPES, get_relationship_category -from ...utils import generate_id -from ..ontology import OntologyService, EXT_ONTOLOGY_SERVICE class AssociationService: """Service for managing memory associations and graph operations.""" - def __init__(self, storage: StorageBackend, ontology_service: Optional[OntologyService] = None, v: Variables = None): + def __init__(self, storage: StorageBackend, ontology_service: OntologyService | None = None, v: Variables = None): self.storage = storage self.ontology_service = ontology_service self.logger = get_logger(v, name=self.__class__.__name__) self.auto_association_threshold = v.get( - MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, - DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD + MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD ) self.logger.info("Initialized AssociationService with auto_association_threshold=%.2f", self.auto_association_threshold) async def associate( - self, - workspace_id: str, - input: AssociateInput, + self, + workspace_id: str, + input: AssociateInput, ) -> Association: """Create a relationship between two memories.""" - self.logger.info( - "Creating association: %s -[%s]-> %s", - input.source_id, - input.relationship, - input.target_id - ) + self.logger.info("Creating association: %s -[%s]-> %s", input.source_id, input.relationship, input.target_id) # Validate that both memories exist source = await self.storage.get_memory(workspace_id, input.source_id, track_access=False) @@ -80,37 +80,30 @@ async def associate( return association async def get_related( - self, - workspace_id: str, - 
memory_id: str, - relationships: Optional[list[str]] = None, - direction: str = "both", + self, + workspace_id: str, + memory_id: str, + relationships: list[str] | None = None, + direction: str = "both", ) -> list[Association]: """Get all associations for a memory.""" - self.logger.debug( - "Getting related memories for: %s, direction: %s", - memory_id, - direction - ) + self.logger.debug("Getting related memories for: %s, direction: %s", memory_id, direction) # Validate direction if direction not in ["outgoing", "incoming", "both"]: raise ValueError(f"Invalid direction: {direction}") associations = await self.storage.get_associations( - workspace_id=workspace_id, - memory_id=memory_id, - direction=direction, - relationships=relationships + workspace_id=workspace_id, memory_id=memory_id, direction=direction, relationships=relationships ) self.logger.debug("Found %s associations for memory: %s", len(associations), memory_id) return associations async def traverse( - self, - workspace_id: str, - input: GraphQueryInput, + self, + workspace_id: str, + input: GraphQueryInput, ) -> GraphQueryResult: """ Multi-hop graph traversal. 
@@ -118,12 +111,7 @@ async def traverse( Example: Find what caused a problem: [Problem] <--CAUSED_BY-- [Error] <--TRIGGERED_BY-- [Change] """ - self.logger.info( - "Traversing graph from: %s, max_depth: %s, direction: %s", - input.start_memory_id, - input.max_depth, - input.direction - ) + self.logger.info("Traversing graph from: %s, max_depth: %s, direction: %s", input.start_memory_id, input.max_depth, input.direction) # Validate direction if input.direction not in ["outgoing", "incoming", "both"]: @@ -135,21 +123,17 @@ async def traverse( start_id=input.start_memory_id, max_depth=input.max_depth, relationships=input.relationship_types or None, - direction=input.direction + direction=input.direction, ) - self.logger.info( - "Graph traversal found %s paths, %s unique nodes", - result.total_paths, - len(result.unique_nodes) - ) + self.logger.info("Graph traversal found %s paths, %s unique nodes", result.total_paths, len(result.unique_nodes)) return result async def find_contradictions( - self, - workspace_id: str, - memory_id: Optional[str] = None, + self, + workspace_id: str, + memory_id: str | None = None, ) -> list[tuple[str, str]]: """ Find memories that contradict each other. 
@@ -163,10 +147,7 @@ async def find_contradictions( if memory_id: # Find contradictions for specific memory associations = await self.storage.get_associations( - workspace_id=workspace_id, - memory_id=memory_id, - direction="both", - relationships=["contradicts"] + workspace_id=workspace_id, memory_id=memory_id, direction="both", relationships=["contradicts"] ) for assoc in associations: @@ -195,12 +176,12 @@ async def find_contradictions( return contradictions async def auto_associate( - self, - workspace_id: str, - new_memory_id: str, - similar_memories: list[tuple[str, float]], - threshold: float = None, - new_memory_content: Optional[str] = None, + self, + workspace_id: str, + new_memory_id: str, + similar_memories: list[tuple[str, float]], + threshold: float = None, + new_memory_content: str | None = None, ) -> list[Association]: """ Automatically create associations for highly similar memories. @@ -221,16 +202,13 @@ async def auto_associate( threshold = self.auto_association_threshold self.logger.debug( - "Auto-associating memory: %s with %s similar memories (threshold=%.2f)", - new_memory_id, - len(similar_memories), - threshold + "Auto-associating memory: %s with %s similar memories (threshold=%.2f)", new_memory_id, len(similar_memories), threshold ) # Determine whether LLM classification is available use_llm = ( self.ontology_service is not None - and getattr(self.ontology_service, 'llm_service', None) is not None + and getattr(self.ontology_service, "llm_service", None) is not None and new_memory_content is not None ) @@ -259,7 +237,9 @@ async def auto_associate( except Exception as e: self.logger.debug( "LLM classification failed for %s <-> %s, using similar_to: %s", - new_memory_id, similar_id, e, + new_memory_id, + similar_id, + e, ) relationship = "similar_to" @@ -269,36 +249,27 @@ async def auto_associate( target_id=similar_id, relationship=relationship, strength=similarity_score, - metadata={"auto_generated": True, "similarity_score": 
similarity_score} + metadata={"auto_generated": True, "similarity_score": similarity_score}, ) association = await self.storage.create_association(workspace_id, assoc_input) associations.append(association) self.logger.debug( - "Auto-associated %s -[%s]-> %s (similarity: %.2f)", - new_memory_id, - relationship, - similar_id, - similarity_score + "Auto-associated %s -[%s]-> %s (similarity: %.2f)", new_memory_id, relationship, similar_id, similarity_score ) except Exception as e: - self.logger.warning( - "Failed to auto-associate %s with %s: %s", - new_memory_id, - similar_id, - e - ) + self.logger.warning("Failed to auto-associate %s with %s: %s", new_memory_id, similar_id, e) self.logger.info("Created %s auto-associations for memory: %s", len(associations), new_memory_id) return associations async def get_causal_chain( - self, - workspace_id: str, - effect_memory_id: str, - max_depth: int = 5, + self, + workspace_id: str, + effect_memory_id: str, + max_depth: int = 5, ) -> GraphQueryResult: """ Find causal chain leading to a specific memory. 
@@ -311,10 +282,7 @@ async def get_causal_chain( if self.ontology_service: causal_relationships = self.ontology_service.get_relationships_by_category("causal") else: - causal_relationships = [ - rel for rel in KNOWN_RELATIONSHIP_TYPES - if get_relationship_category(rel) == "causal" - ] + causal_relationships = [rel for rel in KNOWN_RELATIONSHIP_TYPES if get_relationship_category(rel) == "causal"] # Traverse incoming edges (what caused this) query = GraphQueryInput( @@ -323,7 +291,7 @@ async def get_causal_chain( max_depth=max_depth, direction="incoming", max_paths=50, - max_nodes=100 + max_nodes=100, ) result = await self.traverse(workspace_id, query) @@ -332,9 +300,9 @@ async def get_causal_chain( return result async def get_solutions_for_problem( - self, - workspace_id: str, - problem_memory_id: str, + self, + workspace_id: str, + problem_memory_id: str, ) -> list[str]: """ Find all memories that solve or address a specific problem. @@ -347,16 +315,13 @@ async def get_solutions_for_problem( if self.ontology_service: solution_relationships = self.ontology_service.get_relationships_by_category("solution") else: - solution_relationships = [ - rel for rel in KNOWN_RELATIONSHIP_TYPES - if get_relationship_category(rel) == "solution" - ] + solution_relationships = [rel for rel in KNOWN_RELATIONSHIP_TYPES if get_relationship_category(rel) == "solution"] associations = await self.storage.get_associations( workspace_id=workspace_id, memory_id=problem_memory_id, direction="incoming", # Things that solve this problem - relationships=solution_relationships + relationships=solution_relationships, ) solution_ids = [assoc.source_id for assoc in associations] @@ -365,35 +330,27 @@ async def get_solutions_for_problem( return solution_ids async def get_related_by_category( - self, - workspace_id: str, - memory_id: str, - category: RelationshipCategory, - max_depth: int = 2, + self, + workspace_id: str, + memory_id: str, + category: RelationshipCategory, + max_depth: int = 2, ) -> 
GraphQueryResult: """ Find memories related by a specific relationship category. Example: Get all causal relationships (CAUSES, TRIGGERS, LEADS_TO, PREVENTS) """ - self.logger.info( - "Finding memories related to %s by category: %s", - memory_id, - category - ) + self.logger.info("Finding memories related to %s by category: %s", memory_id, category) # Get all relationship types in this category using ontology service if self.ontology_service: - relationship_types = self.ontology_service.get_relationships_by_category( - category.value - ) + relationship_types = self.ontology_service.get_relationships_by_category(category.value) else: # Fallback: use KNOWN_RELATIONSHIP_TYPES + get_relationship_category helper from ...models.association import get_relationship_category - relationship_types = [ - rel for rel in KNOWN_RELATIONSHIP_TYPES - if get_relationship_category(rel) == category.value - ] + + relationship_types = [rel for rel in KNOWN_RELATIONSHIP_TYPES if get_relationship_category(rel) == category.value] query = GraphQueryInput( start_memory_id=memory_id, @@ -401,27 +358,24 @@ async def get_related_by_category( max_depth=max_depth, direction="both", max_paths=100, - max_nodes=200 + max_nodes=200, ) result = await self.traverse(workspace_id, query) - self.logger.info( - "Found %s paths in category %s", - len(result.paths), - category - ) + self.logger.info("Found %s paths in category %s", len(result.paths), category) return result class DefaultAssociationServicePlugin(AssociationServicePluginBase): """Default association service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> AssociationService: storage_backend: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) - ontology_service: Optional[OntologyService] = None + ontology_service: OntologyService | None = None try: ontology_service = self.get_extension(EXT_ONTOLOGY_SERVICE, v) except Exception: diff --git 
a/memorylayer-core-python/src/memorylayer_server/services/audit/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/audit/__init__.py index 15fab5e..6c0deba 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/audit/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/audit/__init__.py @@ -1,5 +1,6 @@ """Audit Service package.""" -from .base import AuditEvent, AuditService, AuditServicePluginBase, EXT_AUDIT_SERVICE + +from .base import EXT_AUDIT_SERVICE, AuditEvent, AuditService, AuditServicePluginBase __all__ = [ "AuditEvent", diff --git a/memorylayer-core-python/src/memorylayer_server/services/audit/base.py b/memorylayer-core-python/src/memorylayer_server/services/audit/base.py index 44e98c5..517815a 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/audit/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/audit/base.py @@ -1,12 +1,11 @@ """Audit Service - Pluggable audit logging interface.""" + from abc import ABC, abstractmethod from dataclasses import dataclass, field -from datetime import datetime, timezone -from typing import Optional +from datetime import UTC, datetime from uuid import uuid4 -from ...config import MEMORYLAYER_AUDIT_SERVICE, DEFAULT_MEMORYLAYER_AUDIT_SERVICE - +from ...config import DEFAULT_MEMORYLAYER_AUDIT_SERVICE, MEMORYLAYER_AUDIT_SERVICE from .._constants import EXT_AUDIT_SERVICE from .._plugin_factory import make_service_plugin_base @@ -32,22 +31,22 @@ class AuditEvent: tenant_id: str """Tenant this event belongs to.""" - workspace_id: Optional[str] = None + workspace_id: str | None = None """Workspace scope, if applicable.""" - user_id: Optional[str] = None + user_id: str | None = None """Acting user or principal, if known.""" - resource_type: Optional[str] = None + resource_type: str | None = None """Type of resource acted upon: 'memory', 'session', 'workspace', etc.""" - resource_id: Optional[str] = None + resource_id: str | None = None 
"""Identifier of the specific resource.""" metadata: dict = field(default_factory=dict) """Additional context (IP address, user-agent, request ID, etc.).""" - timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) + timestamp: datetime = field(default_factory=lambda: datetime.now(UTC)) """UTC timestamp when the event occurred.""" id: str = field(default_factory=lambda: uuid4().hex) @@ -83,9 +82,9 @@ async def record_batch(self, events: list[AuditEvent]) -> None: async def query( self, tenant_id: str, - workspace_id: Optional[str] = None, - event_type: Optional[str] = None, - since: Optional[datetime] = None, + workspace_id: str | None = None, + event_type: str | None = None, + since: datetime | None = None, limit: int = 100, ) -> list[AuditEvent]: """Query audit events. diff --git a/memorylayer-core-python/src/memorylayer_server/services/audit/noop.py b/memorylayer-core-python/src/memorylayer_server/services/audit/noop.py index 855bfbb..19d1ec8 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/audit/noop.py +++ b/memorylayer-core-python/src/memorylayer_server/services/audit/noop.py @@ -1,7 +1,7 @@ """No-op audit service - silently discards all events (OSS default).""" + from datetime import datetime from logging import Logger -from typing import Optional from scitrera_app_framework.api import Variables @@ -20,9 +20,9 @@ async def record_batch(self, events: list[AuditEvent]) -> None: async def query( self, tenant_id: str, - workspace_id: Optional[str] = None, - event_type: Optional[str] = None, - since: Optional[datetime] = None, + workspace_id: str | None = None, + event_type: str | None = None, + since: datetime | None = None, limit: int = 100, ) -> list[AuditEvent]: return [] @@ -30,7 +30,8 @@ async def query( class NoopAuditServicePlugin(AuditServicePluginBase): """Plugin for no-op audit service.""" - PROVIDER_NAME = 'noop' - def initialize(self, v: Variables, logger: Logger) -> Optional[NoopAuditService]: + 
PROVIDER_NAME = "noop" + + def initialize(self, v: Variables, logger: Logger) -> NoopAuditService | None: return NoopAuditService() diff --git a/memorylayer-core-python/src/memorylayer_server/services/authentication/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/authentication/__init__.py index 954aaa5..726d1a2 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/authentication/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/authentication/__init__.py @@ -3,13 +3,14 @@ Provides identity verification and request context resolution. """ + from .base import ( - AuthenticationService, - AuthenticationServicePluginBase, - AuthenticationError, EXT_AUTHENTICATION_SERVICE, HEADER_AUTHORIZATION, HEADER_SESSION_ID, + AuthenticationError, + AuthenticationService, + AuthenticationServicePluginBase, ) from .default import ( OpenAuthenticationService, diff --git a/memorylayer-core-python/src/memorylayer_server/services/authentication/base.py b/memorylayer-core-python/src/memorylayer_server/services/authentication/base.py index bbcb1f3..8ee31ab 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/authentication/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/authentication/base.py @@ -13,17 +13,19 @@ - RBAC integration - Gateway-injected identity headers (e.g. 
Aether auth-proxy) """ + import logging from abc import ABC, abstractmethod -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Optional from fastapi import Request from pydantic import BaseModel -from ...models.auth import AuthIdentity, RequestContext + from ...config import ( - MEMORYLAYER_AUTHENTICATION_SERVICE, DEFAULT_MEMORYLAYER_AUTHENTICATION_SERVICE, + MEMORYLAYER_AUTHENTICATION_SERVICE, ) +from ...models.auth import AuthIdentity, RequestContext if TYPE_CHECKING: from ...models.session import Session @@ -55,11 +57,11 @@ class AuthenticationService(ABC): - Building RequestContext with resolved workspace """ - def __init__(self, logger: Optional[logging.Logger] = None): + def __init__(self, logger: logging.Logger | None = None): self.logger = logger or logging.getLogger(__name__) @abstractmethod - async def verify_api_key(self, api_key: Optional[str]) -> AuthIdentity: + async def verify_api_key(self, api_key: str | None) -> AuthIdentity: """ Verify API key and return identity. @@ -75,7 +77,7 @@ async def verify_api_key(self, api_key: Optional[str]) -> AuthIdentity: pass @abstractmethod - async def resolve_session(self, session_id: Optional[str]) -> Optional["Session"]: + async def resolve_session(self, session_id: str | None) -> Optional["Session"]: """ Resolve session from session ID. @@ -90,7 +92,7 @@ async def resolve_session(self, session_id: Optional[str]) -> Optional["Session" @abstractmethod async def resolve_workspace( self, - request_workspace_id: Optional[str], + request_workspace_id: str | None, session: Optional["Session"], tenant_id: str, ) -> str: @@ -143,7 +145,7 @@ async def ensure_session( async def build_context( self, request: Request, - body: Optional[BaseModel] = None, + body: BaseModel | None = None, ) -> RequestContext: """ Build full RequestContext from request headers and body. 
@@ -185,9 +187,7 @@ async def build_context( # Implicit session creation: if session_id was provided but session # not found, and client explicitly provided a workspace, auto-create if session_id and session is None and request_workspace_id: - session = await self.ensure_session( - session_id, workspace_id, identity.tenant_id - ) + session = await self.ensure_session(session_id, workspace_id, identity.tenant_id) self.logger.debug( "Built context: tenant=%s, workspace=%s, session=%s", @@ -203,7 +203,7 @@ async def build_context( session=session, ) - def _extract_api_key(self, request: Request) -> Optional[str]: + def _extract_api_key(self, request: Request) -> str | None: """Extract API key from Authorization header.""" auth_header = request.headers.get(HEADER_AUTHORIZATION) if not auth_header: diff --git a/memorylayer-core-python/src/memorylayer_server/services/authentication/default.py b/memorylayer-core-python/src/memorylayer_server/services/authentication/default.py index 3717e09..1bdd100 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/authentication/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/authentication/default.py @@ -6,26 +6,27 @@ - Session resolution via session service - Workspace auto-creation on first access """ + import logging -from typing import Optional, Iterable +from collections.abc import Iterable -from scitrera_app_framework import Variables, get_extension, ext_parse_bool +from scitrera_app_framework import Variables, ext_parse_bool, get_extension -from .base import ( - AuthenticationService, - AuthenticationServicePluginBase, - EXT_AUTHENTICATION_SERVICE, -) -from ...models.auth import AuthIdentity -from ...models.session import Session from ...config import ( + DEFAULT_MEMORYLAYER_SESSION_IMPLICIT_CREATE, DEFAULT_TENANT_ID, DEFAULT_WORKSPACE_ID, MEMORYLAYER_SESSION_IMPLICIT_CREATE, - DEFAULT_MEMORYLAYER_SESSION_IMPLICIT_CREATE, ) -from ...services.session import SessionService, 
EXT_SESSION_SERVICE -from ...services.workspace import WorkspaceService, EXT_WORKSPACE_SERVICE +from ...models.auth import AuthIdentity +from ...models.session import Session +from ...services.session import EXT_SESSION_SERVICE, SessionService +from ...services.workspace import EXT_WORKSPACE_SERVICE, WorkspaceService +from .base import ( + EXT_AUTHENTICATION_SERVICE, + AuthenticationService, + AuthenticationServicePluginBase, +) class OpenAuthenticationService(AuthenticationService): @@ -38,18 +39,18 @@ class OpenAuthenticationService(AuthenticationService): """ def __init__( - self, - session_service: SessionService, - workspace_service: WorkspaceService, - implicit_session_create: bool = True, - logger: Optional[logging.Logger] = None, + self, + session_service: SessionService, + workspace_service: WorkspaceService, + implicit_session_create: bool = True, + logger: logging.Logger | None = None, ): super().__init__(logger) self.session_service = session_service self.workspace_service = workspace_service self._implicit_session_create = implicit_session_create - async def verify_api_key(self, api_key: Optional[str]) -> AuthIdentity: + async def verify_api_key(self, api_key: str | None) -> AuthIdentity: """ Verify API key - always succeeds in OSS. @@ -63,7 +64,7 @@ async def verify_api_key(self, api_key: Optional[str]) -> AuthIdentity: api_key_id=None, ) - async def resolve_session(self, session_id: Optional[str]) -> Optional[Session]: + async def resolve_session(self, session_id: str | None) -> Session | None: """ Resolve session from session service. @@ -80,10 +81,10 @@ async def resolve_session(self, session_id: Optional[str]) -> Optional[Session]: return None async def resolve_workspace( - self, - request_workspace_id: Optional[str], - session: Optional[Session], - tenant_id: str, + self, + request_workspace_id: str | None, + session: Session | None, + tenant_id: str, ) -> str: """ Resolve workspace with priority order and auto-creation. 
@@ -94,11 +95,7 @@ async def resolve_workspace( 3. DEFAULT_WORKSPACE_ID ("_default") """ # Priority resolution - workspace_id = ( - request_workspace_id - or (session.workspace_id if session else None) - or DEFAULT_WORKSPACE_ID - ) + workspace_id = request_workspace_id or (session.workspace_id if session else None) or DEFAULT_WORKSPACE_ID # Auto-create workspace if needed (OSS "just works" pattern) await self.workspace_service.ensure_workspace( @@ -110,11 +107,11 @@ async def resolve_workspace( return workspace_id async def ensure_session( - self, - session_id: str, - workspace_id: str, - tenant_id: str, - ) -> Optional[Session]: + self, + session_id: str, + workspace_id: str, + tenant_id: str, + ) -> Session | None: """ Auto-create session for unknown session_id when workspace is explicit. @@ -138,20 +135,23 @@ async def ensure_session( created = await self.session_service.create_session(workspace_id, session) self.logger.info( "Implicitly created session %s in workspace %s", - session_id, workspace_id, + session_id, + workspace_id, ) return created except Exception as e: self.logger.warning( "Failed to implicitly create session %s: %s", - session_id, e, + session_id, + e, ) return None class OpenAuthenticationServicePlugin(AuthenticationServicePluginBase): """Plugin to register the OSS authentication service.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: logging.Logger) -> OpenAuthenticationService: session_service = self.get_extension(EXT_SESSION_SERVICE, v=v) @@ -172,7 +172,10 @@ def initialize(self, v: Variables, logger: logging.Logger) -> OpenAuthentication # noinspection PyMethodMayBeStatic def get_dependencies(self, v: Variables) -> Iterable[str]: - return EXT_SESSION_SERVICE, EXT_WORKSPACE_SERVICE, + return ( + EXT_SESSION_SERVICE, + EXT_WORKSPACE_SERVICE, + ) def get_authentication_service(v: Variables) -> AuthenticationService: diff --git 
a/memorylayer-core-python/src/memorylayer_server/services/authorization/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/authorization/__init__.py index dc1f29d..f77d60c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/authorization/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/authorization/__init__.py @@ -1,12 +1,13 @@ """Authorization service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( + EXT_AUTHORIZATION_SERVICE, AuthorizationService, AuthorizationServicePluginBase, - EXT_AUTHORIZATION_SERVICE, ) -from scitrera_app_framework import Variables, get_extension - def get_authorization_service(v: Variables = None) -> AuthorizationService: """Get the authorization service instance.""" @@ -14,8 +15,8 @@ def get_authorization_service(v: Variables = None) -> AuthorizationService: __all__ = ( - 'AuthorizationService', - 'AuthorizationServicePluginBase', - 'get_authorization_service', - 'EXT_AUTHORIZATION_SERVICE', + "AuthorizationService", + "AuthorizationServicePluginBase", + "get_authorization_service", + "EXT_AUTHORIZATION_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/authorization/base.py b/memorylayer-core-python/src/memorylayer_server/services/authorization/base.py index 9c732f8..1bdfc83 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/authorization/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/authorization/base.py @@ -1,10 +1,12 @@ """Authorization Service - Pluggable permission checking interface.""" + from abc import ABC, abstractmethod -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING from fastapi import HTTPException, status -from ...config import MEMORYLAYER_AUTHORIZATION_SERVICE, DEFAULT_MEMORYLAYER_AUTHORIZATION_SERVICE -from ...models.authz import AuthorizationDecision, AuthorizationContext + +from ...config import 
DEFAULT_MEMORYLAYER_AUTHORIZATION_SERVICE, MEMORYLAYER_AUTHORIZATION_SERVICE +from ...models.authz import AuthorizationContext, AuthorizationDecision if TYPE_CHECKING: from ...models.auth import RequestContext @@ -24,11 +26,11 @@ class AuthorizationService(ABC): async def require_authorization( self, - ctx: 'RequestContext', + ctx: "RequestContext", resource: str, action: str, - resource_id: Optional[str] = None, - workspace_id: Optional[str] = None, + resource_id: str | None = None, + workspace_id: str | None = None, ) -> None: """Check authorization and raise HTTPException(403) if denied. @@ -57,15 +59,12 @@ async def require_authorization( resource=resource, action=action, resource_id=resource_id, - metadata=getattr(ctx, 'metadata', None) or {}, + metadata=getattr(ctx, "metadata", None) or {}, ) decision = await self.authorize(authz_ctx) if decision == AuthorizationDecision.DENY: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail=f"Access denied to {resource}" - ) + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"Access denied to {resource}") @abstractmethod async def authorize(self, context: AuthorizationContext) -> AuthorizationDecision: @@ -80,11 +79,7 @@ async def authorize(self, context: AuthorizationContext) -> AuthorizationDecisio pass @abstractmethod - async def get_allowed_workspaces( - self, - tenant_id: str, - user_id: str - ) -> list[str]: + async def get_allowed_workspaces(self, tenant_id: str, user_id: str) -> list[str]: """Get list of workspace IDs user can access. Args: @@ -97,12 +92,7 @@ async def get_allowed_workspaces( pass @abstractmethod - async def get_user_role( - self, - tenant_id: str, - workspace_id: str, - user_id: str - ) -> Optional[str]: + async def get_user_role(self, tenant_id: str, workspace_id: str, user_id: str) -> str | None: """Get user's role in a workspace. 
Args: diff --git a/memorylayer-core-python/src/memorylayer_server/services/authorization/default.py b/memorylayer-core-python/src/memorylayer_server/services/authorization/default.py index 8137bcf..ab55fb2 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/authorization/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/authorization/default.py @@ -1,15 +1,12 @@ """Open permissions authorization - allows everything (OSS default).""" + from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import ( - AuthorizationService, - AuthorizationServicePluginBase -) -from ...models.authz import AuthorizationDecision, AuthorizationContext +from ...models.authz import AuthorizationContext, AuthorizationDecision +from .base import AuthorizationService, AuthorizationServicePluginBase class OpenPermissionsAuthorizationService(AuthorizationService): @@ -26,32 +23,23 @@ def __init__(self, v: Variables = None): async def authorize(self, context: AuthorizationContext) -> AuthorizationDecision: """Always allow - OSS default.""" self.logger.debug( - "Authorization check (allow-all): resource=%s action=%s workspace=%s", - context.resource, context.action, context.workspace_id + "Authorization check (allow-all): resource=%s action=%s workspace=%s", context.resource, context.action, context.workspace_id ) return AuthorizationDecision.ALLOW - async def get_allowed_workspaces( - self, - tenant_id: str, - user_id: str - ) -> list[str]: + async def get_allowed_workspaces(self, tenant_id: str, user_id: str) -> list[str]: """Return wildcard - all workspaces allowed.""" return ["*"] - async def get_user_role( - self, - tenant_id: str, - workspace_id: str, - user_id: str - ) -> Optional[str]: + async def get_user_role(self, tenant_id: str, workspace_id: str, user_id: str) -> str | None: """Return admin role - full access in OSS mode.""" return "admin" 
class OpenPermissionsAuthorizationPlugin(AuthorizationServicePluginBase): """Plugin for open permissions authorization.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> AuthorizationService: return OpenPermissionsAuthorizationService(v=v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/cache/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/cache/__init__.py index df25350..0faa62b 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/cache/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/cache/__init__.py @@ -1,12 +1,13 @@ """Cache service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( + EXT_CACHE_SERVICE, CacheService, CacheServicePluginBase, - EXT_CACHE_SERVICE, ) -from scitrera_app_framework import Variables, get_extension - def get_cache_service(v: Variables = None) -> CacheService: """Get the cache service instance.""" @@ -14,8 +15,8 @@ def get_cache_service(v: Variables = None) -> CacheService: __all__ = ( - 'CacheService', - 'CacheServicePluginBase', - 'get_cache_service', - 'EXT_CACHE_SERVICE', + "CacheService", + "CacheServicePluginBase", + "get_cache_service", + "EXT_CACHE_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/cache/base.py b/memorylayer-core-python/src/memorylayer_server/services/cache/base.py index c72ffbd..ed942e6 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/cache/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/cache/base.py @@ -1,9 +1,9 @@ """Cache Service - Pluggable caching interface.""" -from abc import ABC, abstractmethod -from typing import Optional, Any -from ...config import MEMORYLAYER_CACHE_SERVICE, DEFAULT_MEMORYLAYER_CACHE_SERVICE +from abc import ABC, abstractmethod +from typing import Any +from ...config import DEFAULT_MEMORYLAYER_CACHE_SERVICE, 
MEMORYLAYER_CACHE_SERVICE from .._constants import EXT_CACHE_SERVICE from .._plugin_factory import make_service_plugin_base @@ -16,7 +16,7 @@ class CacheService(ABC): """ @abstractmethod - async def get(self, key: str) -> Optional[Any]: + async def get(self, key: str) -> Any | None: """Get value from cache. Args: @@ -28,12 +28,7 @@ async def get(self, key: str) -> Optional[Any]: pass @abstractmethod - async def set( - self, - key: str, - value: Any, - ttl_seconds: Optional[int] = None - ) -> bool: + async def set(self, key: str, value: Any, ttl_seconds: int | None = None) -> bool: """Set value in cache. Args: @@ -83,10 +78,10 @@ async def clear_prefix(self, prefix: str) -> int: pass async def get_or_set( - self, - key: str, - factory, - ttl_seconds: Optional[int] = None, + self, + key: str, + factory, + ttl_seconds: int | None = None, ) -> Any: """Get from cache or compute and cache. diff --git a/memorylayer-core-python/src/memorylayer_server/services/cache/lru.py b/memorylayer-core-python/src/memorylayer_server/services/cache/lru.py index b6b5c03..1199ea4 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/cache/lru.py +++ b/memorylayer-core-python/src/memorylayer_server/services/cache/lru.py @@ -1,14 +1,15 @@ """In-memory LRU cache service.""" + import time from logging import Logger -from typing import Optional, Any +from typing import Any from scitrera_app_framework import Variables, get_logger from .base import CacheService, CacheServicePluginBase # Environment variable constants (specific to this implementation) -MEMORYLAYER_CACHE_LRU_MAXSIZE = 'MEMORYLAYER_CACHE_LRU_MAXSIZE' +MEMORYLAYER_CACHE_LRU_MAXSIZE = "MEMORYLAYER_CACHE_LRU_MAXSIZE" DEFAULT_MEMORYLAYER_CACHE_LRU_MAXSIZE = 4096 @@ -20,12 +21,13 @@ class LRUCacheService(CacheService): """ def __init__( - self, - v: Variables = None, - logger: Logger = None, - maxsize: int = DEFAULT_MEMORYLAYER_CACHE_LRU_MAXSIZE, + self, + v: Variables = None, + logger: Logger = None, + maxsize: int = 
DEFAULT_MEMORYLAYER_CACHE_LRU_MAXSIZE, ): from cachetools import LRUCache + self._v = v self._logger = logger or get_logger(v, name=self.__class__.__name__) self._cache: LRUCache = LRUCache(maxsize=maxsize) @@ -43,7 +45,7 @@ def _is_expired(self, key: str) -> bool: age = time.monotonic() - timestamp return age > ttl_seconds - async def get(self, key: str) -> Optional[Any]: + async def get(self, key: str) -> Any | None: """Get value from cache, checking TTL expiration.""" if key not in self._cache: return None @@ -53,12 +55,7 @@ async def get(self, key: str) -> Optional[Any]: return None return self._cache.get(key) - async def set( - self, - key: str, - value: Any, - ttl_seconds: Optional[int] = None - ) -> bool: + async def set(self, key: str, value: Any, ttl_seconds: int | None = None) -> bool: """Set value in cache with optional TTL.""" self._cache[key] = value self._timestamps[key] = (time.monotonic(), ttl_seconds) @@ -94,10 +91,10 @@ async def clear_prefix(self, prefix: str) -> int: return len(keys_to_delete) async def get_or_set( - self, - key: str, - factory, - ttl_seconds: Optional[int] = None, + self, + key: str, + factory, + ttl_seconds: int | None = None, ) -> Any: """Get from cache or compute and cache.""" value = await self.get(key) @@ -112,9 +109,10 @@ async def get_or_set( class LRUCacheServicePlugin(CacheServicePluginBase): """Plugin for LRU cache service.""" - PROVIDER_NAME = 'lru' - def initialize(self, v: Variables, logger: Logger) -> Optional[LRUCacheService]: + PROVIDER_NAME = "lru" + + def initialize(self, v: Variables, logger: Logger) -> LRUCacheService | None: maxsize = v.environ( MEMORYLAYER_CACHE_LRU_MAXSIZE, default=DEFAULT_MEMORYLAYER_CACHE_LRU_MAXSIZE, diff --git a/memorylayer-core-python/src/memorylayer_server/services/cache/noop.py b/memorylayer-core-python/src/memorylayer_server/services/cache/noop.py index 0130a6d..f88275c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/cache/noop.py +++ 
b/memorylayer-core-python/src/memorylayer_server/services/cache/noop.py @@ -1,6 +1,7 @@ """No-op cache service - always returns None (OSS default).""" + from logging import Logger -from typing import Optional, Any +from typing import Any from scitrera_app_framework.api import Variables @@ -10,10 +11,10 @@ class NoOpCacheService(CacheService): """No-op cache service.""" - async def get(self, key: str) -> Optional[Any]: + async def get(self, key: str) -> Any | None: return None - async def set(self, key: str, value: Any, ttl_seconds: Optional[int] = None): + async def set(self, key: str, value: Any, ttl_seconds: int | None = None): return False async def delete(self, key: str) -> bool: @@ -25,14 +26,15 @@ async def exists(self, key: str) -> bool: async def clear_prefix(self, prefix: str) -> int: return 0 - async def get_or_set(self, key: str, factory, ttl_seconds: Optional[int] = None) -> Any: + async def get_or_set(self, key: str, factory, ttl_seconds: int | None = None) -> Any: value = await factory() return value class NoOpCacheServicePlugin(CacheServicePluginBase): """Plugin for no cache service.""" - PROVIDER_NAME = 'noop' - def initialize(self, v: Variables, logger: Logger) -> Optional[CacheService]: + PROVIDER_NAME = "noop" + + def initialize(self, v: Variables, logger: Logger) -> CacheService | None: return NoOpCacheService() diff --git a/memorylayer-core-python/src/memorylayer_server/services/chat/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/chat/__init__.py index a801697..149b024 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/chat/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/chat/__init__.py @@ -1,4 +1,5 @@ """Chat history service — thread and message management with memory decomposition.""" -from .base import ChatService, ChatServicePluginBase, EXT_CHAT_SERVICE + +from .base import EXT_CHAT_SERVICE, ChatService, ChatServicePluginBase __all__ = ["ChatService", 
"ChatServicePluginBase", "EXT_CHAT_SERVICE"] diff --git a/memorylayer-core-python/src/memorylayer_server/services/chat/base.py b/memorylayer-core-python/src/memorylayer_server/services/chat/base.py index 887c3aa..e146396 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/chat/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/chat/base.py @@ -4,17 +4,17 @@ Manages conversation threads and their messages, with automatic decomposition of chat history into long-term memories via background tasks. """ + import logging from abc import ABC, abstractmethod -from typing import Optional -from ...config import MEMORYLAYER_CHAT_SERVICE, DEFAULT_MEMORYLAYER_CHAT_SERVICE +from ...config import DEFAULT_MEMORYLAYER_CHAT_SERVICE, MEMORYLAYER_CHAT_SERVICE from ...models.chat import ( - ChatThread, + AppendMessagesInput, ChatMessage, + ChatThread, ChatThreadWithMessages, CreateThreadInput, - AppendMessagesInput, DecompositionResult, ) from .._constants import EXT_CHAT_SERVICE, EXT_STORAGE_BACKEND, EXT_TASK_SERVICE @@ -28,93 +28,93 @@ class ChatService(ABC): @abstractmethod async def create_thread( - self, - workspace_id: str, - tenant_id: str, - input: CreateThreadInput, + self, + workspace_id: str, + tenant_id: str, + input: CreateThreadInput, ) -> ChatThread: """Create a new chat thread.""" pass @abstractmethod async def get_thread( - self, - workspace_id: str, - thread_id: str, - ) -> Optional[ChatThread]: + self, + workspace_id: str, + thread_id: str, + ) -> ChatThread | None: """Get thread metadata by ID.""" pass @abstractmethod async def list_threads( - self, - workspace_id: str, - user_id: Optional[str] = None, - limit: int = 50, - offset: int = 0, + self, + workspace_id: str, + user_id: str | None = None, + limit: int = 50, + offset: int = 0, ) -> list[ChatThread]: """List threads in a workspace, optionally filtered by user.""" pass @abstractmethod async def update_thread( - self, - workspace_id: str, - thread_id: str, - **updates, - ) -> 
Optional[ChatThread]: + self, + workspace_id: str, + thread_id: str, + **updates, + ) -> ChatThread | None: """Update thread fields (e.g. title, metadata).""" pass @abstractmethod async def delete_thread( - self, - workspace_id: str, - thread_id: str, + self, + workspace_id: str, + thread_id: str, ) -> bool: """Delete a thread and all its messages.""" pass @abstractmethod async def append_messages( - self, - workspace_id: str, - thread_id: str, - input: AppendMessagesInput, + self, + workspace_id: str, + thread_id: str, + input: AppendMessagesInput, ) -> list[ChatMessage]: """Append messages to a thread. Returns the created messages with IDs and indexes.""" pass @abstractmethod async def get_messages( - self, - workspace_id: str, - thread_id: str, - limit: int = 100, - offset: int = 0, - after_index: Optional[int] = None, - order: str = "asc", + self, + workspace_id: str, + thread_id: str, + limit: int = 100, + offset: int = 0, + after_index: int | None = None, + order: str = "asc", ) -> list[ChatMessage]: """Get messages from a thread with pagination.""" pass @abstractmethod async def get_thread_with_messages( - self, - workspace_id: str, - thread_id: str, - limit: int = 100, - offset: int = 0, - order: str = "asc", - ) -> Optional[ChatThreadWithMessages]: + self, + workspace_id: str, + thread_id: str, + limit: int = 100, + offset: int = 0, + order: str = "asc", + ) -> ChatThreadWithMessages | None: """Get thread metadata with messages inlined.""" pass @abstractmethod async def trigger_decomposition( - self, - workspace_id: str, - thread_id: str, + self, + workspace_id: str, + thread_id: str, ) -> DecompositionResult: """Trigger on-demand memory decomposition for unprocessed messages.""" pass diff --git a/memorylayer-core-python/src/memorylayer_server/services/chat/default.py b/memorylayer-core-python/src/memorylayer_server/services/chat/default.py index e996a6d..87f098d 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/chat/default.py +++ 
b/memorylayer-core-python/src/memorylayer_server/services/chat/default.py @@ -4,33 +4,33 @@ Delegates persistence to StorageBackend and schedules decomposition tasks when the unprocessed message threshold is exceeded. """ + import logging -from datetime import datetime, timezone -from typing import Optional +from datetime import UTC, datetime -from scitrera_app_framework import get_logger, get_extension, Variables +from scitrera_app_framework import Variables, get_extension, get_logger -from .base import ChatService, ChatServicePluginBase -from ..storage import StorageBackend -from ..tasks import TaskService -from .._constants import EXT_STORAGE_BACKEND, EXT_TASK_SERVICE from ...config import ( - DEFAULT_TENANT_ID, DEFAULT_CONTEXT_ID, - MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD, + DEFAULT_MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL, DEFAULT_MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD, + DEFAULT_TENANT_ID, MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL, - DEFAULT_MEMORYLAYER_CHAT_AUTO_DECOMPOSE_INTERVAL, + MEMORYLAYER_CHAT_AUTO_DECOMPOSE_THRESHOLD, ) from ...models.chat import ( - ChatThread, + AppendMessagesInput, ChatMessage, + ChatThread, ChatThreadWithMessages, CreateThreadInput, - AppendMessagesInput, DecompositionResult, ) -from ...utils import generate_id, utc_now_iso +from ...utils import generate_id +from .._constants import EXT_STORAGE_BACKEND, EXT_TASK_SERVICE +from ..storage import StorageBackend +from ..tasks import TaskService +from .base import ChatService, ChatServicePluginBase CHAT_DECOMPOSITION_TASK = "chat_decomposition" @@ -39,10 +39,10 @@ class DefaultChatService(ChatService): """Default chat service backed by StorageBackend.""" def __init__( - self, - storage: StorageBackend, - task_service: TaskService, - v: Variables, + self, + storage: StorageBackend, + task_service: TaskService, + v: Variables, ): self.storage = storage self.task_service = task_service @@ -64,13 +64,13 @@ def _auto_decompose_interval(self) -> int: ) async def create_thread( - self, - 
workspace_id: str, - tenant_id: str, - input: CreateThreadInput, + self, + workspace_id: str, + tenant_id: str, + input: CreateThreadInput, ) -> ChatThread: thread_id = input.thread_id or generate_id() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) thread = ChatThread( id=thread_id, @@ -95,10 +95,10 @@ async def create_thread( return result async def get_thread( - self, - workspace_id: str, - thread_id: str, - ) -> Optional[ChatThread]: + self, + workspace_id: str, + thread_id: str, + ) -> ChatThread | None: thread = await self.storage.get_thread(workspace_id, thread_id) if thread and thread.is_expired: self.logger.debug("Thread %s is expired, returning None", thread_id) @@ -106,11 +106,11 @@ async def get_thread( return thread async def list_threads( - self, - workspace_id: str, - user_id: Optional[str] = None, - limit: int = 50, - offset: int = 0, + self, + workspace_id: str, + user_id: str | None = None, + limit: int = 50, + offset: int = 0, ) -> list[ChatThread]: return await self.storage.list_threads( workspace_id=workspace_id, @@ -120,11 +120,11 @@ async def list_threads( ) async def update_thread( - self, - workspace_id: str, - thread_id: str, - **updates, - ) -> Optional[ChatThread]: + self, + workspace_id: str, + thread_id: str, + **updates, + ) -> ChatThread | None: thread = await self.get_thread(workspace_id, thread_id) if not thread: return None @@ -134,9 +134,9 @@ async def update_thread( return result async def delete_thread( - self, - workspace_id: str, - thread_id: str, + self, + workspace_id: str, + thread_id: str, ) -> bool: result = await self.storage.delete_thread(workspace_id, thread_id) if result: @@ -144,10 +144,10 @@ async def delete_thread( return result async def append_messages( - self, - workspace_id: str, - thread_id: str, - input: AppendMessagesInput, + self, + workspace_id: str, + thread_id: str, + input: AppendMessagesInput, ) -> list[ChatMessage]: # Verify thread exists and is not expired thread = await 
self.get_thread(workspace_id, thread_id) @@ -158,7 +158,9 @@ async def append_messages( self.logger.debug( "Appended %d messages to thread %s (new total: %d)", - len(result), thread_id, thread.message_count + len(result), + len(result), + thread_id, + thread.message_count + len(result), ) # Check if we should schedule auto-decomposition @@ -167,13 +169,13 @@ async def append_messages( return result async def get_messages( - self, - workspace_id: str, - thread_id: str, - limit: int = 100, - offset: int = 0, - after_index: Optional[int] = None, - order: str = "asc", + self, + workspace_id: str, + thread_id: str, + limit: int = 100, + offset: int = 0, + after_index: int | None = None, + order: str = "asc", ) -> list[ChatMessage]: return await self.storage.get_messages( workspace_id=workspace_id, @@ -185,13 +187,13 @@ async def get_messages( ) async def get_thread_with_messages( - self, - workspace_id: str, - thread_id: str, - limit: int = 100, - offset: int = 0, - order: str = "asc", - ) -> Optional[ChatThreadWithMessages]: + self, + workspace_id: str, + thread_id: str, + limit: int = 100, + offset: int = 0, + order: str = "asc", + ) -> ChatThreadWithMessages | None: thread = await self.get_thread(workspace_id, thread_id) if not thread: return None @@ -211,9 +213,9 @@ async def get_thread_with_messages( ) async def trigger_decomposition( - self, - workspace_id: str, - thread_id: str, + self, + workspace_id: str, + thread_id: str, ) -> DecompositionResult: thread = await self.get_thread(workspace_id, thread_id) if not thread: @@ -249,11 +251,11 @@ async def trigger_decomposition( ) async def _maybe_schedule_decomposition( - self, - workspace_id: str, - thread_id: str, - thread: ChatThread, - new_message_count: int, + self, + workspace_id: str, + thread_id: str, + thread: ChatThread, + new_message_count: int, ) -> None: """Schedule decomposition if threshold conditions are met.""" new_total = thread.message_count + new_message_count @@ -264,13 +266,14 @@ async def 
_maybe_schedule_decomposition( # Check time interval since last decomposition if thread.last_decomposed_at: - elapsed = (datetime.now(timezone.utc) - thread.last_decomposed_at).total_seconds() + elapsed = (datetime.now(UTC) - thread.last_decomposed_at).total_seconds() if elapsed < self._auto_decompose_interval: return self.logger.info( "Auto-scheduling decomposition for thread %s (%d unprocessed messages)", - thread_id, unprocessed, + thread_id, + unprocessed, ) try: diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/__init__.py index 7acf264..86b9423 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/__init__.py @@ -1,12 +1,13 @@ """Context environment service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( + EXT_CONTEXT_ENVIRONMENT_SERVICE, ContextEnvironmentService, ContextEnvironmentServicePluginBase, - EXT_CONTEXT_ENVIRONMENT_SERVICE, ) -from scitrera_app_framework import Variables, get_extension - def get_context_environment_service(v: Variables = None) -> ContextEnvironmentService: """Get the context environment service instance.""" @@ -14,8 +15,8 @@ def get_context_environment_service(v: Variables = None) -> ContextEnvironmentSe __all__ = ( - 'ContextEnvironmentService', - 'ContextEnvironmentServicePluginBase', - 'get_context_environment_service', - 'EXT_CONTEXT_ENVIRONMENT_SERVICE', + "ContextEnvironmentService", + "ContextEnvironmentServicePluginBase", + "get_context_environment_service", + "EXT_CONTEXT_ENVIRONMENT_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/base.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/base.py index 72db717..45e7e98 100644 --- 
a/memorylayer-core-python/src/memorylayer_server/services/context_environment/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/base.py @@ -3,14 +3,14 @@ The context environment service provides sandboxed Python execution environments tied to sessions, with access to memory recall and LLM queries. """ + from abc import ABC, abstractmethod -from typing import Any, Optional +from typing import Any from ...config import ( - MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE, DEFAULT_MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE, + MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE, ) - from .._constants import EXT_CONTEXT_ENVIRONMENT_SERVICE from .._plugin_factory import make_service_plugin_base @@ -27,7 +27,7 @@ async def execute( self, session_id: str, code: str, - result_var: Optional[str] = None, + result_var: str | None = None, return_result: bool = True, max_return_chars: int = 10_000, ) -> dict: @@ -49,7 +49,7 @@ async def execute( async def inspect( self, session_id: str, - variable: Optional[str] = None, + variable: str | None = None, preview_chars: int = 200, ) -> dict: """Inspect sandbox state or a specific variable. @@ -71,9 +71,9 @@ async def load( var: str, query: str, limit: int = 50, - types: Optional[list[str]] = None, - tags: Optional[list[str]] = None, - min_relevance: Optional[float] = None, + types: list[str] | None = None, + tags: list[str] | None = None, + min_relevance: float | None = None, include_embeddings: bool = False, ) -> dict: """Load memories into the sandbox as a variable. @@ -123,8 +123,8 @@ async def query( session_id: str, prompt: str, variables: list[str], - max_context_chars: Optional[int] = None, - result_var: Optional[str] = None, + max_context_chars: int | None = None, + result_var: str | None = None, ) -> dict: """Send sandbox variables and a prompt to the LLM. 
@@ -145,11 +145,11 @@ async def rlm( self, session_id: str, goal: str, - memory_query: Optional[str] = None, + memory_query: str | None = None, memory_limit: int = 100, max_iterations: int = 10, - variables: Optional[list[str]] = None, - result_var: Optional[str] = None, + variables: list[str] | None = None, + result_var: str | None = None, detail_level: str = "standard", ) -> dict: """Run a Recursive Language Model (RLM) loop. diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/default.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/default.py index aeb2e8c..e3f6fa9 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/default.py @@ -3,39 +3,39 @@ Provides in-memory sandboxed Python environments per session with memory integration, LLM queries, and iterative reasoning loops. """ + import json import sys -from datetime import datetime, timezone +from datetime import UTC, datetime from logging import Logger -from typing import Any, Optional +from typing import Any -from scitrera_app_framework import get_logger, get_extension, Variables +from scitrera_app_framework import Variables, get_logger -from .base import ( - ContextEnvironmentService, - ContextEnvironmentServicePluginBase, - EXT_CONTEXT_ENVIRONMENT_SERVICE, -) -from .executors.base import ExecutorProvider, ExecutionResult -from .hooks import ContextPersistenceHook, NoOpPersistenceHook from ...config import ( - MEMORYLAYER_CONTEXT_EXECUTOR, + DEFAULT_MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, + DEFAULT_MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, DEFAULT_MEMORYLAYER_CONTEXT_EXECUTOR, - MEMORYLAYER_CONTEXT_MAX_OPERATIONS, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS, - MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, DEFAULT_MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, - MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, + 
DEFAULT_MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS, DEFAULT_MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, - MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, DEFAULT_MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, - MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, - MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, - DEFAULT_MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, - DEFAULT_MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, + MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, + MEMORYLAYER_CONTEXT_EXECUTOR, + MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, + MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, + MEMORYLAYER_CONTEXT_MAX_OPERATIONS, + MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, + MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, ) +from .base import ( + ContextEnvironmentService, + ContextEnvironmentServicePluginBase, +) +from .executors.base import ExecutionResult, ExecutorProvider +from .hooks import ContextPersistenceHook, NoOpPersistenceHook def _safe_preview(value: Any, max_chars: int = 200) -> str: @@ -45,7 +45,7 @@ def _safe_preview(value: Any, max_chars: int = 200) -> str: except Exception: s = f"<{type(value).__name__}>" if len(s) > max_chars: - return s[:max_chars] + '...' + return s[:max_chars] + "..." 
return s @@ -60,19 +60,19 @@ def _estimate_size(value: Any) -> int: def _memory_to_dict(memory: Any, include_embeddings: bool = False) -> dict: """Convert a Memory model to a plain dict for sandbox use.""" d = { - 'id': memory.id, - 'content': memory.content, - 'type': str(memory.type.value) if memory.type else None, - 'importance': memory.importance, - 'tags': list(memory.tags) if memory.tags else [], - 'created_at': memory.created_at.isoformat() if memory.created_at else None, + "id": memory.id, + "content": memory.content, + "type": str(memory.type.value) if memory.type else None, + "importance": memory.importance, + "tags": list(memory.tags) if memory.tags else [], + "created_at": memory.created_at.isoformat() if memory.created_at else None, } if memory.metadata: - d['metadata'] = dict(memory.metadata) + d["metadata"] = dict(memory.metadata) if memory.abstract: - d['abstract'] = memory.abstract + d["abstract"] = memory.abstract if include_embeddings and memory.embedding: - d['embedding'] = list(memory.embedding) + d["embedding"] = list(memory.embedding) return d @@ -107,34 +107,48 @@ def __init__( self._env_metadata: dict[str, dict[str, Any]] = {} # Load config - self._max_operations = int(v.get( - MEMORYLAYER_CONTEXT_MAX_OPERATIONS, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS, - )) - self._max_exec_seconds = int(v.get( - MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, - )) - self._max_output_chars = int(v.get( - MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, - )) - self._query_max_tokens = int(v.get( - MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, - DEFAULT_MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, - )) - self._max_memory_bytes = int(v.get( - MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, - )) - self._exec_soft_cap = int(v.get( - MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, - DEFAULT_MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, - )) - self._exec_hard_cap = int(v.get( - 
MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, - DEFAULT_MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, - )) + self._max_operations = int( + v.get( + MEMORYLAYER_CONTEXT_MAX_OPERATIONS, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS, + ) + ) + self._max_exec_seconds = int( + v.get( + MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, + ) + ) + self._max_output_chars = int( + v.get( + MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, + ) + ) + self._query_max_tokens = int( + v.get( + MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, + DEFAULT_MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, + ) + ) + self._max_memory_bytes = int( + v.get( + MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, + ) + ) + self._exec_soft_cap = int( + v.get( + MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, + DEFAULT_MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, + ) + ) + self._exec_hard_cap = int( + v.get( + MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, + DEFAULT_MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, + ) + ) self.logger.info("DefaultContextEnvironmentService initialized") @@ -148,18 +162,18 @@ async def _init_environment(self, session_id: str) -> dict[str, Any]: if restored_state is not None: self._environments[session_id] = restored_state self._env_metadata[session_id] = { - 'created_at': datetime.now(timezone.utc).isoformat(), - 'exec_count': 0, - 'total_operations': 0, - 'restored': True, + "created_at": datetime.now(UTC).isoformat(), + "exec_count": 0, + "total_operations": 0, + "restored": True, } self.logger.info("Restored environment for session %s from persistence hook", session_id) else: self._environments[session_id] = {} self._env_metadata[session_id] = { - 'created_at': datetime.now(timezone.utc).isoformat(), - 'exec_count': 0, - 'total_operations': 0, + "created_at": datetime.now(UTC).isoformat(), + "exec_count": 0, + "total_operations": 0, } self.logger.info("Created environment for session: %s", session_id) @@ -168,7 +182,7 @@ async def 
_init_environment(self, session_id: str) -> dict[str, Any]: def _check_rate_limits(self, session_id: str) -> str | None: """Check rate limits. Returns error message if exceeded, None if ok.""" meta = self._env_metadata.get(session_id, {}) - exec_count = meta.get('exec_count', 0) + exec_count = meta.get("exec_count", 0) if self._exec_hard_cap > 0 and exec_count >= self._exec_hard_cap: return f"Hard execution cap reached: {exec_count} >= {self._exec_hard_cap}" @@ -176,7 +190,9 @@ def _check_rate_limits(self, session_id: str) -> str | None: if self._exec_soft_cap > 0 and exec_count >= self._exec_soft_cap: self.logger.warning( "Soft execution cap reached for session %s: %d >= %d", - session_id, exec_count, self._exec_soft_cap, + session_id, + exec_count, + self._exec_soft_cap, ) return None @@ -190,17 +206,14 @@ def _check_memory_limit(self, session_id: str) -> str | None: total_size = sum(_estimate_size(v) for v in state.values()) if total_size > self._max_memory_bytes: - return ( - f"Memory limit exceeded: {total_size} bytes > " - f"{self._max_memory_bytes} byte limit" - ) + return f"Memory limit exceeded: {total_size} bytes > {self._max_memory_bytes} byte limit" return None async def execute( self, session_id: str, code: str, - result_var: Optional[str] = None, + result_var: str | None = None, return_result: bool = True, max_return_chars: int = 10_000, ) -> dict: @@ -208,14 +221,14 @@ async def execute( # Rate limit check rate_error = self._check_rate_limits(session_id) if rate_error: - return {'output': '', 'result': None, 'error': rate_error, 'variables_changed': []} + return {"output": "", "result": None, "error": rate_error, "variables_changed": []} state = await self._init_environment(session_id) # Memory limit check mem_error = self._check_memory_limit(session_id) if mem_error: - return {'output': '', 'result': None, 'error': mem_error, 'variables_changed': []} + return {"output": "", "result": None, "error": mem_error, "variables_changed": []} 
self.logger.debug("Executing code in session %s: %s", session_id, code[:100]) @@ -229,9 +242,9 @@ async def execute( # Update metadata meta = self._env_metadata[session_id] - meta['exec_count'] = meta.get('exec_count', 0) + 1 - meta['total_operations'] = meta.get('total_operations', 0) + result.operations_count - meta['last_exec_at'] = datetime.now(timezone.utc).isoformat() + meta["exec_count"] = meta.get("exec_count", 0) + 1 + meta["total_operations"] = meta.get("total_operations", 0) + result.operations_count + meta["last_exec_at"] = datetime.now(UTC).isoformat() # Store result in variable if requested if result_var and result.result is not None and result.error is None: @@ -245,23 +258,23 @@ async def execute( # Build response response: dict[str, Any] = { - 'output': result.output, - 'error': result.error, - 'variables_changed': result.variables_changed, + "output": result.output, + "error": result.error, + "variables_changed": result.variables_changed, } if return_result and result.result is not None: preview = _safe_preview(result.result, max_return_chars) - response['result'] = preview + response["result"] = preview else: - response['result'] = None + response["result"] = None return response async def inspect( self, session_id: str, - variable: Optional[str] = None, + variable: str | None = None, preview_chars: int = 200, ) -> dict: """Inspect sandbox state or a specific variable.""" @@ -269,28 +282,28 @@ async def inspect( if variable is not None: if variable not in state: - return {'error': f"Variable '{variable}' not found"} + return {"error": f"Variable '{variable}' not found"} value = state[variable] return { - 'variable': variable, - 'type': type(value).__name__, - 'preview': _safe_preview(value, preview_chars), - 'size_bytes': _estimate_size(value), + "variable": variable, + "type": type(value).__name__, + "preview": _safe_preview(value, preview_chars), + "size_bytes": _estimate_size(value), } # Return overview of all variables variables = {} for key, 
value in state.items(): variables[key] = { - 'type': type(value).__name__, - 'preview': _safe_preview(value, preview_chars), - 'size_bytes': _estimate_size(value), + "type": type(value).__name__, + "preview": _safe_preview(value, preview_chars), + "size_bytes": _estimate_size(value), } return { - 'variable_count': len(variables), - 'variables': variables, - 'total_size_bytes': sum(v['size_bytes'] for v in variables.values()), + "variable_count": len(variables), + "variables": variables, + "total_size_bytes": sum(v["size_bytes"] for v in variables.values()), } async def load( @@ -299,9 +312,9 @@ async def load( var: str, query: str, limit: int = 50, - types: Optional[list[str]] = None, - tags: Optional[list[str]] = None, - min_relevance: Optional[float] = None, + types: list[str] | None = None, + tags: list[str] | None = None, + min_relevance: float | None = None, include_embeddings: bool = False, ) -> dict: """Load memories into the sandbox as a variable.""" @@ -310,18 +323,18 @@ async def load( # Rate limit check rate_error = self._check_rate_limits(session_id) if rate_error: - return {'error': rate_error, 'count': 0} + return {"error": rate_error, "count": 0} try: + from ...models.memory import MemoryType, RecallInput from ..memory import get_memory_service from ..session import get_session_service - from ...models.memory import RecallInput, MemoryType # Resolve the session to get workspace_id session_service = get_session_service(self._v) session = await session_service.get(session_id) if session is None: - return {'error': f"Session not found: {session_id}", 'count': 0} + return {"error": f"Session not found: {session_id}", "count": 0} # Build recall input type_filters = [] @@ -347,10 +360,7 @@ async def load( ) # Convert memories to dicts and store in sandbox - memory_dicts = [ - _memory_to_dict(m, include_embeddings=include_embeddings) - for m in recall_result.memories - ] + memory_dicts = [_memory_to_dict(m, include_embeddings=include_embeddings) for m in 
recall_result.memories] state[var] = memory_dicts # Notify persistence hook @@ -358,21 +368,23 @@ async def load( self.logger.info( "Loaded %d memories into session %s variable '%s'", - len(memory_dicts), session_id, var, + len(memory_dicts), + session_id, + var, ) return { - 'count': len(memory_dicts), - 'variable': var, - 'query': query, - 'total_available': recall_result.total_count, + "count": len(memory_dicts), + "variable": var, + "query": query, + "total_available": recall_result.total_count, } except ImportError as e: - return {'error': f"Memory service not available: {e}", 'count': 0} + return {"error": f"Memory service not available: {e}", "count": 0} except Exception as e: self.logger.error("Failed to load memories for session %s: %s", session_id, e, exc_info=True) - return {'error': f"Memory load failed: {e}", 'count': 0} + return {"error": f"Memory load failed: {e}", "count": 0} async def inject( self, @@ -387,13 +399,13 @@ async def inject( # Rate limit check rate_error = self._check_rate_limits(session_id) if rate_error: - return {'error': rate_error} + return {"error": rate_error} if parse_json and isinstance(value, str): try: value = json.loads(value) except json.JSONDecodeError as e: - return {'error': f"JSON parse error: {e}"} + return {"error": f"JSON parse error: {e}"} state[key] = value @@ -403,9 +415,9 @@ async def inject( self.logger.debug("Injected variable '%s' into session %s", key, session_id) return { - 'variable': key, - 'type': type(value).__name__, - 'preview': _safe_preview(value, 200), + "variable": key, + "type": type(value).__name__, + "preview": _safe_preview(value, 200), } async def query( @@ -413,8 +425,8 @@ async def query( session_id: str, prompt: str, variables: list[str], - max_context_chars: Optional[int] = None, - result_var: Optional[str] = None, + max_context_chars: int | None = None, + result_var: str | None = None, ) -> dict: """Send sandbox variables and a prompt to the LLM.""" state = await 
self._init_environment(session_id) @@ -433,7 +445,7 @@ async def query( preview = _safe_preview(value, max_chars // max(len(variables), 1)) context_parts.append(f"[{var_name}] ({type(value).__name__}):\n{preview}") - context = '\n\n'.join(context_parts) + context = "\n\n".join(context_parts) llm_service = get_llm_service(self._v) response_text = await llm_service.synthesize( @@ -450,26 +462,26 @@ async def query( self.logger.info("LLM query completed for session %s", session_id) return { - 'response': response_text, - 'variables_used': variables, - 'result_var': result_var, + "response": response_text, + "variables_used": variables, + "result_var": result_var, } except ImportError as e: - return {'error': f"LLM service not available: {e}"} + return {"error": f"LLM service not available: {e}"} except Exception as e: self.logger.error("LLM query failed for session %s: %s", session_id, e, exc_info=True) - return {'error': f"LLM query failed: {e}"} + return {"error": f"LLM query failed: {e}"} async def rlm( self, session_id: str, goal: str, - memory_query: Optional[str] = None, + memory_query: str | None = None, memory_limit: int = 100, max_iterations: int = 10, - variables: Optional[list[str]] = None, - result_var: Optional[str] = None, + variables: list[str] | None = None, + result_var: str | None = None, detail_level: str = "standard", ) -> dict: """Run a Recursive Language Model (RLM) loop. 
@@ -498,10 +510,10 @@ async def status(self, session_id: str) -> dict: """Get the status of a session's sandbox environment.""" if session_id not in self._environments: return { - 'exists': False, - 'variable_count': 0, - 'total_size_bytes': 0, - 'metadata': {}, + "exists": False, + "variable_count": 0, + "total_size_bytes": 0, + "metadata": {}, } state = self._environments[session_id] @@ -510,12 +522,12 @@ async def status(self, session_id: str) -> dict: total_size = sum(_estimate_size(v) for v in state.values()) return { - 'exists': True, - 'variable_count': len(state), - 'variables': list(state.keys()), - 'total_size_bytes': total_size, - 'memory_limit_bytes': self._max_memory_bytes, - 'metadata': meta, + "exists": True, + "variable_count": len(state), + "variables": list(state.keys()), + "total_size_bytes": total_size, + "memory_limit_bytes": self._max_memory_bytes, + "metadata": meta, } async def cleanup_environment(self, session_id: str) -> None: @@ -542,7 +554,8 @@ async def checkpoint(self, session_id: str) -> None: class DefaultContextEnvironmentServicePlugin(ContextEnvironmentServicePluginBase): """Plugin for the default context environment service.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> ContextEnvironmentService: """Initialize the default context environment service.""" @@ -552,26 +565,26 @@ def initialize(self, v: Variables, logger: Logger) -> ContextEnvironmentService: ) executor: ExecutorProvider - if executor_type == 'smolagents': + if executor_type == "smolagents": try: from .executors.smolagents_executor import SmolagentsExecutor + executor = SmolagentsExecutor() logger.info("Using smolagents executor for context environments") except ImportError: - logger.warning( - "smolagents not available, falling back to restricted executor" - ) + logger.warning("smolagents not available, falling back to restricted executor") from .executors.restricted import RestrictedExecutor + 
executor = RestrictedExecutor() - elif executor_type == 'restricted': + elif executor_type == "restricted": from .executors.restricted import RestrictedExecutor + executor = RestrictedExecutor() logger.info("Using restricted executor for context environments") else: - logger.warning( - "Unknown executor type '%s', falling back to restricted", executor_type - ) + logger.warning("Unknown executor type '%s', falling back to restricted", executor_type) from .executors.restricted import RestrictedExecutor + executor = RestrictedExecutor() return DefaultContextEnvironmentService( diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/__init__.py index 5aae2c5..e875540 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/__init__.py @@ -1,4 +1,5 @@ """Context environment executor providers.""" -from .base import ExecutorProvider, ExecutionResult -__all__ = ('ExecutorProvider', 'ExecutionResult') +from .base import ExecutionResult, ExecutorProvider + +__all__ = ("ExecutorProvider", "ExecutionResult") diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/base.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/base.py index 6a10b44..72d8290 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/base.py @@ -1,4 +1,5 @@ """Base executor provider interface for context environment sandboxes.""" + from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Any diff --git 
a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/restricted.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/restricted.py index 4d62da6..84d97c6 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/restricted.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/restricted.py @@ -13,49 +13,52 @@ - import, for/while loops, def/class, exec/eval - File I/O, any module access """ + import ast import io -import sys import time from contextlib import redirect_stdout from typing import Any -from .base import ExecutorProvider, ExecutionResult - +from .base import ExecutionResult, ExecutorProvider # Safe built-in functions available in the sandbox _SAFE_BUILTINS = { - 'len': len, - 'sorted': sorted, - 'sum': sum, - 'min': min, - 'max': max, - 'filter': filter, - 'map': map, - 'list': list, - 'dict': dict, - 'set': set, - 'tuple': tuple, - 'str': str, - 'int': int, - 'float': float, - 'bool': bool, - 'abs': abs, - 'round': round, - 'enumerate': enumerate, - 'zip': zip, - 'range': range, - 'type': lambda *args: (_ for _ in ()).throw(TypeError("type() with multiple arguments is not allowed in restricted mode")) if len(args) != 1 else type(args[0]), - 'isinstance': isinstance, - 'any': any, - 'all': all, - 'reversed': reversed, - 'hash': hash, - 'repr': repr, - 'print': print, - 'None': None, - 'True': True, - 'False': False, + "len": len, + "sorted": sorted, + "sum": sum, + "min": min, + "max": max, + "filter": filter, + "map": map, + "list": list, + "dict": dict, + "set": set, + "tuple": tuple, + "str": str, + "int": int, + "float": float, + "bool": bool, + "abs": abs, + "round": round, + "enumerate": enumerate, + "zip": zip, + "range": range, + "type": lambda *args: ( + (_ for _ in ()).throw(TypeError("type() with multiple arguments is not allowed in restricted mode")) + if len(args) != 1 + else type(args[0]) 
+ ), + "isinstance": isinstance, + "any": any, + "all": all, + "reversed": reversed, + "hash": hash, + "repr": repr, + "print": print, + "None": None, + "True": True, + "False": False, } # AST node types that are allowed @@ -156,9 +159,7 @@ def _check_node(self, node: ast.AST) -> None: return if isinstance(node, _ALLOWED_EXPR_NODES): return - self.errors.append( - f"Disallowed syntax: {type(node).__name__} at line {getattr(node, 'lineno', '?')}" - ) + self.errors.append(f"Disallowed syntax: {type(node).__name__} at line {getattr(node, 'lineno', '?')}") _DUNDER_ALLOWLIST = frozenset({"__name__", "__doc__", "__len__", "__getitem__", "__contains__"}) @@ -169,9 +170,7 @@ def generic_visit(self, node: ast.AST) -> None: if isinstance(node, ast.Attribute): attr = node.attr if attr.startswith("__") and attr.endswith("__") and attr not in self._DUNDER_ALLOWLIST: - self.errors.append( - f"Disallowed dunder attribute access: {attr} at line {getattr(node, 'lineno', '?')}" - ) + self.errors.append(f"Disallowed dunder attribute access: {attr} at line {getattr(node, 'lineno', '?')}") super().generic_visit(node) @@ -211,14 +210,14 @@ async def execute( """ code = code.strip() if not code: - return ExecutionResult(output='', result=None, error=None) + return ExecutionResult(output="", result=None, error=None) # Parse and validate AST try: - tree = ast.parse(code, mode='exec') + tree = ast.parse(code, mode="exec") except SyntaxError as e: return ExecutionResult( - output='', + output="", result=None, error=f"Syntax error: {e}", ) @@ -227,7 +226,7 @@ async def execute( node_count = sum(1 for _ in ast.walk(tree)) if node_count > max_operations: return ExecutionResult( - output='', + output="", result=None, error=f"Code complexity exceeds limit: {node_count} nodes > {max_operations} max", operations_count=node_count, @@ -237,14 +236,14 @@ async def execute( errors = _validate_ast(tree) if errors: return ExecutionResult( - output='', + output="", result=None, - error='; '.join(errors), 
+ error="; ".join(errors), operations_count=node_count, ) # Build execution namespace with safe builtins and current state - namespace = {'__builtins__': _SAFE_BUILTINS.copy()} + namespace = {"__builtins__": _SAFE_BUILTINS.copy()} namespace.update(state) # Track which keys existed before execution @@ -264,7 +263,7 @@ async def execute( # Execute all but last statement module_head = ast.Module(body=stmts[:-1], type_ignores=[]) ast.fix_missing_locations(module_head) - compiled_head = compile(module_head, '', 'exec') + compiled_head = compile(module_head, "", "exec") with redirect_stdout(stdout_capture): exec(compiled_head, namespace) # noqa: S102 @@ -282,7 +281,7 @@ async def execute( # Evaluate last expression for its value expr_node = ast.Expression(body=stmts[-1].value) ast.fix_missing_locations(expr_node) - compiled_expr = compile(expr_node, '', 'eval') + compiled_expr = compile(expr_node, "", "eval") with redirect_stdout(stdout_capture): last_expr_result = eval(compiled_expr, namespace) # noqa: S307 @@ -291,14 +290,14 @@ async def execute( # Single expression - evaluate for result expr_node = ast.Expression(body=stmts[0].value) ast.fix_missing_locations(expr_node) - compiled_expr = compile(expr_node, '', 'eval') + compiled_expr = compile(expr_node, "", "eval") with redirect_stdout(stdout_capture): last_expr_result = eval(compiled_expr, namespace) # noqa: S307 else: # All statements, no expression result - compiled = compile(tree, '', 'exec') + compiled = compile(tree, "", "exec") with redirect_stdout(stdout_capture): exec(compiled, namespace) # noqa: S102 @@ -323,14 +322,14 @@ async def execute( # Sync namespace changes back to state variables_changed: list[str] = [] for key, value in namespace.items(): - if key == '__builtins__': + if key == "__builtins__": continue if key not in keys_before or state.get(key) is not value: state[key] = value variables_changed.append(key) # Track deletions - keys_after = {k for k in namespace if k != '__builtins__'} + keys_after 
= {k for k in namespace if k != "__builtins__"} for deleted_key in keys_before - keys_after: if deleted_key in state: del state[deleted_key] diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/smolagents_executor.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/smolagents_executor.py index 623a2dd..5c3fcc7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/smolagents_executor.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/executors/smolagents_executor.py @@ -3,29 +3,30 @@ Wraps the smolagents LocalPythonExecutor to provide a sandboxed Python execution environment with controlled imports and built-in functions. """ + import asyncio import logging from typing import Any -from .base import ExecutorProvider, ExecutionResult +from .base import ExecutionResult, ExecutorProvider logger = logging.getLogger(__name__) # Modules allowed for import in the sandbox _IMPORT_WHITELIST = [ - 'collections', - 'datetime', - 'itertools', - 'math', - 'queue', - 'random', - 're', - 'stat', - 'statistics', - 'time', - 'unicodedata', - 'json', - 'functools', + "collections", + "datetime", + "itertools", + "math", + "queue", + "random", + "re", + "stat", + "statistics", + "time", + "unicodedata", + "json", + "functools", ] @@ -67,8 +68,7 @@ def _ensure_executor(self) -> None: from smolagents.local_python_executor import LocalPythonExecutor except ImportError as exc: raise RuntimeError( - "smolagents package is required for SmolagentsExecutor. " - "Install it with: pip install 'smolagents>=1.0,<2.0'" + "smolagents package is required for SmolagentsExecutor. 
Install it with: pip install 'smolagents>=1.0,<2.0'" ) from exc all_imports = list(set(_IMPORT_WHITELIST) | set(self._additional_imports)) @@ -108,7 +108,7 @@ async def execute( """ code = code.strip() if not code: - return ExecutionResult(output='', result=None, error=None) + return ExecutionResult(output="", result=None, error=None) self._ensure_executor() @@ -130,45 +130,43 @@ async def execute( # Run in thread pool to avoid blocking the event loop loop = asyncio.get_event_loop() try: - code_output = await loop.run_in_executor( - None, self._executor, code - ) + code_output = await loop.run_in_executor(None, self._executor, code) except InterpreterError as e: return ExecutionResult( - output='', + output="", result=None, error=f"InterpreterError: {e}", ) except TimeoutError: return ExecutionResult( - output='', + output="", result=None, error=f"Execution timed out after {max_seconds}s", ) except Exception as e: return ExecutionResult( - output='', + output="", result=None, error=f"{type(e).__name__}: {e}", ) # Extract results - output = code_output.logs or '' + output = code_output.logs or "" if len(output) > max_output_chars: output = output[:max_output_chars] result_value = code_output.output # Track operations count from smolagents internal counter - ops_counter = self._executor.state.get('_operations_count', {}) - operations_count = ops_counter.get('counter', 0) if isinstance(ops_counter, dict) else 0 + ops_counter = self._executor.state.get("_operations_count", {}) + operations_count = ops_counter.get("counter", 0) if isinstance(ops_counter, dict) else 0 # Sync state changes back variables_changed: list[str] = [] executor_state = self._executor.state for key in list(executor_state.keys()): - if key.startswith('_'): + if key.startswith("_"): continue if key not in keys_before: # New variable @@ -183,7 +181,7 @@ async def execute( state[key] = executor_state[key] # Track deletions - executor_user_keys = {k for k in executor_state if not k.startswith('_')} + 
executor_user_keys = {k for k in executor_state if not k.startswith("_")} for deleted_key in keys_before - executor_user_keys: if deleted_key in state: del state[deleted_key] diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/hooks.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/hooks.py index d3800b2..268e418 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/hooks.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/hooks.py @@ -1,4 +1,5 @@ """Persistence hooks for context environment state.""" + from abc import ABC @@ -24,4 +25,5 @@ async def on_session_restore(self, session_id: str) -> dict | None: class NoOpPersistenceHook(ContextPersistenceHook): """Default no-op persistence hook.""" + pass diff --git a/memorylayer-core-python/src/memorylayer_server/services/context_environment/rlm.py b/memorylayer-core-python/src/memorylayer_server/services/context_environment/rlm.py index cd73b80..2616a66 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/context_environment/rlm.py +++ b/memorylayer-core-python/src/memorylayer_server/services/context_environment/rlm.py @@ -6,16 +6,17 @@ 3. LLM examines results and decides if the goal is met 4. 
Repeat until goal met or max iterations reached """ + import time -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any -from scitrera_app_framework import get_logger, Variables +from scitrera_app_framework import Variables, get_logger from ...config import ( - MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, + DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, - DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, + MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, ) if TYPE_CHECKING: @@ -75,7 +76,7 @@ def _summarize_state(state: dict[str, Any], max_chars: int = 5000) -> str: except Exception: preview = f"<{type(value).__name__}>" if len(preview) > 500: - preview = preview[:500] + '...' + preview = preview[:500] + "..." line = f" {key} ({type(value).__name__}): {preview}" if total_chars + len(line) > max_chars: @@ -84,7 +85,7 @@ def _summarize_state(state: dict[str, Any], max_chars: int = 5000) -> str: parts.append(line) total_chars += len(line) - return '\n'.join(parts) if parts else ' (empty)' + return "\n".join(parts) if parts else " (empty)" class RLMRunner: @@ -92,31 +93,35 @@ class RLMRunner: def __init__( self, - service: 'DefaultContextEnvironmentService', + service: "DefaultContextEnvironmentService", v: Variables, ): self._service = service self._v = v self.logger = get_logger(v, name=self.__class__.__name__) - self._max_iterations = int(v.get( - MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, - DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, - )) - self._max_exec_seconds = int(v.get( - MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, - DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, - )) + self._max_iterations = int( + v.get( + MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, + DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, + ) + ) + self._max_exec_seconds = int( + v.get( + MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, + DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, + ) + ) 
async def run( self, session_id: str, goal: str, - memory_query: Optional[str] = None, + memory_query: str | None = None, memory_limit: int = 100, max_iterations: int = 10, - variables: Optional[list[str]] = None, - result_var: Optional[str] = None, + variables: list[str] | None = None, + result_var: str | None = None, detail_level: str = "standard", ) -> dict: """Run the RLM loop. @@ -140,40 +145,44 @@ async def run( self.logger.info( "RLM starting for session %s, goal: %s, max_iterations: %d", - session_id, goal[:80], effective_max, + session_id, + goal[:80], + effective_max, ) # Step 0: Load memories if requested if memory_query: load_result = await self._service.load( session_id=session_id, - var='_memories', + var="_memories", query=memory_query, limit=memory_limit, ) - if load_result.get('error'): + if load_result.get("error"): return { - 'result': None, - 'iterations': 0, - 'trace': [], - 'error': f"Memory load failed: {load_result['error']}", - 'goal_achieved': False, + "result": None, + "iterations": 0, + "trace": [], + "error": f"Memory load failed: {load_result['error']}", + "goal_achieved": False, } self.logger.info( "RLM loaded %d memories for session %s", - load_result.get('count', 0), session_id, + load_result.get("count", 0), + session_id, ) try: from ..llm import get_llm_service + llm_service = get_llm_service(self._v) except Exception as e: return { - 'result': None, - 'iterations': 0, - 'trace': [], - 'error': f"LLM service not available: {e}", - 'goal_achieved': False, + "result": None, + "iterations": 0, + "trace": [], + "error": f"LLM service not available: {e}", + "goal_achieved": False, } goal_achieved = False @@ -184,16 +193,19 @@ async def run( if elapsed > self._max_exec_seconds: self.logger.warning( "RLM timed out for session %s after %.1fs", - session_id, elapsed, + session_id, + elapsed, + ) + trace.append( + { + "iteration": iteration, + "action": "timeout", + "elapsed_seconds": round(elapsed, 1), + } ) - trace.append({ - 'iteration': 
iteration, - 'action': 'timeout', - 'elapsed_seconds': round(elapsed, 1), - }) break - iter_trace: dict[str, Any] = {'iteration': iteration} + iter_trace: dict[str, Any] = {"iteration": iteration} # Get current state summary status_result = await self._service.status(session_id) @@ -204,9 +216,9 @@ async def run( iteration_context = "" if iteration > 0 and trace: last = trace[-1] - if last.get('exec_error'): + if last.get("exec_error"): iteration_context = f"Previous iteration had an error: {last['exec_error']}" - elif last.get('exec_output'): + elif last.get("exec_output"): iteration_context = f"Previous output: {last['exec_output'][:500]}" # Step 1: Ask LLM to generate code @@ -219,7 +231,8 @@ async def run( ) try: - from ...models.llm import LLMRequest, LLMMessage, LLMRole + from ...models.llm import LLMMessage, LLMRequest, LLMRole + plan_request = LLMRequest( messages=[ LLMMessage(role=LLMRole.SYSTEM, content=plan_system), @@ -230,22 +243,22 @@ async def run( plan_response = await llm_service.complete(plan_request) generated_code = plan_response.content.strip() except Exception as e: - iter_trace['error'] = f"LLM plan generation failed: {e}" + iter_trace["error"] = f"LLM plan generation failed: {e}" trace.append(iter_trace) self.logger.error("RLM plan generation failed: %s", e) break # Strip markdown code fences if present - if generated_code.startswith('```'): - lines = generated_code.split('\n') + if generated_code.startswith("```"): + lines = generated_code.split("\n") # Remove first and last fence lines - if lines[0].startswith('```'): + if lines[0].startswith("```"): lines = lines[1:] - if lines and lines[-1].strip() == '```': + if lines and lines[-1].strip() == "```": lines = lines[:-1] - generated_code = '\n'.join(lines) + generated_code = "\n".join(lines) - iter_trace['generated_code'] = generated_code if detail_level != 'minimal' else '(omitted)' + iter_trace["generated_code"] = generated_code if detail_level != "minimal" else "(omitted)" # Step 2: 
Execute the generated code exec_result = await self._service.execute( @@ -253,38 +266,38 @@ async def run( code=generated_code, ) - iter_trace['exec_output'] = exec_result.get('output', '') - iter_trace['exec_error'] = exec_result.get('error') - iter_trace['variables_changed'] = exec_result.get('variables_changed', []) + iter_trace["exec_output"] = exec_result.get("output", "") + iter_trace["exec_error"] = exec_result.get("error") + iter_trace["variables_changed"] = exec_result.get("variables_changed", []) # Check if sandbox set _goal_achieved or _final_result state = self._service._environments.get(session_id, {}) - if state.get('_goal_achieved'): + if state.get("_goal_achieved"): goal_achieved = True - final_result = state.get('_final_result') - iter_trace['action'] = 'goal_achieved_by_code' + final_result = state.get("_final_result") + iter_trace["action"] = "goal_achieved_by_code" trace.append(iter_trace) break - if exec_result.get('error'): - iter_trace['action'] = 'exec_error' + if exec_result.get("error"): + iter_trace["action"] = "exec_error" trace.append(iter_trace) # Continue - LLM will see the error and adjust continue # Step 3: Ask LLM to evaluate progress - history_summary = '\n'.join( + history_summary = "\n".join( f" Iteration {t['iteration']}: " - + (f"error={t.get('exec_error')}" if t.get('exec_error') else f"changed={t.get('variables_changed', [])}") + + (f"error={t.get('exec_error')}" if t.get("exec_error") else f"changed={t.get('variables_changed', [])}") for t in trace[-3:] # Last 3 iterations for context ) - if iter_trace.get('variables_changed'): + if iter_trace.get("variables_changed"): history_summary += f"\n Current iteration: changed={iter_trace['variables_changed']}" eval_system = _EVALUATE_SYSTEM_PROMPT.format( goal=goal, state_summary=_summarize_state(state), - history=history_summary or ' (first iteration)', + history=history_summary or " (first iteration)", ) try: @@ -299,18 +312,18 @@ async def run( eval_response = await 
llm_service.complete(eval_request) evaluation = eval_response.content.strip() except Exception as e: - iter_trace['eval_error'] = str(e) + iter_trace["eval_error"] = str(e) evaluation = "CONTINUE" - iter_trace['evaluation'] = evaluation - iter_trace['action'] = 'evaluated' + iter_trace["evaluation"] = evaluation + iter_trace["action"] = "evaluated" trace.append(iter_trace) - if evaluation.startswith('ACHIEVED'): + if evaluation.startswith("ACHIEVED"): goal_achieved = True - final_result = state.get('_final_result') + final_result = state.get("_final_result") break - elif evaluation.startswith('FAILED'): + elif evaluation.startswith("FAILED"): self.logger.info("RLM goal failed: %s", evaluation) break @@ -318,7 +331,7 @@ async def run( if final_result is None and goal_achieved: # Try to find a meaningful result variable state = self._service._environments.get(session_id, {}) - final_result = state.get('_final_result') or state.get('result') + final_result = state.get("_final_result") or state.get("result") # Store result if requested if result_var and final_result is not None: @@ -328,26 +341,29 @@ async def run( self.logger.info( "RLM completed for session %s: achieved=%s, iterations=%d, elapsed=%.1fs", - session_id, goal_achieved, len(trace), total_elapsed, + session_id, + goal_achieved, + len(trace), + total_elapsed, ) # Clean trace for minimal detail level - if detail_level == 'minimal': - trace = [{'iteration': t['iteration'], 'action': t.get('action', 'unknown')} for t in trace] + if detail_level == "minimal": + trace = [{"iteration": t["iteration"], "action": t.get("action", "unknown")} for t in trace] result_str = None if final_result is not None: try: result_str = repr(final_result) if len(result_str) > 10_000: - result_str = result_str[:10_000] + '...' + result_str = result_str[:10_000] + "..." 
except Exception: result_str = f"<{type(final_result).__name__}>" return { - 'result': result_str, - 'iterations': len(trace), - 'trace': trace, - 'error': None, - 'goal_achieved': goal_achieved, + "result": result_str, + "iterations": len(trace), + "trace": trace, + "error": None, + "goal_achieved": goal_achieved, } diff --git a/memorylayer-core-python/src/memorylayer_server/services/contradiction/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/contradiction/__init__.py index bc214a9..595cb9d 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/contradiction/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/contradiction/__init__.py @@ -1,10 +1,11 @@ """Contradiction service package.""" + from scitrera_app_framework import Variables, get_extension from .base import ( + EXT_CONTRADICTION_SERVICE, ContradictionService, ContradictionServicePluginBase, - EXT_CONTRADICTION_SERVICE, ) @@ -14,8 +15,8 @@ def get_contradiction_service(v: Variables = None) -> ContradictionService: __all__ = ( - 'ContradictionService', - 'ContradictionServicePluginBase', - 'get_contradiction_service', - 'EXT_CONTRADICTION_SERVICE', + "ContradictionService", + "ContradictionServicePluginBase", + "get_contradiction_service", + "EXT_CONTRADICTION_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/contradiction/base.py b/memorylayer-core-python/src/memorylayer_server/services/contradiction/base.py index a0dca30..0f9a492 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/contradiction/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/contradiction/base.py @@ -1,36 +1,37 @@ """Contradiction Service - Base interface and plugin.""" + from abc import ABC, abstractmethod from dataclasses import dataclass, field -from datetime import datetime, timezone -from typing import Optional +from datetime import UTC, datetime -from ...config import MEMORYLAYER_CONTRADICTION_PROVIDER, 
DEFAULT_MEMORYLAYER_CONTRADICTION_PROVIDER -from .._constants import EXT_STORAGE_BACKEND, EXT_CONTRADICTION_SERVICE -from .._plugin_factory import make_service_plugin_base +from ...config import DEFAULT_MEMORYLAYER_CONTRADICTION_PROVIDER, MEMORYLAYER_CONTRADICTION_PROVIDER from ...utils import generate_id +from .._constants import EXT_CONTRADICTION_SERVICE, EXT_STORAGE_BACKEND +from .._plugin_factory import make_service_plugin_base # Valid contradiction types -CONTRADICTION_TYPE_NEGATION = 'negation' -CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT = 'semantic_value_conflict' -CONTRADICTION_TYPE_TEMPORAL_SUPERSESSION = 'temporal_supersession' -CONTRADICTION_TYPE_SCOPE_CONFLICT = 'scope_conflict' +CONTRADICTION_TYPE_NEGATION = "negation" +CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT = "semantic_value_conflict" +CONTRADICTION_TYPE_TEMPORAL_SUPERSESSION = "temporal_supersession" +CONTRADICTION_TYPE_SCOPE_CONFLICT = "scope_conflict" @dataclass class ContradictionRecord: """A detected contradiction between two memories.""" + id: str = field(default_factory=lambda: generate_id("contra")) - workspace_id: str = '' - memory_a_id: str = '' - memory_b_id: str = '' - contradiction_type: Optional[str] = None # e.g., "negation", "semantic_value_conflict", "temporal_supersession", "scope_conflict" + workspace_id: str = "" + memory_a_id: str = "" + memory_b_id: str = "" + contradiction_type: str | None = None # e.g., "negation", "semantic_value_conflict", "temporal_supersession", "scope_conflict" confidence: float = 0.0 # 0.0-1.0 - detection_method: str = '' # e.g., "negation_pattern", "embedding_similarity", "entity_value_extraction" - detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) - resolved_at: Optional[datetime] = None - resolution: Optional[str] = None # e.g., "keep_a", "keep_b", "keep_both", "merge" - merged_content: Optional[str] = None - newer_memory_id: Optional[str] = None # Temporal ordering: which memory is more recent + detection_method: 
str = "" # e.g., "negation_pattern", "embedding_similarity", "entity_value_extraction" + detected_at: datetime = field(default_factory=lambda: datetime.now(UTC)) + resolved_at: datetime | None = None + resolution: str | None = None # e.g., "keep_a", "keep_b", "keep_both", "merge" + merged_content: str | None = None + newer_memory_id: str | None = None # Temporal ordering: which memory is more recent class ContradictionService(ABC): @@ -68,8 +69,8 @@ async def resolve( workspace_id: str, contradiction_id: str, resolution: str, - merged_content: Optional[str] = None, - ) -> Optional[ContradictionRecord]: + merged_content: str | None = None, + ) -> ContradictionRecord | None: """Resolve a contradiction. Args: @@ -108,7 +109,7 @@ async def check_semantic_conflict( self, memory_a, memory_b, - ) -> Optional[ContradictionRecord]: + ) -> ContradictionRecord | None: """Check if two memories have a semantic value conflict. Uses entity-value extraction (regex patterns) and embedding similarity diff --git a/memorylayer-core-python/src/memorylayer_server/services/contradiction/default.py b/memorylayer-core-python/src/memorylayer_server/services/contradiction/default.py index 0426468..b44f78c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/contradiction/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/contradiction/default.py @@ -1,32 +1,47 @@ """Default contradiction service implementation.""" + import re +from datetime import UTC from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import ( - ContradictionService, ContradictionServicePluginBase, ContradictionRecord, - CONTRADICTION_TYPE_NEGATION, CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT, -) +from ...utils import dot_product as _dot_product_util from ..storage import EXT_STORAGE_BACKEND from ..storage.base import StorageBackend -from ...utils import dot_product as 
_dot_product_util +from .base import ( + CONTRADICTION_TYPE_NEGATION, + CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT, + ContradictionRecord, + ContradictionService, + ContradictionServicePluginBase, +) # Negation pairs used for simple textual contradiction detection. # For each pair, if text_a contains one term and text_b contains the other, # a negation-type contradiction is flagged. NEGATION_PAIRS = [ - ("use", "don't use"), ("use", "do not use"), ("use", "avoid"), - ("enable", "disable"), ("add", "remove"), - ("true", "false"), ("always", "never"), - ("should", "should not"), ("should", "shouldn't"), - ("must", "must not"), ("must", "mustn't"), - ("can", "cannot"), ("can", "can't"), - ("is", "is not"), ("is", "isn't"), - ("prefer", "avoid"), ("recommended", "not recommended"), - ("include", "exclude"), ("allow", "deny"), ("allow", "block"), + ("use", "don't use"), + ("use", "do not use"), + ("use", "avoid"), + ("enable", "disable"), + ("add", "remove"), + ("true", "false"), + ("always", "never"), + ("should", "should not"), + ("should", "shouldn't"), + ("must", "must not"), + ("must", "mustn't"), + ("can", "cannot"), + ("can", "can't"), + ("is", "is not"), + ("is", "isn't"), + ("prefer", "avoid"), + ("recommended", "not recommended"), + ("include", "exclude"), + ("allow", "deny"), + ("allow", "block"), ] @@ -78,14 +93,16 @@ async def check_new_memory(self, workspace_id: str, memory_id: str) -> list[Cont memory_b_id=existing_memory.id, contradiction_type=CONTRADICTION_TYPE_NEGATION, confidence=relevance, - detection_method='negation_pattern', + detection_method="negation_pattern", newer_memory_id=newer_id, ) stored = await self._storage.create_contradiction(record) contradictions.append(stored) self.logger.info( "Contradiction detected between %s and %s (confidence=%.2f)", - memory_id, existing_memory.id, relevance, + memory_id, + existing_memory.id, + relevance, ) return contradictions @@ -99,8 +116,8 @@ async def resolve( workspace_id: str, contradiction_id: str, 
resolution: str, - merged_content: Optional[str] = None, - ) -> Optional[ContradictionRecord]: + merged_content: str | None = None, + ) -> ContradictionRecord | None: """Resolve a contradiction by applying the chosen resolution strategy. Args: @@ -117,26 +134,29 @@ async def resolve( self.logger.warning("Contradiction %s not found in workspace %s", contradiction_id, workspace_id) return None - if resolution == 'keep_a': + if resolution == "keep_a": # Soft-delete memory B await self._storage.delete_memory(workspace_id, record.memory_b_id, hard=False) - self.logger.info("Resolved contradiction %s: keeping memory %s, soft-deleted %s", - contradiction_id, record.memory_a_id, record.memory_b_id) + self.logger.info( + "Resolved contradiction %s: keeping memory %s, soft-deleted %s", contradiction_id, record.memory_a_id, record.memory_b_id + ) - elif resolution == 'keep_b': + elif resolution == "keep_b": # Soft-delete memory A await self._storage.delete_memory(workspace_id, record.memory_a_id, hard=False) - self.logger.info("Resolved contradiction %s: keeping memory %s, soft-deleted %s", - contradiction_id, record.memory_b_id, record.memory_a_id) + self.logger.info( + "Resolved contradiction %s: keeping memory %s, soft-deleted %s", contradiction_id, record.memory_b_id, record.memory_a_id + ) - elif resolution == 'merge' and merged_content: + elif resolution == "merge" and merged_content: # Update memory A with merged content, soft-delete memory B await self._storage.update_memory(workspace_id, record.memory_a_id, content=merged_content) await self._storage.delete_memory(workspace_id, record.memory_b_id, hard=False) - self.logger.info("Resolved contradiction %s: merged into %s, soft-deleted %s", - contradiction_id, record.memory_a_id, record.memory_b_id) + self.logger.info( + "Resolved contradiction %s: merged into %s, soft-deleted %s", contradiction_id, record.memory_a_id, record.memory_b_id + ) - elif resolution == 'keep_both': + elif resolution == "keep_both": 
self.logger.info("Resolved contradiction %s: keeping both memories", contradiction_id) else: @@ -144,9 +164,7 @@ async def resolve( return None # Mark contradiction as resolved in storage - return await self._storage.resolve_contradiction( - workspace_id, contradiction_id, resolution, merged_content - ) + return await self._storage.resolve_contradiction(workspace_id, contradiction_id, resolution, merged_content) @staticmethod def _has_negation_pattern(text_a: str, text_b: str) -> bool: @@ -167,8 +185,7 @@ def _has_negation_pattern(text_a: str, text_b: str) -> bool: for term_pos, term_neg in NEGATION_PAIRS: # Check both directions: a has positive and b has negative, or vice versa - if (term_pos in lower_a and term_neg in lower_b) or \ - (term_neg in lower_a and term_pos in lower_b): + if (term_pos in lower_a and term_neg in lower_b) or (term_neg in lower_a and term_pos in lower_b): return True return False @@ -189,7 +206,7 @@ def _extract_entity_values(text: str) -> list[tuple[str, str, str]]: List of (subject, predicate, value) tuples (all lowercased) """ patterns = [ - r'(\w[\w\s]{1,30}?)\s+(is|uses|runs|has|uses|requires|needs)\s+([\w][\w\s\-\.]{0,40})', + r"(\w[\w\s]{1,30}?)\s+(is|uses|runs|has|uses|requires|needs)\s+([\w][\w\s\-\.]{0,40})", ] results = [] lower_text = text.lower() @@ -209,18 +226,18 @@ def _dot_product(vec_a: list[float], vec_b: list[float]) -> float: return _dot_product_util(vec_a, vec_b) @staticmethod - def _determine_newer_memory(memory_a, memory_b) -> Optional[str]: + def _determine_newer_memory(memory_a, memory_b) -> str | None: """Determine which memory is newer based on created_at timestamp. Returns the ID of the newer memory, or None if timestamps are unavailable. 
""" - created_a = getattr(memory_a, 'created_at', None) - created_b = getattr(memory_b, 'created_at', None) + created_a = getattr(memory_a, "created_at", None) + created_b = getattr(memory_b, "created_at", None) if created_a is None or created_b is None: return None return memory_a.id if created_a >= created_b else memory_b.id - async def check_semantic_conflict(self, memory_a, memory_b) -> Optional[ContradictionRecord]: + async def check_semantic_conflict(self, memory_a, memory_b) -> ContradictionRecord | None: """Check if two memories have a semantic value conflict. Detection logic: @@ -236,8 +253,8 @@ async def check_semantic_conflict(self, memory_a, memory_b) -> Optional[Contradi ContradictionRecord if a conflict is found, None otherwise """ # Both memories need embeddings for similarity check - emb_a = getattr(memory_a, 'embedding', None) - emb_b = getattr(memory_b, 'embedding', None) + emb_a = getattr(memory_a, "embedding", None) + emb_b = getattr(memory_b, "embedding", None) if not emb_a or not emb_b: return None @@ -254,9 +271,7 @@ async def check_semantic_conflict(self, memory_a, memory_b) -> Optional[Contradi return None # Build lookup: (subject, predicate) -> value for memory_b - lookup_b: dict[tuple[str, str], str] = { - (subj, pred): val for subj, pred, val in triples_b - } + lookup_b: dict[tuple[str, str], str] = {(subj, pred): val for subj, pred, val in triples_b} for subj_a, pred_a, val_a in triples_a: key = (subj_a, pred_a) @@ -265,17 +280,20 @@ async def check_semantic_conflict(self, memory_a, memory_b) -> Optional[Contradi if val_a != val_b: newer_id = self._determine_newer_memory(memory_a, memory_b) record = ContradictionRecord( - workspace_id=memory_a.workspace_id if hasattr(memory_a, 'workspace_id') else '', + workspace_id=memory_a.workspace_id if hasattr(memory_a, "workspace_id") else "", memory_a_id=memory_a.id, memory_b_id=memory_b.id, contradiction_type=CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT, confidence=similarity, - 
detection_method='entity_value_extraction', + detection_method="entity_value_extraction", newer_memory_id=newer_id, ) self.logger.debug( "Semantic conflict: subject=%r predicate=%r val_a=%r val_b=%r", - subj_a, pred_a, val_a, val_b, + subj_a, + pred_a, + val_a, + val_b, ) return record @@ -303,14 +321,12 @@ async def scan_workspace( # Collect all existing contradiction pairs to avoid duplicates existing = await self._storage.get_unresolved_contradictions(workspace_id, limit=10000) - existing_pairs: set[frozenset] = { - frozenset([c.memory_a_id, c.memory_b_id]) for c in existing - } + existing_pairs: set[frozenset] = {frozenset([c.memory_a_id, c.memory_b_id]) for c in existing} # Get workspace stats to understand scale try: stats = await self._storage.get_workspace_stats(workspace_id) - total_memories = stats.get('total_memories', 0) + total_memories = stats.get("total_memories", 0) except Exception: total_memories = 0 @@ -321,16 +337,17 @@ async def scan_workspace( # Use recent memories as scan seeds - fetch in batches offset = 0 - from datetime import datetime, timezone, timedelta + from datetime import datetime + # Use a far-back date to get all memories - epoch = datetime(2000, 1, 1, tzinfo=timezone.utc) + epoch = datetime(2000, 1, 1, tzinfo=UTC) while True: batch = await self._storage.get_recent_memories( workspace_id, created_after=epoch, limit=batch_size, - detail_level='full', + detail_level="full", offset=offset, ) if not batch: @@ -343,13 +360,13 @@ async def scan_workspace( for item in batch: if isinstance(item, dict): # get_recent_memories returns dicts; fetch the full Memory object - mem_id = item.get('id') + mem_id = item.get("id") if mem_id: mem = await self._storage.get_memory(workspace_id, mem_id, track_access=False) if mem and mem.embedding: memory_objects.append(mem) else: - if getattr(item, 'embedding', None): + if getattr(item, "embedding", None): memory_objects.append(item) for memory in memory_objects: @@ -382,14 +399,16 @@ async def 
scan_workspace( memory_b_id=candidate.id, contradiction_type=CONTRADICTION_TYPE_NEGATION, confidence=relevance, - detection_method='negation_pattern', + detection_method="negation_pattern", newer_memory_id=newer_id, ) stored = await self._storage.create_contradiction(record) new_contradictions.append(stored) self.logger.info( "Scan found negation contradiction: %s vs %s (%.2f)", - memory.id, candidate.id, relevance, + memory.id, + candidate.id, + relevance, ) continue @@ -401,7 +420,9 @@ async def scan_workspace( new_contradictions.append(stored) self.logger.info( "Scan found semantic conflict: %s vs %s (%.2f)", - memory.id, candidate.id, relevance, + memory.id, + candidate.id, + relevance, ) # If batch was smaller than batch_size, we've reached the end @@ -410,14 +431,16 @@ async def scan_workspace( self.logger.info( "Workspace scan complete for %s: found %d new contradictions", - workspace_id, len(new_contradictions), + workspace_id, + len(new_contradictions), ) return new_contradictions class DefaultContradictionServicePlugin(ContradictionServicePluginBase): """Plugin that creates the default contradiction service.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> ContradictionService: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/decay/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/decay/__init__.py index b4e72c2..5878c83 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/decay/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/decay/__init__.py @@ -1,10 +1,11 @@ """Decay service package.""" + from scitrera_app_framework import Variables, get_extension from .base import ( + EXT_DECAY_SERVICE, DecayService, DecayServicePluginBase, - EXT_DECAY_SERVICE, ) @@ -14,8 +15,8 @@ def get_decay_service(v: Variables = None) -> DecayService: __all__ = ( - 
'DecayService', - 'DecayServicePluginBase', - 'get_decay_service', - 'EXT_DECAY_SERVICE', + "DecayService", + "DecayServicePluginBase", + "get_decay_service", + "EXT_DECAY_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/decay/base.py b/memorylayer-core-python/src/memorylayer_server/services/decay/base.py index 2ddc81c..31d0304 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/decay/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/decay/base.py @@ -1,17 +1,18 @@ """Decay Service - Base interface and plugin.""" + from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import Optional -from ...config import MEMORYLAYER_DECAY_PROVIDER, DEFAULT_MEMORYLAYER_DECAY_PROVIDER +from ...config import DEFAULT_MEMORYLAYER_DECAY_PROVIDER, MEMORYLAYER_DECAY_PROVIDER from ...models import Memory -from .._constants import EXT_STORAGE_BACKEND, EXT_DECAY_SERVICE +from .._constants import EXT_DECAY_SERVICE, EXT_STORAGE_BACKEND from .._plugin_factory import make_service_plugin_base @dataclass class DecaySettings: """Configuration for memory decay behavior.""" + decay_rate: float = 0.95 # Per-day decay multiplier min_importance: float = 0.1 # Floor - importance never drops below this min_age_days: int = 7 # Don't decay memories younger than this @@ -24,6 +25,7 @@ class DecaySettings: @dataclass class DecayResult: """Result of a decay pass.""" + processed: int = 0 decayed: int = 0 archived: int = 0 @@ -33,26 +35,26 @@ class DecayService(ABC): """Interface for memory decay and archival.""" @abstractmethod - async def decay_workspace(self, workspace_id: str, settings: Optional[DecaySettings] = None) -> DecayResult: + async def decay_workspace(self, workspace_id: str, settings: DecaySettings | None = None) -> DecayResult: """Run decay pass on all eligible memories in a workspace.""" pass @abstractmethod - async def archive_stale_memories(self, workspace_id: str, settings: 
Optional[DecaySettings] = None) -> int: + async def archive_stale_memories(self, workspace_id: str, settings: DecaySettings | None = None) -> int: """Archive stale low-importance memories. Returns count archived.""" pass @abstractmethod - async def calculate_access_boost(self, memory: Memory, boost_factor: Optional[float] = None) -> Optional[float]: + async def calculate_access_boost(self, memory: Memory, boost_factor: float | None = None) -> float | None: pass @abstractmethod - async def boost_on_access(self, workspace_id: str, memory_id: str, boost_factor: Optional[float] = None) -> Optional[float]: + async def boost_on_access(self, workspace_id: str, memory_id: str, boost_factor: float | None = None) -> float | None: """Boost importance when memory is accessed. Returns new importance or None if not found.""" pass @abstractmethod - async def decay_all_workspaces(self, settings: Optional[DecaySettings] = None) -> DecayResult: + async def decay_all_workspaces(self, settings: DecaySettings | None = None) -> DecayResult: """Run decay and archival across all workspaces.""" pass diff --git a/memorylayer-core-python/src/memorylayer_server/services/decay/default.py b/memorylayer-core-python/src/memorylayer_server/services/decay/default.py index 2983469..6c5e30f 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/decay/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/decay/default.py @@ -1,14 +1,14 @@ """Default decay service implementation.""" -from datetime import datetime, timezone + +from datetime import UTC, datetime from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables from ...models import Memory from ..storage import EXT_STORAGE_BACKEND, StorageBackend -from .base import DecayService, DecayServicePluginBase, DecaySettings, DecayResult +from .base import DecayResult, DecayService, DecayServicePluginBase, DecaySettings class 
DefaultDecayService(DecayService): @@ -18,7 +18,7 @@ def __init__(self, storage: StorageBackend, v: Variables = None): self._storage = storage self.logger = get_logger(v, name=self.__class__.__name__) - async def decay_workspace(self, workspace_id: str, settings: Optional[DecaySettings] = None) -> DecayResult: + async def decay_workspace(self, workspace_id: str, settings: DecaySettings | None = None) -> DecayResult: settings = settings or DecaySettings() result = DecayResult() @@ -29,34 +29,29 @@ async def decay_workspace(self, workspace_id: str, settings: Optional[DecaySetti ) result.processed = len(memories) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) for memory in memories: last_access = memory.last_accessed_at or memory.created_at # Ensure timezone-aware comparison if last_access.tzinfo is None: - last_access = last_access.replace(tzinfo=timezone.utc) + last_access = last_access.replace(tzinfo=UTC) days_since_access = max(0, (now - last_access).days) - new_importance = max( - settings.min_importance, - memory.importance * (settings.decay_rate ** days_since_access) - ) + new_importance = max(settings.min_importance, memory.importance * (settings.decay_rate**days_since_access)) if abs(new_importance - memory.importance) > 0.001: await self._storage.update_memory( - workspace_id, memory.id, + workspace_id, + memory.id, importance=new_importance, decay_factor=new_importance / max(memory.importance, 0.001), ) result.decayed += 1 - self.logger.debug( - "Decay pass for workspace %s: %d processed, %d decayed", - workspace_id, result.processed, result.decayed - ) + self.logger.debug("Decay pass for workspace %s: %d processed, %d decayed", workspace_id, result.processed, result.decayed) return result - async def archive_stale_memories(self, workspace_id: str, settings: Optional[DecaySettings] = None) -> int: + async def archive_stale_memories(self, workspace_id: str, settings: DecaySettings | None = None) -> int: settings = settings or DecaySettings() 
candidates = await self._storage.get_archival_candidates( @@ -69,19 +64,17 @@ async def archive_stale_memories(self, workspace_id: str, settings: Optional[Dec archived = 0 for memory in candidates: await self._storage.update_memory( - workspace_id, memory.id, - status='archived', + workspace_id, + memory.id, + status="archived", ) archived += 1 if archived: - self.logger.info( - "Archived %d stale memories in workspace %s", - archived, workspace_id - ) + self.logger.info("Archived %d stale memories in workspace %s", archived, workspace_id) return archived - async def calculate_access_boost(self, memory: Memory, boost_factor: Optional[float] = None) -> Optional[float]: + async def calculate_access_boost(self, memory: Memory, boost_factor: float | None = None) -> float | None: boost = boost_factor or DecaySettings().access_boost if not memory or memory.pinned: return memory.importance if memory else None @@ -89,7 +82,7 @@ async def calculate_access_boost(self, memory: Memory, boost_factor: Optional[fl new_importance = min(1.0, memory.importance * boost) return new_importance - async def boost_on_access(self, workspace_id: str, memory_id: str, boost_factor: Optional[float] = None) -> Optional[float]: + async def boost_on_access(self, workspace_id: str, memory_id: str, boost_factor: float | None = None) -> float | None: memory = await self._storage.get_memory(workspace_id, memory_id, track_access=False) if not memory or memory.pinned: return memory.importance if memory else None @@ -97,12 +90,13 @@ async def boost_on_access(self, workspace_id: str, memory_id: str, boost_factor: new_importance = await self.calculate_access_boost(memory, boost_factor=boost_factor) if abs(new_importance - memory.importance) > 0.001: await self._storage.update_memory( - workspace_id, memory_id, + workspace_id, + memory_id, importance=new_importance, ) return new_importance - async def decay_all_workspaces(self, settings: Optional[DecaySettings] = None) -> DecayResult: + async def 
decay_all_workspaces(self, settings: DecaySettings | None = None) -> DecayResult: settings = settings or DecaySettings() total = DecayResult() @@ -116,16 +110,14 @@ async def decay_all_workspaces(self, settings: Optional[DecaySettings] = None) - archived = await self.archive_stale_memories(ws_id, settings) total.archived += archived - self.logger.info( - "Decay all workspaces: %d processed, %d decayed, %d archived", - total.processed, total.decayed, total.archived - ) + self.logger.info("Decay all workspaces: %d processed, %d decayed, %d archived", total.processed, total.decayed, total.archived) return total class DefaultDecayServicePlugin(DecayServicePluginBase): """Plugin that creates the default decay service.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> DecayService: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/deduplication/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/deduplication/__init__.py index d5682d4..b9e5bac 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/deduplication/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/deduplication/__init__.py @@ -1,14 +1,15 @@ """Deduplication service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - DeduplicationServicePluginBase, EXT_DEDUPLICATION_SERVICE, - DeduplicationService, DeduplicationAction, DeduplicationResult, + DeduplicationService, + DeduplicationServicePluginBase, ) -from scitrera_app_framework import Variables, get_extension - def get_deduplication_service(v: Variables = None) -> DeduplicationService: """Get the deduplication service instance.""" @@ -16,10 +17,10 @@ def get_deduplication_service(v: Variables = None) -> DeduplicationService: __all__ = ( - 'DeduplicationService', - 'DeduplicationServicePluginBase', - 
'get_deduplication_service', - 'EXT_DEDUPLICATION_SERVICE', - 'DeduplicationAction', - 'DeduplicationResult', + "DeduplicationService", + "DeduplicationServicePluginBase", + "get_deduplication_service", + "EXT_DEDUPLICATION_SERVICE", + "DeduplicationAction", + "DeduplicationResult", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/deduplication/base.py b/memorylayer-core-python/src/memorylayer_server/services/deduplication/base.py index 7245e88..75063a5 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/deduplication/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/deduplication/base.py @@ -4,35 +4,34 @@ Prevents duplicate memories during session extraction and manual remember operations. Uses content hashing for exact matches and embedding similarity for semantic matches. """ + from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum -from typing import Optional - -from ...config import MEMORYLAYER_DEDUPLICATION_SERVICE, DEFAULT_MEMORYLAYER_DEDUPLICATION_SERVICE -from .._constants import EXT_STORAGE_BACKEND, EXT_EMBEDDING_SERVICE, EXT_DEDUPLICATION_SERVICE +from ...config import DEFAULT_MEMORYLAYER_DEDUPLICATION_SERVICE, MEMORYLAYER_DEDUPLICATION_SERVICE +from .._constants import EXT_DEDUPLICATION_SERVICE, EXT_EMBEDDING_SERVICE, EXT_STORAGE_BACKEND from .._plugin_factory import make_service_plugin_base # ============================================ # Deduplication Configuration # ============================================ # Threshold for considering content a semantic duplicate (triggers UPDATE action) -MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD = 'MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD' +MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD = "MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD" DEFAULT_MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD = 0.95 # Threshold for considering content similar enough to merge (triggers MERGE action) 
-MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD = 'MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD' +MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD = "MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD" DEFAULT_MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD = 0.85 class DeduplicationAction(str, Enum): """Action to take for a candidate memory.""" - SKIP = "skip" # Exact duplicate, don't create - CREATE = "create" # New unique memory - UPDATE = "update" # Update existing with new info - MERGE = "merge" # Merge with existing memory + SKIP = "skip" # Exact duplicate, don't create + CREATE = "create" # New unique memory + UPDATE = "update" # Update existing with new info + MERGE = "merge" # Merge with existing memory @dataclass @@ -40,8 +39,8 @@ class DeduplicationResult: """Result of deduplication check for a single memory.""" action: DeduplicationAction - existing_memory_id: Optional[str] = None - similarity_score: Optional[float] = None + existing_memory_id: str | None = None + similarity_score: float | None = None reason: str = "" @@ -49,22 +48,12 @@ class DeduplicationService(ABC): """Interface for deduplication service.""" @abstractmethod - async def check_duplicate( - self, - content: str, - content_hash: str, - embedding: list[float], - workspace_id: str - ) -> DeduplicationResult: + async def check_duplicate(self, content: str, content_hash: str, embedding: list[float], workspace_id: str) -> DeduplicationResult: """Check if a memory is a duplicate.""" pass @abstractmethod - async def deduplicate_batch( - self, - candidates: list[tuple[str, str, list[float]]], - workspace_id: str - ) -> list[DeduplicationResult]: + async def deduplicate_batch(self, candidates: list[tuple[str, str, list[float]]], workspace_id: str) -> list[DeduplicationResult]: """Check multiple memories for duplicates.""" pass diff --git a/memorylayer-core-python/src/memorylayer_server/services/deduplication/default.py b/memorylayer-core-python/src/memorylayer_server/services/deduplication/default.py index 696ab48..65a90c3 
100644 --- a/memorylayer-core-python/src/memorylayer_server/services/deduplication/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/deduplication/default.py @@ -4,35 +4,30 @@ Prevents duplicate memories during session extraction and manual remember operations. Uses content hashing for exact matches and embedding similarity for semantic matches. """ -from typing import Optional + from logging import Logger from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ..storage import EXT_STORAGE_BACKEND, StorageBackend from ..embedding import EXT_EMBEDDING_SERVICE, EmbeddingService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend from .base import ( - DeduplicationService, - DeduplicationServicePluginBase, - DeduplicationAction, - DeduplicationResult, - MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD, DEFAULT_MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD, - MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD, DEFAULT_MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD, + MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD, + MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD, + DeduplicationAction, + DeduplicationResult, + DeduplicationService, + DeduplicationServicePluginBase, ) class DefaultDeduplicationService(DeduplicationService): """Default deduplication service implementation.""" - def __init__( - self, - storage: StorageBackend, - embedding_service: EmbeddingService, - v: Variables = None - ): + def __init__(self, storage: StorageBackend, embedding_service: EmbeddingService, v: Variables = None): """ Initialize deduplication service. 
@@ -47,26 +42,17 @@ def __init__( # Get thresholds from config with defaults self.similarity_threshold = v.get( - MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD, - DEFAULT_MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD - ) - self.merge_threshold = v.get( - MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD, - DEFAULT_MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD + MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD, DEFAULT_MEMORYLAYER_DEDUPLICATION_DUPLICATE_THRESHOLD ) + self.merge_threshold = v.get(MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD, DEFAULT_MEMORYLAYER_DEDUPLICATION_MERGE_THRESHOLD) self.logger.info( "Initialized DefaultDeduplicationService with thresholds: similarity=%.2f, merge=%.2f", - self.similarity_threshold, self.merge_threshold + self.similarity_threshold, + self.merge_threshold, ) - async def check_duplicate( - self, - content: str, - content_hash: str, - embedding: list[float], - workspace_id: str - ) -> DeduplicationResult: + async def check_duplicate(self, content: str, content_hash: str, embedding: list[float], workspace_id: str) -> DeduplicationResult: """ Check if a memory is a duplicate. @@ -90,57 +76,38 @@ async def check_duplicate( if existing: self.logger.debug("Found exact duplicate: %s", existing.id) return DeduplicationResult( - action=DeduplicationAction.SKIP, - existing_memory_id=existing.id, - similarity_score=1.0, - reason="Exact content duplicate" + action=DeduplicationAction.SKIP, existing_memory_id=existing.id, similarity_score=1.0, reason="Exact content duplicate" ) # 2. 
Check embedding similarity similar_memories = await self.storage.search_memories( - workspace_id=workspace_id, - query_embedding=embedding, - limit=5, - min_relevance=self.merge_threshold + workspace_id=workspace_id, query_embedding=embedding, limit=5, min_relevance=self.merge_threshold ) if similar_memories: top_match, top_score = similar_memories[0] if top_score >= self.similarity_threshold: - self.logger.debug( - "Found semantic duplicate: %s (similarity: %.3f)", - top_match.id, top_score - ) + self.logger.debug("Found semantic duplicate: %s (similarity: %.3f)", top_match.id, top_score) return DeduplicationResult( action=DeduplicationAction.UPDATE, existing_memory_id=top_match.id, similarity_score=top_score, - reason=f"Semantic duplicate (similarity: {top_score:.3f})" + reason=f"Semantic duplicate (similarity: {top_score:.3f})", ) elif top_score >= self.merge_threshold: - self.logger.debug( - "Found merge candidate: %s (similarity: %.3f)", - top_match.id, top_score - ) + self.logger.debug("Found merge candidate: %s (similarity: %.3f)", top_match.id, top_score) return DeduplicationResult( action=DeduplicationAction.MERGE, existing_memory_id=top_match.id, similarity_score=top_score, - reason=f"Potential merge candidate (similarity: {top_score:.3f})" + reason=f"Potential merge candidate (similarity: {top_score:.3f})", ) # 3. No duplicates found - return DeduplicationResult( - action=DeduplicationAction.CREATE, - reason="New unique memory" - ) + return DeduplicationResult(action=DeduplicationAction.CREATE, reason="New unique memory") - async def deduplicate_batch( - self, - candidates: list[tuple[str, str, list[float]]], - workspace_id: str - ) -> list[DeduplicationResult]: + async def deduplicate_batch(self, candidates: list[tuple[str, str, list[float]]], workspace_id: str) -> list[DeduplicationResult]: """ Check multiple memories for duplicates. 
@@ -153,9 +120,7 @@ async def deduplicate_batch( """ results = [] for content, content_hash, embedding in candidates: - result = await self.check_duplicate( - content, content_hash, embedding, workspace_id - ) + result = await self.check_duplicate(content, content_hash, embedding, workspace_id) results.append(result) self.logger.info( @@ -172,13 +137,10 @@ async def deduplicate_batch( class DefaultDeduplicationServicePlugin(DeduplicationServicePluginBase): """Default deduplication service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> DeduplicationService: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) embedding_service: EmbeddingService = self.get_extension(EXT_EMBEDDING_SERVICE, v) - return DefaultDeduplicationService( - storage=storage, - embedding_service=embedding_service, - v=v - ) + return DefaultDeduplicationService(storage=storage, embedding_service=embedding_service, v=v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/embedding/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/embedding/__init__.py index 4eca12f..83d8831 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/embedding/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/embedding/__init__.py @@ -1,8 +1,8 @@ -from .base import EmbeddingProvider, EXT_EMBEDDING_PROVIDER, EXT_EMBEDDING_SERVICE -from .service_default import EmbeddingService - from scitrera_app_framework import Variables, get_extension +from .base import EXT_EMBEDDING_PROVIDER, EXT_EMBEDDING_SERVICE, EmbeddingProvider +from .service_default import EmbeddingService + def get_embedding_provider(v: Variables = None) -> EmbeddingProvider: return get_extension(EXT_EMBEDDING_PROVIDER, v) @@ -13,10 +13,10 @@ def get_embedding_service(v: Variables = None) -> EmbeddingService: __all__ = ( - 'EmbeddingProvider', - 'EmbeddingService', - 'get_embedding_provider', - 
'get_embedding_service', - 'EXT_EMBEDDING_PROVIDER', - 'EXT_EMBEDDING_SERVICE', + "EmbeddingProvider", + "EmbeddingService", + "get_embedding_provider", + "get_embedding_service", + "EXT_EMBEDDING_PROVIDER", + "EXT_EMBEDDING_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/embedding/base.py b/memorylayer-core-python/src/memorylayer_server/services/embedding/base.py index 437ddc6..3c31484 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/embedding/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/embedding/base.py @@ -1,27 +1,28 @@ import base64 -from logging import Logger - from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum +from logging import Logger from pathlib import Path -from typing import Optional, Union -from scitrera_app_framework.api import Variables, Plugin, enabled_option_pattern -from scitrera_app_framework import get_extension, get_logger, ext_parse_bool - -from .._plugin_factory import make_service_plugin_base +from scitrera_app_framework import ext_parse_bool, get_logger +from scitrera_app_framework.api import Plugin, Variables, enabled_option_pattern from ...config import ( - MEMORYLAYER_EMBEDDING_PROVIDER, DEFAULT_MEMORYLAYER_EMBEDDING_PROVIDER, - MEMORYLAYER_EMBEDDING_SERVICE, DEFAULT_MEMORYLAYER_EMBEDDING_SERVICE, - MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED, DEFAULT_MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED + DEFAULT_MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED, + DEFAULT_MEMORYLAYER_EMBEDDING_PROVIDER, + DEFAULT_MEMORYLAYER_EMBEDDING_SERVICE, + MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED, + MEMORYLAYER_EMBEDDING_PROVIDER, + MEMORYLAYER_EMBEDDING_SERVICE, ) from .._constants import EXT_CACHE_SERVICE, EXT_EMBEDDING_PROVIDER, EXT_EMBEDDING_SERVICE +from .._plugin_factory import make_service_plugin_base class EmbeddingType(str, Enum): """Type of content being embedded.""" + TEXT = "text" IMAGE = "image" MULTIMODAL = "multimodal" # Combined text + image @@ 
-30,8 +31,9 @@ class EmbeddingType(str, Enum): @dataclass class EmbeddingInput: """Input for embedding generation, supporting multimodal content.""" - text: Optional[str] = None - image: Optional[Union[str, bytes, Path]] = None # Base64, bytes, URL, or file path + + text: str | None = None + image: str | bytes | Path | None = None # Base64, bytes, URL, or file path def __post_init__(self): if not self.text and not self.image: @@ -58,7 +60,7 @@ def to_dict(self) -> dict: class EmbeddingProvider(ABC): """Abstract embedding provider.""" - def __init__(self, v: Variables = None, output_dimensions: Optional[int] = None): + def __init__(self, v: Variables = None, output_dimensions: int | None = None): self._dimensions = output_dimensions self.logger = get_logger(v, name=self.__class__.__name__) @@ -95,16 +97,12 @@ class MultimodalEmbeddingProvider(EmbeddingProvider): """ @abstractmethod - async def embed_image(self, image: Union[str, bytes, Path]) -> list[float]: + async def embed_image(self, image: str | bytes | Path) -> list[float]: """Generate embedding for an image.""" pass @abstractmethod - async def embed_multimodal( - self, - text: Optional[str] = None, - image: Optional[Union[str, bytes, Path]] = None - ) -> list[float]: + async def embed_multimodal(self, text: str | None = None, image: str | bytes | Path | None = None) -> list[float]: """Generate embedding for combined text and image.""" pass @@ -118,7 +116,7 @@ async def embed_input(self, input: EmbeddingInput) -> list[float]: return await self.embed_multimodal(input.text, input.image) @staticmethod - def load_image_bytes(image: Union[str, bytes, Path]) -> bytes: + def load_image_bytes(image: str | bytes | Path) -> bytes: """Load image as bytes from various input formats.""" if isinstance(image, bytes): return image @@ -133,6 +131,7 @@ def load_image_bytes(image: Union[str, bytes, Path]) -> bytes: elif image.startswith(("http://", "https://")): # URL - download import urllib.request + with 
urllib.request.urlopen(image) as response: return response.read() elif len(image) > 500 and not Path(image).exists(): @@ -147,7 +146,8 @@ def load_image_bytes(image: Union[str, bytes, Path]) -> bytes: # noinspection PyAbstractClass class EmbeddingProviderPluginBase(Plugin): """Base Plugin Implementation for embedding providers.""" - PROVIDER_NAME: str = '' + + PROVIDER_NAME: str = "" def name(self) -> str: return f"{EXT_EMBEDDING_PROVIDER}|{self.PROVIDER_NAME}" @@ -156,7 +156,7 @@ def extension_point_name(self, v: Variables) -> str: return EXT_EMBEDDING_PROVIDER def is_enabled(self, v: Variables) -> bool: - return enabled_option_pattern(self, v, MEMORYLAYER_EMBEDDING_PROVIDER, self_attr='PROVIDER_NAME') + return enabled_option_pattern(self, v, MEMORYLAYER_EMBEDDING_PROVIDER, self_attr="PROVIDER_NAME") def on_registration(self, v: Variables) -> None: v.set_default_value(MEMORYLAYER_EMBEDDING_PROVIDER, DEFAULT_MEMORYLAYER_EMBEDDING_PROVIDER) @@ -164,8 +164,9 @@ def on_registration(self, v: Variables) -> None: async def async_ready(self, v: Variables, logger: Logger, value: object | None) -> None: # noinspection PyTypeChecker embedding_provider: EmbeddingProvider = value - preload = v.environ(MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED, - default=DEFAULT_MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED, type_fn=ext_parse_bool) + preload = v.environ( + MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED, default=DEFAULT_MEMORYLAYER_EMBEDDING_PRELOAD_ENABLED, type_fn=ext_parse_bool + ) # initiate preload if implemented in the embedding provider if preload: diff --git a/memorylayer-core-python/src/memorylayer_server/services/embedding/google.py b/memorylayer-core-python/src/memorylayer_server/services/embedding/google.py index 236f355..be33dd1 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/embedding/google.py +++ b/memorylayer-core-python/src/memorylayer_server/services/embedding/google.py @@ -1,15 +1,15 @@ """Google GenAI (Gemini) embedding provider.""" + from logging import 
Logger -from typing import Optional from scitrera_app_framework import Variables, get_logger -from ...config import EmbeddingProviderType, MEMORYLAYER_EMBEDDING_MODEL, MEMORYLAYER_EMBEDDING_DIMENSIONS +from ...config import MEMORYLAYER_EMBEDDING_DIMENSIONS, MEMORYLAYER_EMBEDDING_MODEL, EmbeddingProviderType from .base import EmbeddingProvider, EmbeddingProviderPluginBase -MEMORYLAYER_EMBEDDING_GOOGLE_API_KEY = 'MEMORYLAYER_EMBEDDING_GOOGLE_API_KEY' +MEMORYLAYER_EMBEDDING_GOOGLE_API_KEY = "MEMORYLAYER_EMBEDDING_GOOGLE_API_KEY" -DEFAULT_EMBEDDING_MODEL = 'gemini-embedding-001' +DEFAULT_EMBEDDING_MODEL = "gemini-embedding-001" DEFAULT_EMBEDDING_DIMENSIONS = 768 @@ -20,11 +20,11 @@ class GoogleEmbeddingProvider(EmbeddingProvider): """ def __init__( - self, - v: Variables = None, - api_key: Optional[str] = None, - model: str = DEFAULT_EMBEDDING_MODEL, - dimensions: int = DEFAULT_EMBEDDING_DIMENSIONS, + self, + v: Variables = None, + api_key: str | None = None, + model: str = DEFAULT_EMBEDDING_MODEL, + dimensions: int = DEFAULT_EMBEDDING_DIMENSIONS, ): super().__init__(v, output_dimensions=dimensions) self._api_key = api_key @@ -34,7 +34,8 @@ def __init__( self.logger = get_logger(v, name=self.__class__.__name__) self.logger.info( "Initialized GoogleEmbeddingProvider: model=%s, dimensions=%s", - model, dimensions, + model, + dimensions, ) def _get_client(self): @@ -42,16 +43,16 @@ def _get_client(self): if self._client is None: try: from google import genai + self._client = genai.Client(api_key=self._api_key) except ImportError: - raise ImportError( - "google-genai package not installed. Install with: pip install google-genai" - ) + raise ImportError("google-genai package not installed. 
Install with: pip install google-genai") return self._client def _get_config(self): """Build EmbedContentConfig with output dimensionality.""" from google.genai import types + return types.EmbedContentConfig( output_dimensionality=self._output_dimensionality, ) @@ -83,6 +84,7 @@ async def embed_batch(self, texts: list[str]) -> list[list[float]]: class GoogleEmbeddingProviderPlugin(EmbeddingProviderPluginBase): """Plugin for Google GenAI embedding provider.""" + PROVIDER_NAME = EmbeddingProviderType.GOOGLE def initialize(self, v: Variables, logger: Logger) -> object | None: diff --git a/memorylayer-core-python/src/memorylayer_server/services/embedding/local.py b/memorylayer-core-python/src/memorylayer_server/services/embedding/local.py index 39d19fc..27a61fd 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/embedding/local.py +++ b/memorylayer-core-python/src/memorylayer_server/services/embedding/local.py @@ -1,12 +1,11 @@ from logging import Logger -from typing import Optional -from scitrera_app_framework import Variables as Variables, get_logger +from scitrera_app_framework import Variables as Variables -from .base import EmbeddingProvider, EmbeddingProviderPluginBase from ...config import MEMORYLAYER_EMBEDDING_MODEL, EmbeddingProviderType +from .base import EmbeddingProvider, EmbeddingProviderPluginBase -DEFAULT_EMBEDDING_MODEL = 'all-MiniLM-L6-v2' +DEFAULT_EMBEDDING_MODEL = "all-MiniLM-L6-v2" class LocalEmbeddingProvider(EmbeddingProvider): @@ -27,6 +26,7 @@ def _get_model(self): """Lazy load the model.""" if self._model is None: from sentence_transformers import SentenceTransformer + self.logger.info("Loading sentence-transformers model: %s", self.model_name) self._model = SentenceTransformer(self.model_name) return self._model diff --git a/memorylayer-core-python/src/memorylayer_server/services/embedding/mock.py b/memorylayer-core-python/src/memorylayer_server/services/embedding/mock.py index cfe1148..84b7cd7 100644 --- 
a/memorylayer-core-python/src/memorylayer_server/services/embedding/mock.py +++ b/memorylayer-core-python/src/memorylayer_server/services/embedding/mock.py @@ -1,13 +1,11 @@ import hashlib import math import random - from logging import Logger -from scitrera_app_framework import Variables as Variables, get_logger - -from ...config import EmbeddingProviderType, MEMORYLAYER_EMBEDDING_DIMENSIONS +from scitrera_app_framework import Variables as Variables +from ...config import MEMORYLAYER_EMBEDDING_DIMENSIONS, EmbeddingProviderType from .base import EmbeddingProvider, EmbeddingProviderPluginBase DEFAULT_EMBEDDING_DIMENSIONS = 384 @@ -58,6 +56,5 @@ class MockEmbeddingProviderPlugin(EmbeddingProviderPluginBase): def initialize(self, v: Variables, logger: Logger) -> MockEmbeddingProvider: return MockEmbeddingProvider( - v=v, - dimensions=v.environ(MEMORYLAYER_EMBEDDING_DIMENSIONS, default=DEFAULT_EMBEDDING_DIMENSIONS, type_fn=int) + v=v, dimensions=v.environ(MEMORYLAYER_EMBEDDING_DIMENSIONS, default=DEFAULT_EMBEDDING_DIMENSIONS, type_fn=int) ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/embedding/openai.py b/memorylayer-core-python/src/memorylayer_server/services/embedding/openai.py index d958d7e..ee3689c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/embedding/openai.py +++ b/memorylayer-core-python/src/memorylayer_server/services/embedding/openai.py @@ -1,18 +1,16 @@ from logging import Logger -from typing import Optional -from scitrera_app_framework import Variables as Variables, get_logger - -from ...config import EmbeddingProviderType, MEMORYLAYER_EMBEDDING_MODEL, MEMORYLAYER_EMBEDDING_DIMENSIONS +from scitrera_app_framework import Variables as Variables +from ...config import MEMORYLAYER_EMBEDDING_DIMENSIONS, MEMORYLAYER_EMBEDDING_MODEL, EmbeddingProviderType from .base import EmbeddingProvider, EmbeddingProviderPluginBase -MEMORYLAYER_EMBEDDING_OPENAI_API_KEY = 'MEMORYLAYER_EMBEDDING_OPENAI_API_KEY' 
-MEMORYLAYER_EMBEDDING_OPENAI_BASE_URL = 'MEMORYLAYER_EMBEDDING_OPENAI_BASE_URL' +MEMORYLAYER_EMBEDDING_OPENAI_API_KEY = "MEMORYLAYER_EMBEDDING_OPENAI_API_KEY" +MEMORYLAYER_EMBEDDING_OPENAI_BASE_URL = "MEMORYLAYER_EMBEDDING_OPENAI_BASE_URL" -DEFAULT_EMBEDDING_MODEL = 'text-embedding-3-small' +DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small" DEFAULT_EMBEDDING_DIMENSIONS = 1536 -DEFAULT_OPENAI_API_KEY = 'x' +DEFAULT_OPENAI_API_KEY = "x" DEFAULT_OPENAI_BASE_URL = None @@ -25,15 +23,16 @@ class OpenAIEmbeddingProvider(EmbeddingProvider): """ def __init__( - self, - v: Variables = None, - api_key: Optional[str] = None, - model: str = "text-embedding-3-small", - base_url: Optional[str] = None, - dimensions: int = 1536, + self, + v: Variables = None, + api_key: str | None = None, + model: str = "text-embedding-3-small", + base_url: str | None = None, + dimensions: int = 1536, ): super().__init__(v, output_dimensions=dimensions) import openai + self.client = openai.AsyncOpenAI(api_key=api_key, base_url=base_url) self.model = model self._base_url = base_url @@ -41,19 +40,13 @@ def __init__( async def embed(self, text: str) -> list[float]: """Generate embedding for single text.""" self.logger.debug("Generating OpenAI embedding for text: %s chars", len(text)) - response = await self.client.embeddings.create( - input=text, - model=self.model - ) + response = await self.client.embeddings.create(input=text, model=self.model) return response.data[0].embedding async def embed_batch(self, texts: list[str]) -> list[list[float]]: """Generate embeddings for multiple texts (more efficient).""" self.logger.debug("Generating OpenAI embeddings for batch of %s texts", len(texts)) - response = await self.client.embeddings.create( - input=texts, - model=self.model - ) + response = await self.client.embeddings.create(input=texts, model=self.model) return [item.embedding for item in response.data] diff --git 
a/memorylayer-core-python/src/memorylayer_server/services/embedding/service_default.py b/memorylayer-core-python/src/memorylayer_server/services/embedding/service_default.py index d70c946..2c6142e 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/embedding/service_default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/embedding/service_default.py @@ -1,18 +1,21 @@ import hashlib from logging import Logger - from pathlib import Path -from typing import Any, Optional, Union, Iterable +from typing import Any -from scitrera_app_framework import get_logger, Variables as Variables +from scitrera_app_framework import Variables as Variables +from scitrera_app_framework import get_logger +from ...utils import cosine_similarity as _cosine_similarity +from ..cache import EXT_CACHE_SERVICE from .base import ( - EmbeddingProvider, MultimodalEmbeddingProvider, - EmbeddingInput, EmbeddingType, - EmbeddingServicePluginBase, EXT_EMBEDDING_PROVIDER, + EXT_EMBEDDING_PROVIDER, + EmbeddingInput, + EmbeddingProvider, + EmbeddingServicePluginBase, + EmbeddingType, + MultimodalEmbeddingProvider, ) -from ..cache import EXT_CACHE_SERVICE -from ...utils import cosine_similarity as _cosine_similarity class EmbeddingService: @@ -23,7 +26,7 @@ class EmbeddingService: when a multimodal provider is configured. 
""" - def __init__(self, v: Variables = None, provider: EmbeddingProvider = None, cache: Optional[Any] = None): + def __init__(self, v: Variables = None, provider: EmbeddingProvider = None, cache: Any | None = None): self.provider = provider self.cache = cache self.logger = get_logger(v, name=self.__class__.__name__) @@ -33,7 +36,7 @@ def __init__(self, v: Variables = None, provider: EmbeddingProvider = None, cach "Initialized EmbeddingService with provider: %s, dimensions: %s, multimodal: %s", provider.__class__.__name__, provider.dimensions, - self._is_multimodal + self._is_multimodal, ) @property @@ -41,7 +44,6 @@ def is_multimodal(self) -> bool: """Whether this service supports multimodal (text + image) embeddings.""" return self._is_multimodal - async def embed(self, text: str) -> list[float]: """Generate embedding with optional caching.""" if not text or not text.strip(): @@ -65,7 +67,7 @@ async def embed(self, text: str) -> list[float]: return embedding - async def embed_image(self, image: Union[str, bytes, Path]) -> list[float]: + async def embed_image(self, image: str | bytes | Path) -> list[float]: """ Generate embedding for an image. @@ -80,11 +82,7 @@ async def embed_image(self, image: Union[str, bytes, Path]) -> list[float]: provider: MultimodalEmbeddingProvider = self.provider return await provider.embed_image(image) - async def embed_multimodal( - self, - text: Optional[str] = None, - image: Optional[Union[str, bytes, Path]] = None - ) -> list[float]: + async def embed_multimodal(self, text: str | None = None, image: str | bytes | Path | None = None) -> list[float]: """ Generate embedding for combined text and image. @@ -92,9 +90,7 @@ async def embed_multimodal( """ if not self._is_multimodal: if image: - raise ValueError( - f"Provider {self.provider.__class__.__name__} does not support image embeddings." 
- ) + raise ValueError(f"Provider {self.provider.__class__.__name__} does not support image embeddings.") return await self.embed(text) provider: MultimodalEmbeddingProvider = self.provider @@ -134,13 +130,10 @@ def cosine_similarity(a: list[float], b: list[float]) -> float: class EmbeddingServicePlugin(EmbeddingServicePluginBase): """Default plugin for embedding service.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> object | None: cache_service = self.get_extension(EXT_CACHE_SERVICE, v) embedding_provider: EmbeddingProvider = self.get_extension(EXT_EMBEDDING_PROVIDER, v) - return EmbeddingService( - v=v, - provider=embedding_provider, - cache=cache_service - ) + return EmbeddingService(v=v, provider=embedding_provider, cache=cache_service) diff --git a/memorylayer-core-python/src/memorylayer_server/services/extraction/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/extraction/__init__.py index e115a7f..5df3967 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/extraction/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/extraction/__init__.py @@ -1,17 +1,18 @@ """Extraction service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - ExtractionService, - ExtractionServicePluginBase, + CATEGORY_MAPPING, EXT_EXTRACTION_SERVICE, + ExtractedMemory, ExtractionCategory, ExtractionOptions, - ExtractedMemory, ExtractionResult, - CATEGORY_MAPPING, + ExtractionService, + ExtractionServicePluginBase, ) -from scitrera_app_framework import Variables, get_extension - def get_extraction_service(v: Variables = None) -> ExtractionService: """Get the extraction service instance.""" @@ -19,13 +20,13 @@ def get_extraction_service(v: Variables = None) -> ExtractionService: __all__ = ( - 'ExtractionService', - 'ExtractionServicePluginBase', - 'get_extraction_service', - 'EXT_EXTRACTION_SERVICE', - 
'ExtractionCategory', - 'ExtractionOptions', - 'ExtractedMemory', - 'ExtractionResult', - 'CATEGORY_MAPPING', + "ExtractionService", + "ExtractionServicePluginBase", + "get_extraction_service", + "EXT_EXTRACTION_SERVICE", + "ExtractionCategory", + "ExtractionOptions", + "ExtractedMemory", + "ExtractionResult", + "CATEGORY_MAPPING", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/extraction/base.py b/memorylayer-core-python/src/memorylayer_server/services/extraction/base.py index 7567f5c..b18c37e 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/extraction/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/extraction/base.py @@ -3,14 +3,13 @@ Extracts memories from session content using LLM-based classification. """ + from abc import ABC, abstractmethod from dataclasses import dataclass, field from enum import Enum -from typing import Optional -from datetime import datetime -from ...config import MEMORYLAYER_EXTRACTION_SERVICE, DEFAULT_MEMORYLAYER_EXTRACTION_SERVICE -from ...models.memory import Memory, MemoryType, MemorySubtype +from ...config import DEFAULT_MEMORYLAYER_EXTRACTION_SERVICE, MEMORYLAYER_EXTRACTION_SERVICE +from ...models.memory import Memory, MemorySubtype, MemoryType from .._constants import ( EXT_DEDUPLICATION_SERVICE, EXT_EMBEDDING_SERVICE, @@ -49,7 +48,7 @@ class ExtractionOptions: min_importance: float = 0.5 deduplicate: bool = True - categories: Optional[list[ExtractionCategory]] = None # None = all categories + categories: list[ExtractionCategory] | None = None # None = all categories max_memories: int = 50 @@ -81,13 +80,7 @@ class ExtractionService(ABC): @abstractmethod async def extract_from_session( - self, - session_id: str, - workspace_id: str, - context_id: str, - session_content: str, - working_memory: dict, - options: ExtractionOptions + self, session_id: str, workspace_id: str, context_id: str, session_content: str, working_memory: dict, options: ExtractionOptions ) -> 
ExtractionResult: """Extract memories from a session.""" pass @@ -101,7 +94,7 @@ async def decompose_to_facts(self, content: str) -> list[dict]: pass @abstractmethod - async def classify_content(self, content: str) -> tuple['MemoryType', 'Optional[MemorySubtype]']: + async def classify_content(self, content: str) -> tuple["MemoryType", "MemorySubtype | None"]: """Classify a single memory's content into a type and subtype. Uses LLM to determine the extraction category, then maps through diff --git a/memorylayer-core-python/src/memorylayer_server/services/extraction/default.py b/memorylayer-core-python/src/memorylayer_server/services/extraction/default.py index 5b54af9..6d2011d 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/extraction/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/extraction/default.py @@ -10,31 +10,31 @@ - CASES: Problems with solutions - PATTERNS: Reusable processes """ + import json import re import time -from datetime import datetime, timezone -from typing import Optional +from datetime import UTC, datetime from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ...models.memory import Memory, MemoryType, MemorySubtype +from ...config import DEFAULT_TENANT_ID from ...models.llm import LLMMessage, LLMRequest, LLMRole +from ...models.memory import Memory, MemorySubtype, MemoryType from ...utils import compute_content_hash, generate_id -from ..storage import EXT_STORAGE_BACKEND, StorageBackend -from ..llm import EXT_LLM_SERVICE, LLMService +from ..deduplication import EXT_DEDUPLICATION_SERVICE, DeduplicationAction, DeduplicationService from ..embedding import EXT_EMBEDDING_SERVICE, EmbeddingService -from ..deduplication import EXT_DEDUPLICATION_SERVICE, DeduplicationService, DeduplicationAction -from ...config import DEFAULT_TENANT_ID +from ..llm import EXT_LLM_SERVICE, LLMService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend from .base import ( 
- ExtractionService, - ExtractionServicePluginBase, + CATEGORY_MAPPING, + ExtractedMemory, ExtractionCategory, ExtractionOptions, - ExtractedMemory, ExtractionResult, - CATEGORY_MAPPING, + ExtractionService, + ExtractionServicePluginBase, ) # System prompt for LLM extraction @@ -105,12 +105,12 @@ class DefaultExtractionService(ExtractionService): """Default extraction service implementation.""" def __init__( - self, - llm_service: Optional[LLMService] = None, - storage: Optional[StorageBackend] = None, - deduplication_service=None, - embedding_service: Optional[EmbeddingService] = None, - v: Variables = None + self, + llm_service: LLMService | None = None, + storage: StorageBackend | None = None, + deduplication_service=None, + embedding_service: EmbeddingService | None = None, + v: Variables = None, ): """ Initialize extraction service. @@ -130,13 +130,7 @@ def __init__( self.logger.info("Initialized DefaultExtractionService") async def extract_from_session( - self, - session_id: str, - workspace_id: str, - context_id: str, - session_content: str, - working_memory: dict, - options: ExtractionOptions + self, session_id: str, workspace_id: str, context_id: str, session_content: str, working_memory: dict, options: ExtractionOptions ) -> ExtractionResult: """ Extract memories from a session. 
@@ -171,22 +165,17 @@ async def extract_from_session( extracted = [m for m in extracted if m.importance >= options.min_importance] # Limit count - extracted = extracted[:options.max_memories] + extracted = extracted[: options.max_memories] # Deduplicate if enabled memories_deduplicated = 0 if options.deduplicate and self.deduplication_service: - extracted, memories_deduplicated = await self._deduplicate( - extracted, workspace_id - ) + extracted, memories_deduplicated = await self._deduplicate(extracted, workspace_id) # Convert to Memory objects memories_created = [] for em in extracted: - memory_type, memory_subtype = CATEGORY_MAPPING.get( - em.category, - (MemoryType.SEMANTIC, None) - ) + memory_type, memory_subtype = CATEGORY_MAPPING.get(em.category, (MemoryType.SEMANTIC, None)) memory = Memory( id=generate_id("mem"), @@ -202,8 +191,8 @@ async def extract_from_session( importance=em.importance, tags=em.tags, metadata=em.metadata, - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) memories_created.append(memory) @@ -215,10 +204,7 @@ async def extract_from_session( elapsed_ms = int((time.time() - start_time) * 1000) - self.logger.info( - "Extracted %s memories from session %s in %s ms", - len(memories_created), session_id, elapsed_ms - ) + self.logger.info("Extracted %s memories from session %s in %s ms", len(memories_created), session_id, elapsed_ms) return ExtractionResult( session_id=session_id, @@ -226,7 +212,7 @@ async def extract_from_session( memories_deduplicated=memories_deduplicated, memories_created=memories_created, breakdown=breakdown, - extraction_time_ms=elapsed_ms + extraction_time_ms=elapsed_ms, ) async def decompose_to_facts(self, content: str) -> list[dict]: @@ -264,10 +250,7 @@ async def decompose_to_facts(self, content: str) -> list[dict]: "Return ONLY the JSON array, no additional text." 
) - user_prompt = ( - "Decompose this content into atomic facts:\n\n" - f"---\n{content}\n---" - ) + user_prompt = f"Decompose this content into atomic facts:\n\n---\n{content}\n---" try: messages = [ @@ -294,7 +277,7 @@ async def decompose_to_facts(self, content: str) -> list[dict]: raw = "\n".join(lines) # Extract JSON array from response (handle surrounding text) - array_start = raw.find('[') + array_start = raw.find("[") if array_start > 0: raw = raw[array_start:] @@ -310,11 +293,13 @@ async def decompose_to_facts(self, content: str) -> list[dict]: validated = [] for item in facts: if isinstance(item, dict) and "content" in item and item["content"].strip(): - validated.append({ - "content": item["content"].strip(), - "type": item.get("type"), - "subtype": item.get("subtype"), - }) + validated.append( + { + "content": item["content"].strip(), + "type": item.get("type"), + "subtype": item.get("subtype"), + } + ) if not validated: self.logger.warning("No valid facts extracted, returning single fact") @@ -343,17 +328,17 @@ def _parse_partial_json_array(self, raw: str) -> list: json.JSONDecodeError: If the JSON cannot be recovered. 
""" # Remove trailing commas before } or ] - cleaned = re.sub(r',\s*([}\]])', r'\1', raw) + cleaned = re.sub(r",\s*([}\]])", r"\1", raw) try: return json.loads(cleaned) except json.JSONDecodeError: pass # Truncate at the last complete JSON object and close the array - last_brace = cleaned.rfind('}') + last_brace = cleaned.rfind("}") if last_brace >= 0: - candidate = cleaned[:last_brace + 1] + ']' - first_bracket = candidate.find('[') + candidate = cleaned[: last_brace + 1] + "]" + first_bracket = candidate.find("[") if first_bracket >= 0: candidate = candidate[first_bracket:] try: @@ -367,11 +352,9 @@ def _parse_partial_json_array(self, raw: str) -> list: except json.JSONDecodeError: pass - raise json.JSONDecodeError( - "Could not recover facts from malformed JSON", raw, 0 - ) + raise json.JSONDecodeError("Could not recover facts from malformed JSON", raw, 0) - async def classify_content(self, content: str) -> tuple[MemoryType, 'Optional[MemorySubtype]']: + async def classify_content(self, content: str) -> tuple[MemoryType, "MemorySubtype | None"]: """Classify a single memory's content into a type and subtype. Uses LLM to determine the extraction category, then maps through @@ -419,11 +402,7 @@ async def classify_content(self, content: str) -> tuple[MemoryType, 'Optional[Me self.logger.warning("Content classification failed: %s", e) return (MemoryType.SEMANTIC, None) - async def _llm_extraction( - self, - context: str, - categories: list[ExtractionCategory] - ) -> list[ExtractedMemory]: + async def _llm_extraction(self, context: str, categories: list[ExtractionCategory]) -> list[ExtractedMemory]: """ Extract memories using LLM-based classification. 
@@ -458,22 +437,18 @@ async def _llm_extraction( self.logger.info( "LLM extraction completed: %d memories extracted (tokens: %d prompt, %d completion)", - len(extracted), response.prompt_tokens, (response.completion_tokens or -1), + len(extracted), + response.prompt_tokens, + (response.completion_tokens or -1), ) return extracted except Exception as e: - self.logger.warning( - "LLM extraction failed, falling back to simple extraction: %s", str(e) - ) + self.logger.warning("LLM extraction failed, falling back to simple extraction: %s", str(e)) return self._simple_extraction(context, categories, ExtractionOptions()) - def _parse_llm_response( - self, - response_content: str, - categories: list[ExtractionCategory] - ) -> list[ExtractedMemory]: + def _parse_llm_response(self, response_content: str, categories: list[ExtractionCategory]) -> list[ExtractedMemory]: """ Parse LLM response into ExtractedMemory objects. @@ -540,13 +515,15 @@ def _parse_llm_response( tags = [] tags = [str(t) for t in tags] # Ensure strings - extracted.append(ExtractedMemory( - content=str(item["content"]), - category=category, - importance=importance, - tags=tags, - metadata={"extraction_method": "llm"} - )) + extracted.append( + ExtractedMemory( + content=str(item["content"]), + category=category, + importance=importance, + tags=tags, + metadata={"extraction_method": "llm"}, + ) + ) except (KeyError, ValueError, TypeError) as e: self.logger.debug("Skipping invalid memory item: %s", str(e)) @@ -554,11 +531,7 @@ def _parse_llm_response( return extracted - def _build_extraction_context( - self, - session_content: str, - working_memory: dict - ) -> str: + def _build_extraction_context(self, session_content: str, working_memory: dict) -> str: """Build context string for extraction.""" parts = [session_content] @@ -568,12 +541,7 @@ def _build_extraction_context( return "\n".join(parts) - def _simple_extraction( - self, - context: str, - categories: list[ExtractionCategory], - options: 
ExtractionOptions - ) -> list[ExtractedMemory]: + def _simple_extraction(self, context: str, categories: list[ExtractionCategory], options: ExtractionOptions) -> list[ExtractedMemory]: """Simple extraction without LLM - returns the full context as one memory.""" # Without LLM, we can't do sophisticated extraction # Just create a single memory from the context @@ -586,15 +554,11 @@ def _simple_extraction( category=ExtractionCategory.CASES, importance=0.6, # TODO: configurable default (same as value at LLM extraction) tags=["auto-extracted"], - metadata={"extraction_method": "simple"} + metadata={"extraction_method": "simple"}, ) ] - async def _deduplicate( - self, - extracted: list[ExtractedMemory], - workspace_id: str - ) -> tuple[list[ExtractedMemory], int]: + async def _deduplicate(self, extracted: list[ExtractedMemory], workspace_id: str) -> tuple[list[ExtractedMemory], int]: """ Deduplicate extracted memories against existing memories. @@ -624,9 +588,7 @@ async def _deduplicate( candidates.append((em.content, content_hash, embedding)) # Run batch deduplication - results = await self.deduplication_service.deduplicate_batch( - candidates, workspace_id - ) + results = await self.deduplication_service.deduplicate_batch(candidates, workspace_id) # Filter extracted memories based on deduplication results deduplicated = [] @@ -636,18 +598,12 @@ async def _deduplicate( if result.action == DeduplicationAction.SKIP: # Exact duplicate - skip entirely duplicates_count += 1 - self.logger.debug( - "Skipping duplicate memory (exact match): %s", - em.content[:50] - ) + self.logger.debug("Skipping duplicate memory (exact match): %s", em.content[:50]) elif result.action == DeduplicationAction.UPDATE: # Semantic duplicate - could update existing, but for extraction # we'll skip to avoid redundancy (existing memory is sufficient) duplicates_count += 1 - self.logger.debug( - "Skipping duplicate memory (semantic match %.3f): %s", - result.similarity_score or 0, em.content[:50] - ) + 
self.logger.debug("Skipping duplicate memory (semantic match %.3f): %s", result.similarity_score or 0, em.content[:50]) elif result.action == DeduplicationAction.MERGE: # Merge candidate - include but flag for potential merge em.metadata["merge_candidate"] = True @@ -659,22 +615,20 @@ async def _deduplicate( deduplicated.append(em) self.logger.info( - "Deduplication complete: %d memories in, %d out, %d duplicates removed", - len(extracted), len(deduplicated), duplicates_count + "Deduplication complete: %d memories in, %d out, %d duplicates removed", len(extracted), len(deduplicated), duplicates_count ) return deduplicated, duplicates_count except Exception as e: - self.logger.warning( - "Deduplication failed, returning all extracted memories: %s", str(e) - ) + self.logger.warning("Deduplication failed, returning all extracted memories: %s", str(e)) return extracted, 0 class DefaultExtractionServicePlugin(ExtractionServicePluginBase): """Default extraction service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger) -> ExtractionService: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) @@ -683,9 +637,5 @@ def initialize(self, v: Variables, logger) -> ExtractionService: embedding_service: EmbeddingService = self.get_extension(EXT_EMBEDDING_SERVICE, v) return DefaultExtractionService( - llm_service=llm_service, - storage=storage, - deduplication_service=deduplication_service, - embedding_service=embedding_service, - v=v + llm_service=llm_service, storage=storage, deduplication_service=deduplication_service, embedding_service=embedding_service, v=v ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/inference/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/inference/__init__.py index 924c1cc..4005f6b 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/inference/__init__.py +++ 
b/memorylayer-core-python/src/memorylayer_server/services/inference/__init__.py @@ -1,14 +1,15 @@ """Inference service package - entity insight derivation.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - InferenceServicePluginBase, - InferenceService, - InferenceResult, EXT_INFERENCE_SERVICE, + InferenceResult, + InferenceService, + InferenceServicePluginBase, ) from .default import DefaultInferenceService -from scitrera_app_framework import Variables, get_extension - def get_inference_service(v: Variables = None) -> DefaultInferenceService: """Get the inference service instance.""" @@ -16,10 +17,10 @@ def get_inference_service(v: Variables = None) -> DefaultInferenceService: __all__ = ( - 'InferenceService', - 'DefaultInferenceService', - 'InferenceServicePluginBase', - 'InferenceResult', - 'get_inference_service', - 'EXT_INFERENCE_SERVICE', + "InferenceService", + "DefaultInferenceService", + "InferenceServicePluginBase", + "InferenceResult", + "get_inference_service", + "EXT_INFERENCE_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/inference/base.py b/memorylayer-core-python/src/memorylayer_server/services/inference/base.py index 80ea6a3..6e10ac7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/inference/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/inference/base.py @@ -5,19 +5,19 @@ about an entity (subject). Unlike extraction (which captures what was said), inference derives what patterns and behaviors *mean* about the entity. 
""" + from abc import ABC, abstractmethod from dataclasses import dataclass, field -from typing import Optional -from ...config import MEMORYLAYER_INFERENCE_SERVICE, DEFAULT_MEMORYLAYER_INFERENCE_SERVICE +from ...config import DEFAULT_MEMORYLAYER_INFERENCE_SERVICE, MEMORYLAYER_INFERENCE_SERVICE from ...models.memory import Memory from .._constants import ( + EXT_ASSOCIATION_SERVICE, + EXT_CACHE_SERVICE, EXT_INFERENCE_SERVICE, + EXT_LLM_SERVICE, EXT_MEMORY_SERVICE, EXT_STORAGE_BACKEND, - EXT_LLM_SERVICE, - EXT_ASSOCIATION_SERVICE, - EXT_CACHE_SERVICE, ) from .._plugin_factory import make_service_plugin_base @@ -39,11 +39,11 @@ class InferenceService(ABC): @abstractmethod async def derive_insights( - self, - workspace_id: str, - subject_id: str, - observer_id: Optional[str] = None, - force: bool = False, + self, + workspace_id: str, + subject_id: str, + observer_id: str | None = None, + force: bool = False, ) -> InferenceResult: """Derive higher-order insights about a subject from accumulated memories. @@ -63,11 +63,11 @@ async def derive_insights( @abstractmethod async def get_insights( - self, - workspace_id: str, - subject_id: str, - observer_id: Optional[str] = None, - limit: int = 20, + self, + workspace_id: str, + subject_id: str, + observer_id: str | None = None, + limit: int = 20, ) -> list[Memory]: """Retrieve existing derived insights about a subject. diff --git a/memorylayer-core-python/src/memorylayer_server/services/inference/default.py b/memorylayer-core-python/src/memorylayer_server/services/inference/default.py index 767ab16..441bfde 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/inference/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/inference/default.py @@ -3,23 +3,27 @@ Derives higher-order insights from accumulated memories about entities. 
""" -from datetime import datetime, timezone + +from datetime import UTC, datetime from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import InferenceServicePluginBase, InferenceResult -from ..storage import StorageBackend, EXT_STORAGE_BACKEND -from ..memory import MemoryService, EXT_MEMORY_SERVICE -from ..association import AssociationService, EXT_ASSOCIATION_SERVICE -from ..cache import CacheService, EXT_CACHE_SERVICE -from ..llm import LLMService, EXT_LLM_SERVICE, LLMNotConfiguredError from ...models import ( - Memory, RememberInput, RecallInput, RecallMode, - MemoryType, MemorySubtype, MemoryStatus, + Memory, + MemorySubtype, + MemoryType, + RecallInput, + RecallMode, + RememberInput, ) +from ..association import EXT_ASSOCIATION_SERVICE, AssociationService +from ..cache import EXT_CACHE_SERVICE, CacheService +from ..llm import EXT_LLM_SERVICE, LLMNotConfiguredError, LLMService +from ..memory import EXT_MEMORY_SERVICE, MemoryService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend +from .base import InferenceResult, InferenceServicePluginBase # Cache TTL for insights (15 minutes) INSIGHTS_CACHE_TTL = 900 @@ -47,13 +51,13 @@ class DefaultInferenceService: """LLM-based inference service that derives insights from memory patterns.""" def __init__( - self, - storage: StorageBackend, - memory_service: MemoryService, - llm_service: Optional[LLMService] = None, - association_service: Optional[AssociationService] = None, - cache_service: Optional[CacheService] = None, - v: Variables = None, + self, + storage: StorageBackend, + memory_service: MemoryService, + llm_service: LLMService | None = None, + association_service: AssociationService | None = None, + cache_service: CacheService | None = None, + v: Variables = None, ): self.storage = storage self.memory_service = memory_service @@ -64,16 +68,15 @@ def __init__( self.logger.info("Initialized 
DefaultInferenceService") async def derive_insights( - self, - workspace_id: str, - subject_id: str, - observer_id: Optional[str] = None, - force: bool = False, + self, + workspace_id: str, + subject_id: str, + observer_id: str | None = None, + force: bool = False, ) -> InferenceResult: """Derive higher-order insights about a subject from accumulated memories.""" self.logger.info( - "Deriving insights for subject=%s in workspace=%s (observer=%s, force=%s)", - subject_id, workspace_id, observer_id, force + "Deriving insights for subject=%s in workspace=%s (observer=%s, force=%s)", subject_id, workspace_id, observer_id, force ) # Check cache unless forced @@ -94,9 +97,7 @@ async def derive_insights( ) # Recall all memories about this subject - source_memories = await self._gather_subject_memories( - workspace_id, subject_id, observer_id - ) + source_memories = await self._gather_subject_memories(workspace_id, subject_id, observer_id) if not source_memories: self.logger.info("No memories found for subject=%s, skipping inference", subject_id) @@ -106,9 +107,7 @@ async def derive_insights( source_memory_count=0, ) - self.logger.debug( - "Gathered %d source memories for subject=%s", len(source_memories), subject_id - ) + self.logger.debug("Gathered %d source memories for subject=%s", len(source_memories), subject_id) # Derive insights via LLM raw_insights = await self._derive_with_llm(source_memories, subject_id) @@ -149,18 +148,17 @@ async def derive_insights( ) self.logger.info( - "Derived %d insights for subject=%s from %d source memories", - len(created_insights), subject_id, len(source_memories) + "Derived %d insights for subject=%s from %d source memories", len(created_insights), subject_id, len(source_memories) ) return result async def get_insights( - self, - workspace_id: str, - subject_id: str, - observer_id: Optional[str] = None, - limit: int = 20, + self, + workspace_id: str, + subject_id: str, + observer_id: str | None = None, + limit: int = 20, ) -> 
list[Memory]: """Retrieve existing derived insights about a subject.""" # Search for INFERENCE-subtype memories about this subject @@ -188,10 +186,10 @@ async def get_insights( return [] async def _gather_subject_memories( - self, - workspace_id: str, - subject_id: str, - observer_id: Optional[str] = None, + self, + workspace_id: str, + subject_id: str, + observer_id: str | None = None, ) -> list[Memory]: """Gather all memories about a subject for analysis.""" # Use a broad query to get all relevant memories @@ -214,18 +212,15 @@ async def _gather_subject_memories( input=recall_input, ) # Exclude existing inferences to avoid circular derivation - return [ - m for m in result.memories - if m.subtype != MemorySubtype.INFERENCE - ] + return [m for m in result.memories if m.subtype != MemorySubtype.INFERENCE] except Exception as e: self.logger.error("Failed to gather memories for subject=%s: %s", subject_id, e) return [] async def _derive_with_llm( - self, - memories: list[Memory], - subject_id: str, + self, + memories: list[Memory], + subject_id: str, ) -> list[tuple[float, str]]: """Use LLM to derive insights from memories.""" if not self.llm: @@ -237,9 +232,7 @@ async def _derive_with_llm( for i, memory in enumerate(memories[:50], 1): # Cap at 50 memories type_label = memory.type.value.upper() subtype_label = f" ({memory.subtype.value})" if memory.subtype else "" - context_parts.append( - f"[{i}] {type_label}{subtype_label}: {memory.content}" - ) + context_parts.append(f"[{i}] {type_label}{subtype_label}: {memory.content}") context = "\n".join(context_parts) @@ -281,7 +274,7 @@ def _parse_insights(self, response: str) -> list[tuple[float, str]]: bracket_end = line.index("]") importance = float(line[1:bracket_end]) importance = max(0.0, min(1.0, importance)) - text = line[bracket_end + 1:].strip() + text = line[bracket_end + 1 :].strip() if text: insights.append((importance, text)) except (ValueError, IndexError): @@ -293,9 +286,9 @@ def _parse_insights(self, 
response: str) -> list[tuple[float, str]]: return insights def _derive_fallback( - self, - memories: list[Memory], - subject_id: str, + self, + memories: list[Memory], + subject_id: str, ) -> list[tuple[float, str]]: """Simple fallback inference without LLM. @@ -318,11 +311,7 @@ def _derive_fallback( for type_name, type_memories in by_type.items(): if len(type_memories) >= 2: importance = min(0.8, 0.3 + (len(type_memories) * 0.05)) - insights.append(( - importance, - f"Has {len(type_memories)} {type_name} memories indicating " - f"significant {type_name} patterns" - )) + insights.append((importance, f"Has {len(type_memories)} {type_name} memories indicating significant {type_name} patterns")) # Group by subtype for more specific insights by_subtype: dict[str, list[Memory]] = {} @@ -336,23 +325,19 @@ def _derive_fallback( for subtype_name, subtype_memories in by_subtype.items(): if len(subtype_memories) >= 2: importance = min(0.9, 0.4 + (len(subtype_memories) * 0.05)) - insights.append(( - importance, - f"Recurring {subtype_name} pattern observed across " - f"{len(subtype_memories)} memories" - )) + insights.append((importance, f"Recurring {subtype_name} pattern observed across {len(subtype_memories)} memories")) return insights[:10] # Cap at 10 insights async def _store_insight( - self, - workspace_id: str, - subject_id: str, - observer_id: Optional[str], - content: str, - importance: float, - source_memory_ids: list[str], - ) -> Optional[Memory]: + self, + workspace_id: str, + subject_id: str, + observer_id: str | None, + content: str, + importance: float, + source_memory_ids: list[str], + ) -> Memory | None: """Store a derived insight as a memory.""" try: input_data = RememberInput( @@ -363,7 +348,7 @@ async def _store_insight( tags=["inference", "derived"], metadata={ "source_memory_count": len(source_memory_ids), - "derived_at": datetime.now(timezone.utc).isoformat(), + "derived_at": datetime.now(UTC).isoformat(), }, observer_id=observer_id, 
subject_id=subject_id, @@ -379,6 +364,7 @@ async def _store_insight( for source_id in source_memory_ids[:5]: # Cap associations try: from ...models.association import AssociateInput + await self.association_service.create_association( workspace_id=workspace_id, input_data=AssociateInput( @@ -398,14 +384,15 @@ async def _store_insight( return None @staticmethod - def _cache_key(workspace_id: str, subject_id: str, observer_id: Optional[str]) -> str: + def _cache_key(workspace_id: str, subject_id: str, observer_id: str | None) -> str: obs = observer_id or "_any" return f"inference:{workspace_id}:{subject_id}:{obs}" class DefaultInferenceServicePlugin(InferenceServicePluginBase): """Default inference service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> DefaultInferenceService: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/llm/__init__.py index 6b7e5b1..0388913 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/llm/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/__init__.py @@ -1,17 +1,17 @@ """LLM service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( + EXT_LLM_REGISTRY, + EXT_LLM_SERVICE, LLMProvider, LLMProviderRegistryPluginBase, LLMServicePluginBase, - EXT_LLM_PROVIDER, - EXT_LLM_REGISTRY, - EXT_LLM_SERVICE, ) +from .noop import LLMNotConfiguredError from .registry import LLMProviderRegistry from .service_default import LLMService -from .noop import LLMNotConfiguredError - -from scitrera_app_framework import Variables, get_extension def get_llm_registry(v: Variables = None) -> LLMProviderRegistry: @@ -19,30 +19,20 @@ def get_llm_registry(v: Variables = None) -> LLMProviderRegistry: return get_extension(EXT_LLM_REGISTRY, v) -def 
get_llm_provider(v: Variables = None) -> LLMProvider: - """Get the default LLM provider instance. - - Backward-compatible convenience function that delegates through the registry. - """ - return get_llm_registry(v).get_provider("default") - - def get_llm_service(v: Variables = None) -> LLMService: """Get the LLM service instance.""" return get_extension(EXT_LLM_SERVICE, v) __all__ = ( - 'LLMProvider', - 'LLMProviderRegistry', - 'LLMProviderRegistryPluginBase', - 'LLMService', - 'LLMServicePluginBase', - 'get_llm_provider', - 'get_llm_registry', - 'get_llm_service', - 'EXT_LLM_PROVIDER', - 'EXT_LLM_REGISTRY', - 'EXT_LLM_SERVICE', - 'LLMNotConfiguredError', + "LLMProvider", + "LLMProviderRegistry", + "LLMProviderRegistryPluginBase", + "LLMService", + "LLMServicePluginBase", + "get_llm_registry", + "get_llm_service", + "EXT_LLM_REGISTRY", + "EXT_LLM_SERVICE", + "LLMNotConfiguredError", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/anthropic.py b/memorylayer-core-python/src/memorylayer_server/services/llm/anthropic.py index ffe6f52..8a231a8 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/llm/anthropic.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/anthropic.py @@ -1,13 +1,14 @@ """Anthropic Claude LLM provider.""" -from typing import AsyncIterator + +from collections.abc import AsyncIterator from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import LLMProvider from ...models.llm import LLMRequest, LLMResponse, LLMStreamChunk +from .base import LLMProvider -DEFAULT_LLM_ANTHROPIC_MODEL = 'claude-sonnet-4-20250514' +DEFAULT_LLM_ANTHROPIC_MODEL = "claude-sonnet-4-20250514" # Anthropic stop_reason -> our finish_reason _STOP_REASON_MAP = { @@ -24,12 +25,12 @@ class AnthropicLLMProvider(LLMProvider): """ def __init__( - self, - api_key: str, - model: str = DEFAULT_LLM_ANTHROPIC_MODEL, - default_max_tokens: int | None = None, - default_temperature: 
float | None = None, - v: Variables = None, + self, + api_key: str, + model: str = DEFAULT_LLM_ANTHROPIC_MODEL, + default_max_tokens: int | None = None, + default_temperature: float | None = None, + v: Variables = None, ): self.api_key = api_key self.model = model @@ -37,22 +38,19 @@ def __init__( self.default_temperature = default_temperature self._client = None self.logger = get_logger(v, name=self.__class__.__name__) - self.logger.info( - "Initialized AnthropicLLMProvider: model=%s", model - ) + self.logger.info("Initialized AnthropicLLMProvider: model=%s", model) def _get_client(self): """Lazy-load Anthropic async client.""" if self._client is None: try: from anthropic import AsyncAnthropic + self._client = AsyncAnthropic( api_key=self.api_key, ) except ImportError: - raise ImportError( - "anthropic package not installed. Install with: pip install anthropic" - ) + raise ImportError("anthropic package not installed. Install with: pip install anthropic") return self._client @staticmethod @@ -117,10 +115,7 @@ async def complete(self, request: LLMRequest) -> LLMResponse: finish_reason=_STOP_REASON_MAP.get(response.stop_reason, response.stop_reason or "stop"), ) - async def complete_stream( - self, - request: LLMRequest - ) -> AsyncIterator[LLMStreamChunk]: + async def complete_stream(self, request: LLMRequest) -> AsyncIterator[LLMStreamChunk]: """Generate streaming completion using Anthropic Messages API.""" client = self._get_client() diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/base.py b/memorylayer-core-python/src/memorylayer_server/services/llm/base.py index 326d340..0308812 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/llm/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/base.py @@ -1,19 +1,19 @@ """LLM Service - Pluggable LLM provider interface.""" + from abc import ABC, abstractmethod -from typing import AsyncIterator +from collections.abc import AsyncIterator from ...models.llm 
import LLMRequest, LLMResponse, LLMStreamChunk - -from .._constants import EXT_LLM_PROVIDER, EXT_LLM_SERVICE, EXT_LLM_REGISTRY +from .._constants import EXT_LLM_REGISTRY, EXT_LLM_SERVICE from .._plugin_factory import make_service_plugin_base # Registry config constants -MEMORYLAYER_LLM_REGISTRY = 'MEMORYLAYER_LLM_REGISTRY' -DEFAULT_MEMORYLAYER_LLM_REGISTRY = 'default' +MEMORYLAYER_LLM_REGISTRY = "MEMORYLAYER_LLM_REGISTRY" +DEFAULT_MEMORYLAYER_LLM_REGISTRY = "default" # Service config constants -MEMORYLAYER_LLM_SERVICE = 'MEMORYLAYER_LLM_SERVICE' -DEFAULT_MEMORYLAYER_LLM_SERVICE = 'default' +MEMORYLAYER_LLM_SERVICE = "MEMORYLAYER_LLM_SERVICE" +DEFAULT_MEMORYLAYER_LLM_SERVICE = "default" class LLMProvider(ABC): @@ -58,10 +58,7 @@ async def complete(self, request: LLMRequest) -> LLMResponse: pass @abstractmethod - async def complete_stream( - self, - request: LLMRequest - ) -> AsyncIterator[LLMStreamChunk]: + async def complete_stream(self, request: LLMRequest) -> AsyncIterator[LLMStreamChunk]: """Generate streaming completion. 
Args: diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/google.py b/memorylayer-core-python/src/memorylayer_server/services/llm/google.py index 7624bc3..2640ffd 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/llm/google.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/google.py @@ -1,13 +1,14 @@ """Google GenAI (Gemini) LLM provider.""" -from typing import AsyncIterator + +from collections.abc import AsyncIterator from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import LLMProvider from ...models.llm import LLMRequest, LLMResponse, LLMStreamChunk +from .base import LLMProvider -DEFAULT_LLM_GOOGLE_MODEL = 'gemini-3-flash-preview' +DEFAULT_LLM_GOOGLE_MODEL = "gemini-3-flash-preview" # Google finish_reason -> our finish_reason _FINISH_REASON_MAP = { @@ -27,12 +28,12 @@ class GoogleLLMProvider(LLMProvider): """ def __init__( - self, - api_key: str, - model: str = DEFAULT_LLM_GOOGLE_MODEL, - default_max_tokens: int | None = None, - default_temperature: float | None = None, - v: Variables = None, + self, + api_key: str, + model: str = DEFAULT_LLM_GOOGLE_MODEL, + default_max_tokens: int | None = None, + default_temperature: float | None = None, + v: Variables = None, ): self.api_key = api_key self.model = model @@ -40,20 +41,17 @@ def __init__( self.default_temperature = default_temperature self._client = None self.logger = get_logger(v, name=self.__class__.__name__) - self.logger.info( - "Initialized GoogleLLMProvider: model=%s", model - ) + self.logger.info("Initialized GoogleLLMProvider: model=%s", model) def _get_client(self): """Lazy-load Google GenAI client.""" if self._client is None: try: from google import genai + self._client = genai.Client(api_key=self.api_key) except ImportError: - raise ImportError( - "google-genai package not installed. Install with: pip install google-genai" - ) + raise ImportError("google-genai package not installed. 
Install with: pip install google-genai") return self._client @staticmethod @@ -81,8 +79,7 @@ def _extract_messages(request: LLMRequest): return system_text, messages @staticmethod - def _build_request(system_text, messages, request: LLMRequest, - max_tokens: int | None = None, temperature: float | None = None): + def _build_request(system_text, messages, request: LLMRequest, max_tokens: int | None = None, temperature: float | None = None): """Build Google GenAI SDK types from extracted messages. Requires google-genai to be installed. Called only at API call time. @@ -125,7 +122,11 @@ async def complete(self, request: LLMRequest) -> LLMResponse: system_text, messages = self._extract_messages(request) max_tokens, temperature = self.resolve_params(request) contents, config = self._build_request( - system_text, messages, request, max_tokens=max_tokens, temperature=temperature, + system_text, + messages, + request, + max_tokens=max_tokens, + temperature=temperature, ) model = request.model or self.model @@ -159,17 +160,18 @@ async def complete(self, request: LLMRequest) -> LLMResponse: finish_reason=finish_reason, ) - async def complete_stream( - self, - request: LLMRequest - ) -> AsyncIterator[LLMStreamChunk]: + async def complete_stream(self, request: LLMRequest) -> AsyncIterator[LLMStreamChunk]: """Generate streaming completion using Google GenAI API.""" client = self._get_client() system_text, messages = self._extract_messages(request) max_tokens, temperature = self.resolve_params(request) contents, config = self._build_request( - system_text, messages, request, max_tokens=max_tokens, temperature=temperature, + system_text, + messages, + request, + max_tokens=max_tokens, + temperature=temperature, ) model = request.model or self.model diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/noop.py b/memorylayer-core-python/src/memorylayer_server/services/llm/noop.py index 25b3e8d..716e6b7 100644 --- 
a/memorylayer-core-python/src/memorylayer_server/services/llm/noop.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/noop.py @@ -1,15 +1,17 @@ """No-op LLM provider - raises NotConfigured (OSS default).""" -from typing import AsyncIterator + +from collections.abc import AsyncIterator from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import LLMProvider from ...models.llm import LLMRequest, LLMResponse, LLMStreamChunk +from .base import LLMProvider class LLMNotConfiguredError(Exception): """Raised when LLM is used but not configured.""" + pass @@ -35,10 +37,7 @@ async def complete(self, request: LLMRequest) -> LLMResponse: "and MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL to enable LLM features." ) - async def complete_stream( - self, - request: LLMRequest - ) -> AsyncIterator[LLMStreamChunk]: + async def complete_stream(self, request: LLMRequest) -> AsyncIterator[LLMStreamChunk]: raise LLMNotConfiguredError( "LLM provider not configured. Set MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER " "and MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL to enable LLM features." 
diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/openai.py b/memorylayer-core-python/src/memorylayer_server/services/llm/openai.py index 9be2a3d..37ecdc7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/llm/openai.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/openai.py @@ -1,13 +1,14 @@ """OpenAI-compatible LLM provider.""" -from typing import AsyncIterator + +from collections.abc import AsyncIterator from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import LLMProvider from ...models.llm import LLMRequest, LLMResponse, LLMStreamChunk +from .base import LLMProvider -DEFAULT_LLM_OPENAI_MODEL = 'gpt-5-nano' +DEFAULT_LLM_OPENAI_MODEL = "gpt-5-nano" class OpenAILLMProvider(LLMProvider): @@ -18,13 +19,13 @@ class OpenAILLMProvider(LLMProvider): """ def __init__( - self, - api_key: str, - base_url: str = None, - model: str = DEFAULT_LLM_OPENAI_MODEL, - default_max_tokens: int | None = None, - default_temperature: float | None = None, - v: Variables = None, + self, + api_key: str, + base_url: str = None, + model: str = DEFAULT_LLM_OPENAI_MODEL, + default_max_tokens: int | None = None, + default_temperature: float | None = None, + v: Variables = None, ): self.api_key = api_key self.base_url = base_url @@ -33,34 +34,27 @@ def __init__( self.default_temperature = default_temperature self._client = None self.logger = get_logger(v, name=self.__class__.__name__) - self.logger.info( - "Initialized OpenAILLMProvider: base_url=%s, model=%s", - base_url, model - ) + self.logger.info("Initialized OpenAILLMProvider: base_url=%s, model=%s", base_url, model) def _get_client(self): """Lazy-load OpenAI async client.""" if self._client is None: try: from openai import AsyncOpenAI + self._client = AsyncOpenAI( api_key=self.api_key, base_url=self.base_url, ) except ImportError: - raise ImportError( - "openai package not installed. 
Install with: pip install openai" - ) + raise ImportError("openai package not installed. Install with: pip install openai") return self._client async def complete(self, request: LLMRequest) -> LLMResponse: """Generate completion using OpenAI API.""" client = self._get_client() - messages = [ - {"role": msg.role, "content": msg.content} - for msg in request.messages - ] + messages = [{"role": msg.role, "content": msg.content} for msg in request.messages] model = request.model or self.model max_tokens, temperature = self.resolve_params(request) @@ -91,17 +85,11 @@ async def complete(self, request: LLMRequest) -> LLMResponse: finish_reason=choice.finish_reason or "stop", ) - async def complete_stream( - self, - request: LLMRequest - ) -> AsyncIterator[LLMStreamChunk]: + async def complete_stream(self, request: LLMRequest) -> AsyncIterator[LLMStreamChunk]: """Generate streaming completion using OpenAI API.""" client = self._get_client() - messages = [ - {"role": msg.role, "content": msg.content} - for msg in request.messages - ] + messages = [{"role": msg.role, "content": msg.content} for msg in request.messages] model = request.model or self.model max_tokens, temperature = self.resolve_params(request) diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/registry.py b/memorylayer-core-python/src/memorylayer_server/services/llm/registry.py index 8ad593c..907935b 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/llm/registry.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/registry.py @@ -1,12 +1,13 @@ """LLM Provider Registry - profile-based provider routing.""" + +from collections.abc import AsyncIterator from logging import Logger -from typing import AsyncIterator -from scitrera_app_framework import get_logger, Variables +from scitrera_app_framework import Variables -from .base import LLMProvider, LLMProviderRegistryPluginBase, EXT_LLM_REGISTRY -from .noop import NoOpLLMProvider from ...models.llm import 
LLMRequest, LLMResponse, LLMStreamChunk +from .base import LLMProvider, LLMProviderRegistryPluginBase +from .noop import NoOpLLMProvider class LLMProviderRegistry: @@ -35,9 +36,7 @@ async def complete(self, request: LLMRequest, profile: str = "default") -> LLMRe provider = self.get_provider(profile) return await provider.complete(request) - async def complete_stream( - self, request: LLMRequest, profile: str = "default" - ) -> AsyncIterator[LLMStreamChunk]: + async def complete_stream(self, request: LLMRequest, profile: str = "default") -> AsyncIterator[LLMStreamChunk]: """Route streaming request to the provider for the given profile.""" provider = self.get_provider(profile) async for chunk in provider.complete_stream(request): @@ -87,21 +86,34 @@ def create_provider_from_config( if provider_type == "openai": from .openai import OpenAILLMProvider + return OpenAILLMProvider( - api_key=api_key, base_url=base_url, **model_kwarg, - default_max_tokens=max_tokens, default_temperature=temperature, v=v, + api_key=api_key, + base_url=base_url, + **model_kwarg, + default_max_tokens=max_tokens, + default_temperature=temperature, + v=v, ) elif provider_type == "anthropic": from .anthropic import AnthropicLLMProvider + return AnthropicLLMProvider( - api_key=api_key, **model_kwarg, - default_max_tokens=max_tokens, default_temperature=temperature, v=v, + api_key=api_key, + **model_kwarg, + default_max_tokens=max_tokens, + default_temperature=temperature, + v=v, ) elif provider_type == "google": from .google import GoogleLLMProvider + return GoogleLLMProvider( - api_key=api_key, **model_kwarg, - default_max_tokens=max_tokens, default_temperature=temperature, v=v, + api_key=api_key, + **model_kwarg, + default_max_tokens=max_tokens, + default_temperature=temperature, + v=v, ) elif provider_type == "noop": return NoOpLLMProvider(v=v) @@ -123,7 +135,8 @@ class DefaultLLMProviderRegistryPlugin(LLMProviderRegistryPluginBase): Activity-to-profile assignment uses: MEMORYLAYER_LLM_ASSIGN_= 
""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> LLMProviderRegistry: """Build LLM provider registry from environment configuration. @@ -132,18 +145,18 @@ def initialize(self, v: Variables, logger: Logger) -> LLMProviderRegistry: from either real environment variables **or** values already loaded into the ``Variables`` instance (converged configuration). """ - known_fields = {'provider', 'base_url', 'api_key', 'model', 'max_tokens', 'temperature'} + known_fields = {"provider", "base_url", "api_key", "model", "max_tokens", "temperature"} # Import MEMORYLAYER_LLM_PROFILE_* into Variables and get flattened dict. # Keys are lowercased with prefix stripped, e.g. "default_provider". - profile_vars = v.import_from_env_by_prefix('MEMORYLAYER_LLM_PROFILE') + profile_vars = v.import_from_env_by_prefix("MEMORYLAYER_LLM_PROFILE") # Discover profile names by stripping known field suffixes from keys profile_names: set[str] = set() for key in profile_vars: for fld in known_fields: - if key.endswith(f'_{fld}'): - name = key[:-(len(fld) + 1)] + if key.endswith(f"_{fld}"): + name = key[: -(len(fld) + 1)] if name: profile_names.add(name) break @@ -151,53 +164,57 @@ def initialize(self, v: Variables, logger: Logger) -> LLMProviderRegistry: # Build providers from discovered profiles providers: dict[str, LLMProvider] = {} for name in sorted(profile_names): - provider_type = profile_vars.get(f'{name}_provider') + provider_type = profile_vars.get(f"{name}_provider") if not provider_type: logger.warning("LLM profile '%s' missing PROVIDER, skipping", name) continue - model = profile_vars.get(f'{name}_model') # None = provider default + model = profile_vars.get(f"{name}_model") # None = provider default - max_tokens_raw = profile_vars.get(f'{name}_max_tokens') + max_tokens_raw = profile_vars.get(f"{name}_max_tokens") max_tokens = int(max_tokens_raw) if max_tokens_raw is not None else None - temp_raw = 
profile_vars.get(f'{name}_temperature') + temp_raw = profile_vars.get(f"{name}_temperature") temperature = float(temp_raw) if temp_raw is not None else None provider = create_provider_from_config( - name=name, provider_type=provider_type, model=model, - base_url=profile_vars.get(f'{name}_base_url'), - api_key=profile_vars.get(f'{name}_api_key'), - max_tokens=max_tokens, temperature=temperature, v=v, + name=name, + provider_type=provider_type, + model=model, + base_url=profile_vars.get(f"{name}_base_url"), + api_key=profile_vars.get(f"{name}_api_key"), + max_tokens=max_tokens, + temperature=temperature, + v=v, ) # Post-init validation: provider must resolve to a non-None model if provider.default_model is None: logger.warning( "LLM profile '%s' (%s) resolved to model=None, skipping", - name, provider_type, + name, + provider_type, ) continue providers[name] = provider logger.info("LLM registry: profile '%s' (%s/%s)", name, provider_type, provider.default_model) - if 'default' not in providers: + if "default" not in providers: logger.info("No LLM profiles configured, using NoOp provider for 'default'") - providers['default'] = NoOpLLMProvider(v=v) + providers["default"] = NoOpLLMProvider(v=v) # Read activity-to-profile assignments via Variables - assign_vars = v.import_from_env_by_prefix('MEMORYLAYER_LLM_ASSIGN') - profile_map: dict[str, str] = { - activity: str(profile_name).lower() - for activity, profile_name in assign_vars.items() - } + assign_vars = v.import_from_env_by_prefix("MEMORYLAYER_LLM_ASSIGN") + profile_map: dict[str, str] = {activity: str(profile_name).lower() for activity, profile_name in assign_vars.items()} for activity, profile_name in profile_map.items(): logger.info("LLM registry: assign '%s' -> profile '%s'", activity, profile_name) registry = LLMProviderRegistry(providers=providers, profile_map=profile_map) logger.info( "LLM registry initialized: %d profiles (%s), %d assignments", - len(registry.profile_names), ', 
'.join(registry.profile_names), len(profile_map), + len(registry.profile_names), + ", ".join(registry.profile_names), + len(profile_map), ) return registry diff --git a/memorylayer-core-python/src/memorylayer_server/services/llm/service_default.py b/memorylayer-core-python/src/memorylayer_server/services/llm/service_default.py index 36634b9..8b3bbee 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/llm/service_default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/llm/service_default.py @@ -1,11 +1,12 @@ """Default LLM service implementation.""" + +from collections.abc import AsyncIterator from logging import Logger -from typing import AsyncIterator, Optional, List -from scitrera_app_framework import get_logger, Variables +from scitrera_app_framework import Variables, get_logger -from ...models.llm import LLMRequest, LLMResponse, LLMStreamChunk, LLMMessage, LLMRole -from .base import LLMProvider, EXT_LLM_REGISTRY, LLMServicePluginBase +from ...models.llm import LLMMessage, LLMRequest, LLMResponse, LLMRole, LLMStreamChunk +from .base import EXT_LLM_REGISTRY, LLMServicePluginBase from .registry import LLMProviderRegistry @@ -21,32 +22,27 @@ def __init__(self, registry: LLMProviderRegistry, v: Variables = None): self.registry = registry self.logger = get_logger(v, name=self.__class__.__name__) - @property - def provider(self) -> LLMProvider: - """Default provider for backward compatibility.""" - return self.registry.get_provider("default") - async def complete(self, request: LLMRequest, profile: str = "default") -> LLMResponse: """Route completion to the provider for the given profile.""" return await self.registry.complete(request, profile=profile) async def complete_stream( - self, - request: LLMRequest, - profile: str = "default", + self, + request: LLMRequest, + profile: str = "default", ) -> AsyncIterator[LLMStreamChunk]: """Route streaming completion to the provider for the given profile.""" async for chunk in 
self.registry.complete_stream(request, profile=profile): yield chunk async def synthesize( - self, - prompt: str, - context: Optional[str] = None, - max_tokens: int = None, - temperature: float = None, - temperature_factor: float = None, - profile: str = "default", + self, + prompt: str, + context: str | None = None, + max_tokens: int = None, + temperature: float = None, + temperature_factor: float = None, + profile: str = "default", ) -> str: """Simple synthesis - prompt with optional context. @@ -64,10 +60,7 @@ async def synthesize( messages = [] if context: - messages.append(LLMMessage( - role=LLMRole.SYSTEM, - content=f"Use this context to inform your response:\n\n{context}" - )) + messages.append(LLMMessage(role=LLMRole.SYSTEM, content=f"Use this context to inform your response:\n\n{context}")) messages.append(LLMMessage(role=LLMRole.USER, content=prompt)) @@ -82,11 +75,11 @@ async def synthesize( return response.content async def answer_question( - self, - question: str, - memories: List[str], - max_tokens: int = 500, - profile: str = "default", + self, + question: str, + memories: list[str], + max_tokens: int = 500, + profile: str = "default", ) -> str: """Answer question using memories as context. 
@@ -124,18 +117,19 @@ async def answer_question( @property def default_model(self) -> str: - """Default model from provider.""" - return self.provider.default_model + """Default model from the default profile provider.""" + return self.registry.get_provider("default").default_model @property def supports_streaming(self) -> bool: - """Streaming support from provider.""" - return self.provider.supports_streaming + """Streaming support from the default profile provider.""" + return self.registry.get_provider("default").supports_streaming class DefaultLLMServicePlugin(LLMServicePluginBase): """Plugin for default LLM service.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> LLMService: registry: LLMProviderRegistry = self.get_extension(EXT_LLM_REGISTRY, v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/memory/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/memory/__init__.py index e694733..f76cfd7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/memory/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/memory/__init__.py @@ -1,12 +1,13 @@ """Memory service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - MemoryServicePluginBase, EXT_MEMORY_SERVICE, + MemoryServicePluginBase, ) from .default import DefaultMemoryServicePlugin, MemoryService -from scitrera_app_framework import Variables, get_extension - def get_memory_service(v: Variables = None) -> MemoryService: """Get the memory service instance.""" @@ -14,9 +15,9 @@ def get_memory_service(v: Variables = None) -> MemoryService: __all__ = ( - 'MemoryService', - 'MemoryServicePluginBase', - 'get_memory_service', - 'EXT_MEMORY_SERVICE', - 'DefaultMemoryServicePlugin', + "MemoryService", + "MemoryServicePluginBase", + "get_memory_service", + "EXT_MEMORY_SERVICE", + "DefaultMemoryServicePlugin", ) diff --git 
a/memorylayer-core-python/src/memorylayer_server/services/memory/base.py b/memorylayer-core-python/src/memorylayer_server/services/memory/base.py index c81e996..81b4cf0 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/memory/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/memory/base.py @@ -1,4 +1,4 @@ -from ...config import MEMORYLAYER_MEMORY_SERVICE, DEFAULT_MEMORYLAYER_MEMORY_SERVICE +from ...config import DEFAULT_MEMORYLAYER_MEMORY_SERVICE, MEMORYLAYER_MEMORY_SERVICE from .._constants import ( EXT_CACHE_SERVICE, EXT_CONTRADICTION_SERVICE, @@ -13,19 +13,19 @@ from .._plugin_factory import make_service_plugin_base # Recall overfetch multiplier for reranker candidate pool -MEMORYLAYER_MEMORY_RECALL_OVERFETCH = 'MEMORYLAYER_MEMORY_RECALL_OVERFETCH' +MEMORYLAYER_MEMORY_RECALL_OVERFETCH = "MEMORYLAYER_MEMORY_RECALL_OVERFETCH" DEFAULT_MEMORYLAYER_MEMORY_RECALL_OVERFETCH = 3 # Maximum memories discovered via association graph expansion -MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION = 'MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION' +MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION = "MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION" DEFAULT_MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION = 50 # Default include_associations for recall (graph expansion enabled by default) -MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS = 'MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS' +MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS = "MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS" DEFAULT_MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS = True # Default traverse_depth for recall (multi-hop graph traversal) -MEMORYLAYER_MEMORY_TRAVERSE_DEPTH = 'MEMORYLAYER_MEMORY_TRAVERSE_DEPTH' +MEMORYLAYER_MEMORY_TRAVERSE_DEPTH = "MEMORYLAYER_MEMORY_TRAVERSE_DEPTH" DEFAULT_MEMORYLAYER_MEMORY_TRAVERSE_DEPTH = 2 diff --git a/memorylayer-core-python/src/memorylayer_server/services/memory/default.py b/memorylayer-core-python/src/memorylayer_server/services/memory/default.py index c000f14..2f27754 100644 --- 
a/memorylayer-core-python/src/memorylayer_server/services/memory/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/memory/default.py @@ -8,69 +8,69 @@ - decay: Reduce memory importance over time - get: Retrieve single memory by ID """ + import asyncio import json import math import re from dataclasses import dataclass -from datetime import datetime, timezone -from logging import Logger, DEBUG -from typing import Optional, Any, TYPE_CHECKING - -from scitrera_app_framework import get_logger, get_extension, Variables +from datetime import UTC, datetime +from logging import DEBUG, Logger +from typing import TYPE_CHECKING, Any, Optional -from ...models import RememberInput, Memory, RecallInput, RecallResult, RecallMode, MemoryType, MemoryStatus, SearchTolerance, DetailLevel -from ...utils import compute_content_hash, generate_id +from scitrera_app_framework import Variables, get_logger -from ..cache import CacheService, EXT_CACHE_SERVICE -from ..contradiction import ContradictionService, EXT_CONTRADICTION_SERVICE -from ..decay import DecayService, EXT_DECAY_SERVICE -from ..deduplication import DeduplicationService, EXT_DEDUPLICATION_SERVICE, DeduplicationAction -from ..extraction import EXT_EXTRACTION_SERVICE, ExtractionService -from ..llm import LLMService, EXT_LLM_SERVICE -from ..storage import StorageBackend, EXT_STORAGE_BACKEND -from ..embedding import EmbeddingService, EXT_EMBEDDING_SERVICE -from ..semantic_tiering import SemanticTieringService, EXT_SEMANTIC_TIERING_SERVICE -from ..reranker import RerankerService, EXT_RERANKER_SERVICE +from ...models import DetailLevel, Memory, MemoryStatus, MemoryType, RecallInput, RecallMode, RecallResult, RememberInput, SearchTolerance +from ...utils import compute_content_hash from .._constants import EXT_TASK_SERVICE +from ..cache import EXT_CACHE_SERVICE +from ..contradiction import EXT_CONTRADICTION_SERVICE, ContradictionService +from ..decay import EXT_DECAY_SERVICE, DecayService +from ..deduplication 
import EXT_DEDUPLICATION_SERVICE, DeduplicationAction, DeduplicationService +from ..embedding import EXT_EMBEDDING_SERVICE, EmbeddingService +from ..extraction import EXT_EXTRACTION_SERVICE, ExtractionService +from ..llm import EXT_LLM_SERVICE, LLMService +from ..reranker import EXT_RERANKER_SERVICE, RerankerService +from ..semantic_tiering import EXT_SEMANTIC_TIERING_SERVICE, SemanticTieringService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend if TYPE_CHECKING: from ..tasks import TaskService -from ..association import ( - AssociationService, EXT_ASSOCIATION_SERVICE, - MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, - DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, -) - -from ...config import DEFAULT_CONTEXT_ID, GLOBAL_WORKSPACE_ID - -from .base import ( - MemoryServicePluginBase, - MEMORYLAYER_MEMORY_RECALL_OVERFETCH, - DEFAULT_MEMORYLAYER_MEMORY_RECALL_OVERFETCH, - MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION, - DEFAULT_MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION, - MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS, - DEFAULT_MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS, - MEMORYLAYER_MEMORY_TRAVERSE_DEPTH, - DEFAULT_MEMORYLAYER_MEMORY_TRAVERSE_DEPTH, -) from ...config import ( - DEFAULT_RECENCY_WEIGHT, - DEFAULT_RECENCY_HALF_LIFE_HOURS, - MEMORYLAYER_FACT_DECOMPOSITION_ENABLED, + DEFAULT_CONTEXT_ID, DEFAULT_MEMORYLAYER_FACT_DECOMPOSITION_ENABLED, - MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH, DEFAULT_MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH, - MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED, + DEFAULT_MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS, DEFAULT_MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED, + DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT, + DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE, + DEFAULT_RECENCY_HALF_LIFE_HOURS, + DEFAULT_RECENCY_WEIGHT, + GLOBAL_WORKSPACE_ID, + MEMORYLAYER_FACT_DECOMPOSITION_ENABLED, + MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH, MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS, - DEFAULT_MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS, + MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED, 
MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT, - DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT, MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE, - DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE, +) +from ..association import ( + DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, + EXT_ASSOCIATION_SERVICE, + MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, + AssociationService, +) +from .base import ( + DEFAULT_MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS, + DEFAULT_MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION, + DEFAULT_MEMORYLAYER_MEMORY_RECALL_OVERFETCH, + DEFAULT_MEMORYLAYER_MEMORY_TRAVERSE_DEPTH, + MEMORYLAYER_MEMORY_INCLUDE_ASSOCIATIONS, + MEMORYLAYER_MEMORY_MAX_GRAPH_EXPANSION, + MEMORYLAYER_MEMORY_RECALL_OVERFETCH, + MEMORYLAYER_MEMORY_TRAVERSE_DEPTH, + MemoryServicePluginBase, ) # Internal constant for LLM recall token budget @@ -80,6 +80,7 @@ @dataclass class ScopeBoosts: """Configuration for locality-based score boosting.""" + same_context: float = 1.5 # 50% boost for same context same_workspace: float = 1.2 # 20% boost for same workspace global_workspace: float = 1.0 # No boost for global @@ -110,20 +111,20 @@ def _exponential_freshness(age: float, half_life: float) -> float: return math.exp(-math.log(2) * age / half_life) def __init__( - self, - storage: StorageBackend, - embedding_service: EmbeddingService, - deduplication_service: DeduplicationService, - association_service: Optional[AssociationService] = None, - cache: Optional[Any] = None, - v: Variables = None, - tier_generation_service: Optional[SemanticTieringService] = None, - llm_service: Optional[LLMService] = None, - reranker_service: Optional[RerankerService] = None, - decay_service: Optional[DecayService] = None, - contradiction_service: Optional[ContradictionService] = None, - task_service: Optional["TaskService"] = None, - extraction_service: Optional[ExtractionService] = None, + self, + storage: StorageBackend, + embedding_service: EmbeddingService, + deduplication_service: DeduplicationService, + 
association_service: AssociationService | None = None, + cache: Any | None = None, + v: Variables = None, + tier_generation_service: SemanticTieringService | None = None, + llm_service: LLMService | None = None, + reranker_service: RerankerService | None = None, + decay_service: DecayService | None = None, + contradiction_service: ContradictionService | None = None, + task_service: Optional["TaskService"] = None, + extraction_service: ExtractionService | None = None, ): self.storage = storage self.embedding = embedding_service @@ -142,8 +143,7 @@ def __init__( # Get auto-association threshold from config self.auto_association_threshold = v.get( - MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, - DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD + MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, DEFAULT_MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD ) # Fact decomposition config @@ -214,28 +214,32 @@ def __init__( def _recall_cache_key(self, workspace_id: str, query: str, input: RecallInput) -> str: """Generate a deterministic cache key for recall results.""" - filter_data = json.dumps({ - "types": [t.value if hasattr(t, 'value') else str(t) for t in (input.types or [])], - "subtypes": [s.value if hasattr(s, 'value') else str(s) for s in (input.subtypes or [])], - "tags": sorted(input.tags or []), - "mode": input.mode.value if input.mode and hasattr(input.mode, 'value') else str(input.mode), - "tolerance": input.tolerance.value if input.tolerance and hasattr(input.tolerance, 'value') else str(input.tolerance), - "limit": input.limit, - "context_id": input.context_id, - "detail_level": input.detail_level.value if input.detail_level and hasattr(input.detail_level, 'value') else str( - input.detail_level), - }, sort_keys=True) + filter_data = json.dumps( + { + "types": [t.value if hasattr(t, "value") else str(t) for t in (input.types or [])], + "subtypes": [s.value if hasattr(s, "value") else str(s) for s in (input.subtypes or [])], + "tags": sorted(input.tags or []), + 
"mode": input.mode.value if input.mode and hasattr(input.mode, "value") else str(input.mode), + "tolerance": input.tolerance.value if input.tolerance and hasattr(input.tolerance, "value") else str(input.tolerance), + "limit": input.limit, + "context_id": input.context_id, + "detail_level": input.detail_level.value + if input.detail_level and hasattr(input.detail_level, "value") + else str(input.detail_level), + }, + sort_keys=True, + ) hash_input = f"{query}|{filter_data}" key_hash = compute_content_hash(hash_input)[:16] return f"recall:{workspace_id}:{key_hash}" # noinspection PyShadowingBuiltins async def remember( - self, - workspace_id: str, - input: RememberInput, - user_id: Optional[str] = None, - inline: bool = False, + self, + workspace_id: str, + input: RememberInput, + user_id: str | None = None, + inline: bool = False, ) -> Memory: """ Store a new memory. @@ -271,39 +275,25 @@ async def remember( content_hash = compute_content_hash(input.content) # 2. Generate embedding (needed for deduplication) - start_time = datetime.now(timezone.utc) + start_time = datetime.now(UTC) embedding = await self.embedding.embed(input.content) if self.logger.isEnabledFor(DEBUG): - self.logger.debug( - "Generated embedding in %s ms", - (datetime.now(timezone.utc) - start_time).total_seconds() * 1000 - ) + self.logger.debug("Generated embedding in %s ms", (datetime.now(UTC) - start_time).total_seconds() * 1000) # 3. 
Check for duplicates using DeduplicationService dedup_result = await self.deduplication.check_duplicate( - content=input.content, - content_hash=content_hash, - embedding=embedding, - workspace_id=workspace_id + content=input.content, content_hash=content_hash, embedding=embedding, workspace_id=workspace_id ) if dedup_result.action == DeduplicationAction.SKIP: # Exact duplicate found, return existing memory - self.logger.info( - "Found duplicate memory: %s (%s)", - dedup_result.existing_memory_id, - dedup_result.reason - ) + self.logger.info("Found duplicate memory: %s (%s)", dedup_result.existing_memory_id, dedup_result.reason) existing = await self.storage.get_memory(workspace_id, dedup_result.existing_memory_id, track_access=False) return existing elif dedup_result.action == DeduplicationAction.UPDATE: # Semantic duplicate found, update existing memory - self.logger.info( - "Updating existing memory: %s (%s)", - dedup_result.existing_memory_id, - dedup_result.reason - ) + self.logger.info("Updating existing memory: %s (%s)", dedup_result.existing_memory_id, dedup_result.reason) updated = await self.storage.update_memory( workspace_id=workspace_id, memory_id=dedup_result.existing_memory_id, @@ -314,11 +304,7 @@ async def remember( return updated elif dedup_result.action == DeduplicationAction.MERGE: - self.logger.info( - "Merging with existing memory: %s (%s)", - dedup_result.existing_memory_id, - dedup_result.reason - ) + self.logger.info("Merging with existing memory: %s (%s)", dedup_result.existing_memory_id, dedup_result.reason) existing = await self.storage.get_memory(workspace_id, dedup_result.existing_memory_id, track_access=False) updated = await self._merge_memories(workspace_id, existing, input.content, input.tags, input.metadata, input.importance) return updated @@ -375,14 +361,15 @@ async def remember( elif self.task_service: try: await self.task_service.schedule_task( - 'decompose_facts', - {'memory_id': memory.id, 'workspace_id': workspace_id}, + 
"decompose_facts", + {"memory_id": memory.id, "workspace_id": workspace_id}, ) self.logger.debug("Scheduled fact decomposition for memory %s", memory.id) except Exception as e: self.logger.warning( "Failed to schedule decomposition for %s, running post-store on composite: %s", - memory.id, e, + memory.id, + e, ) # Fallback: run post-store pipeline on composite await self._post_store_pipeline(workspace_id, memory, embedding, inline=False) @@ -395,12 +382,12 @@ async def remember( return memory async def _post_store_pipeline( - self, - workspace_id: str, - memory: Memory, - embedding: list[float], - inline: bool = False, - classify_type: bool = False, + self, + workspace_id: str, + memory: Memory, + embedding: list[float], + inline: bool = False, + classify_type: bool = False, ) -> None: """Run post-store normalization: cache invalidation, association, contradiction, tier gen. @@ -447,26 +434,30 @@ async def _post_store_pipeline( await self._inline_auto_enrich(workspace_id, memory, embedding, classify_type=classify_type) else: try: - await self.task_service.schedule_task('auto_enrich', { - 'memory_id': memory.id, - 'workspace_id': workspace_id, - 'content': memory.content, - 'classify_type': classify_type, - }) + await self.task_service.schedule_task( + "auto_enrich", + { + "memory_id": memory.id, + "workspace_id": workspace_id, + "content": memory.content, + "classify_type": classify_type, + }, + ) except Exception as e: self.logger.warning( "Failed to schedule auto-enrich for %s, falling back to inline: %s", - memory.id, e, + memory.id, + e, ) await self._inline_auto_enrich(workspace_id, memory, embedding, classify_type=classify_type) async def ingest_fact( - self, - workspace_id: str, - input: RememberInput, - embedding: list[float] | None = None, - source_memory_id: str | None = None, - inline: bool = False, + self, + workspace_id: str, + input: RememberInput, + embedding: list[float] | None = None, + source_memory_id: str | None = None, + inline: bool = False, ) 
-> Memory | None: """Process a single memory through the full pipeline: dedup, store, post-store. @@ -501,14 +492,16 @@ async def ingest_fact( if dedup_result.action == DeduplicationAction.SKIP: self.logger.info( "Fact is duplicate (SKIP): %s (%s)", - dedup_result.existing_memory_id, dedup_result.reason, + dedup_result.existing_memory_id, + dedup_result.reason, ) return None if dedup_result.action == DeduplicationAction.UPDATE: self.logger.info( "Fact updates existing memory: %s (%s)", - dedup_result.existing_memory_id, dedup_result.reason, + dedup_result.existing_memory_id, + dedup_result.reason, ) updated = await self.storage.update_memory( workspace_id=workspace_id, @@ -523,10 +516,13 @@ async def ingest_fact( if dedup_result.action == DeduplicationAction.MERGE: self.logger.info( "Fact merges with existing memory: %s (%s)", - dedup_result.existing_memory_id, dedup_result.reason, + dedup_result.existing_memory_id, + dedup_result.reason, ) existing = await self.storage.get_memory( - workspace_id, dedup_result.existing_memory_id, track_access=False, + workspace_id, + dedup_result.existing_memory_id, + track_access=False, ) updated = await self._merge_memories(workspace_id, existing, input.content, input.tags, input.metadata, input.importance) await self._post_store_pipeline(workspace_id, updated, updated.embedding, inline=inline) @@ -553,13 +549,17 @@ async def ingest_fact( if memory.embedding is None: memory = await self.storage.update_memory( - workspace_id, memory.id, embedding=embedding, + workspace_id, + memory.id, + embedding=embedding, ) # Set source_memory_id if this fact came from decomposition if source_memory_id: memory = await self.storage.update_memory( - workspace_id, memory.id, source_memory_id=source_memory_id, + workspace_id, + memory.id, + source_memory_id=source_memory_id, ) self.logger.info("Stored fact memory: %s", memory.id) @@ -570,10 +570,10 @@ async def ingest_fact( return memory async def _decompose_and_process_inline( - self, - 
workspace_id: str, - memory: Memory, - embedding: list[float], + self, + workspace_id: str, + memory: Memory, + embedding: list[float], ) -> list[Memory]: """Decompose a composite memory and process each fact inline. @@ -610,6 +610,7 @@ async def _decompose_and_process_inline( pass try: from ...models import MemorySubtype + if fact.get("subtype"): fact_subtype = MemorySubtype(fact["subtype"]) except (ValueError, ImportError): @@ -626,7 +627,8 @@ async def _decompose_and_process_inline( user_id=memory.user_id, ) result = await self.ingest_fact( - workspace_id, fact_input, + workspace_id, + fact_input, source_memory_id=memory.id, inline=True, ) @@ -647,17 +649,22 @@ async def _decompose_and_process_inline( except Exception as e: self.logger.warning( "Failed PART_OF association %s->%s: %s", - fact_mem.id, memory.id, e, + fact_mem.id, + memory.id, + e, ) # Archive the parent memory try: await self.storage.update_memory( - workspace_id, memory.id, status=MemoryStatus.ARCHIVED.value, + workspace_id, + memory.id, + status=MemoryStatus.ARCHIVED.value, ) self.logger.info( "Decomposed memory %s into %d facts inline, archived parent", - memory.id, len(created), + memory.id, + len(created), ) except Exception as e: self.logger.warning("Failed to archive parent memory %s: %s", memory.id, e) @@ -665,11 +672,11 @@ async def _decompose_and_process_inline( return created async def _inline_auto_enrich( - self, - workspace_id: str, - memory: Memory, - embedding: list[float], - classify_type: bool = False, + self, + workspace_id: str, + memory: Memory, + embedding: list[float], + classify_type: bool = False, ) -> None: """ Fallback inline auto-enrich: association + optional type classification. 
@@ -689,7 +696,7 @@ async def _inline_auto_enrich( self.logger.warning("Failed to search similar memories for %s: %s", memory.id, e) similar_memories = [] - for similar_memory, score in (similar_memories or []): + for similar_memory, score in similar_memories or []: if similar_memory.id != memory.id: # Don't self-associate try: await self.association_service.auto_associate( @@ -701,7 +708,9 @@ async def _inline_auto_enrich( except Exception as e: self.logger.warning( "Failed to auto-associate %s with %s: %s", - memory.id, similar_memory.id, e, + memory.id, + similar_memory.id, + e, ) # Type classification @@ -711,9 +720,9 @@ async def _inline_auto_enrich( memory.content, ) if classified_type != memory.type: - update_kwargs: dict = {'type': classified_type.value} + update_kwargs: dict = {"type": classified_type.value} if classified_subtype is not None: - update_kwargs['subtype'] = classified_subtype.value + update_kwargs["subtype"] = classified_subtype.value await self.storage.update_memory( workspace_id=workspace_id, memory_id=memory.id, @@ -721,16 +730,18 @@ async def _inline_auto_enrich( ) self.logger.info( "Reclassified memory %s from %s to %s", - memory.id, memory.type, classified_type, + memory.id, + memory.type, + classified_type, ) except Exception as e: self.logger.debug("Inline type classification skipped for %s: %s", memory.id, e) async def recall( - self, - workspace_id: str, - input: RecallInput, - user_id: Optional[str] = None, + self, + workspace_id: str, + input: RecallInput, + user_id: str | None = None, ) -> RecallResult: """ Query memories using vector similarity and optional filters. 
@@ -740,14 +751,9 @@ async def recall( - LLM: Query rewriting + tiered search (accurate, ~500ms) - HYBRID: RAG first, LLM if insufficient (balanced) """ - self.logger.info( - "Recalling memories in workspace: %s, mode: %s, query: %s", - workspace_id, - input.mode, - input.query[:50] - ) + self.logger.info("Recalling memories in workspace: %s, mode: %s, query: %s", workspace_id, input.mode, input.query[:50]) - start_time = datetime.now(timezone.utc) + start_time = datetime.now(UTC) # Resolve None → server defaults for mode, tolerance, and detail_level effective_mode = input.mode if input.mode is not None else RecallMode.RAG @@ -809,27 +815,20 @@ async def recall( result.mode_used = RecallMode.RAG # Calculate search-only latency (vector/LLM search phase) - search_latency_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000) + search_latency_ms = int((datetime.now(UTC) - start_time).total_seconds() * 1000) result.search_latency_ms = search_latency_ms # Resolve None → server defaults for graph traversal effective_include_associations = ( - input.include_associations if input.include_associations is not None - else self.default_include_associations - ) - effective_traverse_depth = ( - input.traverse_depth if input.traverse_depth is not None - else self.default_traverse_depth - ) - effective_max_expansion = ( - input.max_expansion if input.max_expansion is not None - else self.max_graph_expansion + input.include_associations if input.include_associations is not None else self.default_include_associations ) + effective_traverse_depth = input.traverse_depth if input.traverse_depth is not None else self.default_traverse_depth + effective_max_expansion = input.max_expansion if input.max_expansion is not None else self.max_graph_expansion # Association expansion (Phase 3A) assoc_ms = 0 if effective_include_associations or effective_traverse_depth > 0: - t0 = datetime.now(timezone.utc) + t0 = datetime.now(UTC) result.memories = await 
self._expand_with_associations( workspace_id=workspace_id, memories=result.memories, @@ -837,42 +836,39 @@ async def recall( include_associations=effective_include_associations, max_expansion=effective_max_expansion, ) - assoc_ms = int((datetime.now(timezone.utc) - t0).total_seconds() * 1000) + assoc_ms = int((datetime.now(UTC) - t0).total_seconds() * 1000) # Reranking across all modes (Phase 3B) # Skip reranking for wildcard/trivial queries where ranking is meaningless rerank_ms = 0 trivial_query = input.query.strip() in ("*", "", "**") if self.reranker_service and len(result.memories) > input.limit and not trivial_query: - t0 = datetime.now(timezone.utc) + t0 = datetime.now(UTC) result.memories = await self._apply_reranking( query=input.query, memories=result.memories, limit=input.limit, ) - rerank_ms = int((datetime.now(timezone.utc) - t0).total_seconds() * 1000) + rerank_ms = int((datetime.now(UTC) - t0).total_seconds() * 1000) elif len(result.memories) > input.limit: # Truncate without reranking (trivial query or no reranker) - result.memories = result.memories[:input.limit] + result.memories = result.memories[: input.limit] # Apply detail_level filtering if requested detail_ms = 0 if effective_detail_level != DetailLevel.FULL: - t0 = datetime.now(timezone.utc) - filtered_memories = self._apply_detail_level( - result.memories, - effective_detail_level - ) + t0 = datetime.now(UTC) + filtered_memories = self._apply_detail_level(result.memories, effective_detail_level) result.memories = filtered_memories - detail_ms = int((datetime.now(timezone.utc) - t0).total_seconds() * 1000) + detail_ms = int((datetime.now(UTC) - t0).total_seconds() * 1000) # Increment access counts (and boost importance) in parallel access_ms = 0 if result.memories: - t0 = datetime.now(timezone.utc) + t0 = datetime.now(UTC) access_tasks = [self.increment_access(workspace_id, m.id) for m in result.memories] await asyncio.gather(*access_tasks, return_exceptions=True) - access_ms = 
int((datetime.now(timezone.utc) - t0).total_seconds() * 1000) + access_ms = int((datetime.now(UTC) - t0).total_seconds() * 1000) # Annotate memories with trust scores and set drift_caveat if any are low-trust if result.memories: @@ -880,8 +876,7 @@ async def recall( low_trust = [m for m in result.memories if m.trust_score is not None and m.trust_score < 0.5] if low_trust: result.drift_caveat = ( - f"{len(low_trust)} of {len(result.memories)} recalled memories have low trust scores " - f"and may be stale or unreliable." + f"{len(low_trust)} of {len(result.memories)} recalled memories have low trust scores and may be stale or unreliable." ) # Annotate memories with freshness scores and staleness warnings @@ -890,15 +885,15 @@ async def recall( freshness_scores = [m.freshness_score for m in result.memories if m.freshness_score is not None] if freshness_scores: result.freshness_metadata = { - 'avg_freshness': round(sum(freshness_scores) / len(freshness_scores), 4), - 'min_freshness': round(min(freshness_scores), 4), - 'max_freshness': round(max(freshness_scores), 4), - 'severe_count': sum(1 for m in result.memories if m.staleness_warning == 'severe'), - 'moderate_count': sum(1 for m in result.memories if m.staleness_warning == 'moderate'), + "avg_freshness": round(sum(freshness_scores) / len(freshness_scores), 4), + "min_freshness": round(min(freshness_scores), 4), + "max_freshness": round(max(freshness_scores), 4), + "severe_count": sum(1 for m in result.memories if m.staleness_warning == "severe"), + "moderate_count": sum(1 for m in result.memories if m.staleness_warning == "moderate"), } # Calculate total latency - total_latency_ms = int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000) + total_latency_ms = int((datetime.now(UTC) - start_time).total_seconds() * 1000) self.logger.info( "Recalled %s memories in %s ms " @@ -912,7 +907,7 @@ async def recall( detail_ms, access_ms, result.mode_used, - effective_detail_level.value + 
effective_detail_level.value, ) # Phase 4: Cache recall result @@ -925,13 +920,13 @@ async def recall( return result async def _merge_memories( - self, - workspace_id: str, - existing: Memory, - new_content: str, - new_tags: list, - new_metadata: dict, - new_importance: float, + self, + workspace_id: str, + existing: Memory, + new_content: str, + new_tags: list, + new_metadata: dict, + new_importance: float, ) -> Memory: """ Merge new memory content into an existing memory. @@ -950,7 +945,7 @@ async def _merge_memories( # Deep-merge metadata: old base, new overrides, plus provenance merged_metadata = {**existing.metadata, **new_metadata} - merged_metadata['merged_from'] = existing.content_hash + merged_metadata["merged_from"] = existing.content_hash # Importance boost capped at 1.0 merged_importance = min(max(existing.importance, new_importance) * 1.1, 1.0) @@ -991,7 +986,7 @@ def _compute_trust_score(self, memory: Memory) -> tuple[float, dict]: Returns: Tuple of (trust_score, trust_signals dict) """ - now = datetime.now(timezone.utc) + now = datetime.now(UTC) # Freshness component: exponential decay (aligned with configurable freshness_half_life_days) age_hours = (now - memory.created_at).total_seconds() / 3600.0 @@ -1005,7 +1000,7 @@ def _compute_trust_score(self, memory: Memory) -> tuple[float, dict]: decay = memory.decay_factor # Verification - if memory.pinned or memory.metadata.get('verified'): + if memory.pinned or memory.metadata.get("verified"): verification = 1.0 else: verification = 0.5 @@ -1014,32 +1009,24 @@ def _compute_trust_score(self, memory: Memory) -> tuple[float, dict]: if memory.source_memory_id: # Came from session commit decomposition source_reliability = 1.0 - elif memory.metadata.get('source') == 'manual' or ( - not memory.source_memory_id - and not memory.source_document_id - and not memory.source_thread_id + elif memory.metadata.get("source") == "manual" or ( + not memory.source_memory_id and not memory.source_document_id and not 
memory.source_thread_id ): source_reliability = 0.8 else: # Extracted from document/thread source_reliability = 0.6 - trust_score = ( - 0.3 * freshness - + 0.2 * access_freq - + 0.2 * decay - + 0.15 * verification - + 0.15 * source_reliability - ) + trust_score = 0.3 * freshness + 0.2 * access_freq + 0.2 * decay + 0.15 * verification + 0.15 * source_reliability # Clamp to [0.0, 1.0] trust_score = max(0.0, min(1.0, trust_score)) trust_signals = { - 'freshness': round(freshness, 4), - 'access_frequency': round(access_freq, 4), - 'decay_factor': round(decay, 4), - 'verification': round(verification, 4), - 'source_reliability': round(source_reliability, 4), + "freshness": round(freshness, 4), + "access_frequency": round(access_freq, 4), + "decay_factor": round(decay, 4), + "verification": round(verification, 4), + "source_reliability": round(source_reliability, 4), } return trust_score, trust_signals @@ -1057,9 +1044,9 @@ def _annotate_trust(self, memories: list[Memory]) -> list[Memory]: return memories async def _recall_browse( - self, - workspace_id: str, - input: RecallInput, + self, + workspace_id: str, + input: RecallInput, ) -> RecallResult: """Browse memories without embedding — used for wildcard queries like '*'. @@ -1067,7 +1054,7 @@ async def _recall_browse( embedding + vector search path entirely. 
""" self.logger.debug("Wildcard query detected, using browse mode for workspace %s", workspace_id) - far_past = datetime(2000, 1, 1, tzinfo=timezone.utc) + far_past = datetime(2000, 1, 1, tzinfo=UTC) recent = await self.storage.get_recent_memories( workspace_id=workspace_id, created_after=far_past, @@ -1091,10 +1078,10 @@ async def _recall_browse( ) async def _recall_rag( - self, - workspace_id: str, - input: RecallInput, - relevance_threshold: float, + self, + workspace_id: str, + input: RecallInput, + relevance_threshold: float, ) -> RecallResult: """Pure vector similarity search.""" # Wildcard/browse: skip embedding for trivial queries like "*" @@ -1108,20 +1095,24 @@ async def _recall_rag( overfetch_limit = input.limit * self.recall_overfetch # Search memories in current workspace - include_archived = getattr(input, 'include_archived', False) + include_archived = getattr(input, "include_archived", False) entity_filters = {} - if getattr(input, 'observer_id', None) is not None: - entity_filters['observer_id'] = input.observer_id - if getattr(input, 'subject_id', None) is not None: - entity_filters['subject_id'] = input.subject_id - if getattr(input, 'user_id', None) is not None: - entity_filters['user_id'] = input.user_id + if getattr(input, "observer_id", None) is not None: + entity_filters["observer_id"] = input.observer_id + if getattr(input, "subject_id", None) is not None: + entity_filters["subject_id"] = input.subject_id + if getattr(input, "user_id", None) is not None: + entity_filters["user_id"] = input.user_id date_filters = {} - if getattr(input, 'created_after', None) is not None: - date_filters['created_after'] = input.created_after.isoformat() if hasattr(input.created_after, 'isoformat') else str(input.created_after) - if getattr(input, 'created_before', None) is not None: - date_filters['created_before'] = input.created_before.isoformat() if hasattr(input.created_before, 'isoformat') else str(input.created_before) + if getattr(input, 
"created_after", None) is not None: + date_filters["created_after"] = ( + input.created_after.isoformat() if hasattr(input.created_after, "isoformat") else str(input.created_after) + ) + if getattr(input, "created_before", None) is not None: + date_filters["created_before"] = ( + input.created_before.isoformat() if hasattr(input.created_before, "isoformat") else str(input.created_before) + ) results = await self.storage.search_memories( workspace_id=workspace_id, @@ -1158,7 +1149,7 @@ async def _recall_rag( all_results = results + global_results # Filter out already-surfaced memory IDs - exclude_ids = getattr(input, 'exclude_ids', None) + exclude_ids = getattr(input, "exclude_ids", None) if exclude_ids: exclude_set = set(exclude_ids) all_results = [(m, s) for m, s in all_results if m.id not in exclude_set] @@ -1169,7 +1160,7 @@ async def _recall_rag( all_results, query_context_id=context_id, query_workspace_id=workspace_id, - boosts=None # Use default boosts + boosts=None, # Use default boosts ) # Apply recency boost @@ -1180,7 +1171,7 @@ async def _recall_rag( ) # Take top limit results after boosting - memories = boosted_memories[:input.limit] + memories = boosted_memories[: input.limit] return RecallResult( memories=memories, @@ -1191,10 +1182,10 @@ async def _recall_rag( ) async def _recall_llm( - self, - workspace_id: str, - input: RecallInput, - relevance_threshold: float, + self, + workspace_id: str, + input: RecallInput, + relevance_threshold: float, ) -> RecallResult: """ LLM-enhanced retrieval with query rewriting. @@ -1206,6 +1197,7 @@ async def _recall_llm( Note: Re-ranking is handled at the top level in recall() for all modes. 
""" import time + start_time = time.time() # Check if LLM service is available @@ -1223,15 +1215,12 @@ async def _recall_llm( # Serialize context list to a string for the rewriter context_str = None if input.context: - context_str = "\n".join( - f"{msg.get('role', 'user')}: {msg.get('content', '')}" - for msg in input.context - ) + context_str = "\n".join(f"{msg.get('role', 'user')}: {msg.get('content', '')}" for msg in input.context) rewritten_query = await self._rewrite_query_with_llm(input.query, context_str) self.logger.info( "LLM query rewrite: '%s' -> '%s'", input.query[:50], - rewritten_query[:50] if rewritten_query != input.query else "(unchanged)" + rewritten_query[:50] if rewritten_query != input.query else "(unchanged)", ) # Step 2: Search with rewritten query (fetch more candidates for re-ranking) @@ -1269,17 +1258,13 @@ async def _recall_llm( search_latency_ms=int((time.time() - start_time) * 1000), mode_used=RecallMode.LLM, query_rewritten=rewritten_query, - sufficiency_reached=False + sufficiency_reached=False, ) # Reranking is now handled at the top level in recall() for all modes search_latency_ms = int((time.time() - start_time) * 1000) - self.logger.info( - "LLM recall complete: %d candidates in %d ms", - len(rag_result.memories), - search_latency_ms - ) + self.logger.info("LLM recall complete: %d candidates in %d ms", len(rag_result.memories), search_latency_ms) return RecallResult( memories=rag_result.memories, @@ -1287,14 +1272,10 @@ async def _recall_llm( search_latency_ms=search_latency_ms, mode_used=RecallMode.LLM, query_rewritten=rewritten_query, - sufficiency_reached=len(rag_result.memories) >= input.limit + sufficiency_reached=len(rag_result.memories) >= input.limit, ) - async def _rewrite_query_with_llm( - self, - query: str, - context: Optional[str] = None - ) -> str: + async def _rewrite_query_with_llm(self, query: str, context: str | None = None) -> str: """ Use LLM to rewrite query for better semantic search. 
@@ -1323,12 +1304,7 @@ async def _rewrite_query_with_llm( self.logger.warning("Query rewriting failed: %s, using original", e) return query - async def _rerank_with_llm( - self, - query: str, - memories: list, - limit: int - ) -> list: + async def _rerank_with_llm(self, query: str, memories: list, limit: int) -> list: """ Re-rank memories by relevance to query. @@ -1342,23 +1318,19 @@ async def _rerank_with_llm( if self.reranker_service: try: # Get initial scores for adaptive sizing - initial_scores = [getattr(mem, 'relevance', 0.5) for mem in memories] + initial_scores = [getattr(mem, "relevance", 0.5) for mem in memories] # Use adaptive reranking results = await self.reranker_service.rerank_objects_adaptive( query=query, objects=memories, content_fn=lambda m: m.content, - score_fn=lambda m: getattr(m, 'relevance', 0.5), + score_fn=lambda m: getattr(m, "relevance", 0.5), requested_k=limit, ) if results: - self.logger.debug( - "Reranker service: %d candidates -> %d results", - len(memories), - len(results) - ) + self.logger.debug("Reranker service: %d candidates -> %d results", len(memories), len(results)) return [r.document for r in results] except Exception as e: @@ -1412,7 +1384,7 @@ async def _rerank_with_llm( # Fill remaining slots if needed if len(ranked) < limit: remaining = [m for i, m in enumerate(memories) if i not in indices] - ranked.extend(remaining[:limit - len(ranked)]) + ranked.extend(remaining[: limit - len(ranked)]) return ranked except Exception as e: @@ -1422,10 +1394,10 @@ async def _rerank_with_llm( return memories[:limit] async def _apply_reranking( - self, - query: str, - memories: list[Memory], - limit: int, + self, + query: str, + memories: list[Memory], + limit: int, ) -> list[Memory]: """Apply reranking to memories using the reranker service. 
@@ -1447,7 +1419,7 @@ async def _apply_reranking( query=query, objects=memories, content_fn=lambda m: m.content, - score_fn=lambda m: getattr(m, 'boosted_score', None) or getattr(m, 'relevance_score', 0.5), + score_fn=lambda m: getattr(m, "boosted_score", None) or getattr(m, "relevance_score", 0.5), requested_k=limit, ) if reranked: @@ -1458,11 +1430,11 @@ async def _apply_reranking( return memories[:limit] async def forget( - self, - workspace_id: str, - memory_id: str, - hard: bool = False, - reason: Optional[str] = None, + self, + workspace_id: str, + memory_id: str, + hard: bool = False, + reason: str | None = None, ) -> bool: """ Delete or soft-delete a memory. @@ -1470,18 +1442,9 @@ async def forget( Soft delete: Sets deleted_at timestamp Hard delete: Removes from database entirely """ - self.logger.info( - "Forgetting memory: %s in workspace: %s, hard: %s", - memory_id, - workspace_id, - hard - ) + self.logger.info("Forgetting memory: %s in workspace: %s, hard: %s", memory_id, workspace_id, hard) - success = await self.storage.delete_memory( - workspace_id=workspace_id, - memory_id=memory_id, - hard=hard - ) + success = await self.storage.delete_memory(workspace_id=workspace_id, memory_id=memory_id, hard=hard) if success: self.logger.info("Memory forgotten: %s", memory_id) @@ -1491,21 +1454,17 @@ async def forget( return success async def decay( - self, - workspace_id: str, - memory_id: str, - decay_rate: float = 0.1, - ) -> Optional[Memory]: + self, + workspace_id: str, + memory_id: str, + decay_rate: float = 0.1, + ) -> Memory | None: """ Reduce memory importance by decay_rate. Used for implementing memory decay over time. 
""" - self.logger.debug( - "Decaying memory: %s by rate: %s", - memory_id, - decay_rate - ) + self.logger.debug("Decaying memory: %s by rate: %s", memory_id, decay_rate) # Get current memory memory = await self.storage.get_memory(workspace_id, memory_id) @@ -1517,26 +1476,18 @@ async def decay( new_importance = max(0.0, memory.importance - decay_rate) # Update memory - updated = await self.storage.update_memory( - workspace_id=workspace_id, - memory_id=memory_id, - importance=new_importance - ) + updated = await self.storage.update_memory(workspace_id=workspace_id, memory_id=memory_id, importance=new_importance) - self.logger.debug( - "Decayed memory: %s, new importance: %s", - memory_id, - new_importance - ) + self.logger.debug("Decayed memory: %s, new importance: %s", memory_id, new_importance) return updated async def update( - self, - workspace_id: str, - memory_id: str, - **updates, - ) -> Optional[Memory]: + self, + workspace_id: str, + memory_id: str, + **updates, + ) -> Memory | None: """ Update a memory, recomputing content_hash and embedding when content changes. @@ -1562,26 +1513,26 @@ async def update( ) async def get( - self, - workspace_id: str, - memory_id: str, - ) -> Optional[Memory]: + self, + workspace_id: str, + memory_id: str, + ) -> Memory | None: """Get a single memory by ID within a workspace.""" self.logger.debug("Getting memory: %s in workspace: %s", memory_id, workspace_id) return await self.storage.get_memory(workspace_id, memory_id) async def get_by_id( - self, - memory_id: str, - ) -> Optional[Memory]: + self, + memory_id: str, + ) -> Memory | None: """Get a single memory by ID without workspace filter. 
Memory IDs are globally unique.""" self.logger.debug("Getting memory by ID: %s", memory_id) return await self.storage.get_memory_by_id(memory_id) async def increment_access( - self, - workspace_id: str, - memory_id: str, + self, + workspace_id: str, + memory_id: str, ) -> None: """Increment access count and update last_accessed_at.""" try: @@ -1592,7 +1543,7 @@ async def increment_access( workspace_id=workspace_id, memory_id=memory_id, access_count=memory.access_count + 1, - last_accessed_at=datetime.now(timezone.utc), + last_accessed_at=datetime.now(UTC), importance=importance, ) except Exception as e: @@ -1622,28 +1573,28 @@ def _apply_detail_level(self, memories: list[Memory], detail_level: DetailLevel) if detail_level == DetailLevel.ABSTRACT: # Use abstract field if available, else truncate to ~100 chars if memory.abstract: - memory_dict['content'] = memory.abstract + memory_dict["content"] = memory.abstract else: - memory_dict['content'] = memory.content[:100] + "..." if len(memory.content) > 100 else memory.content + memory_dict["content"] = memory.content[:100] + "..." if len(memory.content) > 100 else memory.content elif detail_level == DetailLevel.OVERVIEW: # Use overview field if available, else truncate to ~500 chars if memory.overview: - memory_dict['content'] = memory.overview + memory_dict["content"] = memory.overview else: - memory_dict['content'] = memory.content[:500] + "..." if len(memory.content) > 500 else memory.content + memory_dict["content"] = memory.content[:500] + "..." 
if len(memory.content) > 500 else memory.content filtered_memories.append(Memory(**memory_dict)) return filtered_memories async def _expand_with_associations( - self, - workspace_id: str, - memories: list[Memory], - traverse_depth: int, - include_associations: bool, - max_expansion: int = 50, + self, + workspace_id: str, + memories: list[Memory], + traverse_depth: int, + include_associations: bool, + max_expansion: int = 50, ) -> list[Memory]: """Expand recall results by traversing association graph. @@ -1687,7 +1638,7 @@ async def _expand_with_associations( # BFS queue: (memory_id, parent_score, current_depth) queue: list[tuple[str, float, int]] = [] for memory in memories: - parent_score = getattr(memory, 'boosted_score', None) or getattr(memory, 'relevance_score', 0.5) + parent_score = getattr(memory, "boosted_score", None) or getattr(memory, "relevance_score", 0.5) queue.append((memory.id, parent_score, 0)) while queue: @@ -1730,7 +1681,7 @@ async def _expand_with_associations( continue # Skip non-active memories - if hasattr(target_memory, 'status') and target_memory.status != MemoryStatus.ACTIVE: + if hasattr(target_memory, "status") and target_memory.status != MemoryStatus.ACTIVE: continue # Score: parent_score * strength * decay_per_hop @@ -1739,9 +1690,9 @@ async def _expand_with_associations( # Attach score metadata memory_dict = target_memory.model_dump() - memory_dict['relevance_score'] = score - memory_dict['boosted_score'] = score - memory_dict['source_scope'] = 'association' + memory_dict["relevance_score"] = score + memory_dict["boosted_score"] = score + memory_dict["source_scope"] = "association" scored_memory = Memory(**memory_dict) discovered.append((scored_memory, score)) @@ -1758,7 +1709,7 @@ async def _expand_with_associations( # Sort by boosted_score descending combined.sort( - key=lambda m: getattr(m, 'boosted_score', 0.0) or 0.0, + key=lambda m: getattr(m, "boosted_score", 0.0) or 0.0, reverse=True, ) @@ -1775,7 +1726,7 @@ async def 
_expand_with_associations( return combined - def _should_decompose(self, content: str, memory_type: Optional[MemoryType]) -> bool: + def _should_decompose(self, content: str, memory_type: MemoryType | None) -> bool: """Determine whether a memory should be decomposed into atomic facts. Criteria: @@ -1801,9 +1752,9 @@ def _should_decompose(self, content: str, memory_type: Optional[MemoryType]) -> return False # Check for multiple sentences (periods, semicolons, or question marks followed by space/end) - sentence_terminators = re.findall(r'[.;?!]\s', content) + sentence_terminators = re.findall(r"[.;?!]\s", content) # Also check for a terminator at the very end of the string - if content and content[-1] in '.;?!': + if content and content[-1] in ".;?!": sentence_terminators.append(content[-1]) if len(sentence_terminators) <= 1: return False @@ -1821,31 +1772,21 @@ async def _classify_memory_type(self, content: str) -> MemoryType: content_lower = content.lower() # Procedural: How-to, steps, instructions - if any(keyword in content_lower for keyword in [ - "how to", "steps", "procedure", "process", "method", "workflow" - ]): + if any(keyword in content_lower for keyword in ["how to", "steps", "procedure", "process", "method", "workflow"]): return MemoryType.PROCEDURAL # Episodic: Time-based, events, specific instances - if any(keyword in content_lower for keyword in [ - "when", "yesterday", "today", "occurred", "happened", "at that time" - ]): + if any(keyword in content_lower for keyword in ["when", "yesterday", "today", "occurred", "happened", "at that time"]): return MemoryType.EPISODIC # Working: Current context, temporary - if any(keyword in content_lower for keyword in [ - "currently", "working on", "in progress", "now", "right now" - ]): + if any(keyword in content_lower for keyword in ["currently", "working on", "in progress", "now", "right now"]): return MemoryType.WORKING # Default to semantic (facts, concepts) return MemoryType.SEMANTIC - def 
_get_relevance_threshold( - self, - tolerance: SearchTolerance, - min_relevance: Optional[float] - ) -> float: + def _get_relevance_threshold(self, tolerance: SearchTolerance, min_relevance: float | None) -> float: """ Calculate effective relevance threshold. @@ -1876,11 +1817,7 @@ def _get_relevance_threshold( return max(min_relevance, floor) def apply_scope_boosts( - self, - memories: list, - query_context_id: str, - query_workspace_id: str, - boosts: Optional[ScopeBoosts] = None + self, memories: list, query_context_id: str, query_workspace_id: str, boosts: ScopeBoosts | None = None ) -> list[Memory]: """ Apply locality-based score boosts to recalled memories. @@ -1895,7 +1832,7 @@ def apply_scope_boosts( List of Memory objects sorted by boosted score with source_scope added """ if boosts is None: - boosts = getattr(self, 'default_scope_boosts', ScopeBoosts()) + boosts = getattr(self, "default_scope_boosts", ScopeBoosts()) boosted_memories = [] @@ -1921,9 +1858,9 @@ def apply_scope_boosts( # Create new Memory object with ranking metadata memory_dict = memory.model_dump() - memory_dict['source_scope'] = source_scope - memory_dict['relevance_score'] = base_score - memory_dict['boosted_score'] = boosted_score + memory_dict["source_scope"] = source_scope + memory_dict["relevance_score"] = base_score + memory_dict["boosted_score"] = boosted_score boosted_memory = Memory(**memory_dict) boosted_memories.append((boosted_memory, boosted_score)) @@ -1934,10 +1871,10 @@ def apply_scope_boosts( return [m for m, _ in boosted_memories] def apply_recency_boost( - self, - memories: list[Memory], - recency_weight: float, - half_life_hours: float = DEFAULT_RECENCY_HALF_LIFE_HOURS, + self, + memories: list[Memory], + recency_weight: float, + half_life_hours: float = DEFAULT_RECENCY_HALF_LIFE_HOURS, ) -> list[Memory]: """ Apply time-based recency boost to recalled memories. 
@@ -1957,7 +1894,7 @@ def apply_recency_boost( if recency_weight <= 0.0 or not memories: return memories - now = datetime.now(timezone.utc) + now = datetime.now(UTC) for memory in memories: age_hours = max(0.0, (now - memory.updated_at).total_seconds() / 3600.0) @@ -1971,9 +1908,9 @@ def apply_recency_boost( return memories def _annotate_freshness( - self, - memories: list[Memory], - half_life_days: Optional[float] = None, + self, + memories: list[Memory], + half_life_days: float | None = None, ) -> list[Memory]: """ Annotate recalled memories with freshness scores and staleness warnings. @@ -2001,9 +1938,9 @@ def _annotate_freshness( return memories if half_life_days is None: - half_life_days = getattr(self, 'freshness_half_life_days', DEFAULT_MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS) + half_life_days = getattr(self, "freshness_half_life_days", DEFAULT_MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) for memory in memories: age_days = max(0.0, (now - memory.created_at).total_seconds() / 86400.0) @@ -2032,13 +1969,7 @@ def _annotate_freshness( return memories async def recall_with_global( - self, - workspace_id: str, - context_id: str, - query: str, - include_global: bool = True, - boosts: Optional[ScopeBoosts] = None, - **kwargs + self, workspace_id: str, context_id: str, query: str, include_global: bool = True, boosts: ScopeBoosts | None = None, **kwargs ) -> list[Memory]: """ Recall memories from workspace and optionally _global. 
@@ -2058,23 +1989,23 @@ async def recall_with_global( recall_input = RecallInput( query=query, context_id=context_id, - limit=kwargs.get('limit', 10), - types=kwargs.get('types', []), - subtypes=kwargs.get('subtypes', []), - tags=kwargs.get('tags', []), - mode=kwargs.get('mode', RecallMode.RAG), - tolerance=kwargs.get('tolerance', SearchTolerance.MODERATE), - min_relevance=kwargs.get('min_relevance'), + limit=kwargs.get("limit", 10), + types=kwargs.get("types", []), + subtypes=kwargs.get("subtypes", []), + tags=kwargs.get("tags", []), + mode=kwargs.get("mode", RecallMode.RAG), + tolerance=kwargs.get("tolerance", SearchTolerance.MODERATE), + min_relevance=kwargs.get("min_relevance"), ) # Generate query embedding once query_embedding = await self.embedding.embed(query) entity_filters = {} - if kwargs.get('observer_id') is not None: - entity_filters['observer_id'] = kwargs['observer_id'] - if kwargs.get('subject_id') is not None: - entity_filters['subject_id'] = kwargs['subject_id'] + if kwargs.get("observer_id") is not None: + entity_filters["observer_id"] = kwargs["observer_id"] + if kwargs.get("subject_id") is not None: + entity_filters["subject_id"] = kwargs["subject_id"] # Get memories from current workspace workspace_results = await self.storage.search_memories( @@ -2106,15 +2037,10 @@ async def recall_with_global( all_memories = workspace_results + global_results # Apply scope boosts and return sorted - ranked = self.apply_scope_boosts( - all_memories, - query_context_id=context_id, - query_workspace_id=workspace_id, - boosts=boosts - ) + ranked = self.apply_scope_boosts(all_memories, query_context_id=context_id, query_workspace_id=workspace_id, boosts=boosts) # Apply recency boost - effective_recency_weight = kwargs.get('recency_weight', DEFAULT_RECENCY_WEIGHT) + effective_recency_weight = kwargs.get("recency_weight", DEFAULT_RECENCY_WEIGHT) ranked = self.apply_recency_boost( ranked, recency_weight=effective_recency_weight, @@ -2125,7 +2051,8 @@ async def 
recall_with_global( class DefaultMemoryServicePlugin(MemoryServicePluginBase): """Default memory service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> MemoryService: cache = self.get_extension(EXT_CACHE_SERVICE, v) @@ -2141,7 +2068,7 @@ def initialize(self, v: Variables, logger: Logger) -> MemoryService: extraction_service: ExtractionService = self.get_extension(EXT_EXTRACTION_SERVICE, v) # TaskService is optional -- auto-association works inline without it - task_service: Optional["TaskService"] = None + task_service: TaskService | None = None try: task_service = self.get_extension(EXT_TASK_SERVICE, v) except Exception: diff --git a/memorylayer-core-python/src/memorylayer_server/services/metrics/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/metrics/__init__.py index e8aa132..fcde15e 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/metrics/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/metrics/__init__.py @@ -1,4 +1,5 @@ """Metrics Service - Pluggable observability interface.""" -from .base import MetricsService, MetricsServicePluginBase, EXT_METRICS_SERVICE + +from .base import EXT_METRICS_SERVICE, MetricsService, MetricsServicePluginBase __all__ = ["MetricsService", "MetricsServicePluginBase", "EXT_METRICS_SERVICE"] diff --git a/memorylayer-core-python/src/memorylayer_server/services/metrics/base.py b/memorylayer-core-python/src/memorylayer_server/services/metrics/base.py index aceece0..0f276ce 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/metrics/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/metrics/base.py @@ -1,10 +1,11 @@ """Metrics Service - Pluggable metrics/observability interface.""" + import time from abc import ABC, abstractmethod +from collections.abc import Generator from contextlib import contextmanager -from typing import Generator -from ...config import 
MEMORYLAYER_METRICS_SERVICE, DEFAULT_MEMORYLAYER_METRICS_SERVICE +from ...config import DEFAULT_MEMORYLAYER_METRICS_SERVICE, MEMORYLAYER_METRICS_SERVICE from .._constants import EXT_METRICS_SERVICE from .._plugin_factory import make_service_plugin_base diff --git a/memorylayer-core-python/src/memorylayer_server/services/metrics/noop.py b/memorylayer-core-python/src/memorylayer_server/services/metrics/noop.py index e818cdc..cd248e5 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/metrics/noop.py +++ b/memorylayer-core-python/src/memorylayer_server/services/metrics/noop.py @@ -1,6 +1,6 @@ """No-op metrics service - discards all observations (OSS default).""" + from logging import Logger -from typing import Optional from scitrera_app_framework.api import Variables @@ -22,7 +22,8 @@ def gauge(self, name: str, value: float, labels: dict[str, str] | None = None) - class NoopMetricsServicePlugin(MetricsServicePluginBase): """Plugin for no-op metrics service.""" - PROVIDER_NAME = 'noop' - def initialize(self, v: Variables, logger: Logger) -> Optional[NoopMetricsService]: + PROVIDER_NAME = "noop" + + def initialize(self, v: Variables, logger: Logger) -> NoopMetricsService | None: return NoopMetricsService() diff --git a/memorylayer-core-python/src/memorylayer_server/services/metrics/prometheus.py b/memorylayer-core-python/src/memorylayer_server/services/metrics/prometheus.py index 8edfba4..bec4f5c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/metrics/prometheus.py +++ b/memorylayer-core-python/src/memorylayer_server/services/metrics/prometheus.py @@ -4,9 +4,9 @@ pip install prometheus_client """ + import threading from logging import Logger -from typing import Optional from scitrera_app_framework.api import Variables @@ -42,8 +42,7 @@ def __init__(self) -> None: import prometheus_client as _pc # noqa: F401 except ImportError as exc: raise ImportError( - "prometheus_client is required for PrometheusMetricsService. 
" - "Install it with: pip install prometheus_client" + "prometheus_client is required for PrometheusMetricsService. Install it with: pip install prometheus_client" ) from exc self._lock = threading.Lock() @@ -65,6 +64,7 @@ def _label_values(labels: dict[str, str] | None) -> tuple[str, ...]: def _get_counter(self, name: str, label_names: tuple[str, ...]): import prometheus_client as pc + if name not in self._counters: with self._lock: if name not in self._counters: @@ -73,6 +73,7 @@ def _get_counter(self, name: str, label_names: tuple[str, ...]): def _get_histogram(self, name: str, label_names: tuple[str, ...]): import prometheus_client as pc + if name not in self._histograms: with self._lock: if name not in self._histograms: @@ -81,6 +82,7 @@ def _get_histogram(self, name: str, label_names: tuple[str, ...]): def _get_gauge(self, name: str, label_names: tuple[str, ...]): import prometheus_client as pc + if name not in self._gauges: with self._lock: if name not in self._gauges: @@ -121,8 +123,9 @@ def gauge(self, name: str, value: float, labels: dict[str, str] | None = None) - class PrometheusMetricsServicePlugin(MetricsServicePluginBase): """Plugin for Prometheus metrics service.""" - PROVIDER_NAME = 'prometheus' - def initialize(self, v: Variables, logger: Logger) -> Optional[PrometheusMetricsService]: + PROVIDER_NAME = "prometheus" + + def initialize(self, v: Variables, logger: Logger) -> PrometheusMetricsService | None: logger.info("Initializing PrometheusMetricsService") return PrometheusMetricsService() diff --git a/memorylayer-core-python/src/memorylayer_server/services/metrics/routes.py b/memorylayer-core-python/src/memorylayer_server/services/metrics/routes.py index 3aa073e..e7bd008 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/metrics/routes.py +++ b/memorylayer-core-python/src/memorylayer_server/services/metrics/routes.py @@ -1,4 +1,5 @@ """Prometheus /metrics endpoint - only active when metrics service is 'prometheus'.""" + import 
logging from fastapi import APIRouter @@ -19,12 +20,9 @@ async def prometheus_metrics() -> Response: """Expose Prometheus metrics in the standard text exposition format.""" try: - from prometheus_client import generate_latest, CONTENT_TYPE_LATEST + from prometheus_client import CONTENT_TYPE_LATEST, generate_latest except ImportError as exc: - raise RuntimeError( - "prometheus_client is required to serve /metrics. " - "Install it with: pip install prometheus_client" - ) from exc + raise RuntimeError("prometheus_client is required to serve /metrics. Install it with: pip install prometheus_client") from exc data = generate_latest() return Response(content=data, media_type=CONTENT_TYPE_LATEST) @@ -37,8 +35,8 @@ def extension_point_name(self, v: Variables) -> str: return EXT_MULTI_API_ROUTERS def is_enabled(self, v: Variables) -> bool: - provider = v.environ(MEMORYLAYER_METRICS_SERVICE, default='noop') - return provider == 'prometheus' + provider = v.environ(MEMORYLAYER_METRICS_SERVICE, default="noop") + return provider == "prometheus" def is_multi_extension(self, v: Variables) -> bool: return True diff --git a/memorylayer-core-python/src/memorylayer_server/services/ontology/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/ontology/__init__.py index 184026f..2912c6f 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/ontology/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/ontology/__init__.py @@ -4,17 +4,18 @@ OSS version includes unified ontology with 65 relationship types across 11 categories. Enterprise version supports custom ontologies. 
""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - OntologyService, - OntologyServicePluginBase, - EXT_ONTOLOGY_SERVICE, - FeatureRequiresUpgradeError, BASE_ONTOLOGY, + EXT_ONTOLOGY_SERVICE, RELATIONSHIP_CATEGORIES, + FeatureRequiresUpgradeError, + OntologyService, + OntologyServicePluginBase, ) -from scitrera_app_framework import Variables, get_extension - def get_ontology_service(v: Variables = None) -> OntologyService: """Get the ontology service instance.""" @@ -22,11 +23,11 @@ def get_ontology_service(v: Variables = None) -> OntologyService: __all__ = ( - 'OntologyService', - 'OntologyServicePluginBase', - 'get_ontology_service', - 'EXT_ONTOLOGY_SERVICE', - 'FeatureRequiresUpgradeError', - 'BASE_ONTOLOGY', - 'RELATIONSHIP_CATEGORIES', + "OntologyService", + "OntologyServicePluginBase", + "get_ontology_service", + "EXT_ONTOLOGY_SERVICE", + "FeatureRequiresUpgradeError", + "BASE_ONTOLOGY", + "RELATIONSHIP_CATEGORIES", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/ontology/base.py b/memorylayer-core-python/src/memorylayer_server/services/ontology/base.py index 4a677da..d999fdc 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/ontology/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/ontology/base.py @@ -1,13 +1,20 @@ -from ...config import MEMORYLAYER_ONTOLOGY_SERVICE, DEFAULT_MEMORYLAYER_ONTOLOGY_SERVICE - +from ...config import DEFAULT_MEMORYLAYER_ONTOLOGY_SERVICE, MEMORYLAYER_ONTOLOGY_SERVICE from .._constants import EXT_ONTOLOGY_SERVICE from .._plugin_factory import make_service_plugin_base # All valid relationship categories RELATIONSHIP_CATEGORIES = { - "hierarchical", "causal", "temporal", "similarity", - "learning", "refinement", "reference", - "solution", "context", "workflow", "quality", + "hierarchical", + "causal", + "temporal", + "similarity", + "learning", + "refinement", + "reference", + "solution", + "context", + "workflow", + "quality", } @@ 
-16,10 +23,7 @@ class FeatureRequiresUpgradeError(Exception): def __init__(self, feature: str): self.feature = feature - super().__init__( - f"Feature '{feature}' requires MemoryLayer Enterprise. " - "Visit https://memorylayer.ai/enterprise to upgrade." - ) + super().__init__(f"Feature '{feature}' requires MemoryLayer Enterprise. Visit https://memorylayer.ai/enterprise to upgrade.") # Unified ontology with 45 relationship types across 11 categories @@ -67,7 +71,6 @@ def __init__(self, feature: str): "inverse": "instance_of", "category": "hierarchical", }, - # --- Causal relationships --- "causes": { "description": "Direct causation", @@ -139,7 +142,6 @@ def __init__(self, feature: str): "inverse": "prevents", "category": "causal", }, - # --- Temporal relationships --- "before": { "description": "Occurs before in time", @@ -162,7 +164,6 @@ def __init__(self, feature: str): "inverse": None, "category": "temporal", }, - # --- Similarity relationships --- "similar_to": { "description": "Similar content or meaning", @@ -192,7 +193,6 @@ def __init__(self, feature: str): "inverse": "variant_of", "category": "similarity", }, - # --- Learning relationships (formerly "logical") --- "contradicts": { "description": "Logically contradicts", @@ -250,7 +250,6 @@ def __init__(self, feature: str): "inverse": "supersedes", "category": "learning", }, - # --- Refinement relationships --- "refines": { "description": "Refines or elaborates on", @@ -280,7 +279,6 @@ def __init__(self, feature: str): "inverse": "replaces", "category": "refinement", }, - # --- Reference relationships --- "references": { "description": "References or cites", @@ -296,7 +294,6 @@ def __init__(self, feature: str): "inverse": "references", "category": "reference", }, - # --- Solution relationships --- "solves": { "description": "A solves problem B", @@ -347,7 +344,6 @@ def __init__(self, feature: str): "inverse": "improves", "category": "solution", }, - # --- Context relationships --- "occurs_in": { 
"description": "A occurs in context B", @@ -398,7 +394,6 @@ def __init__(self, feature: str): "inverse": "requires", "category": "context", }, - # --- Workflow relationships --- "follows": { "description": "A follows B in sequence", @@ -442,7 +437,6 @@ def __init__(self, feature: str): "inverse": "blocks", "category": "workflow", }, - # --- Quality relationships --- "effective_for": { "description": "A is effective for B", @@ -490,58 +484,33 @@ def __init__(self, feature: str): from abc import ABC, abstractmethod -from typing import Optional class OntologyService(ABC): """Interface for ontology service.""" @abstractmethod - def get_merged_ontology( - self, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> dict: + def get_merged_ontology(self, tenant_id: str, workspace_id: str | None = None) -> dict: """Get merged ontology (base + custom for enterprise).""" pass @abstractmethod - def validate_relationship( - self, - relationship_type: str, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> bool: + def validate_relationship(self, relationship_type: str, tenant_id: str, workspace_id: str | None = None) -> bool: """Validate that a relationship type exists in the ontology.""" pass @abstractmethod - def get_relationship_info( - self, - relationship_type: str, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> dict: + def get_relationship_info(self, relationship_type: str, tenant_id: str, workspace_id: str | None = None) -> dict: """Get metadata about a relationship type.""" pass @abstractmethod - def create_ontology( - self, - tenant_id: str, - name: str, - relationships: dict, - workspace_id: Optional[str] = None - ) -> dict: + def create_ontology(self, tenant_id: str, name: str, relationships: dict, workspace_id: str | None = None) -> dict: """Create a custom ontology (Enterprise only).""" pass @abstractmethod - def list_relationship_types( - self, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> list[str]: + def 
list_relationship_types(self, tenant_id: str, workspace_id: str | None = None) -> list[str]: """List all available relationship types.""" pass @@ -551,7 +520,7 @@ async def classify_relationship( content_a: str, content_b: str, tenant_id: str = "_default", - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> str: """Use LLM to classify the relationship between two memory contents. @@ -565,7 +534,7 @@ def get_relationships_by_category( self, category: str, tenant_id: str = "_default", - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> list[str]: """Get all relationship types in a category.""" pass diff --git a/memorylayer-core-python/src/memorylayer_server/services/ontology/default.py b/memorylayer-core-python/src/memorylayer_server/services/ontology/default.py index d965b7f..6bfffb7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/ontology/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/ontology/default.py @@ -4,17 +4,16 @@ Provides relationship type definitions and validation. OSS version includes unified ontology with 65 relationship types across 11 categories. """ -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables from .base import ( - OntologyService, - OntologyServicePluginBase, - FeatureRequiresUpgradeError, BASE_ONTOLOGY, RELATIONSHIP_CATEGORIES, + FeatureRequiresUpgradeError, + OntologyService, + OntologyServicePluginBase, ) @@ -37,11 +36,7 @@ def __init__(self, v: Variables = None, llm_service=None): len(RELATIONSHIP_CATEGORIES), ) - def get_merged_ontology( - self, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> dict: + def get_merged_ontology(self, tenant_id: str, workspace_id: str | None = None) -> dict: """ Get merged ontology (base + custom for enterprise). 
@@ -57,12 +52,7 @@ def get_merged_ontology( # OSS: Return base ontology only return self.base_ontology.copy() - def validate_relationship( - self, - relationship_type: str, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> bool: + def validate_relationship(self, relationship_type: str, tenant_id: str, workspace_id: str | None = None) -> bool: """ Validate that a relationship type exists in the ontology. @@ -81,19 +71,11 @@ def validate_relationship( if relationship_type not in ontology: valid_types = ", ".join(sorted(ontology.keys())) - raise ValueError( - f"Invalid relationship type: {relationship_type}. " - f"Valid types: {valid_types}" - ) + raise ValueError(f"Invalid relationship type: {relationship_type}. Valid types: {valid_types}") return True - def get_relationship_info( - self, - relationship_type: str, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> dict: + def get_relationship_info(self, relationship_type: str, tenant_id: str, workspace_id: str | None = None) -> dict: """ Get metadata about a relationship type. @@ -112,13 +94,7 @@ def get_relationship_info( ontology = self.get_merged_ontology(tenant_id, workspace_id) return ontology[relationship_type].copy() - def create_ontology( - self, - tenant_id: str, - name: str, - relationships: dict, - workspace_id: Optional[str] = None - ) -> dict: + def create_ontology(self, tenant_id: str, name: str, relationships: dict, workspace_id: str | None = None) -> dict: """ Create a custom ontology. @@ -135,11 +111,7 @@ def create_ontology( """ raise FeatureRequiresUpgradeError("custom_ontologies") - def list_relationship_types( - self, - tenant_id: str, - workspace_id: Optional[str] = None - ) -> list[str]: + def list_relationship_types(self, tenant_id: str, workspace_id: str | None = None) -> list[str]: """ List all available relationship types. 
@@ -154,11 +126,11 @@ def list_relationship_types( return sorted(ontology.keys()) async def classify_relationship( - self, - content_a: str, - content_b: str, - tenant_id: str = "_default", - workspace_id: Optional[str] = None, + self, + content_a: str, + content_b: str, + tenant_id: str = "_default", + workspace_id: str | None = None, ) -> str: """Use LLM to classify the relationship between two memory contents. @@ -202,7 +174,7 @@ async def classify_relationship( ) try: - from ...models.llm import LLMRequest, LLMMessage, LLMRole + from ...models.llm import LLMMessage, LLMRequest, LLMRole request = LLMRequest( messages=[ @@ -213,7 +185,7 @@ async def classify_relationship( ) response = await self.llm_service.complete(request, profile="ontology") - result = response.content.strip().lower().replace('"', '').replace("'", '').rstrip('.') + result = response.content.strip().lower().replace('"', "").replace("'", "").rstrip(".") # Validate the LLM response against the ontology if result in ontology: @@ -228,7 +200,8 @@ async def classify_relationship( matched = prefix_matches[0] self.logger.debug( "Prefix-matched truncated relationship '%s' to '%s'", - result, matched, + result, + matched, ) return matched @@ -243,10 +216,10 @@ async def classify_relationship( return "related_to" def get_relationships_by_category( - self, - category: str, - tenant_id: str = "_default", - workspace_id: Optional[str] = None, + self, + category: str, + tenant_id: str = "_default", + workspace_id: str | None = None, ) -> list[str]: """Get all relationship types in a category. @@ -262,22 +235,16 @@ def get_relationships_by_category( ValueError: If the category is not recognized. """ if category not in RELATIONSHIP_CATEGORIES: - raise ValueError( - f"Invalid category: {category}. " - f"Valid categories: {', '.join(sorted(RELATIONSHIP_CATEGORIES))}" - ) + raise ValueError(f"Invalid category: {category}. 
Valid categories: {', '.join(sorted(RELATIONSHIP_CATEGORIES))}") ontology = self.get_merged_ontology(tenant_id, workspace_id) - return sorted( - rel_type - for rel_type, info in ontology.items() - if info.get("category") == category - ) + return sorted(rel_type for rel_type, info in ontology.items() if info.get("category") == category) class DefaultOntologyServicePlugin(OntologyServicePluginBase): """Default ontology service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def get_dependencies(self, v: Variables): return () # LLM is optional, don't require it @@ -287,6 +254,7 @@ def initialize(self, v: Variables, logger) -> OntologyService: llm_service = None try: from ..llm import EXT_LLM_SERVICE + llm_service = self.get_extension(EXT_LLM_SERVICE, v) except Exception: logger.debug("LLM service not available for ontology classification") diff --git a/memorylayer-core-python/src/memorylayer_server/services/rate_limit/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/rate_limit/__init__.py index 6275665..e1c15ed 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/rate_limit/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/rate_limit/__init__.py @@ -1,13 +1,14 @@ """Rate limit service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( + EXT_RATE_LIMIT_SERVICE, RateLimitResult, RateLimitService, RateLimitServicePluginBase, - EXT_RATE_LIMIT_SERVICE, ) -from scitrera_app_framework import Variables, get_extension - def get_rate_limit_service(v: Variables = None) -> RateLimitService: """Get the rate limit service instance.""" @@ -15,9 +16,9 @@ def get_rate_limit_service(v: Variables = None) -> RateLimitService: __all__ = ( - 'RateLimitResult', - 'RateLimitService', - 'RateLimitServicePluginBase', - 'get_rate_limit_service', - 'EXT_RATE_LIMIT_SERVICE', + "RateLimitResult", + "RateLimitService", + "RateLimitServicePluginBase", + 
"get_rate_limit_service", + "EXT_RATE_LIMIT_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/rate_limit/base.py b/memorylayer-core-python/src/memorylayer_server/services/rate_limit/base.py index 8a02075..f60e1d6 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/rate_limit/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/rate_limit/base.py @@ -1,28 +1,29 @@ """Rate Limit Service - Pluggable rate limiting interface.""" + from abc import ABC, abstractmethod from dataclasses import dataclass -from ...config import MEMORYLAYER_RATE_LIMIT_SERVICE, DEFAULT_MEMORYLAYER_RATE_LIMIT_SERVICE - +from ...config import DEFAULT_MEMORYLAYER_RATE_LIMIT_SERVICE, MEMORYLAYER_RATE_LIMIT_SERVICE from .._constants import EXT_RATE_LIMIT_SERVICE from .._plugin_factory import make_service_plugin_base # Re-export for convenience __all__ = ( - 'RateLimitResult', - 'RateLimitService', - 'RateLimitServicePluginBase', - 'EXT_RATE_LIMIT_SERVICE', + "RateLimitResult", + "RateLimitService", + "RateLimitServicePluginBase", + "EXT_RATE_LIMIT_SERVICE", ) @dataclass class RateLimitResult: """Result of a rate limit check.""" + allowed: bool - limit: int # max requests per window - remaining: int # requests remaining in current window - reset_at: float # unix timestamp when window resets + limit: int # max requests per window + remaining: int # requests remaining in current window + reset_at: float # unix timestamp when window resets class RateLimitService(ABC): diff --git a/memorylayer-core-python/src/memorylayer_server/services/rate_limit/noop.py b/memorylayer-core-python/src/memorylayer_server/services/rate_limit/noop.py index dd3ffc4..48b8c41 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/rate_limit/noop.py +++ b/memorylayer-core-python/src/memorylayer_server/services/rate_limit/noop.py @@ -1,7 +1,7 @@ """No-op rate limit service - always allows requests (OSS default).""" + import time from logging import 
Logger -from typing import Optional from scitrera_app_framework.api import Variables @@ -31,7 +31,8 @@ async def get_usage(self, key: str) -> tuple[int, int]: class NoopRateLimitServicePlugin(RateLimitServicePluginBase): """Plugin for no-op rate limit service.""" - PROVIDER_NAME = 'noop' - def initialize(self, v: Variables, logger: Logger) -> Optional[RateLimitService]: + PROVIDER_NAME = "noop" + + def initialize(self, v: Variables, logger: Logger) -> RateLimitService | None: return NoopRateLimitService() diff --git a/memorylayer-core-python/src/memorylayer_server/services/reflect/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/reflect/__init__.py index f993f32..38e9b65 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reflect/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reflect/__init__.py @@ -1,12 +1,13 @@ """Reflect service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - ReflectServicePluginBase, EXT_REFLECT_SERVICE, + ReflectServicePluginBase, ) from .default import ReflectService -from scitrera_app_framework import Variables, get_extension - def get_reflect_service(v: Variables = None) -> ReflectService: """Get the reflect service instance.""" @@ -14,8 +15,8 @@ def get_reflect_service(v: Variables = None) -> ReflectService: __all__ = ( - 'ReflectService', - 'ReflectServicePluginBase', - 'get_reflect_service', - 'EXT_REFLECT_SERVICE', + "ReflectService", + "ReflectServicePluginBase", + "get_reflect_service", + "EXT_REFLECT_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/reflect/base.py b/memorylayer-core-python/src/memorylayer_server/services/reflect/base.py index dbd3526..818034e 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reflect/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reflect/base.py @@ -1,8 +1,7 @@ -from ...config import MEMORYLAYER_REFLECT_SERVICE, 
DEFAULT_MEMORYLAYER_REFLECT_SERVICE -from .._constants import EXT_STORAGE_BACKEND, EXT_MEMORY_SERVICE, EXT_REFLECT_SERVICE +from ...config import DEFAULT_MEMORYLAYER_REFLECT_SERVICE, MEMORYLAYER_REFLECT_SERVICE +from .._constants import EXT_MEMORY_SERVICE, EXT_REFLECT_SERVICE, EXT_STORAGE_BACKEND from .._plugin_factory import make_service_plugin_base - # noinspection PyAbstractClass ReflectServicePluginBase = make_service_plugin_base( ext_name=EXT_REFLECT_SERVICE, diff --git a/memorylayer-core-python/src/memorylayer_server/services/reflect/default.py b/memorylayer-core-python/src/memorylayer_server/services/reflect/default.py index bbdfdb2..f168d2c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reflect/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reflect/default.py @@ -6,18 +6,18 @@ - Generate category summaries - Answer complex queries requiring reasoning """ -from datetime import datetime, timezone + +from datetime import UTC, datetime from logging import Logger -from typing import Optional, Any from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables +from ...models import DetailLevel, RecallInput, RecallMode, ReflectInput, ReflectResult +from ..llm import EXT_LLM_SERVICE, LLMNotConfiguredError, LLMService +from ..memory import EXT_MEMORY_SERVICE, MemoryService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend from .base import ReflectServicePluginBase -from ..storage import StorageBackend, EXT_STORAGE_BACKEND -from ..memory import MemoryService, EXT_MEMORY_SERVICE -from ..llm import LLMService, EXT_LLM_SERVICE, LLMNotConfiguredError -from ...models import ReflectInput, ReflectResult, RecallInput, RecallMode, DetailLevel # Token budget mapping for detail levels REFLECT_TOKEN_BUDGETS = { @@ -30,13 +30,7 @@ class ReflectService: """Service for LLM-powered memory synthesis.""" - def __init__( - self, - storage: StorageBackend, - memory_service: MemoryService, - 
llm_service: Optional[LLMService] = None, - v: Variables = None - ): + def __init__(self, storage: StorageBackend, memory_service: MemoryService, llm_service: LLMService | None = None, v: Variables = None): self.storage = storage self.memory_service = memory_service self.llm = llm_service @@ -50,13 +44,13 @@ def _build_recall_input( self, query: str, *, - types: Optional[list] = None, - subtypes: Optional[list] = None, - tags: Optional[list] = None, - context_id: Optional[str] = None, - user_id: Optional[str] = None, - observer_id: Optional[str] = None, - subject_id: Optional[str] = None, + types: list | None = None, + subtypes: list | None = None, + tags: list | None = None, + context_id: str | None = None, + user_id: str | None = None, + observer_id: str | None = None, + subject_id: str | None = None, mode: RecallMode = RecallMode.LLM, limit: int = 20, min_relevance: float = 0.5, @@ -81,9 +75,9 @@ def _build_recall_input( ) async def reflect( - self, - workspace_id: str, - input: ReflectInput, + self, + workspace_id: str, + input: ReflectInput, ) -> ReflectResult: """ Synthesize memories matching query into coherent reflection. @@ -94,13 +88,9 @@ async def reflect( 3. Send to LLM with synthesis prompt 4. Return reflection with source references """ - self.logger.info( - "Generating reflection in workspace: %s, query: %s", - workspace_id, - input.query[:50] - ) + self.logger.info("Generating reflection in workspace: %s, query: %s", workspace_id, input.query[:50]) - start_time = datetime.now(timezone.utc) + start_time = datetime.now(UTC) # 1. 
Recall relevant memories recall_input = self._build_recall_input( @@ -110,8 +100,8 @@ async def reflect( tags=input.tags, context_id=input.context_id, user_id=input.user_id, - observer_id=getattr(input, 'observer_id', None), - subject_id=getattr(input, 'subject_id', None), + observer_id=getattr(input, "observer_id", None), + subject_id=getattr(input, "subject_id", None), mode=RecallMode.LLM, # Use LLM mode for best semantic matching limit=20, # Get more memories for synthesis min_relevance=0.5, @@ -119,18 +109,12 @@ async def reflect( traverse_depth=input.depth, ) - recall_result = await self.memory_service.recall( - workspace_id=workspace_id, - input=recall_input - ) + recall_result = await self.memory_service.recall(workspace_id=workspace_id, input=recall_input) if not recall_result.memories: self.logger.warning("No memories found for reflection query: %s", input.query) return ReflectResult( - reflection="No relevant memories found to reflect upon.", - source_memories=[], - confidence=0.0, - tokens_processed=0 + reflection="No relevant memories found to reflect upon.", source_memories=[], confidence=0.0, tokens_processed=0 ) self.logger.debug("Found %s memories for reflection", len(recall_result.memories)) @@ -142,43 +126,29 @@ async def reflect( max_tokens = REFLECT_TOKEN_BUDGETS.get(input.detail_level, 4096) if self.llm: reflection, tokens_used = await self._synthesize_with_llm( - memories=recall_result.memories, - query=input.query, - max_tokens=max_tokens + memories=recall_result.memories, query=input.query, max_tokens=max_tokens ) confidence = self._calculate_confidence(recall_result.memories) else: # Fallback: Simple concatenation if no LLM available self.logger.warning("No LLM client available, using simple synthesis") reflection, tokens_used, confidence = self._simple_synthesis( - memories=recall_result.memories, - query=input.query, - max_tokens=max_tokens + memories=recall_result.memories, query=input.query, max_tokens=max_tokens ) - latency_ms = 
int((datetime.now(timezone.utc) - start_time).total_seconds() * 1000) - self.logger.info( - "Generated reflection in %s ms, %s tokens, confidence: %.2f", - latency_ms, - tokens_used, - confidence - ) + latency_ms = int((datetime.now(UTC) - start_time).total_seconds() * 1000) + self.logger.info("Generated reflection in %s ms, %s tokens, confidence: %.2f", latency_ms, tokens_used, confidence) result = ReflectResult( reflection=reflection, source_memories=source_memory_ids if input.include_sources else [], confidence=confidence, - tokens_processed=tokens_used + tokens_processed=tokens_used, ) return result - async def _synthesize_with_llm( - self, - memories: list, - query: str, - max_tokens: int - ) -> tuple[str, int]: + async def _synthesize_with_llm(self, memories: list, query: str, max_tokens: int) -> tuple[str, int]: """ Use LLM to synthesize memories into coherent reflection. @@ -189,9 +159,7 @@ async def _synthesize_with_llm( # Build context from memories context_parts = [] for i, memory in enumerate(memories, 1): - context_parts.append( - f"[{i}] {memory.type.value.upper()} - {memory.content}" - ) + context_parts.append(f"[{i}] {memory.type.value.upper()} - {memory.content}") context = "\n\n".join(context_parts) @@ -242,12 +210,7 @@ async def _call_llm(self, prompt: str, max_tokens: int) -> str: self.logger.error("LLM call failed: %s", e) return f"LLM synthesis failed: {e}" - def _simple_synthesis( - self, - memories: list, - query: str, - max_tokens: int - ) -> tuple[str, int, float]: + def _simple_synthesis(self, memories: list, query: str, max_tokens: int) -> tuple[str, int, float]: """ Simple synthesis without LLM. 
@@ -312,10 +275,10 @@ def _calculate_confidence(self, memories: list) -> float: return min(1.0, confidence) async def answer_question( - self, - workspace_id: str, - question: str, - context_memories: Optional[list[str]] = None, + self, + workspace_id: str, + question: str, + context_memories: list[str] | None = None, ) -> ReflectResult: """ Answer a question using memories as knowledge base. @@ -348,16 +311,11 @@ async def answer_question( reflection="I don't have enough information to answer this question.", source_memories=[], confidence=0.0, - tokens_processed=0 + tokens_processed=0, ) # Generate answer using reflection - reflect_input = ReflectInput( - query=question, - detail_level=DetailLevel.OVERVIEW, - include_sources=True, - depth=1 - ) + reflect_input = ReflectInput(query=question, detail_level=DetailLevel.OVERVIEW, include_sources=True, depth=1) result = await self.reflect(workspace_id, reflect_input) return result @@ -365,7 +323,8 @@ async def answer_question( class DefaultReflectServicePlugin(ReflectServicePluginBase): """Default reflect service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def get_dependencies(self, v: Variables): return (EXT_STORAGE_BACKEND, EXT_MEMORY_SERVICE, EXT_LLM_SERVICE) @@ -374,9 +333,4 @@ def initialize(self, v: Variables, logger: Logger) -> ReflectService: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) memory: MemoryService = self.get_extension(EXT_MEMORY_SERVICE, v) llm_service: LLMService = self.get_extension(EXT_LLM_SERVICE, v) - return ReflectService( - storage=storage, - memory_service=memory, - llm_service=llm_service, - v=v - ) + return ReflectService(storage=storage, memory_service=memory, llm_service=llm_service, v=v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/__init__.py index eac00bc..23ef73c 100644 --- 
a/memorylayer-core-python/src/memorylayer_server/services/reranker/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/__init__.py @@ -6,17 +6,17 @@ - reranker-service: High-level service wrapping providers """ -from scitrera_app_framework import get_extension, Variables +from scitrera_app_framework import Variables, get_extension from .base import ( EXT_RERANKER_PROVIDER, EXT_RERANKER_SERVICE, - RerankerProvider, MultimodalRerankerProvider, - RerankerService, - RerankResult, + RerankerProvider, RerankerProviderPluginBase, + RerankerService, RerankerServicePluginBase, + RerankResult, ) @@ -32,17 +32,17 @@ def get_reranker_service(v: Variables) -> RerankerService: __all__ = [ # Extension points - 'EXT_RERANKER_PROVIDER', - 'EXT_RERANKER_SERVICE', + "EXT_RERANKER_PROVIDER", + "EXT_RERANKER_SERVICE", # Base classes - 'RerankerProvider', - 'MultimodalRerankerProvider', - 'RerankerService', - 'RerankResult', + "RerankerProvider", + "MultimodalRerankerProvider", + "RerankerService", + "RerankResult", # Plugin bases - 'RerankerProviderPluginBase', - 'RerankerServicePluginBase', + "RerankerProviderPluginBase", + "RerankerServicePluginBase", # Getters - 'get_reranker_provider', - 'get_reranker_service', + "get_reranker_provider", + "get_reranker_service", ] diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/base.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/base.py index 190fbf8..8aa1992 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/base.py @@ -12,25 +12,27 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from logging import Logger -from typing import Optional, Union, Any +from typing import Any -from scitrera_app_framework.api import Variables, Plugin, enabled_option_pattern from scitrera_app_framework import get_logger - -from .._plugin_factory import 
make_service_plugin_base +from scitrera_app_framework.api import Plugin, Variables, enabled_option_pattern from ...config import ( - MEMORYLAYER_RERANKER_PROVIDER, DEFAULT_MEMORYLAYER_RERANKER_PROVIDER, - MEMORYLAYER_RERANKER_SERVICE, DEFAULT_MEMORYLAYER_RERANKER_SERVICE, - MEMORYLAYER_RERANKER_PRELOAD_ENABLED, DEFAULT_MEMORYLAYER_RERANKER_PRELOAD_ENABLED, + DEFAULT_MEMORYLAYER_RERANKER_PRELOAD_ENABLED, + DEFAULT_MEMORYLAYER_RERANKER_PROVIDER, + DEFAULT_MEMORYLAYER_RERANKER_SERVICE, + MEMORYLAYER_RERANKER_PRELOAD_ENABLED, + MEMORYLAYER_RERANKER_PROVIDER, + MEMORYLAYER_RERANKER_SERVICE, ) - from .._constants import EXT_RERANKER_PROVIDER, EXT_RERANKER_SERVICE +from .._plugin_factory import make_service_plugin_base @dataclass class RerankResult: """Result of a reranking operation.""" + index: int # Original index in input list score: float # Relevance score (0-1) document: Any # Original document @@ -57,10 +59,10 @@ async def preload(self): @abstractmethod async def rerank( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, + self, + query: str, + documents: list[str], + instruction: str | None = None, ) -> list[float]: """ Score documents by relevance to query. @@ -76,11 +78,11 @@ async def rerank( pass async def rerank_with_indices( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, - top_k: Optional[int] = None, + self, + query: str, + documents: list[str], + instruction: str | None = None, + top_k: int | None = None, ) -> list[tuple[int, float]]: """ Score documents and return sorted indices with scores. 
@@ -113,10 +115,10 @@ class MultimodalRerankerProvider(RerankerProvider): @abstractmethod async def rerank_multimodal( - self, - query: Union[str, dict], - documents: list[Union[str, dict]], - instruction: Optional[str] = None, + self, + query: str | dict, + documents: list[str | dict], + instruction: str | None = None, ) -> list[float]: """ Score multimodal documents by relevance to a multimodal query. @@ -144,31 +146,31 @@ def __init__(self, provider: RerankerProvider, v: Variables = None): self.logger = get_logger(v, name=self.__class__.__name__) async def rerank( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, + self, + query: str, + documents: list[str], + instruction: str | None = None, ) -> list[float]: """Score documents by relevance to query.""" return await self.provider.rerank(query, documents, instruction) async def rerank_with_indices( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, - top_k: Optional[int] = None, + self, + query: str, + documents: list[str], + instruction: str | None = None, + top_k: int | None = None, ) -> list[tuple[int, float]]: """Score documents and return sorted indices with scores.""" return await self.provider.rerank_with_indices(query, documents, instruction, top_k) async def rerank_objects( - self, - query: str, - objects: list[Any], - content_fn, - instruction: Optional[str] = None, - top_k: Optional[int] = None, + self, + query: str, + objects: list[Any], + content_fn, + instruction: str | None = None, + top_k: int | None = None, ) -> list[RerankResult]: """ Rerank arbitrary objects using a content extraction function. 
@@ -190,26 +192,21 @@ async def rerank_objects( documents = [content_fn(obj) for obj in objects] # Get ranked indices - ranked = await self.provider.rerank_with_indices( - query, documents, instruction, top_k - ) + ranked = await self.provider.rerank_with_indices(query, documents, instruction, top_k) # Build results with original objects - results = [ - RerankResult(index=idx, score=score, document=objects[idx]) - for idx, score in ranked - ] + results = [RerankResult(index=idx, score=score, document=objects[idx]) for idx, score in ranked] return results async def rerank_objects_adaptive( - self, - query: str, - objects: list[Any], - content_fn, - score_fn, - requested_k: int, - instruction: Optional[str] = None, + self, + query: str, + objects: list[Any], + content_fn, + score_fn, + requested_k: int, + instruction: str | None = None, ) -> list[RerankResult]: """ Rerank objects with adaptive candidate sizing based on initial scores. @@ -244,9 +241,11 @@ def supports_multimodal(self) -> bool: # Plugin base classes + class RerankerProviderPluginBase(Plugin): """Base Plugin for reranker providers.""" - PROVIDER_NAME: str = '' + + PROVIDER_NAME: str = "" def name(self) -> str: return f"{EXT_RERANKER_PROVIDER}|{self.PROVIDER_NAME}" @@ -255,7 +254,7 @@ def extension_point_name(self, v: Variables) -> str: return EXT_RERANKER_PROVIDER def is_enabled(self, v: Variables) -> bool: - return enabled_option_pattern(self, v, MEMORYLAYER_RERANKER_PROVIDER, self_attr='PROVIDER_NAME') + return enabled_option_pattern(self, v, MEMORYLAYER_RERANKER_PROVIDER, self_attr="PROVIDER_NAME") def on_registration(self, v: Variables) -> None: v.set_default_value(MEMORYLAYER_RERANKER_PROVIDER, DEFAULT_MEMORYLAYER_RERANKER_PROVIDER) @@ -267,9 +266,7 @@ async def async_ready(self, v: Variables, logger: Logger, value: object | None) # noinspection PyTypeChecker provider: RerankerProvider = value preload = v.environ( - MEMORYLAYER_RERANKER_PRELOAD_ENABLED, - 
default=DEFAULT_MEMORYLAYER_RERANKER_PRELOAD_ENABLED, - type_fn=ext_parse_bool + MEMORYLAYER_RERANKER_PRELOAD_ENABLED, default=DEFAULT_MEMORYLAYER_RERANKER_PRELOAD_ENABLED, type_fn=ext_parse_bool ) if preload: diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/default.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/default.py index 15c0c62..f691061 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/default.py @@ -5,9 +5,9 @@ """ from logging import Logger -from typing import Any, Optional +from typing import Any -from scitrera_app_framework import Variables, get_extension +from scitrera_app_framework import Variables from .base import ( EXT_RERANKER_PROVIDER, @@ -17,7 +17,6 @@ RerankResult, ) - # Adaptive reranking configuration ADAPTIVE_MIN_CANDIDATES = 10 # Minimum candidates to consider ADAPTIVE_MAX_CANDIDATES = 50 # Maximum candidates to consider @@ -98,7 +97,7 @@ async def rerank_with_adaptive_k( documents: list[str], initial_scores: list[float], requested_k: int, - instruction: Optional[str] = None, + instruction: str | None = None, ) -> list[tuple[int, float]]: """ Rerank with adaptive candidate list sizing. 
@@ -117,9 +116,7 @@ async def rerank_with_adaptive_k( return [] # Compute adaptive candidate count - candidates_k = self.compute_adaptive_k( - initial_scores, requested_k, len(documents) - ) + candidates_k = self.compute_adaptive_k(initial_scores, requested_k, len(documents)) # Get top candidates by initial score indexed_initial = list(enumerate(initial_scores)) @@ -134,10 +131,7 @@ async def rerank_with_adaptive_k( rerank_scores = await self.provider.rerank(query, candidate_docs, instruction) # Map back to original indices and sort by rerank score - results = [ - (candidate_indices[i], score) - for i, score in enumerate(rerank_scores) - ] + results = [(candidate_indices[i], score) for i, score in enumerate(rerank_scores)] results.sort(key=lambda x: x[1], reverse=True) return results[:requested_k] @@ -149,7 +143,7 @@ async def rerank_objects_adaptive( content_fn, score_fn, requested_k: int, - instruction: Optional[str] = None, + instruction: str | None = None, ) -> list[RerankResult]: """ Rerank objects with adaptive candidate sizing. 
@@ -173,15 +167,10 @@ async def rerank_objects_adaptive( initial_scores = [score_fn(obj) for obj in objects] # Rerank with adaptive sizing - ranked = await self.rerank_with_adaptive_k( - query, documents, initial_scores, requested_k, instruction - ) + ranked = await self.rerank_with_adaptive_k(query, documents, initial_scores, requested_k, instruction) # Build results - results = [ - RerankResult(index=idx, score=score, document=objects[idx]) - for idx, score in ranked - ] + results = [RerankResult(index=idx, score=score, document=objects[idx]) for idx, score in ranked] return results @@ -189,7 +178,7 @@ async def rerank_objects_adaptive( class DefaultRerankerServicePlugin(RerankerServicePluginBase): """Plugin for default reranker service.""" - PROVIDER_NAME = 'default' + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> object | None: provider = self.get_extension(EXT_RERANKER_PROVIDER, v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/__init__.py index 1fa3fdd..f3cf388 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/__init__.py @@ -3,6 +3,6 @@ from .provider import HyDERerankerProvider, HyDERerankerProviderPlugin __all__ = [ - 'HyDERerankerProvider', - 'HyDERerankerProviderPlugin', + "HyDERerankerProvider", + "HyDERerankerProviderPlugin", ] diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/provider.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/provider.py index 8b83f01..2cacf1b 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/provider.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/hyde/provider.py @@ -12,19 +12,18 @@ """ from logging import Logger -from typing import 
Optional from scitrera_app_framework import Variables, get_extension from ....config import RerankerProviderType from ....utils import cosine_similarity -from ..base import RerankerProvider, RerankerProviderPluginBase -from ...llm import EXT_LLM_SERVICE, LLMService from ...embedding import EXT_EMBEDDING_SERVICE, EmbeddingService +from ...llm import EXT_LLM_SERVICE, LLMService +from ..base import RerankerProvider, RerankerProviderPluginBase # Environment variable names -MEMORYLAYER_RERANKER_HYDE_MAX_TOKENS = 'MEMORYLAYER_RERANKER_HYDE_MAX_TOKENS' -MEMORYLAYER_RERANKER_HYDE_TEMPERATURE = 'MEMORYLAYER_RERANKER_HYDE_TEMPERATURE' +MEMORYLAYER_RERANKER_HYDE_MAX_TOKENS = "MEMORYLAYER_RERANKER_HYDE_MAX_TOKENS" +MEMORYLAYER_RERANKER_HYDE_TEMPERATURE = "MEMORYLAYER_RERANKER_HYDE_TEMPERATURE" # Defaults DEFAULT_HYDE_MAX_TOKENS = 2048 @@ -53,12 +52,12 @@ class HyDERerankerProvider(RerankerProvider): """ def __init__( - self, - v: Variables, - llm_service: LLMService, - embedding_service: EmbeddingService, - max_tokens: int = DEFAULT_HYDE_MAX_TOKENS, - temperature: float = DEFAULT_HYDE_TEMPERATURE, + self, + v: Variables, + llm_service: LLMService, + embedding_service: EmbeddingService, + max_tokens: int = DEFAULT_HYDE_MAX_TOKENS, + temperature: float = DEFAULT_HYDE_TEMPERATURE, ): super().__init__(v) self.llm_service = llm_service @@ -66,7 +65,7 @@ def __init__( self.max_tokens = max_tokens self.temperature = temperature - async def _generate_hypothetical_answer(self, query: str, instruction: Optional[str] = None) -> str: + async def _generate_hypothetical_answer(self, query: str, instruction: str | None = None) -> str: """Generate a hypothetical answer to the query using the LLM.""" full_query = query if instruction: @@ -84,10 +83,10 @@ async def _generate_hypothetical_answer(self, query: str, instruction: Optional[ return hypothetical_answer async def rerank( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, + self, + query: str, + documents: 
list[str], + instruction: str | None = None, ) -> list[float]: """ Score documents by HyDE similarity. diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/__init__.py index 167d57f..a64f8d2 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/__init__.py @@ -3,6 +3,6 @@ from .provider import LLMRerankerProvider, LLMRerankerProviderPlugin __all__ = [ - 'LLMRerankerProvider', - 'LLMRerankerProviderPlugin', + "LLMRerankerProvider", + "LLMRerankerProviderPlugin", ] diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/provider.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/provider.py index ae2f3b4..d9b8bcc 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/provider.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/llm/provider.py @@ -6,14 +6,12 @@ """ from logging import Logger -from typing import Optional from scitrera_app_framework import Variables, get_extension from ....config import RerankerProviderType -from ..base import RerankerProvider, RerankerProviderPluginBase from ...llm import EXT_LLM_SERVICE - +from ..base import RerankerProvider, RerankerProviderPluginBase RERANK_PROMPT_TEMPLATE = """You are a relevance scoring assistant. Score how relevant each document is to the given query. @@ -54,7 +52,7 @@ async def rerank( self, query: str, documents: list[str], - instruction: Optional[str] = None, + instruction: str | None = None, ) -> list[float]: """ Score documents by relevance to query using LLM. @@ -73,10 +71,7 @@ async def rerank( self.logger.debug("LLM reranking %d documents", len(documents)) # Format documents for prompt - docs_text = "\n".join( - f"[{i+1}] {doc[:500]}{'...' 
if len(doc) > 500 else ''}" - for i, doc in enumerate(documents) - ) + docs_text = "\n".join(f"[{i + 1}] {doc[:500]}{'...' if len(doc) > 500 else ''}" for i, doc in enumerate(documents)) # Build query with optional instruction full_query = query @@ -96,7 +91,7 @@ async def rerank( import re # Extract JSON array from response - match = re.search(r'\[[\d.,\s]+\]', response) + match = re.search(r"\[[\d.,\s]+\]", response) if match: scores = json.loads(match.group()) # Ensure we have the right number of scores diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/local/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/local/__init__.py index b0f9f1f..e30a0b1 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/local/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/local/__init__.py @@ -1,7 +1,8 @@ """Local reranker provider using sentence-transformers CrossEncoder.""" + from .provider import LocalRerankerProvider, LocalRerankerProviderPlugin __all__ = [ - 'LocalRerankerProvider', - 'LocalRerankerProviderPlugin', + "LocalRerankerProvider", + "LocalRerankerProviderPlugin", ] diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/local/provider.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/local/provider.py index 9eb4fa1..f9e53ec 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/local/provider.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/local/provider.py @@ -1,16 +1,16 @@ """Local reranker provider using sentence-transformers CrossEncoder.""" + import math from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ..base import RerankerProvider, RerankerProviderPluginBase from ....config import RerankerProviderType +from ..base import RerankerProvider, 
RerankerProviderPluginBase -MEMORYLAYER_RERANKER_LOCAL_MODEL = 'MEMORYLAYER_RERANKER_LOCAL_MODEL' -DEFAULT_RERANKER_LOCAL_MODEL = 'cross-encoder/ms-marco-MiniLM-L-6-v2' +MEMORYLAYER_RERANKER_LOCAL_MODEL = "MEMORYLAYER_RERANKER_LOCAL_MODEL" +DEFAULT_RERANKER_LOCAL_MODEL = "cross-encoder/ms-marco-MiniLM-L-6-v2" def _sigmoid(x: float) -> float: @@ -27,30 +27,26 @@ class LocalRerankerProvider(RerankerProvider): """ def __init__( - self, - v: Variables = None, - model_name: str = DEFAULT_RERANKER_LOCAL_MODEL, + self, + v: Variables = None, + model_name: str = DEFAULT_RERANKER_LOCAL_MODEL, ): super().__init__(v) self.model_name = model_name self._model = None self.logger = get_logger(v, name=self.__class__.__name__) - self.logger.info( - "Initialized LocalRerankerProvider: model=%s", model_name - ) + self.logger.info("Initialized LocalRerankerProvider: model=%s", model_name) def _get_model(self): """Lazy-load the CrossEncoder model.""" if self._model is None: try: from sentence_transformers import CrossEncoder + self.logger.info("Loading CrossEncoder model: %s", self.model_name) self._model = CrossEncoder(self.model_name) except ImportError: - raise ImportError( - "sentence-transformers package not installed. " - "Install with: pip install sentence-transformers" - ) + raise ImportError("sentence-transformers package not installed. Install with: pip install sentence-transformers") return self._model async def preload(self): @@ -58,10 +54,10 @@ async def preload(self): self._get_model() async def rerank( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, + self, + query: str, + documents: list[str], + instruction: str | None = None, ) -> list[float]: """Score documents by relevance to query using CrossEncoder. 
@@ -84,7 +80,8 @@ async def rerank( self.logger.debug( "Reranking %d documents for query: %s chars", - len(documents), len(effective_query), + len(documents), + len(effective_query), ) # CrossEncoder expects list of (query, document) pairs @@ -99,6 +96,7 @@ async def rerank( class LocalRerankerProviderPlugin(RerankerProviderPluginBase): """Plugin for local CrossEncoder reranker.""" + PROVIDER_NAME = RerankerProviderType.LOCAL def initialize(self, v: Variables, logger: Logger) -> RerankerProvider: diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/none/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/none/__init__.py index 0550897..6bf5a1e 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/none/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/none/__init__.py @@ -3,6 +3,6 @@ from .provider import NoneRerankerProvider, NoneRerankerProviderPlugin __all__ = [ - 'NoneRerankerProvider', - 'NoneRerankerProviderPlugin', + "NoneRerankerProvider", + "NoneRerankerProviderPlugin", ] diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/none/provider.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/none/provider.py index 125d3b3..8b7c8f7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/none/provider.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/none/provider.py @@ -5,11 +5,9 @@ """ from logging import Logger -from typing import Optional from scitrera_app_framework import Variables -from ....config import RerankerProviderType from ..base import RerankerProvider, RerankerProviderPluginBase @@ -21,10 +19,10 @@ class NoneRerankerProvider(RerankerProvider): """ async def rerank( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, + self, + query: str, + documents: list[str], + instruction: str | None = None, ) -> list[float]: """Return 
uniform scores (1.0) for all documents.""" return [1.0] * len(documents) @@ -33,7 +31,7 @@ async def rerank( class NoneRerankerProviderPlugin(RerankerProviderPluginBase): """Plugin for disabled reranker provider.""" - PROVIDER_NAME = 'none' + PROVIDER_NAME = "none" def initialize(self, v: Variables, logger: Logger) -> object | None: return NoneRerankerProvider(v=v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/__init__.py index ffe711b..c997500 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/__init__.py @@ -3,6 +3,6 @@ from .provider import RRFRerankerProvider, RRFRerankerProviderPlugin __all__ = [ - 'RRFRerankerProvider', - 'RRFRerankerProviderPlugin', + "RRFRerankerProvider", + "RRFRerankerProviderPlugin", ] diff --git a/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/provider.py b/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/provider.py index bf7799c..63a44c7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/provider.py +++ b/memorylayer-core-python/src/memorylayer_server/services/reranker/rrf/provider.py @@ -15,41 +15,135 @@ import re from logging import Logger -from typing import Optional from scitrera_app_framework import Variables, get_extension from ....config import RerankerProviderType from ....utils import cosine_similarity -from ..base import RerankerProvider, RerankerProviderPluginBase from ...embedding import EXT_EMBEDDING_SERVICE, EmbeddingService +from ..base import RerankerProvider, RerankerProviderPluginBase # Environment variable names -MEMORYLAYER_RERANKER_RRF_K = 'MEMORYLAYER_RERANKER_RRF_K' -MEMORYLAYER_RERANKER_RRF_MIN_QUERIES = 'MEMORYLAYER_RERANKER_RRF_MIN_QUERIES' +MEMORYLAYER_RERANKER_RRF_K = "MEMORYLAYER_RERANKER_RRF_K" 
+MEMORYLAYER_RERANKER_RRF_MIN_QUERIES = "MEMORYLAYER_RERANKER_RRF_MIN_QUERIES" # Defaults DEFAULT_RRF_K = 60 DEFAULT_RRF_MIN_QUERIES = 2 # Common English stopwords for keyword extraction -_STOPWORDS = frozenset({ - 'a', 'an', 'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', - 'of', 'with', 'by', 'from', 'is', 'are', 'was', 'were', 'be', 'been', - 'being', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', - 'could', 'should', 'may', 'might', 'shall', 'can', 'need', 'dare', - 'it', 'its', 'this', 'that', 'these', 'those', 'i', 'me', 'my', - 'we', 'our', 'you', 'your', 'he', 'him', 'his', 'she', 'her', - 'they', 'them', 'their', 'what', 'which', 'who', 'whom', 'how', - 'when', 'where', 'why', 'not', 'no', 'nor', 'so', 'if', 'then', - 'than', 'too', 'very', 'just', 'about', 'above', 'after', 'again', - 'all', 'also', 'am', 'any', 'because', 'before', 'between', 'both', - 'each', 'few', 'more', 'most', 'other', 'over', 'own', 'same', - 'some', 'such', 'up', 'down', 'out', 'off', 'only', 'into', -}) +_STOPWORDS = frozenset( + { + "a", + "an", + "the", + "and", + "or", + "but", + "in", + "on", + "at", + "to", + "for", + "of", + "with", + "by", + "from", + "is", + "are", + "was", + "were", + "be", + "been", + "being", + "have", + "has", + "had", + "do", + "does", + "did", + "will", + "would", + "could", + "should", + "may", + "might", + "shall", + "can", + "need", + "dare", + "it", + "its", + "this", + "that", + "these", + "those", + "i", + "me", + "my", + "we", + "our", + "you", + "your", + "he", + "him", + "his", + "she", + "her", + "they", + "them", + "their", + "what", + "which", + "who", + "whom", + "how", + "when", + "where", + "why", + "not", + "no", + "nor", + "so", + "if", + "then", + "than", + "too", + "very", + "just", + "about", + "above", + "after", + "again", + "all", + "also", + "am", + "any", + "because", + "before", + "between", + "both", + "each", + "few", + "more", + "most", + "other", + "over", + "own", + "same", + "some", + "such", + 
"up", + "down", + "out", + "off", + "only", + "into", + } +) # Sentence boundary pattern -_SENTENCE_SPLIT = re.compile(r'[.?!;]\s+') +_SENTENCE_SPLIT = re.compile(r"[.?!;]\s+") def _extract_keywords(text: str) -> str: @@ -61,9 +155,9 @@ def _extract_keywords(text: str) -> str: Returns: Space-joined content words, or empty string if none remain. """ - words = re.findall(r'\b\w+\b', text.lower()) + words = re.findall(r"\b\w+\b", text.lower()) keywords = [w for w in words if w not in _STOPWORDS and len(w) > 1] - return ' '.join(keywords) + return " ".join(keywords) def _split_sentences(text: str) -> list[str]: @@ -80,9 +174,9 @@ def _split_sentences(text: str) -> list[str]: def decompose_query( - query: str, - instruction: Optional[str] = None, - min_queries: int = DEFAULT_RRF_MIN_QUERIES, + query: str, + instruction: str | None = None, + min_queries: int = DEFAULT_RRF_MIN_QUERIES, ) -> list[str]: """Decompose a query into multiple sub-queries for multi-query RRF. @@ -134,9 +228,9 @@ def decompose_query( def compute_rrf_scores( - rankings: list[list[int]], - num_documents: int, - k: int = DEFAULT_RRF_K, + rankings: list[list[int]], + num_documents: int, + k: int = DEFAULT_RRF_K, ) -> list[float]: """Compute Reciprocal Rank Fusion scores from multiple rankings. 
@@ -192,11 +286,11 @@ class RRFRerankerProvider(RerankerProvider): """ def __init__( - self, - v: Variables, - embedding_service: EmbeddingService, - rrf_k: int = DEFAULT_RRF_K, - min_queries: int = DEFAULT_RRF_MIN_QUERIES, + self, + v: Variables, + embedding_service: EmbeddingService, + rrf_k: int = DEFAULT_RRF_K, + min_queries: int = DEFAULT_RRF_MIN_QUERIES, ): super().__init__(v) self.embedding_service = embedding_service @@ -204,10 +298,10 @@ def __init__( self.min_queries = min_queries async def rerank( - self, - query: str, - documents: list[str], - instruction: Optional[str] = None, + self, + query: str, + documents: list[str], + instruction: str | None = None, ) -> list[float]: """ Score documents by multi-query RRF fusion. @@ -235,7 +329,8 @@ async def rerank( # Step 1: Decompose query sub_queries = decompose_query(query, instruction, self.min_queries) self.logger.debug( - "Decomposed query into %d sub-queries", len(sub_queries), + "Decomposed query into %d sub-queries", + len(sub_queries), ) # Step 2: Embed sub-queries @@ -247,10 +342,7 @@ async def rerank( # Step 4: For each sub-query, rank documents by cosine similarity rankings = [] for q_emb in query_embeddings: - similarities = [ - cosine_similarity(q_emb, d_emb) - for d_emb in doc_embeddings - ] + similarities = [cosine_similarity(q_emb, d_emb) for d_emb in doc_embeddings] # Sort document indices by similarity (descending) ranking = sorted( range(len(documents)), diff --git a/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/__init__.py index 0f8e8fb..934df2c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/__init__.py @@ -1,10 +1,11 @@ """Tier generation service package.""" + from scitrera_app_framework import Variables, get_extension from .base import ( + 
EXT_SEMANTIC_TIERING_SERVICE, SemanticTieringService, SemanticTieringServicePluginBase, - EXT_SEMANTIC_TIERING_SERVICE, ) @@ -14,8 +15,8 @@ def get_semantic_tiering_service(v: Variables = None) -> SemanticTieringService: __all__ = ( - 'SemanticTieringService', - 'SemanticTieringServicePluginBase', - 'get_semantic_tiering_service', - 'EXT_SEMANTIC_TIERING_SERVICE', + "SemanticTieringService", + "SemanticTieringServicePluginBase", + "get_semantic_tiering_service", + "EXT_SEMANTIC_TIERING_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/base.py b/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/base.py index 6bb9cd1..d6a4d17 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/base.py @@ -3,12 +3,11 @@ Provides the ABC interface and plugin base for tier generation services. """ + from abc import ABC, abstractmethod -from typing import Optional -from ...config import MEMORYLAYER_SEMANTIC_TIERING_SERVICE, DEFAULT_MEMORYLAYER_SEMANTIC_TIERING_SERVICE +from ...config import DEFAULT_MEMORYLAYER_SEMANTIC_TIERING_SERVICE, MEMORYLAYER_SEMANTIC_TIERING_SERVICE from ...models.memory import Memory - from .._constants import EXT_LLM_SERVICE, EXT_SEMANTIC_TIERING_SERVICE, EXT_STORAGE_BACKEND from .._plugin_factory import make_service_plugin_base @@ -17,42 +16,26 @@ class SemanticTieringService(ABC): """Interface for tier generation service.""" @abstractmethod - async def generate_abstract( - self, - content: str, - max_tokens: int = 30 - ) -> str: + async def generate_abstract(self, content: str, max_tokens: int = 30) -> str: """Generate brief abstract (tier 1) from memory content.""" pass @abstractmethod - async def generate_overview( - self, - content: str, - max_tokens: int = 100 - ) -> str: + async def generate_overview(self, content: str, max_tokens: int = 100) -> str: """Generate 
overview (tier 2) from memory content.""" pass @abstractmethod - async def generate_tiers( - self, - memory_id: str, - workspace_id: str, - force: bool = False - ) -> Memory: + async def generate_tiers(self, memory_id: str, workspace_id: str, force: bool = False) -> Memory: """Generate all tiers (abstract, overview) for a memory.""" pass @abstractmethod - async def generate_tiers_for_content( - self, - content: str - ) -> tuple[str, str]: + async def generate_tiers_for_content(self, content: str) -> tuple[str, str]: """Generate tiers for content without persisting.""" pass - async def request_tier_generation(self, memory_id: str, workspace_id: str) -> Optional[str]: + async def request_tier_generation(self, memory_id: str, workspace_id: str) -> str | None: """ Request tier generation for a memory, potentially as a background task. diff --git a/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/default.py b/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/default.py index 41a73eb..dcbe134 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/semantic_tiering/default.py @@ -3,19 +3,18 @@ Generates hierarchical summaries (abstract, overview) for memories using LLM. 
""" + from logging import Logger -from typing import Optional -from scitrera_app_framework import get_logger, ext_parse_bool +from scitrera_app_framework import ext_parse_bool, get_logger from scitrera_app_framework.api import Variables -from ...config import MEMORYLAYER_SEMANTIC_TIERING_ENABLED, DEFAULT_MEMORYLAYER_SEMANTIC_TIERING_ENABLED +from ...config import DEFAULT_MEMORYLAYER_SEMANTIC_TIERING_ENABLED, MEMORYLAYER_SEMANTIC_TIERING_ENABLED +from ...models.llm import LLMMessage, LLMRequest, LLMRole from ...models.memory import Memory -from ...models.llm import LLMRequest, LLMMessage, LLMRole -from ..storage import EXT_STORAGE_BACKEND, StorageBackend from ..llm import EXT_LLM_SERVICE, LLMService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend from ..tasks.base import EXT_TASK_SERVICE, TaskService - from .base import SemanticTieringService, SemanticTieringServicePluginBase @@ -45,7 +44,7 @@ def __init__( storage: StorageBackend, v: Variables = None, enabled: bool = True, - task_service: Optional[TaskService] = None, + task_service: TaskService | None = None, ): """ Initialize tier generation service. @@ -63,16 +62,10 @@ def __init__( self.task_service = task_service self.logger = get_logger(v, name=self.__class__.__name__) self.logger.info( - "Initialized DefaultTierGenerationService (enabled=%s, background=%s)", - self.enabled, - self.task_service is not None + "Initialized DefaultTierGenerationService (enabled=%s, background=%s)", self.enabled, self.task_service is not None ) - async def generate_abstract( - self, - content: str, - max_tokens: int = 500 - ) -> str: + async def generate_abstract(self, content: str, max_tokens: int = 500) -> str: """ Generate brief abstract (tier 1) from memory content. @@ -100,11 +93,7 @@ async def generate_abstract( # Fallback: truncate content return content[:100] + "..." 
if len(content) > 100 else content - async def generate_overview( - self, - content: str, - max_tokens: int = 500 - ) -> str: + async def generate_overview(self, content: str, max_tokens: int = 500) -> str: """ Generate overview (tier 2) from memory content. @@ -132,12 +121,7 @@ async def generate_overview( # Fallback: truncate content return content[:500] + "..." if len(content) > 500 else content - async def generate_tiers( - self, - memory_id: str, - workspace_id: str, - force: bool = False - ) -> Memory: + async def generate_tiers(self, memory_id: str, workspace_id: str, force: bool = False) -> Memory: """ Generate all tiers (abstract, overview) for a memory. @@ -176,19 +160,13 @@ async def generate_tiers( # Update memory in storage updated_memory = await self.storage.update_memory( - workspace_id=workspace_id, - memory_id=memory_id, - abstract=abstract, - overview=overview + workspace_id=workspace_id, memory_id=memory_id, abstract=abstract, overview=overview ) self.logger.info("Generated tiers for memory %s", memory_id) return updated_memory - async def generate_tiers_for_content( - self, - content: str - ) -> tuple[str, str]: + async def generate_tiers_for_content(self, content: str) -> tuple[str, str]: """ Generate tiers for content without persisting. @@ -205,7 +183,7 @@ async def generate_tiers_for_content( abstract = await self.generate_abstract(overview) return abstract, overview - async def request_tier_generation(self, memory_id: str, workspace_id: str) -> Optional[str]: + async def request_tier_generation(self, memory_id: str, workspace_id: str) -> str | None: """ Request tier generation, scheduling as background task if possible. 
@@ -222,8 +200,8 @@ async def request_tier_generation(self, memory_id: str, workspace_id: str) -> Op if self.task_service: task_id = await self.task_service.schedule_task( - task_type='generate_tiers', - payload={'memory_id': memory_id, 'workspace_id': workspace_id}, + task_type="generate_tiers", + payload={"memory_id": memory_id, "workspace_id": workspace_id}, ) self.logger.debug("Scheduled background tier generation for memory %s (task=%s)", memory_id, task_id) return task_id @@ -236,7 +214,8 @@ async def request_tier_generation(self, memory_id: str, workspace_id: str) -> Op class DefaultSemanticTieringServicePlugin(SemanticTieringServicePluginBase): """Default tier generation service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> DefaultSemanticTieringService: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) @@ -249,7 +228,7 @@ def initialize(self, v: Variables, logger: Logger) -> DefaultSemanticTieringServ ) # TaskService is optional — tier generation works inline without it - task_service: Optional[TaskService] = None + task_service: TaskService | None = None try: task_service = self.get_extension(EXT_TASK_SERVICE, v) except Exception: diff --git a/memorylayer-core-python/src/memorylayer_server/services/session/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/session/__init__.py index 529f8f2..56f11c7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/session/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/session/__init__.py @@ -1,14 +1,15 @@ """Session service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - SessionService, - SessionServicePluginBase, EXT_SESSION_SERVICE, - CommitResult, CommitOptions, + CommitResult, + SessionService, + SessionServicePluginBase, ) -from scitrera_app_framework import Variables, get_extension - def 
get_session_service(v: Variables = None) -> SessionService: """Get the session service instance.""" @@ -16,10 +17,10 @@ def get_session_service(v: Variables = None) -> SessionService: __all__ = ( - 'SessionService', - 'SessionServicePluginBase', - 'get_session_service', - 'EXT_SESSION_SERVICE', - 'CommitResult', - 'CommitOptions', + "SessionService", + "SessionServicePluginBase", + "get_session_service", + "EXT_SESSION_SERVICE", + "CommitResult", + "CommitOptions", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/session/base.py b/memorylayer-core-python/src/memorylayer_server/services/session/base.py index 188f694..9e982c8 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/session/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/session/base.py @@ -18,11 +18,13 @@ import logging from abc import ABC, abstractmethod from datetime import datetime -from typing import Optional, Any, List, Dict, TYPE_CHECKING +from typing import TYPE_CHECKING, Any from ...config import ( - MEMORYLAYER_SESSION_SERVICE, DEFAULT_MEMORYLAYER_SESSION_SERVICE, - MEMORYLAYER_SESSION_TOUCH_TTL, DEFAULT_MEMORYLAYER_SESSION_TOUCH_TTL, + DEFAULT_MEMORYLAYER_SESSION_SERVICE, + DEFAULT_MEMORYLAYER_SESSION_TOUCH_TTL, + MEMORYLAYER_SESSION_SERVICE, + MEMORYLAYER_SESSION_TOUCH_TTL, ) from ...models import Session, WorkingMemory @@ -37,16 +39,16 @@ class CommitResult: """Result of a session commit operation.""" def __init__( - self, - session_id: str, - committed_at: datetime, - memories_committed: int = 0, - associations_committed: int = 0, - success: bool = True, - error: Optional[str] = None, - memories_extracted: int = 0, - memories_deduplicated: int = 0, - extraction_summary: Optional[Dict[str, Any]] = None + self, + session_id: str, + committed_at: datetime, + memories_committed: int = 0, + associations_committed: int = 0, + success: bool = True, + error: str | None = None, + memories_extracted: int = 0, + memories_deduplicated: int = 0, 
+ extraction_summary: dict[str, Any] | None = None, ): self.session_id = session_id self.committed_at = committed_at @@ -63,11 +65,11 @@ class CommitOptions: """Options for session commit operation.""" def __init__( - self, - include_working_memory: bool = True, - importance_threshold: Optional[float] = None, - delete_after_commit: bool = False, - tags: Optional[List[str]] = None + self, + include_working_memory: bool = True, + importance_threshold: float | None = None, + delete_after_commit: bool = False, + tags: list[str] | None = None, ): self.include_working_memory = include_working_memory self.importance_threshold = importance_threshold @@ -82,12 +84,7 @@ class SessionService(ABC): default_touch_ttl: int = 3600 @abstractmethod - async def create_session( - self, - workspace_id: str, - session: Session, - context_id: Optional[str] = None - ) -> Session: + async def create_session(self, workspace_id: str, session: Session, context_id: str | None = None) -> Session: """Store a new session. Args: @@ -101,12 +98,12 @@ async def create_session( pass @abstractmethod - async def get_session(self, workspace_id: str, session_id: str) -> Optional[Session]: + async def get_session(self, workspace_id: str, session_id: str) -> Session | None: """Retrieve session if not expired.""" pass @abstractmethod - async def get(self, session_id: str) -> Optional[Session]: + async def get(self, session_id: str) -> Session | None: """Retrieve session by ID without workspace filter. 
This method allows looking up a session when the workspace is not known, @@ -135,36 +132,31 @@ async def delete_session(self, workspace_id: str, session_id: str, skip_auto_com @abstractmethod async def set_working_memory( - self, - workspace_id: str, - session_id: str, - key: str, - value: Any, - ttl_seconds: Optional[int] = None + self, workspace_id: str, session_id: str, key: str, value: Any, ttl_seconds: int | None = None ) -> WorkingMemory: """Store key-value data within a session's working memory.""" pass @abstractmethod - async def get_working_memory(self, workspace_id: str, session_id: str, key: str) -> Optional[WorkingMemory]: + async def get_working_memory(self, workspace_id: str, session_id: str, key: str) -> WorkingMemory | None: """Retrieve specific working memory entry.""" pass @abstractmethod - async def get_all_working_memory(self, workspace_id: str, session_id: str) -> List[WorkingMemory]: + async def get_all_working_memory(self, workspace_id: str, session_id: str) -> list[WorkingMemory]: """Get all working memory entries for a session.""" pass @abstractmethod async def get_briefing( - self, - workspace_id: str, - lookback_minutes: int = 60, - detail_level: str = "abstract", - limit: int = 10, - include_memories: bool = True, - include_contradictions: bool = True, - ) -> 'SessionBriefing': + self, + workspace_id: str, + lookback_minutes: int = 60, + detail_level: str = "abstract", + limit: int = 10, + include_memories: bool = True, + include_contradictions: bool = True, + ) -> "SessionBriefing": """Generate a session briefing with workspace summary and recent activity. 
Args: @@ -182,12 +174,7 @@ async def get_briefing( # ======================================== @abstractmethod - async def commit_session( - self, - workspace_id: str, - session_id: str, - options: Optional[CommitOptions] = None - ) -> CommitResult: + async def commit_session(self, workspace_id: str, session_id: str, options: CommitOptions | None = None) -> CommitResult: """Commit session working memory to long-term storage. This moves working memory entries to persistent memory storage, @@ -204,12 +191,7 @@ async def commit_session( pass @abstractmethod - async def touch_session( - self, - workspace_id: str, - session_id: str, - extend_seconds: Optional[int] = None - ) -> Session: + async def touch_session(self, workspace_id: str, session_id: str, extend_seconds: int | None = None) -> Session: """Extend session TTL using sliding window. Resets expires_at to now + TTL. If extend_seconds is provided, @@ -229,12 +211,7 @@ async def touch_session( pass @abstractmethod - async def list_sessions( - self, - workspace_id: str, - context_id: Optional[str] = None, - include_expired: bool = False - ) -> List[Session]: + async def list_sessions(self, workspace_id: str, context_id: str | None = None, include_expired: bool = False) -> list[Session]: """List sessions in a workspace. 
Args: diff --git a/memorylayer-core-python/src/memorylayer_server/services/session/in_memory.py b/memorylayer-core-python/src/memorylayer_server/services/session/in_memory.py index 461d452..479f35d 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/session/in_memory.py +++ b/memorylayer-core-python/src/memorylayer_server/services/session/in_memory.py @@ -1,22 +1,23 @@ """Default session service implementation.""" + import json -from datetime import datetime, timezone, timedelta +from datetime import UTC, datetime, timedelta from logging import Logger -from typing import Optional, Any, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import SessionServicePluginBase, SessionService, CommitOptions, CommitResult from ...models import Session, WorkingMemory from ...models.session import SessionBriefing +from .base import CommitOptions, CommitResult, SessionService, SessionServicePluginBase if TYPE_CHECKING: - from ..storage import StorageBackend - from ..extraction import ExtractionService + from ..contradiction import ContradictionService from ..deduplication import DeduplicationService + from ..extraction import ExtractionService from ..memory import MemoryService - from ..contradiction import ContradictionService + from ..storage import StorageBackend from ..tasks import TaskService @@ -31,12 +32,12 @@ class InMemorySessionService(SessionService): def __init__( self, v: Variables = None, - storage: Optional['StorageBackend'] = None, - extraction_service: Optional['ExtractionService'] = None, - deduplication_service: Optional['DeduplicationService'] = None, - memory_service: Optional['MemoryService'] = None, - contradiction_service: Optional['ContradictionService'] = None, - task_service: Optional['TaskService'] = None, + storage: Optional["StorageBackend"] = None, + extraction_service: Optional["ExtractionService"] = None, + 
deduplication_service: Optional["DeduplicationService"] = None, + memory_service: Optional["MemoryService"] = None, + contradiction_service: Optional["ContradictionService"] = None, + task_service: Optional["TaskService"] = None, default_touch_ttl: int = 3600, ): """Initialize in-memory session storage. @@ -71,12 +72,7 @@ def _make_key(self, workspace_id: str, session_id: str) -> str: """Create composite key for session storage.""" return f"{workspace_id}:{session_id}" - async def create_session( - self, - workspace_id: str, - session: Session, - context_id: Optional[str] = None - ) -> Session: + async def create_session(self, workspace_id: str, session: Session, context_id: str | None = None) -> Session: """ Store a new session. @@ -95,15 +91,10 @@ async def create_session( self._sessions[key] = session # Initialize empty working memory dict for this session self._working_memory[key] = {} - self.logger.info( - "Created session: %s in workspace: %s, context: %s", - session.id, - workspace_id, - session.context_id - ) + self.logger.info("Created session: %s in workspace: %s, context: %s", session.id, workspace_id, session.context_id) return session - async def get_session(self, workspace_id: str, session_id: str) -> Optional[Session]: + async def get_session(self, workspace_id: str, session_id: str) -> Session | None: """ Retrieve session if it exists and has not expired. @@ -134,7 +125,7 @@ async def get_session(self, workspace_id: str, session_id: str) -> Optional[Sess self.logger.debug("Retrieved session: %s in workspace: %s", session_id, workspace_id) return session - async def get(self, session_id: str) -> Optional[Session]: + async def get(self, session_id: str) -> Session | None: """Retrieve session by ID without workspace filter. Searches all sessions. 
Within a tenant's session service, @@ -170,22 +161,12 @@ async def delete_session(self, workspace_id: str, session_id: str, skip_auto_com if session and not skip_auto_commit and session.auto_commit and session.committed_at is None: if self.extraction_service and self._memory_service: try: - self.logger.info( - "Auto-committing session %s before deletion (auto_commit=True)", - session_id - ) + self.logger.info("Auto-committing session %s before deletion (auto_commit=True)", session_id) await self.commit_session(workspace_id, session_id) except Exception as e: - self.logger.warning( - "Auto-commit failed for session %s, proceeding with deletion: %s", - session_id, - e - ) + self.logger.warning("Auto-commit failed for session %s, proceeding with deletion: %s", session_id, e) else: - self.logger.debug( - "Session %s has auto_commit=True but no extraction/memory services configured", - session_id - ) + self.logger.debug("Session %s has auto_commit=True but no extraction/memory services configured", session_id) # Remove session session_existed = key in self._sessions @@ -204,12 +185,7 @@ async def delete_session(self, workspace_id: str, session_id: str, skip_auto_com return session_existed async def set_working_memory( - self, - workspace_id: str, - session_id: str, - key: str, - value: Any, - ttl_seconds: Optional[int] = None + self, workspace_id: str, session_id: str, key: str, value: Any, ttl_seconds: int | None = None ) -> WorkingMemory: """ Set a working memory key-value pair within a session. 
@@ -240,29 +216,17 @@ async def set_working_memory( # Check if updating existing entry existing = self._working_memory[session_key].get(key) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) if existing: # Update existing entry entry = WorkingMemory( - session_id=session_id, - key=key, - value=value, - ttl_seconds=ttl_seconds, - created_at=existing.created_at, - updated_at=now + session_id=session_id, key=key, value=value, ttl_seconds=ttl_seconds, created_at=existing.created_at, updated_at=now ) self.logger.debug("Updated working memory key: %s in session: %s", key, session_id) else: # Create new entry - entry = WorkingMemory( - session_id=session_id, - key=key, - value=value, - ttl_seconds=ttl_seconds, - created_at=now, - updated_at=now - ) + entry = WorkingMemory(session_id=session_id, key=key, value=value, ttl_seconds=ttl_seconds, created_at=now, updated_at=now) self.logger.debug("Created working memory key: %s in session: %s", key, session_id) self._working_memory[session_key][key] = entry @@ -270,24 +234,19 @@ async def set_working_memory( # Write-behind: persist to long-term memory via background task content_str = value if isinstance(value, str) else json.dumps(value, default=str) await self.task_service.schedule_task( - 'remember_working_memory', + "remember_working_memory", { - 'workspace_id': workspace_id, - 'session_id': session_id, - 'key': key, - 'content': content_str, - 'context_id': session.context_id if hasattr(session, 'context_id') else None, + "workspace_id": workspace_id, + "session_id": session_id, + "key": key, + "content": content_str, + "context_id": session.context_id if hasattr(session, "context_id") else None, }, ) return entry - async def get_working_memory( - self, - workspace_id: str, - session_id: str, - key: str - ) -> Optional[WorkingMemory]: + async def get_working_memory(self, workspace_id: str, session_id: str, key: str) -> WorkingMemory | None: """ Get a specific working memory entry. 
@@ -315,11 +274,7 @@ async def get_working_memory( return entry - async def get_all_working_memory( - self, - workspace_id: str, - session_id: str - ) -> list[WorkingMemory]: + async def get_all_working_memory(self, workspace_id: str, session_id: str) -> list[WorkingMemory]: """ Get all working memory entries for a session. @@ -338,11 +293,7 @@ async def get_all_working_memory( session_key = self._make_key(workspace_id, session_id) entries = self._working_memory.get(session_key, {}) - self.logger.debug( - "Retrieved %d working memory entries from session: %s", - len(entries), - session_id - ) + self.logger.debug("Retrieved %d working memory entries from session: %s", len(entries), session_id) return list(entries.values()) @@ -390,11 +341,10 @@ async def get_briefing( # Get recent memories if requested memories = [] if include_memories: - now = datetime.now(timezone.utc) + now = datetime.now(UTC) created_after = now - timedelta(minutes=lookback_minutes) memories = await self.storage.get_recent_memories( - workspace_id, created_after=created_after, - limit=limit, detail_level=detail_level + workspace_id, created_after=created_after, limit=limit, detail_level=detail_level ) # Update workspace summary with recent count @@ -402,19 +352,21 @@ async def get_briefing( # Build recent activity list recent_activity = [] - recent_activity.append({ - "timestamp": datetime.now(timezone.utc).isoformat(), - "summary": f"Workspace stats: {workspace_summary['total_memories']} total memories", - "memories_created": 0, - "key_decisions": [], - }) + recent_activity.append( + { + "timestamp": datetime.now(UTC).isoformat(), + "summary": f"Workspace stats: {workspace_summary['total_memories']} total memories", + "memories_created": 0, + "key_decisions": [], + } + ) self.logger.debug( "Generated briefing for workspace %s: %d memories, %d associations, %d recent memories", workspace_id, workspace_summary["total_memories"], workspace_summary["total_associations"], - 
workspace_summary["recent_memories"] + workspace_summary["recent_memories"], ) # Get unresolved contradictions @@ -423,13 +375,15 @@ async def get_briefing( try: records = await self.contradiction_service.get_unresolved(workspace_id, limit=3) for record in records: - contradictions_detected.append({ - "id": record.id, - "memory_a_id": record.memory_a_id, - "memory_b_id": record.memory_b_id, - "type": record.contradiction_type, - "confidence": record.confidence, - }) + contradictions_detected.append( + { + "id": record.id, + "memory_a_id": record.memory_a_id, + "memory_b_id": record.memory_b_id, + "type": record.contradiction_type, + "confidence": record.confidence, + } + ) except Exception as e: self.logger.warning("Failed to get contradictions for briefing: %s", e) @@ -449,7 +403,7 @@ async def get_briefing( active_sessions.append(session) # Calculate recent activity (sessions created in last 24 hours) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) cutoff = now - timedelta(hours=24) recent_sessions = [s for s in active_sessions if s.created_at >= cutoff] @@ -467,18 +421,17 @@ async def get_briefing( # Build recent activity list (simple version for OSS) recent_activity = [] for session in sorted(recent_sessions, key=lambda s: s.created_at, reverse=True)[:5]: - recent_activity.append({ - "timestamp": session.created_at.isoformat(), - "session_id": session.id, - "summary": f"Session {session.id} created", - "metadata": session.metadata, - }) + recent_activity.append( + { + "timestamp": session.created_at.isoformat(), + "session_id": session.id, + "summary": f"Session {session.id} created", + "metadata": session.metadata, + } + ) self.logger.debug( - "Generated briefing for workspace %s: %d active sessions, %d recent", - workspace_id, - len(active_sessions), - len(recent_sessions) + "Generated briefing for workspace %s: %d active sessions, %d recent", workspace_id, len(active_sessions), len(recent_sessions) ) return SessionBriefing( @@ -489,12 +442,7 @@ 
async def get_briefing( memories=[], # In-memory service has no storage backend ) - async def commit_session( - self, - workspace_id: str, - session_id: str, - options: Optional['CommitOptions'] = None - ) -> 'CommitResult': + async def commit_session(self, workspace_id: str, session_id: str, options: Optional["CommitOptions"] = None) -> "CommitResult": """ Finalize a session and mark it as committed. @@ -525,12 +473,13 @@ async def commit_session( memory_count = len(working_memory_list) # Mark session as committed - committed_at = datetime.now(timezone.utc) + committed_at = datetime.now(UTC) session.committed_at = committed_at self.logger.info( "Committed session %s: %d working memory entries (persisted via write-behind)", - session_id, memory_count, + session_id, + memory_count, ) return CommitResult( @@ -542,12 +491,7 @@ async def commit_session( success=True, ) - async def touch_session( - self, - workspace_id: str, - session_id: str, - extend_seconds: Optional[int] = None - ) -> Session: + async def touch_session(self, workspace_id: str, session_id: str, extend_seconds: int | None = None) -> Session: """Extend session TTL using sliding window. Resets expires_at to now + TTL. 
If extend_seconds is provided, @@ -569,27 +513,17 @@ async def touch_session( raise ValueError(f"Session {session_id} not found or expired") ttl = extend_seconds if extend_seconds is not None else self.default_touch_ttl - session.expires_at = datetime.now(timezone.utc) + timedelta(seconds=ttl) + session.expires_at = datetime.now(UTC) + timedelta(seconds=ttl) # Update in storage key = self._make_key(workspace_id, session_id) self._sessions[key] = session - self.logger.info( - "Refreshed session %s TTL to %d seconds, new expiration: %s", - session_id, - ttl, - session.expires_at.isoformat() - ) + self.logger.info("Refreshed session %s TTL to %d seconds, new expiration: %s", session_id, ttl, session.expires_at.isoformat()) return session - async def list_sessions( - self, - workspace_id: str, - context_id: Optional[str] = None, - include_expired: bool = False - ) -> list[Session]: + async def list_sessions(self, workspace_id: str, context_id: str | None = None, include_expired: bool = False) -> list[Session]: """ List sessions in a workspace. 
@@ -621,26 +555,35 @@ async def list_sessions( class InMemorySessionServicePlugin(SessionServicePluginBase): """In-memory session service plugin (no persistence).""" - PROVIDER_NAME = 'in-memory' + + PROVIDER_NAME = "in-memory" def get_dependencies(self, v: Variables): - from ..storage import EXT_STORAGE_BACKEND - from ..extraction import EXT_EXTRACTION_SERVICE + from .._constants import EXT_TASK_SERVICE + from ..contradiction import EXT_CONTRADICTION_SERVICE from ..deduplication import EXT_DEDUPLICATION_SERVICE + from ..extraction import EXT_EXTRACTION_SERVICE from ..memory import EXT_MEMORY_SERVICE - from ..contradiction import EXT_CONTRADICTION_SERVICE - from .._constants import EXT_TASK_SERVICE - return (EXT_STORAGE_BACKEND, EXT_EXTRACTION_SERVICE, EXT_DEDUPLICATION_SERVICE, EXT_MEMORY_SERVICE, EXT_CONTRADICTION_SERVICE, EXT_TASK_SERVICE) + from ..storage import EXT_STORAGE_BACKEND + + return ( + EXT_STORAGE_BACKEND, + EXT_EXTRACTION_SERVICE, + EXT_DEDUPLICATION_SERVICE, + EXT_MEMORY_SERVICE, + EXT_CONTRADICTION_SERVICE, + EXT_TASK_SERVICE, + ) def initialize(self, v: Variables, logger: Logger) -> SessionService: - from ..storage import StorageBackend, EXT_STORAGE_BACKEND - from ..extraction import ExtractionService, EXT_EXTRACTION_SERVICE - from ..deduplication import DeduplicationService, EXT_DEDUPLICATION_SERVICE - from ..memory import MemoryService, EXT_MEMORY_SERVICE - from ..contradiction import ContradictionService, EXT_CONTRADICTION_SERVICE - from ..tasks import TaskService + from ...config import DEFAULT_MEMORYLAYER_SESSION_TOUCH_TTL, MEMORYLAYER_SESSION_TOUCH_TTL from .._constants import EXT_TASK_SERVICE - from ...config import MEMORYLAYER_SESSION_TOUCH_TTL, DEFAULT_MEMORYLAYER_SESSION_TOUCH_TTL + from ..contradiction import EXT_CONTRADICTION_SERVICE, ContradictionService + from ..deduplication import EXT_DEDUPLICATION_SERVICE, DeduplicationService + from ..extraction import EXT_EXTRACTION_SERVICE, ExtractionService + from ..memory import 
EXT_MEMORY_SERVICE, MemoryService + from ..storage import EXT_STORAGE_BACKEND, StorageBackend + from ..tasks import TaskService storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) extraction_service: ExtractionService = self.get_extension(EXT_EXTRACTION_SERVICE, v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/session/persistent.py b/memorylayer-core-python/src/memorylayer_server/services/session/persistent.py index 1622ab5..2fea1c4 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/session/persistent.py +++ b/memorylayer-core-python/src/memorylayer_server/services/session/persistent.py @@ -1,26 +1,27 @@ """Persistent session service using storage backend.""" + import json -from datetime import datetime, timezone, timedelta +from datetime import UTC, datetime, timedelta from logging import Logger -from typing import Optional, Any, List, TYPE_CHECKING +from typing import Any, Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from .base import SessionServicePluginBase, SessionService, CommitResult, CommitOptions -from ..storage import StorageBackend, EXT_STORAGE_BACKEND -from ..extraction import ExtractionService, EXT_EXTRACTION_SERVICE, ExtractionOptions -from ..deduplication import DeduplicationService, EXT_DEDUPLICATION_SERVICE -from ..memory import MemoryService, EXT_MEMORY_SERVICE -from ..contradiction import ContradictionService, EXT_CONTRADICTION_SERVICE -from ...models import Session, WorkingMemory -from ...models.session import SessionBriefing from ...config import ( - MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT, + DEFAULT_MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH, DEFAULT_MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT, MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH, - DEFAULT_MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH, + MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT, ) +from ...models import Session, WorkingMemory +from ...models.session import SessionBriefing +from 
..contradiction import EXT_CONTRADICTION_SERVICE, ContradictionService +from ..deduplication import EXT_DEDUPLICATION_SERVICE, DeduplicationService +from ..extraction import EXT_EXTRACTION_SERVICE, ExtractionService +from ..memory import EXT_MEMORY_SERVICE, MemoryService +from ..storage import EXT_STORAGE_BACKEND, StorageBackend +from .base import CommitOptions, CommitResult, SessionService, SessionServicePluginBase class PersistentSessionService(SessionService): @@ -34,15 +35,15 @@ class PersistentSessionService(SessionService): """ def __init__( - self, - storage: StorageBackend, - v: Variables = None, - extraction_service: Optional[ExtractionService] = None, - deduplication_service: Optional[DeduplicationService] = None, - memory_service: Optional[MemoryService] = None, - contradiction_service: Optional[ContradictionService] = None, - task_service: Optional['TaskService'] = None, - default_touch_ttl: int = 3600, + self, + storage: StorageBackend, + v: Variables = None, + extraction_service: ExtractionService | None = None, + deduplication_service: DeduplicationService | None = None, + memory_service: MemoryService | None = None, + contradiction_service: ContradictionService | None = None, + task_service: Optional["TaskService"] = None, + default_touch_ttl: int = 3600, ): self.storage = storage self.v = v @@ -55,20 +56,15 @@ def __init__( self.logger = get_logger(v, name=self.__class__.__name__) self.logger.info("Initialized PersistentSessionService with storage backend") - async def create_session( - self, - workspace_id: str, - session: Session, - context_id: Optional[str] = None - ) -> Session: + async def create_session(self, workspace_id: str, session: Session, context_id: str | None = None) -> Session: """Store a new session in storage backend.""" return await self.storage.create_session(workspace_id, session) - async def get_session(self, workspace_id: str, session_id: str) -> Optional[Session]: + async def get_session(self, workspace_id: str, session_id: 
str) -> Session | None: """Retrieve session from storage if not expired.""" return await self.storage.get_session(workspace_id, session_id) - async def get(self, session_id: str) -> Optional[Session]: + async def get(self, session_id: str) -> Session | None: """Retrieve session by ID without workspace filter.""" return await self.storage.get_session_by_id(session_id) @@ -95,27 +91,15 @@ async def delete_session(self, workspace_id: str, session_id: str, skip_auto_com # Auto-commit if enabled and not already committed if not skip_auto_commit and session.auto_commit and session.committed_at is None: try: - self.logger.info( - "Auto-committing session %s before deletion (auto_commit=True)", - session_id - ) + self.logger.info("Auto-committing session %s before deletion (auto_commit=True)", session_id) await self.commit_session(workspace_id, session_id) except Exception as e: - self.logger.warning( - "Auto-commit failed for session %s, proceeding with deletion: %s", - session_id, - e - ) + self.logger.warning("Auto-commit failed for session %s, proceeding with deletion: %s", session_id, e) return await self.storage.delete_session(workspace_id, session_id) async def set_working_memory( - self, - workspace_id: str, - session_id: str, - key: str, - value: Any, - ttl_seconds: Optional[int] = None + self, workspace_id: str, session_id: str, key: str, value: Any, ttl_seconds: int | None = None ) -> WorkingMemory: """Set working memory in storage backend.""" # Verify session exists @@ -123,31 +107,24 @@ async def set_working_memory( if session is None: raise ValueError(f"Session {session_id} not found or expired") - result = await self.storage.set_working_memory( - workspace_id, session_id, key, value, ttl_seconds - ) + result = await self.storage.set_working_memory(workspace_id, session_id, key, value, ttl_seconds) # Write-behind: persist to long-term memory via background task content_str = value if isinstance(value, str) else json.dumps(value, default=str) await 
self.task_service.schedule_task( - 'remember_working_memory', + "remember_working_memory", { - 'workspace_id': workspace_id, - 'session_id': session_id, - 'key': key, - 'content': content_str, - 'context_id': session.context_id if hasattr(session, 'context_id') else None, + "workspace_id": workspace_id, + "session_id": session_id, + "key": key, + "content": content_str, + "context_id": session.context_id if hasattr(session, "context_id") else None, }, ) return result - async def get_working_memory( - self, - workspace_id: str, - session_id: str, - key: str - ) -> Optional[WorkingMemory]: + async def get_working_memory(self, workspace_id: str, session_id: str, key: str) -> WorkingMemory | None: """Get working memory from storage backend.""" session = await self.get_session(workspace_id, session_id) if session is None: @@ -155,11 +132,7 @@ async def get_working_memory( return await self.storage.get_working_memory(workspace_id, session_id, key) - async def get_all_working_memory( - self, - workspace_id: str, - session_id: str - ) -> List[WorkingMemory]: + async def get_all_working_memory(self, workspace_id: str, session_id: str) -> list[WorkingMemory]: """Get all working memory from storage backend.""" session = await self.get_session(workspace_id, session_id) if session is None: @@ -175,13 +148,13 @@ async def cleanup_expired(self, workspace_id: str) -> int: return count async def get_briefing( - self, - workspace_id: str, - lookback_minutes: int = 60, - detail_level: str = "abstract", - limit: int = 10, - include_memories: bool = True, - include_contradictions: bool = True, + self, + workspace_id: str, + lookback_minutes: int = 60, + detail_level: str = "abstract", + limit: int = 10, + include_memories: bool = True, + include_contradictions: bool = True, ) -> SessionBriefing: """ Generate a session briefing with workspace summary and recent activity. 
@@ -217,11 +190,10 @@ async def get_briefing( # Get recent memories if requested memories = [] if include_memories: - now = datetime.now(timezone.utc) + now = datetime.now(UTC) created_after = now - timedelta(minutes=lookback_minutes) memories = await self.storage.get_recent_memories( - workspace_id, created_after=created_after, - limit=limit, detail_level=detail_level + workspace_id, created_after=created_after, limit=limit, detail_level=detail_level ) # Update workspace summary with recent count @@ -231,19 +203,21 @@ async def get_briefing( recent_activity = [] # Note: Storage backend doesn't track detailed session activity # Custom implementations can enhance this with actual activity tracking - recent_activity.append({ - "timestamp": datetime.now(timezone.utc).isoformat(), - "summary": f"Workspace stats: {workspace_summary['total_memories']} total memories", - "memories_created": 0, - "key_decisions": [], - }) + recent_activity.append( + { + "timestamp": datetime.now(UTC).isoformat(), + "summary": f"Workspace stats: {workspace_summary['total_memories']} total memories", + "memories_created": 0, + "key_decisions": [], + } + ) self.logger.debug( "Generated briefing for workspace %s: %d memories, %d associations, %d recent memories", workspace_id, workspace_summary["total_memories"], workspace_summary["total_associations"], - workspace_summary["recent_memories"] + workspace_summary["recent_memories"], ) # Get unresolved contradictions @@ -252,13 +226,15 @@ async def get_briefing( try: records = await self.contradiction_service.get_unresolved(workspace_id, limit=3) for record in records: - contradictions_detected.append({ - "id": record.id, - "memory_a_id": record.memory_a_id, - "memory_b_id": record.memory_b_id, - "type": record.contradiction_type, - "confidence": record.confidence, - }) + contradictions_detected.append( + { + "id": record.id, + "memory_a_id": record.memory_a_id, + "memory_b_id": record.memory_b_id, + "type": record.contradiction_type, + 
"confidence": record.confidence, + } + ) except Exception as e: self.logger.warning("Failed to get contradictions for briefing: %s", e) @@ -270,12 +246,7 @@ async def get_briefing( memories=memories, ) - async def commit_session( - self, - workspace_id: str, - session_id: str, - options: Optional[CommitOptions] = None - ) -> CommitResult: + async def commit_session(self, workspace_id: str, session_id: str, options: CommitOptions | None = None) -> CommitResult: """ Finalize a session and mark it as committed. @@ -304,25 +275,19 @@ async def commit_session( memory_count = len(working_memory_list) # Mark session as committed - committed_at = datetime.now(timezone.utc) + committed_at = datetime.now(UTC) session.committed_at = committed_at # Persist the committed_at timestamp try: - await self.storage.update_session( - workspace_id, - session_id, - committed_at=committed_at - ) + await self.storage.update_session(workspace_id, session_id, committed_at=committed_at) except Exception as e: - self.logger.warning( - "Failed to persist committed_at for session %s: %s", - session_id, e - ) + self.logger.warning("Failed to persist committed_at for session %s: %s", session_id, e) self.logger.info( "Committed session %s: %d working memory entries (persisted via write-behind)", - session_id, memory_count, + session_id, + memory_count, ) return CommitResult( @@ -340,11 +305,11 @@ def _estimate_tokens(content: str) -> int: return len(content) // 4 async def touch_session( - self, - workspace_id: str, - session_id: str, - extend_seconds: int | None = None, - ) -> 'Session': + self, + workspace_id: str, + session_id: str, + extend_seconds: int | None = None, + ) -> "Session": """Extend session TTL using sliding window. Resets expires_at to now + TTL. 
If extend_seconds is provided, @@ -384,7 +349,7 @@ async def touch_session( raise ValueError(f"Session {session_id} not found in workspace {workspace_id}") ttl = extend_seconds if extend_seconds is not None else self.default_touch_ttl - new_expires_at = datetime.now(timezone.utc) + timedelta(seconds=ttl) + new_expires_at = datetime.now(UTC) + timedelta(seconds=ttl) # Track cumulative token usage from working memory metadata_updates = {} @@ -395,10 +360,10 @@ async def touch_session( content_str = wm.value if isinstance(wm.value, str) else json.dumps(wm.value, default=str) cumulative_tokens += self._estimate_tokens(content_str) - prev_tokens = int(session.metadata.get('cumulative_tokens', 0)) - prev_extraction_tokens = int(session.metadata.get('last_extraction_tokens', 0)) + prev_tokens = int(session.metadata.get("cumulative_tokens", 0)) + prev_extraction_tokens = int(session.metadata.get("last_extraction_tokens", 0)) - metadata_updates['cumulative_tokens'] = cumulative_tokens + metadata_updates["cumulative_tokens"] = cumulative_tokens # Determine if extraction should be triggered should_extract = False @@ -408,41 +373,36 @@ async def touch_session( should_extract = True if should_extract and self.task_service is not None: - metadata_updates['last_extraction_tokens'] = cumulative_tokens + metadata_updates["last_extraction_tokens"] = cumulative_tokens self.logger.info( "Token budget trigger reached for session %s (tokens: %d), scheduling extraction", - session_id, cumulative_tokens, + session_id, + cumulative_tokens, ) await self.task_service.schedule_task( - 'session_extraction', + "session_extraction", { - 'workspace_id': workspace_id, - 'session_id': session_id, - 'context_id': session.context_id if hasattr(session, 'context_id') else None, + "workspace_id": workspace_id, + "session_id": session_id, + "context_id": session.context_id if hasattr(session, "context_id") else None, }, ) except Exception as e: self.logger.warning("Failed to track token usage for 
session %s: %s", session_id, e) updated = await self.storage.update_session( - workspace_id, session_id, expires_at=new_expires_at, + workspace_id, + session_id, + expires_at=new_expires_at, metadata={**session.metadata, **metadata_updates} if metadata_updates else None, ) if updated is None: raise ValueError(f"Failed to update session {session_id} in storage") - self.logger.info( - "Refreshed session %s TTL to %d seconds, new expiration: %s", - session_id, ttl, updated.expires_at.isoformat() - ) + self.logger.info("Refreshed session %s TTL to %d seconds, new expiration: %s", session_id, ttl, updated.expires_at.isoformat()) return updated - async def list_sessions( - self, - workspace_id: str, - context_id: str | None = None, - include_expired: bool = False - ) -> list['Session']: + async def list_sessions(self, workspace_id: str, context_id: str | None = None, include_expired: bool = False) -> list["Session"]: """List sessions for a workspace. Args: @@ -453,23 +413,30 @@ async def list_sessions( Returns: List of sessions """ - return await self.storage.list_sessions( - workspace_id, context_id=context_id, include_expired=include_expired - ) + return await self.storage.list_sessions(workspace_id, context_id=context_id, include_expired=include_expired) class PersistentSessionServicePlugin(SessionServicePluginBase): """Plugin for persistent session service.""" - PROVIDER_NAME = 'persistent' + + PROVIDER_NAME = "persistent" def get_dependencies(self, v: Variables): from .._constants import EXT_TASK_SERVICE - return (EXT_STORAGE_BACKEND, EXT_EXTRACTION_SERVICE, EXT_DEDUPLICATION_SERVICE, EXT_MEMORY_SERVICE, EXT_CONTRADICTION_SERVICE, EXT_TASK_SERVICE) + + return ( + EXT_STORAGE_BACKEND, + EXT_EXTRACTION_SERVICE, + EXT_DEDUPLICATION_SERVICE, + EXT_MEMORY_SERVICE, + EXT_CONTRADICTION_SERVICE, + EXT_TASK_SERVICE, + ) def initialize(self, v: Variables, logger: Logger) -> SessionService: - from ..tasks import TaskService + from ...config import 
DEFAULT_MEMORYLAYER_SESSION_TOUCH_TTL, MEMORYLAYER_SESSION_TOUCH_TTL from .._constants import EXT_TASK_SERVICE - from ...config import MEMORYLAYER_SESSION_TOUCH_TTL, DEFAULT_MEMORYLAYER_SESSION_TOUCH_TTL + from ..tasks import TaskService storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) extraction_service: ExtractionService = self.get_extension(EXT_EXTRACTION_SERVICE, v) @@ -492,5 +459,5 @@ def initialize(self, v: Variables, logger: Logger) -> SessionService: contradiction_service=contradiction_service, task_service=task_service, default_touch_ttl=default_touch_ttl, - v=v + v=v, ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/storage/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/storage/__init__.py index 4fe673a..e2ea3f5 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/storage/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/storage/__init__.py @@ -1,12 +1,14 @@ -from .base import StorageBackend, EXT_STORAGE_BACKEND - from scitrera_app_framework import Variables, get_extension +from .base import EXT_STORAGE_BACKEND, StorageBackend + def get_storage_backend(v: Variables = None) -> StorageBackend: return get_extension(EXT_STORAGE_BACKEND, v) __all__ = ( - 'StorageBackend', 'get_storage_backend', 'EXT_STORAGE_BACKEND', + "StorageBackend", + "get_storage_backend", + "EXT_STORAGE_BACKEND", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/storage/base.py b/memorylayer-core-python/src/memorylayer_server/services/storage/base.py index 17d40e2..2be5b1f 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/storage/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/storage/base.py @@ -1,20 +1,21 @@ """Abstract storage backend interface.""" + from abc import ABC, abstractmethod from datetime import datetime from logging import Logger -from typing import TYPE_CHECKING, Optional, Any +from typing import 
TYPE_CHECKING, Any, Optional -from scitrera_app_framework import get_logger, get_extension -from scitrera_app_framework.api import Variables, Plugin, enabled_option_pattern +from scitrera_app_framework import get_logger +from scitrera_app_framework.api import Plugin, Variables, enabled_option_pattern -from ...config import MEMORYLAYER_STORAGE_BACKEND, DEFAULT_MEMORYLAYER_STORAGE_BACKEND +from ...config import DEFAULT_MEMORYLAYER_STORAGE_BACKEND, MEMORYLAYER_STORAGE_BACKEND +from ...models.association import AssociateInput, Association, GraphQueryResult +from ...models.memory import Memory, RememberInput +from ...models.workspace import Context, Workspace -from ...models.memory import Memory, RememberInput, RecallInput, RecallResult -from ...models.association import Association, AssociateInput, GraphQueryInput, GraphQueryResult -from ...models.workspace import Workspace, Context if TYPE_CHECKING: from ...models import Session, WorkingMemory - from ...models.chat import ChatThread, ChatMessage, MessageInput + from ...models.chat import ChatMessage, ChatThread, MessageInput from .._constants import EXT_STORAGE_BACKEND @@ -50,16 +51,16 @@ async def create_memory(self, workspace_id: str, input: RememberInput) -> Memory pass @abstractmethod - async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool = True) -> Optional[Memory]: + async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool = True) -> Memory | None: """Get memory by ID within a workspace. Set track_access=False for internal reads that should not affect decay tracking.""" pass - async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> Optional[Memory]: + async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> Memory | None: """Get memory by ID without workspace filter. 
Memory IDs are globally unique.""" raise NotImplementedError("Subclass should implement get_memory_by_id") @abstractmethod - async def update_memory(self, workspace_id: str, memory_id: str, **updates) -> Optional[Memory]: + async def update_memory(self, workspace_id: str, memory_id: str, **updates) -> Memory | None: """Update memory fields.""" pass @@ -70,38 +71,38 @@ async def delete_memory(self, workspace_id: str, memory_id: str, hard: bool = Fa @abstractmethod async def search_memories( - self, - workspace_id: str, - query_embedding: list[float], - limit: int = 10, - offset: int = 0, - min_relevance: float = 0.5, - types: Optional[list[str]] = None, - subtypes: Optional[list[str]] = None, - tags: Optional[list[str]] = None, - include_archived: bool = False, - observer_id: Optional[str] = None, - subject_id: Optional[str] = None, - created_after: Optional[str] = None, - created_before: Optional[str] = None, - user_id: Optional[str] = None, + self, + workspace_id: str, + query_embedding: list[float], + limit: int = 10, + offset: int = 0, + min_relevance: float = 0.5, + types: list[str] | None = None, + subtypes: list[str] | None = None, + tags: list[str] | None = None, + include_archived: bool = False, + observer_id: str | None = None, + subject_id: str | None = None, + created_after: str | None = None, + created_before: str | None = None, + user_id: str | None = None, ) -> list[tuple[Memory, float]]: """Vector similarity search, returns (memory, relevance_score) tuples.""" pass @abstractmethod async def full_text_search( - self, - workspace_id: str, - query: str, - limit: int = 10, - offset: int = 0, + self, + workspace_id: str, + query: str, + limit: int = 10, + offset: int = 0, ) -> list[Memory]: """Full-text search on memory content.""" pass @abstractmethod - async def get_memory_by_hash(self, workspace_id: str, content_hash: str) -> Optional[Memory]: + async def get_memory_by_hash(self, workspace_id: str, content_hash: str) -> Memory | None: """Get memory by 
content hash for deduplication.""" pass @@ -136,23 +137,23 @@ async def create_association(self, workspace_id: str, input: AssociateInput) -> @abstractmethod async def get_associations( - self, - workspace_id: str, - memory_id: str, - direction: str = "both", # outgoing, incoming, both - relationships: Optional[list[str]] = None, + self, + workspace_id: str, + memory_id: str, + direction: str = "both", # outgoing, incoming, both + relationships: list[str] | None = None, ) -> list[Association]: """Get associations for a memory.""" pass @abstractmethod async def traverse_graph( - self, - workspace_id: str, - start_id: str, - max_depth: int = 3, - relationships: Optional[list[str]] = None, - direction: str = "both", + self, + workspace_id: str, + start_id: str, + max_depth: int = 3, + relationships: list[str] | None = None, + direction: str = "both", ) -> GraphQueryResult: """Multi-hop graph traversal.""" pass @@ -164,7 +165,7 @@ async def create_workspace(self, workspace: Workspace) -> Workspace: pass @abstractmethod - async def get_workspace(self, workspace_id: str) -> Optional[Workspace]: + async def get_workspace(self, workspace_id: str) -> Workspace | None: """Get workspace by ID.""" pass @@ -175,7 +176,7 @@ async def create_context(self, workspace_id: str, context: Context) -> Context: pass @abstractmethod - async def get_context(self, workspace_id: str, context_id: str) -> Optional[Context]: + async def get_context(self, workspace_id: str, context_id: str) -> Context | None: """Get context by ID.""" pass @@ -201,17 +202,17 @@ async def get_workspace_stats(self, workspace_id: str) -> dict: # Session operations (for persistent sessions) @abstractmethod - async def create_session(self, workspace_id: str, session: 'Session') -> 'Session': + async def create_session(self, workspace_id: str, session: "Session") -> "Session": """Store a new session.""" pass @abstractmethod - async def get_session(self, workspace_id: str, session_id: str) -> Optional['Session']: + 
async def get_session(self, workspace_id: str, session_id: str) -> Optional["Session"]: """Get session by ID (returns None if not found or expired).""" pass @abstractmethod - async def get_session_by_id(self, session_id: str) -> Optional['Session']: + async def get_session_by_id(self, session_id: str) -> Optional["Session"]: """Get session by ID without workspace filter. Useful when looking up a session from the X-Session-ID header @@ -226,32 +227,18 @@ async def delete_session(self, workspace_id: str, session_id: str) -> bool: @abstractmethod async def set_working_memory( - self, - workspace_id: str, - session_id: str, - key: str, - value: Any, - ttl_seconds: Optional[int] = None - ) -> 'WorkingMemory': + self, workspace_id: str, session_id: str, key: str, value: Any, ttl_seconds: int | None = None + ) -> "WorkingMemory": """Set working memory key-value within session.""" pass @abstractmethod - async def get_working_memory( - self, - workspace_id: str, - session_id: str, - key: str - ) -> Optional['WorkingMemory']: + async def get_working_memory(self, workspace_id: str, session_id: str, key: str) -> Optional["WorkingMemory"]: """Get specific working memory entry.""" pass @abstractmethod - async def get_all_working_memory( - self, - workspace_id: str, - session_id: str - ) -> list['WorkingMemory']: + async def get_all_working_memory(self, workspace_id: str, session_id: str) -> list["WorkingMemory"]: """Get all working memory entries for session.""" pass @@ -265,7 +252,7 @@ async def cleanup_all_expired_sessions(self) -> int: # Default implementation: no-op (subclasses should override for efficiency) return 0 - async def list_expired_sessions(self, limit: int = 100) -> list['Session']: + async def list_expired_sessions(self, limit: int = 100) -> list["Session"]: """List expired sessions that need cleanup. 
Used by the cleanup task to retrieve sessions before deletion, @@ -280,12 +267,7 @@ async def list_expired_sessions(self, limit: int = 100) -> list['Session']: # Default implementation: empty list (subclasses should override) return [] - async def update_session( - self, - workspace_id: str, - session_id: str, - **updates - ) -> Optional['Session']: + async def update_session(self, workspace_id: str, session_id: str, **updates) -> Optional["Session"]: """Update session fields. Args: @@ -300,11 +282,11 @@ async def update_session( return None async def list_sessions( - self, - workspace_id: str, - context_id: str | None = None, - include_expired: bool = False, - ) -> list['Session']: + self, + workspace_id: str, + context_id: str | None = None, + include_expired: bool = False, + ) -> list["Session"]: """List sessions for a workspace. Args: @@ -345,15 +327,15 @@ async def list_all_workspace_ids(self) -> list[str]: # Contradiction service support methods (non-abstract with default no-op implementations) - async def create_contradiction(self, contradiction: 'ContradictionRecord') -> 'ContradictionRecord': + async def create_contradiction(self, contradiction: "ContradictionRecord") -> "ContradictionRecord": """Store a contradiction record. Override in subclasses.""" return contradiction - async def get_contradiction(self, workspace_id: str, contradiction_id: str) -> Optional['ContradictionRecord']: + async def get_contradiction(self, workspace_id: str, contradiction_id: str) -> Optional["ContradictionRecord"]: """Get a specific contradiction. Override in subclasses.""" return None - async def get_unresolved_contradictions(self, workspace_id: str, limit: int = 10) -> list['ContradictionRecord']: + async def get_unresolved_contradictions(self, workspace_id: str, limit: int = 10) -> list["ContradictionRecord"]: """Get unresolved contradictions. 
Override in subclasses.""" return [] @@ -362,32 +344,32 @@ async def resolve_contradiction( workspace_id: str, contradiction_id: str, resolution: str, - merged_content: Optional[str] = None, - ) -> Optional['ContradictionRecord']: + merged_content: str | None = None, + ) -> Optional["ContradictionRecord"]: """Resolve a contradiction. Override in subclasses.""" return None # Chat history operations (non-abstract with default no-op implementations) - async def create_thread(self, thread: 'ChatThread') -> 'ChatThread': + async def create_thread(self, thread: "ChatThread") -> "ChatThread": """Store a new chat thread. Override in subclasses.""" return thread - async def get_thread(self, workspace_id: str, thread_id: str) -> Optional['ChatThread']: + async def get_thread(self, workspace_id: str, thread_id: str) -> Optional["ChatThread"]: """Get chat thread by ID. Override in subclasses.""" return None async def list_threads( self, workspace_id: str, - user_id: Optional[str] = None, + user_id: str | None = None, limit: int = 50, offset: int = 0, - ) -> list['ChatThread']: + ) -> list["ChatThread"]: """List chat threads in a workspace. Override in subclasses.""" return [] - async def update_thread(self, workspace_id: str, thread_id: str, **updates) -> Optional['ChatThread']: + async def update_thread(self, workspace_id: str, thread_id: str, **updates) -> Optional["ChatThread"]: """Update thread fields. Override in subclasses.""" return None @@ -399,8 +381,8 @@ async def append_messages( self, workspace_id: str, thread_id: str, - messages: list['MessageInput'], - ) -> list['ChatMessage']: + messages: list["MessageInput"], + ) -> list["ChatMessage"]: """Append messages to a thread. 
Override in subclasses.""" return [] @@ -410,9 +392,9 @@ async def get_messages( thread_id: str, limit: int = 100, offset: int = 0, - after_index: Optional[int] = None, + after_index: int | None = None, order: str = "asc", - ) -> list['ChatMessage']: + ) -> list["ChatMessage"]: """Get messages from a thread. Override in subclasses.""" return [] @@ -420,7 +402,7 @@ async def get_message_count(self, workspace_id: str, thread_id: str) -> int: """Get total message count for a thread. Override in subclasses.""" return 0 - async def list_expired_threads(self, limit: int = 100) -> list['ChatThread']: + async def list_expired_threads(self, limit: int = 100) -> list["ChatThread"]: """List expired chat threads across all workspaces. Enables efficient cleanup of expired threads via background tasks. @@ -446,7 +428,7 @@ def extension_point_name(self, v: Variables) -> str: return EXT_STORAGE_BACKEND def is_enabled(self, v: Variables) -> bool: - return enabled_option_pattern(self, v, MEMORYLAYER_STORAGE_BACKEND, self_attr='PROVIDER_NAME') + return enabled_option_pattern(self, v, MEMORYLAYER_STORAGE_BACKEND, self_attr="PROVIDER_NAME") def on_registration(self, v: Variables) -> None: v.set_default_value(MEMORYLAYER_STORAGE_BACKEND, DEFAULT_MEMORYLAYER_STORAGE_BACKEND) diff --git a/memorylayer-core-python/src/memorylayer_server/services/storage/in_memory.py b/memorylayer-core-python/src/memorylayer_server/services/storage/in_memory.py index 1e450b9..ac880c5 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/storage/in_memory.py +++ b/memorylayer-core-python/src/memorylayer_server/services/storage/in_memory.py @@ -4,19 +4,20 @@ Provides a complete storage implementation that stores all data in memory. Data is lost on service restart - use only for testing. 
""" -from datetime import datetime, timezone + +from datetime import UTC, datetime from logging import Logger -from typing import Any, Optional +from typing import Any from scitrera_app_framework import Variables -from .base import StorageBackend, StoragePluginBase -from ...models.memory import Memory, RememberInput, MemoryType, MemorySubtype -from ...models.association import Association, AssociateInput, GraphQueryResult, GraphPath -from ...models.workspace import Workspace, Context +from ...config import DEFAULT_CONTEXT_ID, DEFAULT_TENANT_ID +from ...models.association import AssociateInput, Association, GraphPath, GraphQueryResult +from ...models.memory import Memory, MemoryType, RememberInput from ...models.session import Session, WorkingMemory -from ...config import DEFAULT_TENANT_ID, DEFAULT_CONTEXT_ID -from ...utils import generate_id, utc_now_iso, compute_content_hash, cosine_similarity +from ...models.workspace import Context, Workspace +from ...utils import compute_content_hash, cosine_similarity, generate_id, utc_now_iso +from .base import StorageBackend, StoragePluginBase class MemoryStorageBackend(StorageBackend): @@ -65,15 +66,15 @@ async def create_memory(self, workspace_id: str, input: RememberInput) -> Memory memory = Memory( id=generate_id("mem"), workspace_id=workspace_id, - tenant_id=getattr(input, 'tenant_id', None) or DEFAULT_TENANT_ID, - context_id=getattr(input, 'context_id', None) or DEFAULT_CONTEXT_ID, + tenant_id=getattr(input, "tenant_id", None) or DEFAULT_TENANT_ID, + context_id=getattr(input, "context_id", None) or DEFAULT_CONTEXT_ID, user_id=input.user_id, - observer_id=getattr(input, 'observer_id', None), - subject_id=getattr(input, 'subject_id', None), - source_document_id=getattr(input, 'source_document_id', None), - source_page_id=getattr(input, 'source_page_id', None), - source_dataset_id=getattr(input, 'source_dataset_id', None), - source_thread_id=getattr(input, 'source_thread_id', None), + observer_id=getattr(input, 
"observer_id", None), + subject_id=getattr(input, "subject_id", None), + source_document_id=getattr(input, "source_document_id", None), + source_page_id=getattr(input, "source_page_id", None), + source_dataset_id=getattr(input, "source_dataset_id", None), + source_thread_id=getattr(input, "source_thread_id", None), content=input.content, content_hash=content_hash, type=input.type or MemoryType.SEMANTIC, @@ -90,7 +91,7 @@ async def create_memory(self, workspace_id: str, input: RememberInput) -> Memory self.logger.debug("Created memory: %s in workspace: %s", memory.id, workspace_id) return memory - async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool = True) -> Optional[Memory]: + async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool = True) -> Memory | None: """Get memory by ID within a workspace.""" ws_memories = self._memories.get(workspace_id, {}) memory = ws_memories.get(memory_id) @@ -98,7 +99,7 @@ async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool return None return memory - async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> Optional[Memory]: + async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> Memory | None: """Get memory by ID without workspace filter. 
Memory IDs are globally unique.""" if memory_id in self._deleted_memories: return None @@ -107,7 +108,7 @@ async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> O return ws_memories[memory_id] return None - async def update_memory(self, workspace_id: str, memory_id: str, **updates) -> Optional[Memory]: + async def update_memory(self, workspace_id: str, memory_id: str, **updates) -> Memory | None: """Update memory fields.""" memory = await self.get_memory(workspace_id, memory_id, track_access=False) if not memory: @@ -133,21 +134,21 @@ async def delete_memory(self, workspace_id: str, memory_id: str, hard: bool = Fa return True async def search_memories( - self, - workspace_id: str, - query_embedding: list[float], - limit: int = 10, - offset: int = 0, - min_relevance: float = 0.5, - types: Optional[list[str]] = None, - subtypes: Optional[list[str]] = None, - tags: Optional[list[str]] = None, - include_archived: bool = False, - observer_id: Optional[str] = None, - subject_id: Optional[str] = None, - created_after: Optional[str] = None, - created_before: Optional[str] = None, - user_id: Optional[str] = None, + self, + workspace_id: str, + query_embedding: list[float], + limit: int = 10, + offset: int = 0, + min_relevance: float = 0.5, + types: list[str] | None = None, + subtypes: list[str] | None = None, + tags: list[str] | None = None, + include_archived: bool = False, + observer_id: str | None = None, + subject_id: str | None = None, + created_after: str | None = None, + created_before: str | None = None, + user_id: str | None = None, ) -> list[tuple[Memory, float]]: """Vector similarity search using cosine similarity.""" ws_memories = self._memories.get(workspace_id, {}) @@ -173,11 +174,11 @@ async def search_memories( continue # Filter by entity attribution - if observer_id is not None and getattr(memory, 'observer_id', None) != observer_id: + if observer_id is not None and getattr(memory, "observer_id", None) != observer_id: continue - if 
subject_id is not None and getattr(memory, 'subject_id', None) != subject_id: + if subject_id is not None and getattr(memory, "subject_id", None) != subject_id: continue - if user_id is not None and getattr(memory, 'user_id', None) != user_id: + if user_id is not None and getattr(memory, "user_id", None) != user_id: continue # Calculate cosine similarity @@ -190,14 +191,14 @@ async def search_memories( results.sort(key=lambda x: x[1], reverse=True) # Apply offset and limit - return results[offset:offset + limit] + return results[offset : offset + limit] async def full_text_search( - self, - workspace_id: str, - query: str, - limit: int = 10, - offset: int = 0, + self, + workspace_id: str, + query: str, + limit: int = 10, + offset: int = 0, ) -> list[Memory]: """Full-text search on memory content.""" ws_memories = self._memories.get(workspace_id, {}) @@ -210,9 +211,9 @@ async def full_text_search( if query_lower in memory.content.lower(): results.append(memory) - return results[offset:offset + limit] + return results[offset : offset + limit] - async def get_memory_by_hash(self, workspace_id: str, content_hash: str) -> Optional[Memory]: + async def get_memory_by_hash(self, workspace_id: str, content_hash: str) -> Memory | None: """Get memory by content hash for deduplication.""" ws_memories = self._memories.get(workspace_id, {}) for memory in ws_memories.values(): @@ -237,8 +238,8 @@ async def get_recent_memories( if memory.id in self._deleted_memories: continue # Check status - status = getattr(memory, 'status', None) - if status and str(status) != 'active': + status = getattr(memory, "status", None) + if status and str(status) != "active": continue if memory.created_at > created_after: candidates.append(memory) @@ -248,7 +249,7 @@ async def get_recent_memories( # Apply offset and limit if limit > 0: - candidates = candidates[offset:offset + limit] + candidates = candidates[offset : offset + limit] else: candidates = candidates[offset:] @@ -257,40 +258,58 @@ async 
def get_recent_memories( for memory in candidates: if detail_level == "abstract": # Return only id, abstract, type, subtype, importance, tags, created_at - results.append({ - "id": memory.id, - "abstract": getattr(memory, 'abstract', None), - "type": memory.type.value if hasattr(memory.type, 'value') else str(memory.type), - "subtype": memory.subtype.value if memory.subtype and hasattr(memory.subtype, 'value') else str(memory.subtype) if memory.subtype else None, - "importance": memory.importance, - "tags": memory.tags if memory.tags else [], - "created_at": memory.created_at.isoformat() if memory.created_at else None, - }) + results.append( + { + "id": memory.id, + "abstract": getattr(memory, "abstract", None), + "type": memory.type.value if hasattr(memory.type, "value") else str(memory.type), + "subtype": memory.subtype.value + if memory.subtype and hasattr(memory.subtype, "value") + else str(memory.subtype) + if memory.subtype + else None, + "importance": memory.importance, + "tags": memory.tags if memory.tags else [], + "created_at": memory.created_at.isoformat() if memory.created_at else None, + } + ) elif detail_level == "overview": # Add overview field - results.append({ - "id": memory.id, - "abstract": getattr(memory, 'abstract', None), - "overview": getattr(memory, 'overview', None), - "type": memory.type.value if hasattr(memory.type, 'value') else str(memory.type), - "subtype": memory.subtype.value if memory.subtype and hasattr(memory.subtype, 'value') else str(memory.subtype) if memory.subtype else None, - "importance": memory.importance, - "tags": memory.tags if memory.tags else [], - "created_at": memory.created_at.isoformat() if memory.created_at else None, - }) + results.append( + { + "id": memory.id, + "abstract": getattr(memory, "abstract", None), + "overview": getattr(memory, "overview", None), + "type": memory.type.value if hasattr(memory.type, "value") else str(memory.type), + "subtype": memory.subtype.value + if memory.subtype and 
hasattr(memory.subtype, "value") + else str(memory.subtype) + if memory.subtype + else None, + "importance": memory.importance, + "tags": memory.tags if memory.tags else [], + "created_at": memory.created_at.isoformat() if memory.created_at else None, + } + ) else: # "full" # Return everything - results.append({ - "id": memory.id, - "content": memory.content, - "abstract": getattr(memory, 'abstract', None), - "overview": getattr(memory, 'overview', None), - "type": memory.type.value if hasattr(memory.type, 'value') else str(memory.type), - "subtype": memory.subtype.value if memory.subtype and hasattr(memory.subtype, 'value') else str(memory.subtype) if memory.subtype else None, - "importance": memory.importance, - "tags": memory.tags if memory.tags else [], - "created_at": memory.created_at.isoformat() if memory.created_at else None, - }) + results.append( + { + "id": memory.id, + "content": memory.content, + "abstract": getattr(memory, "abstract", None), + "overview": getattr(memory, "overview", None), + "type": memory.type.value if hasattr(memory.type, "value") else str(memory.type), + "subtype": memory.subtype.value + if memory.subtype and hasattr(memory.subtype, "value") + else str(memory.subtype) + if memory.subtype + else None, + "importance": memory.importance, + "tags": memory.tags if memory.tags else [], + "created_at": memory.created_at.isoformat() if memory.created_at else None, + } + ) return results @@ -315,11 +334,11 @@ async def create_association(self, workspace_id: str, input: AssociateInput) -> return assoc async def get_associations( - self, - workspace_id: str, - memory_id: str, - direction: str = "both", - relationships: Optional[list[str]] = None, + self, + workspace_id: str, + memory_id: str, + direction: str = "both", + relationships: list[str] | None = None, ) -> list[Association]: """Get associations for a memory.""" ws_assocs = self._associations.get(workspace_id, {}) @@ -344,12 +363,12 @@ async def get_associations( return results async 
def traverse_graph( - self, - workspace_id: str, - start_id: str, - max_depth: int = 3, - relationships: Optional[list[str]] = None, - direction: str = "both", + self, + workspace_id: str, + start_id: str, + max_depth: int = 3, + relationships: list[str] | None = None, + direction: str = "both", ) -> GraphQueryResult: """Multi-hop graph traversal.""" visited = set() @@ -368,9 +387,7 @@ async def traverse(current_id: str, path: list[str], depth: int): paths.append(GraphPath(nodes=current_path, depth=depth)) if depth < max_depth: - associations = await self.get_associations( - workspace_id, current_id, direction, relationships - ) + associations = await self.get_associations(workspace_id, current_id, direction, relationships) for assoc in associations: next_id = assoc.target_id if assoc.source_id == current_id else assoc.source_id await traverse(next_id, current_path, depth + 1) @@ -390,7 +407,7 @@ async def create_workspace(self, workspace: Workspace) -> Workspace: self._workspaces[workspace.id] = workspace return workspace - async def get_workspace(self, workspace_id: str) -> Optional[Workspace]: + async def get_workspace(self, workspace_id: str) -> Workspace | None: """Get workspace by ID.""" return self._workspaces.get(workspace_id) @@ -407,7 +424,7 @@ async def create_context(self, workspace_id: str, context: Context) -> Context: self._contexts[workspace_id][context.id] = context return context - async def get_context(self, workspace_id: str, context_id: str) -> Optional[Context]: + async def get_context(self, workspace_id: str, context_id: str) -> Context | None: """Get context by ID.""" ws_contexts = self._contexts.get(workspace_id, {}) return ws_contexts.get(context_id) @@ -439,7 +456,7 @@ async def create_session(self, workspace_id: str, session: Session) -> Session: self._sessions[workspace_id][session.id] = session return session - async def get_session(self, workspace_id: str, session_id: str) -> Optional[Session]: + async def get_session(self, 
workspace_id: str, session_id: str) -> Session | None: """Get session by ID.""" ws_sessions = self._sessions.get(workspace_id, {}) session = ws_sessions.get(session_id) @@ -447,7 +464,7 @@ async def get_session(self, workspace_id: str, session_id: str) -> Optional[Sess return None return session - async def get_session_by_id(self, session_id: str) -> Optional[Session]: + async def get_session_by_id(self, session_id: str) -> Session | None: """Get session by ID without workspace filter. Searches all workspaces. Within a tenant's storage backend, @@ -474,12 +491,7 @@ async def delete_session(self, workspace_id: str, session_id: str) -> bool: return False async def set_working_memory( - self, - workspace_id: str, - session_id: str, - key: str, - value: Any, - ttl_seconds: Optional[int] = None + self, workspace_id: str, session_id: str, key: str, value: Any, ttl_seconds: int | None = None ) -> WorkingMemory: """Set working memory key-value within session.""" if workspace_id not in self._working_memory: @@ -487,7 +499,7 @@ async def set_working_memory( if session_id not in self._working_memory[workspace_id]: self._working_memory[workspace_id][session_id] = {} - now = datetime.now(timezone.utc) + now = datetime.now(UTC) existing = self._working_memory[workspace_id][session_id].get(key) wm = WorkingMemory( @@ -501,22 +513,13 @@ async def set_working_memory( self._working_memory[workspace_id][session_id][key] = wm return wm - async def get_working_memory( - self, - workspace_id: str, - session_id: str, - key: str - ) -> Optional[WorkingMemory]: + async def get_working_memory(self, workspace_id: str, session_id: str, key: str) -> WorkingMemory | None: """Get specific working memory entry.""" ws_wm = self._working_memory.get(workspace_id, {}) sess_wm = ws_wm.get(session_id, {}) return sess_wm.get(key) - async def get_all_working_memory( - self, - workspace_id: str, - session_id: str - ) -> list[WorkingMemory]: + async def get_all_working_memory(self, workspace_id: str, 
session_id: str) -> list[WorkingMemory]: """Get all working memory entries for session.""" ws_wm = self._working_memory.get(workspace_id, {}) sess_wm = ws_wm.get(session_id, {}) @@ -531,11 +534,10 @@ async def cleanup_expired_sessions(self, workspace_id: str) -> int: return len(expired) - class MemoryStoragePlugin(StoragePluginBase): """Plugin for in-memory storage backend.""" - PROVIDER_NAME = 'memory' + PROVIDER_NAME = "memory" def initialize(self, v: Variables, logger: Logger) -> MemoryStorageBackend: return MemoryStorageBackend(v=v) diff --git a/memorylayer-core-python/src/memorylayer_server/services/storage/sqlite.py b/memorylayer-core-python/src/memorylayer_server/services/storage/sqlite.py index f0e7308..b7f71a7 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/storage/sqlite.py +++ b/memorylayer-core-python/src/memorylayer_server/services/storage/sqlite.py @@ -1,11 +1,12 @@ """SQLite storage backend with sqlite-vec support.""" + import hashlib import json import sqlite3 -from datetime import datetime, timezone +from datetime import UTC, datetime from logging import Logger from pathlib import Path -from typing import Any, Optional +from typing import Any import aiosqlite @@ -14,33 +15,56 @@ sqlite3.register_adapter(datetime, lambda dt: dt.isoformat()) sqlite3.register_converter("datetime", lambda b: datetime.fromisoformat(b.decode())) -from scitrera_app_framework import Plugin, Variables as Variables +from scitrera_app_framework import Variables as Variables -from ...models.memory import Memory, MemoryStatus, RememberInput, MemoryType, MemorySubtype -from ...models.association import Association, AssociateInput, GraphQueryResult, GraphPath -from ...models.workspace import Workspace, Context +from ...config import DEFAULT_CONTEXT_ID, DEFAULT_MEMORYLAYER_SQLITE_STORAGE_PATH, DEFAULT_TENANT_ID, MEMORYLAYER_SQLITE_STORAGE_PATH +from ...models.association import AssociateInput, Association, GraphPath, GraphQueryResult +from ...models.memory 
import Memory, MemoryStatus, MemorySubtype, MemoryType, RememberInput from ...models.session import Session, WorkingMemory -from .base import StorageBackend, StoragePluginBase -from ...config import MEMORYLAYER_SQLITE_STORAGE_PATH, DEFAULT_MEMORYLAYER_SQLITE_STORAGE_PATH -from ...utils import generate_id, utc_now_iso, parse_datetime_utc, cosine_similarity -from ...config import DEFAULT_TENANT_ID, DEFAULT_CONTEXT_ID +from ...models.workspace import Context, Workspace +from ...utils import cosine_similarity, generate_id, parse_datetime_utc, utc_now_iso from ..contradiction.base import ContradictionRecord +from .base import StorageBackend, StoragePluginBase - -_UPDATABLE_MEMORY_COLUMNS = frozenset({ - "content", "content_hash", "type", "subtype", "importance", - "tags", "metadata", "embedding", "abstract", "overview", - "pinned", "category", "decay_factor", "status", "archived_at", - "observer_id", "subject_id", - "access_count", "last_accessed_at", "created_at", "updated_at", - "source_memory_id", -}) - -_UPDATABLE_THREAD_COLUMNS = frozenset({ - "title", "metadata", "model", "system_prompt", - "max_messages", "ttl_seconds", "expires_at", - "last_decomposed_index", -}) +_UPDATABLE_MEMORY_COLUMNS = frozenset( + { + "content", + "content_hash", + "type", + "subtype", + "importance", + "tags", + "metadata", + "embedding", + "abstract", + "overview", + "pinned", + "category", + "decay_factor", + "status", + "archived_at", + "observer_id", + "subject_id", + "access_count", + "last_accessed_at", + "created_at", + "updated_at", + "source_memory_id", + } +) + +_UPDATABLE_THREAD_COLUMNS = frozenset( + { + "title", + "metadata", + "model", + "system_prompt", + "max_messages", + "ttl_seconds", + "expires_at", + "last_decomposed_index", + } +) class SQLiteStorageBackend(StorageBackend): @@ -56,7 +80,7 @@ def __init__(self, db_path: str = "memorylayer.db", v: Variables = None): """ super().__init__(v) self.db_path = db_path - self._connection: Optional[aiosqlite.Connection] = None 
+ self._connection: aiosqlite.Connection | None = None self._has_vec_extension = False async def connect(self) -> None: @@ -78,6 +102,7 @@ async def connect(self) -> None: try: await self._connection.enable_load_extension(True) from sqlite_vec import loadable_path + lp = loadable_path() self.logger.debug("sqlite-vec extension path: %s", lp) await self._connection.load_extension(lp) @@ -192,9 +217,7 @@ async def _create_tables(self) -> None: ) ) """) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_contexts_workspace ON contexts(workspace_id)" - ) + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_contexts_workspace ON contexts(workspace_id)") # Memories await self._connection.execute(""" @@ -371,15 +394,9 @@ async def _create_tables(self) -> None: ) ) """) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_associations_workspace ON memory_associations(workspace_id)" - ) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_associations_source ON memory_associations(source_id)" - ) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_associations_target ON memory_associations(target_id)" - ) + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_associations_workspace ON memory_associations(workspace_id)") + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_associations_source ON memory_associations(source_id)") + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_associations_target ON memory_associations(target_id)") # Sessions table (for persistent session storage) await self._connection.execute(""" @@ -437,15 +454,9 @@ async def _create_tables(self) -> None: ) ) """) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_sessions_workspace ON sessions(workspace_id)" - ) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_sessions_context ON sessions(context_id)" - ) - await self._connection.execute( - "CREATE INDEX IF NOT 
EXISTS idx_sessions_expires ON sessions(expires_at)" - ) + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_sessions_workspace ON sessions(workspace_id)") + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_sessions_context ON sessions(context_id)") + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_sessions_expires ON sessions(expires_at)") # Working memory table (formerly session_contexts) await self._connection.execute(""" @@ -492,9 +503,7 @@ async def _create_tables(self) -> None: ) ON DELETE CASCADE ) """) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_working_memory_session ON working_memory(session_id)" - ) + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_working_memory_session ON working_memory(session_id)") # Contradictions table await self._connection.execute(""" @@ -515,9 +524,7 @@ async def _create_tables(self) -> None: FOREIGN KEY (memory_b_id) REFERENCES memories (id) ) """) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_contradictions_workspace ON contradictions(workspace_id)" - ) + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_contradictions_workspace ON contradictions(workspace_id)") await self._connection.execute( "CREATE INDEX IF NOT EXISTS idx_contradictions_unresolved ON contradictions(workspace_id) WHERE resolved_at IS NULL" ) @@ -543,9 +550,7 @@ async def _create_tables(self) -> None: FOREIGN KEY (workspace_id) REFERENCES workspaces (id) ) """) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_chat_threads_workspace ON chat_threads(workspace_id)" - ) + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_chat_threads_workspace ON chat_threads(workspace_id)") await self._connection.execute( "CREATE INDEX IF NOT EXISTS idx_chat_threads_user ON chat_threads(workspace_id, user_id) WHERE user_id IS NOT NULL" ) @@ -564,12 +569,8 @@ async def _create_tables(self) -> None: FOREIGN KEY (thread_id) REFERENCES 
chat_threads (id) ON DELETE CASCADE ) """) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_chat_messages_thread ON chat_messages(thread_id, message_index)" - ) - await self._connection.execute( - "CREATE INDEX IF NOT EXISTS idx_chat_messages_workspace ON chat_messages(workspace_id, thread_id)" - ) + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_chat_messages_thread ON chat_messages(thread_id, message_index)") + await self._connection.execute("CREATE INDEX IF NOT EXISTS idx_chat_messages_workspace ON chat_messages(workspace_id, thread_id)") await self._connection.commit() @@ -580,20 +581,26 @@ async def _ensure_reserved_entities(self) -> None: now = utc_now_iso() # Create _default workspace (main default for auto-discovery) - await self._connection.execute(""" + await self._connection.execute( + """ INSERT OR IGNORE INTO workspaces (id, tenant_id, name, settings, created_at, updated_at) VALUES (?, ?, 'Default Workspace', '{}', ?, ?) - """, (DEFAULT_WORKSPACE_ID, DEFAULT_TENANT_ID, now, now)) + """, + (DEFAULT_WORKSPACE_ID, DEFAULT_TENANT_ID, now, now), + ) # Create _global workspace (cross-workspace shared storage) - await self._connection.execute(""" + await self._connection.execute( + """ INSERT OR IGNORE INTO workspaces (id, tenant_id, name, settings, created_at, updated_at) VALUES (?, ?, 'Global Workspace', '{}', ?, ?) 
- """, (GLOBAL_WORKSPACE_ID, DEFAULT_TENANT_ID, now, now)) + """, + (GLOBAL_WORKSPACE_ID, DEFAULT_TENANT_ID, now, now), + ) # Get all workspaces cursor = await self._connection.execute("SELECT id FROM workspaces") @@ -602,12 +609,15 @@ async def _ensure_reserved_entities(self) -> None: # Create _default context for each workspace if not exists for workspace in workspaces: workspace_id = workspace["id"] - await self._connection.execute(""" + await self._connection.execute( + """ INSERT OR IGNORE INTO contexts (id, workspace_id, name, description, settings, created_at, updated_at) VALUES ('_default', ?, '_default', 'Default context', '{}', ?, ?) - """, (workspace_id, now, now)) + """, + (workspace_id, now, now), + ) await self._connection.commit() self.logger.info("Reserved entities initialized (_default workspace, _global workspace, _default contexts)") @@ -637,30 +647,30 @@ async def create_memory(self, workspace_id: str, input: RememberInput) -> Memory """, ( memory_id, - getattr(input, 'tenant_id', None) or DEFAULT_TENANT_ID, + getattr(input, "tenant_id", None) or DEFAULT_TENANT_ID, workspace_id, - getattr(input, 'context_id', None) or '_default', - getattr(input, 'session_id', None), + getattr(input, "context_id", None) or "_default", + getattr(input, "session_id", None), input.user_id, input.content, content_hash, input.type.value if input.type else MemoryType.SEMANTIC.value, input.subtype.value if input.subtype else None, - getattr(input, 'category', None), + getattr(input, "category", None), input.importance, json.dumps(input.tags), json.dumps(input.metadata), - getattr(input, 'abstract', None), - getattr(input, 'overview', None), - getattr(input, 'source_memory_id', None), + getattr(input, "abstract", None), + getattr(input, "overview", None), + getattr(input, "source_memory_id", None), MemoryStatus.ACTIVE.value, 0, - getattr(input, 'observer_id', None), - getattr(input, 'subject_id', None), - getattr(input, 'source_document_id', None), - getattr(input, 
'source_page_id', None), - getattr(input, 'source_dataset_id', None), - getattr(input, 'source_thread_id', None), + getattr(input, "observer_id", None), + getattr(input, "subject_id", None), + getattr(input, "source_document_id", None), + getattr(input, "source_page_id", None), + getattr(input, "source_dataset_id", None), + getattr(input, "source_thread_id", None), now, now, ), @@ -676,7 +686,7 @@ async def create_memory(self, workspace_id: str, input: RememberInput) -> Memory return await self.get_memory(workspace_id, memory_id, track_access=False) - async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool = True) -> Optional[Memory]: + async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool = True) -> Memory | None: """Get memory by ID within a workspace. Set track_access=False for internal reads that should not affect decay tracking.""" cursor = await self._connection.execute( """ @@ -708,11 +718,11 @@ async def get_memory(self, workspace_id: str, memory_id: str, track_access: bool ) await self._connection.commit() memory.access_count = (memory.access_count or 0) + 1 - memory.last_accessed_at = datetime.now(timezone.utc) + memory.last_accessed_at = datetime.now(UTC) return memory - async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> Optional[Memory]: + async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> Memory | None: """Get memory by ID without workspace filter. 
Memory IDs are globally unique.""" cursor = await self._connection.execute( """ @@ -743,11 +753,11 @@ async def get_memory_by_id(self, memory_id: str, track_access: bool = True) -> O await self._connection.commit() # Reflect the increment in the returned object memory.access_count = (memory.access_count or 0) + 1 - memory.last_accessed_at = datetime.now(timezone.utc) + memory.last_accessed_at = datetime.now(UTC) return memory - async def update_memory(self, workspace_id: str, memory_id: str, **updates) -> Optional[Memory]: + async def update_memory(self, workspace_id: str, memory_id: str, **updates) -> Memory | None: """Update memory fields.""" invalid_keys = set(updates.keys()) - _UPDATABLE_MEMORY_COLUMNS if invalid_keys: @@ -775,7 +785,7 @@ async def update_memory(self, workspace_id: str, memory_id: str, **updates) -> O query = f""" UPDATE memories - SET {', '.join(set_parts)} + SET {", ".join(set_parts)} WHERE id = ? AND workspace_id = ? AND deleted_at IS NULL """ @@ -815,10 +825,10 @@ async def delete_memory(self, workspace_id: str, memory_id: str, hard: bool = Fa return cursor.rowcount > 0 async def get_memories_for_decay( - self, - workspace_id: str, - min_age_days: int = 7, - exclude_pinned: bool = True, + self, + workspace_id: str, + min_age_days: int = 7, + exclude_pinned: bool = True, ) -> list[Memory]: """Get memories eligible for importance decay.""" where_parts = [ @@ -834,7 +844,7 @@ async def get_memories_for_decay( query = f""" SELECT * FROM memories - WHERE {' AND '.join(where_parts)} + WHERE {" AND ".join(where_parts)} ORDER BY importance DESC """ cursor = await self._connection.execute(query, params) @@ -842,12 +852,12 @@ async def get_memories_for_decay( return [self._row_to_memory(row) for row in rows] async def get_archival_candidates( - self, - workspace_id: str, - max_importance: float = 0.3, - max_access_count: int = 5, - older_than_days: int = 90, - limit: int = 100, + self, + workspace_id: str, + max_importance: float = 0.3, + 
max_access_count: int = 5, + older_than_days: int = 90, + limit: int = 100, ) -> list[Memory]: """Get memories eligible for archival.""" query = """ @@ -863,9 +873,7 @@ async def get_archival_candidates( ORDER BY importance ASC LIMIT ? """ - cursor = await self._connection.execute( - query, (workspace_id, max_importance, max_access_count, older_than_days, limit) - ) + cursor = await self._connection.execute(query, (workspace_id, max_importance, max_access_count, older_than_days, limit)) rows = await cursor.fetchall() return [self._row_to_memory(row) for row in rows] @@ -876,52 +884,74 @@ async def list_all_workspace_ids(self) -> list[str]: return [row["id"] for row in rows] async def search_memories( - self, - workspace_id: str, - query_embedding: list[float], - limit: int = 10, - offset: int = 0, - min_relevance: float = 0.5, - types: Optional[list[str]] = None, - subtypes: Optional[list[str]] = None, - tags: Optional[list[str]] = None, - include_archived: bool = False, - observer_id: Optional[str] = None, - subject_id: Optional[str] = None, - created_after: Optional[str] = None, - created_before: Optional[str] = None, - user_id: Optional[str] = None, + self, + workspace_id: str, + query_embedding: list[float], + limit: int = 10, + offset: int = 0, + min_relevance: float = 0.5, + types: list[str] | None = None, + subtypes: list[str] | None = None, + tags: list[str] | None = None, + include_archived: bool = False, + observer_id: str | None = None, + subject_id: str | None = None, + created_after: str | None = None, + created_before: str | None = None, + user_id: str | None = None, ) -> list[tuple[Memory, float]]: """Vector similarity search using sqlite-vec or fallback.""" if self._has_vec_extension: return await self._search_with_vec( - workspace_id, query_embedding, limit, offset, min_relevance, types, subtypes, tags, - include_archived=include_archived, observer_id=observer_id, subject_id=subject_id, - created_after=created_after, created_before=created_before, 
user_id=user_id, + workspace_id, + query_embedding, + limit, + offset, + min_relevance, + types, + subtypes, + tags, + include_archived=include_archived, + observer_id=observer_id, + subject_id=subject_id, + created_after=created_after, + created_before=created_before, + user_id=user_id, ) else: return await self._search_with_fallback( - workspace_id, query_embedding, limit, offset, min_relevance, types, subtypes, tags, - include_archived=include_archived, observer_id=observer_id, subject_id=subject_id, - created_after=created_after, created_before=created_before, user_id=user_id, + workspace_id, + query_embedding, + limit, + offset, + min_relevance, + types, + subtypes, + tags, + include_archived=include_archived, + observer_id=observer_id, + subject_id=subject_id, + created_after=created_after, + created_before=created_before, + user_id=user_id, ) async def _search_with_vec( - self, - workspace_id: str, - query_embedding: list[float], - limit: int, - offset: int, - min_relevance: float, - types: Optional[list[str]], - subtypes: Optional[list[str]], - tags: Optional[list[str]], - include_archived: bool = False, - observer_id: Optional[str] = None, - subject_id: Optional[str] = None, - created_after: Optional[str] = None, - created_before: Optional[str] = None, - user_id: Optional[str] = None, + self, + workspace_id: str, + query_embedding: list[float], + limit: int, + offset: int, + min_relevance: float, + types: list[str] | None, + subtypes: list[str] | None, + tags: list[str] | None, + include_archived: bool = False, + observer_id: str | None = None, + subject_id: str | None = None, + created_after: str | None = None, + created_before: str | None = None, + user_id: str | None = None, ) -> list[tuple[Memory, float]]: """Search using sqlite-vec extension.""" # Build WHERE clause @@ -997,21 +1027,21 @@ async def _search_with_vec( return results async def _search_with_fallback( - self, - workspace_id: str, - query_embedding: list[float], - limit: int, - offset: int, 
- min_relevance: float, - types: Optional[list[str]], - subtypes: Optional[list[str]], - tags: Optional[list[str]], - include_archived: bool = False, - observer_id: Optional[str] = None, - subject_id: Optional[str] = None, - created_after: Optional[str] = None, - created_before: Optional[str] = None, - user_id: Optional[str] = None, + self, + workspace_id: str, + query_embedding: list[float], + limit: int, + offset: int, + min_relevance: float, + types: list[str] | None, + subtypes: list[str] | None, + tags: list[str] | None, + include_archived: bool = False, + observer_id: str | None = None, + subject_id: str | None = None, + created_after: str | None = None, + created_before: str | None = None, + user_id: str | None = None, ) -> list[tuple[Memory, float]]: """Fallback: compute cosine similarity in Python.""" # Build WHERE clause @@ -1074,7 +1104,7 @@ async def _search_with_fallback( # Sort by relevance descending, apply offset and limit results.sort(key=lambda x: x[1], reverse=True) - return results[offset:offset + limit] + return results[offset : offset + limit] @staticmethod def _sanitize_fts5_query(query: str) -> str: @@ -1083,11 +1113,11 @@ def _sanitize_fts5_query(query: str) -> str: return f'"{escaped}"' async def full_text_search( - self, - workspace_id: str, - query: str, - limit: int = 10, - offset: int = 0, + self, + workspace_id: str, + query: str, + limit: int = 10, + offset: int = 0, ) -> list[Memory]: """Full-text search using SQLite FTS5.""" cursor = await self._connection.execute( @@ -1106,7 +1136,7 @@ async def full_text_search( return [self._row_to_memory(row) for row in rows] - async def get_memory_by_hash(self, workspace_id: str, content_hash: str) -> Optional[Memory]: + async def get_memory_by_hash(self, workspace_id: str, content_hash: str) -> Memory | None: """Get memory by content hash for deduplication.""" cursor = await self._connection.execute( """ @@ -1123,12 +1153,12 @@ async def get_memory_by_hash(self, workspace_id: str, 
content_hash: str) -> Opti return self._row_to_memory(row) if row else None async def get_recent_memories( - self, - workspace_id: str, - created_after: datetime, - limit: int = 10, - detail_level: str = "abstract", - offset: int = 0, + self, + workspace_id: str, + created_after: datetime, + limit: int = 10, + detail_level: str = "abstract", + offset: int = 0, ) -> list: """Get recent memories ordered by creation time (newest first).""" cursor = await self._connection.execute( @@ -1151,42 +1181,51 @@ async def get_recent_memories( for row in rows: if detail_level == "abstract": # Return only id, abstract, type, subtype, importance, tags, created_at - results.append({ - "id": row["id"], - "abstract": row["abstract"] if row["abstract"] else None, - "type": row["type"], - "subtype": row["subtype"] if row["subtype"] else None, - "importance": row["importance"], - "tags": json.loads(row["tags"]) if row["tags"] else [], - "created_at": row["created_at"], - }) + results.append( + { + "id": row["id"], + "abstract": row["abstract"] if row["abstract"] else None, + "type": row["type"], + "subtype": row["subtype"] if row["subtype"] else None, + "importance": row["importance"], + "tags": json.loads(row["tags"]) if row["tags"] else [], + "created_at": row["created_at"], + } + ) elif detail_level == "overview": # Add overview field (and exclude abstract field) - results.append({ - "id": row["id"], - # "abstract": row["abstract"] if row["abstract"] else None, - "overview": row["overview"] if row["overview"] else None, - "type": row["type"], - "subtype": row["subtype"] if row["subtype"] else None, - "importance": row["importance"], - "tags": json.loads(row["tags"]) if row["tags"] else [], - "created_at": row["created_at"], - }) + results.append( + { + "id": row["id"], + # "abstract": row["abstract"] if row["abstract"] else None, + "overview": row["overview"] if row["overview"] else None, + "type": row["type"], + "subtype": row["subtype"] if row["subtype"] else None, + "importance": 
row["importance"], + "tags": json.loads(row["tags"]) if row["tags"] else [], + "created_at": row["created_at"], + } + ) else: # "full" -- full detail will return the content and doesn't need to return the abstract and overview fields # Return everything as dict memory = self._row_to_memory(row) - results.append({ - "id": memory.id, - "content": memory.content, - # "abstract": memory.abstract, - # "overview": memory.overview, - "type": memory.type.value if hasattr(memory.type, 'value') else str(memory.type), - "subtype": memory.subtype.value if memory.subtype and hasattr(memory.subtype, 'value') else str( - memory.subtype) if memory.subtype else None, - "importance": memory.importance, - "tags": memory.tags, - "created_at": memory.created_at.isoformat() if memory.created_at else None, - }) + results.append( + { + "id": memory.id, + "content": memory.content, + # "abstract": memory.abstract, + # "overview": memory.overview, + "type": memory.type.value if hasattr(memory.type, "value") else str(memory.type), + "subtype": memory.subtype.value + if memory.subtype and hasattr(memory.subtype, "value") + else str(memory.subtype) + if memory.subtype + else None, + "importance": memory.importance, + "tags": memory.tags, + "created_at": memory.created_at.isoformat() if memory.created_at else None, + } + ) return results @@ -1224,11 +1263,11 @@ async def create_association(self, workspace_id: str, input: AssociateInput) -> return self._row_to_association(row) async def get_associations( - self, - workspace_id: str, - memory_id: str, - direction: str = "both", - relationships: Optional[list[str]] = None, + self, + workspace_id: str, + memory_id: str, + direction: str = "both", + relationships: list[str] | None = None, ) -> list[Association]: """Get associations for a memory.""" # Build WHERE clause @@ -1261,12 +1300,12 @@ async def get_associations( return [self._row_to_association(row) for row in rows] async def traverse_graph( - self, - workspace_id: str, - start_id: str, - 
max_depth: int = 3, - relationships: Optional[list[str]] = None, - direction: str = "both", + self, + workspace_id: str, + start_id: str, + max_depth: int = 3, + relationships: list[str] | None = None, + direction: str = "both", ) -> GraphQueryResult: """Multi-hop graph traversal using recursive CTE.""" # Build recursive CTE @@ -1410,7 +1449,7 @@ async def create_workspace(self, workspace: Workspace) -> Workspace: return workspace - async def get_workspace(self, workspace_id: str) -> Optional[Workspace]: + async def get_workspace(self, workspace_id: str) -> Workspace | None: """Get workspace by ID.""" cursor = await self._connection.execute( "SELECT * FROM workspaces WHERE id = ?", @@ -1453,7 +1492,7 @@ async def create_context(self, workspace_id: str, context: Context) -> Context: return context - async def get_context(self, workspace_id: str, context_id: str) -> Optional[Context]: + async def get_context(self, workspace_id: str, context_id: str) -> Context | None: """Get context by ID.""" cursor = await self._connection.execute( "SELECT * FROM contexts WHERE id = ? AND workspace_id = ?", @@ -1518,7 +1557,7 @@ async def create_session(self, workspace_id: str, session: Session) -> Session: INTO workspaces (id, tenant_id, name, created_at, updated_at) VALUES (?, ?, ?, ?, ?) """, - (workspace_id, "default", workspace_id, now, now) + (workspace_id, "default", workspace_id, now, now), ) await self._connection.execute( @@ -1545,7 +1584,7 @@ async def create_session(self, workspace_id: str, session: Session) -> Session: self.logger.info("Created persistent session: %s in workspace: %s", session.id, workspace_id) return session - async def get_session(self, workspace_id: str, session_id: str) -> Optional[Session]: + async def get_session(self, workspace_id: str, session_id: str) -> Session | None: """Get session by ID (returns None if not found or expired).""" cursor = await self._connection.execute( "SELECT * FROM sessions WHERE id = ? 
AND workspace_id = ?", @@ -1569,7 +1608,7 @@ async def get_session(self, workspace_id: str, session_id: str) -> Optional[Sess return session - async def get_session_by_id(self, session_id: str) -> Optional[Session]: + async def get_session_by_id(self, session_id: str) -> Session | None: """Get session by ID without workspace filter. This allows looking up a session when the workspace is not yet known, @@ -1605,16 +1644,11 @@ async def delete_session(self, workspace_id: str, session_id: str) -> bool: return deleted async def set_working_memory( - self, - workspace_id: str, - session_id: str, - key: str, - value: Any, - ttl_seconds: Optional[int] = None + self, workspace_id: str, session_id: str, key: str, value: Any, ttl_seconds: int | None = None ) -> WorkingMemory: """Set working memory key-value within session.""" now_iso = utc_now_iso() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) # Use INSERT OR REPLACE for upsert behavior await self._connection.execute( @@ -1645,12 +1679,7 @@ async def set_working_memory( updated_at=now, ) - async def get_working_memory( - self, - workspace_id: str, - session_id: str, - key: str - ) -> Optional[WorkingMemory]: + async def get_working_memory(self, workspace_id: str, session_id: str, key: str) -> WorkingMemory | None: """Get specific working memory entry.""" cursor = await self._connection.execute( "SELECT * FROM working_memory WHERE session_id = ? 
AND key = ?", @@ -1663,11 +1692,7 @@ async def get_working_memory( return self._row_to_working_memory(row) - async def get_all_working_memory( - self, - workspace_id: str, - session_id: str - ) -> list[WorkingMemory]: + async def get_all_working_memory(self, workspace_id: str, session_id: str) -> list[WorkingMemory]: """Get all working memory entries for session.""" cursor = await self._connection.execute( "SELECT * FROM working_memory WHERE session_id = ?", @@ -1729,12 +1754,7 @@ async def list_expired_sessions(self, limit: int = 100) -> list[Session]: return [self._row_to_session(row) for row in rows] - async def update_session( - self, - workspace_id: str, - session_id: str, - **updates - ) -> Optional[Session]: + async def update_session(self, workspace_id: str, session_id: str, **updates) -> Session | None: """Update session fields. Args: @@ -1752,11 +1772,11 @@ async def update_session( set_clauses = [] values = [] for field, value in updates.items(): - if field in ('committed_at', 'expires_at') and isinstance(value, datetime): + if field in ("committed_at", "expires_at") and isinstance(value, datetime): values.append(value.isoformat()) - elif field == 'auto_commit': + elif field == "auto_commit": values.append(1 if value else 0) - elif field == 'metadata': + elif field == "metadata": values.append(json.dumps(value)) else: values.append(value) @@ -1766,7 +1786,7 @@ async def update_session( query = f""" UPDATE sessions - SET {', '.join(set_clauses)} + SET {", ".join(set_clauses)} WHERE id = ? AND workspace_id = ? 
""" @@ -1779,10 +1799,10 @@ async def update_session( return await self.get_session(workspace_id, session_id) async def list_sessions( - self, - workspace_id: str, - context_id: str | None = None, - include_expired: bool = False, + self, + workspace_id: str, + context_id: str | None = None, + include_expired: bool = False, ) -> list[Session]: """List sessions for a workspace.""" conditions = ["workspace_id = ?"] @@ -1829,7 +1849,7 @@ async def create_contradiction(self, contradiction: ContradictionRecord) -> Cont self.logger.debug("Created contradiction record: %s", contradiction.id) return contradiction - async def get_contradiction(self, workspace_id: str, contradiction_id: str) -> Optional[ContradictionRecord]: + async def get_contradiction(self, workspace_id: str, contradiction_id: str) -> ContradictionRecord | None: """Get a specific contradiction.""" cursor = await self._connection.execute( "SELECT * FROM contradictions WHERE id = ? AND workspace_id = ?", @@ -1857,12 +1877,12 @@ async def get_unresolved_contradictions(self, workspace_id: str, limit: int = 10 return [self._row_to_contradiction(row) for row in rows] async def resolve_contradiction( - self, - workspace_id: str, - contradiction_id: str, - resolution: str, - merged_content: Optional[str] = None, - ) -> Optional[ContradictionRecord]: + self, + workspace_id: str, + contradiction_id: str, + resolution: str, + merged_content: str | None = None, + ) -> ContradictionRecord | None: """Resolve a contradiction.""" now = utc_now_iso() cursor = await self._connection.execute( @@ -1892,7 +1912,7 @@ def _row_to_contradiction(self, row: aiosqlite.Row) -> ContradictionRecord: memory_b_id=row["memory_b_id"], contradiction_type=row["contradiction_type"], confidence=row["confidence"] if row["confidence"] else 0.0, - detection_method=row["detection_method"] if row["detection_method"] else '', + detection_method=row["detection_method"] if row["detection_method"] else "", 
detected_at=parse_datetime_utc(row["detected_at"]), resolved_at=parse_datetime_utc(row["resolved_at"]) if row["resolved_at"] else None, resolution=row["resolution"], @@ -2000,21 +2020,21 @@ def _row_to_working_memory(self, row: aiosqlite.Row) -> WorkingMemory: def _serialize_embedding(self, embedding: list[float]) -> bytes: """Serialize embedding to binary format for storage.""" import struct - return struct.pack(f'{len(embedding)}f', *embedding) + + return struct.pack(f"{len(embedding)}f", *embedding) def _deserialize_embedding(self, blob: bytes) -> list[float]: """Deserialize embedding from binary format.""" import struct - num_floats = len(blob) // 4 - return list(struct.unpack(f'{num_floats}f', blob)) + num_floats = len(blob) // 4 + return list(struct.unpack(f"{num_floats}f", blob)) # ============================================ # Chat History Operations # ============================================ - async def create_thread(self, thread: 'ChatThread') -> 'ChatThread': - from ...models.chat import ChatThread as ChatThreadModel + async def create_thread(self, thread: "ChatThread") -> "ChatThread": await self._connection.execute( """INSERT INTO chat_threads (id, workspace_id, tenant_id, user_id, context_id, @@ -2023,9 +2043,14 @@ async def create_thread(self, thread: 'ChatThread') -> 'ChatThread': expires_at, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", ( - thread.id, thread.workspace_id, thread.tenant_id, - thread.user_id, thread.context_id, - thread.observer_id, thread.subject_id, thread.title, + thread.id, + thread.workspace_id, + thread.tenant_id, + thread.user_id, + thread.context_id, + thread.observer_id, + thread.subject_id, + thread.title, json.dumps(thread.metadata), thread.message_count, thread.last_decomposed_at.isoformat() if thread.last_decomposed_at else None, @@ -2038,8 +2063,7 @@ async def create_thread(self, thread: 'ChatThread') -> 'ChatThread': await self._connection.commit() return thread - async def 
get_thread(self, workspace_id: str, thread_id: str) -> 'Optional[ChatThread]': - from ...models.chat import ChatThread as ChatThreadModel + async def get_thread(self, workspace_id: str, thread_id: str) -> "ChatThread | None": cursor = await self._connection.execute( "SELECT * FROM chat_threads WHERE id = ? AND workspace_id = ?", (thread_id, workspace_id), @@ -2052,7 +2076,7 @@ async def get_thread(self, workspace_id: str, thread_id: str) -> 'Optional[ChatT async def list_threads( self, workspace_id: str, - user_id: Optional[str] = None, + user_id: str | None = None, limit: int = 50, offset: int = 0, ) -> list: @@ -2076,7 +2100,7 @@ async def list_threads( rows = await cursor.fetchall() return [self._row_to_chat_thread(row) for row in rows] - async def update_thread(self, workspace_id: str, thread_id: str, **updates) -> 'Optional[ChatThread]': + async def update_thread(self, workspace_id: str, thread_id: str, **updates) -> "ChatThread | None": if not updates: return await self.get_thread(workspace_id, thread_id) @@ -2117,7 +2141,7 @@ async def delete_thread(self, workspace_id: str, thread_id: str) -> bool: await self._connection.commit() return cursor.rowcount > 0 - async def list_expired_threads(self, limit: int = 100) -> list['ChatThread']: + async def list_expired_threads(self, limit: int = 100) -> list["ChatThread"]: """List expired chat threads across all workspaces. Queries for threads where expires_at is set and in the past. 
@@ -2178,20 +2202,27 @@ async def append_messages( (id, thread_id, workspace_id, message_index, role, content, metadata, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)""", ( - msg_id, thread_id, workspace_id, msg_index, - msg_input.role, content, - json.dumps(msg_input.metadata or {}), now, + msg_id, + thread_id, + workspace_id, + msg_index, + msg_input.role, + content, + json.dumps(msg_input.metadata or {}), + now, ), ) - created_messages.append(ChatMessage( - id=msg_id, - thread_id=thread_id, - message_index=msg_index, - role=msg_input.role, - content=msg_input.content, - metadata=msg_input.metadata or {}, - created_at=parse_datetime_utc(now), - )) + created_messages.append( + ChatMessage( + id=msg_id, + thread_id=thread_id, + message_index=msg_index, + role=msg_input.role, + content=msg_input.content, + metadata=msg_input.metadata or {}, + created_at=parse_datetime_utc(now), + ) + ) # Update thread message count and updated_at new_count = current_count + len(messages) @@ -2208,7 +2239,7 @@ async def get_messages( thread_id: str, limit: int = 100, offset: int = 0, - after_index: Optional[int] = None, + after_index: int | None = None, order: str = "asc", ) -> list: order_clause = "ASC" if order.lower() == "asc" else "DESC" @@ -2239,8 +2270,9 @@ async def get_message_count(self, workspace_id: str, thread_id: str) -> int: row = await cursor.fetchone() return row["message_count"] if row else 0 - def _row_to_chat_thread(self, row: aiosqlite.Row) -> 'ChatThread': + def _row_to_chat_thread(self, row: aiosqlite.Row) -> "ChatThread": from ...models.chat import ChatThread + return ChatThread( id=row["id"], workspace_id=row["workspace_id"], @@ -2259,8 +2291,9 @@ def _row_to_chat_thread(self, row: aiosqlite.Row) -> 'ChatThread': updated_at=parse_datetime_utc(row["updated_at"]), ) - def _row_to_chat_message(self, row: aiosqlite.Row) -> 'ChatMessage': + def _row_to_chat_message(self, row: aiosqlite.Row) -> "ChatMessage": from ...models.chat import ChatMessage, ChatMessageContent 
+ raw_content = row["content"] # Try to parse as structured content (JSON array) try: @@ -2284,10 +2317,9 @@ def _row_to_chat_message(self, row: aiosqlite.Row) -> 'ChatMessage': class SqliteStorageBackendPlugin(StoragePluginBase): - PROVIDER_NAME = 'sqlite' + PROVIDER_NAME = "sqlite" def initialize(self, v: Variables, logger: Logger) -> object | None: return SQLiteStorageBackend( - db_path=v.environ(MEMORYLAYER_SQLITE_STORAGE_PATH, default=DEFAULT_MEMORYLAYER_SQLITE_STORAGE_PATH), - v=v + db_path=v.environ(MEMORYLAYER_SQLITE_STORAGE_PATH, default=DEFAULT_MEMORYLAYER_SQLITE_STORAGE_PATH), v=v ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/tasks/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/tasks/__init__.py index 97c62a7..59f49b9 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/tasks/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/tasks/__init__.py @@ -1,16 +1,17 @@ """Task service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - TaskServicePluginBase, - EXT_TASK_SERVICE, EXT_MULTI_TASK_HANDLERS, + EXT_TASK_SERVICE, + TaskSchedule, TaskService, + TaskServicePluginBase, TaskStatus, - TaskSchedule, ) from .handlers import TaskHandlerPlugin -from scitrera_app_framework import Variables, get_extension - def get_task_service(v: Variables = None) -> TaskService: """Get the configured TaskService instance.""" @@ -18,12 +19,12 @@ def get_task_service(v: Variables = None) -> TaskService: __all__ = ( - 'TaskService', - 'TaskServicePluginBase', - 'TaskHandlerPlugin', - 'TaskStatus', - 'TaskSchedule', - 'get_task_service', - 'EXT_TASK_SERVICE', - 'EXT_MULTI_TASK_HANDLERS', + "TaskService", + "TaskServicePluginBase", + "TaskHandlerPlugin", + "TaskStatus", + "TaskSchedule", + "get_task_service", + "EXT_TASK_SERVICE", + "EXT_MULTI_TASK_HANDLERS", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/tasks/asyncio_impl.py 
b/memorylayer-core-python/src/memorylayer_server/services/tasks/asyncio_impl.py index 8577c7a..d3e5d1a 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/tasks/asyncio_impl.py +++ b/memorylayer-core-python/src/memorylayer_server/services/tasks/asyncio_impl.py @@ -3,16 +3,17 @@ Simple in-memory task service using asyncio for local development and single-node deployments. """ + import asyncio -from typing import Callable, Awaitable, Optional -from uuid import uuid4 +from collections.abc import Awaitable, Callable from logging import Logger +from uuid import uuid4 -from scitrera_app_framework import get_logger, Variables, ext_parse_bool +from scitrera_app_framework import Variables, ext_parse_bool, get_logger from .base import TaskService, TaskServicePluginBase, TaskStatus -MEMORYLAYER_TASKS_ENABLED = 'MEMORYLAYER_TASKS_ENABLED' +MEMORYLAYER_TASKS_ENABLED = "MEMORYLAYER_TASKS_ENABLED" DEFAULT_TASKS_ENABLED = True @@ -43,13 +44,7 @@ def __init__(self, v: Variables = None, tasks_enabled: bool = DEFAULT_TASKS_ENAB self.logger = get_logger(v, name=self.__class__.__name__) self.logger.info("Initialized AsyncIOTaskService") - async def schedule_task( - self, - task_type: str, - payload: dict, - delay_seconds: int = 0, - priority: int = 5 - ) -> Optional[str]: + async def schedule_task(self, task_type: str, payload: dict, delay_seconds: int = 0, priority: int = 5) -> str | None: """ Schedule a task for background execution. @@ -85,12 +80,7 @@ async def run_after_delay(): self._tasks[task_id] = asyncio.create_task(run_after_delay()) return task_id - async def schedule_recurring( - self, - task_type: str, - interval_seconds: int, - payload: dict - ) -> Optional[str]: + async def schedule_recurring(self, task_type: str, interval_seconds: int, payload: dict) -> str | None: """ Schedule a recurring task. 
@@ -116,24 +106,14 @@ async def run_recurring(): self.logger.debug("Executing recurring task (type: %s)", task_type) await handler(self._v, payload) except Exception as e: - self.logger.error( - "Recurring task %s failed: %s", - task_type, - e, - exc_info=True - ) + self.logger.error("Recurring task %s failed: %s", task_type, e, exc_info=True) else: self.logger.error("No handler registered for task type: %s", task_type) await asyncio.sleep(interval_seconds) self._recurring_tasks[schedule_id] = asyncio.create_task(run_recurring()) - self.logger.info( - "Scheduled recurring task %s: type=%s, interval=%ss", - schedule_id, - task_type, - interval_seconds - ) + self.logger.info("Scheduled recurring task %s: type=%s, interval=%ss", schedule_id, task_type, interval_seconds) return schedule_id async def cancel_task(self, task_id: str) -> bool: @@ -191,11 +171,7 @@ async def get_task_status(self, task_id: str) -> TaskStatus: return TaskStatus.RUNNING - def register_handler( - self, - task_type: str, - handler: Callable[[Variables, dict], Awaitable[None]] - ) -> None: + def register_handler(self, task_type: str, handler: Callable[[Variables, dict], Awaitable[None]]) -> None: """ Register a handler for a task type. @@ -236,7 +212,7 @@ class AsyncIOTaskServicePlugin(TaskServicePluginBase): """ # This MUST match what users set in MEMORYLAYER_TASK_PROVIDER - PROVIDER_NAME = 'asyncio' + PROVIDER_NAME = "asyncio" def initialize(self, v: Variables, logger: Logger) -> TaskService: """ diff --git a/memorylayer-core-python/src/memorylayer_server/services/tasks/base.py b/memorylayer-core-python/src/memorylayer_server/services/tasks/base.py index 2b010cb..fc1c821 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/tasks/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/tasks/base.py @@ -3,19 +3,17 @@ Provides background task scheduling abstraction for memory lifecycle operations. 
""" + from abc import ABC, abstractmethod +from collections.abc import Awaitable, Callable from dataclasses import dataclass from enum import Enum -from typing import Callable, Awaitable, Optional from scitrera_app_framework.api import Variables -from ...config import MEMORYLAYER_TASK_PROVIDER, DEFAULT_MEMORYLAYER_TASK_PROVIDER - +from ...config import DEFAULT_MEMORYLAYER_TASK_PROVIDER, MEMORYLAYER_TASK_PROVIDER from .._constants import ( - EXT_MEMORY_SERVICE, - EXT_MULTI_TASK_HANDLERS, - EXT_SESSION_SERVICE, + EXT_MULTI_TASK_HANDLERS, # noqa: F401 — re-exported for handlers.py and __init__.py EXT_STORAGE_BACKEND, EXT_TASK_SERVICE, ) @@ -24,6 +22,7 @@ class TaskStatus(str, Enum): """Task execution status.""" + PENDING = "pending" RUNNING = "running" COMPLETED = "completed" @@ -35,6 +34,7 @@ class TaskStatus(str, Enum): @dataclass class TaskSchedule: """Configuration for recurring task schedule.""" + interval_seconds: int default_payload: dict @@ -48,13 +48,7 @@ class TaskService(ABC): """ @abstractmethod - async def schedule_task( - self, - task_type: str, - payload: dict, - delay_seconds: int = 0, - priority: int = 5 - ) -> Optional[str]: + async def schedule_task(self, task_type: str, payload: dict, delay_seconds: int = 0, priority: int = 5) -> str | None: """ Schedule a task for background execution. @@ -70,12 +64,7 @@ async def schedule_task( pass @abstractmethod - async def schedule_recurring( - self, - task_type: str, - interval_seconds: int, - payload: dict - ) -> Optional[str]: + async def schedule_recurring(self, task_type: str, interval_seconds: int, payload: dict) -> str | None: """ Schedule a recurring task. 
@@ -116,11 +105,7 @@ async def get_task_status(self, task_id: str) -> TaskStatus: pass @abstractmethod - def register_handler( - self, - task_type: str, - handler: Callable[[Variables, dict], Awaitable[None]] - ) -> None: + def register_handler(self, task_type: str, handler: Callable[[Variables, dict], Awaitable[None]]) -> None: """ Register a handler for a task type. diff --git a/memorylayer-core-python/src/memorylayer_server/services/tasks/handlers.py b/memorylayer-core-python/src/memorylayer_server/services/tasks/handlers.py index 454ba31..55cb95c 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/tasks/handlers.py +++ b/memorylayer-core-python/src/memorylayer_server/services/tasks/handlers.py @@ -3,15 +3,16 @@ Base class for task handler plugins that are auto-discovered via multi-extension. """ + from abc import ABC, abstractmethod +from collections.abc import Awaitable, Callable, Iterable from logging import Logger -from typing import Optional, Awaitable, Callable, Iterable from scitrera_app_framework import Plugin, Variables, get_extensions -from .base import EXT_MULTI_TASK_HANDLERS, TaskSchedule, TaskService, EXT_TASK_SERVICE from ..memory import EXT_MEMORY_SERVICE from ..session import EXT_SESSION_SERVICE +from .base import EXT_MULTI_TASK_HANDLERS, EXT_TASK_SERVICE, TaskSchedule, TaskService class TaskHandlerPlugin(Plugin, ABC): @@ -53,7 +54,7 @@ async def handle(self, v: Variables, payload: dict) -> None: pass @abstractmethod - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: """ Return a recurring schedule, or None if not recurring. Takes variables instance for context for dynamic schedules that are dependent on configuration. 
@@ -88,7 +89,7 @@ def extension_point_name(self, v: Variables) -> str: return EXT_MULTI_TASK_HANDLERS def initialize(self, v, logger) -> object | None: - logger.info('Initializing Task Service Handlers') + logger.info("Initializing Task Service Handlers") task_service: TaskService = self.get_extension(EXT_TASK_SERVICE, v) # Register task service handlers @@ -101,27 +102,23 @@ def initialize(self, v, logger) -> object | None: async def async_ready(self, v: Variables, logger: Logger, value: TaskService) -> None: task_service: TaskService = value - logger.info('Scheduling Recurring Task Handlers') + logger.info("Scheduling Recurring Task Handlers") for handler_plugin in get_extensions(EXT_MULTI_TASK_HANDLERS, v).values(): # type: TaskHandlerPlugin # Schedule recurring tasks schedule = handler_plugin.get_schedule(v) if schedule: # Guard: verify payload is serializable (catches service objects at startup) import json + try: json.dumps(schedule.default_payload) except TypeError as e: raise TypeError( "Task handler '%s' has a non-serializable default_payload: %s. " - "Move service resolution from get_schedule() to handle()." - % (handler_plugin.get_task_type(), e) + "Move service resolution from get_schedule() to handle()." 
% (handler_plugin.get_task_type(), e) ) from e - await task_service.schedule_recurring( - handler_plugin.get_task_type(), - schedule.interval_seconds, - schedule.default_payload - ) + await task_service.schedule_recurring(handler_plugin.get_task_type(), schedule.interval_seconds, schedule.default_payload) return diff --git a/memorylayer-core-python/src/memorylayer_server/services/workspace/__init__.py b/memorylayer-core-python/src/memorylayer_server/services/workspace/__init__.py index 09f5be7..6222db6 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/workspace/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/services/workspace/__init__.py @@ -1,12 +1,13 @@ """Workspace service package.""" + +from scitrera_app_framework import Variables, get_extension + from .base import ( - WorkspaceServicePluginBase, EXT_WORKSPACE_SERVICE, + WorkspaceServicePluginBase, ) from .default import WorkspaceService -from scitrera_app_framework import Variables, get_extension - def get_workspace_service(v: Variables = None) -> WorkspaceService: """Get the workspace service instance.""" @@ -14,8 +15,8 @@ def get_workspace_service(v: Variables = None) -> WorkspaceService: __all__ = ( - 'WorkspaceService', - 'WorkspaceServicePluginBase', - 'get_workspace_service', - 'EXT_WORKSPACE_SERVICE', + "WorkspaceService", + "WorkspaceServicePluginBase", + "get_workspace_service", + "EXT_WORKSPACE_SERVICE", ) diff --git a/memorylayer-core-python/src/memorylayer_server/services/workspace/base.py b/memorylayer-core-python/src/memorylayer_server/services/workspace/base.py index ec74bb6..a6562ec 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/workspace/base.py +++ b/memorylayer-core-python/src/memorylayer_server/services/workspace/base.py @@ -7,11 +7,10 @@ - update_workspace: Update workspace settings """ -from ...config import MEMORYLAYER_WORKSPACE_SERVICE, DEFAULT_MEMORYLAYER_WORKSPACE_SERVICE +from ...config import 
DEFAULT_MEMORYLAYER_WORKSPACE_SERVICE, MEMORYLAYER_WORKSPACE_SERVICE from .._constants import EXT_STORAGE_BACKEND, EXT_WORKSPACE_SERVICE from .._plugin_factory import make_service_plugin_base - # noinspection PyAbstractClass WorkspaceServicePluginBase = make_service_plugin_base( ext_name=EXT_WORKSPACE_SERVICE, diff --git a/memorylayer-core-python/src/memorylayer_server/services/workspace/default.py b/memorylayer-core-python/src/memorylayer_server/services/workspace/default.py index 3a9f0f4..ab174fc 100644 --- a/memorylayer-core-python/src/memorylayer_server/services/workspace/default.py +++ b/memorylayer-core-python/src/memorylayer_server/services/workspace/default.py @@ -1,15 +1,15 @@ """Default workspace service implementation.""" -from datetime import datetime, timezone + +from datetime import UTC, datetime from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables +from ...config import DEFAULT_CONTEXT_ID, DEFAULT_TENANT_ID from ...models import Workspace from ...models.workspace import Context from ..storage import EXT_STORAGE_BACKEND, StorageBackend -from ...config import DEFAULT_TENANT_ID, DEFAULT_CONTEXT_ID from .base import WorkspaceServicePluginBase @@ -45,11 +45,7 @@ async def create_workspace(self, workspace: Workspace) -> Workspace: Raises: ValueError: If workspace validation fails """ - self.logger.info( - "Creating workspace: %s for tenant: %s", - workspace.name, - workspace.tenant_id - ) + self.logger.info("Creating workspace: %s for tenant: %s", workspace.name, workspace.tenant_id) # Create workspace via storage backend created = await self._storage.create_workspace(workspace) @@ -57,7 +53,7 @@ async def create_workspace(self, workspace: Workspace) -> Workspace: self.logger.info("Created workspace: %s", created.id) return created - async def get_workspace(self, workspace_id: str) -> Optional[Workspace]: + async def get_workspace(self, workspace_id: str) -> 
Workspace | None: """ Get workspace by ID. @@ -81,11 +77,11 @@ async def list_workspaces(self) -> list["Workspace"]: return await self._storage.list_workspaces() async def ensure_workspace( - self, - workspace_id: str, - tenant_id: str = None, - auto_create: bool = True, - ) -> Optional[Workspace]: + self, + workspace_id: str, + tenant_id: str = None, + auto_create: bool = True, + ) -> Workspace | None: """ Ensure a workspace exists, optionally creating it if missing. @@ -114,7 +110,7 @@ async def ensure_workspace( # Auto-create workspace self.logger.info("Auto-creating workspace: %s", workspace_id) tenant_id = tenant_id or DEFAULT_TENANT_ID - now = datetime.now(timezone.utc) + now = datetime.now(UTC) workspace = Workspace( id=workspace_id, @@ -135,11 +131,11 @@ async def ensure_default_context(self, workspace_id: str) -> None: Creates it via storage if missing. This is a lightweight bootstrapping step — no separate ContextService required. """ - if hasattr(self._storage, 'get_context'): + if hasattr(self._storage, "get_context"): existing = await self._storage.get_context(workspace_id, f"{workspace_id}:{DEFAULT_CONTEXT_ID}") if existing: return - if hasattr(self._storage, 'create_context'): + if hasattr(self._storage, "create_context"): default_context = Context( id=f"{workspace_id}:{DEFAULT_CONTEXT_ID}", workspace_id=workspace_id, @@ -167,7 +163,7 @@ async def delete_workspace(self, workspace_id: str) -> bool: if not existing: return False - if hasattr(self._storage, 'delete_workspace'): + if hasattr(self._storage, "delete_workspace"): await self._storage.delete_workspace(workspace_id) else: self.logger.warning( @@ -212,11 +208,9 @@ async def update_workspace(self, workspace: Workspace) -> Workspace: class DefaultWorkspaceServicePlugin(WorkspaceServicePluginBase): """Default workspace service plugin.""" - PROVIDER_NAME = 'default' + + PROVIDER_NAME = "default" def initialize(self, v: Variables, logger: Logger) -> WorkspaceService: storage: StorageBackend = 
self.get_extension(EXT_STORAGE_BACKEND, v) - return WorkspaceService( - storage=storage, - v=v - ) + return WorkspaceService(storage=storage, v=v) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/auto_enrich_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/auto_enrich_handler.py index 9ec195b..3adc5ca 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/auto_enrich_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/auto_enrich_handler.py @@ -3,16 +3,16 @@ Delegates association work to AssociationService (which owns ontology-based relationship classification). Type classification uses ExtractionService. """ + from logging import Logger -from typing import Optional from scitrera_app_framework import Variables, get_logger -from ..services.tasks import TaskHandlerPlugin, TaskSchedule -from ..services.storage import StorageBackend, EXT_STORAGE_BACKEND -from ..services.embedding import EmbeddingService, EXT_EMBEDDING_SERVICE from ..services.association import EXT_ASSOCIATION_SERVICE +from ..services.embedding import EXT_EMBEDDING_SERVICE, EmbeddingService from ..services.extraction import EXT_EXTRACTION_SERVICE +from ..services.storage import EXT_STORAGE_BACKEND, StorageBackend +from ..services.tasks import TaskHandlerPlugin, TaskSchedule class AutoEnrichTaskHandler(TaskHandlerPlugin): @@ -35,24 +35,25 @@ class AutoEnrichTaskHandler(TaskHandlerPlugin): SIMILARITY_THRESHOLD = 0.6 def get_task_type(self) -> str: - return 'auto_enrich' + return "auto_enrich" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: # No recurring schedule - triggered on-demand after remember return None async def handle(self, v: Variables, payload: dict) -> None: logger: Logger = get_logger(v, name=self.get_task_type()) - memory_id = payload.get('memory_id') - workspace_id = payload.get('workspace_id') - content = payload.get('content') - embedding = 
payload.get('embedding') + memory_id = payload.get("memory_id") + workspace_id = payload.get("workspace_id") + content = payload.get("content") + embedding = payload.get("embedding") if not memory_id or not workspace_id: logger.warning( "Missing required payload fields: workspace_id=%s, memory_id=%s", - workspace_id, memory_id, + workspace_id, + memory_id, ) return @@ -95,10 +96,7 @@ async def handle(self, v: Variables, payload: dict) -> None: # Delegate association creation to AssociationService if similar_memories: - candidates = [ - (mem.id, score) for mem, score in similar_memories - if mem.id != memory_id - ] + candidates = [(mem.id, score) for mem, score in similar_memories if mem.id != memory_id] if candidates: try: association_service = self.get_extension(EXT_ASSOCIATION_SERVICE, v) @@ -111,17 +109,21 @@ async def handle(self, v: Variables, payload: dict) -> None: ) logger.info( "Created %d auto-association(s) for memory %s in workspace %s", - len(associations), memory_id, workspace_id, + len(associations), + memory_id, + workspace_id, ) except Exception as e: logger.warning( - "Auto-association failed for memory %s: %s", memory_id, e, + "Auto-association failed for memory %s: %s", + memory_id, + e, ) else: logger.debug("No similar memories found for %s in workspace %s", memory_id, workspace_id) # Type classification (when flag is set) - if payload.get('classify_type', False): + if payload.get("classify_type", False): try: extraction_service = self.get_extension(EXT_EXTRACTION_SERVICE, v) classified_type, classified_subtype = await extraction_service.classify_content(content) @@ -129,9 +131,9 @@ async def handle(self, v: Variables, payload: dict) -> None: # Fetch current memory to compare types current_memory = await storage.get_memory(workspace_id, memory_id) if current_memory and current_memory.type != classified_type: - update_kwargs = {'type': classified_type.value} + update_kwargs = {"type": classified_type.value} if classified_subtype is not None: - 
update_kwargs['subtype'] = classified_subtype.value + update_kwargs["subtype"] = classified_subtype.value await storage.update_memory( workspace_id=workspace_id, memory_id=memory_id, @@ -139,7 +141,9 @@ async def handle(self, v: Variables, payload: dict) -> None: ) logger.info( "Reclassified memory %s from %s to %s", - memory_id, current_memory.type, classified_type, + memory_id, + current_memory.type, + classified_type, ) except Exception as e: logger.debug("Type classification skipped for %s: %s", memory_id, e) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/chat_decomposition_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/chat_decomposition_handler.py index 6ed57c2..384645b 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/chat_decomposition_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/chat_decomposition_handler.py @@ -11,25 +11,24 @@ 5. Routes each memory through MemoryService.remember() for full pipeline 6. 
Updates thread decomposition watermark """ + import json -from datetime import datetime, timezone +from datetime import UTC, datetime from logging import Logger -from typing import Optional -from scitrera_app_framework import get_logger, Variables +from scitrera_app_framework import Variables, get_logger -from ..models.memory import RememberInput -from ..services.storage import StorageBackend, EXT_STORAGE_BACKEND -from ..services.tasks import TaskHandlerPlugin, TaskSchedule -from ..services.memory import MemoryService, EXT_MEMORY_SERVICE -from ..services.llm import EXT_LLM_SERVICE -from ..services._constants import EXT_CHAT_SERVICE from ..config import ( - MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE, DEFAULT_MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE, - MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP, DEFAULT_MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP, + MEMORYLAYER_CHAT_DECOMPOSE_CHUNK_SIZE, + MEMORYLAYER_CHAT_DECOMPOSE_OVERLAP, ) +from ..models.memory import RememberInput +from ..services.llm import EXT_LLM_SERVICE +from ..services.memory import EXT_MEMORY_SERVICE, MemoryService +from ..services.storage import EXT_STORAGE_BACKEND, StorageBackend +from ..services.tasks import TaskHandlerPlugin, TaskSchedule CHAT_DECOMPOSITION_TASK = "chat_decomposition" @@ -70,7 +69,7 @@ class ChatDecompositionTaskHandler(TaskHandlerPlugin): def get_task_type(self) -> str: return CHAT_DECOMPOSITION_TASK - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: return None # On-demand only async def handle(self, v: Variables, payload: dict) -> None: @@ -84,7 +83,8 @@ async def handle(self, v: Variables, payload: dict) -> None: if not workspace_id or not thread_id: logger.warning( "Missing required payload fields: workspace_id=%s, thread_id=%s", - workspace_id, thread_id, + workspace_id, + thread_id, ) return @@ -113,8 +113,10 @@ async def handle(self, v: Variables, payload: dict) -> None: logger.info( "Decomposing %d messages from thread %s 
(index %d to %d)", - len(messages), thread_id, - thread.last_decomposed_index, thread.message_count, + len(messages), + thread_id, + thread.last_decomposed_index, + thread.message_count, ) # 3. Chunk messages @@ -146,12 +148,14 @@ async def handle(self, v: Variables, payload: dict) -> None: except Exception as e: logger.error( "Failed to decompose chunk for thread %s: %s", - thread_id, e, exc_info=True, + thread_id, + e, + exc_info=True, ) # 5. Update watermark max_index = max(m.message_index for m in messages) + 1 - now = datetime.now(timezone.utc) + now = datetime.now(UTC) await storage.update_thread( workspace_id, thread_id, @@ -161,7 +165,9 @@ async def handle(self, v: Variables, payload: dict) -> None: logger.info( "Decomposed thread %s: %d memories created from %d messages", - thread_id, total_memories_created, len(messages), + thread_id, + total_memories_created, + len(messages), ) def _chunk_messages(self, messages: list, chunk_size: int, overlap: int) -> list[list]: @@ -171,21 +177,21 @@ def _chunk_messages(self, messages: list, chunk_size: int, overlap: int) -> list chunks = [] step = max(1, chunk_size - overlap) for i in range(0, len(messages), step): - chunk = messages[i:i + chunk_size] + chunk = messages[i : i + chunk_size] chunks.append(chunk) if i + chunk_size >= len(messages): break return chunks async def _decompose_chunk( - self, - v: Variables, - logger: Logger, - storage: StorageBackend, - memory_service: MemoryService, - workspace_id: str, - thread, - messages: list, + self, + v: Variables, + logger: Logger, + storage: StorageBackend, + memory_service: MemoryService, + workspace_id: str, + thread, + messages: list, ) -> int: """Decompose a chunk of messages into memories via LLM.""" # Format conversation for LLM @@ -263,7 +269,8 @@ async def _decompose_chunk( except Exception as e: logger.warning( "Failed to store decomposed memory from thread %s: %s", - thread.id, e, + thread.id, + e, ) return memories_created diff --git 
a/memorylayer-core-python/src/memorylayer_server/tasks/chat_thread_cleanup_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/chat_thread_cleanup_handler.py index 61aaf5c..73c49ff 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/chat_thread_cleanup_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/chat_thread_cleanup_handler.py @@ -2,6 +2,7 @@ Periodic background task that cleans up expired chat threads. """ + from logging import Logger from typing import Optional @@ -10,13 +11,13 @@ from ..services.storage import EXT_STORAGE_BACKEND, StorageBackend from ..services.tasks import TaskHandlerPlugin, TaskSchedule -MEMORYLAYER_BACKGROUND_CHAT_THREAD_CLEANUP_INTERVAL = 'MEMORYLAYER_BACKGROUND_CHAT_THREAD_CLEANUP_INTERVAL' +MEMORYLAYER_BACKGROUND_CHAT_THREAD_CLEANUP_INTERVAL = "MEMORYLAYER_BACKGROUND_CHAT_THREAD_CLEANUP_INTERVAL" DEFAULT_CLEANUP_INTERVAL: int = 3600 async def periodic_chat_thread_cleanup_task( - storage: StorageBackend, - logger: Logger, + storage: StorageBackend, + logger: Logger, ) -> None: """ Task to clean up expired chat threads. 
@@ -40,19 +41,11 @@ async def periodic_chat_thread_cleanup_task( for thread in expired_threads: try: - logger.debug( - "Deleting expired chat thread %s from workspace %s", - thread.id, - thread.workspace_id - ) + logger.debug("Deleting expired chat thread %s from workspace %s", thread.id, thread.workspace_id) await storage.delete_thread(thread.workspace_id, thread.id) cleaned_count += 1 except Exception as e: - logger.warning( - "Failed to delete expired thread %s: %s", - thread.id, - e - ) + logger.warning("Failed to delete expired thread %s: %s", thread.id, e) if cleaned_count > 0: logger.info("Background cleanup removed %d expired chat threads", cleaned_count) @@ -65,14 +58,10 @@ class ChatThreadCleanupTaskHandlerPlugin(TaskHandlerPlugin): """Task handler for periodic chat thread cleanup.""" def get_task_type(self) -> str: - return 'cleanup_expired_threads' - - def get_schedule(self, v: Variables) -> Optional['TaskSchedule']: - interval: int = v.environ( - MEMORYLAYER_BACKGROUND_CHAT_THREAD_CLEANUP_INTERVAL, - default=DEFAULT_CLEANUP_INTERVAL, - type_fn=int - ) + return "cleanup_expired_threads" + + def get_schedule(self, v: Variables) -> Optional["TaskSchedule"]: + interval: int = v.environ(MEMORYLAYER_BACKGROUND_CHAT_THREAD_CLEANUP_INTERVAL, default=DEFAULT_CLEANUP_INTERVAL, type_fn=int) return TaskSchedule(interval_seconds=interval, default_payload={}) async def handle(self, v: Variables, payload: dict): diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/cleanup_task.py b/memorylayer-core-python/src/memorylayer_server/tasks/cleanup_task.py index 0e45dc8..9d877d7 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/cleanup_task.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/cleanup_task.py @@ -3,30 +3,31 @@ Periodic background task that cleans up expired sessions, optionally auto-committing working memory before deletion. 
""" + from logging import Logger from typing import Optional from scitrera_app_framework import Variables, ext_parse_bool, get_logger +from ..services.session import EXT_SESSION_SERVICE, SessionService from ..services.storage import EXT_STORAGE_BACKEND, StorageBackend from ..services.tasks import TaskHandlerPlugin, TaskSchedule -from ..services.session import EXT_SESSION_SERVICE, SessionService -MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_ENABLED = 'MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_ENABLED' +MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_ENABLED = "MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_ENABLED" DEFAULT_CLEANUP_ENABLED = True -MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_INTERVAL = 'MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_INTERVAL' +MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_INTERVAL = "MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_INTERVAL" DEFAULT_CLEANUP_INTERVAL: float = 300 -MEMORYLAYER_BACKGROUND_SESSION_AUTO_COMMIT = 'MEMORYLAYER_BACKGROUND_SESSION_AUTO_COMMIT' +MEMORYLAYER_BACKGROUND_SESSION_AUTO_COMMIT = "MEMORYLAYER_BACKGROUND_SESSION_AUTO_COMMIT" DEFAULT_AUTO_COMMIT_ENABLED = True async def periodic_session_cleanup_task( - storage: StorageBackend, - session_service: Optional['SessionService'], - auto_commit_enabled: bool, - logger: Logger, + storage: StorageBackend, + session_service: Optional["SessionService"], + auto_commit_enabled: bool, + logger: Logger, ) -> None: """ Task to clean up expired sessions. 
@@ -52,27 +53,14 @@ async def periodic_session_cleanup_task( for session in expired_sessions: if session.auto_commit and session.committed_at is None: try: - logger.debug( - "Auto-committing expired session %s before cleanup", - session.id - ) - await session_service.commit_session( - session.workspace_id, - session.id - ) + logger.debug("Auto-committing expired session %s before cleanup", session.id) + await session_service.commit_session(session.workspace_id, session.id) committed_count += 1 except Exception as e: - logger.warning( - "Auto-commit failed for expired session %s: %s", - session.id, - e - ) + logger.warning("Auto-commit failed for expired session %s: %s", session.id, e) if committed_count > 0: - logger.info( - "Auto-committed %d expired sessions before cleanup", - committed_count - ) + logger.info("Auto-committed %d expired sessions before cleanup", committed_count) # Now delete all expired sessions count = await storage.cleanup_all_expired_sessions() @@ -87,22 +75,19 @@ class SessionCleanupTaskHandlerPlugin(TaskHandlerPlugin): """Task handler for periodic session cleanup.""" def get_task_type(self) -> str: - return 'session_cleanup' + return "session_cleanup" - def get_schedule(self, v: Variables) -> Optional['TaskSchedule']: - interval: int = v.environ( - MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_INTERVAL, - default=DEFAULT_CLEANUP_INTERVAL, - type_fn=int - ) + def get_schedule(self, v: Variables) -> Optional["TaskSchedule"]: + interval: int = v.environ(MEMORYLAYER_BACKGROUND_SESSION_CLEANUP_INTERVAL, default=DEFAULT_CLEANUP_INTERVAL, type_fn=int) auto_commit_enabled: bool = v.environ( - MEMORYLAYER_BACKGROUND_SESSION_AUTO_COMMIT, - default=DEFAULT_AUTO_COMMIT_ENABLED, - type_fn=ext_parse_bool + MEMORYLAYER_BACKGROUND_SESSION_AUTO_COMMIT, default=DEFAULT_AUTO_COMMIT_ENABLED, type_fn=ext_parse_bool + ) + return TaskSchedule( + interval_seconds=interval, + default_payload={ + "auto_commit_enabled": auto_commit_enabled, + }, ) - return 
TaskSchedule(interval_seconds=interval, default_payload={ - 'auto_commit_enabled': auto_commit_enabled, - }) async def handle(self, v: Variables, payload: dict): storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) @@ -111,6 +96,6 @@ async def handle(self, v: Variables, payload: dict): return await periodic_session_cleanup_task( storage=storage, session_service=session_service, - auto_commit_enabled=payload.get('auto_commit_enabled', DEFAULT_AUTO_COMMIT_ENABLED), + auto_commit_enabled=payload.get("auto_commit_enabled", DEFAULT_AUTO_COMMIT_ENABLED), logger=logger, ) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/consolidation_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/consolidation_handler.py index 6154817..b944650 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/consolidation_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/consolidation_handler.py @@ -1,24 +1,26 @@ """Memory consolidation task handler — daily scheduled merge of low-importance similar memories.""" + +from datetime import UTC from logging import Logger -from typing import Optional, Any +from typing import Any from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ..services.storage import EXT_STORAGE_BACKEND -from ..services.storage.base import StorageBackend -from ..services.tasks import TaskHandlerPlugin, TaskSchedule -from ..utils import dot_product as _dot_product from ..config import ( - MEMORYLAYER_CONSOLIDATION_ENABLED, DEFAULT_MEMORYLAYER_CONSOLIDATION_ENABLED, - MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE, + DEFAULT_MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE, DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE, + DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY, + MEMORYLAYER_CONSOLIDATION_ENABLED, MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE, - DEFAULT_MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE, + MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE, 
MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY, - DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY, ) +from ..services.storage import EXT_STORAGE_BACKEND +from ..services.storage.base import StorageBackend +from ..services.tasks import TaskHandlerPlugin, TaskSchedule +from ..utils import dot_product as _dot_product def _is_enabled(v: Variables) -> bool: @@ -26,8 +28,7 @@ def _is_enabled(v: Variables) -> bool: raw = v.get(MEMORYLAYER_CONSOLIDATION_ENABLED, DEFAULT_MEMORYLAYER_CONSOLIDATION_ENABLED) if isinstance(raw, bool): return raw - return str(raw).lower() in ('1', 'true', 'yes') - + return str(raw).lower() in ("1", "true", "yes") def _merge_memories_simplified(primary: Any, others: list[Any]) -> dict: @@ -62,16 +63,16 @@ def _merge_memories_simplified(primary: Any, others: list[Any]) -> dict: new_importance = min(max_importance * 1.1, 1.0) # Record provenance in metadata - existing_provenance = merged_metadata.get('consolidated_from', []) + existing_provenance = merged_metadata.get("consolidated_from", []) if isinstance(existing_provenance, list): - merged_metadata['consolidated_from'] = existing_provenance + provenance_ids + merged_metadata["consolidated_from"] = existing_provenance + provenance_ids else: - merged_metadata['consolidated_from'] = provenance_ids + merged_metadata["consolidated_from"] = provenance_ids return { - 'tags': list(merged_tags), - 'metadata': merged_metadata, - 'importance': new_importance, + "tags": list(merged_tags), + "metadata": merged_metadata, + "importance": new_importance, } @@ -133,6 +134,7 @@ def union(x: int, y: int) -> None: # Group by root from collections import defaultdict + groups: dict[int, list[int]] = defaultdict(list) for i in range(n): groups[find(i)].append(i) @@ -157,9 +159,9 @@ class ConsolidationTaskHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'memory_consolidation' + return "memory_consolidation" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: 
Variables) -> TaskSchedule | None: if not _is_enabled(v): return None return TaskSchedule( @@ -174,20 +176,26 @@ async def handle(self, v: Variables, payload: dict) -> None: storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) logger: Logger = get_logger(v, name=self.get_task_type()) - min_cluster_size = int(v.get( - MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE, - DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE, - )) - max_importance = float(v.get( - MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE, - DEFAULT_MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE, - )) - min_similarity = float(v.get( - MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY, - DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY, - )) - - workspace_id = payload.get('workspace_id') + min_cluster_size = int( + v.get( + MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE, + DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE, + ) + ) + max_importance = float( + v.get( + MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE, + DEFAULT_MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE, + ) + ) + min_similarity = float( + v.get( + MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY, + DEFAULT_MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY, + ) + ) + + workspace_id = payload.get("workspace_id") if workspace_id: workspaces_to_process = [workspace_id] @@ -201,8 +209,12 @@ async def handle(self, v: Variables, payload: dict) -> None: for ws_id in workspaces_to_process: try: merged, deleted = await self._consolidate_workspace( - storage, logger, ws_id, - min_cluster_size, max_importance, min_similarity, + storage, + logger, + ws_id, + min_cluster_size, + max_importance, + min_similarity, ) total_merged += merged total_deleted += deleted @@ -211,7 +223,8 @@ async def handle(self, v: Variables, payload: dict) -> None: logger.info( "Consolidation complete: %d memories merged (primary updated), %d memories deleted", - total_merged, total_deleted, + total_merged, + total_deleted, ) async def _consolidate_workspace( @@ -228,9 +241,9 @@ async def _consolidate_workspace( 
Returns: (merged_count, deleted_count) tuple """ - from datetime import datetime, timezone + from datetime import datetime - epoch = datetime(2000, 1, 1, tzinfo=timezone.utc) + epoch = datetime(2000, 1, 1, tzinfo=UTC) # Collect all low-importance memories with embeddings candidates = [] @@ -242,7 +255,7 @@ async def _consolidate_workspace( workspace_id, created_after=epoch, limit=batch_size, - detail_level='full', + detail_level="full", offset=offset, ) if not batch: @@ -250,16 +263,16 @@ async def _consolidate_workspace( for item in batch: if isinstance(item, dict): - mem_id = item.get('id') - importance = item.get('importance', 1.0) + mem_id = item.get("id") + importance = item.get("importance", 1.0) if importance is not None and importance <= max_importance and mem_id: mem = await storage.get_memory(workspace_id, mem_id, track_access=False) - if mem and mem.embedding and not getattr(mem, 'pinned', False): + if mem and mem.embedding and not getattr(mem, "pinned", False): candidates.append(mem) else: - importance = getattr(item, 'importance', 1.0) - if importance <= max_importance and getattr(item, 'embedding', None): - if not getattr(item, 'pinned', False): + importance = getattr(item, "importance", 1.0) + if importance <= max_importance and getattr(item, "embedding", None): + if not getattr(item, "pinned", False): candidates.append(item) offset += len(batch) @@ -269,13 +282,15 @@ async def _consolidate_workspace( if len(candidates) < min_cluster_size: logger.debug( "Workspace %s: only %d candidate(s) below importance threshold, skipping", - workspace_id, len(candidates), + workspace_id, + len(candidates), ) return 0, 0 logger.info( "Workspace %s: found %d candidate memories for consolidation", - workspace_id, len(candidates), + workspace_id, + len(candidates), ) clusters = _find_clusters(candidates, min_similarity, min_cluster_size) @@ -302,7 +317,8 @@ async def _consolidate_workspace( except Exception as exc: logger.error( "Failed to update primary memory %s 
during consolidation: %s", - primary.id, exc, + primary.id, + exc, ) continue @@ -313,16 +329,22 @@ async def _consolidate_workspace( deleted_count += 1 logger.debug( "Consolidated memory %s into primary %s (workspace %s)", - mem.id, primary.id, workspace_id, + mem.id, + primary.id, + workspace_id, ) except Exception as exc: logger.error( "Failed to delete memory %s during consolidation: %s", - mem.id, exc, + mem.id, + exc, ) logger.info( "Workspace %s consolidation: %d cluster(s) processed, %d primaries updated, %d memories deleted", - workspace_id, len(clusters), merged_count, deleted_count, + workspace_id, + len(clusters), + merged_count, + deleted_count, ) return merged_count, deleted_count diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/contradiction_check_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/contradiction_check_handler.py index 43a8f4f..ecdf2d3 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/contradiction_check_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/contradiction_check_handler.py @@ -1,11 +1,11 @@ """Contradiction check task handler for on-demand contradiction detection.""" + from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ..services.contradiction import ContradictionService, EXT_CONTRADICTION_SERVICE +from ..services.contradiction import EXT_CONTRADICTION_SERVICE, ContradictionService from ..services.tasks import TaskHandlerPlugin, TaskSchedule @@ -19,25 +19,24 @@ class ContradictionCheckTaskHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'check_contradictions' + return "check_contradictions" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: # No recurring schedule - triggered on-demand after remember return None async def handle(self, v: Variables, 
payload: dict) -> None: - contradiction_service: ContradictionService = self.get_extension( - EXT_CONTRADICTION_SERVICE, v - ) + contradiction_service: ContradictionService = self.get_extension(EXT_CONTRADICTION_SERVICE, v) logger: Logger = get_logger(v, name=self.get_task_type()) - workspace_id = payload.get('workspace_id') - memory_id = payload.get('memory_id') + workspace_id = payload.get("workspace_id") + memory_id = payload.get("memory_id") if not workspace_id or not memory_id: logger.warning( "Missing required payload fields: workspace_id=%s, memory_id=%s", - workspace_id, memory_id, + workspace_id, + memory_id, ) return @@ -47,10 +46,13 @@ async def handle(self, v: Variables, payload: dict) -> None: if contradictions: logger.info( "Found %d contradiction(s) for memory %s in workspace %s", - len(contradictions), memory_id, workspace_id, + len(contradictions), + memory_id, + workspace_id, ) else: logger.debug( "No contradictions found for memory %s in workspace %s", - memory_id, workspace_id, + memory_id, + workspace_id, ) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/decay_task_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/decay_task_handler.py index cce8615..0dafc3a 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/decay_task_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/decay_task_handler.py @@ -1,11 +1,11 @@ """Decay task handler for periodic background decay.""" + from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ..services.decay import DecayService, EXT_DECAY_SERVICE +from ..services.decay import EXT_DECAY_SERVICE, DecayService from ..services.tasks import TaskHandlerPlugin, TaskSchedule @@ -17,9 +17,9 @@ class DecayTaskHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'decay_memories' + return "decay_memories" - def get_schedule(self, v: 
Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: return TaskSchedule( interval_seconds=6 * 3600, # Every 6 hours # TODO: make configurable default_payload={}, @@ -29,19 +29,13 @@ async def handle(self, v: Variables, payload: dict) -> None: decay_service: DecayService = self.get_extension(EXT_DECAY_SERVICE, v) logger: Logger = get_logger(v, name=self.get_task_type()) - workspace_id = payload.get('workspace_id') + workspace_id = payload.get("workspace_id") if workspace_id: logger.info("Running decay for workspace %s", workspace_id) result = await decay_service.decay_workspace(workspace_id) archived = await decay_service.archive_stale_memories(workspace_id) - logger.info( - "Decay complete for workspace %s: %d decayed, %d archived", - workspace_id, result.decayed, archived - ) + logger.info("Decay complete for workspace %s: %d decayed, %d archived", workspace_id, result.decayed, archived) else: logger.info("Running decay for all workspaces") result = await decay_service.decay_all_workspaces() - logger.info( - "Decay complete: %d processed, %d decayed, %d archived", - result.processed, result.decayed, result.archived - ) + logger.info("Decay complete: %d processed, %d decayed, %d archived", result.processed, result.decayed, result.archived) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/fact_decomposition_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/fact_decomposition_handler.py index 849c91a..6e93c3b 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/fact_decomposition_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/fact_decomposition_handler.py @@ -12,17 +12,17 @@ 4. Creates PART_OF associations from each fact to the parent 5. 
Archives the parent memory (status = ARCHIVED) """ + from logging import Logger -from typing import Optional -from scitrera_app_framework import get_logger, Variables +from scitrera_app_framework import Variables, get_logger from ..models.association import AssociateInput -from ..models.memory import MemoryType, MemorySubtype, MemoryStatus, RememberInput -from ..services.storage import StorageBackend, EXT_STORAGE_BACKEND -from ..services.tasks import TaskHandlerPlugin, TaskSchedule -from ..services.extraction import ExtractionService, EXT_EXTRACTION_SERVICE +from ..models.memory import MemoryStatus, MemorySubtype, MemoryType, RememberInput +from ..services.extraction import EXT_EXTRACTION_SERVICE, ExtractionService from ..services.memory import EXT_MEMORY_SERVICE, MemoryService +from ..services.storage import EXT_STORAGE_BACKEND, StorageBackend +from ..services.tasks import TaskHandlerPlugin, TaskSchedule class FactDecompositionTaskHandler(TaskHandlerPlugin): @@ -39,9 +39,9 @@ class FactDecompositionTaskHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'decompose_facts' + return "decompose_facts" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: return None # On-demand only, not recurring async def handle(self, v: Variables, payload: dict) -> None: @@ -58,13 +58,14 @@ async def handle(self, v: Variables, payload: dict) -> None: # Get memory service for per-fact pipeline memory_service: MemoryService = self.get_extension(EXT_MEMORY_SERVICE, v) - memory_id = payload.get('memory_id') - workspace_id = payload.get('workspace_id') + memory_id = payload.get("memory_id") + workspace_id = payload.get("workspace_id") if not memory_id or not workspace_id: logger.warning( "Missing required payload fields: workspace_id=%s, memory_id=%s", - workspace_id, memory_id, + workspace_id, + memory_id, ) return @@ -139,7 +140,9 @@ async def handle(self, v: Variables, payload: dict) -> 
None: except Exception as e: logger.warning( "Failed to create PART_OF association from %s to %s: %s", - fact_id, memory_id, e, + fact_id, + memory_id, + e, ) # 5. Archive the parent memory @@ -151,7 +154,8 @@ async def handle(self, v: Variables, payload: dict) -> None: ) logger.info( "Decomposed memory %s into %d atomic facts and archived parent", - memory_id, len(created_fact_ids), + memory_id, + len(created_fact_ids), ) except Exception as e: logger.warning("Failed to archive parent memory %s: %s", memory_id, e) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/remember_working_memory_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/remember_working_memory_handler.py index f213ed5..1e5583a 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/remember_working_memory_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/remember_working_memory_handler.py @@ -4,14 +4,14 @@ entry via the standard remember pipeline. Working memories are stored with type=WORKING, which naturally skips fact decomposition. 
""" + from logging import Logger -from typing import Optional from scitrera_app_framework import Variables, get_logger -from ..services.tasks import TaskHandlerPlugin, TaskSchedule +from ..models import MemoryType, RememberInput from ..services._constants import EXT_MEMORY_SERVICE -from ..models import RememberInput, MemoryType +from ..services.tasks import TaskHandlerPlugin, TaskSchedule class RememberWorkingMemoryHandler(TaskHandlerPlugin): @@ -26,31 +26,33 @@ class RememberWorkingMemoryHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'remember_working_memory' + return "remember_working_memory" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: # No recurring schedule - triggered on-demand after set_working_memory return None async def handle(self, v: Variables, payload: dict) -> None: logger: Logger = get_logger(v, name=self.get_task_type()) - workspace_id = payload.get('workspace_id') - session_id = payload.get('session_id') - key = payload.get('key') - content = payload.get('content') - context_id = payload.get('context_id') - importance = payload.get('importance', 0.5) + workspace_id = payload.get("workspace_id") + session_id = payload.get("session_id") + key = payload.get("key") + content = payload.get("content") + context_id = payload.get("context_id") + importance = payload.get("importance", 0.5) if not workspace_id or not content: logger.warning( "Missing required payload fields: workspace_id=%s, content=%s", - workspace_id, content, + workspace_id, + content, ) return # Resolve memory service from the framework from ..services.memory import MemoryService + memory_service: MemoryService = self.get_extension(EXT_MEMORY_SERVICE, v) # Build RememberInput @@ -64,16 +66,17 @@ async def handle(self, v: Variables, payload: dict) -> None: # Store memory via remember pipeline try: - memory = await memory_service.remember( - workspace_id=workspace_id, - 
input=remember_input - ) + memory = await memory_service.remember(workspace_id=workspace_id, input=remember_input) logger.info( "Persisted working memory entry as memory %s (session: %s, key: %s)", - memory.id, session_id, key, + memory.id, + session_id, + key, ) except Exception as e: logger.warning( "Failed to persist working memory entry (session: %s, key: %s): %s", - session_id, key, e, + session_id, + key, + e, ) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/semantic_tiering_task_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/semantic_tiering_task_handler.py index de44948..ac262cb 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/semantic_tiering_task_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/semantic_tiering_task_handler.py @@ -4,13 +4,13 @@ Background task handler for generating memory tiers (abstract, overview) via the TaskService infrastructure. """ + from logging import Logger -from typing import Optional -from scitrera_app_framework import get_logger, Variables +from scitrera_app_framework import Variables, get_logger +from ..services.semantic_tiering import EXT_SEMANTIC_TIERING_SERVICE, SemanticTieringService from ..services.tasks import TaskHandlerPlugin, TaskSchedule -from ..services.semantic_tiering import SemanticTieringService, EXT_SEMANTIC_TIERING_SERVICE class TierGenerationTaskHandler(TaskHandlerPlugin): @@ -22,14 +22,14 @@ class TierGenerationTaskHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'generate_tiers' + return "generate_tiers" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: return None # On-demand only, not recurring async def handle(self, v: Variables, payload: dict) -> None: - memory_id = payload['memory_id'] - workspace_id = payload['workspace_id'] + memory_id = payload["memory_id"] + workspace_id = payload["workspace_id"] tier_service: 
SemanticTieringService = self.get_extension(EXT_SEMANTIC_TIERING_SERVICE, v) logger: Logger = get_logger(v, name=self.get_task_type()) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/session_extraction_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/session_extraction_handler.py index f9f37d5..d1a5f75 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/session_extraction_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/session_extraction_handler.py @@ -1,14 +1,14 @@ """Session extraction task handler for on-demand token-budget-triggered extraction.""" + import json from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ..services.tasks import TaskHandlerPlugin, TaskSchedule +from ..models import MemoryType, RememberInput from ..services._constants import EXT_MEMORY_SERVICE, EXT_SESSION_SERVICE -from ..models import RememberInput, MemoryType +from ..services.tasks import TaskHandlerPlugin, TaskSchedule class SessionExtractionTaskHandler(TaskHandlerPlugin): @@ -24,32 +24,33 @@ class SessionExtractionTaskHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'session_extraction' + return "session_extraction" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: # No recurring schedule - triggered on-demand by token budget logic return None async def handle(self, v: Variables, payload: dict) -> None: logger: Logger = get_logger(v, name=self.get_task_type()) - workspace_id = payload.get('workspace_id') - session_id = payload.get('session_id') - context_id = payload.get('context_id') + workspace_id = payload.get("workspace_id") + session_id = payload.get("session_id") + context_id = payload.get("context_id") if not workspace_id or not session_id: logger.warning( "Missing required payload fields: 
workspace_id=%s, session_id=%s", - workspace_id, session_id, + workspace_id, + session_id, ) return - from ..services.session import SessionService - from ..services.memory import MemoryService from ..config import ( - MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL, DEFAULT_MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL, + MEMORYLAYER_SESSION_TOKEN_BUDGET_TOTAL, ) + from ..services.memory import MemoryService + from ..services.session import SessionService session_service: SessionService = self.get_extension(EXT_SESSION_SERVICE, v) memory_service: MemoryService = self.get_extension(EXT_MEMORY_SERVICE, v) @@ -66,7 +67,9 @@ async def handle(self, v: Variables, payload: dict) -> None: except Exception as e: logger.warning( "Failed to fetch working memory for session %s in workspace %s: %s", - session_id, workspace_id, e, + session_id, + workspace_id, + e, ) return @@ -76,7 +79,9 @@ async def handle(self, v: Variables, payload: dict) -> None: logger.info( "Extracting %d working memory entries for session %s (token budget: %d)", - len(working_memory_entries), session_id, token_budget, + len(working_memory_entries), + session_id, + token_budget, ) # Extract entries respecting the token budget @@ -90,7 +95,8 @@ async def handle(self, v: Variables, payload: dict) -> None: if tokens_used + entry_tokens > token_budget: logger.debug( "Token budget (%d) reached after %d entries, stopping extraction", - token_budget, extracted_count, + token_budget, + extracted_count, ) break @@ -99,9 +105,9 @@ async def handle(self, v: Variables, payload: dict) -> None: type=MemoryType.WORKING, importance=0.5, metadata={ - 'session_id': session_id, - 'working_memory_key': wm.key, - 'extraction_trigger': 'token_budget', + "session_id": session_id, + "working_memory_key": wm.key, + "extraction_trigger": "token_budget", }, context_id=context_id, ) @@ -115,15 +121,21 @@ async def handle(self, v: Variables, payload: dict) -> None: extracted_count += 1 logger.debug( "Extracted working memory key '%s' as memory %s", 
- wm.key, memory.id, + wm.key, + memory.id, ) except Exception as e: logger.warning( "Failed to extract working memory key '%s' for session %s: %s", - wm.key, session_id, e, + wm.key, + session_id, + e, ) logger.info( "Session extraction complete for %s: %d/%d entries extracted (%d tokens)", - session_id, extracted_count, len(working_memory_entries), tokens_used, + session_id, + extracted_count, + len(working_memory_entries), + tokens_used, ) diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/session_touch_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/session_touch_handler.py index 27a64db..0d44a10 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/session_touch_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/session_touch_handler.py @@ -1,17 +1,16 @@ """Session touch task handler.""" -from typing import Optional from scitrera_app_framework import Variables -from ..services.tasks import TaskHandlerPlugin, TaskSchedule from ..services.session import EXT_SESSION_SERVICE, SessionService +from ..services.tasks import TaskHandlerPlugin, TaskSchedule SESSION_TOUCH_HANDLER_TASK = "session_touch" async def handle_session_touch( - session_service: SessionService, - session_id: str, + session_service: SessionService, + session_id: str, ) -> None: session = await session_service.get(session_id) logger = session_service.logger @@ -27,11 +26,10 @@ async def handle_session_touch( class SessionTouchHandler(TaskHandlerPlugin): - def get_task_type(self) -> str: return SESSION_TOUCH_HANDLER_TASK - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: return None async def handle(self, v: Variables, payload: dict) -> None: diff --git a/memorylayer-core-python/src/memorylayer_server/tasks/workspace_contradiction_scan_handler.py b/memorylayer-core-python/src/memorylayer_server/tasks/workspace_contradiction_scan_handler.py index 
4197250..0532bf9 100644 --- a/memorylayer-core-python/src/memorylayer_server/tasks/workspace_contradiction_scan_handler.py +++ b/memorylayer-core-python/src/memorylayer_server/tasks/workspace_contradiction_scan_handler.py @@ -1,11 +1,11 @@ """Workspace contradiction scan task handler — daily scheduled scan.""" + from logging import Logger -from typing import Optional from scitrera_app_framework import get_logger from scitrera_app_framework.api import Variables -from ..services.contradiction import ContradictionService, EXT_CONTRADICTION_SERVICE +from ..services.contradiction import EXT_CONTRADICTION_SERVICE, ContradictionService from ..services.storage import EXT_STORAGE_BACKEND from ..services.storage.base import StorageBackend from ..services.tasks import TaskHandlerPlugin, TaskSchedule @@ -21,22 +21,20 @@ class WorkspaceContradictionScanHandler(TaskHandlerPlugin): """ def get_task_type(self) -> str: - return 'workspace_contradiction_scan' + return "workspace_contradiction_scan" - def get_schedule(self, v: Variables) -> Optional[TaskSchedule]: + def get_schedule(self, v: Variables) -> TaskSchedule | None: return TaskSchedule( interval_seconds=86400, # Once per day default_payload={}, ) async def handle(self, v: Variables, payload: dict) -> None: - contradiction_service: ContradictionService = self.get_extension( - EXT_CONTRADICTION_SERVICE, v - ) + contradiction_service: ContradictionService = self.get_extension(EXT_CONTRADICTION_SERVICE, v) storage: StorageBackend = self.get_extension(EXT_STORAGE_BACKEND, v) logger: Logger = get_logger(v, name=self.get_task_type()) - workspace_id = payload.get('workspace_id') + workspace_id = payload.get("workspace_id") if workspace_id: # Single-workspace scan (e.g., triggered on-demand with a specific workspace) @@ -44,7 +42,8 @@ async def handle(self, v: Variables, payload: dict) -> None: records = await contradiction_service.scan_workspace(workspace_id) logger.info( "Contradiction scan complete for workspace %s: %d new 
contradiction(s) found", - workspace_id, len(records), + workspace_id, + len(records), ) else: # Scan all workspaces @@ -58,16 +57,19 @@ async def handle(self, v: Variables, payload: dict) -> None: if records: logger.info( "Workspace %s: %d new contradiction(s) found", - workspace.id, len(records), + workspace.id, + len(records), ) else: logger.debug("Workspace %s: no new contradictions found", workspace.id) except Exception as exc: logger.error( "Contradiction scan failed for workspace %s: %s", - workspace.id, exc, + workspace.id, + exc, ) logger.info( "Contradiction scan complete: %d workspace(s) scanned, %d total contradiction(s) found", - len(workspaces), total_found, + len(workspaces), + total_found, ) diff --git a/memorylayer-core-python/src/memorylayer_server/utils/__init__.py b/memorylayer-core-python/src/memorylayer_server/utils/__init__.py index ae5c7fb..a5343d9 100644 --- a/memorylayer-core-python/src/memorylayer_server/utils/__init__.py +++ b/memorylayer-core-python/src/memorylayer_server/utils/__init__.py @@ -1,8 +1,8 @@ """Shared utilities for MemoryLayer services.""" +from .datetime import parse_datetime_utc, utc_now, utc_now_iso from .hashing import compute_content_hash from .id_generation import generate_id -from .datetime import utc_now, utc_now_iso, parse_datetime_utc from .vector_math import cosine_similarity, dot_product __all__ = [ diff --git a/memorylayer-core-python/src/memorylayer_server/utils/datetime.py b/memorylayer-core-python/src/memorylayer_server/utils/datetime.py index f15efca..36f8a9a 100644 --- a/memorylayer-core-python/src/memorylayer_server/utils/datetime.py +++ b/memorylayer-core-python/src/memorylayer_server/utils/datetime.py @@ -1,7 +1,6 @@ """Datetime utilities for consistent timestamp handling.""" -from datetime import datetime, timezone -from typing import Optional +from datetime import UTC, datetime def utc_now() -> datetime: @@ -10,7 +9,7 @@ def utc_now() -> datetime: Returns: Timezone-aware datetime in UTC """ - return 
datetime.now(timezone.utc) + return datetime.now(UTC) def utc_now_iso() -> str: @@ -19,10 +18,10 @@ def utc_now_iso() -> str: Returns: ISO 8601 formatted datetime string """ - return datetime.now(timezone.utc).isoformat() + return datetime.now(UTC).isoformat() -def parse_datetime_utc(dt_str: Optional[str]) -> Optional[datetime]: +def parse_datetime_utc(dt_str: str | None) -> datetime | None: """Parse datetime string and ensure it's timezone-aware (UTC). Args: @@ -35,5 +34,5 @@ def parse_datetime_utc(dt_str: Optional[str]) -> Optional[datetime]: return None dt = datetime.fromisoformat(dt_str) if dt.tzinfo is None: - dt = dt.replace(tzinfo=timezone.utc) + dt = dt.replace(tzinfo=UTC) return dt diff --git a/memorylayer-core-python/src/memorylayer_server/utils/vector_math.py b/memorylayer-core-python/src/memorylayer_server/utils/vector_math.py index 0da6d39..7020746 100644 --- a/memorylayer-core-python/src/memorylayer_server/utils/vector_math.py +++ b/memorylayer-core-python/src/memorylayer_server/utils/vector_math.py @@ -1,4 +1,5 @@ """Vector math utilities for embedding operations.""" + import numpy as np diff --git a/memorylayer-core-python/tests/conftest.py b/memorylayer-core-python/tests/conftest.py index 1323517..35519e6 100644 --- a/memorylayer-core-python/tests/conftest.py +++ b/memorylayer-core-python/tests/conftest.py @@ -9,28 +9,30 @@ async def test_something(memory_service): result = await memory_service.remember(...) 
""" + +import asyncio import logging +from datetime import UTC + import pytest import pytest_asyncio -import asyncio - from scitrera_app_framework import Variables, get_extension -from memorylayer_server.models.memory import RememberInput, MemoryType + from memorylayer_server.config import ( - MEMORYLAYER_EMBEDDING_PROVIDER, - MEMORYLAYER_STORAGE_BACKEND, - MEMORYLAYER_SQLITE_STORAGE_PATH, MEMORYLAYER_DATA_DIR, + MEMORYLAYER_EMBEDDING_PROVIDER, MEMORYLAYER_RERANKER_PROVIDER, + MEMORYLAYER_STORAGE_BACKEND, ) +from memorylayer_server.models.memory import MemoryType, RememberInput from memorylayer_server.services.llm.base import MEMORYLAYER_LLM_REGISTRY from memorylayer_server.services.tasks.asyncio_impl import MEMORYLAYER_TASKS_ENABLED - # ----------------------------------------------------------------------------- # Logging Configuration (initialized by test harness, not framework) # ----------------------------------------------------------------------------- + @pytest.fixture(scope="session") def test_logger() -> logging.Logger: """ @@ -46,10 +48,7 @@ def test_logger() -> logging.Logger: if not logger.handlers: handler = logging.StreamHandler() handler.setLevel(logging.DEBUG) - formatter = logging.Formatter( - '%(asctime)s %(levelname)s %(name)s %(funcName)s() > %(message)s', - datefmt='%Y/%m/%d %H:%M:%S' - ) + formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s %(funcName)s() > %(message)s", datefmt="%Y/%m/%d %H:%M:%S") handler.setFormatter(formatter) logger.addHandler(handler) @@ -60,6 +59,7 @@ def test_logger() -> logging.Logger: # Framework Initialization with Test Isolation # ----------------------------------------------------------------------------- + @pytest_asyncio.fixture(scope="session") async def test_configuration(): """ @@ -88,8 +88,7 @@ async def test_framework(test_configuration, tmp_path_factory, test_logger): Yields: tuple: (v: Variables, services: module) for use in tests """ - from scitrera_app_framework import Variables - from 
memorylayer_server.dependencies import preconfigure, initialize_services, shutdown_services + from memorylayer_server.dependencies import initialize_services, preconfigure, shutdown_services # Create session-scoped temp directory for database tmp_dir = tmp_path_factory.mktemp("memorylayer_test") @@ -126,10 +125,11 @@ def v(test_framework): # return services -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def fastapi_app(test_framework): """FastAPI app instance for tests.""" from memorylayer_server.lifecycle.fastapi import fastapi_app_factory + v, _ = test_framework app = fastapi_app_factory(v=v) return app @@ -139,6 +139,7 @@ def fastapi_app(test_framework): # Event Loop Configuration # ----------------------------------------------------------------------------- + @pytest.fixture(scope="session") def event_loop(): """Create event loop for async tests.""" @@ -152,10 +153,12 @@ def event_loop(): # These just call the DI system with the isolated Variables instance. # ----------------------------------------------------------------------------- + @pytest_asyncio.fixture async def memory_service(v): """Get the memory service.""" from memorylayer_server.services.memory import EXT_MEMORY_SERVICE + return get_extension(EXT_MEMORY_SERVICE, v) @@ -163,6 +166,7 @@ async def memory_service(v): async def association_service(v): """Get the association service.""" from memorylayer_server.services.association import EXT_ASSOCIATION_SERVICE + return get_extension(EXT_ASSOCIATION_SERVICE, v) @@ -170,6 +174,7 @@ async def association_service(v): async def storage_backend(v): """Get the storage backend.""" from memorylayer_server.services.storage import EXT_STORAGE_BACKEND + return get_extension(EXT_STORAGE_BACKEND, v) @@ -177,6 +182,7 @@ async def storage_backend(v): async def embedding_service(v): """Get the embedding service.""" from memorylayer_server.services.embedding import EXT_EMBEDDING_SERVICE + return get_extension(EXT_EMBEDDING_SERVICE, v) @@ -184,6 
+190,7 @@ async def embedding_service(v): async def deduplication_service(v): """Get the deduplication service.""" from memorylayer_server.services.deduplication import EXT_DEDUPLICATION_SERVICE + return get_extension(EXT_DEDUPLICATION_SERVICE, v) @@ -191,6 +198,7 @@ async def deduplication_service(v): # Test Data Factories # ----------------------------------------------------------------------------- + @pytest.fixture def sample_remember_input() -> RememberInput: """Create sample remember input.""" @@ -199,15 +207,16 @@ def sample_remember_input() -> RememberInput: type=MemoryType.SEMANTIC, importance=0.8, tags=["preferences", "programming"], - metadata={"source": "conversation", "confidence": 0.95} + metadata={"source": "conversation", "confidence": 0.95}, ) @pytest_asyncio.fixture async def workspace_id(storage_backend) -> str: """Default test workspace ID with workspace and context created.""" + from datetime import datetime + from memorylayer_server.models.workspace import Workspace - from datetime import datetime, timezone workspace_id = "default" # TODO: do these align with new defaults?!?! @@ -218,8 +227,8 @@ async def workspace_id(storage_backend) -> str: id=workspace_id, tenant_id="default_tenant", name="Default Test Workspace", - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) await storage_backend.create_workspace(workspace) @@ -230,10 +239,12 @@ async def workspace_id(storage_backend) -> str: # Test Isolation Helpers # ----------------------------------------------------------------------------- + @pytest.fixture def unique_workspace_id() -> str: """Generate a unique workspace ID for test isolation (function-scoped).""" import uuid + return f"test_{uuid.uuid4().hex[:8]}" @@ -245,6 +256,7 @@ def class_workspace_id(request) -> str: Use this for test classes that need isolated data across all their test methods. 
""" import uuid + class_name = request.cls.__name__ if request.cls else "unknown" return f"test_{class_name}_{uuid.uuid4().hex[:8]}" diff --git a/memorylayer-core-python/tests/integration/conftest.py b/memorylayer-core-python/tests/integration/conftest.py index 3255691..a01cd3d 100644 --- a/memorylayer-core-python/tests/integration/conftest.py +++ b/memorylayer-core-python/tests/integration/conftest.py @@ -5,7 +5,7 @@ a properly initialized FastAPI app using the test framework's Variables instance. """ -from typing import AsyncGenerator, Generator +from collections.abc import AsyncGenerator, Generator import pytest from fastapi import FastAPI diff --git a/memorylayer-core-python/tests/integration/test_health.py b/memorylayer-core-python/tests/integration/test_health.py index d754f8e..ccfbadf 100644 --- a/memorylayer-core-python/tests/integration/test_health.py +++ b/memorylayer-core-python/tests/integration/test_health.py @@ -1,6 +1,5 @@ """Integration tests for health check endpoints.""" -import pytest from fastapi.testclient import TestClient diff --git a/memorylayer-core-python/tests/integration/test_llm_extraction.py b/memorylayer-core-python/tests/integration/test_llm_extraction.py index 1b93d06..c823787 100644 --- a/memorylayer-core-python/tests/integration/test_llm_extraction.py +++ b/memorylayer-core-python/tests/integration/test_llm_extraction.py @@ -9,9 +9,10 @@ MEMORYLAYER_LLM_OPENAI_API_KEY=local MEMORYLAYER_LLM_OPENAI_MODEL=nemotron3-30b-a3b-2512 """ + import os + import pytest -import pytest_asyncio from memorylayer_server.services.extraction import ExtractionCategory from memorylayer_server.services.extraction.default import ( @@ -20,7 +21,6 @@ ) from memorylayer_server.services.llm.openai import OpenAILLMProvider - # Skip all tests in this module if LLM is not configured pytestmark = pytest.mark.llm diff --git a/memorylayer-core-python/tests/integration/test_llm_extraction_quality.py 
b/memorylayer-core-python/tests/integration/test_llm_extraction_quality.py index 6d93dc6..935113b 100644 --- a/memorylayer-core-python/tests/integration/test_llm_extraction_quality.py +++ b/memorylayer-core-python/tests/integration/test_llm_extraction_quality.py @@ -17,17 +17,17 @@ MEMORYLAYER_LLM_OPENAI_API_KEY=local MEMORYLAYER_LLM_OPENAI_MODEL=nemotron3-30b-a3b-2512 """ + import os + import pytest from memorylayer_server.services.extraction import ExtractionCategory from memorylayer_server.services.extraction.default import ( DefaultExtractionService, - ExtractionOptions, ) from memorylayer_server.services.llm.openai import OpenAILLMProvider - pytestmark = pytest.mark.llm_quality @@ -81,20 +81,16 @@ async def test_profile_vs_entity_distinction(self, extraction_service): result = await extraction_service._llm_extraction(context, categories) # Find memories about the user vs others - user_memories = [m for m in result if "senior python developer" in m.content.lower() - or "10 years" in m.content.lower()] - other_memories = [m for m in result if "john" in m.content.lower() - or "sarah" in m.content.lower()] + user_memories = [m for m in result if "senior python developer" in m.content.lower() or "10 years" in m.content.lower()] + other_memories = [m for m in result if "john" in m.content.lower() or "sarah" in m.content.lower()] # User info should be PROFILE for m in user_memories: - assert m.category == ExtractionCategory.PROFILE, \ - f"User info should be PROFILE, got {m.category}: {m.content}" + assert m.category == ExtractionCategory.PROFILE, f"User info should be PROFILE, got {m.category}: {m.content}" # Others should be ENTITIES for m in other_memories: - assert m.category == ExtractionCategory.ENTITIES, \ - f"Info about others should be ENTITIES, got {m.category}: {m.content}" + assert m.category == ExtractionCategory.ENTITIES, f"Info about others should be ENTITIES, got {m.category}: {m.content}" @pytest.mark.asyncio async def 
test_preference_vs_event_distinction(self, extraction_service): @@ -125,11 +121,9 @@ async def test_preference_vs_event_distinction(self, extraction_service): has_event_keyword = any(k in content_lower for k in event_keywords) if has_pref_keyword and not has_event_keyword: - assert m.category == ExtractionCategory.PREFERENCES, \ - f"Ongoing preference should be PREFERENCES: {m.content}" + assert m.category == ExtractionCategory.PREFERENCES, f"Ongoing preference should be PREFERENCES: {m.content}" elif has_event_keyword and "always" not in content_lower: - assert m.category == ExtractionCategory.EVENTS, \ - f"One-time event should be EVENTS: {m.content}" + assert m.category == ExtractionCategory.EVENTS, f"One-time event should be EVENTS: {m.content}" @pytest.mark.asyncio async def test_case_extraction_has_problem_and_solution(self, extraction_service): @@ -163,15 +157,11 @@ async def test_case_extraction_has_problem_and_solution(self, extraction_service # Combined, all cases should have both elements (LLMs may rephrase) all_content = " ".join(m.content.lower() for m in case_memories) - problem_indicators = ["timeout", "bug", "issue", "problem", "exhausted", - "error", "fail", "request", "api", "connection"] - solution_indicators = ["fix", "increase", "resolved", "solution", "cleanup", - "change", "set", "add", "20", "max_connections"] + problem_indicators = ["timeout", "bug", "issue", "problem", "exhausted", "error", "fail", "request", "api", "connection"] + solution_indicators = ["fix", "increase", "resolved", "solution", "cleanup", "change", "set", "add", "20", "max_connections"] - assert any(w in all_content for w in problem_indicators), \ - f"Cases should mention the problem. Content: {all_content}" - assert any(w in all_content for w in solution_indicators), \ - f"Cases should mention the solution. Content: {all_content}" + assert any(w in all_content for w in problem_indicators), f"Cases should mention the problem. 
Content: {all_content}" + assert any(w in all_content for w in solution_indicators), f"Cases should mention the solution. Content: {all_content}" class TestImportanceCalibration: @@ -196,8 +186,7 @@ async def test_high_importance_for_critical_info(self, extraction_service): assert len(cto_memories) > 0, "Should extract CTO information" for m in cto_memories: - assert m.importance >= 0.7, \ - f"CTO role should be high importance (>=0.7), got {m.importance}: {m.content}" + assert m.importance >= 0.7, f"CTO role should be high importance (>=0.7), got {m.importance}: {m.content}" @pytest.mark.asyncio async def test_lower_importance_for_minor_preferences(self, extraction_service): @@ -218,17 +207,16 @@ async def test_lower_importance_for_minor_preferences(self, extraction_service): minor_keywords = ["tabs", "spaces", "dark mode", "ide"] major_keywords = ["payment", "rewrite", "authentication", "oauth", "migrate"] - minor_memories = [m for m in result - if any(k in m.content.lower() for k in minor_keywords)] - major_memories = [m for m in result - if any(k in m.content.lower() for k in major_keywords)] + minor_memories = [m for m in result if any(k in m.content.lower() for k in minor_keywords)] + major_memories = [m for m in result if any(k in m.content.lower() for k in major_keywords)] if minor_memories and major_memories: avg_minor = sum(m.importance for m in minor_memories) / len(minor_memories) avg_major = sum(m.importance for m in major_memories) / len(major_memories) - assert avg_major > avg_minor, \ + assert avg_major > avg_minor, ( f"Major decisions ({avg_major:.2f}) should have higher importance than minor preferences ({avg_minor:.2f})" + ) class TestNoiseRejection: @@ -254,12 +242,11 @@ async def test_filters_greetings_and_chatter(self, extraction_service): # Should not extract greetings/thank yous as memories for m in result: content_lower = m.content.lower() - assert "hello" not in content_lower or "python" in content_lower, \ - f"Should not extract bare 
greetings: {m.content}" - assert "thank" not in content_lower or any(k in content_lower for k in ["python", "pytorch", "fastapi"]), \ + assert "hello" not in content_lower or "python" in content_lower, f"Should not extract bare greetings: {m.content}" + assert "thank" not in content_lower or any(k in content_lower for k in ["python", "pytorch", "fastapi"]), ( f"Should not extract thank yous: {m.content}" - assert "you're welcome" not in content_lower, \ - f"Should not extract pleasantries: {m.content}" + ) + assert "you're welcome" not in content_lower, f"Should not extract pleasantries: {m.content}" @pytest.mark.asyncio async def test_extracts_technical_content_from_noisy_conversation(self, extraction_service): @@ -280,10 +267,8 @@ async def test_extracts_technical_content_from_noisy_conversation(self, extracti # Should extract the actual technical content all_content = " ".join(m.content.lower() for m in result) - assert "redis" in all_content or "cache" in all_content, \ - "Should extract Redis/cache information from noisy text" - assert "invalidation" in all_content or "stale" in all_content, \ - "Should extract the problem/solution despite noise" + assert "redis" in all_content or "cache" in all_content, "Should extract Redis/cache information from noisy text" + assert "invalidation" in all_content or "stale" in all_content, "Should extract the problem/solution despite noise" class TestContentQuality: @@ -307,16 +292,16 @@ async def test_memories_are_standalone(self, extraction_service): for m in result: # Memory should not start with dangling references first_word = m.content.split()[0].lower() if m.content.split() else "" - assert first_word not in ["it", "this", "that", "these", "those"], \ + assert first_word not in ["it", "this", "that", "these", "those"], ( f"Memory should not start with dangling reference: {m.content}" + ) # Memory should contain the subject, not just pronouns content_lower = m.content.lower() - has_subject = any(k in content_lower for 
k in - ["recommendation", "engine", "filtering", "e-commerce", - "platform", "hybrid", "approach"]) - assert has_subject, \ - f"Memory should contain concrete subject, not just pronouns: {m.content}" + has_subject = any( + k in content_lower for k in ["recommendation", "engine", "filtering", "e-commerce", "platform", "hybrid", "approach"] + ) + assert has_subject, f"Memory should contain concrete subject, not just pronouns: {m.content}" @pytest.mark.asyncio async def test_memories_are_concise(self, extraction_service): @@ -338,8 +323,7 @@ async def test_memories_are_concise(self, extraction_service): for m in result: # Memories should generally be under 300 characters for conciseness # (not a hard rule, but most should be) - assert len(m.content) < 500, \ - f"Memory might be too verbose ({len(m.content)} chars): {m.content[:100]}..." + assert len(m.content) < 500, f"Memory might be too verbose ({len(m.content)} chars): {m.content[:100]}..." class TestCompleteness: @@ -369,8 +353,7 @@ async def test_captures_all_key_entities(self, extraction_service): captured = [t for t in key_techs if t in all_content] # Should capture at least most of them (allow for some summarization) - assert len(captured) >= 4, \ - f"Should capture most key technologies. Captured: {captured}, Missing: {set(key_techs) - set(captured)}" + assert len(captured) >= 4, f"Should capture most key technologies. 
Captured: {captured}, Missing: {set(key_techs) - set(captured)}" @pytest.mark.asyncio async def test_captures_both_problem_and_solution_in_case(self, extraction_service): @@ -390,10 +373,10 @@ async def test_captures_both_problem_and_solution_in_case(self, extraction_servi all_content = " ".join(m.content.lower() for m in result) # Should capture both problem and solution elements - assert any(w in all_content for w in ["oom", "out of memory", "512mb", "heap"]), \ - "Should capture the problem (OOM/memory issue)" - assert any(w in all_content for w in ["2gb", "g1gc", "increased", "gc tuning"]), \ + assert any(w in all_content for w in ["oom", "out of memory", "512mb", "heap"]), "Should capture the problem (OOM/memory issue)" + assert any(w in all_content for w in ["2gb", "g1gc", "increased", "gc tuning"]), ( "Should capture the solution (heap increase, GC tuning)" + ) class TestModelEvaluationSummary: @@ -449,7 +432,7 @@ async def test_overall_extraction_quality(self, extraction_service): "user_correctly_profiled": False, "others_are_entities": False, "no_noise_extracted": True, - } + }, } for cat in ExtractionCategory: @@ -474,7 +457,7 @@ async def test_overall_extraction_quality(self, extraction_service): stats["avg_importance"] = sum(m.importance for m in result) / len(result) # Check specific quality items - all_content = " ".join(m.content.lower() for m in result) + " ".join(m.content.lower() for m in result) # User should be correctly profiled profile_memories = [m for m in result if m.category == ExtractionCategory.PROFILE] @@ -515,7 +498,7 @@ async def test_overall_extraction_quality(self, extraction_service): print(f" {status}: {check}") if result_val: passed += 1 - print(f"\nOverall: {passed}/{total} checks passed ({100*passed/total:.0f}%)") + print(f"\nOverall: {passed}/{total} checks passed ({100 * passed / total:.0f}%)") print("=" * 60 + "\n") # Assert minimum quality bar diff --git a/memorylayer-core-python/tests/integration/test_memories_api.py 
b/memorylayer-core-python/tests/integration/test_memories_api.py index 943205f..3921619 100644 --- a/memorylayer-core-python/tests/integration/test_memories_api.py +++ b/memorylayer-core-python/tests/integration/test_memories_api.py @@ -327,19 +327,15 @@ def test_batch_operations(self, test_client: TestClient, workspace_headers: dict json={ "operations": [ { - "type": "create", - "data": { - "content": "First batch memory", - "importance": 0.5, - } + "op": "create", + "content": "First batch memory", + "importance": 0.5, }, { - "type": "create", - "data": { - "content": "Second batch memory", - "importance": 0.6, - } - } + "op": "create", + "content": "Second batch memory", + "importance": 0.6, + }, ] }, headers=workspace_headers, @@ -369,26 +365,20 @@ def test_batch_operations_mixed_types(self, test_client: TestClient, workspace_h json={ "operations": [ { - "type": "create", - "data": { - "content": "New batch memory", - } + "op": "create", + "content": "New batch memory", }, { - "type": "update", - "data": { - "memory_id": memory_id, - "content": "Updated content", - "importance": 0.9, - } + "op": "update", + "memory_id": memory_id, + "content": "Updated content", + "importance": 0.9, }, { - "type": "delete", - "data": { - "memory_id": memory_id, - "hard": False, - } - } + "op": "delete", + "memory_id": memory_id, + "hard": False, + }, ] }, headers=workspace_headers, @@ -404,9 +394,7 @@ def test_batch_operations_empty(self, test_client: TestClient, workspace_headers """Test batch with no operations.""" response = test_client.post( "/v1/memories/batch", - json={ - "operations": [] - }, + json={"operations": []}, headers=workspace_headers, ) @@ -548,8 +536,8 @@ def test_deduplication_same_content(self, test_client: TestClient, workspace_hea assert response2.status_code == 201 # If implemented, should return same ID - memory1_id = response1.json()["memory"]["id"] - memory2_id = response2.json()["memory"]["id"] + response1.json()["memory"]["id"] + 
response2.json()["memory"]["id"] # Note: Deduplication may or may not be implemented yet # Just verify both calls work @@ -752,7 +740,7 @@ def test_reflect_include_sources_true(self, test_client: TestClient, workspace_h }, headers=workspace_headers, ) - memory_id = create_response.json()["memory"]["id"] + create_response.json()["memory"]["id"] response = test_client.post( "/v1/memories/reflect", @@ -997,9 +985,7 @@ def test_create_association_with_metadata(self, test_client: TestClient, workspa assert data["association"]["metadata"]["priority"] == "high" assert data["association"]["metadata"]["team"] == "backend" - def test_create_association_all_relationships( - self, test_client: TestClient, workspace_headers: dict[str, str] - ) -> None: + def test_create_association_all_relationships(self, test_client: TestClient, workspace_headers: dict[str, str]) -> None: """Test creating associations with all relationship types.""" # Create base memory base_response = test_client.post( @@ -1012,13 +998,32 @@ def test_create_association_all_relationships( # Test various relationship types relationships = [ - "causes", "triggers", "leads_to", "prevents", - "solves", "addresses", "alternative_to", "improves", - "occurs_in", "applies_to", "works_with", "requires", - "builds_on", "contradicts", "confirms", "supersedes", - "similar_to", "variant_of", "related_to", - "follows", "depends_on", "enables", "blocks", - "effective_for", "preferred_over", "deprecated_by", + "causes", + "triggers", + "leads_to", + "prevents", + "solves", + "addresses", + "alternative_to", + "improves", + "occurs_in", + "applies_to", + "works_with", + "requires", + "builds_on", + "contradicts", + "confirms", + "supersedes", + "similar_to", + "variant_of", + "related_to", + "follows", + "depends_on", + "enables", + "blocks", + "effective_for", + "preferred_over", + "deprecated_by", ] for relationship in relationships: @@ -1096,9 +1101,7 @@ def test_list_associations(self, test_client: TestClient, 
workspace_headers: dic assert data["total_count"] >= 2 assert len(data["associations"]) >= 2 - def test_list_associations_with_relationship_filter( - self, test_client: TestClient, workspace_headers: dict[str, str] - ) -> None: + def test_list_associations_with_relationship_filter(self, test_client: TestClient, workspace_headers: dict[str, str]) -> None: """Test listing associations filtered by relationship types.""" # Create memories response1 = test_client.post( diff --git a/memorylayer-core-python/tests/integration/test_sdk_integration.py b/memorylayer-core-python/tests/integration/test_sdk_integration.py index e6e4ba2..92f6c66 100644 --- a/memorylayer-core-python/tests/integration/test_sdk_integration.py +++ b/memorylayer-core-python/tests/integration/test_sdk_integration.py @@ -3,7 +3,6 @@ These tests verify the SDK client works correctly against the actual server API. """ -import pytest from fastapi.testclient import TestClient @@ -265,6 +264,7 @@ def test_sdk_create_session(self, test_client: TestClient, workspace_headers: di def test_sdk_session_context(self, test_client: TestClient, workspace_headers: dict[str, str]) -> None: """Test setting session working memory (v2: /memory endpoint).""" import uuid + session_id = f"test_session_ctx_{uuid.uuid4().hex[:8]}" # Create session first diff --git a/memorylayer-core-python/tests/unit/test_association_service.py b/memorylayer-core-python/tests/unit/test_association_service.py index cffdfff..4264c32 100644 --- a/memorylayer-core-python/tests/unit/test_association_service.py +++ b/memorylayer-core-python/tests/unit/test_association_service.py @@ -1,9 +1,11 @@ """Unit tests for AssociationService.""" + import pytest -from memorylayer_server.models.memory import RememberInput, MemoryType + from memorylayer_server.models.association import AssociateInput, GraphQueryInput -from memorylayer_server.services.memory import MemoryService +from memorylayer_server.models.memory import RememberInput from 
memorylayer_server.services.association import AssociationService +from memorylayer_server.services.memory import MemoryService class TestAssociate: @@ -11,31 +13,19 @@ class TestAssociate: @pytest.mark.asyncio async def test_create_association( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test creating an association between memories.""" # Create two memories - mem1 = await memory_service.remember( - workspace_id, - RememberInput(content="Problem: Database connection timeout") - ) - mem2 = await memory_service.remember( - workspace_id, - RememberInput(content="Solution: Increase connection pool size") - ) + mem1 = await memory_service.remember(workspace_id, RememberInput(content="Problem: Database connection timeout")) + mem2 = await memory_service.remember(workspace_id, RememberInput(content="Solution: Increase connection pool size")) # Create association assoc = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem2.id, - target_id=mem1.id, - relationship="solves", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem2.id, target_id=mem1.id, relationship="solves", strength=0.9) ) assert assoc.id is not None @@ -45,33 +35,20 @@ async def test_create_association( @pytest.mark.asyncio async def test_get_related_memories( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test retrieving related memories.""" # Create memories and association - mem1 = await memory_service.remember( - workspace_id, RememberInput(content="First memory") - ) - mem2 = await memory_service.remember( - workspace_id, RememberInput(content="Related memory") - ) + mem1 = await memory_service.remember(workspace_id, 
RememberInput(content="First memory")) + mem2 = await memory_service.remember(workspace_id, RememberInput(content="Related memory")) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="related_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="related_to")) # Get related - associations = await association_service.get_related( - workspace_id, mem1.id - ) + associations = await association_service.get_related(workspace_id, mem1.id) assert len(associations) >= 1 @@ -81,458 +58,252 @@ class TestRelationshipTypes: @pytest.mark.asyncio async def test_causal_relationships( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test CAUSAL category: CAUSES, TRIGGERS, LEADS_TO, PREVENTS.""" # Create memories for causal chain - mem_error = await memory_service.remember( - workspace_id, RememberInput(content="System error occurred") - ) - mem_config = await memory_service.remember( - workspace_id, RememberInput(content="Invalid configuration") - ) - mem_timeout = await memory_service.remember( - workspace_id, RememberInput(content="Connection timeout") - ) - mem_solution = await memory_service.remember( - workspace_id, RememberInput(content="Retry mechanism") - ) + mem_error = await memory_service.remember(workspace_id, RememberInput(content="System error occurred")) + mem_config = await memory_service.remember(workspace_id, RememberInput(content="Invalid configuration")) + mem_timeout = await memory_service.remember(workspace_id, RememberInput(content="Connection timeout")) + mem_solution = await memory_service.remember(workspace_id, RememberInput(content="Retry mechanism")) # Test CAUSES assoc_causes = await association_service.associate( - workspace_id, - 
AssociateInput( - source_id=mem_config.id, - target_id=mem_error.id, - relationship="causes", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_config.id, target_id=mem_error.id, relationship="causes", strength=0.9) ) assert assoc_causes.relationship == "causes" # Test TRIGGERS assoc_triggers = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_error.id, - target_id=mem_timeout.id, - relationship="triggers", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem_error.id, target_id=mem_timeout.id, relationship="triggers", strength=0.8) ) assert assoc_triggers.relationship == "triggers" # Test LEADS_TO assoc_leads = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_timeout.id, - target_id=mem_error.id, - relationship="leads_to", - strength=0.7 - ) + workspace_id, AssociateInput(source_id=mem_timeout.id, target_id=mem_error.id, relationship="leads_to", strength=0.7) ) assert assoc_leads.relationship == "leads_to" # Test PREVENTS assoc_prevents = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_solution.id, - target_id=mem_timeout.id, - relationship="prevents", - strength=0.85 - ) + workspace_id, AssociateInput(source_id=mem_solution.id, target_id=mem_timeout.id, relationship="prevents", strength=0.85) ) assert assoc_prevents.relationship == "prevents" @pytest.mark.asyncio async def test_solution_relationships( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test SOLUTION category: SOLVES, ADDRESSES, ALTERNATIVE_TO, IMPROVES.""" - mem_problem = await memory_service.remember( - workspace_id, RememberInput(content="Slow database queries") - ) - mem_solution1 = await memory_service.remember( - workspace_id, RememberInput(content="Add index to table") - ) - mem_solution2 = 
await memory_service.remember( - workspace_id, RememberInput(content="Use query caching") - ) - mem_improvement = await memory_service.remember( - workspace_id, RememberInput(content="Optimized index strategy") - ) + mem_problem = await memory_service.remember(workspace_id, RememberInput(content="Slow database queries")) + mem_solution1 = await memory_service.remember(workspace_id, RememberInput(content="Add index to table")) + mem_solution2 = await memory_service.remember(workspace_id, RememberInput(content="Use query caching")) + mem_improvement = await memory_service.remember(workspace_id, RememberInput(content="Optimized index strategy")) # Test SOLVES assoc_solves = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_solution1.id, - target_id=mem_problem.id, - relationship="solves", - strength=0.95 - ) + workspace_id, AssociateInput(source_id=mem_solution1.id, target_id=mem_problem.id, relationship="solves", strength=0.95) ) assert assoc_solves.relationship == "solves" # Test ADDRESSES assoc_addresses = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_solution2.id, - target_id=mem_problem.id, - relationship="addresses", - strength=0.85 - ) + workspace_id, AssociateInput(source_id=mem_solution2.id, target_id=mem_problem.id, relationship="addresses", strength=0.85) ) assert assoc_addresses.relationship == "addresses" # Test ALTERNATIVE_TO assoc_alt = await association_service.associate( workspace_id, - AssociateInput( - source_id=mem_solution2.id, - target_id=mem_solution1.id, - relationship="alternative_to", - strength=0.8 - ) + AssociateInput(source_id=mem_solution2.id, target_id=mem_solution1.id, relationship="alternative_to", strength=0.8), ) assert assoc_alt.relationship == "alternative_to" # Test IMPROVES assoc_improves = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_improvement.id, - target_id=mem_solution1.id, - relationship="improves", - 
strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_improvement.id, target_id=mem_solution1.id, relationship="improves", strength=0.9) ) assert assoc_improves.relationship == "improves" @pytest.mark.asyncio async def test_context_relationships( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test CONTEXT category: OCCURS_IN, APPLIES_TO, WORKS_WITH, REQUIRES.""" - mem_bug = await memory_service.remember( - workspace_id, RememberInput(content="Race condition in auth") - ) - mem_context = await memory_service.remember( - workspace_id, RememberInput(content="Production environment") - ) - mem_tool = await memory_service.remember( - workspace_id, RememberInput(content="Thread debugger") - ) - mem_dependency = await memory_service.remember( - workspace_id, RememberInput(content="Thread-safe library") - ) + mem_bug = await memory_service.remember(workspace_id, RememberInput(content="Race condition in auth")) + mem_context = await memory_service.remember(workspace_id, RememberInput(content="Production environment")) + mem_tool = await memory_service.remember(workspace_id, RememberInput(content="Thread debugger")) + mem_dependency = await memory_service.remember(workspace_id, RememberInput(content="Thread-safe library")) # Test OCCURS_IN assoc_occurs = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_bug.id, - target_id=mem_context.id, - relationship="occurs_in", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_bug.id, target_id=mem_context.id, relationship="occurs_in", strength=0.9) ) assert assoc_occurs.relationship == "occurs_in" # Test APPLIES_TO assoc_applies = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_tool.id, - target_id=mem_bug.id, - relationship="applies_to", - strength=0.85 - ) + workspace_id, 
AssociateInput(source_id=mem_tool.id, target_id=mem_bug.id, relationship="applies_to", strength=0.85) ) assert assoc_applies.relationship == "applies_to" # Test WORKS_WITH assoc_works = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_tool.id, - target_id=mem_dependency.id, - relationship="works_with", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem_tool.id, target_id=mem_dependency.id, relationship="works_with", strength=0.8) ) assert assoc_works.relationship == "works_with" # Test REQUIRES assoc_requires = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_bug.id, - target_id=mem_dependency.id, - relationship="requires", - strength=0.95 - ) + workspace_id, AssociateInput(source_id=mem_bug.id, target_id=mem_dependency.id, relationship="requires", strength=0.95) ) assert assoc_requires.relationship == "requires" @pytest.mark.asyncio async def test_learning_relationships( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test LEARNING category: BUILDS_ON, CONTRADICTS, CONFIRMS, SUPERSEDES.""" - mem_old_theory = await memory_service.remember( - workspace_id, RememberInput(content="Old optimization approach") - ) - mem_new_theory = await memory_service.remember( - workspace_id, RememberInput(content="Improved optimization approach") - ) - mem_evidence = await memory_service.remember( - workspace_id, RememberInput(content="Benchmark results confirm improvement") - ) - mem_contradiction = await memory_service.remember( - workspace_id, RememberInput(content="Single-threaded is faster") - ) + mem_old_theory = await memory_service.remember(workspace_id, RememberInput(content="Old optimization approach")) + mem_new_theory = await memory_service.remember(workspace_id, RememberInput(content="Improved optimization approach")) 
+ mem_evidence = await memory_service.remember(workspace_id, RememberInput(content="Benchmark results confirm improvement")) + mem_contradiction = await memory_service.remember(workspace_id, RememberInput(content="Single-threaded is faster")) # Test BUILDS_ON assoc_builds = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_new_theory.id, - target_id=mem_old_theory.id, - relationship="builds_on", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_new_theory.id, target_id=mem_old_theory.id, relationship="builds_on", strength=0.9) ) assert assoc_builds.relationship == "builds_on" # Test CONTRADICTS assoc_contradicts = await association_service.associate( workspace_id, - AssociateInput( - source_id=mem_contradiction.id, - target_id=mem_new_theory.id, - relationship="contradicts", - strength=0.85 - ) + AssociateInput(source_id=mem_contradiction.id, target_id=mem_new_theory.id, relationship="contradicts", strength=0.85), ) assert assoc_contradicts.relationship == "contradicts" # Test CONFIRMS assoc_confirms = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_evidence.id, - target_id=mem_new_theory.id, - relationship="confirms", - strength=0.95 - ) + workspace_id, AssociateInput(source_id=mem_evidence.id, target_id=mem_new_theory.id, relationship="confirms", strength=0.95) ) assert assoc_confirms.relationship == "confirms" # Test SUPERSEDES assoc_supersedes = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_new_theory.id, - target_id=mem_old_theory.id, - relationship="supersedes", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_new_theory.id, target_id=mem_old_theory.id, relationship="supersedes", strength=0.9) ) assert assoc_supersedes.relationship == "supersedes" @pytest.mark.asyncio async def test_similarity_relationships( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + 
self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test SIMILARITY category: SIMILAR_TO, VARIANT_OF, RELATED_TO.""" - mem_concept1 = await memory_service.remember( - workspace_id, RememberInput(content="REST API design") - ) - mem_concept2 = await memory_service.remember( - workspace_id, RememberInput(content="GraphQL API design") - ) - mem_variant = await memory_service.remember( - workspace_id, RememberInput(content="REST API with HATEOAS") - ) + mem_concept1 = await memory_service.remember(workspace_id, RememberInput(content="REST API design")) + mem_concept2 = await memory_service.remember(workspace_id, RememberInput(content="GraphQL API design")) + mem_variant = await memory_service.remember(workspace_id, RememberInput(content="REST API with HATEOAS")) # Test SIMILAR_TO assoc_similar = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_concept1.id, - target_id=mem_concept2.id, - relationship="similar_to", - strength=0.75 - ) + workspace_id, AssociateInput(source_id=mem_concept1.id, target_id=mem_concept2.id, relationship="similar_to", strength=0.75) ) assert assoc_similar.relationship == "similar_to" # Test VARIANT_OF assoc_variant = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_variant.id, - target_id=mem_concept1.id, - relationship="variant_of", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_variant.id, target_id=mem_concept1.id, relationship="variant_of", strength=0.9) ) assert assoc_variant.relationship == "variant_of" # Test RELATED_TO assoc_related = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_concept2.id, - target_id=mem_concept1.id, - relationship="related_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem_concept2.id, target_id=mem_concept1.id, relationship="related_to", strength=0.8) ) assert assoc_related.relationship == "related_to" 
@pytest.mark.asyncio async def test_workflow_relationships( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test WORKFLOW category: FOLLOWS, DEPENDS_ON, ENABLES, BLOCKS.""" - mem_step1 = await memory_service.remember( - workspace_id, RememberInput(content="Authenticate user") - ) - mem_step2 = await memory_service.remember( - workspace_id, RememberInput(content="Load user data") - ) - mem_requirement = await memory_service.remember( - workspace_id, RememberInput(content="Valid session token") - ) - mem_blocker = await memory_service.remember( - workspace_id, RememberInput(content="Rate limit exceeded") - ) + mem_step1 = await memory_service.remember(workspace_id, RememberInput(content="Authenticate user")) + mem_step2 = await memory_service.remember(workspace_id, RememberInput(content="Load user data")) + mem_requirement = await memory_service.remember(workspace_id, RememberInput(content="Valid session token")) + mem_blocker = await memory_service.remember(workspace_id, RememberInput(content="Rate limit exceeded")) # Test FOLLOWS assoc_follows = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_step2.id, - target_id=mem_step1.id, - relationship="follows", - strength=0.95 - ) + workspace_id, AssociateInput(source_id=mem_step2.id, target_id=mem_step1.id, relationship="follows", strength=0.95) ) assert assoc_follows.relationship == "follows" # Test DEPENDS_ON assoc_depends = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_step2.id, - target_id=mem_requirement.id, - relationship="depends_on", - strength=1.0 - ) + workspace_id, AssociateInput(source_id=mem_step2.id, target_id=mem_requirement.id, relationship="depends_on", strength=1.0) ) assert assoc_depends.relationship == "depends_on" # Test ENABLES assoc_enables = await 
association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_step1.id, - target_id=mem_step2.id, - relationship="enables", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_step1.id, target_id=mem_step2.id, relationship="enables", strength=0.9) ) assert assoc_enables.relationship == "enables" # Test BLOCKS assoc_blocks = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_blocker.id, - target_id=mem_step1.id, - relationship="blocks", - strength=0.95 - ) + workspace_id, AssociateInput(source_id=mem_blocker.id, target_id=mem_step1.id, relationship="blocks", strength=0.95) ) assert assoc_blocks.relationship == "blocks" @pytest.mark.asyncio async def test_quality_relationships( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test QUALITY category: EFFECTIVE_FOR, PREFERRED_OVER, DEPRECATED_BY.""" - mem_approach1 = await memory_service.remember( - workspace_id, RememberInput(content="Synchronous processing") - ) - mem_approach2 = await memory_service.remember( - workspace_id, RememberInput(content="Async processing") - ) - mem_usecase = await memory_service.remember( - workspace_id, RememberInput(content="High-throughput API") - ) - mem_new_approach = await memory_service.remember( - workspace_id, RememberInput(content="Reactive streams") - ) + mem_approach1 = await memory_service.remember(workspace_id, RememberInput(content="Synchronous processing")) + mem_approach2 = await memory_service.remember(workspace_id, RememberInput(content="Async processing")) + mem_usecase = await memory_service.remember(workspace_id, RememberInput(content="High-throughput API")) + mem_new_approach = await memory_service.remember(workspace_id, RememberInput(content="Reactive streams")) # Test EFFECTIVE_FOR assoc_effective = await association_service.associate( 
- workspace_id, - AssociateInput( - source_id=mem_approach2.id, - target_id=mem_usecase.id, - relationship="effective_for", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_approach2.id, target_id=mem_usecase.id, relationship="effective_for", strength=0.9) ) assert assoc_effective.relationship == "effective_for" # Test PREFERRED_OVER assoc_preferred = await association_service.associate( workspace_id, - AssociateInput( - source_id=mem_approach2.id, - target_id=mem_approach1.id, - relationship="preferred_over", - strength=0.85 - ) + AssociateInput(source_id=mem_approach2.id, target_id=mem_approach1.id, relationship="preferred_over", strength=0.85), ) assert assoc_preferred.relationship == "preferred_over" # Test DEPRECATED_BY assoc_deprecated = await association_service.associate( workspace_id, - AssociateInput( - source_id=mem_approach2.id, - target_id=mem_new_approach.id, - relationship="deprecated_by", - strength=0.8 - ) + AssociateInput(source_id=mem_approach2.id, target_id=mem_new_approach.id, relationship="deprecated_by", strength=0.8), ) assert assoc_deprecated.relationship == "deprecated_by" @@ -542,48 +313,23 @@ class TestGraphTraversal: @pytest.mark.asyncio async def test_multi_hop_traversal( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test multi-hop graph traversal.""" # Create chain: A -> B -> C - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Memory A") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Memory B") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Memory C") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Memory A")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Memory B")) + mem_c = await 
memory_service.remember(workspace_id, RememberInput(content="Memory C")) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="leads_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_c.id, - relationship="leads_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="leads_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_c.id, relationship="leads_to")) # Traverse from A with depth 2 result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_a.id, - max_depth=2, - relationship_types=["leads_to"] - ) + workspace_id, GraphQueryInput(start_memory_id=mem_a.id, max_depth=2, relationship_types=["leads_to"]) ) assert len(result.paths) > 0 @@ -591,10 +337,10 @@ async def test_multi_hop_traversal( @pytest.mark.asyncio async def test_direction_outgoing( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test traversal with direction='outgoing' (only follow outgoing edges). @@ -602,49 +348,17 @@ async def test_direction_outgoing( This test documents expected behavior for when fully implemented. 
""" # Create: A -> B -> C and B -> A (reverse) - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Node A") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Node B") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Node C") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Node A")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Node B")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="Node C")) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="leads_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_c.id, - relationship="leads_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_a.id, - relationship="leads_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="leads_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_c.id, relationship="leads_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_a.id, relationship="leads_to")) # Traverse from A with outgoing only result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_a.id, - max_depth=3, - direction="outgoing" - ) + workspace_id, GraphQueryInput(start_memory_id=mem_a.id, max_depth=3, direction="outgoing") ) # Should reach B and C, but not follow B -> A @@ -655,51 +369,26 @@ async def test_direction_outgoing( @pytest.mark.asyncio async def test_direction_incoming( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + 
memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test traversal with direction='incoming' (only follow incoming edges). Note: Documents expected behavior - implementation may need enhancements. """ # Create: A -> B -> C - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Cause A") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Intermediate B") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Effect C") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Cause A")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Intermediate B")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="Effect C")) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="causes" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_c.id, - relationship="causes" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="causes")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_c.id, relationship="causes")) # Traverse from C with incoming only (find causes) result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_c.id, - max_depth=3, - direction="incoming" - ) + workspace_id, GraphQueryInput(start_memory_id=mem_c.id, max_depth=3, direction="incoming") ) # Should find B and A (causes of C) when fully implemented @@ -709,60 +398,24 @@ async def test_direction_incoming( @pytest.mark.asyncio async def test_direction_both( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: 
MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test traversal with direction='both' (bidirectional).""" # Create: A -> B, B -> C, C -> D - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Node A") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Node B") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Node C") - ) - mem_d = await memory_service.remember( - workspace_id, RememberInput(content="Node D") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Node A")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Node B")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="Node C")) + mem_d = await memory_service.remember(workspace_id, RememberInput(content="Node D")) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="related_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_c.id, - relationship="related_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_c.id, - target_id=mem_d.id, - relationship="related_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="related_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_c.id, relationship="related_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_c.id, target_id=mem_d.id, relationship="related_to")) # Traverse from B with both directions - result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_b.id, - max_depth=2, - direction="both" - ) - ) + result = await 
association_service.traverse(workspace_id, GraphQueryInput(start_memory_id=mem_b.id, max_depth=2, direction="both")) # Should find A (incoming), C (outgoing), and D (2 hops outgoing) when fully implemented # For now, verify basic structure @@ -772,104 +425,58 @@ async def test_direction_both( @pytest.mark.asyncio async def test_max_depth_limits( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test max_depth limits (1, 2, 3, 4, 5).""" # Create chain: A -> B -> C -> D -> E -> F memories = [] for i in range(6): - mem = await memory_service.remember( - workspace_id, RememberInput(content=f"Node {i}") - ) + mem = await memory_service.remember(workspace_id, RememberInput(content=f"Node {i}")) memories.append(mem) # Create chain for i in range(5): await association_service.associate( - workspace_id, - AssociateInput( - source_id=memories[i].id, - target_id=memories[i + 1].id, - relationship="leads_to" - ) + workspace_id, AssociateInput(source_id=memories[i].id, target_id=memories[i + 1].id, relationship="leads_to") ) # Test that different depth values execute without error # Actual depth behavior depends on implementation details for depth in [1, 2, 3, 4, 5]: result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=memories[0].id, - max_depth=depth, - direction="outgoing" - ) + workspace_id, GraphQueryInput(start_memory_id=memories[0].id, max_depth=depth, direction="outgoing") ) assert result is not None assert isinstance(result.paths, list) @pytest.mark.asyncio async def test_relationship_types_filtering( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test filtering by specific relationship types.""" 
# Create graph with mixed relationships - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Node A") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Node B") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Node C") - ) - mem_d = await memory_service.remember( - workspace_id, RememberInput(content="Node D") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Node A")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Node B")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="Node C")) + mem_d = await memory_service.remember(workspace_id, RememberInput(content="Node D")) # A -CAUSES-> B - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="causes" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="causes")) # A -RELATED_TO-> C - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_c.id, - relationship="related_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_c.id, relationship="related_to")) # B -TRIGGERS-> D - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_d.id, - relationship="triggers" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_d.id, relationship="triggers")) # Filter for only CAUSES and TRIGGERS result = await association_service.traverse( workspace_id, - GraphQueryInput( - start_memory_id=mem_a.id, - max_depth=3, - relationship_types=["causes", "triggers"], - direction="outgoing" - ) + GraphQueryInput(start_memory_id=mem_a.id, max_depth=3, relationship_types=["causes", "triggers"], 
direction="outgoing"), ) # Verify the query accepts relationship type filtering @@ -878,56 +485,32 @@ async def test_relationship_types_filtering( @pytest.mark.asyncio async def test_min_strength_filtering( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test filtering by minimum edge strength. Note: min_strength filtering is accepted by API but may need backend implementation. """ # Create memories with varying edge strengths - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Start") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Strong connection") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Weak connection") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Start")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Strong connection")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="Weak connection")) # Strong edge A -> B (0.9) await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="leads_to", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="leads_to", strength=0.9) ) # Weak edge A -> C (0.3) await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_c.id, - relationship="leads_to", - strength=0.3 - ) + workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_c.id, relationship="leads_to", strength=0.3) ) # Filter with min_strength=0.7 result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_a.id, - max_depth=1, - min_strength=0.7, - direction="outgoing" - ) + workspace_id, 
GraphQueryInput(start_memory_id=mem_a.id, max_depth=1, min_strength=0.7, direction="outgoing") ) # Verify API accepts min_strength parameter @@ -940,50 +523,28 @@ class TestAdvancedQueries: @pytest.mark.asyncio async def test_causal_chain_discovery( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test discovering causal chain A->B->C via CAUSES/LEADS_TO.""" # Create causal chain - mem_root_cause = await memory_service.remember( - workspace_id, RememberInput(content="Configuration error") - ) - mem_intermediate = await memory_service.remember( - workspace_id, RememberInput(content="Service crash") - ) - mem_effect = await memory_service.remember( - workspace_id, RememberInput(content="User data loss") - ) + mem_root_cause = await memory_service.remember(workspace_id, RememberInput(content="Configuration error")) + mem_intermediate = await memory_service.remember(workspace_id, RememberInput(content="Service crash")) + mem_effect = await memory_service.remember(workspace_id, RememberInput(content="User data loss")) # Root cause -> Intermediate await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_root_cause.id, - target_id=mem_intermediate.id, - relationship="causes", - strength=0.95 - ) + workspace_id, AssociateInput(source_id=mem_root_cause.id, target_id=mem_intermediate.id, relationship="causes", strength=0.95) ) # Intermediate -> Effect await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_intermediate.id, - target_id=mem_effect.id, - relationship="leads_to", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_intermediate.id, target_id=mem_effect.id, relationship="leads_to", strength=0.9) ) # Use dedicated causal chain method - result = await association_service.get_causal_chain( - workspace_id, - mem_effect.id, - 
max_depth=3 - ) + result = await association_service.get_causal_chain(workspace_id, mem_effect.id, max_depth=3) # Verify dedicated method executes assert result is not None @@ -991,47 +552,26 @@ async def test_causal_chain_discovery( @pytest.mark.asyncio async def test_solution_chain_discovery( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test finding solution chains (Problem -> Solution via SOLVES).""" - mem_problem = await memory_service.remember( - workspace_id, RememberInput(content="High memory usage") - ) - mem_solution1 = await memory_service.remember( - workspace_id, RememberInput(content="Implement caching") - ) - mem_solution2 = await memory_service.remember( - workspace_id, RememberInput(content="Reduce object creation") - ) + mem_problem = await memory_service.remember(workspace_id, RememberInput(content="High memory usage")) + mem_solution1 = await memory_service.remember(workspace_id, RememberInput(content="Implement caching")) + mem_solution2 = await memory_service.remember(workspace_id, RememberInput(content="Reduce object creation")) # Solutions -> Problem await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_solution1.id, - target_id=mem_problem.id, - relationship="solves", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_solution1.id, target_id=mem_problem.id, relationship="solves", strength=0.9) ) await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_solution2.id, - target_id=mem_problem.id, - relationship="addresses", - strength=0.85 - ) + workspace_id, AssociateInput(source_id=mem_solution2.id, target_id=mem_problem.id, relationship="addresses", strength=0.85) ) # Use dedicated solutions method - solutions = await association_service.get_solutions_for_problem( - workspace_id, - mem_problem.id - ) 
+ solutions = await association_service.get_solutions_for_problem(workspace_id, mem_problem.id) # Should find both solutions assert len(solutions) == 2 @@ -1040,38 +580,23 @@ async def test_solution_chain_discovery( @pytest.mark.asyncio async def test_contradiction_detection( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test detecting contradictions between memories.""" - mem_claim1 = await memory_service.remember( - workspace_id, RememberInput(content="REST is always better") - ) - mem_claim2 = await memory_service.remember( - workspace_id, RememberInput(content="GraphQL is always better") - ) - mem_neutral = await memory_service.remember( - workspace_id, RememberInput(content="Both have trade-offs") - ) + mem_claim1 = await memory_service.remember(workspace_id, RememberInput(content="REST is always better")) + mem_claim2 = await memory_service.remember(workspace_id, RememberInput(content="GraphQL is always better")) + await memory_service.remember(workspace_id, RememberInput(content="Both have trade-offs")) # Create contradiction await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_claim1.id, - target_id=mem_claim2.id, - relationship="contradicts", - strength=0.95 - ) + workspace_id, AssociateInput(source_id=mem_claim1.id, target_id=mem_claim2.id, relationship="contradicts", strength=0.95) ) # Find contradictions for claim1 - contradictions = await association_service.find_contradictions( - workspace_id, - mem_claim1.id - ) + contradictions = await association_service.find_contradictions(workspace_id, mem_claim1.id) # Should find claim2 assert len(contradictions) > 0 @@ -1079,60 +604,28 @@ async def test_contradiction_detection( @pytest.mark.asyncio async def test_circular_reference_handling( - self, - memory_service: MemoryService, - association_service: 
AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test handling of circular references (A->B->C->A). Tests that traversal doesn't infinite loop on cycles. """ # Create circular graph - use unique content to avoid deduplication conflicts - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Circular Node Alpha") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Circular Node Beta") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Circular Node Gamma") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Circular Node Alpha")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Circular Node Beta")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="Circular Node Gamma")) # Create circle - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="leads_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_c.id, - relationship="leads_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_c.id, - target_id=mem_a.id, - relationship="leads_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="leads_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_c.id, relationship="leads_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_c.id, target_id=mem_a.id, relationship="leads_to")) # Traverse - should not get stuck in infinite loop result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_a.id, - max_depth=5, - 
direction="outgoing" - ) + workspace_id, GraphQueryInput(start_memory_id=mem_a.id, max_depth=5, direction="outgoing") ) # Should complete without error (cycle detection prevents infinite loop) @@ -1141,70 +634,29 @@ async def test_circular_reference_handling( @pytest.mark.asyncio async def test_multiple_paths_to_same_node( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test finding multiple paths to the same destination node (diamond pattern).""" # Create diamond graph: A -> B -> D, A -> C -> D - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Start") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Path 1") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="Path 2") - ) - mem_d = await memory_service.remember( - workspace_id, RememberInput(content="Destination") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Start")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Path 1")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="Path 2")) + mem_d = await memory_service.remember(workspace_id, RememberInput(content="Destination")) # Path 1: A -> B -> D - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="leads_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_d.id, - relationship="leads_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="leads_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_d.id, relationship="leads_to")) # Path 2: A 
-> C -> D - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_c.id, - relationship="leads_to" - ) - ) - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_c.id, - target_id=mem_d.id, - relationship="leads_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_c.id, relationship="leads_to")) + await association_service.associate(workspace_id, AssociateInput(source_id=mem_c.id, target_id=mem_d.id, relationship="leads_to")) # Traverse from A result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_a.id, - max_depth=3, - direction="outgoing" - ) + workspace_id, GraphQueryInput(start_memory_id=mem_a.id, max_depth=3, direction="outgoing") ) # Verify traversal completes (diamond pattern should be supported) @@ -1213,62 +665,39 @@ async def test_multiple_paths_to_same_node( @pytest.mark.asyncio async def test_path_strength_calculation( - self, - memory_service: MemoryService, - association_service: AssociationService, - workspace_id: str, + self, + memory_service: MemoryService, + association_service: AssociationService, + workspace_id: str, ): """Test path strength calculation (product of edge strengths). Documents expected behavior: path strength = product of edge strengths. 
""" # Create path with known strengths: A -0.8-> B -0.9-> C - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Start") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Middle") - ) - mem_c = await memory_service.remember( - workspace_id, RememberInput(content="End") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Start")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Middle")) + mem_c = await memory_service.remember(workspace_id, RememberInput(content="End")) # A -> B with strength 0.8 await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="leads_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="leads_to", strength=0.8) ) # B -> C with strength 0.9 await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_b.id, - target_id=mem_c.id, - relationship="leads_to", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_b.id, target_id=mem_c.id, relationship="leads_to", strength=0.9) ) # Traverse result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_a.id, - max_depth=3, - direction="outgoing" - ) + workspace_id, GraphQueryInput(start_memory_id=mem_a.id, max_depth=3, direction="outgoing") ) # Verify API returns path structure with total_strength field assert result is not None assert isinstance(result.paths, list) if len(result.paths) > 0: - assert hasattr(result.paths[0], 'total_strength') + assert hasattr(result.paths[0], "total_strength") class TestEdgeCases: @@ -1276,110 +705,71 @@ class TestEdgeCases: @pytest.mark.asyncio async def test_association_with_nonexistent_source( - self, - association_service: AssociationService, - memory_service: MemoryService, - workspace_id: str, + self, + association_service: AssociationService, 
+ memory_service: MemoryService, + workspace_id: str, ): """Test creating association with non-existent source memory ID.""" - mem_target = await memory_service.remember( - workspace_id, RememberInput(content="Valid target") - ) + mem_target = await memory_service.remember(workspace_id, RememberInput(content="Valid target")) with pytest.raises(ValueError, match="Source memory not found"): await association_service.associate( - workspace_id, - AssociateInput( - source_id="nonexistent_id", - target_id=mem_target.id, - relationship="related_to" - ) + workspace_id, AssociateInput(source_id="nonexistent_id", target_id=mem_target.id, relationship="related_to") ) @pytest.mark.asyncio async def test_association_with_nonexistent_target( - self, - association_service: AssociationService, - memory_service: MemoryService, - workspace_id: str, + self, + association_service: AssociationService, + memory_service: MemoryService, + workspace_id: str, ): """Test creating association with non-existent target memory ID.""" - mem_source = await memory_service.remember( - workspace_id, RememberInput(content="Valid source") - ) + mem_source = await memory_service.remember(workspace_id, RememberInput(content="Valid source")) with pytest.raises(ValueError, match="Target memory not found"): await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_source.id, - target_id="nonexistent_id", - relationship="related_to" - ) + workspace_id, AssociateInput(source_id=mem_source.id, target_id="nonexistent_id", relationship="related_to") ) @pytest.mark.asyncio async def test_self_association( - self, - association_service: AssociationService, - memory_service: MemoryService, - workspace_id: str, + self, + association_service: AssociationService, + memory_service: MemoryService, + workspace_id: str, ): """Test that self-associations are prevented (source_id == target_id).""" - mem = await memory_service.remember( - workspace_id, RememberInput(content="Self-referential") - 
) + mem = await memory_service.remember(workspace_id, RememberInput(content="Self-referential")) with pytest.raises(ValueError, match="Cannot create self-association"): - await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem.id, - target_id=mem.id, - relationship="related_to" - ) - ) + await association_service.associate(workspace_id, AssociateInput(source_id=mem.id, target_id=mem.id, relationship="related_to")) @pytest.mark.asyncio async def test_duplicate_association( - self, - association_service: AssociationService, - memory_service: MemoryService, - workspace_id: str, + self, + association_service: AssociationService, + memory_service: MemoryService, + workspace_id: str, ): """Test creating duplicate association (same source/target/relationship). Database has UNIQUE constraint on (source_id, target_id, relationship). Attempting to create duplicate should fail with IntegrityError. """ - mem_a = await memory_service.remember( - workspace_id, RememberInput(content="Memory A") - ) - mem_b = await memory_service.remember( - workspace_id, RememberInput(content="Memory B") - ) + mem_a = await memory_service.remember(workspace_id, RememberInput(content="Memory A")) + mem_b = await memory_service.remember(workspace_id, RememberInput(content="Memory B")) # Create first association assoc1 = await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="related_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, relationship="related_to", strength=0.8) ) # Attempt to create duplicate - should fail due to unique constraint with pytest.raises(Exception): # IntegrityError or similar await association_service.associate( - workspace_id, - AssociateInput( - source_id=mem_a.id, - target_id=mem_b.id, - relationship="related_to", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem_a.id, target_id=mem_b.id, 
relationship="related_to", strength=0.9) ) # First association should still exist @@ -1387,25 +777,17 @@ async def test_duplicate_association( @pytest.mark.asyncio async def test_empty_graph_traversal( - self, - association_service: AssociationService, - memory_service: MemoryService, - workspace_id: str, + self, + association_service: AssociationService, + memory_service: MemoryService, + workspace_id: str, ): """Test traversal from a node with no connections.""" # Create isolated memory - mem_isolated = await memory_service.remember( - workspace_id, RememberInput(content="Isolated node") - ) + mem_isolated = await memory_service.remember(workspace_id, RememberInput(content="Isolated node")) # Traverse from isolated node - result = await association_service.traverse( - workspace_id, - GraphQueryInput( - start_memory_id=mem_isolated.id, - max_depth=3 - ) - ) + result = await association_service.traverse(workspace_id, GraphQueryInput(start_memory_id=mem_isolated.id, max_depth=3)) # Should return empty result assert len(result.paths) == 0 @@ -1413,15 +795,13 @@ async def test_empty_graph_traversal( @pytest.mark.asyncio async def test_invalid_direction( - self, - association_service: AssociationService, - memory_service: MemoryService, - workspace_id: str, + self, + association_service: AssociationService, + memory_service: MemoryService, + workspace_id: str, ): """Test that invalid direction values are rejected.""" - mem = await memory_service.remember( - workspace_id, RememberInput(content="Test memory") - ) + mem = await memory_service.remember(workspace_id, RememberInput(content="Test memory")) # GraphQueryInput validation should catch this at Pydantic level with pytest.raises(Exception): # Could be ValidationError @@ -1430,6 +810,6 @@ async def test_invalid_direction( GraphQueryInput( start_memory_id=mem.id, max_depth=3, - direction="invalid_direction" # type: ignore - ) + direction="invalid_direction", # type: ignore + ), ) diff --git 
a/memorylayer-core-python/tests/unit/test_audit_service.py b/memorylayer-core-python/tests/unit/test_audit_service.py index 647428c..2ab77ca 100644 --- a/memorylayer-core-python/tests/unit/test_audit_service.py +++ b/memorylayer-core-python/tests/unit/test_audit_service.py @@ -1,14 +1,14 @@ """ Unit tests for the audit service — AuditEvent dataclass and NoopAuditService. """ -from datetime import datetime, timezone + +from datetime import UTC, datetime import pytest from memorylayer_server.services.audit.base import AuditEvent from memorylayer_server.services.audit.noop import NoopAuditService - # ============================================================================ # AuditEvent dataclass tests # ============================================================================ @@ -33,12 +33,12 @@ def test_id_is_unique_across_instances(self): def test_timestamp_defaults_to_utc(self): """timestamp defaults to a timezone-aware UTC datetime.""" - before = datetime.now(timezone.utc) + before = datetime.now(UTC) event = AuditEvent(event_type="memory", action="create", tenant_id="t1") - after = datetime.now(timezone.utc) + after = datetime.now(UTC) assert event.timestamp.tzinfo is not None - assert event.timestamp.tzinfo == timezone.utc + assert event.timestamp.tzinfo == UTC assert before <= event.timestamp <= after def test_metadata_defaults_to_empty_dict(self): @@ -140,7 +140,7 @@ async def test_query_with_all_filters_returns_empty_list(self): tenant_id="t1", workspace_id="ws-1", event_type="auth", - since=datetime.now(timezone.utc), + since=datetime.now(UTC), limit=50, ) assert results == [] diff --git a/memorylayer-core-python/tests/unit/test_chat_history.py b/memorylayer-core-python/tests/unit/test_chat_history.py index 8245c7f..6ef07be 100644 --- a/memorylayer-core-python/tests/unit/test_chat_history.py +++ b/memorylayer-core-python/tests/unit/test_chat_history.py @@ -1,18 +1,18 @@ """Unit tests for chat history models and service.""" -import json -from datetime 
import datetime, timezone, timedelta + +from datetime import UTC, datetime, timedelta import pytest from memorylayer_server.models.chat import ( + AppendMessagesInput, ChatMessage, ChatMessageContent, ChatThread, ChatThreadWithMessages, CreateThreadInput, - AppendMessagesInput, - MessageInput, DecompositionResult, + MessageInput, ) @@ -127,7 +127,7 @@ def test_is_expired_true_when_past(self): thread = ChatThread( id="t1", workspace_id="ws", - expires_at=datetime.now(timezone.utc) - timedelta(hours=1), + expires_at=datetime.now(UTC) - timedelta(hours=1), ) assert thread.is_expired @@ -135,7 +135,7 @@ def test_is_expired_false_when_future(self): thread = ChatThread( id="t1", workspace_id="ws", - expires_at=datetime.now(timezone.utc) + timedelta(hours=1), + expires_at=datetime.now(UTC) + timedelta(hours=1), ) assert not thread.is_expired @@ -201,7 +201,7 @@ def test_with_all_fields(self): subject_id="drew", title="Planning Session", metadata={"source": "mcp"}, - expires_at=datetime(2026, 12, 31, tzinfo=timezone.utc), + expires_at=datetime(2026, 12, 31, tzinfo=UTC), ) assert inp.thread_id == "my-thread" assert inp.title == "Planning Session" @@ -266,15 +266,20 @@ class TestChatMessageSerialization: def test_string_content_serializes(self): msg = ChatMessage( - id="m1", thread_id="t1", message_index=0, - role="user", content="hello", + id="m1", + thread_id="t1", + message_index=0, + role="user", + content="hello", ) data = msg.model_dump(mode="json") assert data["content"] == "hello" def test_structured_content_serializes(self): msg = ChatMessage( - id="m1", thread_id="t1", message_index=0, + id="m1", + thread_id="t1", + message_index=0, role="assistant", content=[ChatMessageContent(type="text", text="hi")], ) diff --git a/memorylayer-core-python/tests/unit/test_chat_storage.py b/memorylayer-core-python/tests/unit/test_chat_storage.py index 1264004..a415bdf 100644 --- a/memorylayer-core-python/tests/unit/test_chat_storage.py +++ 
b/memorylayer-core-python/tests/unit/test_chat_storage.py @@ -4,24 +4,25 @@ Tests the SQLite storage backend directly with chat thread and message operations. Each test class gets its own isolated storage backend with an in-memory (temp file) database. """ + +from datetime import UTC, datetime, timedelta + import pytest import pytest_asyncio -from datetime import datetime, timezone, timedelta -from memorylayer_server.services.storage.sqlite import SQLiteStorageBackend from memorylayer_server.models.chat import ( - ChatThread, - ChatMessage, ChatMessageContent, + ChatThread, MessageInput, ) from memorylayer_server.models.workspace import Workspace - +from memorylayer_server.services.storage.sqlite import SQLiteStorageBackend # --------------------------------------------------------------------------- # Fixtures # --------------------------------------------------------------------------- + @pytest_asyncio.fixture async def storage(tmp_path): """ @@ -46,8 +47,8 @@ async def workspace_id(storage) -> str: id=ws_id, tenant_id="_default", name="Chat Test Workspace", - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) await storage.create_workspace(workspace) return ws_id @@ -55,7 +56,7 @@ async def workspace_id(storage) -> str: def _make_thread(workspace_id: str, thread_id: str = "thread-1", **kwargs) -> ChatThread: """Helper: build a ChatThread with sensible defaults.""" - now = datetime.now(timezone.utc) + now = datetime.now(UTC) return ChatThread( id=thread_id, workspace_id=workspace_id, @@ -75,6 +76,7 @@ def _make_msg_input(role: str = "user", content: str = "hello") -> MessageInput: # TestChatThreadStorage # --------------------------------------------------------------------------- + @pytest.mark.asyncio class TestChatThreadStorage: """Tests for chat thread CRUD in SQLite.""" @@ -125,9 +127,7 @@ async def test_get_thread_not_found(self, storage, workspace_id): async def 
test_list_threads(self, storage, workspace_id): """Create multiple threads and verify list_threads returns all of them.""" for i in range(3): - await storage.create_thread( - _make_thread(workspace_id, thread_id=f"t-list-{i}", title=f"Thread {i}") - ) + await storage.create_thread(_make_thread(workspace_id, thread_id=f"t-list-{i}", title=f"Thread {i}")) threads = await storage.list_threads(workspace_id) @@ -138,15 +138,9 @@ async def test_list_threads(self, storage, workspace_id): async def test_list_threads_by_user(self, storage, workspace_id): """list_threads with user_id filters to that user's threads only.""" - await storage.create_thread( - _make_thread(workspace_id, thread_id="t-user-a", user_id="alice") - ) - await storage.create_thread( - _make_thread(workspace_id, thread_id="t-user-b", user_id="bob") - ) - await storage.create_thread( - _make_thread(workspace_id, thread_id="t-user-a2", user_id="alice") - ) + await storage.create_thread(_make_thread(workspace_id, thread_id="t-user-a", user_id="alice")) + await storage.create_thread(_make_thread(workspace_id, thread_id="t-user-b", user_id="bob")) + await storage.create_thread(_make_thread(workspace_id, thread_id="t-user-a2", user_id="alice")) alice_threads = await storage.list_threads(workspace_id, user_id="alice") thread_ids = {t.id for t in alice_threads} @@ -157,15 +151,11 @@ async def test_list_threads_by_user(self, storage, workspace_id): async def test_list_threads_excludes_expired(self, storage, workspace_id): """list_threads does not return threads whose expires_at is in the past.""" - past = datetime.now(timezone.utc) - timedelta(hours=1) - future = datetime.now(timezone.utc) + timedelta(hours=1) + past = datetime.now(UTC) - timedelta(hours=1) + future = datetime.now(UTC) + timedelta(hours=1) - await storage.create_thread( - _make_thread(workspace_id, thread_id="t-expired", expires_at=past) - ) - await storage.create_thread( - _make_thread(workspace_id, thread_id="t-active", expires_at=future) - ) + 
await storage.create_thread(_make_thread(workspace_id, thread_id="t-expired", expires_at=past)) + await storage.create_thread(_make_thread(workspace_id, thread_id="t-active", expires_at=future)) await storage.create_thread( _make_thread(workspace_id, thread_id="t-permanent") # no expiry ) @@ -210,6 +200,7 @@ async def test_delete_thread(self, storage, workspace_id): # TestChatMessageStorage # --------------------------------------------------------------------------- + @pytest.mark.asyncio class TestChatMessageStorage: """Tests for chat message operations in SQLite.""" @@ -239,16 +230,24 @@ async def test_append_increments_count(self, storage, workspace_id): thread = _make_thread(workspace_id, thread_id="t-count") await storage.create_thread(thread) - await storage.append_messages(workspace_id, "t-count", [ - _make_msg_input("user", "msg 1"), - _make_msg_input("assistant", "msg 2"), - ]) + await storage.append_messages( + workspace_id, + "t-count", + [ + _make_msg_input("user", "msg 1"), + _make_msg_input("assistant", "msg 2"), + ], + ) t = await storage.get_thread(workspace_id, "t-count") assert t.message_count == 2 - await storage.append_messages(workspace_id, "t-count", [ - _make_msg_input("user", "msg 3"), - ]) + await storage.append_messages( + workspace_id, + "t-count", + [ + _make_msg_input("user", "msg 3"), + ], + ) t = await storage.get_thread(workspace_id, "t-count") assert t.message_count == 3 @@ -257,11 +256,15 @@ async def test_get_messages_ordered(self, storage, workspace_id): thread = _make_thread(workspace_id, thread_id="t-order") await storage.create_thread(thread) - await storage.append_messages(workspace_id, "t-order", [ - _make_msg_input("user", "first"), - _make_msg_input("assistant", "second"), - _make_msg_input("user", "third"), - ]) + await storage.append_messages( + workspace_id, + "t-order", + [ + _make_msg_input("user", "first"), + _make_msg_input("assistant", "second"), + _make_msg_input("user", "third"), + ], + ) messages = await 
storage.get_messages(workspace_id, "t-order") @@ -333,10 +336,14 @@ async def test_delete_thread_cascades_messages(self, storage, workspace_id): thread = _make_thread(workspace_id, thread_id="t-cascade") await storage.create_thread(thread) - await storage.append_messages(workspace_id, "t-cascade", [ - _make_msg_input("user", "will be gone"), - _make_msg_input("assistant", "also gone"), - ]) + await storage.append_messages( + workspace_id, + "t-cascade", + [ + _make_msg_input("user", "will be gone"), + _make_msg_input("assistant", "also gone"), + ], + ) # Confirm messages exist before deletion before = await storage.get_messages(workspace_id, "t-cascade") diff --git a/memorylayer-core-python/tests/unit/test_context_environment.py b/memorylayer-core-python/tests/unit/test_context_environment.py index f740dbf..e293a7e 100644 --- a/memorylayer-core-python/tests/unit/test_context_environment.py +++ b/memorylayer-core-python/tests/unit/test_context_environment.py @@ -8,12 +8,36 @@ - Hooks: persistence hook interface - RLM: reasoning loop runner (unit-level, no LLM) """ -import asyncio -import json + import pytest +from memorylayer_server.config import ( + DEFAULT_MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE, + DEFAULT_MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, + DEFAULT_MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, + DEFAULT_MEMORYLAYER_CONTEXT_EXECUTOR, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS, + DEFAULT_MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, + DEFAULT_MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, + DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, + DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, + MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE, + MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, + MEMORYLAYER_CONTEXT_EXECUTOR, +) +from memorylayer_server.services.context_environment.base import ( + EXT_CONTEXT_ENVIRONMENT_SERVICE, + ContextEnvironmentServicePluginBase, +) +from 
memorylayer_server.services.context_environment.default import ( + DefaultContextEnvironmentService, + DefaultContextEnvironmentServicePlugin, + _estimate_size, + _safe_preview, +) from memorylayer_server.services.context_environment.executors.base import ( - ExecutorProvider, ExecutionResult, ) from memorylayer_server.services.context_environment.executors.restricted import ( @@ -23,55 +47,15 @@ ContextPersistenceHook, NoOpPersistenceHook, ) -from memorylayer_server.services.context_environment.base import ( - ContextEnvironmentService, - ContextEnvironmentServicePluginBase, - EXT_CONTEXT_ENVIRONMENT_SERVICE, -) -from memorylayer_server.services.context_environment import ( - get_context_environment_service, -) -from memorylayer_server.services.context_environment.default import ( - DefaultContextEnvironmentService, - DefaultContextEnvironmentServicePlugin, - _safe_preview, - _estimate_size, - _memory_to_dict, -) from memorylayer_server.services.context_environment.rlm import ( - RLMRunner, _summarize_state, ) -from memorylayer_server.config import ( - MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE, - DEFAULT_MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE, - MEMORYLAYER_CONTEXT_EXECUTOR, - DEFAULT_MEMORYLAYER_CONTEXT_EXECUTOR, - MEMORYLAYER_CONTEXT_MAX_OPERATIONS, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS, - MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_EXEC_SECONDS, - MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_OUTPUT_CHARS, - MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, - DEFAULT_MEMORYLAYER_CONTEXT_QUERY_MAX_TOKENS, - MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, - DEFAULT_MEMORYLAYER_CONTEXT_MAX_MEMORY_BYTES, - MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, - DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_ITERATIONS, - MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, - DEFAULT_MEMORYLAYER_CONTEXT_RLM_MAX_EXEC_SECONDS, - MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, - DEFAULT_MEMORYLAYER_CONTEXT_EXEC_SOFT_CAP, - MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, - 
DEFAULT_MEMORYLAYER_CONTEXT_EXEC_HARD_CAP, -) - # ============================================ # Fixtures # ============================================ + class MockVariables: """Minimal Variables mock for testing.""" @@ -140,16 +124,17 @@ def service(mock_vars, restricted_executor, tracking_hook): # Config Constants Tests # ============================================ + class TestConfigConstants: """Test that all config constants are properly defined.""" def test_service_config(self): - assert MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE == 'MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE' - assert DEFAULT_MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE == 'default' + assert MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE == "MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE" + assert DEFAULT_MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE == "default" def test_executor_config(self): - assert MEMORYLAYER_CONTEXT_EXECUTOR == 'MEMORYLAYER_CONTEXT_EXECUTOR' - assert DEFAULT_MEMORYLAYER_CONTEXT_EXECUTOR == 'smolagents' + assert MEMORYLAYER_CONTEXT_EXECUTOR == "MEMORYLAYER_CONTEXT_EXECUTOR" + assert DEFAULT_MEMORYLAYER_CONTEXT_EXECUTOR == "smolagents" def test_limits_config(self): assert DEFAULT_MEMORYLAYER_CONTEXT_MAX_OPERATIONS == 1_000_000 @@ -171,12 +156,13 @@ def test_cap_config(self): # ExecutionResult Tests # ============================================ + class TestExecutionResult: """Test the ExecutionResult dataclass.""" def test_default_fields(self): - result = ExecutionResult(output='hello', result=42, error=None) - assert result.output == 'hello' + result = ExecutionResult(output="hello", result=42, error=None) + assert result.output == "hello" assert result.result == 42 assert result.error is None assert result.variables_changed == [] @@ -184,13 +170,13 @@ def test_default_fields(self): def test_with_all_fields(self): result = ExecutionResult( - output='out', - result='val', - error='err', - variables_changed=['x', 'y'], + output="out", + result="val", + error="err", + variables_changed=["x", "y"], 
operations_count=10, ) - assert result.variables_changed == ['x', 'y'] + assert result.variables_changed == ["x", "y"] assert result.operations_count == 10 @@ -198,198 +184,191 @@ def test_with_all_fields(self): # RestrictedExecutor Tests # ============================================ + class TestRestrictedExecutor: """Test the AST-based restricted executor.""" async def test_empty_code(self, restricted_executor): state = {} - result = await restricted_executor.execute('', state) + result = await restricted_executor.execute("", state) assert result.error is None - assert result.output == '' + assert result.output == "" assert result.result is None async def test_whitespace_only(self, restricted_executor): state = {} - result = await restricted_executor.execute(' \n \n ', state) + result = await restricted_executor.execute(" \n \n ", state) assert result.error is None async def test_simple_assignment(self, restricted_executor): state = {} - result = await restricted_executor.execute('x = 42', state) + result = await restricted_executor.execute("x = 42", state) assert result.error is None - assert state['x'] == 42 - assert 'x' in result.variables_changed + assert state["x"] == 42 + assert "x" in result.variables_changed async def test_multiple_assignments(self, restricted_executor): state = {} - result = await restricted_executor.execute('x = 1\ny = 2\nz = 3', state) + result = await restricted_executor.execute("x = 1\ny = 2\nz = 3", state) assert result.error is None - assert state['x'] == 1 - assert state['y'] == 2 - assert state['z'] == 3 + assert state["x"] == 1 + assert state["y"] == 2 + assert state["z"] == 3 async def test_expression_result(self, restricted_executor): - state = {'x': 10} - result = await restricted_executor.execute('x + 5', state) + state = {"x": 10} + result = await restricted_executor.execute("x + 5", state) assert result.error is None assert result.result == 15 async def test_assignment_then_expression(self, restricted_executor): state = {} 
- result = await restricted_executor.execute('x = 10\ny = 20\nx + y', state) + result = await restricted_executor.execute("x = 10\ny = 20\nx + y", state) assert result.error is None assert result.result == 30 async def test_list_comprehension(self, restricted_executor): - state = {'data': [1, 2, 3, 4, 5]} - result = await restricted_executor.execute( - 'evens = [x for x in data if x % 2 == 0]', state - ) + state = {"data": [1, 2, 3, 4, 5]} + result = await restricted_executor.execute("evens = [x for x in data if x % 2 == 0]", state) assert result.error is None - assert state['evens'] == [2, 4] + assert state["evens"] == [2, 4] async def test_dict_comprehension(self, restricted_executor): - state = {'keys': ['a', 'b', 'c']} - result = await restricted_executor.execute( - 'mapping = {k: i for i, k in enumerate(keys)}', state - ) + state = {"keys": ["a", "b", "c"]} + result = await restricted_executor.execute("mapping = {k: i for i, k in enumerate(keys)}", state) assert result.error is None - assert state['mapping'] == {'a': 0, 'b': 1, 'c': 2} + assert state["mapping"] == {"a": 0, "b": 1, "c": 2} async def test_builtin_functions(self, restricted_executor): - state = {'data': [3, 1, 4, 1, 5]} - result = await restricted_executor.execute('result = sorted(data)', state) + state = {"data": [3, 1, 4, 1, 5]} + result = await restricted_executor.execute("result = sorted(data)", state) assert result.error is None - assert state['result'] == [1, 1, 3, 4, 5] + assert state["result"] == [1, 1, 3, 4, 5] async def test_len_min_max_sum(self, restricted_executor): - state = {'nums': [10, 20, 30]} + state = {"nums": [10, 20, 30]} result = await restricted_executor.execute( - 'total = sum(nums)\ncount = len(nums)\nlo = min(nums)\nhi = max(nums)', + "total = sum(nums)\ncount = len(nums)\nlo = min(nums)\nhi = max(nums)", state, ) assert result.error is None - assert state['total'] == 60 - assert state['count'] == 3 - assert state['lo'] == 10 - assert state['hi'] == 30 + assert 
state["total"] == 60 + assert state["count"] == 3 + assert state["lo"] == 10 + assert state["hi"] == 30 async def test_string_methods(self, restricted_executor): - state = {'text': 'Hello World'} + state = {"text": "Hello World"} result = await restricted_executor.execute( - 'upper = text.upper()\nlower = text.lower()\nwords = text.split()', + "upper = text.upper()\nlower = text.lower()\nwords = text.split()", state, ) assert result.error is None - assert state['upper'] == 'HELLO WORLD' - assert state['lower'] == 'hello world' - assert state['words'] == ['Hello', 'World'] + assert state["upper"] == "HELLO WORLD" + assert state["lower"] == "hello world" + assert state["words"] == ["Hello", "World"] async def test_subscript_and_slice(self, restricted_executor): - state = {'items': [10, 20, 30, 40, 50]} + state = {"items": [10, 20, 30, 40, 50]} result = await restricted_executor.execute( - 'first = items[0]\nlast = items[-1]\nmid = items[1:4]', + "first = items[0]\nlast = items[-1]\nmid = items[1:4]", state, ) assert result.error is None - assert state['first'] == 10 - assert state['last'] == 50 - assert state['mid'] == [20, 30, 40] + assert state["first"] == 10 + assert state["last"] == 50 + assert state["mid"] == [20, 30, 40] async def test_dict_access(self, restricted_executor): - state = {'d': {'name': 'test', 'value': 42}} + state = {"d": {"name": "test", "value": 42}} result = await restricted_executor.execute('name = d["name"]', state) assert result.error is None - assert state['name'] == 'test' + assert state["name"] == "test" async def test_boolean_logic(self, restricted_executor): - state = {'a': True, 'b': False} + state = {"a": True, "b": False} result = await restricted_executor.execute( - 'c = a and b\nd = a or b\ne = not b', + "c = a and b\nd = a or b\ne = not b", state, ) assert result.error is None - assert state['c'] is False - assert state['d'] is True - assert state['e'] is True + assert state["c"] is False + assert state["d"] is True + assert 
state["e"] is True async def test_comparison(self, restricted_executor): - state = {'x': 5} + state = {"x": 5} result = await restricted_executor.execute( - 'gt = x > 3\neq = x == 5\nlt = x < 2', + "gt = x > 3\neq = x == 5\nlt = x < 2", state, ) assert result.error is None - assert state['gt'] is True - assert state['eq'] is True - assert state['lt'] is False + assert state["gt"] is True + assert state["eq"] is True + assert state["lt"] is False async def test_if_expression(self, restricted_executor): - state = {'x': 10} + state = {"x": 10} result = await restricted_executor.execute( 'label = "big" if x > 5 else "small"', state, ) assert result.error is None - assert state['label'] == 'big' + assert state["label"] == "big" async def test_del_statement(self, restricted_executor): - state = {'x': 1, 'y': 2} - result = await restricted_executor.execute('del x', state) + state = {"x": 1, "y": 2} + result = await restricted_executor.execute("del x", state) assert result.error is None - assert 'x' not in state - assert 'y' in state - assert 'x' in result.variables_changed + assert "x" not in state + assert "y" in state + assert "x" in result.variables_changed async def test_augmented_assignment(self, restricted_executor): - state = {'x': 10} - result = await restricted_executor.execute('x += 5', state) + state = {"x": 10} + result = await restricted_executor.execute("x += 5", state) assert result.error is None - assert state['x'] == 15 + assert state["x"] == 15 async def test_print_capture(self, restricted_executor): state = {} result = await restricted_executor.execute('print("hello world")', state) assert result.error is None - assert 'hello world' in result.output + assert "hello world" in result.output async def test_state_persistence(self, restricted_executor): state = {} - await restricted_executor.execute('x = 1', state) - await restricted_executor.execute('y = x + 1', state) - result = await restricted_executor.execute('x + y', state) + await 
restricted_executor.execute("x = 1", state) + await restricted_executor.execute("y = x + 1", state) + result = await restricted_executor.execute("x + y", state) assert result.result == 3 # --- Blocked operations --- async def test_import_blocked(self, restricted_executor): state = {} - result = await restricted_executor.execute('import os', state) + result = await restricted_executor.execute("import os", state) assert result.error is not None - assert 'Disallowed' in result.error or 'Import' in result.error + assert "Disallowed" in result.error or "Import" in result.error async def test_for_loop_blocked(self, restricted_executor): state = {} - result = await restricted_executor.execute( - 'total = 0\nfor i in range(5):\n total += i', state - ) + result = await restricted_executor.execute("total = 0\nfor i in range(5):\n total += i", state) assert result.error is not None async def test_while_loop_blocked(self, restricted_executor): state = {} - result = await restricted_executor.execute( - 'x = 0\nwhile x < 5:\n x += 1', state - ) + result = await restricted_executor.execute("x = 0\nwhile x < 5:\n x += 1", state) assert result.error is not None async def test_function_def_blocked(self, restricted_executor): state = {} - result = await restricted_executor.execute('def foo(): return 42', state) + result = await restricted_executor.execute("def foo(): return 42", state) assert result.error is not None async def test_class_def_blocked(self, restricted_executor): state = {} - result = await restricted_executor.execute('class Foo: pass', state) + result = await restricted_executor.execute("class Foo: pass", state) assert result.error is not None async def test_exec_eval_blocked(self, restricted_executor): @@ -401,27 +380,25 @@ async def test_exec_eval_blocked(self, restricted_executor): async def test_syntax_error(self, restricted_executor): state = {} - result = await restricted_executor.execute('x = ', state) + result = await restricted_executor.execute("x = ", state) 
assert result.error is not None - assert 'Syntax error' in result.error + assert "Syntax error" in result.error async def test_runtime_error(self, restricted_executor): state = {} - result = await restricted_executor.execute('x = 1 / 0', state) + result = await restricted_executor.execute("x = 1 / 0", state) assert result.error is not None - assert 'ZeroDivision' in result.error + assert "ZeroDivision" in result.error async def test_name_error(self, restricted_executor): state = {} - result = await restricted_executor.execute('x = undefined_var', state) + result = await restricted_executor.execute("x = undefined_var", state) assert result.error is not None - assert 'NameError' in result.error + assert "NameError" in result.error async def test_output_truncation(self, restricted_executor): state = {} - result = await restricted_executor.execute( - 'print("x" * 200)', state, max_output_chars=50 - ) + result = await restricted_executor.execute('print("x" * 200)', state, max_output_chars=50) assert result.error is None assert len(result.output) <= 50 @@ -429,19 +406,20 @@ async def test_allowed_modules_empty(self, restricted_executor): assert restricted_executor.get_allowed_modules() == [] async def test_if_statement(self, restricted_executor): - state = {'x': 10} + state = {"x": 10} result = await restricted_executor.execute( 'if x > 5:\n label = "big"\n', state, ) assert result.error is None - assert state['label'] == 'big' + assert state["label"] == "big" # ============================================ # SmolagentsExecutor Tests # ============================================ + class TestSmolagentsExecutor: """Test the smolagents executor wrapper.""" @@ -450,27 +428,26 @@ def smolagents_executor(self): from memorylayer_server.services.context_environment.executors.smolagents_executor import ( SmolagentsExecutor, ) + return SmolagentsExecutor() async def test_simple_assignment(self, smolagents_executor): state = {} - result = await smolagents_executor.execute('x = 42', 
state) + result = await smolagents_executor.execute("x = 42", state) assert result.error is None - assert state['x'] == 42 + assert state["x"] == 42 async def test_expression_result(self, smolagents_executor): - state = {'x': 10} - result = await smolagents_executor.execute('x + 5', state) + state = {"x": 10} + result = await smolagents_executor.execute("x + 5", state) assert result.error is None assert result.result == 15 async def test_builtin_sum(self, smolagents_executor): state = {} - result = await smolagents_executor.execute( - 'data = [1, 2, 3]\ntotal = sum(data)', state - ) + result = await smolagents_executor.execute("data = [1, 2, 3]\ntotal = sum(data)", state) assert result.error is None - assert state['total'] == 6 + assert state["total"] == 6 async def test_json_import(self, smolagents_executor): state = {} @@ -479,96 +456,98 @@ async def test_json_import(self, smolagents_executor): state, ) assert result.error is None - assert state['result'] == '{"key": "value"}' + assert state["result"] == '{"key": "value"}' async def test_forbidden_import(self, smolagents_executor): state = {} - result = await smolagents_executor.execute('import os', state) + result = await smolagents_executor.execute("import os", state) assert result.error is not None async def test_state_persistence(self, smolagents_executor): state = {} - await smolagents_executor.execute('x = 10', state) - await smolagents_executor.execute('y = x * 2', state) - result = await smolagents_executor.execute('x + y', state) + await smolagents_executor.execute("x = 10", state) + await smolagents_executor.execute("y = x * 2", state) + result = await smolagents_executor.execute("x + y", state) assert result.result == 30 async def test_variables_changed_tracking(self, smolagents_executor): state = {} - result = await smolagents_executor.execute('a = 1\nb = 2', state) - assert 'a' in result.variables_changed - assert 'b' in result.variables_changed + result = await smolagents_executor.execute("a = 1\nb 
= 2", state) + assert "a" in result.variables_changed + assert "b" in result.variables_changed async def test_allowed_modules(self, smolagents_executor): modules = smolagents_executor.get_allowed_modules() - assert 'json' in modules - assert 'math' in modules - assert 'collections' in modules - assert 'datetime' in modules - assert 'functools' in modules + assert "json" in modules + assert "math" in modules + assert "collections" in modules + assert "datetime" in modules + assert "functools" in modules async def test_empty_code(self, smolagents_executor): state = {} - result = await smolagents_executor.execute('', state) + result = await smolagents_executor.execute("", state) assert result.error is None async def test_comprehension(self, smolagents_executor): - state = {'nums': [1, 2, 3, 4, 5]} - result = await smolagents_executor.execute( - 'doubled = [x * 2 for x in nums]', state - ) + state = {"nums": [1, 2, 3, 4, 5]} + result = await smolagents_executor.execute("doubled = [x * 2 for x in nums]", state) assert result.error is None - assert state['doubled'] == [2, 4, 6, 8, 10] + assert state["doubled"] == [2, 4, 6, 8, 10] # ============================================ # Hooks Tests # ============================================ + class TestHooks: """Test persistence hook interface.""" async def test_noop_hook(self): hook = NoOpPersistenceHook() # All methods should be callable without error - await hook.on_state_changed('sess', {}) - await hook.on_checkpoint('sess', {}) - await hook.on_session_end('sess', {}) - result = await hook.on_session_restore('sess') + await hook.on_state_changed("sess", {}) + await hook.on_checkpoint("sess", {}) + await hook.on_session_end("sess", {}) + result = await hook.on_session_restore("sess") assert result is None async def test_tracking_hook(self, tracking_hook): - await tracking_hook.on_state_changed('s1', {'x': 1}) - await tracking_hook.on_checkpoint('s1', {'x': 1}) - await tracking_hook.on_session_end('s1', {'x': 1}) - await 
tracking_hook.on_session_restore('s1') + await tracking_hook.on_state_changed("s1", {"x": 1}) + await tracking_hook.on_checkpoint("s1", {"x": 1}) + await tracking_hook.on_session_end("s1", {"x": 1}) + await tracking_hook.on_session_restore("s1") assert len(tracking_hook.state_changed_calls) == 1 - assert tracking_hook.state_changed_calls[0][0] == 's1' + assert tracking_hook.state_changed_calls[0][0] == "s1" assert len(tracking_hook.checkpoint_calls) == 1 assert len(tracking_hook.session_end_calls) == 1 - assert tracking_hook.restore_calls == ['s1'] + assert tracking_hook.restore_calls == ["s1"] # ============================================ # Service ABC Tests # ============================================ + class TestServiceABC: """Test service ABC and plugin base.""" def test_extension_point_constant(self): - assert EXT_CONTEXT_ENVIRONMENT_SERVICE == 'memorylayer-context-environment-service' + assert EXT_CONTEXT_ENVIRONMENT_SERVICE == "memorylayer-context-environment-service" def test_plugin_base_name(self): class TestPlugin(ContextEnvironmentServicePluginBase): - PROVIDER_NAME = 'test' + PROVIDER_NAME = "test" + plugin = TestPlugin() - assert plugin.name() == 'memorylayer-context-environment-service|test' + assert plugin.name() == "memorylayer-context-environment-service|test" def test_plugin_base_extension_point(self): class TestPlugin(ContextEnvironmentServicePluginBase): - PROVIDER_NAME = 'test' + PROVIDER_NAME = "test" + plugin = TestPlugin() v = MockVariables() assert plugin.extension_point_name(v) == EXT_CONTEXT_ENVIRONMENT_SERVICE @@ -578,183 +557,180 @@ class TestPlugin(ContextEnvironmentServicePluginBase): # DefaultContextEnvironmentService Tests # ============================================ + class TestDefaultContextEnvironmentService: """Test the default service implementation.""" async def test_execute_basic(self, service): - result = await service.execute('s1', 'x = 42') - assert result['error'] is None - assert 'x' in result['variables_changed'] + 
result = await service.execute("s1", "x = 42") + assert result["error"] is None + assert "x" in result["variables_changed"] async def test_execute_expression(self, service): - await service.execute('s1', 'x = 10') - result = await service.execute('s1', 'x + 5') - assert result['error'] is None - assert '15' in str(result['result']) + await service.execute("s1", "x = 10") + result = await service.execute("s1", "x + 5") + assert result["error"] is None + assert "15" in str(result["result"]) async def test_execute_result_var(self, service): - await service.execute('s1', 'x = [1, 2, 3]') - result = await service.execute('s1', 'len(x)', result_var='count') - assert result['error'] is None - assert 'count' in result['variables_changed'] + await service.execute("s1", "x = [1, 2, 3]") + result = await service.execute("s1", "len(x)", result_var="count") + assert result["error"] is None + assert "count" in result["variables_changed"] async def test_execute_return_result_false(self, service): - result = await service.execute('s1', '42', return_result=False) - assert result['result'] is None + result = await service.execute("s1", "42", return_result=False) + assert result["result"] is None async def test_execute_error(self, service): - result = await service.execute('s1', 'x = 1 / 0') - assert result['error'] is not None - assert 'ZeroDivision' in result['error'] + result = await service.execute("s1", "x = 1 / 0") + assert result["error"] is not None + assert "ZeroDivision" in result["error"] async def test_inspect_all(self, service): - await service.execute('s1', 'x = 1\ny = "hello"') - result = await service.inspect('s1') - assert result['variable_count'] == 2 - assert 'x' in result['variables'] - assert 'y' in result['variables'] + await service.execute("s1", 'x = 1\ny = "hello"') + result = await service.inspect("s1") + assert result["variable_count"] == 2 + assert "x" in result["variables"] + assert "y" in result["variables"] async def test_inspect_specific_variable(self, 
service): - await service.execute('s1', 'x = 42') - result = await service.inspect('s1', variable='x') - assert result['type'] == 'int' - assert '42' in result['preview'] + await service.execute("s1", "x = 42") + result = await service.inspect("s1", variable="x") + assert result["type"] == "int" + assert "42" in result["preview"] async def test_inspect_nonexistent_variable(self, service): - await service._init_environment('s1') - result = await service.inspect('s1', variable='missing') - assert 'error' in result + await service._init_environment("s1") + result = await service.inspect("s1", variable="missing") + assert "error" in result async def test_inspect_creates_environment(self, service): - result = await service.inspect('new_session') - assert result['variable_count'] == 0 + result = await service.inspect("new_session") + assert result["variable_count"] == 0 async def test_inject_value(self, service): - result = await service.inject('s1', 'data', [1, 2, 3]) - assert result['variable'] == 'data' - assert result['type'] == 'list' + result = await service.inject("s1", "data", [1, 2, 3]) + assert result["variable"] == "data" + assert result["type"] == "list" async def test_inject_json(self, service): - result = await service.inject( - 's1', 'config', '{"key": "value"}', parse_json=True - ) - assert result['type'] == 'dict' + result = await service.inject("s1", "config", '{"key": "value"}', parse_json=True) + assert result["type"] == "dict" async def test_inject_invalid_json(self, service): - result = await service.inject( - 's1', 'bad', 'not json{', parse_json=True - ) - assert 'error' in result + result = await service.inject("s1", "bad", "not json{", parse_json=True) + assert "error" in result async def test_inject_then_execute(self, service): - await service.inject('s1', 'data', [10, 20, 30]) - result = await service.execute('s1', 'total = sum(data)') - assert result['error'] is None + await service.inject("s1", "data", [10, 20, 30]) + result = await 
service.execute("s1", "total = sum(data)") + assert result["error"] is None async def test_status_no_environment(self, service): - result = await service.status('nonexistent') - assert result['exists'] is False + result = await service.status("nonexistent") + assert result["exists"] is False async def test_status_with_environment(self, service): - await service.execute('s1', 'x = 1\ny = 2') - result = await service.status('s1') - assert result['exists'] is True - assert result['variable_count'] == 2 - assert 'x' in result['variables'] + await service.execute("s1", "x = 1\ny = 2") + result = await service.status("s1") + assert result["exists"] is True + assert result["variable_count"] == 2 + assert "x" in result["variables"] async def test_cleanup(self, service): - await service.execute('s1', 'x = 42') - await service.cleanup_environment('s1') - result = await service.status('s1') - assert result['exists'] is False + await service.execute("s1", "x = 42") + await service.cleanup_environment("s1") + result = await service.status("s1") + assert result["exists"] is False async def test_cleanup_nonexistent(self, service): # Should not raise - await service.cleanup_environment('nonexistent') + await service.cleanup_environment("nonexistent") async def test_session_isolation(self, service): - await service.execute('s1', 'x = 1') - await service.execute('s2', 'x = 2') + await service.execute("s1", "x = 1") + await service.execute("s2", "x = 2") - r1 = await service.execute('s1', 'x') - r2 = await service.execute('s2', 'x') + r1 = await service.execute("s1", "x") + r2 = await service.execute("s2", "x") - assert '1' in str(r1['result']) - assert '2' in str(r2['result']) + assert "1" in str(r1["result"]) + assert "2" in str(r2["result"]) async def test_persistence_hook_state_changed(self, service, tracking_hook): - await service.execute('s1', 'x = 42') + await service.execute("s1", "x = 42") assert len(tracking_hook.state_changed_calls) >= 1 - assert 
tracking_hook.state_changed_calls[0][0] == 's1' + assert tracking_hook.state_changed_calls[0][0] == "s1" async def test_persistence_hook_no_call_on_error(self, service, tracking_hook): initial_count = len(tracking_hook.state_changed_calls) - await service.execute('s1', 'x = 1 / 0') + await service.execute("s1", "x = 1 / 0") assert len(tracking_hook.state_changed_calls) == initial_count async def test_persistence_hook_session_end(self, service, tracking_hook): - await service.execute('s1', 'x = 1') - await service.cleanup_environment('s1') + await service.execute("s1", "x = 1") + await service.cleanup_environment("s1") assert len(tracking_hook.session_end_calls) == 1 - assert tracking_hook.session_end_calls[0][0] == 's1' + assert tracking_hook.session_end_calls[0][0] == "s1" async def test_metadata_tracking(self, service): - await service.execute('s1', 'x = 1') - await service.execute('s1', 'y = 2') - meta = service._env_metadata['s1'] - assert meta['exec_count'] == 2 - assert 'last_exec_at' in meta - assert 'created_at' in meta + await service.execute("s1", "x = 1") + await service.execute("s1", "y = 2") + meta = service._env_metadata["s1"] + assert meta["exec_count"] == 2 + assert "last_exec_at" in meta + assert "created_at" in meta async def test_query_without_llm(self, service): - await service.inject('s1', 'data', [1, 2, 3]) - result = await service.query('s1', 'Summarize the data', ['data']) - assert 'error' in result + await service.inject("s1", "data", [1, 2, 3]) + result = await service.query("s1", "Summarize the data", ["data"]) + assert "error" in result # LLM not available in test environment async def test_load_without_memory_service(self, service): - result = await service.load('s1', 'memories', 'test query') - assert result.get('error') is not None or result.get('count') == 0 + result = await service.load("s1", "memories", "test query") + assert result.get("error") is not None or result.get("count") == 0 async def test_rlm_without_llm(self, service): 
- result = await service.rlm('s1', 'test goal') - assert result.get('error') is not None - assert 'LLM' in result['error'] + result = await service.rlm("s1", "test goal") + assert result.get("error") is not None + assert "LLM" in result["error"] async def test_hard_cap_enforcement(self): v = MockVariables({MEMORYLAYER_CONTEXT_EXEC_HARD_CAP: 2}) - svc = DefaultContextEnvironmentService( - v=v, executor=RestrictedExecutor() - ) - await svc.execute('s1', 'x = 1') - await svc.execute('s1', 'y = 2') - result = await svc.execute('s1', 'z = 3') - assert result['error'] is not None - assert 'cap' in result['error'].lower() + svc = DefaultContextEnvironmentService(v=v, executor=RestrictedExecutor()) + await svc.execute("s1", "x = 1") + await svc.execute("s1", "y = 2") + result = await svc.execute("s1", "z = 3") + assert result["error"] is not None + assert "cap" in result["error"].lower() # ============================================ # Helper Function Tests # ============================================ + class TestHelperFunctions: """Test utility/helper functions.""" def test_safe_preview_short(self): - assert _safe_preview(42) == '42' + assert _safe_preview(42) == "42" def test_safe_preview_long(self): long_list = list(range(1000)) preview = _safe_preview(long_list, max_chars=50) assert len(preview) <= 53 # 50 + '...' 
- assert preview.endswith('...') + assert preview.endswith("...") def test_safe_preview_unprintable(self): class BadRepr: def __repr__(self): raise RuntimeError("bad") + preview = _safe_preview(BadRepr()) - assert 'BadRepr' in preview + assert "BadRepr" in preview def test_estimate_size(self): assert _estimate_size(42) > 0 @@ -762,35 +738,36 @@ def test_estimate_size(self): assert _estimate_size("hello") > 0 def test_summarize_state(self): - state = {'x': 42, 'data': [1, 2, 3]} + state = {"x": 42, "data": [1, 2, 3]} summary = _summarize_state(state) - assert 'x' in summary - assert 'data' in summary - assert 'int' in summary - assert 'list' in summary + assert "x" in summary + assert "data" in summary + assert "int" in summary + assert "list" in summary def test_summarize_state_empty(self): summary = _summarize_state({}) - assert 'empty' in summary + assert "empty" in summary def test_summarize_state_truncation(self): - state = {f'var_{i}': i for i in range(100)} + state = {f"var_{i}": i for i in range(100)} summary = _summarize_state(state, max_chars=200) - assert 'more variables' in summary + assert "more variables" in summary # ============================================ # Schema Tests # ============================================ + class TestSchemas: """Test API request/response schema validation.""" def test_execute_request(self): from memorylayer_server.api.v1.schemas import ContextExecuteRequest - req = ContextExecuteRequest(code='x = 42') - assert req.code == 'x = 42' + req = ContextExecuteRequest(code="x = 42") + assert req.code == "x = 42" assert req.result_var is None assert req.return_result is True assert req.max_return_chars == 10_000 @@ -799,58 +776,58 @@ def test_execute_request_min_length(self): from memorylayer_server.api.v1.schemas import ContextExecuteRequest with pytest.raises(Exception): - ContextExecuteRequest(code='') + ContextExecuteRequest(code="") def test_execute_response(self): from memorylayer_server.api.v1.schemas import 
ContextExecuteResponse - resp = ContextExecuteResponse(output='hello', result='42') - assert resp.output == 'hello' - assert resp.result == '42' + resp = ContextExecuteResponse(output="hello", result="42") + assert resp.output == "hello" + assert resp.result == "42" assert resp.error is None assert resp.variables_changed == [] def test_load_request(self): from memorylayer_server.api.v1.schemas import ContextLoadRequest - req = ContextLoadRequest(var='memories', query='test') - assert req.var == 'memories' - assert req.query == 'test' + req = ContextLoadRequest(var="memories", query="test") + assert req.var == "memories" + assert req.query == "test" assert req.limit == 50 assert req.include_embeddings is False def test_inject_request(self): from memorylayer_server.api.v1.schemas import ContextInjectRequest - req = ContextInjectRequest(key='data', value=[1, 2, 3]) - assert req.key == 'data' + req = ContextInjectRequest(key="data", value=[1, 2, 3]) + assert req.key == "data" assert req.value == [1, 2, 3] assert req.parse_json is False def test_query_request(self): from memorylayer_server.api.v1.schemas import ContextQueryRequest - req = ContextQueryRequest(prompt='Summarize', variables=['data', 'config']) - assert req.prompt == 'Summarize' - assert req.variables == ['data', 'config'] + req = ContextQueryRequest(prompt="Summarize", variables=["data", "config"]) + assert req.prompt == "Summarize" + assert req.variables == ["data", "config"] def test_rlm_request(self): from memorylayer_server.api.v1.schemas import ContextRLMRequest - req = ContextRLMRequest(goal='Analyze trends') - assert req.goal == 'Analyze trends' + req = ContextRLMRequest(goal="Analyze trends") + assert req.goal == "Analyze trends" assert req.max_iterations == 10 - assert req.detail_level == 'standard' + assert req.detail_level == "standard" def test_rlm_response(self): from memorylayer_server.api.v1.schemas import ContextRLMResponse resp = ContextRLMResponse( - result='done', + result="done", 
iterations=3, goal_achieved=True, ) - assert resp.result == 'done' + assert resp.result == "done" assert resp.iterations == 3 assert resp.goal_achieved is True assert resp.trace == [] @@ -865,17 +842,16 @@ def test_status_response(self): def test_inspect_response(self): from memorylayer_server.api.v1.schemas import ContextInspectResponse - resp = ContextInspectResponse( - variable='x', type='int', preview='42', size_bytes=28 - ) - assert resp.variable == 'x' - assert resp.type == 'int' + resp = ContextInspectResponse(variable="x", type="int", preview="42", size_bytes=28) + assert resp.variable == "x" + assert resp.type == "int" # ============================================ # API Router Tests # ============================================ + class TestAPIRouter: """Test API router registration.""" @@ -883,21 +859,21 @@ def test_router_has_all_routes(self): from memorylayer_server.api.v1.context_environment import router paths = [r.path for r in router.routes] - assert '/v1/context/execute' in paths - assert '/v1/context/inspect' in paths - assert '/v1/context/load' in paths - assert '/v1/context/inject' in paths - assert '/v1/context/query' in paths - assert '/v1/context/rlm' in paths - assert '/v1/context/status' in paths - assert '/v1/context/cleanup' in paths + assert "/v1/context/execute" in paths + assert "/v1/context/inspect" in paths + assert "/v1/context/load" in paths + assert "/v1/context/inject" in paths + assert "/v1/context/query" in paths + assert "/v1/context/rlm" in paths + assert "/v1/context/status" in paths + assert "/v1/context/cleanup" in paths def test_plugin_registration(self): from memorylayer_server.api.v1.context_environment import ContextEnvironmentAPIPlugin plugin = ContextEnvironmentAPIPlugin() v = MockVariables() - assert plugin.extension_point_name(v) == 'memorylayer-server-api-routers' + assert plugin.extension_point_name(v) == "memorylayer-server-api-routers" assert plugin.is_multi_extension(v) is True assert plugin.is_enabled(v) is 
False # Multi-extension pattern @@ -906,13 +882,14 @@ def test_plugin_registration(self): # Plugin Tests # ============================================ + class TestDefaultPlugin: """Test the default service plugin.""" def test_plugin_provider_name(self): plugin = DefaultContextEnvironmentServicePlugin() - assert plugin.PROVIDER_NAME == 'default' - assert 'default' in plugin.name() + assert plugin.PROVIDER_NAME == "default" + assert "default" in plugin.name() def test_plugin_extension_point(self): plugin = DefaultContextEnvironmentServicePlugin() @@ -923,4 +900,4 @@ def test_plugin_on_registration(self): plugin = DefaultContextEnvironmentServicePlugin() v = MockVariables() plugin.on_registration(v) - assert v.get(MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE) == 'default' + assert v.get(MEMORYLAYER_CONTEXT_ENVIRONMENT_SERVICE) == "default" diff --git a/memorylayer-core-python/tests/unit/test_contradiction_api.py b/memorylayer-core-python/tests/unit/test_contradiction_api.py index 32fc659..5be3d92 100644 --- a/memorylayer-core-python/tests/unit/test_contradiction_api.py +++ b/memorylayer-core-python/tests/unit/test_contradiction_api.py @@ -1,7 +1,8 @@ """Tests for contradiction API endpoints.""" + import pytest import pytest_asyncio -from httpx import AsyncClient, ASGITransport +from httpx import ASGITransport, AsyncClient from memorylayer_server.models.memory import RememberInput from memorylayer_server.services.contradiction.base import ContradictionRecord @@ -33,9 +34,7 @@ async def test_list_contradictions_empty(self, async_client): assert "count" in data assert data["count"] == 0 - async def test_list_contradictions_with_data( - self, async_client, storage_backend, workspace_id - ): + async def test_list_contradictions_with_data(self, async_client, storage_backend, workspace_id): """Should return unresolved contradictions.""" # Create real memories for FK constraints input_a = RememberInput(content="API test memory A", importance=0.5) @@ -64,9 +63,7 @@ async def 
test_list_contradictions_with_data( found = any(c["id"] == record.id for c in data["contradictions"]) assert found - async def test_list_contradictions_with_limit( - self, async_client, workspace_id - ): + async def test_list_contradictions_with_limit(self, async_client, workspace_id): """Limit parameter should be respected.""" response = await async_client.get( f"/v1/workspaces/{workspace_id}/contradictions?limit=1", @@ -81,9 +78,7 @@ async def test_list_contradictions_with_limit( class TestResolveContradiction: """Test POST /v1/contradictions/{contradiction_id}/resolve endpoint.""" - async def test_resolve_keep_both( - self, async_client, storage_backend, workspace_id - ): + async def test_resolve_keep_both(self, async_client, storage_backend, workspace_id): """Resolve a contradiction with keep_both strategy.""" # Create real memories for FK constraints input_a = RememberInput(content="Resolve API test A", importance=0.5) @@ -111,9 +106,7 @@ async def test_resolve_keep_both( assert data["id"] == stored.id assert data["resolution"] == "keep_both" - async def test_resolve_invalid_strategy( - self, async_client, workspace_id - ): + async def test_resolve_invalid_strategy(self, async_client, workspace_id): """Invalid resolution strategy should return 400.""" response = await async_client.post( "/v1/contradictions/contra_fake/resolve", @@ -122,9 +115,7 @@ async def test_resolve_invalid_strategy( ) assert response.status_code == 400 - async def test_resolve_merge_without_content( - self, async_client, workspace_id - ): + async def test_resolve_merge_without_content(self, async_client, workspace_id): """Merge without merged_content should return 400.""" response = await async_client.post( "/v1/contradictions/contra_fake/resolve", @@ -133,9 +124,7 @@ async def test_resolve_merge_without_content( ) assert response.status_code == 400 - async def test_resolve_nonexistent_contradiction( - self, async_client, workspace_id - ): + async def 
test_resolve_nonexistent_contradiction(self, async_client, workspace_id): """Resolving nonexistent contradiction should return 404.""" response = await async_client.post( "/v1/contradictions/contra_nonexistent_xxx/resolve", diff --git a/memorylayer-core-python/tests/unit/test_contradiction_improvements.py b/memorylayer-core-python/tests/unit/test_contradiction_improvements.py index ea7bc6b..8b1eba0 100644 --- a/memorylayer-core-python/tests/unit/test_contradiction_improvements.py +++ b/memorylayer-core-python/tests/unit/test_contradiction_improvements.py @@ -1,18 +1,17 @@ """Tests for Phase 3a contradiction improvements: semantic value conflict detection, workspace scan, temporal ordering, and new task handlers.""" -import pytest -import pytest_asyncio -from datetime import datetime, timezone, timedelta + +from datetime import UTC, datetime, timedelta from unittest.mock import AsyncMock, MagicMock +import pytest + from memorylayer_server.services.contradiction.base import ( - ContradictionRecord, CONTRADICTION_TYPE_NEGATION, CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT, + ContradictionRecord, ) from memorylayer_server.services.contradiction.default import DefaultContradictionService -from memorylayer_server.models.memory import RememberInput - # ============================================================================= # Entity-value extraction tests @@ -23,39 +22,29 @@ class TestEntityValueExtraction: """Tests for _extract_entity_values static method.""" def test_extracts_simple_is_pattern(self): - triples = DefaultContradictionService._extract_entity_values( - "The server is Python" - ) + triples = DefaultContradictionService._extract_entity_values("The server is Python") assert len(triples) >= 1 subjects = [t[0] for t in triples] assert any("server" in s for s in subjects) def test_extracts_uses_pattern(self): - triples = DefaultContradictionService._extract_entity_values( - "The project uses PostgreSQL" - ) + triples = 
DefaultContradictionService._extract_entity_values("The project uses PostgreSQL") assert len(triples) >= 1 values = [t[2] for t in triples] assert any("postgresql" in v for v in values) def test_extracts_runs_pattern(self): - triples = DefaultContradictionService._extract_entity_values( - "The service runs Python 3" - ) + triples = DefaultContradictionService._extract_entity_values("The service runs Python 3") assert len(triples) >= 1 def test_case_insensitive_extraction(self): - triples = DefaultContradictionService._extract_entity_values( - "The API IS FastAPI" - ) + triples = DefaultContradictionService._extract_entity_values("The API IS FastAPI") # Case-insensitive lowercased assert all(t[0] == t[0].lower() for t in triples) assert all(t[2] == t[2].lower() for t in triples) def test_returns_empty_for_no_match(self): - triples = DefaultContradictionService._extract_entity_values( - "Hello world this has no entity patterns at all here" - ) + triples = DefaultContradictionService._extract_entity_values("Hello world this has no entity patterns at all here") # May or may not match - just ensure it returns a list assert isinstance(triples, list) @@ -91,6 +80,7 @@ def test_mismatched_lengths_return_zero(self): def test_partial_similarity(self): import math + # 45-degree vectors a = [math.sqrt(0.5), math.sqrt(0.5)] b = [1.0, 0.0] @@ -113,21 +103,21 @@ def _make_mem(self, mem_id: str, created_at: datetime): return mem def test_newer_memory_returned(self): - now = datetime.now(timezone.utc) + now = datetime.now(UTC) older = self._make_mem("mem_old", now - timedelta(hours=1)) newer = self._make_mem("mem_new", now) result = DefaultContradictionService._determine_newer_memory(older, newer) assert result == "mem_new" def test_a_is_newer(self): - now = datetime.now(timezone.utc) + now = datetime.now(UTC) mem_a = self._make_mem("mem_a", now) mem_b = self._make_mem("mem_b", now - timedelta(hours=2)) result = DefaultContradictionService._determine_newer_memory(mem_a, mem_b) 
assert result == "mem_a" def test_same_timestamp_returns_a(self): - now = datetime.now(timezone.utc) + now = datetime.now(UTC) mem_a = self._make_mem("mem_a", now) mem_b = self._make_mem("mem_b", now) result = DefaultContradictionService._determine_newer_memory(mem_a, mem_b) @@ -159,19 +149,19 @@ def _make_service(self, storage=None): storage = MagicMock() return DefaultContradictionService(storage=storage) - def _make_memory(self, mem_id: str, content: str, embedding: list[float], - workspace_id: str = "ws1", created_at: datetime = None): + def _make_memory(self, mem_id: str, content: str, embedding: list[float], workspace_id: str = "ws1", created_at: datetime = None): mem = MagicMock() mem.id = mem_id mem.content = content mem.embedding = embedding mem.workspace_id = workspace_id - mem.created_at = created_at or datetime.now(timezone.utc) + mem.created_at = created_at or datetime.now(UTC) return mem def _similar_embedding(self, base: list[float], offset: float = 0.1) -> list[float]: """Create a slightly-different embedding that stays in [0.7, 0.9] similarity range.""" import math + # Create a vector that's similar but not identical to base result = [base[0] * (1 - offset), base[1] * (1 - offset), offset] # Normalize @@ -187,7 +177,6 @@ async def test_no_embeddings_returns_none(self): async def test_identical_embeddings_returns_none(self): """Similarity == 1.0 is outside [0.7, 0.9] window.""" - import math service = self._make_service() emb = [1.0, 0.0, 0.0] mem_a = self._make_memory("a", "Server is Python", emb) @@ -208,7 +197,6 @@ async def test_low_similarity_returns_none(self): async def test_detects_value_conflict_in_similarity_range(self): """Two memories with similar embeddings (0.7-0.9) and conflicting values → conflict.""" - import math service = self._make_service() # Craft embeddings with dot product ~0.8 (within [0.7, 0.9]) @@ -228,12 +216,11 @@ async def test_detects_value_conflict_in_similarity_range(self): assert result.contradiction_type == 
CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT assert result.memory_a_id == "a" assert result.memory_b_id == "b" - assert result.detection_method == 'entity_value_extraction' + assert result.detection_method == "entity_value_extraction" assert 0.7 <= result.confidence <= 0.9 async def test_no_conflict_for_same_values(self): """Same subject+predicate+value should not trigger conflict.""" - import math service = self._make_service() emb_a = [0.8, 0.6, 0.0] @@ -249,7 +236,7 @@ async def test_temporal_ordering_set_on_conflict(self): """Conflict record should have newer_memory_id populated.""" service = self._make_service() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) emb_a = [0.8, 0.6, 0.0] emb_b = [1.0, 0.0, 0.0] @@ -287,21 +274,23 @@ def test_semantic_value_conflict_type(self): record = ContradictionRecord( contradiction_type=CONTRADICTION_TYPE_SEMANTIC_VALUE_CONFLICT, ) - assert record.contradiction_type == 'semantic_value_conflict' + assert record.contradiction_type == "semantic_value_conflict" def test_temporal_supersession_type(self): from memorylayer_server.services.contradiction.base import CONTRADICTION_TYPE_TEMPORAL_SUPERSESSION + record = ContradictionRecord( contradiction_type=CONTRADICTION_TYPE_TEMPORAL_SUPERSESSION, ) - assert record.contradiction_type == 'temporal_supersession' + assert record.contradiction_type == "temporal_supersession" def test_scope_conflict_type(self): from memorylayer_server.services.contradiction.base import CONTRADICTION_TYPE_SCOPE_CONFLICT + record = ContradictionRecord( contradiction_type=CONTRADICTION_TYPE_SCOPE_CONFLICT, ) - assert record.contradiction_type == 'scope_conflict' + assert record.contradiction_type == "scope_conflict" # ============================================================================= @@ -316,15 +305,15 @@ class TestScanWorkspace: def _make_service(self, storage): return DefaultContradictionService(storage=storage) - def _make_memory(self, mem_id: str, content: str, embedding: list[float], 
- workspace_id: str = "ws1"): - from datetime import datetime, timezone + def _make_memory(self, mem_id: str, content: str, embedding: list[float], workspace_id: str = "ws1"): + from datetime import datetime + mem = MagicMock() mem.id = mem_id mem.content = content mem.embedding = embedding mem.workspace_id = workspace_id - mem.created_at = datetime.now(timezone.utc) + mem.created_at = datetime.now(UTC) mem.tags = [] mem.metadata = {} mem.importance = 0.5 @@ -334,7 +323,7 @@ async def test_scan_empty_workspace(self): """Empty workspace should return empty list.""" storage = MagicMock() storage.get_unresolved_contradictions = AsyncMock(return_value=[]) - storage.get_workspace_stats = AsyncMock(return_value={'total_memories': 0}) + storage.get_workspace_stats = AsyncMock(return_value={"total_memories": 0}) storage.get_recent_memories = AsyncMock(return_value=[]) service = self._make_service(storage) @@ -345,21 +334,25 @@ async def test_scan_finds_negation_contradiction(self): """Scan should detect negation contradiction between similar memories.""" storage = MagicMock() storage.get_unresolved_contradictions = AsyncMock(return_value=[]) - storage.get_workspace_stats = AsyncMock(return_value={'total_memories': 2}) + storage.get_workspace_stats = AsyncMock(return_value={"total_memories": 2}) emb = [1.0, 0.0, 0.0] mem_a = self._make_memory("mem_a", "Always use type hints", emb) mem_b = self._make_memory("mem_b", "Never use type hints", emb) # Return mem_a dict in get_recent_memories, then empty to stop loop - storage.get_recent_memories = AsyncMock(side_effect=[ - [{'id': 'mem_a'}], - [], - ]) - storage.get_memory = AsyncMock(side_effect=lambda ws, mid, track_access=True: { - 'mem_a': mem_a, - 'mem_b': mem_b, - }.get(mid)) + storage.get_recent_memories = AsyncMock( + side_effect=[ + [{"id": "mem_a"}], + [], + ] + ) + storage.get_memory = AsyncMock( + side_effect=lambda ws, mid, track_access=True: { + "mem_a": mem_a, + "mem_b": mem_b, + }.get(mid) + ) 
storage.search_memories = AsyncMock(return_value=[(mem_b, 0.95)]) stored_record = ContradictionRecord( workspace_id="ws1", @@ -367,7 +360,7 @@ async def test_scan_finds_negation_contradiction(self): memory_b_id="mem_b", contradiction_type=CONTRADICTION_TYPE_NEGATION, confidence=0.95, - detection_method='negation_pattern', + detection_method="negation_pattern", ) storage.create_contradiction = AsyncMock(return_value=stored_record) @@ -387,16 +380,18 @@ async def test_scan_skips_existing_pairs(self): memory_b_id="mem_b", ) storage.get_unresolved_contradictions = AsyncMock(return_value=[existing]) - storage.get_workspace_stats = AsyncMock(return_value={'total_memories': 2}) + storage.get_workspace_stats = AsyncMock(return_value={"total_memories": 2}) emb = [1.0, 0.0, 0.0] mem_a = self._make_memory("mem_a", "Always use type hints", emb) mem_b = self._make_memory("mem_b", "Never use type hints", emb) - storage.get_recent_memories = AsyncMock(side_effect=[ - [{'id': 'mem_a'}], - [], - ]) + storage.get_recent_memories = AsyncMock( + side_effect=[ + [{"id": "mem_a"}], + [], + ] + ) storage.get_memory = AsyncMock(return_value=mem_a) storage.search_memories = AsyncMock(return_value=[(mem_b, 0.95)]) storage.create_contradiction = AsyncMock() @@ -417,15 +412,14 @@ class TestWorkspaceContradictionScanHandler: """Tests for WorkspaceContradictionScanHandler.""" def _make_handler(self): - from memorylayer_server.tasks.workspace_contradiction_scan_handler import ( - WorkspaceContradictionScanHandler - ) + from memorylayer_server.tasks.workspace_contradiction_scan_handler import WorkspaceContradictionScanHandler + handler = WorkspaceContradictionScanHandler.__new__(WorkspaceContradictionScanHandler) return handler async def test_get_task_type(self): handler = self._make_handler() - assert handler.get_task_type() == 'workspace_contradiction_scan' + assert handler.get_task_type() == "workspace_contradiction_scan" async def test_get_schedule_returns_daily(self): handler = 
self._make_handler() @@ -439,26 +433,25 @@ async def test_handle_single_workspace(self): handler = self._make_handler() contradiction_service = MagicMock() - contradiction_service.scan_workspace = AsyncMock(return_value=[ - ContradictionRecord(workspace_id="ws1", memory_a_id="a", memory_b_id="b") - ]) + contradiction_service.scan_workspace = AsyncMock( + return_value=[ContradictionRecord(workspace_id="ws1", memory_a_id="a", memory_b_id="b")] + ) storage = MagicMock() v = MagicMock() def get_ext(ext_name, variables): - from memorylayer_server.services._constants import ( - EXT_CONTRADICTION_SERVICE, EXT_STORAGE_BACKEND - ) + from memorylayer_server.services._constants import EXT_CONTRADICTION_SERVICE + if ext_name == EXT_CONTRADICTION_SERVICE: return contradiction_service return storage handler.get_extension = get_ext - await handler.handle(v, {'workspace_id': 'ws1'}) - contradiction_service.scan_workspace.assert_called_once_with('ws1') + await handler.handle(v, {"workspace_id": "ws1"}) + contradiction_service.scan_workspace.assert_called_once_with("ws1") async def test_handle_all_workspaces(self): handler = self._make_handler() @@ -467,9 +460,9 @@ async def test_handle_all_workspaces(self): contradiction_service.scan_workspace = AsyncMock(return_value=[]) ws1 = MagicMock() - ws1.id = 'ws1' + ws1.id = "ws1" ws2 = MagicMock() - ws2.id = 'ws2' + ws2.id = "ws2" storage = MagicMock() storage.list_workspaces = AsyncMock(return_value=[ws1, ws2]) @@ -477,9 +470,8 @@ async def test_handle_all_workspaces(self): v = MagicMock() def get_ext(ext_name, variables): - from memorylayer_server.services._constants import ( - EXT_CONTRADICTION_SERVICE, EXT_STORAGE_BACKEND - ) + from memorylayer_server.services._constants import EXT_CONTRADICTION_SERVICE + if ext_name == EXT_CONTRADICTION_SERVICE: return contradiction_service return storage @@ -500,16 +492,19 @@ class TestFindClusters: def test_empty_returns_empty(self): from memorylayer_server.tasks.consolidation_handler import 
_find_clusters + assert _find_clusters([], 0.85, 3) == [] def test_too_few_memories_returns_empty(self): from memorylayer_server.tasks.consolidation_handler import _find_clusters + mem = MagicMock() mem.embedding = [1.0, 0.0] assert _find_clusters([mem, mem], 0.85, 3) == [] def test_finds_tight_cluster(self): from memorylayer_server.tasks.consolidation_handler import _find_clusters + emb = [1.0, 0.0, 0.0] memories = [] for i in range(3): @@ -523,6 +518,7 @@ def test_finds_tight_cluster(self): def test_separates_distinct_clusters(self): from memorylayer_server.tasks.consolidation_handler import _find_clusters + emb_a = [1.0, 0.0, 0.0] emb_b = [0.0, 1.0, 0.0] # orthogonal to emb_a, dot=0.0 @@ -557,44 +553,49 @@ def _make_mem(self, mem_id: str, importance: float, tags: list, metadata: dict): def test_unions_tags(self): from memorylayer_server.tasks.consolidation_handler import _merge_memories_simplified + primary = self._make_mem("p", 0.2, ["a", "b"], {}) other = self._make_mem("o", 0.15, ["b", "c"], {}) result = _merge_memories_simplified(primary, [other]) - assert set(result['tags']) == {"a", "b", "c"} + assert set(result["tags"]) == {"a", "b", "c"} def test_deep_merges_metadata(self): from memorylayer_server.tasks.consolidation_handler import _merge_memories_simplified + primary = self._make_mem("p", 0.2, [], {"source": "a"}) other = self._make_mem("o", 0.15, [], {"tool": "pytest", "source": "b"}) result = _merge_memories_simplified(primary, [other]) # Primary's "source" wins; "tool" from other fills in - assert result['metadata']['source'] == "a" - assert result['metadata']['tool'] == "pytest" + assert result["metadata"]["source"] == "a" + assert result["metadata"]["tool"] == "pytest" def test_importance_boosted(self): from memorylayer_server.tasks.consolidation_handler import _merge_memories_simplified + primary = self._make_mem("p", 0.2, [], {}) other = self._make_mem("o", 0.25, [], {}) result = _merge_memories_simplified(primary, [other]) # max importance = 
0.25, boosted = min(0.25 * 1.1, 1.0) = 0.275 - assert result['importance'] == pytest.approx(0.275) + assert result["importance"] == pytest.approx(0.275) def test_importance_capped_at_one(self): from memorylayer_server.tasks.consolidation_handler import _merge_memories_simplified + primary = self._make_mem("p", 0.95, [], {}) other = self._make_mem("o", 0.95, [], {}) result = _merge_memories_simplified(primary, [other]) - assert result['importance'] <= 1.0 + assert result["importance"] <= 1.0 def test_provenance_tracked(self): from memorylayer_server.tasks.consolidation_handler import _merge_memories_simplified + primary = self._make_mem("p", 0.2, [], {}) other1 = self._make_mem("o1", 0.1, [], {}) other2 = self._make_mem("o2", 0.15, [], {}) result = _merge_memories_simplified(primary, [other1, other2]) - assert 'consolidated_from' in result['metadata'] - assert 'o1' in result['metadata']['consolidated_from'] - assert 'o2' in result['metadata']['consolidated_from'] + assert "consolidated_from" in result["metadata"] + assert "o1" in result["metadata"]["consolidated_from"] + assert "o2" in result["metadata"]["consolidated_from"] @pytest.mark.asyncio @@ -603,21 +604,23 @@ class TestConsolidationTaskHandler: def _make_handler(self): from memorylayer_server.tasks.consolidation_handler import ConsolidationTaskHandler + handler = ConsolidationTaskHandler.__new__(ConsolidationTaskHandler) return handler def _make_v(self, enabled: bool = True): from scitrera_app_framework import Variables + v = Variables() - v.set('MEMORYLAYER_CONSOLIDATION_ENABLED', 'true' if enabled else 'false') - v.set('MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE', '3') - v.set('MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE', '0.3') - v.set('MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY', '0.85') + v.set("MEMORYLAYER_CONSOLIDATION_ENABLED", "true" if enabled else "false") + v.set("MEMORYLAYER_CONSOLIDATION_MIN_CLUSTER_SIZE", "3") + v.set("MEMORYLAYER_CONSOLIDATION_MAX_IMPORTANCE", "0.3") + 
v.set("MEMORYLAYER_CONSOLIDATION_MIN_SIMILARITY", "0.85") return v async def test_get_task_type(self): handler = self._make_handler() - assert handler.get_task_type() == 'memory_consolidation' + assert handler.get_task_type() == "memory_consolidation" async def test_get_schedule_when_disabled_returns_none(self): handler = self._make_handler() @@ -656,35 +659,35 @@ def _make_candidate(mem_id, importance): m.id = mem_id m.embedding = emb m.importance = importance - m.tags = ['tag1'] + m.tags = ["tag1"] m.metadata = {} m.pinned = False - m.workspace_id = 'ws1' + m.workspace_id = "ws1" return m - c1 = _make_candidate('c1', 0.25) - c2 = _make_candidate('c2', 0.20) - c3 = _make_candidate('c3', 0.15) + c1 = _make_candidate("c1", 0.25) + c2 = _make_candidate("c2", 0.20) + c3 = _make_candidate("c3", 0.15) storage = MagicMock() ws = MagicMock() - ws.id = 'ws1' + ws.id = "ws1" storage.list_workspaces = AsyncMock(return_value=[ws]) # Return candidates as dicts from get_recent_memories, then empty - storage.get_recent_memories = AsyncMock(side_effect=[ - [ - {'id': 'c1', 'importance': 0.25}, - {'id': 'c2', 'importance': 0.20}, - {'id': 'c3', 'importance': 0.15}, - ], - [], - ]) - - mem_map = {'c1': c1, 'c2': c2, 'c3': c3} - storage.get_memory = AsyncMock( - side_effect=lambda ws_id, mid, track_access=True: mem_map.get(mid) + storage.get_recent_memories = AsyncMock( + side_effect=[ + [ + {"id": "c1", "importance": 0.25}, + {"id": "c2", "importance": 0.20}, + {"id": "c3", "importance": 0.15}, + ], + [], + ] ) + + mem_map = {"c1": c1, "c2": c2, "c3": c3} + storage.get_memory = AsyncMock(side_effect=lambda ws_id, mid, track_access=True: mem_map.get(mid)) storage.update_memory = AsyncMock(return_value=c1) storage.delete_memory = AsyncMock(return_value=True) @@ -695,7 +698,7 @@ def _make_candidate(mem_id, importance): # Primary (c1, highest importance) should be updated storage.update_memory.assert_called_once() call_args = storage.update_memory.call_args - assert call_args[0][1] == 
'c1' # primary memory id + assert call_args[0][1] == "c1" # primary memory id # Others (c2, c3) should be soft-deleted assert storage.delete_memory.call_count == 2 diff --git a/memorylayer-core-python/tests/unit/test_contradiction_service.py b/memorylayer-core-python/tests/unit/test_contradiction_service.py index 36bc826..f762367 100644 --- a/memorylayer-core-python/tests/unit/test_contradiction_service.py +++ b/memorylayer-core-python/tests/unit/test_contradiction_service.py @@ -1,14 +1,13 @@ """Tests for ContradictionService - negation detection, contradiction creation, resolution logic.""" + import pytest -import pytest_asyncio +from memorylayer_server.models.memory import RememberInput from memorylayer_server.services.contradiction.base import ContradictionRecord from memorylayer_server.services.contradiction.default import ( - DefaultContradictionService, NEGATION_PAIRS, + DefaultContradictionService, ) -from memorylayer_server.models.memory import RememberInput, MemoryType - # ============================================================================= # Pure unit tests: negation pattern detection (no fixtures needed) @@ -19,71 +18,40 @@ class TestNegationPatternDetection: """Tests for _has_negation_pattern static method.""" def test_detects_use_vs_dont_use(self): - assert DefaultContradictionService._has_negation_pattern( - "Use React for the frontend", - "Don't use React for the frontend" - ) + assert DefaultContradictionService._has_negation_pattern("Use React for the frontend", "Don't use React for the frontend") def test_detects_enable_vs_disable(self): - assert DefaultContradictionService._has_negation_pattern( - "Enable dark mode by default", - "Disable dark mode by default" - ) + assert DefaultContradictionService._has_negation_pattern("Enable dark mode by default", "Disable dark mode by default") def test_detects_always_vs_never(self): - assert DefaultContradictionService._has_negation_pattern( - "Always use type hints", - "Never use type hints" - ) 
+ assert DefaultContradictionService._has_negation_pattern("Always use type hints", "Never use type hints") def test_detects_true_vs_false(self): - assert DefaultContradictionService._has_negation_pattern( - "Set debug to true", - "Set debug to false" - ) + assert DefaultContradictionService._has_negation_pattern("Set debug to true", "Set debug to false") def test_detects_should_vs_should_not(self): - assert DefaultContradictionService._has_negation_pattern( - "You should use async", - "You should not use async" - ) + assert DefaultContradictionService._has_negation_pattern("You should use async", "You should not use async") def test_detects_prefer_vs_avoid(self): assert DefaultContradictionService._has_negation_pattern( - "Prefer composition over inheritance", - "Avoid composition, use inheritance" + "Prefer composition over inheritance", "Avoid composition, use inheritance" ) def test_detects_include_vs_exclude(self): - assert DefaultContradictionService._has_negation_pattern( - "Include logging in all services", - "Exclude logging from services" - ) + assert DefaultContradictionService._has_negation_pattern("Include logging in all services", "Exclude logging from services") def test_detects_bidirectional(self): """Negation should be detected regardless of which text has the positive term.""" - assert DefaultContradictionService._has_negation_pattern( - "Don't use tabs", - "Use tabs for indentation" - ) + assert DefaultContradictionService._has_negation_pattern("Don't use tabs", "Use tabs for indentation") def test_case_insensitive(self): - assert DefaultContradictionService._has_negation_pattern( - "ALWAYS run tests", - "Never run tests" - ) + assert DefaultContradictionService._has_negation_pattern("ALWAYS run tests", "Never run tests") def test_no_negation_for_unrelated_texts(self): - assert not DefaultContradictionService._has_negation_pattern( - "The sky is blue", - "Python is a programming language" - ) + assert not 
DefaultContradictionService._has_negation_pattern("The sky is blue", "Python is a programming language") def test_no_negation_for_agreeing_texts(self): - assert not DefaultContradictionService._has_negation_pattern( - "Use Python for backend", - "Use Python for data science" - ) + assert not DefaultContradictionService._has_negation_pattern("Use Python for backend", "Use Python for data science") def test_negation_pairs_list_is_populated(self): """Ensure NEGATION_PAIRS has meaningful entries.""" @@ -157,9 +125,7 @@ async def _create_memory_with_embedding(storage_backend, embedding_service, work class TestContradictionDetection: """Test contradiction detection against real storage.""" - async def test_check_new_memory_finds_negation( - self, storage_backend, embedding_service, workspace_id - ): + async def test_check_new_memory_finds_negation(self, storage_backend, embedding_service, workspace_id): """When two memories share an embedding but have negating content, detect contradiction.""" service = DefaultContradictionService(storage=storage_backend) @@ -168,12 +134,16 @@ async def test_check_new_memory_finds_negation( shared_embedding_text = "tabs for indentation" mem1 = await _create_memory_with_embedding( - storage_backend, embedding_service, workspace_id, + storage_backend, + embedding_service, + workspace_id, content="Use tabs for indentation", embedding_text=shared_embedding_text, ) mem2 = await _create_memory_with_embedding( - storage_backend, embedding_service, workspace_id, + storage_backend, + embedding_service, + workspace_id, content="Don't use tabs for indentation", embedding_text=shared_embedding_text, ) @@ -181,24 +151,23 @@ async def test_check_new_memory_finds_negation( contradictions = await service.check_new_memory(workspace_id, mem2.id) assert len(contradictions) >= 1 - found = any( - c.memory_a_id == mem2.id and c.memory_b_id == mem1.id - for c in contradictions - ) + found = any(c.memory_a_id == mem2.id and c.memory_b_id == mem1.id for c in 
contradictions) assert found, f"Expected contradiction between {mem2.id} and {mem1.id}" - async def test_check_new_memory_no_contradiction_for_unrelated( - self, storage_backend, embedding_service, workspace_id - ): + async def test_check_new_memory_no_contradiction_for_unrelated(self, storage_backend, embedding_service, workspace_id): """Unrelated memories should not trigger contradictions.""" service = DefaultContradictionService(storage=storage_backend) mem1 = await _create_memory_with_embedding( - storage_backend, embedding_service, workspace_id, + storage_backend, + embedding_service, + workspace_id, content="Python is great for data science", ) mem2 = await _create_memory_with_embedding( - storage_backend, embedding_service, workspace_id, + storage_backend, + embedding_service, + workspace_id, content="The weather is sunny today", ) @@ -206,9 +175,7 @@ async def test_check_new_memory_no_contradiction_for_unrelated( related = [c for c in contradictions if c.memory_b_id == mem1.id] assert len(related) == 0 - async def test_check_new_memory_skips_without_embedding( - self, storage_backend, workspace_id - ): + async def test_check_new_memory_skips_without_embedding(self, storage_backend, workspace_id): """Memory without embedding should return empty contradictions list.""" service = DefaultContradictionService(storage=storage_backend) @@ -218,9 +185,7 @@ async def test_check_new_memory_skips_without_embedding( contradictions = await service.check_new_memory(workspace_id, mem1.id) assert contradictions == [] - async def test_check_new_memory_nonexistent_returns_empty( - self, storage_backend, workspace_id - ): + async def test_check_new_memory_nonexistent_returns_empty(self, storage_backend, workspace_id): """Nonexistent memory ID should return empty list.""" service = DefaultContradictionService(storage=storage_backend) contradictions = await service.check_new_memory(workspace_id, "mem_nonexistent") @@ -236,9 +201,7 @@ async def 
test_check_new_memory_nonexistent_returns_empty( class TestContradictionResolution: """Test contradiction resolution logic.""" - async def test_resolve_keep_a_soft_deletes_b( - self, storage_backend, embedding_service, workspace_id - ): + async def test_resolve_keep_a_soft_deletes_b(self, storage_backend, embedding_service, workspace_id): """Resolving with keep_a should soft-delete the contradiction's memory_b. check_new_memory sets memory_a_id=new_memory, memory_b_id=existing_memory. @@ -249,12 +212,16 @@ async def test_resolve_keep_a_soft_deletes_b( shared_embedding_text = "caching for services" existing_mem = await _create_memory_with_embedding( - storage_backend, embedding_service, workspace_id, + storage_backend, + embedding_service, + workspace_id, content="Enable caching for all services", embedding_text=shared_embedding_text, ) new_mem = await _create_memory_with_embedding( - storage_backend, embedding_service, workspace_id, + storage_backend, + embedding_service, + workspace_id, content="Disable caching for all services", embedding_text=shared_embedding_text, ) @@ -267,9 +234,7 @@ async def test_resolve_keep_a_soft_deletes_b( assert contradiction.memory_b_id == existing_mem.id # Resolve: keep A (new_mem) → soft-delete B (existing_mem) - resolved = await service.resolve( - workspace_id, contradiction.id, "keep_a" - ) + resolved = await service.resolve(workspace_id, contradiction.id, "keep_a") assert resolved is not None assert resolved.resolution == "keep_a" @@ -281,9 +246,7 @@ async def test_resolve_keep_a_soft_deletes_b( new_after = await storage_backend.get_memory(workspace_id, new_mem.id) assert new_after is not None - async def test_resolve_keep_both( - self, storage_backend, workspace_id - ): + async def test_resolve_keep_both(self, storage_backend, workspace_id): """Resolving with keep_both should keep both memories intact.""" service = DefaultContradictionService(storage=storage_backend) @@ -307,9 +270,7 @@ async def test_resolve_keep_both( assert 
resolved is not None assert resolved.resolution == "keep_both" - async def test_resolve_nonexistent_returns_none( - self, storage_backend, workspace_id - ): + async def test_resolve_nonexistent_returns_none(self, storage_backend, workspace_id): """Resolving a nonexistent contradiction should return None.""" service = DefaultContradictionService(storage=storage_backend) result = await service.resolve(workspace_id, "contra_nonexistent", "keep_a") @@ -325,9 +286,7 @@ async def test_resolve_nonexistent_returns_none( class TestGetUnresolved: """Test retrieving unresolved contradictions.""" - async def test_get_unresolved_returns_pending( - self, storage_backend, workspace_id - ): + async def test_get_unresolved_returns_pending(self, storage_backend, workspace_id): """Unresolved contradictions should be returned.""" service = DefaultContradictionService(storage=storage_backend) @@ -352,9 +311,7 @@ async def test_get_unresolved_returns_pending( ids = [c.id for c in unresolved] assert record.id in ids - async def test_get_unresolved_excludes_resolved( - self, storage_backend, workspace_id - ): + async def test_get_unresolved_excludes_resolved(self, storage_backend, workspace_id): """Resolved contradictions should not appear in unresolved list.""" service = DefaultContradictionService(storage=storage_backend) @@ -379,9 +336,7 @@ async def test_get_unresolved_excludes_resolved( ids = [c.id for c in unresolved] assert stored.id not in ids - async def test_get_unresolved_respects_limit( - self, storage_backend, workspace_id - ): + async def test_get_unresolved_respects_limit(self, storage_backend, workspace_id): """Limit parameter should be respected.""" service = DefaultContradictionService(storage=storage_backend) unresolved = await service.get_unresolved(workspace_id, limit=1) diff --git a/memorylayer-core-python/tests/unit/test_core_models.py b/memorylayer-core-python/tests/unit/test_core_models.py index 06a77ea..cb2d527 100644 --- 
a/memorylayer-core-python/tests/unit/test_core_models.py +++ b/memorylayer-core-python/tests/unit/test_core_models.py @@ -4,19 +4,20 @@ Tests all domain models, enums, validators, and factory methods from the MemoryLayer.ai core modules. """ -from datetime import datetime, timedelta, timezone + +from datetime import UTC, datetime, timedelta import pytest from pydantic import ValidationError from memorylayer_server.models.association import ( - Association, + KNOWN_RELATIONSHIP_TYPES, AssociateInput, + Association, GraphPath, GraphQueryInput, GraphQueryResult, RelationshipCategory, - KNOWN_RELATIONSHIP_TYPES, get_relationship_category, ) from memorylayer_server.models.memory import ( @@ -25,9 +26,7 @@ MemoryType, RecallInput, RecallMode, - RecallResult, ReflectInput, - ReflectResult, RememberInput, SearchTolerance, ) @@ -47,7 +46,6 @@ WorkspaceSettings, ) - # ============================================================================ # MEMORY MODEL TESTS # ============================================================================ @@ -566,15 +564,11 @@ def test_associate_input_required_fields(self): """Test required fields for AssociateInput.""" # Missing source_id with pytest.raises(ValidationError): - AssociateInput( - target_id="mem-2", relationship="solves" - ) + AssociateInput(target_id="mem-2", relationship="solves") # Missing target_id with pytest.raises(ValidationError): - AssociateInput( - source_id="mem-1", relationship="solves" - ) + AssociateInput(source_id="mem-1", relationship="solves") # Missing relationship with pytest.raises(ValidationError): @@ -615,9 +609,7 @@ def test_graph_query_direction_pattern(self): """Test direction must match pattern.""" # Valid directions for direction in ["outgoing", "incoming", "both"]: - input_data = GraphQueryInput( - start_memory_id="mem-1", direction=direction - ) + input_data = GraphQueryInput(start_memory_id="mem-1", direction=direction) assert input_data.direction == direction # Invalid direction @@ -682,7 
+674,7 @@ class TestSessionModel: def test_session_create_with_ttl_factory(self): """Test Session.create_with_ttl() factory method.""" - now = datetime.now(timezone.utc) + now = datetime.now(UTC) session = Session.create_with_ttl( session_id="sess-1", workspace_id="ws-1", @@ -704,7 +696,7 @@ def test_session_is_expired_property(self): id="sess-1", workspace_id="ws-1", tenant_id="default_tenant", - expires_at=datetime.now(timezone.utc) + timedelta(hours=1), + expires_at=datetime.now(UTC) + timedelta(hours=1), ) assert session.is_expired is False @@ -713,7 +705,7 @@ def test_session_is_expired_property(self): id="sess-1", workspace_id="ws-1", tenant_id="default_tenant", - expires_at=datetime.now(timezone.utc) - timedelta(hours=1), + expires_at=datetime.now(UTC) - timedelta(hours=1), ) assert session.is_expired is True @@ -774,7 +766,7 @@ def test_workspace_summary_model(self): def test_activity_summary_model(self): """Test ActivitySummary model.""" summary = ActivitySummary( - timestamp=datetime.now(timezone.utc), + timestamp=datetime.now(UTC), summary="Implemented new feature", memories_created=5, key_decisions=["Use SQLite", "Add caching"], @@ -788,7 +780,7 @@ def test_open_thread_model(self): thread = OpenThread( topic="Authentication refactor", status="in_progress", - last_activity=datetime.now(timezone.utc), + last_activity=datetime.now(UTC), key_memories=["mem-1", "mem-2"], ) @@ -899,5 +891,3 @@ def test_context_settings_inheritance_default(self): assert settings.inherit_workspace_settings is True assert settings.auto_remember_enabled is None assert settings.decay_enabled is None - - diff --git a/memorylayer-core-python/tests/unit/test_decay_service.py b/memorylayer-core-python/tests/unit/test_decay_service.py index 4ade163..d293b87 100644 --- a/memorylayer-core-python/tests/unit/test_decay_service.py +++ b/memorylayer-core-python/tests/unit/test_decay_service.py @@ -3,16 +3,17 @@ Tests decay formula, archival criteria, boost logic, and pinned exclusion. 
""" + +from datetime import UTC, datetime, timedelta + import pytest -from datetime import datetime, timezone, timedelta +from scitrera_app_framework import get_extension -from memorylayer_server.models.memory import RememberInput, MemoryType, MemoryStatus +from memorylayer_server.models.memory import MemoryStatus, MemoryType, RememberInput +from memorylayer_server.services.decay import EXT_DECAY_SERVICE +from memorylayer_server.services.decay.base import DecayResult, DecaySettings from memorylayer_server.services.memory import MemoryService from memorylayer_server.services.storage.base import StorageBackend -from memorylayer_server.services.decay.base import DecaySettings, DecayResult -from memorylayer_server.services.decay import EXT_DECAY_SERVICE - -from scitrera_app_framework import get_extension @pytest.fixture @@ -55,8 +56,11 @@ class TestBoostOnAccess: @pytest.mark.asyncio async def test_boost_increases_importance( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): memory = await memory_service.remember( workspace_id, @@ -68,8 +72,11 @@ async def test_boost_increases_importance( @pytest.mark.asyncio async def test_boost_caps_at_one( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): memory = await memory_service.remember( workspace_id, @@ -81,7 +88,9 @@ async def test_boost_caps_at_one( @pytest.mark.asyncio async def test_boost_custom_factor( - self, decay_service, memory_service: MemoryService, + self, + decay_service, + memory_service: MemoryService, workspace_id: str, ): memory = await memory_service.remember( @@ -103,8 +112,11 @@ class TestDecayWorkspace: @pytest.mark.asyncio async def 
test_decay_reduces_importance( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): """Memories with old last_accessed_at should have reduced importance.""" memory = await memory_service.remember( @@ -112,9 +124,10 @@ async def test_decay_reduces_importance( RememberInput(content="Decay reduction test", type=MemoryType.SEMANTIC, importance=0.8), ) # Make the memory look old by updating created_at and last_accessed_at - old_time = (datetime.now(timezone.utc) - timedelta(days=30)).strftime('%Y-%m-%d %H:%M:%S') + old_time = (datetime.now(UTC) - timedelta(days=30)).strftime("%Y-%m-%d %H:%M:%S") await storage_backend.update_memory( - workspace_id, memory.id, + workspace_id, + memory.id, created_at=old_time, last_accessed_at=old_time, ) @@ -130,8 +143,11 @@ async def test_decay_reduces_importance( @pytest.mark.asyncio async def test_decay_respects_min_age( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): """Recent memories should NOT be decayed.""" memory = await memory_service.remember( @@ -141,24 +157,28 @@ async def test_decay_respects_min_age( original_importance = memory.importance settings = DecaySettings(min_age_days=365) # Nothing is 365 days old - result = await decay_service.decay_workspace(workspace_id, settings) + await decay_service.decay_workspace(workspace_id, settings) updated = await storage_backend.get_memory(workspace_id, memory.id) assert updated.importance == original_importance @pytest.mark.asyncio async def test_decay_skips_pinned( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + 
workspace_id: str, + storage_backend: StorageBackend, ): """Pinned memories should NOT be decayed.""" memory = await memory_service.remember( workspace_id, RememberInput(content="Pinned no-decay test", type=MemoryType.SEMANTIC, importance=0.8), ) - old_time = (datetime.now(timezone.utc) - timedelta(days=30)).strftime('%Y-%m-%d %H:%M:%S') + old_time = (datetime.now(UTC) - timedelta(days=30)).strftime("%Y-%m-%d %H:%M:%S") await storage_backend.update_memory( - workspace_id, memory.id, + workspace_id, + memory.id, pinned=1, created_at=old_time, last_accessed_at=old_time, @@ -172,17 +192,21 @@ async def test_decay_skips_pinned( @pytest.mark.asyncio async def test_decay_respects_min_importance_floor( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): """Importance should never drop below min_importance.""" memory = await memory_service.remember( workspace_id, RememberInput(content="Floor test decay", type=MemoryType.SEMANTIC, importance=0.3), ) - very_old = (datetime.now(timezone.utc) - timedelta(days=365)).strftime('%Y-%m-%d %H:%M:%S') + very_old = (datetime.now(UTC) - timedelta(days=365)).strftime("%Y-%m-%d %H:%M:%S") await storage_backend.update_memory( - workspace_id, memory.id, + workspace_id, + memory.id, created_at=very_old, last_accessed_at=very_old, ) @@ -199,17 +223,21 @@ class TestArchiveStaleMemories: @pytest.mark.asyncio async def test_archive_low_importance_old_memory( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): """Low importance, old, rarely accessed memories should be archived.""" memory = await memory_service.remember( workspace_id, RememberInput(content="Stale archive candidate", 
type=MemoryType.SEMANTIC, importance=0.1), ) - very_old = (datetime.now(timezone.utc) - timedelta(days=120)).strftime('%Y-%m-%d %H:%M:%S') + very_old = (datetime.now(UTC) - timedelta(days=120)).strftime("%Y-%m-%d %H:%M:%S") await storage_backend.update_memory( - workspace_id, memory.id, + workspace_id, + memory.id, created_at=very_old, importance=0.1, ) @@ -227,17 +255,21 @@ async def test_archive_low_importance_old_memory( @pytest.mark.asyncio async def test_archive_skips_high_importance( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): """High importance memories should NOT be archived.""" memory = await memory_service.remember( workspace_id, RememberInput(content="High importance no archive", type=MemoryType.SEMANTIC, importance=0.9), ) - old_time = (datetime.now(timezone.utc) - timedelta(days=120)).strftime('%Y-%m-%d %H:%M:%S') + old_time = (datetime.now(UTC) - timedelta(days=120)).strftime("%Y-%m-%d %H:%M:%S") await storage_backend.update_memory( - workspace_id, memory.id, + workspace_id, + memory.id, created_at=old_time, ) @@ -249,17 +281,21 @@ async def test_archive_skips_high_importance( @pytest.mark.asyncio async def test_archive_skips_pinned( - self, decay_service, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + decay_service, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): """Pinned memories should NOT be archived even if low importance.""" memory = await memory_service.remember( workspace_id, RememberInput(content="Pinned no archive test", type=MemoryType.SEMANTIC, importance=0.1), ) - old_time = (datetime.now(timezone.utc) - timedelta(days=120)).strftime('%Y-%m-%d %H:%M:%S') + old_time = (datetime.now(UTC) - timedelta(days=120)).strftime("%Y-%m-%d %H:%M:%S") await 
storage_backend.update_memory( - workspace_id, memory.id, + workspace_id, + memory.id, created_at=old_time, pinned=1, importance=0.1, @@ -277,40 +313,53 @@ class TestStorageDecayMethods: @pytest.mark.asyncio async def test_get_memories_for_decay( - self, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): memory = await memory_service.remember( workspace_id, RememberInput(content="Decay eligible storage test", type=MemoryType.SEMANTIC), ) - old_time = (datetime.now(timezone.utc) - timedelta(days=30)).strftime('%Y-%m-%d %H:%M:%S') + old_time = (datetime.now(UTC) - timedelta(days=30)).strftime("%Y-%m-%d %H:%M:%S") await storage_backend.update_memory( - workspace_id, memory.id, created_at=old_time, + workspace_id, + memory.id, + created_at=old_time, ) memories = await storage_backend.get_memories_for_decay( - workspace_id, min_age_days=1, exclude_pinned=True, + workspace_id, + min_age_days=1, + exclude_pinned=True, ) found_ids = [m.id for m in memories] assert memory.id in found_ids @pytest.mark.asyncio async def test_get_memories_for_decay_excludes_pinned( - self, memory_service: MemoryService, - workspace_id: str, storage_backend: StorageBackend, + self, + memory_service: MemoryService, + workspace_id: str, + storage_backend: StorageBackend, ): memory = await memory_service.remember( workspace_id, RememberInput(content="Pinned decay exclusion test", type=MemoryType.SEMANTIC), ) - old_time = (datetime.now(timezone.utc) - timedelta(days=30)).strftime('%Y-%m-%d %H:%M:%S') + old_time = (datetime.now(UTC) - timedelta(days=30)).strftime("%Y-%m-%d %H:%M:%S") await storage_backend.update_memory( - workspace_id, memory.id, created_at=old_time, pinned=1, + workspace_id, + memory.id, + created_at=old_time, + pinned=1, ) memories = await storage_backend.get_memories_for_decay( - workspace_id, min_age_days=1, exclude_pinned=True, + workspace_id, + 
min_age_days=1, + exclude_pinned=True, ) found_ids = [m.id for m in memories] assert memory.id not in found_ids diff --git a/memorylayer-core-python/tests/unit/test_deduplication_service.py b/memorylayer-core-python/tests/unit/test_deduplication_service.py index e031a57..e3a4520 100644 --- a/memorylayer-core-python/tests/unit/test_deduplication_service.py +++ b/memorylayer-core-python/tests/unit/test_deduplication_service.py @@ -1,8 +1,9 @@ """Tests for DeduplicationService.""" + import pytest -from memorylayer_server.services.deduplication import DeduplicationAction from memorylayer_server.models import RememberInput +from memorylayer_server.services.deduplication import DeduplicationAction from memorylayer_server.utils import compute_content_hash @@ -10,10 +11,7 @@ async def test_check_duplicate_new_memory(deduplication_service): """Test that new unique content returns CREATE.""" result = await deduplication_service.check_duplicate( - content="test content for dedup", - content_hash="unique_hash_abc123", - embedding=[0.1] * 384, - workspace_id="ws-dedup-1" + content="test content for dedup", content_hash="unique_hash_abc123", embedding=[0.1] * 384, workspace_id="ws-dedup-1" ) assert result.action == DeduplicationAction.CREATE @@ -35,10 +33,7 @@ async def test_check_duplicate_exact_match(storage_backend, deduplication_servic # Try to add duplicate with same content (same hash) result = await deduplication_service.check_duplicate( - content=existing_content, - content_hash=existing_hash, - embedding=existing_embedding, - workspace_id="ws-dedup-2" + content=existing_content, content_hash=existing_hash, embedding=existing_embedding, workspace_id="ws-dedup-2" ) assert result.action == DeduplicationAction.SKIP @@ -64,7 +59,7 @@ async def test_check_duplicate_semantic_match(storage_backend, deduplication_ser content="similar content there for semantic test", content_hash="new_hash_789", embedding=existing_embedding, # Same embedding = similarity 1.0 - 
workspace_id="ws-dedup-3" + workspace_id="ws-dedup-3", ) assert result.action == DeduplicationAction.UPDATE @@ -81,10 +76,7 @@ async def test_deduplicate_batch(deduplication_service): ("batch content 3", "batch_hash_3", [0.3] * 384), ] - results = await deduplication_service.deduplicate_batch( - candidates=candidates, - workspace_id="ws-dedup-batch" - ) + results = await deduplication_service.deduplicate_batch(candidates=candidates, workspace_id="ws-dedup-batch") assert len(results) == 3 # All should be CREATE since workspace is empty diff --git a/memorylayer-core-python/tests/unit/test_embedding_service.py b/memorylayer-core-python/tests/unit/test_embedding_service.py index 9776621..c5af034 100644 --- a/memorylayer-core-python/tests/unit/test_embedding_service.py +++ b/memorylayer-core-python/tests/unit/test_embedding_service.py @@ -1,5 +1,7 @@ """Unit tests for EmbeddingService.""" + import pytest + from memorylayer_server.services.embedding import EmbeddingService # Mock provider default dimensions diff --git a/memorylayer-core-python/tests/unit/test_entity_attribution.py b/memorylayer-core-python/tests/unit/test_entity_attribution.py index 8f22552..b53fc09 100644 --- a/memorylayer-core-python/tests/unit/test_entity_attribution.py +++ b/memorylayer-core-python/tests/unit/test_entity_attribution.py @@ -7,10 +7,14 @@ - recall filtering by subject_id - recall filtering by both observer_id and subject_id """ + import pytest from memorylayer_server.models.memory import ( - RememberInput, RecallInput, MemoryType, RecallMode, + MemoryType, + RecallInput, + RecallMode, + RememberInput, ) from memorylayer_server.services.memory import MemoryService @@ -20,9 +24,9 @@ class TestEntityAttribution: @pytest.mark.asyncio async def test_remember_with_observer_id( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Storing a memory with observer_id persists the field.""" input_data = RememberInput( @@ 
-38,9 +42,9 @@ async def test_remember_with_observer_id( @pytest.mark.asyncio async def test_remember_with_subject_id( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Storing a memory with subject_id persists the field.""" input_data = RememberInput( @@ -56,9 +60,9 @@ async def test_remember_with_subject_id( @pytest.mark.asyncio async def test_remember_with_both_entity_fields( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Storing a memory with both observer and subject persists both.""" input_data = RememberInput( @@ -75,9 +79,9 @@ async def test_remember_with_both_entity_fields( @pytest.mark.asyncio async def test_recall_filter_by_subject_id( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Recall with subject_id filter returns only matching memories.""" # Create memories about different subjects @@ -86,12 +90,15 @@ async def test_recall_filter_by_subject_id( ("entity-A", "Entity A uses PostgreSQL"), ("entity-B", "Entity B prefers GraphQL"), ]: - await memory_service.remember(workspace_id, RememberInput( - content=content, - type=MemoryType.SEMANTIC, - subject_id=subject, - importance=0.8, - )) + await memory_service.remember( + workspace_id, + RememberInput( + content=content, + type=MemoryType.SEMANTIC, + subject_id=subject, + importance=0.8, + ), + ) # Recall only entity-A memories recall_input = RecallInput( @@ -110,9 +117,9 @@ async def test_recall_filter_by_subject_id( @pytest.mark.asyncio async def test_recall_filter_by_observer_id( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Recall with observer_id filter returns only memories from that observer.""" for observer, content in [ @@ -120,12 +127,15 @@ async def 
test_recall_filter_by_observer_id( ("observer-X", "Observer X noted the user's preference for Vim"), ("observer-Y", "Observer Y saw the user coding in Go"), ]: - await memory_service.remember(workspace_id, RememberInput( - content=content, - type=MemoryType.SEMANTIC, - observer_id=observer, - importance=0.8, - )) + await memory_service.remember( + workspace_id, + RememberInput( + content=content, + type=MemoryType.SEMANTIC, + observer_id=observer, + importance=0.8, + ), + ) recall_input = RecallInput( query="what did observer X notice", @@ -142,23 +152,29 @@ async def test_recall_filter_by_observer_id( @pytest.mark.asyncio async def test_recall_without_entity_filter_returns_all( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Recall without entity filters includes all memories regardless of attribution.""" - await memory_service.remember(workspace_id, RememberInput( - content="Unattributed memory about test isolation patterns", - type=MemoryType.SEMANTIC, - importance=0.9, - )) - await memory_service.remember(workspace_id, RememberInput( - content="Attributed memory about test isolation patterns", - type=MemoryType.SEMANTIC, - observer_id="obs-1", - subject_id="subj-1", - importance=0.9, - )) + await memory_service.remember( + workspace_id, + RememberInput( + content="Unattributed memory about test isolation patterns", + type=MemoryType.SEMANTIC, + importance=0.9, + ), + ) + await memory_service.remember( + workspace_id, + RememberInput( + content="Attributed memory about test isolation patterns", + type=MemoryType.SEMANTIC, + observer_id="obs-1", + subject_id="subj-1", + importance=0.9, + ), + ) recall_input = RecallInput( query="test isolation patterns", diff --git a/memorylayer-core-python/tests/unit/test_extraction_service.py b/memorylayer-core-python/tests/unit/test_extraction_service.py index f6c72c8..88733a3 100644 --- 
a/memorylayer-core-python/tests/unit/test_extraction_service.py +++ b/memorylayer-core-python/tests/unit/test_extraction_service.py @@ -1,10 +1,12 @@ """Unit tests for ExtractionService.""" + import pytest + from memorylayer_server.services.extraction import ExtractionCategory from memorylayer_server.services.extraction.default import ( + EXTRACTION_SYSTEM_PROMPT, DefaultExtractionService, ExtractionOptions, - EXTRACTION_SYSTEM_PROMPT, ) @@ -23,7 +25,7 @@ def extraction_service(self): def test_parse_valid_json_response(self, extraction_service): """Test parsing a valid JSON response.""" - response = '''[ + response = """[ { "content": "User is a Python developer at TechCorp", "category": "profile", @@ -36,7 +38,7 @@ def test_parse_valid_json_response(self, extraction_service): "importance": 0.7, "tags": ["testing", "preferences"] } - ]''' + ]""" categories = list(ExtractionCategory) result = extraction_service._parse_llm_response(response, categories) @@ -53,7 +55,7 @@ def test_parse_valid_json_response(self, extraction_service): def test_parse_json_with_markdown_code_block(self, extraction_service): """Test parsing JSON wrapped in markdown code block.""" - response = '''```json + response = """```json [ { "content": "Project Aurora is a microservices migration", @@ -62,7 +64,7 @@ def test_parse_json_with_markdown_code_block(self, extraction_service): "tags": ["project"] } ] -```''' +```""" categories = list(ExtractionCategory) result = extraction_service._parse_llm_response(response, categories) @@ -73,11 +75,11 @@ def test_parse_json_with_markdown_code_block(self, extraction_service): def test_parse_filters_by_category(self, extraction_service): """Test that parsing filters by allowed categories.""" - response = '''[ + response = """[ {"content": "Memory 1", "category": "profile", "importance": 0.8}, {"content": "Memory 2", "category": "events", "importance": 0.7}, {"content": "Memory 3", "category": "cases", "importance": 0.9} - ]''' + ]""" # Only allow profile 
and cases categories = [ExtractionCategory.PROFILE, ExtractionCategory.CASES] @@ -89,9 +91,9 @@ def test_parse_filters_by_category(self, extraction_service): def test_parse_handles_missing_importance(self, extraction_service): """Test that missing importance defaults to 0.6.""" - response = '''[ + response = """[ {"content": "Some memory", "category": "profile"} - ]''' + ]""" categories = list(ExtractionCategory) result = extraction_service._parse_llm_response(response, categories) @@ -101,10 +103,10 @@ def test_parse_handles_missing_importance(self, extraction_service): def test_parse_clamps_importance(self, extraction_service): """Test that importance is clamped to [0, 1].""" - response = '''[ + response = """[ {"content": "Memory 1", "category": "profile", "importance": 1.5}, {"content": "Memory 2", "category": "profile", "importance": -0.5} - ]''' + ]""" categories = list(ExtractionCategory) result = extraction_service._parse_llm_response(response, categories) @@ -132,11 +134,11 @@ def test_parse_handles_non_array_json(self, extraction_service): def test_parse_skips_invalid_items(self, extraction_service): """Test that invalid items are skipped.""" - response = '''[ + response = """[ {"content": "Valid", "category": "profile", "importance": 0.8}, {"invalid": "Missing content and category"}, {"content": "Also valid", "category": "events", "importance": 0.7} - ]''' + ]""" categories = list(ExtractionCategory) result = extraction_service._parse_llm_response(response, categories) @@ -145,10 +147,10 @@ def test_parse_skips_invalid_items(self, extraction_service): def test_parse_handles_unknown_category(self, extraction_service): """Test that unknown categories are skipped.""" - response = '''[ + response = """[ {"content": "Valid", "category": "profile", "importance": 0.8}, {"content": "Unknown", "category": "unknown_category", "importance": 0.7} - ]''' + ]""" categories = list(ExtractionCategory) result = extraction_service._parse_llm_response(response, categories) 
@@ -222,9 +224,7 @@ def test_build_context_with_session_content_only(self, extraction_service): session_content = "User talked about Python." working_memory = {} - result = extraction_service._build_extraction_context( - session_content, working_memory - ) + result = extraction_service._build_extraction_context(session_content, working_memory) assert result == "User talked about Python." @@ -233,9 +233,7 @@ def test_build_context_with_working_memory(self, extraction_service): session_content = "User talked about Python." working_memory = {"current_task": "debugging", "framework": "FastAPI"} - result = extraction_service._build_extraction_context( - session_content, working_memory - ) + result = extraction_service._build_extraction_context(session_content, working_memory) assert "User talked about Python." in result assert "Working Memory:" in result @@ -289,10 +287,7 @@ def test_truncated_mid_object(self, extraction_service): def test_truncated_mid_string_value(self, extraction_service): """Unterminated string in last object should recover earlier objects.""" - raw = ( - '[{"content": "User prefers Python", "type": "semantic"}, ' - '{"content": "User likes testing with pyt' - ) + raw = '[{"content": "User prefers Python", "type": "semantic"}, {"content": "User likes testing with pyt' result = extraction_service._parse_partial_json_array(raw) assert len(result) == 1 assert result[0]["content"] == "User prefers Python" diff --git a/memorylayer-core-python/tests/unit/test_global_workspace.py b/memorylayer-core-python/tests/unit/test_global_workspace.py index 44be500..6c6aab3 100644 --- a/memorylayer-core-python/tests/unit/test_global_workspace.py +++ b/memorylayer-core-python/tests/unit/test_global_workspace.py @@ -1,15 +1,16 @@ """Tests for _global workspace functionality.""" + import pytest import pytest_asyncio + +from memorylayer_server.config import DEFAULT_TENANT_ID, GLOBAL_WORKSPACE_ID from memorylayer_server.models import ( - RememberInput, + MemoryType, 
RecallInput, RecallMode, - MemoryType, + RememberInput, Workspace, - WorkspaceSettings, ) -from memorylayer_server.config import GLOBAL_WORKSPACE_ID, DEFAULT_TENANT_ID @pytest_asyncio.fixture @@ -61,9 +62,7 @@ async def test_recall_input_include_global_can_be_disabled(): @pytest.mark.asyncio -async def test_recall_searches_global_workspace_by_default( - memory_service, test_workspace, global_workspace -): +async def test_recall_searches_global_workspace_by_default(memory_service, test_workspace, global_workspace): """Test that recall searches both workspace and _global by default.""" # Store memory in test workspace workspace_memory = await memory_service.remember( @@ -116,9 +115,7 @@ async def test_recall_searches_global_workspace_by_default( @pytest.mark.asyncio -async def test_recall_can_exclude_global_workspace( - memory_service, test_workspace, global_workspace -): +async def test_recall_can_exclude_global_workspace(memory_service, test_workspace, global_workspace): """Test that recall can exclude _global workspace when include_global=False.""" # Store memory in test workspace workspace_memory = await memory_service.remember( @@ -159,9 +156,7 @@ async def test_recall_can_exclude_global_workspace( @pytest.mark.asyncio -async def test_recall_from_global_workspace_does_not_duplicate( - memory_service, global_workspace -): +async def test_recall_from_global_workspace_does_not_duplicate(memory_service, global_workspace): """Test that searching _global workspace directly doesn't duplicate results.""" # Store memory in _global workspace global_memory = await memory_service.remember( @@ -190,9 +185,7 @@ async def test_recall_from_global_workspace_does_not_duplicate( @pytest.mark.asyncio -async def test_scope_boosts_prioritize_workspace_over_global( - memory_service, test_workspace, global_workspace -): +async def test_scope_boosts_prioritize_workspace_over_global(memory_service, test_workspace, global_workspace): """Test that scope boosts prioritize workspace memories 
over global.""" # Store identical content in both workspaces workspace_memory = await memory_service.remember( @@ -203,7 +196,7 @@ async def test_scope_boosts_prioritize_workspace_over_global( ), ) - global_memory = await memory_service.remember( + await memory_service.remember( workspace_id=GLOBAL_WORKSPACE_ID, input=RememberInput( content="Important information about the project", @@ -230,9 +223,7 @@ async def test_scope_boosts_prioritize_workspace_over_global( @pytest.mark.asyncio -async def test_global_workspace_persists_across_sessions( - storage_backend, global_workspace -): +async def test_global_workspace_persists_across_sessions(storage_backend, global_workspace): """Test that _global workspace can be retrieved.""" retrieved = await storage_backend.get_workspace(GLOBAL_WORKSPACE_ID) assert retrieved is not None diff --git a/memorylayer-core-python/tests/unit/test_global_workspace_simple.py b/memorylayer-core-python/tests/unit/test_global_workspace_simple.py index b3d58fa..8bfbfd5 100644 --- a/memorylayer-core-python/tests/unit/test_global_workspace_simple.py +++ b/memorylayer-core-python/tests/unit/test_global_workspace_simple.py @@ -1,7 +1,9 @@ """Simple tests for _global workspace functionality without full framework.""" + import pytest + +from memorylayer_server.config import DEFAULT_TENANT_ID, GLOBAL_WORKSPACE_ID from memorylayer_server.models import RecallInput -from memorylayer_server.config import GLOBAL_WORKSPACE_ID, DEFAULT_TENANT_ID @pytest.mark.asyncio diff --git a/memorylayer-core-python/tests/unit/test_google_embedding_provider.py b/memorylayer-core-python/tests/unit/test_google_embedding_provider.py index dfe6ff6..508a57f 100644 --- a/memorylayer-core-python/tests/unit/test_google_embedding_provider.py +++ b/memorylayer-core-python/tests/unit/test_google_embedding_provider.py @@ -1,7 +1,9 @@ """Unit tests for Google GenAI embedding provider.""" -import pytest + from unittest.mock import AsyncMock, MagicMock, patch +import pytest + class 
TestGoogleEmbeddingProvider: """Tests for GoogleEmbeddingProvider.""" @@ -9,6 +11,7 @@ class TestGoogleEmbeddingProvider: @pytest.fixture def provider(self): from memorylayer_server.services.embedding.google import GoogleEmbeddingProvider + return GoogleEmbeddingProvider( api_key="test-key", model="gemini-embedding-001", @@ -39,7 +42,7 @@ async def test_embed(self, provider): mock_client.aio.models = mock_aio_models provider._client = mock_client - with patch.object(provider, '_get_config', return_value=MagicMock()): + with patch.object(provider, "_get_config", return_value=MagicMock()): result = await provider.embed("test text") assert result == [0.1, 0.2, 0.3] @@ -68,7 +71,7 @@ async def test_embed_batch(self, provider): provider._client = mock_client texts = ["first", "second", "third"] - with patch.object(provider, '_get_config', return_value=MagicMock()): + with patch.object(provider, "_get_config", return_value=MagicMock()): result = await provider.embed_batch(texts) assert len(result) == 3 @@ -95,13 +98,13 @@ async def test_embed_returns_list_of_floats(self, provider): mock_client.aio.models = mock_aio_models provider._client = mock_client - with patch.object(provider, '_get_config', return_value=MagicMock()): + with patch.object(provider, "_get_config", return_value=MagicMock()): result = await provider.embed("test") assert isinstance(result, list) def test_lazy_client_import_error(self, provider): - with patch.dict('sys.modules', {'google': None, 'google.genai': None}): + with patch.dict("sys.modules", {"google": None, "google.genai": None}): provider._client = None with pytest.raises(ImportError, match="google-genai package not installed"): provider._get_client() @@ -111,12 +114,14 @@ class TestGoogleEmbeddingProviderPlugin: """Tests for GoogleEmbeddingProviderPlugin.""" def test_plugin_provider_name(self): - from memorylayer_server.services.embedding.google import GoogleEmbeddingProviderPlugin from memorylayer_server.config import EmbeddingProviderType + 
from memorylayer_server.services.embedding.google import GoogleEmbeddingProviderPlugin + plugin = GoogleEmbeddingProviderPlugin() assert plugin.PROVIDER_NAME == EmbeddingProviderType.GOOGLE def test_plugin_name(self): from memorylayer_server.services.embedding.google import GoogleEmbeddingProviderPlugin + plugin = GoogleEmbeddingProviderPlugin() - assert 'GOOGLE' in plugin.name() or 'google' in plugin.name() + assert "GOOGLE" in plugin.name() or "google" in plugin.name() diff --git a/memorylayer-core-python/tests/unit/test_hyde_reranker.py b/memorylayer-core-python/tests/unit/test_hyde_reranker.py index f51d116..825037e 100644 --- a/memorylayer-core-python/tests/unit/test_hyde_reranker.py +++ b/memorylayer-core-python/tests/unit/test_hyde_reranker.py @@ -6,18 +6,18 @@ from scitrera_app_framework import Variables from memorylayer_server.config import RerankerProviderType +from memorylayer_server.services.embedding import EXT_EMBEDDING_SERVICE +from memorylayer_server.services.llm import EXT_LLM_SERVICE from memorylayer_server.services.reranker.hyde.provider import ( + HYDE_PROMPT_TEMPLATE, HyDERerankerProvider, HyDERerankerProviderPlugin, - HYDE_PROMPT_TEMPLATE, ) from memorylayer_server.utils import cosine_similarity -from memorylayer_server.services.llm import EXT_LLM_SERVICE -from memorylayer_server.services.embedding import EXT_EMBEDDING_SERVICE - # --- Fixtures --- + @pytest.fixture def mock_v(): """Provide a Variables instance for test provider construction.""" @@ -39,11 +39,13 @@ def mock_embedding_service(): # Hypothetical answer embedding service.embed = AsyncMock(return_value=[1.0, 0.0, 0.0]) # Document embeddings - first doc is similar, second is orthogonal - service.embed_batch = AsyncMock(return_value=[ - [0.9, 0.1, 0.0], # Similar to hyp - [0.0, 0.0, 1.0], # Orthogonal to hyp - [0.5, 0.5, 0.0], # Partially similar - ]) + service.embed_batch = AsyncMock( + return_value=[ + [0.9, 0.1, 0.0], # Similar to hyp + [0.0, 0.0, 1.0], # Orthogonal to hyp + [0.5, 
0.5, 0.0], # Partially similar + ] + ) return service @@ -59,6 +61,7 @@ def provider(mock_v, mock_llm_service, mock_embedding_service): # --- Cosine similarity tests --- + class TestCosineSimilarity: def test_identical_vectors(self): assert cosine_similarity([1.0, 0.0], [1.0, 0.0]) == pytest.approx(1.0) @@ -82,6 +85,7 @@ def test_both_zero_vectors(self): # --- Provider tests --- + class TestHyDERerankerProvider: @pytest.mark.asyncio async def test_rerank_returns_correct_count(self, provider): @@ -141,9 +145,7 @@ async def test_rerank_with_instruction(self, provider, mock_llm_service): assert "my query" in prompt @pytest.mark.asyncio - async def test_rerank_llm_failure_returns_uniform_scores( - self, mock_v, mock_embedding_service - ): + async def test_rerank_llm_failure_returns_uniform_scores(self, mock_v, mock_embedding_service): """When LLM fails, should return uniform 0.5 scores.""" llm = AsyncMock() llm.synthesize = AsyncMock(side_effect=RuntimeError("LLM unavailable")) @@ -157,9 +159,7 @@ async def test_rerank_llm_failure_returns_uniform_scores( assert scores == [0.5, 0.5] @pytest.mark.asyncio - async def test_rerank_embedding_failure_returns_uniform_scores( - self, mock_v, mock_llm_service - ): + async def test_rerank_embedding_failure_returns_uniform_scores(self, mock_v, mock_llm_service): """When embedding fails, should return uniform 0.5 scores.""" emb = AsyncMock() emb.embed = AsyncMock(side_effect=RuntimeError("Embedding unavailable")) @@ -203,6 +203,7 @@ async def test_custom_max_tokens_and_temperature(self, mock_v, mock_llm_service, # --- Plugin tests --- + class TestHyDERerankerPlugin: def test_provider_name(self): plugin = HyDERerankerProviderPlugin() @@ -223,6 +224,7 @@ def test_plugin_declares_dependencies(self): # --- Prompt template tests --- + class TestHyDEPromptTemplate: def test_prompt_contains_query_placeholder(self): assert "{query}" in HYDE_PROMPT_TEMPLATE @@ -235,6 +237,7 @@ def test_prompt_formatted_correctly(self): # --- 
Integration-style test with rerank_with_indices --- + class TestHyDERerankerWithIndices: @pytest.mark.asyncio async def test_rerank_with_indices_sorted_by_score(self, provider): diff --git a/memorylayer-core-python/tests/unit/test_inference_service.py b/memorylayer-core-python/tests/unit/test_inference_service.py index b84461d..a2e7762 100644 --- a/memorylayer-core-python/tests/unit/test_inference_service.py +++ b/memorylayer-core-python/tests/unit/test_inference_service.py @@ -9,15 +9,19 @@ - _parse_insights parses LLM output format - _derive_fallback generates type/subtype-based insights """ + +from datetime import UTC + import pytest +from scitrera_app_framework import get_extension from memorylayer_server.models.memory import ( - RememberInput, MemoryType, MemorySubtype, + MemorySubtype, + MemoryType, + RememberInput, ) +from memorylayer_server.services.inference import EXT_INFERENCE_SERVICE, DefaultInferenceService from memorylayer_server.services.memory import MemoryService -from memorylayer_server.services.inference import DefaultInferenceService, EXT_INFERENCE_SERVICE - -from scitrera_app_framework import get_extension @pytest.fixture @@ -31,9 +35,9 @@ class TestInferenceDerivation: @pytest.mark.asyncio async def test_derive_no_memories_returns_empty( - self, - inference_service: DefaultInferenceService, - workspace_id: str, + self, + inference_service: DefaultInferenceService, + workspace_id: str, ): """Derivation with no source memories returns zero insights.""" result = await inference_service.derive_insights( @@ -48,10 +52,10 @@ async def test_derive_no_memories_returns_empty( @pytest.mark.asyncio async def test_derive_with_fallback_creates_insights( - self, - inference_service: DefaultInferenceService, - memory_service: MemoryService, - workspace_id: str, + self, + inference_service: DefaultInferenceService, + memory_service: MemoryService, + workspace_id: str, ): """Fallback derivation (no LLM) generates type-based insights from multiple memories.""" 
subject = "test-derive-subject" @@ -63,12 +67,15 @@ async def test_derive_with_fallback_creates_insights( "Subject has experience with PostgreSQL", "Subject values clean code practices", ]: - await memory_service.remember(workspace_id, RememberInput( - content=content, - type=MemoryType.SEMANTIC, - subject_id=subject, - importance=0.7, - )) + await memory_service.remember( + workspace_id, + RememberInput( + content=content, + type=MemoryType.SEMANTIC, + subject_id=subject, + importance=0.7, + ), + ) result = await inference_service.derive_insights( workspace_id=workspace_id, @@ -85,10 +92,10 @@ async def test_derive_with_fallback_creates_insights( @pytest.mark.asyncio async def test_derived_insights_are_inference_subtype( - self, - inference_service: DefaultInferenceService, - memory_service: MemoryService, - workspace_id: str, + self, + inference_service: DefaultInferenceService, + memory_service: MemoryService, + workspace_id: str, ): """Derived insights are stored with subtype=INFERENCE.""" subject = "test-subtype-subject" @@ -98,12 +105,15 @@ async def test_derived_insights_are_inference_subtype( "This subject reviews PRs carefully", "This subject mentors junior developers", ]: - await memory_service.remember(workspace_id, RememberInput( - content=content, - type=MemoryType.SEMANTIC, - subject_id=subject, - importance=0.7, - )) + await memory_service.remember( + workspace_id, + RememberInput( + content=content, + type=MemoryType.SEMANTIC, + subject_id=subject, + importance=0.7, + ), + ) result = await inference_service.derive_insights( workspace_id=workspace_id, @@ -122,10 +132,10 @@ class TestGetInsights: @pytest.mark.asyncio async def test_get_insights_returns_derived( - self, - inference_service: DefaultInferenceService, - memory_service: MemoryService, - workspace_id: str, + self, + inference_service: DefaultInferenceService, + memory_service: MemoryService, + workspace_id: str, ): """get_insights returns previously derived insights.""" subject = 
"test-get-insights-subject" @@ -136,12 +146,15 @@ async def test_get_insights_returns_derived( "Subject A uses TDD methodology", "Subject A writes comprehensive unit tests", ]: - await memory_service.remember(workspace_id, RememberInput( - content=content, - type=MemoryType.SEMANTIC, - subject_id=subject, - importance=0.7, - )) + await memory_service.remember( + workspace_id, + RememberInput( + content=content, + type=MemoryType.SEMANTIC, + subject_id=subject, + importance=0.7, + ), + ) derive_result = await inference_service.derive_insights( workspace_id=workspace_id, @@ -166,8 +179,8 @@ async def test_get_insights_returns_derived( @pytest.mark.asyncio async def test_get_insights_empty_for_unknown_subject( - self, - inference_service: DefaultInferenceService, + self, + inference_service: DefaultInferenceService, ): """get_insights returns empty list for unknown subject.""" insights = await inference_service.get_insights( @@ -226,8 +239,9 @@ class TestDeriveFallback: def test_fallback_needs_minimum_memories(self, inference_service: DefaultInferenceService): """Fallback returns empty with fewer than 2 memories.""" + from datetime import datetime + from memorylayer_server.models.memory import Memory - from datetime import datetime, timezone single_memory = Memory( id="mem_1", @@ -237,8 +251,8 @@ def test_fallback_needs_minimum_memories(self, inference_service: DefaultInferen content_hash="abc123", type=MemoryType.SEMANTIC, importance=0.5, - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) results = inference_service._derive_fallback([single_memory], "subj") @@ -246,15 +260,23 @@ def test_fallback_needs_minimum_memories(self, inference_service: DefaultInferen def test_fallback_groups_by_type(self, inference_service: DefaultInferenceService): """Fallback generates insights based on type grouping.""" + from datetime import datetime + from memorylayer_server.models.memory 
import Memory - from datetime import datetime, timezone - now = datetime.now(timezone.utc) + now = datetime.now(UTC) memories = [ - Memory(id=f"mem_{i}", workspace_id="ws", tenant_id="_default", - content=f"Memory {i}", content_hash=f"hash_{i}", - type=MemoryType.SEMANTIC, importance=0.5, - created_at=now, updated_at=now) + Memory( + id=f"mem_{i}", + workspace_id="ws", + tenant_id="_default", + content=f"Memory {i}", + content_hash=f"hash_{i}", + type=MemoryType.SEMANTIC, + importance=0.5, + created_at=now, + updated_at=now, + ) for i in range(5) ] diff --git a/memorylayer-core-python/tests/unit/test_llm_providers.py b/memorylayer-core-python/tests/unit/test_llm_providers.py index 6df1b28..6b69b2f 100644 --- a/memorylayer-core-python/tests/unit/test_llm_providers.py +++ b/memorylayer-core-python/tests/unit/test_llm_providers.py @@ -1,23 +1,28 @@ """Unit tests for Anthropic and Google GenAI LLM providers.""" -import pytest + from unittest.mock import AsyncMock, MagicMock, patch +import pytest + from memorylayer_server.models.llm import ( - LLMMessage, LLMRequest, LLMResponse, LLMRole, LLMStreamChunk, + LLMMessage, + LLMRequest, + LLMResponse, + LLMRole, ) - # ============================================ # Shared fixtures # ============================================ + def _make_request( - messages=None, - model=None, - max_tokens=512, - temperature=0.7, - stop=None, - stream=False, + messages=None, + model=None, + max_tokens=512, + temperature=0.7, + stop=None, + stream=False, ): if messages is None: messages = [ @@ -34,24 +39,28 @@ def _make_request( def _make_system_request(): - return _make_request(messages=[ - LLMMessage(role=LLMRole.SYSTEM, content="You are helpful."), - LLMMessage(role=LLMRole.USER, content="Hi"), - LLMMessage(role=LLMRole.ASSISTANT, content="Hello!"), - LLMMessage(role=LLMRole.USER, content="How are you?"), - ]) + return _make_request( + messages=[ + LLMMessage(role=LLMRole.SYSTEM, content="You are helpful."), + LLMMessage(role=LLMRole.USER, 
content="Hi"), + LLMMessage(role=LLMRole.ASSISTANT, content="Hello!"), + LLMMessage(role=LLMRole.USER, content="How are you?"), + ] + ) # ============================================ # Anthropic Provider Tests # ============================================ + class TestAnthropicLLMProvider: """Tests for AnthropicLLMProvider.""" @pytest.fixture def provider(self): from memorylayer_server.services.llm.anthropic import AnthropicLLMProvider + return AnthropicLLMProvider( api_key="test-key", model="claude-sonnet-4-20250514", @@ -83,11 +92,13 @@ def test_prepare_messages_with_system(self, provider): def test_prepare_messages_multiple_system(self, provider): """Multiple system messages are concatenated.""" - request = _make_request(messages=[ - LLMMessage(role=LLMRole.SYSTEM, content="First system."), - LLMMessage(role=LLMRole.SYSTEM, content="Second system."), - LLMMessage(role=LLMRole.USER, content="Hi"), - ]) + request = _make_request( + messages=[ + LLMMessage(role=LLMRole.SYSTEM, content="First system."), + LLMMessage(role=LLMRole.SYSTEM, content="Second system."), + LLMMessage(role=LLMRole.USER, content="Hi"), + ] + ) system_text, messages = provider._prepare_messages(request) assert system_text == "First system.\nSecond system." 
@@ -257,7 +268,7 @@ async def mock_text_stream(): assert chunks[2].finish_reason == "stop" def test_lazy_client_import_error(self, provider): - with patch.dict('sys.modules', {'anthropic': None}): + with patch.dict("sys.modules", {"anthropic": None}): provider._client = None with pytest.raises(ImportError, match="anthropic package not installed"): provider._get_client() @@ -267,12 +278,14 @@ def test_lazy_client_import_error(self, provider): # Google GenAI Provider Tests # ============================================ + class TestGoogleLLMProvider: """Tests for GoogleLLMProvider.""" @pytest.fixture def provider(self): from memorylayer_server.services.llm.google import GoogleLLMProvider + return GoogleLLMProvider( api_key="test-key", model="gemini-3-flash-preview", @@ -306,11 +319,13 @@ def test_extract_messages_with_system(self, provider): def test_extract_messages_multiple_system(self, provider): """Multiple system messages are concatenated.""" - request = _make_request(messages=[ - LLMMessage(role=LLMRole.SYSTEM, content="First."), - LLMMessage(role=LLMRole.SYSTEM, content="Second."), - LLMMessage(role=LLMRole.USER, content="Hi"), - ]) + request = _make_request( + messages=[ + LLMMessage(role=LLMRole.SYSTEM, content="First."), + LLMMessage(role=LLMRole.SYSTEM, content="Second."), + LLMMessage(role=LLMRole.USER, content="Hi"), + ] + ) system_text, messages = provider._extract_messages(request) assert system_text == "First.\nSecond." 
@@ -330,7 +345,8 @@ def _mock_build(self, provider): mock_contents = MagicMock() mock_config = MagicMock() with patch.object( - provider.__class__, '_build_request', + provider.__class__, + "_build_request", return_value=(mock_contents, mock_config), ) as mock_build: mock_build.mock_contents = mock_contents @@ -516,9 +532,7 @@ async def mock_stream(): assert chunks[2].is_final is True def test_lazy_client_import_error(self, provider): - with patch.dict('sys.modules', {'google': None, 'google.genai': None}): + with patch.dict("sys.modules", {"google": None, "google.genai": None}): provider._client = None with pytest.raises(ImportError, match="google-genai package not installed"): provider._get_client() - - diff --git a/memorylayer-core-python/tests/unit/test_llm_registry.py b/memorylayer-core-python/tests/unit/test_llm_registry.py index 09dcbe4..085eebf 100644 --- a/memorylayer-core-python/tests/unit/test_llm_registry.py +++ b/memorylayer-core-python/tests/unit/test_llm_registry.py @@ -1,26 +1,31 @@ """Unit tests for LLM Provider Registry.""" + import os -import pytest from unittest.mock import AsyncMock, MagicMock, patch +import pytest from scitrera_app_framework.api import Variables +from memorylayer_server.models.llm import ( + LLMMessage, + LLMRequest, + LLMResponse, + LLMRole, + LLMStreamChunk, +) +from memorylayer_server.services.llm.noop import NoOpLLMProvider from memorylayer_server.services.llm.registry import ( - LLMProviderRegistry, DefaultLLMProviderRegistryPlugin, + LLMProviderRegistry, create_provider_from_config, ) from memorylayer_server.services.llm.service_default import LLMService -from memorylayer_server.services.llm.noop import NoOpLLMProvider, LLMNotConfiguredError -from memorylayer_server.models.llm import ( - LLMMessage, LLMRequest, LLMResponse, LLMRole, LLMStreamChunk, -) - # ============================================ # Helper factories # ============================================ + def _mock_provider(name: str = "mock") -> AsyncMock: 
"""Create a mock LLMProvider with standard defaults.""" provider = AsyncMock() @@ -54,6 +59,7 @@ def _make_request() -> LLMRequest: # LLMProviderRegistry Tests # ============================================ + class TestLLMProviderRegistry: """Tests for LLMProviderRegistry.""" @@ -173,6 +179,7 @@ def test_profile_map_returns_copy(self): # create_provider_from_config Tests # ============================================ + class TestCreateProviderFromConfig: """Tests for create_provider_from_config factory function.""" @@ -186,6 +193,7 @@ def test_create_openai_provider(self): base_url="https://api.example.com", ) from memorylayer_server.services.llm.openai import OpenAILLMProvider + assert isinstance(provider, OpenAILLMProvider) assert provider.model == "gpt-4o-mini" assert provider.api_key == "test-key" @@ -200,6 +208,7 @@ def test_create_anthropic_provider(self): api_key="test-key", ) from memorylayer_server.services.llm.anthropic import AnthropicLLMProvider + assert isinstance(provider, AnthropicLLMProvider) assert provider.model == "claude-sonnet-4-20250514" assert provider.api_key == "test-key" @@ -213,6 +222,7 @@ def test_create_google_provider(self): api_key="test-key", ) from memorylayer_server.services.llm.google import GoogleLLMProvider + assert isinstance(provider, GoogleLLMProvider) assert provider.model == "gemini-3-flash-preview" assert provider.api_key == "test-key" @@ -251,6 +261,7 @@ def test_create_with_max_tokens(self): # DefaultLLMProviderRegistryPlugin Tests # ============================================ + class TestDefaultLLMProviderRegistryPlugin: """Tests for DefaultLLMProviderRegistryPlugin environment discovery.""" @@ -259,11 +270,15 @@ def _make_v(): """Create a real Variables instance that reads from os.environ.""" return Variables() - @patch.dict(os.environ, { - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER': 'openai', - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL': 'gpt-4o-mini', - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_API_KEY': 'test-key', - }, 
clear=False) + @patch.dict( + os.environ, + { + "MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER": "openai", + "MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL": "gpt-4o-mini", + "MEMORYLAYER_LLM_PROFILE_DEFAULT_API_KEY": "test-key", + }, + clear=False, + ) def test_discover_single_profile(self): """One DEFAULT profile creates one provider.""" plugin = DefaultLLMProviderRegistryPlugin() @@ -275,17 +290,21 @@ def test_discover_single_profile(self): assert "default" in registry.profile_names assert len(registry.profile_names) == 1 - @patch.dict(os.environ, { - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER': 'openai', - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL': 'gpt-4o-mini', - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_API_KEY': 'test-key', - 'MEMORYLAYER_LLM_PROFILE_CHEAP_PROVIDER': 'openai', - 'MEMORYLAYER_LLM_PROFILE_CHEAP_MODEL': 'gpt-4o-mini', - 'MEMORYLAYER_LLM_PROFILE_CHEAP_API_KEY': 'test-key-2', - 'MEMORYLAYER_LLM_PROFILE_REASONING_PROVIDER': 'anthropic', - 'MEMORYLAYER_LLM_PROFILE_REASONING_MODEL': 'claude-sonnet-4-20250514', - 'MEMORYLAYER_LLM_PROFILE_REASONING_API_KEY': 'test-key-3', - }, clear=False) + @patch.dict( + os.environ, + { + "MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER": "openai", + "MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL": "gpt-4o-mini", + "MEMORYLAYER_LLM_PROFILE_DEFAULT_API_KEY": "test-key", + "MEMORYLAYER_LLM_PROFILE_CHEAP_PROVIDER": "openai", + "MEMORYLAYER_LLM_PROFILE_CHEAP_MODEL": "gpt-4o-mini", + "MEMORYLAYER_LLM_PROFILE_CHEAP_API_KEY": "test-key-2", + "MEMORYLAYER_LLM_PROFILE_REASONING_PROVIDER": "anthropic", + "MEMORYLAYER_LLM_PROFILE_REASONING_MODEL": "claude-sonnet-4-20250514", + "MEMORYLAYER_LLM_PROFILE_REASONING_API_KEY": "test-key-3", + }, + clear=False, + ) def test_discover_multiple_profiles(self): """DEFAULT + CHEAP + REASONING profiles are all discovered.""" plugin = DefaultLLMProviderRegistryPlugin() @@ -310,12 +329,16 @@ def test_no_profiles_creates_noop_default(self): provider = registry.get_provider("default") assert isinstance(provider, NoOpLLMProvider) - 
@patch.dict(os.environ, { - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER': 'noop', - 'MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL': 'unused', - 'MEMORYLAYER_LLM_ASSIGN_TIER_GENERATION': 'cheap', - 'MEMORYLAYER_LLM_ASSIGN_EMBEDDING': 'fast', - }, clear=False) + @patch.dict( + os.environ, + { + "MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER": "noop", + "MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL": "unused", + "MEMORYLAYER_LLM_ASSIGN_TIER_GENERATION": "cheap", + "MEMORYLAYER_LLM_ASSIGN_EMBEDDING": "fast", + }, + clear=False, + ) def test_assignment_mapping(self): """MEMORYLAYER_LLM_ASSIGN_* creates profile_map entries.""" plugin = DefaultLLMProviderRegistryPlugin() @@ -328,11 +351,15 @@ def test_assignment_mapping(self): assert profile_map["tier_generation"] == "cheap" assert profile_map["embedding"] == "fast" - @patch.dict(os.environ, { - 'MEMORYLAYER_LLM_PROFILE_BAD_MODEL': 'gpt-4o-mini', - 'MEMORYLAYER_LLM_PROFILE_BAD_API_KEY': 'test-key', - # Missing PROVIDER - }, clear=True) + @patch.dict( + os.environ, + { + "MEMORYLAYER_LLM_PROFILE_BAD_MODEL": "gpt-4o-mini", + "MEMORYLAYER_LLM_PROFILE_BAD_API_KEY": "test-key", + # Missing PROVIDER + }, + clear=True, + ) def test_missing_provider_type_skips(self): """Profile without PROVIDER is skipped, falls back to NoOp default.""" plugin = DefaultLLMProviderRegistryPlugin() @@ -344,18 +371,21 @@ def test_missing_provider_type_skips(self): # "bad" profile should be skipped; only default (NoOp) remains provider = registry.get_provider("default") assert isinstance(provider, NoOpLLMProvider) - mock_logger.warning.assert_any_call( - "LLM profile '%s' missing PROVIDER, skipping", "bad" - ) - - @patch.dict(os.environ, { - 'MEMORYLAYER_LLM_PROFILE_BAD_PROVIDER': 'openai', - 'MEMORYLAYER_LLM_PROFILE_BAD_API_KEY': 'test-key', - # Missing MODEL — provider uses its built-in default - }, clear=True) + mock_logger.warning.assert_any_call("LLM profile '%s' missing PROVIDER, skipping", "bad") + + @patch.dict( + os.environ, + { + 
"MEMORYLAYER_LLM_PROFILE_BAD_PROVIDER": "openai", + "MEMORYLAYER_LLM_PROFILE_BAD_API_KEY": "test-key", + # Missing MODEL — provider uses its built-in default + }, + clear=True, + ) def test_missing_model_uses_provider_default(self): """Profile without MODEL uses the provider's built-in default model.""" - from memorylayer_server.services.llm.openai import OpenAILLMProvider, DEFAULT_LLM_OPENAI_MODEL + from memorylayer_server.services.llm.openai import DEFAULT_LLM_OPENAI_MODEL, OpenAILLMProvider + plugin = DefaultLLMProviderRegistryPlugin() mock_v = self._make_v() mock_logger = MagicMock() @@ -372,11 +402,11 @@ def test_converged_config_via_variables(self): """Profiles set directly on Variables instance (no env vars) are discovered.""" v = Variables() # Set profile config directly on Variables — simulates converged config - v.set('MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER', 'noop') - v.set('MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL', 'test-model') - v.set('MEMORYLAYER_LLM_PROFILE_FAST_PROVIDER', 'noop') - v.set('MEMORYLAYER_LLM_PROFILE_FAST_MODEL', 'fast-model') - v.set('MEMORYLAYER_LLM_ASSIGN_EXTRACTION', 'fast') + v.set("MEMORYLAYER_LLM_PROFILE_DEFAULT_PROVIDER", "noop") + v.set("MEMORYLAYER_LLM_PROFILE_DEFAULT_MODEL", "test-model") + v.set("MEMORYLAYER_LLM_PROFILE_FAST_PROVIDER", "noop") + v.set("MEMORYLAYER_LLM_PROFILE_FAST_MODEL", "fast-model") + v.set("MEMORYLAYER_LLM_ASSIGN_EXTRACTION", "fast") plugin = DefaultLLMProviderRegistryPlugin() mock_logger = MagicMock() @@ -393,6 +423,7 @@ def test_converged_config_via_variables(self): # LLMService with Registry Tests # ============================================ + class TestLLMServiceWithRegistry: """Tests for LLMService profile-based routing through registry.""" @@ -446,13 +477,14 @@ async def test_default_profile_when_not_specified(self): default.complete.assert_called_once_with(request) cheap.complete.assert_not_called() - def test_provider_property_returns_default(self): - """service.provider returns default 
provider from registry.""" + def test_default_model_uses_default_profile(self): + """service.default_model delegates to default profile provider.""" default = _mock_provider("default") + default.default_model = "gpt-4o" cheap = _mock_provider("cheap") registry = LLMProviderRegistry( providers={"default": default, "cheap": cheap}, ) service = LLMService(registry=registry) - assert service.provider is default + assert service.default_model == "gpt-4o" diff --git a/memorylayer-core-python/tests/unit/test_local_reranker.py b/memorylayer-core-python/tests/unit/test_local_reranker.py index f300abb..60e5bad 100644 --- a/memorylayer-core-python/tests/unit/test_local_reranker.py +++ b/memorylayer-core-python/tests/unit/test_local_reranker.py @@ -1,9 +1,10 @@ """Unit tests for local sentence-transformers CrossEncoder reranker.""" + import math -import pytest from unittest.mock import MagicMock, patch import numpy as np +import pytest def _sigmoid(x): @@ -16,6 +17,7 @@ class TestLocalRerankerProvider: @pytest.fixture def provider(self): from memorylayer_server.services.reranker.local.provider import LocalRerankerProvider + return LocalRerankerProvider( model_name="cross-encoder/ms-marco-MiniLM-L-6-v2", ) @@ -106,12 +108,12 @@ async def test_rerank_without_instruction(self, provider): @pytest.mark.asyncio async def test_preload_loads_model(self, provider): mock_model = MagicMock() - with patch.object(provider, '_get_model', return_value=mock_model) as mock_get: + with patch.object(provider, "_get_model", return_value=mock_model) as mock_get: await provider.preload() mock_get.assert_called_once() def test_lazy_client_import_error(self, provider): - with patch.dict('sys.modules', {'sentence_transformers': None}): + with patch.dict("sys.modules", {"sentence_transformers": None}): provider._model = None with pytest.raises(ImportError, match="sentence-transformers package not installed"): provider._get_model() @@ -122,14 +124,17 @@ class TestSigmoid: def test_sigmoid_zero(self): 
from memorylayer_server.services.reranker.local.provider import _sigmoid + assert _sigmoid(0.0) == pytest.approx(0.5) def test_sigmoid_large_positive(self): from memorylayer_server.services.reranker.local.provider import _sigmoid + assert _sigmoid(10.0) == pytest.approx(1.0, abs=1e-4) def test_sigmoid_large_negative(self): from memorylayer_server.services.reranker.local.provider import _sigmoid + assert _sigmoid(-10.0) == pytest.approx(0.0, abs=1e-4) @@ -137,12 +142,14 @@ class TestLocalRerankerProviderPlugin: """Tests for LocalRerankerProviderPlugin.""" def test_plugin_provider_name(self): - from memorylayer_server.services.reranker.local.provider import LocalRerankerProviderPlugin from memorylayer_server.config import RerankerProviderType + from memorylayer_server.services.reranker.local.provider import LocalRerankerProviderPlugin + plugin = LocalRerankerProviderPlugin() assert plugin.PROVIDER_NAME == RerankerProviderType.LOCAL def test_plugin_name(self): from memorylayer_server.services.reranker.local.provider import LocalRerankerProviderPlugin + plugin = LocalRerankerProviderPlugin() - assert 'local' in plugin.name().lower() + assert "local" in plugin.name().lower() diff --git a/memorylayer-core-python/tests/unit/test_memory_service.py b/memorylayer-core-python/tests/unit/test_memory_service.py index 0836895..c9d47f6 100644 --- a/memorylayer-core-python/tests/unit/test_memory_service.py +++ b/memorylayer-core-python/tests/unit/test_memory_service.py @@ -7,9 +7,12 @@ - forget: soft and hard delete - decay: importance reduction """ + +from datetime import UTC + import pytest -from memorylayer_server.models.memory import RememberInput, RecallInput, MemoryType, RecallMode, SearchTolerance +from memorylayer_server.models.memory import MemoryType, RecallInput, RecallMode, RememberInput, SearchTolerance from memorylayer_server.services.memory import MemoryService # Mock provider default dimensions @@ -21,9 +24,9 @@ class TestRemember: @pytest.mark.asyncio async def 
test_remember_creates_memory( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that remember creates a new memory with ID.""" input = RememberInput( @@ -41,9 +44,9 @@ async def test_remember_creates_memory( @pytest.mark.asyncio async def test_remember_with_all_fields( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test remember with all optional fields.""" input = RememberInput( @@ -62,9 +65,9 @@ async def test_remember_with_all_fields( @pytest.mark.asyncio async def test_remember_generates_embedding( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that remember generates embedding vector.""" input = RememberInput(content="Memory with embedding") @@ -76,9 +79,9 @@ async def test_remember_generates_embedding( @pytest.mark.asyncio async def test_remember_deduplication( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that duplicate content returns existing memory.""" input = RememberInput(content="Duplicate content") @@ -90,9 +93,9 @@ async def test_remember_deduplication( @pytest.mark.asyncio async def test_remember_generates_content_hash( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that remember generates content hash.""" input = RememberInput(content="Content to hash") @@ -108,53 +111,35 @@ class TestRecall: @pytest.mark.asyncio async def test_recall_finds_similar_memories( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that recall finds semantically similar memories.""" # Store some memories - await memory_service.remember( - workspace_id, - 
RememberInput(content="Python is great for data science") - ) - await memory_service.remember( - workspace_id, - RememberInput(content="JavaScript is used for web development") - ) + await memory_service.remember(workspace_id, RememberInput(content="Python is great for data science")) + await memory_service.remember(workspace_id, RememberInput(content="JavaScript is used for web development")) # Recall related memories (use LOOSE tolerance because mock embeddings are hash-based) result = await memory_service.recall( workspace_id, - RecallInput( - query="programming languages for data analysis", - limit=10, - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + RecallInput(query="programming languages for data analysis", limit=10, tolerance=SearchTolerance.LOOSE, min_relevance=0.0), ) assert len(result.memories) > 0 @pytest.mark.asyncio async def test_recall_with_type_filter( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test recall with memory type filter.""" - await memory_service.remember( - workspace_id, - RememberInput(content="Semantic fact", type=MemoryType.SEMANTIC) - ) - await memory_service.remember( - workspace_id, - RememberInput(content="Episodic event", type=MemoryType.EPISODIC) - ) + await memory_service.remember(workspace_id, RememberInput(content="Semantic fact", type=MemoryType.SEMANTIC)) + await memory_service.remember(workspace_id, RememberInput(content="Episodic event", type=MemoryType.EPISODIC)) result = await memory_service.recall( - workspace_id, - RecallInput(query="fact", types=[MemoryType.SEMANTIC], include_associations=False, traverse_depth=0) + workspace_id, RecallInput(query="fact", types=[MemoryType.SEMANTIC], include_associations=False, traverse_depth=0) ) for memory in result.memories: @@ -162,38 +147,29 @@ async def test_recall_with_type_filter( @pytest.mark.asyncio async def test_recall_respects_limit( - self, - memory_service: MemoryService, 
- workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that recall respects limit parameter.""" # Store multiple memories for i in range(10): - await memory_service.remember( - workspace_id, - RememberInput(content=f"Memory number {i}") - ) + await memory_service.remember(workspace_id, RememberInput(content=f"Memory number {i}")) - result = await memory_service.recall( - workspace_id, - RecallInput(query="memory", limit=5) - ) + result = await memory_service.recall(workspace_id, RecallInput(query="memory", limit=5)) assert len(result.memories) <= 5 @pytest.mark.asyncio async def test_recall_returns_relevance_scores( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that recall includes relevance scores.""" # Use exact same text for memory and query to ensure embedding match content = "Relevant content for query" - await memory_service.remember( - workspace_id, - RememberInput(content=content) - ) + await memory_service.remember(workspace_id, RememberInput(content=content)) # Query with same content to get exact embedding match (similarity=1.0) result = await memory_service.recall( @@ -201,8 +177,8 @@ async def test_recall_returns_relevance_scores( RecallInput( query=content, # Same text ensures identical embedding tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) assert len(result.memories) > 0 @@ -214,15 +190,12 @@ class TestForget: @pytest.mark.asyncio async def test_soft_delete( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test soft delete sets deleted_at.""" - memory = await memory_service.remember( - workspace_id, - RememberInput(content="Memory to forget") - ) + memory = await memory_service.remember(workspace_id, RememberInput(content="Memory to forget")) result = await memory_service.forget(workspace_id, memory.id, 
hard=False) @@ -234,15 +207,12 @@ async def test_soft_delete( @pytest.mark.asyncio async def test_hard_delete( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test hard delete removes from database.""" - memory = await memory_service.remember( - workspace_id, - RememberInput(content="Memory to permanently delete") - ) + memory = await memory_service.remember(workspace_id, RememberInput(content="Memory to permanently delete")) result = await memory_service.forget(workspace_id, memory.id, hard=True) @@ -250,9 +220,9 @@ async def test_hard_delete( @pytest.mark.asyncio async def test_forget_nonexistent_returns_false( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test forgetting non-existent memory returns False.""" result = await memory_service.forget(workspace_id, "nonexistent_id") @@ -265,15 +235,12 @@ class TestDecay: @pytest.mark.asyncio async def test_decay_reduces_importance( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that decay reduces memory importance.""" - memory = await memory_service.remember( - workspace_id, - RememberInput(content="Important memory", importance=0.8) - ) + memory = await memory_service.remember(workspace_id, RememberInput(content="Important memory", importance=0.8)) updated = await memory_service.decay(workspace_id, memory.id, decay_rate=0.2) @@ -287,9 +254,9 @@ class TestMemoryTypesAndSubtypes: @pytest.mark.asyncio async def test_remember_with_episodic_type( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing episodic memory (specific event).""" from memorylayer_server.models.memory import MemoryType @@ -304,9 +271,9 @@ async def test_remember_with_episodic_type( @pytest.mark.asyncio async def 
test_remember_with_semantic_type( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing semantic memory (fact/concept).""" from memorylayer_server.models.memory import MemoryType @@ -321,9 +288,9 @@ async def test_remember_with_semantic_type( @pytest.mark.asyncio async def test_remember_with_procedural_type( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing procedural memory (how-to).""" from memorylayer_server.models.memory import MemoryType @@ -338,12 +305,12 @@ async def test_remember_with_procedural_type( @pytest.mark.asyncio async def test_remember_with_solution_subtype( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing memory with SOLUTION subtype.""" - from memorylayer_server.models.memory import MemoryType, MemorySubtype + from memorylayer_server.models.memory import MemorySubtype, MemoryType input = RememberInput( content="Fixed TypeScript error by adding type annotation", @@ -356,12 +323,12 @@ async def test_remember_with_solution_subtype( @pytest.mark.asyncio async def test_remember_with_problem_subtype( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing memory with PROBLEM subtype.""" - from memorylayer_server.models.memory import MemoryType, MemorySubtype + from memorylayer_server.models.memory import MemorySubtype, MemoryType input = RememberInput( content="Database connection pool exhaustion during load test", @@ -374,12 +341,12 @@ async def test_remember_with_problem_subtype( @pytest.mark.asyncio async def test_remember_with_code_pattern_subtype( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing 
memory with CODE_PATTERN subtype.""" - from memorylayer_server.models.memory import MemoryType, MemorySubtype + from memorylayer_server.models.memory import MemorySubtype, MemoryType input = RememberInput( content="Use factory pattern for creating database connections", @@ -392,12 +359,12 @@ async def test_remember_with_code_pattern_subtype( @pytest.mark.asyncio async def test_remember_with_preference_subtype( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing memory with PREFERENCE subtype.""" - from memorylayer_server.models.memory import MemoryType, MemorySubtype + from memorylayer_server.models.memory import MemorySubtype, MemoryType input = RememberInput( content="User prefers tabs over spaces for indentation", @@ -410,25 +377,20 @@ async def test_remember_with_preference_subtype( @pytest.mark.asyncio async def test_recall_filter_by_single_type( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test recall filtering by single memory type.""" from memorylayer_server.models.memory import MemoryType # Store memories of different types with similar but unique content - await memory_service.remember( - workspace_id, - RememberInput(content="Programming language concepts event", type=MemoryType.EPISODIC) - ) + await memory_service.remember(workspace_id, RememberInput(content="Programming language concepts event", type=MemoryType.EPISODIC)) semantic_memory = await memory_service.remember( - workspace_id, - RememberInput(content="Programming language concepts knowledge", type=MemoryType.SEMANTIC) + workspace_id, RememberInput(content="Programming language concepts knowledge", type=MemoryType.SEMANTIC) ) await memory_service.remember( - workspace_id, - RememberInput(content="Programming language concepts steps", type=MemoryType.PROCEDURAL) + workspace_id, RememberInput(content="Programming language 
concepts steps", type=MemoryType.PROCEDURAL) ) # Recall only semantic memories (use semantic query for higher similarity to semantic memory) @@ -438,8 +400,8 @@ async def test_recall_filter_by_single_type( query="Programming language concepts knowledge", types=[MemoryType.SEMANTIC], tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) assert len(result.memories) > 0 @@ -450,26 +412,17 @@ async def test_recall_filter_by_single_type( @pytest.mark.asyncio async def test_recall_filter_by_multiple_types( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test recall filtering by multiple memory types.""" from memorylayer_server.models.memory import MemoryType # Store memories of different types - await memory_service.remember( - workspace_id, - RememberInput(content="Episodic event", type=MemoryType.EPISODIC) - ) - await memory_service.remember( - workspace_id, - RememberInput(content="Semantic fact", type=MemoryType.SEMANTIC) - ) - await memory_service.remember( - workspace_id, - RememberInput(content="Procedural steps", type=MemoryType.PROCEDURAL) - ) + await memory_service.remember(workspace_id, RememberInput(content="Episodic event", type=MemoryType.EPISODIC)) + await memory_service.remember(workspace_id, RememberInput(content="Semantic fact", type=MemoryType.SEMANTIC)) + await memory_service.remember(workspace_id, RememberInput(content="Procedural steps", type=MemoryType.PROCEDURAL)) # Recall episodic and procedural, exclude semantic result = await memory_service.recall( @@ -481,7 +434,7 @@ async def test_recall_filter_by_multiple_types( min_relevance=0.0, include_associations=False, traverse_depth=0, - ) + ), ) assert len(result.memories) > 0 @@ -490,29 +443,21 @@ async def test_recall_filter_by_multiple_types( @pytest.mark.asyncio async def test_recall_filter_by_subtype( - self, - memory_service: MemoryService, - workspace_id: str, + self, + 
memory_service: MemoryService, + workspace_id: str, ): """Test recall filtering by memory subtype.""" - from memorylayer_server.models.memory import MemoryType, MemorySubtype + from memorylayer_server.models.memory import MemorySubtype, MemoryType # Store memories with different subtypes (use similar content) solution_memory = await memory_service.remember( workspace_id, - RememberInput( - content="How to fix database connection issue", - type=MemoryType.PROCEDURAL, - subtype=MemorySubtype.SOLUTION - ) + RememberInput(content="How to fix database connection issue", type=MemoryType.PROCEDURAL, subtype=MemorySubtype.SOLUTION), ) await memory_service.remember( workspace_id, - RememberInput( - content="How to fix database connection issue", - type=MemoryType.EPISODIC, - subtype=MemorySubtype.PROBLEM - ) + RememberInput(content="How to fix database connection issue", type=MemoryType.EPISODIC, subtype=MemorySubtype.PROBLEM), ) # Recall only solutions (use same query for high similarity) @@ -522,8 +467,8 @@ async def test_recall_filter_by_subtype( query="How to fix database connection issue", subtypes=[MemorySubtype.SOLUTION], tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) assert len(result.memories) > 0 @@ -538,24 +483,15 @@ class TestRecallModes: @pytest.mark.asyncio async def test_recall_rag_mode( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test RAG mode uses pure vector similarity.""" - await memory_service.remember( - workspace_id, - RememberInput(content="Python data structures") - ) + await memory_service.remember(workspace_id, RememberInput(content="Python data structures")) result = await memory_service.recall( - workspace_id, - RecallInput( - query="data structures", - mode=RecallMode.RAG, - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + workspace_id, RecallInput(query="data structures", mode=RecallMode.RAG, 
tolerance=SearchTolerance.LOOSE, min_relevance=0.0) ) assert result.mode_used == RecallMode.RAG @@ -563,15 +499,12 @@ async def test_recall_rag_mode( @pytest.mark.asyncio async def test_recall_llm_mode( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test LLM mode with query rewriting.""" - await memory_service.remember( - workspace_id, - RememberInput(content="Machine learning algorithms") - ) + await memory_service.remember(workspace_id, RememberInput(content="Machine learning algorithms")) result = await memory_service.recall( workspace_id, @@ -580,8 +513,8 @@ async def test_recall_llm_mode( mode=RecallMode.LLM, context=[{"role": "user", "content": "I'm learning about AI"}], tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) assert result.mode_used == RecallMode.LLM @@ -590,17 +523,14 @@ async def test_recall_llm_mode( @pytest.mark.asyncio async def test_recall_hybrid_mode_uses_rag_when_sufficient( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test HYBRID mode uses RAG when results are sufficient.""" # Use identical text for perfect match content = "High quality machine learning content" - await memory_service.remember( - workspace_id, - RememberInput(content=content, importance=0.95) - ) + await memory_service.remember(workspace_id, RememberInput(content=content, importance=0.95)) result = await memory_service.recall( workspace_id, @@ -609,8 +539,8 @@ async def test_recall_hybrid_mode_uses_rag_when_sufficient( mode=RecallMode.HYBRID, rag_threshold=0.8, # High threshold tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) # Should use RAG mode since result is high quality @@ -618,15 +548,12 @@ async def test_recall_hybrid_mode_uses_rag_when_sufficient( @pytest.mark.asyncio async def test_recall_hybrid_mode_falls_back_to_llm( - self, - 
memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test HYBRID mode falls back to LLM when RAG results are insufficient.""" - await memory_service.remember( - workspace_id, - RememberInput(content="Low importance memory", importance=0.1) - ) + await memory_service.remember(workspace_id, RememberInput(content="Low importance memory", importance=0.1)) result = await memory_service.recall( workspace_id, @@ -635,8 +562,8 @@ async def test_recall_hybrid_mode_falls_back_to_llm( mode=RecallMode.HYBRID, rag_threshold=0.9, # Very high threshold tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) # Should fall back to LLM mode due to low quality RAG results @@ -649,29 +576,17 @@ class TestToleranceLevels: @pytest.mark.asyncio async def test_loose_tolerance_returns_more_results( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test LOOSE tolerance has broader matching.""" # Store some memories (use similar content for better matching) - await memory_service.remember( - workspace_id, - RememberInput(content="Programming language design patterns") - ) - await memory_service.remember( - workspace_id, - RememberInput(content="Programming web applications") - ) + await memory_service.remember(workspace_id, RememberInput(content="Programming language design patterns")) + await memory_service.remember(workspace_id, RememberInput(content="Programming web applications")) result = await memory_service.recall( - workspace_id, - RecallInput( - query="Programming", - tolerance=SearchTolerance.LOOSE, - limit=10, - min_relevance=0.0 - ) + workspace_id, RecallInput(query="Programming", tolerance=SearchTolerance.LOOSE, limit=10, min_relevance=0.0) ) # LOOSE tolerance should return results @@ -679,49 +594,36 @@ async def test_loose_tolerance_returns_more_results( @pytest.mark.asyncio async def 
test_moderate_tolerance_balanced( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test MODERATE tolerance provides balanced results.""" - await memory_service.remember( - workspace_id, - RememberInput(content="Python data analysis") - ) + await memory_service.remember(workspace_id, RememberInput(content="Python data analysis")) - result = await memory_service.recall( - workspace_id, - RecallInput( - query="data science", - tolerance=SearchTolerance.MODERATE, - limit=10 - ) - ) + result = await memory_service.recall(workspace_id, RecallInput(query="data science", tolerance=SearchTolerance.MODERATE, limit=10)) # MODERATE is default, should work reasonably assert isinstance(result.memories, list) @pytest.mark.asyncio async def test_strict_tolerance_high_precision( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test STRICT tolerance requires high relevance.""" # Use exact matching text for strict tolerance content = "Machine learning model training" - await memory_service.remember( - workspace_id, - RememberInput(content=content) - ) + await memory_service.remember(workspace_id, RememberInput(content=content)) result = await memory_service.recall( workspace_id, RecallInput( query=content, # Exact match tolerance=SearchTolerance.STRICT, - limit=10 - ) + limit=10, + ), ) # STRICT should find exact matches @@ -733,21 +635,18 @@ class TestAdvancedRecallFeatures: @pytest.mark.asyncio async def test_recall_with_time_range_filter( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test filtering memories by creation time range.""" - from datetime import datetime, timedelta, timezone + from datetime import datetime, timedelta # Store a memory - memory = await memory_service.remember( - workspace_id, - RememberInput(content="Recent 
memory") - ) + await memory_service.remember(workspace_id, RememberInput(content="Recent memory")) # Query with time range - now = datetime.now(timezone.utc) + now = datetime.now(UTC) result = await memory_service.recall( workspace_id, RecallInput( @@ -755,51 +654,27 @@ async def test_recall_with_time_range_filter( created_after=now - timedelta(minutes=5), created_before=now + timedelta(minutes=5), tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) assert len(result.memories) > 0 @pytest.mark.asyncio async def test_recall_with_tag_filter_and_logic( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test filtering by tags with AND logic.""" # Store memories with different tag combinations - await memory_service.remember( - workspace_id, - RememberInput( - content="Python backend code", - tags=["python", "backend"] - ) - ) - await memory_service.remember( - workspace_id, - RememberInput( - content="JavaScript frontend code", - tags=["javascript", "frontend"] - ) - ) - await memory_service.remember( - workspace_id, - RememberInput( - content="Python data science", - tags=["python", "datascience"] - ) - ) + await memory_service.remember(workspace_id, RememberInput(content="Python backend code", tags=["python", "backend"])) + await memory_service.remember(workspace_id, RememberInput(content="JavaScript frontend code", tags=["javascript", "frontend"])) + await memory_service.remember(workspace_id, RememberInput(content="Python data science", tags=["python", "datascience"])) # Query with tag filter (AND logic) result = await memory_service.recall( - workspace_id, - RecallInput( - query="code", - tags=["python", "backend"], - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + workspace_id, RecallInput(query="code", tags=["python", "backend"], tolerance=SearchTolerance.LOOSE, min_relevance=0.0) ) # Should only return memories with both tags @@ -810,16 
+685,13 @@ async def test_recall_with_tag_filter_and_logic( @pytest.mark.asyncio async def test_recall_with_include_associations( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test include_associations flag.""" content = "Memory with associations for testing" - memory = await memory_service.remember( - workspace_id, - RememberInput(content=content) - ) + memory = await memory_service.remember(workspace_id, RememberInput(content=content)) result = await memory_service.recall( workspace_id, @@ -827,8 +699,8 @@ async def test_recall_with_include_associations( query=content, # Use exact query for high similarity include_associations=True, tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) assert len(result.memories) > 0 @@ -839,15 +711,12 @@ async def test_recall_with_include_associations( @pytest.mark.asyncio async def test_recall_with_traverse_depth( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test multi-hop graph traversal depth.""" - await memory_service.remember( - workspace_id, - RememberInput(content="Starting point memory") - ) + await memory_service.remember(workspace_id, RememberInput(content="Starting point memory")) result = await memory_service.recall( workspace_id, @@ -856,25 +725,22 @@ async def test_recall_with_traverse_depth( include_associations=True, traverse_depth=2, # Multi-hop traversal tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + min_relevance=0.0, + ), ) assert len(result.memories) > 0 @pytest.mark.asyncio async def test_recall_with_min_relevance( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test min_relevance threshold filtering.""" # Use exact match for guaranteed high relevance content = "Exact match test content" - await 
memory_service.remember( - workspace_id, - RememberInput(content=content) - ) + await memory_service.remember(workspace_id, RememberInput(content=content)) # Query with high min_relevance result = await memory_service.recall( @@ -882,8 +748,8 @@ async def test_recall_with_min_relevance( RecallInput( query=content, # Exact match min_relevance=0.8, # High threshold - tolerance=SearchTolerance.LOOSE - ) + tolerance=SearchTolerance.LOOSE, + ), ) # Should find exact match @@ -895,30 +761,20 @@ class TestAccessTracking: @pytest.mark.asyncio async def test_access_count_increments_on_recall( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that recall increments access_count.""" # Create a memory content = "Memory to track access" - memory = await memory_service.remember( - workspace_id, - RememberInput(content=content) - ) + memory = await memory_service.remember(workspace_id, RememberInput(content=content)) initial_count = memory.access_count # Recall the memory multiple times for _ in range(3): - await memory_service.recall( - workspace_id, - RecallInput( - query=content, - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) - ) + await memory_service.recall(workspace_id, RecallInput(query=content, tolerance=SearchTolerance.LOOSE, min_relevance=0.0)) # Get updated memory updated_memory = await memory_service.get(workspace_id, memory.id) @@ -929,36 +785,26 @@ async def test_access_count_increments_on_recall( @pytest.mark.asyncio async def test_last_accessed_at_updates_on_recall( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that recall updates last_accessed_at timestamp.""" - from datetime import datetime, timezone import asyncio + from datetime import datetime # Create a memory with unique content content = "Unique memory to track timestamp update for test" - memory = await 
memory_service.remember( - workspace_id, - RememberInput(content=content) - ) + memory = await memory_service.remember(workspace_id, RememberInput(content=content)) # Record time before recall (truncate to seconds for comparison with SQLite) - before_recall = datetime.now(timezone.utc).replace(microsecond=0) + before_recall = datetime.now(UTC).replace(microsecond=0) # Wait to ensure timestamp difference await asyncio.sleep(1) # Recall the memory (use exact match) - result = await memory_service.recall( - workspace_id, - RecallInput( - query=content, - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) - ) + result = await memory_service.recall(workspace_id, RecallInput(query=content, tolerance=SearchTolerance.LOOSE, min_relevance=0.0)) # Verify memory was found in recall assert len(result.memories) > 0 @@ -975,17 +821,14 @@ async def test_last_accessed_at_updates_on_recall( @pytest.mark.asyncio async def test_increment_access_method( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test direct increment_access method.""" import asyncio - memory = await memory_service.remember( - workspace_id, - RememberInput(content="Memory for direct access tracking") - ) + memory = await memory_service.remember(workspace_id, RememberInput(content="Memory for direct access tracking")) # Get initial state initial_memory = await memory_service.get(workspace_id, memory.id) @@ -1011,15 +854,12 @@ class TestBatchOperations: @pytest.mark.asyncio async def test_batch_remember_multiple_memories( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test storing multiple memories in sequence (simulated batch).""" - inputs = [ - RememberInput(content=f"Batch memory {i}") - for i in range(5) - ] + inputs = [RememberInput(content=f"Batch memory {i}") for i in range(5)] memories = [] for input in inputs: @@ -1033,62 +873,41 @@ async 
def test_batch_remember_multiple_memories( @pytest.mark.asyncio async def test_get_memories_by_workspace( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test listing all memories in a workspace.""" # Store several memories for i in range(3): - await memory_service.remember( - workspace_id, - RememberInput(content=f"Workspace memory {i}") - ) + await memory_service.remember(workspace_id, RememberInput(content=f"Workspace memory {i}")) # Get all memories via recall with loose filters result = await memory_service.recall( - workspace_id, - RecallInput( - query="memory", - limit=100, - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0 - ) + workspace_id, RecallInput(query="memory", limit=100, tolerance=SearchTolerance.LOOSE, min_relevance=0.0) ) assert len(result.memories) >= 3 @pytest.mark.asyncio async def test_workspace_isolation( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Test that memories are isolated by workspace.""" workspace1 = "workspace_1" workspace2 = "workspace_2" # Store in workspace 1 - await memory_service.remember( - workspace1, - RememberInput(content="Memory in workspace 1") - ) + await memory_service.remember(workspace1, RememberInput(content="Memory in workspace 1")) # Store in workspace 2 - await memory_service.remember( - workspace2, - RememberInput(content="Memory in workspace 2") - ) + await memory_service.remember(workspace2, RememberInput(content="Memory in workspace 2")) # Recall from workspace 1 should not see workspace 2 memories result1 = await memory_service.recall( - workspace1, - RecallInput( - query="memory", - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0, - include_global=False - ) + workspace1, RecallInput(query="memory", tolerance=SearchTolerance.LOOSE, min_relevance=0.0, include_global=False) ) for memory in result1.memories: @@ -1096,13 +915,7 @@ async 
def test_workspace_isolation( # Recall from workspace 2 should not see workspace 1 memories result2 = await memory_service.recall( - workspace2, - RecallInput( - query="memory", - tolerance=SearchTolerance.LOOSE, - min_relevance=0.0, - include_global=False - ) + workspace2, RecallInput(query="memory", tolerance=SearchTolerance.LOOSE, min_relevance=0.0, include_global=False) ) for memory in result2.memories: @@ -1114,24 +927,21 @@ class TestRecallOverfetch: @pytest.mark.asyncio async def test_recall_rag_overfetches_for_reranker( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Verify overfetch multiplier is applied: storage receives limit * overfetch.""" # Store enough memories to have a pool for i in range(5): - await memory_service.remember( - workspace_id, - RememberInput(content=f"Overfetch test memory number {i}") - ) + await memory_service.remember(workspace_id, RememberInput(content=f"Overfetch test memory number {i}")) # Patch storage.search_memories to capture the limit argument original_search = memory_service.storage.search_memories captured_limits = [] async def capturing_search(*args, **kwargs): - captured_limits.append(kwargs.get('limit')) + captured_limits.append(kwargs.get("limit")) return await original_search(*args, **kwargs) memory_service.storage.search_memories = capturing_search @@ -1146,7 +956,7 @@ async def capturing_search(*args, **kwargs): tolerance=SearchTolerance.LOOSE, min_relevance=0.0, include_global=False, - ) + ), ) # Storage should have been called with limit * recall_overfetch @@ -1158,16 +968,13 @@ async def capturing_search(*args, **kwargs): @pytest.mark.asyncio async def test_recall_result_trimmed_to_requested_limit( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """Even with overfetch, final result respects the requested limit.""" for i in range(10): - await 
memory_service.remember( - workspace_id, - RememberInput(content=f"Trim test memory content {i}") - ) + await memory_service.remember(workspace_id, RememberInput(content=f"Trim test memory content {i}")) requested_limit = 3 result = await memory_service.recall( @@ -1179,36 +986,33 @@ async def test_recall_result_trimmed_to_requested_limit( tolerance=SearchTolerance.LOOSE, min_relevance=0.0, include_global=False, - ) + ), ) assert len(result.memories) <= requested_limit @pytest.mark.asyncio async def test_recall_overfetch_default_value( - self, - memory_service: MemoryService, + self, + memory_service: MemoryService, ): """Default overfetch multiplier should be 3.""" assert memory_service.recall_overfetch == 3 @pytest.mark.asyncio async def test_recall_llm_uses_overfetch_config( - self, - memory_service: MemoryService, - workspace_id: str, + self, + memory_service: MemoryService, + workspace_id: str, ): """LLM recall path should use the config overfetch value, not a hardcoded multiplier.""" - await memory_service.remember( - workspace_id, - RememberInput(content="LLM overfetch test memory") - ) + await memory_service.remember(workspace_id, RememberInput(content="LLM overfetch test memory")) original_search = memory_service.storage.search_memories captured_limits = [] async def capturing_search(*args, **kwargs): - captured_limits.append(kwargs.get('limit')) + captured_limits.append(kwargs.get("limit")) return await original_search(*args, **kwargs) memory_service.storage.search_memories = capturing_search @@ -1223,14 +1027,14 @@ async def capturing_search(*args, **kwargs): tolerance=SearchTolerance.LOOSE, min_relevance=0.0, include_global=False, - ) + ), ) # LLM path calls _recall_rag internally, which applies overfetch assert len(captured_limits) >= 1 - expected_limit = min( - requested_limit * memory_service.recall_overfetch, 50 - ) * memory_service.recall_overfetch # _recall_llm sets limit, then _recall_rag overfetches + ( + min(requested_limit * 
memory_service.recall_overfetch, 50) * memory_service.recall_overfetch + ) # _recall_llm sets limit, then _recall_rag overfetches # The innermost storage call should use overfetched limit from _recall_rag # _recall_llm sets limit=min(5*3, 50)=15, then _recall_rag overfetches 15*3=45 inner_llm_limit = min(requested_limit * memory_service.recall_overfetch, 50) diff --git a/memorylayer-core-python/tests/unit/test_memory_status.py b/memorylayer-core-python/tests/unit/test_memory_status.py index 41c9814..507ce9d 100644 --- a/memorylayer-core-python/tests/unit/test_memory_status.py +++ b/memorylayer-core-python/tests/unit/test_memory_status.py @@ -1,9 +1,14 @@ """ Unit tests for MemoryStatus enum and status/pinned fields. """ + import pytest + from memorylayer_server.models.memory import ( - Memory, MemoryType, MemoryStatus, RememberInput, RecallInput, + Memory, + MemoryStatus, + MemoryType, + RememberInput, ) from memorylayer_server.services.memory import MemoryService from memorylayer_server.services.storage.base import StorageBackend @@ -131,9 +136,7 @@ async def test_update_status_to_archived( workspace_id, RememberInput(content="Archive status test", type=MemoryType.SEMANTIC), ) - updated = await storage_backend.update_memory( - workspace_id, memory.id, status="archived" - ) + updated = await storage_backend.update_memory(workspace_id, memory.id, status="archived") assert updated is not None assert updated.status == MemoryStatus.ARCHIVED @@ -148,9 +151,7 @@ async def test_pinned_field_persists( workspace_id, RememberInput(content="Pinned test memory", type=MemoryType.SEMANTIC), ) - updated = await storage_backend.update_memory( - workspace_id, memory.id, pinned=1 - ) + updated = await storage_backend.update_memory(workspace_id, memory.id, pinned=1) assert updated is not None assert updated.pinned is True @@ -170,13 +171,14 @@ async def test_search_excludes_archived_by_default( ), ) # Archive it - await storage_backend.update_memory( - workspace_id, memory.id, 
status="archived" - ) + await storage_backend.update_memory(workspace_id, memory.id, status="archived") # Search should not find it embedding = (await embedding_service.embed_batch(["Archived search exclusion test unique_xyzzy"]))[0] results = await storage_backend.search_memories( - workspace_id, embedding, limit=50, min_relevance=0.0, + workspace_id, + embedding, + limit=50, + min_relevance=0.0, ) found_ids = [m.id for m, _ in results] assert memory.id not in found_ids @@ -198,13 +200,14 @@ async def test_search_includes_archived_when_requested( ), ) # Archive it - await storage_backend.update_memory( - workspace_id, memory.id, status="archived" - ) + await storage_backend.update_memory(workspace_id, memory.id, status="archived") # Search with include_archived should find it embedding = (await embedding_service.embed_batch(["Archived include test unique_qwerty"]))[0] results = await storage_backend.search_memories( - workspace_id, embedding, limit=50, min_relevance=0.0, + workspace_id, + embedding, + limit=50, + min_relevance=0.0, include_archived=True, ) found_ids = [m.id for m, _ in results] diff --git a/memorylayer-core-python/tests/unit/test_metrics_service.py b/memorylayer-core-python/tests/unit/test_metrics_service.py index 5e3c157..93c6eb5 100644 --- a/memorylayer-core-python/tests/unit/test_metrics_service.py +++ b/memorylayer-core-python/tests/unit/test_metrics_service.py @@ -1,13 +1,13 @@ """ Unit tests for the metrics service — NoopMetricsService and PrometheusMetricsService. 
""" + import time import pytest from memorylayer_server.services.metrics.noop import NoopMetricsService - # ============================================================================ # NoopMetricsService tests # ============================================================================ @@ -112,6 +112,7 @@ def test_multiple_calls_all_succeed(self): def _prometheus_available() -> bool: try: import prometheus_client # noqa: F401 + return True except ImportError: return False @@ -131,49 +132,43 @@ class TestPrometheusMetricsService: def _make_service(self): """Return a PrometheusMetricsService backed by a fresh registry.""" import prometheus_client + from memorylayer_server.services.metrics.prometheus import PrometheusMetricsService registry = prometheus_client.CollectorRegistry() service = PrometheusMetricsService.__new__(PrometheusMetricsService) import threading + service._lock = threading.Lock() service._counters = {} service._histograms = {} service._gauges = {} # Monkey-patch _get_* to use the isolated registry - original_get_counter = PrometheusMetricsService._get_counter - original_get_histogram = PrometheusMetricsService._get_histogram - original_get_gauge = PrometheusMetricsService._get_gauge def _get_counter(self, name, label_names): if name not in self._counters: with self._lock: if name not in self._counters: - self._counters[name] = prometheus_client.Counter( - name, name, list(label_names), registry=registry - ) + self._counters[name] = prometheus_client.Counter(name, name, list(label_names), registry=registry) return self._counters[name] def _get_histogram(self, name, label_names): if name not in self._histograms: with self._lock: if name not in self._histograms: - self._histograms[name] = prometheus_client.Histogram( - name, name, list(label_names), registry=registry - ) + self._histograms[name] = prometheus_client.Histogram(name, name, list(label_names), registry=registry) return self._histograms[name] def _get_gauge(self, name, label_names): if 
name not in self._gauges: with self._lock: if name not in self._gauges: - self._gauges[name] = prometheus_client.Gauge( - name, name, list(label_names), registry=registry - ) + self._gauges[name] = prometheus_client.Gauge(name, name, list(label_names), registry=registry) return self._gauges[name] import types + service._get_counter = types.MethodType(_get_counter, service) service._get_histogram = types.MethodType(_get_histogram, service) service._get_gauge = types.MethodType(_get_gauge, service) @@ -182,9 +177,9 @@ def _get_gauge(self, name, label_names): def test_instantiation_succeeds_when_prometheus_client_available(self): """PrometheusMetricsService can be instantiated when prometheus_client is present.""" - from memorylayer_server.services.metrics.prometheus import PrometheusMetricsService import prometheus_client - registry = prometheus_client.CollectorRegistry() + + prometheus_client.CollectorRegistry() # Use _make_service to avoid global registry pollution service = self._make_service() assert service is not None diff --git a/memorylayer-core-python/tests/unit/test_ontology_service.py b/memorylayer-core-python/tests/unit/test_ontology_service.py index 3e78c99..5e6ccb1 100644 --- a/memorylayer-core-python/tests/unit/test_ontology_service.py +++ b/memorylayer-core-python/tests/unit/test_ontology_service.py @@ -1,9 +1,10 @@ """Unit tests for OntologyService relationship classification.""" -import pytest + from unittest.mock import AsyncMock, MagicMock +import pytest + from memorylayer_server.services.ontology.default import DefaultOntologyService -from memorylayer_server.services.ontology.base import BASE_ONTOLOGY class TestClassifyRelationshipPrefixMatching: diff --git a/memorylayer-core-python/tests/unit/test_phase1_quick_wins.py b/memorylayer-core-python/tests/unit/test_phase1_quick_wins.py index d611250..3fd32f1 100644 --- a/memorylayer-core-python/tests/unit/test_phase1_quick_wins.py +++ b/memorylayer-core-python/tests/unit/test_phase1_quick_wins.py @@ 
-4,34 +4,34 @@ 1b. Already-surfaced filtering (exclude_ids in _recall_rag) 1c. Configurable scope/recency boosts (ScopeBoosts read from config) """ + import math -import pytest -from datetime import datetime, timezone, timedelta -from unittest.mock import AsyncMock, MagicMock +from datetime import UTC, datetime, timedelta +from unittest.mock import AsyncMock +import pytest from scitrera_app_framework import Variables -from memorylayer_server.models.memory import Memory, MemoryType, MemoryStatus, RecallInput -from memorylayer_server.services.memory.default import MemoryService, ScopeBoosts from memorylayer_server.config import ( - DEFAULT_MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS, - MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS, - MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT, DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT, - MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE, DEFAULT_MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE, MEMORYLAYER_FACT_DECOMPOSITION_ENABLED, MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH, + MEMORYLAYER_FRESHNESS_HALF_LIFE_DAYS, + MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT, + MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE, ) +from memorylayer_server.models.memory import Memory, MemoryStatus, MemoryType, RecallInput from memorylayer_server.services.association.base import MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD -from memorylayer_server.services.memory.base import MEMORYLAYER_MEMORY_RECALL_OVERFETCH from memorylayer_server.services.deduplication import DeduplicationAction, DeduplicationResult - +from memorylayer_server.services.memory.base import MEMORYLAYER_MEMORY_RECALL_OVERFETCH +from memorylayer_server.services.memory.default import MemoryService, ScopeBoosts # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- + def _make_v(**overrides) -> Variables: """Create a Variables instance with required defaults for MemoryService.""" v = Variables() @@ -54,10 +54,12 @@ def _make_service(v: 
Variables = None) -> MemoryService: embedding = AsyncMock() embedding.embed = AsyncMock(return_value=[0.1] * 384) dedup = AsyncMock() - dedup.check_duplicate = AsyncMock(return_value=DeduplicationResult( - action=DeduplicationAction.CREATE, - reason="New unique memory", - )) + dedup.check_duplicate = AsyncMock( + return_value=DeduplicationResult( + action=DeduplicationAction.CREATE, + reason="New unique memory", + ) + ) return MemoryService( storage=storage, @@ -75,7 +77,7 @@ def _make_memory( last_accessed_at: datetime = None, boosted_score: float = 0.8, ) -> Memory: - now = datetime.now(timezone.utc) + now = datetime.now(UTC) return Memory( id=memory_id, workspace_id=workspace_id, @@ -96,6 +98,7 @@ def _make_memory( # 1a. Freshness annotation tests # =========================================================================== + class TestAnnotateFreshness: """Tests for MemoryService._annotate_freshness().""" @@ -107,7 +110,7 @@ def test_empty_list_returns_empty(self): def test_fresh_memory_score_near_one(self): """A memory created moments ago should have freshness ~1.0.""" service = _make_service() - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(minutes=5)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(minutes=5)) result = service._annotate_freshness([memory]) assert result[0].freshness_score is not None assert result[0].freshness_score > 0.99 @@ -117,21 +120,21 @@ def test_fresh_memory_score_near_one(self): def test_1_day_old_staleness_mild(self): """A memory 2 days old should have 'mild' staleness warning.""" service = _make_service() - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=2)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=2)) result = service._annotate_freshness([memory]) assert result[0].staleness_warning == "mild" def test_7_day_old_staleness_moderate(self): """A memory 10 days old should have 'moderate' staleness warning.""" service = _make_service() - 
memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=10)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=10)) result = service._annotate_freshness([memory]) assert result[0].staleness_warning == "moderate" def test_30_day_old_staleness_severe(self): """A memory 31 days old should have 'severe' staleness warning.""" service = _make_service() - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=31)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=31)) result = service._annotate_freshness([memory]) assert result[0].staleness_warning == "severe" @@ -140,7 +143,7 @@ def test_half_life_7_days_at_7_days(self): service = _make_service() # Use exactly 7 days age with 7 days half-life half_life = 7.0 - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=7)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=7)) result = service._annotate_freshness([memory], half_life_days=half_life) assert result[0].freshness_score == pytest.approx(0.5, abs=0.02) @@ -149,7 +152,7 @@ def test_exponential_decay_formula(self): service = _make_service() half_life = 7.0 age_days = 14.0 # Two half-lives - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=age_days)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=age_days)) result = service._annotate_freshness([memory], half_life_days=half_life) expected = math.exp(-math.log(2) / half_life * age_days) # ~0.25 assert result[0].freshness_score == pytest.approx(expected, abs=0.02) @@ -159,8 +162,8 @@ def test_access_recency_bonus(self): service = _make_service() half_life = 7.0 # Memory created 5 days ago, accessed 1 hour ago - created = datetime.now(timezone.utc) - timedelta(days=5) - last_accessed = datetime.now(timezone.utc) - timedelta(hours=1) + created = datetime.now(UTC) - timedelta(days=5) + last_accessed = datetime.now(UTC) - timedelta(hours=1) 
memory_with_access = _make_memory(created_at=created, last_accessed_at=last_accessed) memory_no_access = _make_memory(memory_id="mem_2", created_at=created) @@ -175,8 +178,8 @@ def test_access_recency_bonus_capped_at_one(self): """Freshness bonus should not push score above 1.0.""" service = _make_service() # Very fresh memory (score ~1.0) + access bonus should cap at 1.0 - just_created = datetime.now(timezone.utc) - timedelta(seconds=10) - last_accessed = datetime.now(timezone.utc) - timedelta(hours=1) + just_created = datetime.now(UTC) - timedelta(seconds=10) + last_accessed = datetime.now(UTC) - timedelta(hours=1) memory = _make_memory(created_at=just_created, last_accessed_at=last_accessed) result = service._annotate_freshness([memory]) assert result[0].freshness_score <= 1.0 @@ -184,21 +187,21 @@ def test_access_recency_bonus_capped_at_one(self): def test_old_access_no_bonus(self): """Memory accessed more than 24h ago should NOT get the access bonus.""" service = _make_service() - created = datetime.now(timezone.utc) - timedelta(days=3) - last_accessed = datetime.now(timezone.utc) - timedelta(hours=25) # >24h ago + created = datetime.now(UTC) - timedelta(days=3) + last_accessed = datetime.now(UTC) - timedelta(hours=25) # >24h ago memory = _make_memory(created_at=created, last_accessed_at=last_accessed) result = service._annotate_freshness([memory]) # Score should match plain formula (no bonus) half_life = service.freshness_half_life_days - age_days = (datetime.now(timezone.utc) - created).total_seconds() / 86400.0 + age_days = (datetime.now(UTC) - created).total_seconds() / 86400.0 expected = math.exp(-math.log(2) / half_life * age_days) assert result[0].freshness_score == pytest.approx(expected, abs=0.02) def test_age_days_populated(self): """age_days field should reflect the memory's age.""" service = _make_service() - created = datetime.now(timezone.utc) - timedelta(days=5) + created = datetime.now(UTC) - timedelta(days=5) memory = 
_make_memory(created_at=created) result = service._annotate_freshness([memory]) assert result[0].age_days == pytest.approx(5.0, abs=0.05) @@ -211,7 +214,7 @@ def test_configurable_half_life_from_init(self): assert service.freshness_half_life_days == 14.0 # At 7 days age with 14 day half-life, score should be ~0.707 - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=7)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=7)) result = service._annotate_freshness([memory]) assert result[0].freshness_score == pytest.approx(0.707, abs=0.02) @@ -219,9 +222,9 @@ def test_multiple_memories_annotated(self): """All memories in the list should be annotated.""" service = _make_service() memories = [ - _make_memory("m1", created_at=datetime.now(timezone.utc) - timedelta(hours=1)), - _make_memory("m2", created_at=datetime.now(timezone.utc) - timedelta(days=5)), - _make_memory("m3", created_at=datetime.now(timezone.utc) - timedelta(days=35)), + _make_memory("m1", created_at=datetime.now(UTC) - timedelta(hours=1)), + _make_memory("m2", created_at=datetime.now(UTC) - timedelta(days=5)), + _make_memory("m3", created_at=datetime.now(UTC) - timedelta(days=35)), ] result = service._annotate_freshness(memories) assert all(m.freshness_score is not None for m in result) @@ -233,21 +236,21 @@ def test_multiple_memories_annotated(self): def test_staleness_boundary_exactly_1_day(self): """Memory exactly 1 day old should be 'mild' (boundary >= 1.0).""" service = _make_service() - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=1, seconds=1)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=1, seconds=1)) result = service._annotate_freshness([memory]) assert result[0].staleness_warning == "mild" def test_staleness_boundary_exactly_7_days(self): """Memory exactly 7 days old should be 'moderate' (boundary >= 7.0).""" service = _make_service() - memory = 
_make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=7, seconds=1)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=7, seconds=1)) result = service._annotate_freshness([memory]) assert result[0].staleness_warning == "moderate" def test_staleness_boundary_exactly_30_days(self): """Memory exactly 30 days old should be 'severe' (boundary >= 30.0).""" service = _make_service() - memory = _make_memory(created_at=datetime.now(timezone.utc) - timedelta(days=30, seconds=1)) + memory = _make_memory(created_at=datetime.now(UTC) - timedelta(days=30, seconds=1)) result = service._annotate_freshness([memory]) assert result[0].staleness_warning == "severe" @@ -256,6 +259,7 @@ def test_staleness_boundary_exactly_30_days(self): # 1b. exclude_ids filtering tests # =========================================================================== + class TestExcludeIds: """Tests for exclude_ids field on RecallInput and filtering in _recall_rag.""" @@ -274,15 +278,19 @@ async def test_recall_rag_filters_excluded_ids(self): """_recall_rag should filter out memories whose IDs are in exclude_ids.""" service = _make_service() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) mem_a = _make_memory("mem_a", created_at=now) mem_b = _make_memory("mem_b", created_at=now) mem_c = _make_memory("mem_c", created_at=now) # Storage returns all three memories - service.storage.search_memories = AsyncMock(return_value=[ - (mem_a, 0.9), (mem_b, 0.85), (mem_c, 0.8), - ]) + service.storage.search_memories = AsyncMock( + return_value=[ + (mem_a, 0.9), + (mem_b, 0.85), + (mem_c, 0.8), + ] + ) input = RecallInput( query="test query", @@ -307,13 +315,16 @@ async def test_recall_rag_no_exclude_ids_returns_all(self): """_recall_rag should return all results when exclude_ids is empty.""" service = _make_service() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) mem_a = _make_memory("mem_a", created_at=now) mem_b = _make_memory("mem_b", created_at=now) - 
service.storage.search_memories = AsyncMock(return_value=[ - (mem_a, 0.9), (mem_b, 0.85), - ]) + service.storage.search_memories = AsyncMock( + return_value=[ + (mem_a, 0.9), + (mem_b, 0.85), + ] + ) input = RecallInput( query="test query", @@ -337,7 +348,7 @@ async def test_recall_rag_exclude_multiple_ids(self): """_recall_rag should filter out all IDs in exclude_ids.""" service = _make_service() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) memories = [(_make_memory(f"mem_{i}", created_at=now), 0.9 - i * 0.05) for i in range(5)] service.storage.search_memories = AsyncMock(return_value=memories) @@ -366,10 +377,12 @@ async def test_recall_rag_exclude_all_returns_empty(self): """_recall_rag should return empty when all results are excluded.""" service = _make_service() - now = datetime.now(timezone.utc) - service.storage.search_memories = AsyncMock(return_value=[ - (_make_memory("mem_a", created_at=now), 0.9), - ]) + now = datetime.now(UTC) + service.storage.search_memories = AsyncMock( + return_value=[ + (_make_memory("mem_a", created_at=now), 0.9), + ] + ) input = RecallInput( query="test", @@ -391,6 +404,7 @@ async def test_recall_rag_exclude_all_returns_empty(self): # 1c. 
Configurable scope boosts tests # =========================================================================== + class TestConfigurableScopeBoosts: """Tests for configurable scope boosts read from config in MemoryService.__init__.""" @@ -415,13 +429,15 @@ def test_custom_same_workspace_boost(self): def test_apply_scope_boosts_uses_instance_defaults(self): """apply_scope_boosts(boosts=None) should use self.default_scope_boosts.""" - v = _make_v(**{ - MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT: 2.0, - MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE: 1.5, - }) + v = _make_v( + **{ + MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT: 2.0, + MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE: 1.5, + } + ) service = _make_service(v) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) memory = _make_memory("mem_1", workspace_id="ws_test", context_id="ctx_test", created_at=now) memories = [(memory, 0.8)] @@ -441,7 +457,7 @@ def test_apply_scope_boosts_explicit_overrides_instance(self): v = _make_v(**{MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT: 2.0}) service = _make_service(v) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) memory = _make_memory("mem_1", workspace_id="ws_test", context_id="ctx_test", created_at=now) memories = [(memory, 0.8)] @@ -458,13 +474,15 @@ def test_apply_scope_boosts_explicit_overrides_instance(self): def test_scope_boost_same_workspace(self): """Memory from same workspace but different context gets workspace boost.""" - v = _make_v(**{ - MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT: 1.5, - MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE: 1.3, - }) + v = _make_v( + **{ + MEMORYLAYER_SCOPE_BOOST_SAME_CONTEXT: 1.5, + MEMORYLAYER_SCOPE_BOOST_SAME_WORKSPACE: 1.3, + } + ) service = _make_service(v) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) memory = _make_memory("mem_1", workspace_id="ws_test", context_id="ctx_other", created_at=now) memories = [(memory, 0.8)] @@ -483,7 +501,7 @@ def test_scope_boost_no_boost_for_other(self): """Memory from different workspace and context gets 
no boost (factor 1.0).""" service = _make_service() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) memory = _make_memory("mem_1", workspace_id="ws_other", context_id="ctx_other", created_at=now) memories = [(memory, 0.8)] @@ -502,7 +520,7 @@ def test_apply_scope_boosts_on_service_without_init(self): # Create service without going through __init__ (edge case for legacy code) service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) memory = _make_memory("mem_1", workspace_id="ws_test", context_id="ctx_test", created_at=now) memories = [(memory, 0.8)] diff --git a/memorylayer-core-python/tests/unit/test_phase2_quality.py b/memorylayer-core-python/tests/unit/test_phase2_quality.py index e3f6498..3befe3b 100644 --- a/memorylayer-core-python/tests/unit/test_phase2_quality.py +++ b/memorylayer-core-python/tests/unit/test_phase2_quality.py @@ -4,30 +4,35 @@ - 2b: LLM query rewriting gated by config flag - 2c: Trust score computation and annotation """ -import math -import pytest -from datetime import datetime, timezone, timedelta -from unittest.mock import AsyncMock, MagicMock, patch +from datetime import UTC, datetime, timedelta +from unittest.mock import AsyncMock, patch + +import pytest from scitrera_app_framework import Variables -from memorylayer_server.models.memory import ( - Memory, MemoryType, MemoryStatus, RecallInput, RecallResult, RecallMode, -) -from memorylayer_server.services.memory import MemoryService -from memorylayer_server.services.deduplication import DeduplicationAction, DeduplicationResult from memorylayer_server.config import ( - MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED, DEFAULT_MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED, + MEMORYLAYER_LLM_QUERY_REWRITE_ENABLED, +) +from memorylayer_server.models.memory import ( + Memory, + MemoryStatus, + MemoryType, + RecallInput, + RecallMode, + RecallResult, ) from memorylayer_server.services.association.base import MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD 
+from memorylayer_server.services.deduplication import DeduplicationAction, DeduplicationResult +from memorylayer_server.services.memory import MemoryService from memorylayer_server.services.memory.base import MEMORYLAYER_MEMORY_RECALL_OVERFETCH - # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- + def _make_v(**overrides): v = Variables() v.set(MEMORYLAYER_ASSOCIATION_SIMILARITY_THRESHOLD, 0.85) @@ -36,6 +41,7 @@ def _make_v(**overrides): MEMORYLAYER_FACT_DECOMPOSITION_ENABLED, MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH, ) + v.set(MEMORYLAYER_FACT_DECOMPOSITION_ENABLED, True) v.set(MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH, 20) for k, val in overrides.items(): @@ -64,8 +70,7 @@ def _make_memory(memory_id="mem_test", content="original content", **kwargs): return Memory(**defaults) -def _make_service(v=None, storage=None, embedding=None, dedup=None, - tier_gen=None, llm=None, **kwargs): +def _make_service(v=None, storage=None, embedding=None, dedup=None, tier_gen=None, llm=None, **kwargs): if v is None: v = _make_v() if storage is None: @@ -79,10 +84,12 @@ def _make_service(v=None, storage=None, embedding=None, dedup=None, embedding.embed = AsyncMock(return_value=[0.2] * 384) if dedup is None: dedup = AsyncMock() - dedup.check_duplicate = AsyncMock(return_value=DeduplicationResult( - action=DeduplicationAction.CREATE, - reason="new", - )) + dedup.check_duplicate = AsyncMock( + return_value=DeduplicationResult( + action=DeduplicationAction.CREATE, + reason="new", + ) + ) if tier_gen is None: tier_gen = AsyncMock() tier_gen.generate_tiers = AsyncMock(return_value=None) @@ -103,8 +110,8 @@ def _make_service(v=None, storage=None, embedding=None, dedup=None, # 2a: _merge_memories tests # --------------------------------------------------------------------------- -class TestMergeMemories: +class TestMergeMemories: @pytest.mark.asyncio async def 
test_merge_uses_new_content_as_primary(self): """Merged memory should use the new content (not naive concatenation).""" @@ -118,7 +125,7 @@ async def test_merge_uses_new_content_as_primary(self): svc = _make_service(storage=storage, embedding=embedding) - result = await svc._merge_memories( + await svc._merge_memories( workspace_id="ws_test", existing=existing, new_content="new content", @@ -317,8 +324,8 @@ async def test_merge_re_embeds_new_content(self): # 2b: LLM query rewriting tests # --------------------------------------------------------------------------- -class TestQueryRewriting: +class TestQueryRewriting: @pytest.mark.asyncio async def test_query_rewrite_called_when_enabled(self): """When enabled and LLM available, _rewrite_query_with_llm should be called.""" @@ -333,10 +340,17 @@ async def test_query_rewrite_called_when_enabled(self): svc = _make_service(v=v, storage=storage, llm=llm) - with patch.object(svc, '_rewrite_query_with_llm', wraps=svc._rewrite_query_with_llm) as mock_rewrite: - with patch.object(svc, '_recall_rag', return_value=RecallResult( - memories=[], total_count=0, search_latency_ms=0, mode_used=RecallMode.LLM, - )): + with patch.object(svc, "_rewrite_query_with_llm", wraps=svc._rewrite_query_with_llm) as mock_rewrite: + with patch.object( + svc, + "_recall_rag", + return_value=RecallResult( + memories=[], + total_count=0, + search_latency_ms=0, + mode_used=RecallMode.LLM, + ), + ): input_ = RecallInput(query="test query", context=[]) await svc._recall_llm("ws_test", input_, 0.5) @@ -355,10 +369,17 @@ async def test_query_rewrite_skipped_when_disabled(self): svc = _make_service(v=v, storage=storage, llm=llm) - with patch.object(svc, '_rewrite_query_with_llm', wraps=svc._rewrite_query_with_llm) as mock_rewrite: - with patch.object(svc, '_recall_rag', return_value=RecallResult( - memories=[], total_count=0, search_latency_ms=0, mode_used=RecallMode.LLM, - )): + with patch.object(svc, "_rewrite_query_with_llm", 
wraps=svc._rewrite_query_with_llm) as mock_rewrite: + with patch.object( + svc, + "_recall_rag", + return_value=RecallResult( + memories=[], + total_count=0, + search_latency_ms=0, + mode_used=RecallMode.LLM, + ), + ): input_ = RecallInput(query="test query", context=[]) await svc._recall_llm("ws_test", input_, 0.5) @@ -397,21 +418,28 @@ async def test_query_rewrite_serializes_context_list(self): captured_prompt = {} async def capture_synthesize(prompt, **kwargs): - captured_prompt['value'] = prompt + captured_prompt["value"] = prompt return "rewritten query" llm.synthesize = capture_synthesize context = [{"role": "user", "content": "hello"}, {"role": "assistant", "content": "hi"}] - with patch.object(svc, '_recall_rag', return_value=RecallResult( - memories=[], total_count=0, search_latency_ms=0, mode_used=RecallMode.LLM, - )): + with patch.object( + svc, + "_recall_rag", + return_value=RecallResult( + memories=[], + total_count=0, + search_latency_ms=0, + mode_used=RecallMode.LLM, + ), + ): input_ = RecallInput(query="test query", context=context) await svc._recall_llm("ws_test", input_, 0.5) # The LLM synthesize should have been called with context serialized as string - assert 'value' in captured_prompt - assert "user: hello" in captured_prompt['value'] or "hello" in captured_prompt['value'] + assert "value" in captured_prompt + assert "user: hello" in captured_prompt["value"] or "hello" in captured_prompt["value"] def test_llm_query_rewrite_default_enabled(self): """The default value for LLM query rewrite should be True.""" @@ -422,8 +450,8 @@ def test_llm_query_rewrite_default_enabled(self): # 2c: Trust scoring tests # --------------------------------------------------------------------------- -class TestTrustScoring: +class TestTrustScoring: def _fresh_memory(self, **kwargs): """A very recently created memory.""" defaults = dict( @@ -440,7 +468,7 @@ def _fresh_memory(self, **kwargs): access_count=0, decay_factor=1.0, pinned=False, - 
created_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), ) defaults.update(kwargs) return Memory(**defaults) @@ -461,7 +489,7 @@ def _old_memory(self, **kwargs): access_count=0, decay_factor=0.2, pinned=False, - created_at=datetime.now(timezone.utc) - timedelta(days=365), + created_at=datetime.now(UTC) - timedelta(days=365), ) defaults.update(kwargs) return Memory(**defaults) @@ -487,28 +515,28 @@ def test_trust_score_pinned_memory_has_full_verification(self): svc = _make_service() pinned = self._fresh_memory(pinned=True) _, signals = svc._compute_trust_score(pinned) - assert signals['verification'] == 1.0 + assert signals["verification"] == 1.0 def test_trust_score_verified_metadata_has_full_verification(self): """Memory with metadata.verified=True should have verification=1.0.""" svc = _make_service() mem = self._fresh_memory(metadata={"verified": True}) _, signals = svc._compute_trust_score(mem) - assert signals['verification'] == 1.0 + assert signals["verification"] == 1.0 def test_trust_score_unverified_memory_has_half_verification(self): """Unverified, unpinned memory should have verification=0.5.""" svc = _make_service() mem = self._fresh_memory(pinned=False, metadata={}) _, signals = svc._compute_trust_score(mem) - assert signals['verification'] == 0.5 + assert signals["verification"] == 0.5 def test_trust_score_session_source_has_high_reliability(self): """Memory from session commit (source_memory_id set) should have reliability=1.0.""" svc = _make_service() mem = self._fresh_memory(source_memory_id="parent_mem_id") _, signals = svc._compute_trust_score(mem) - assert signals['source_reliability'] == 1.0 + assert signals["source_reliability"] == 1.0 def test_trust_score_manual_memory_has_moderate_reliability(self): """Memory without any source links should have reliability=0.8 (manual).""" @@ -519,7 +547,7 @@ def test_trust_score_manual_memory_has_moderate_reliability(self): source_thread_id=None, ) _, signals = svc._compute_trust_score(mem) - 
assert signals['source_reliability'] == 0.8 + assert signals["source_reliability"] == 0.8 def test_trust_score_extracted_memory_has_lower_reliability(self): """Memory extracted from a document should have reliability=0.6.""" @@ -530,25 +558,25 @@ def test_trust_score_extracted_memory_has_lower_reliability(self): source_thread_id=None, ) _, signals = svc._compute_trust_score(mem) - assert signals['source_reliability'] == 0.6 + assert signals["source_reliability"] == 0.6 def test_trust_score_access_frequency_capped_at_1(self): """access_frequency component should be capped at 1.0.""" svc = _make_service() mem = self._fresh_memory(access_count=100) _, signals = svc._compute_trust_score(mem) - assert signals['access_frequency'] == 1.0 + assert signals["access_frequency"] == 1.0 def test_trust_score_signals_have_all_components(self): """trust_signals should contain all 5 component keys.""" svc = _make_service() mem = self._fresh_memory() _, signals = svc._compute_trust_score(mem) - assert 'freshness' in signals - assert 'access_frequency' in signals - assert 'decay_factor' in signals - assert 'verification' in signals - assert 'source_reliability' in signals + assert "freshness" in signals + assert "access_frequency" in signals + assert "decay_factor" in signals + assert "verification" in signals + assert "source_reliability" in signals def test_annotate_trust_sets_fields_on_memories(self): """_annotate_trust should set trust_score and trust_signals on each memory.""" @@ -570,17 +598,21 @@ async def test_recall_sets_drift_caveat_when_low_trust_memories(self): id="m_old", access_count=0, decay_factor=0.05, - created_at=datetime.now(timezone.utc) - timedelta(days=730), + created_at=datetime.now(UTC) - timedelta(days=730), ) old_mem_with_score = old_mem.model_copy(deep=True) - with patch.object(svc, '_recall_rag', return_value=RecallResult( - memories=[old_mem_with_score], - total_count=1, - search_latency_ms=0, - mode_used=RecallMode.RAG, - )): - with patch.object(svc, 
'increment_access', new_callable=AsyncMock): + with patch.object( + svc, + "_recall_rag", + return_value=RecallResult( + memories=[old_mem_with_score], + total_count=1, + search_latency_ms=0, + mode_used=RecallMode.RAG, + ), + ): + with patch.object(svc, "increment_access", new_callable=AsyncMock): input_ = RecallInput(query="test", include_associations=False) result = await svc.recall("ws_test", input_) @@ -599,16 +631,20 @@ async def test_recall_no_drift_caveat_for_fresh_trusted_memories(self): access_count=5, decay_factor=1.0, pinned=True, - created_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), ) - with patch.object(svc, '_recall_rag', return_value=RecallResult( - memories=[fresh_mem], - total_count=1, - search_latency_ms=0, - mode_used=RecallMode.RAG, - )): - with patch.object(svc, 'increment_access', new_callable=AsyncMock): + with patch.object( + svc, + "_recall_rag", + return_value=RecallResult( + memories=[fresh_mem], + total_count=1, + search_latency_ms=0, + mode_used=RecallMode.RAG, + ), + ): + with patch.object(svc, "increment_access", new_callable=AsyncMock): input_ = RecallInput(query="test", include_associations=False) result = await svc.recall("ws_test", input_) diff --git a/memorylayer-core-python/tests/unit/test_rate_limit_service.py b/memorylayer-core-python/tests/unit/test_rate_limit_service.py index 3913bf7..afdaedf 100644 --- a/memorylayer-core-python/tests/unit/test_rate_limit_service.py +++ b/memorylayer-core-python/tests/unit/test_rate_limit_service.py @@ -1,6 +1,7 @@ """ Unit tests for the rate limit service — RateLimitResult dataclass and NoopRateLimitService. 
""" + import time import pytest @@ -8,7 +9,6 @@ from memorylayer_server.services.rate_limit.base import RateLimitResult from memorylayer_server.services.rate_limit.noop import NoopRateLimitService - # ============================================================================ # RateLimitResult dataclass tests # ============================================================================ diff --git a/memorylayer-core-python/tests/unit/test_recency_boost.py b/memorylayer-core-python/tests/unit/test_recency_boost.py index 23c3304..63d6e5a 100644 --- a/memorylayer-core-python/tests/unit/test_recency_boost.py +++ b/memorylayer-core-python/tests/unit/test_recency_boost.py @@ -4,12 +4,14 @@ Tests the apply_recency_boost() method which applies time-based decay to memory scores based on their updated_at timestamp. """ -import math + +from datetime import UTC, datetime, timedelta + import pytest -from datetime import datetime, timezone, timedelta -from memorylayer_server.services.memory.default import MemoryService -from memorylayer_server.models.memory import Memory, MemoryType + from memorylayer_server.config import DEFAULT_RECENCY_HALF_LIFE_HOURS, DEFAULT_RECENCY_WEIGHT +from memorylayer_server.models.memory import Memory, MemoryType +from memorylayer_server.services.memory.default import MemoryService def create_test_memory( @@ -59,12 +61,10 @@ def test_fresh_memory_minimal_decay(self): # create a minimal instance service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) one_minute_ago = now - timedelta(minutes=1) - memories = [ - create_test_memory("mem_1", boosted_score=0.9, updated_at=one_minute_ago) - ] + memories = [create_test_memory("mem_1", boosted_score=0.9, updated_at=one_minute_ago)] result = service.apply_recency_boost( memories=memories, @@ -86,12 +86,10 @@ def test_old_memory_significant_decay(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) 
thirty_days_ago = now - timedelta(days=30) - memories = [ - create_test_memory("mem_1", boosted_score=0.9, updated_at=thirty_days_ago) - ] + memories = [create_test_memory("mem_1", boosted_score=0.9, updated_at=thirty_days_ago)] result = service.apply_recency_boost( memories=memories, @@ -122,7 +120,7 @@ def test_recency_weight_zero_no_effect(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) old_memory = create_test_memory("mem_1", boosted_score=0.9, updated_at=now - timedelta(days=30)) recent_memory = create_test_memory("mem_2", boosted_score=0.8, updated_at=now - timedelta(minutes=1)) @@ -149,12 +147,10 @@ def test_recency_weight_one_full_effect(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) thirty_days_ago = now - timedelta(days=30) - memories = [ - create_test_memory("mem_1", boosted_score=0.9, updated_at=thirty_days_ago) - ] + memories = [create_test_memory("mem_1", boosted_score=0.9, updated_at=thirty_days_ago)] result = service.apply_recency_boost( memories=memories, @@ -180,19 +176,9 @@ def test_recency_reorders_equal_scores(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) - old_memory = create_test_memory( - "mem_old", - boosted_score=0.8, - updated_at=now - timedelta(days=14), - content="Old memory" - ) - recent_memory = create_test_memory( - "mem_recent", - boosted_score=0.8, - updated_at=now - timedelta(hours=1), - content="Recent memory" - ) + now = datetime.now(UTC) + old_memory = create_test_memory("mem_old", boosted_score=0.8, updated_at=now - timedelta(days=14), content="Old memory") + recent_memory = create_test_memory("mem_recent", boosted_score=0.8, updated_at=now - timedelta(hours=1), content="Recent memory") # Start with old memory first memories = [old_memory, recent_memory] @@ -221,12 +207,10 @@ def test_half_life_correctness(self): service = object.__new__(MemoryService) 
half_life_hours = 168.0 # 7 days - now = datetime.now(timezone.utc) + now = datetime.now(UTC) half_life_ago = now - timedelta(hours=half_life_hours) - memories = [ - create_test_memory("mem_1", boosted_score=1.0, updated_at=half_life_ago) - ] + memories = [create_test_memory("mem_1", boosted_score=1.0, updated_at=half_life_ago)] result = service.apply_recency_boost( memories=memories, @@ -266,7 +250,7 @@ def test_multiple_memories_correct_ordering(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) # Create memories with varying scores and ages memories = [ @@ -297,10 +281,8 @@ def test_negative_recency_weight_treated_as_zero(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) - memories = [ - create_test_memory("mem_1", boosted_score=0.9, updated_at=now - timedelta(days=30)) - ] + now = datetime.now(UTC) + memories = [create_test_memory("mem_1", boosted_score=0.9, updated_at=now - timedelta(days=30))] original_score = memories[0].boosted_score result = service.apply_recency_boost( @@ -321,13 +303,11 @@ def test_custom_half_life(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) + now = datetime.now(UTC) one_week_ago = now - timedelta(days=7) # Test with 7-day half-life (default) - memories_default = [ - create_test_memory("mem_1", boosted_score=1.0, updated_at=one_week_ago) - ] + memories_default = [create_test_memory("mem_1", boosted_score=1.0, updated_at=one_week_ago)] result_default = service.apply_recency_boost( memories=memories_default, recency_weight=1.0, @@ -335,9 +315,7 @@ def test_custom_half_life(self): ) # Test with 14-day half-life (slower decay) - memories_slow = [ - create_test_memory("mem_2", boosted_score=1.0, updated_at=one_week_ago) - ] + memories_slow = [create_test_memory("mem_2", boosted_score=1.0, updated_at=one_week_ago)] result_slow = service.apply_recency_boost( memories=memories_slow, recency_weight=1.0, @@ 
-358,13 +336,8 @@ def test_preserves_other_memory_fields(self): """ service = object.__new__(MemoryService) - now = datetime.now(timezone.utc) - memory = create_test_memory( - "mem_1", - boosted_score=0.8, - updated_at=now - timedelta(days=7), - content="Test content" - ) + now = datetime.now(UTC) + memory = create_test_memory("mem_1", boosted_score=0.8, updated_at=now - timedelta(days=7), content="Test content") # Store original values original_id = memory.id diff --git a/memorylayer-core-python/tests/unit/test_remember_pipeline.py b/memorylayer-core-python/tests/unit/test_remember_pipeline.py index 66255f3..3a83caa 100644 --- a/memorylayer-core-python/tests/unit/test_remember_pipeline.py +++ b/memorylayer-core-python/tests/unit/test_remember_pipeline.py @@ -8,22 +8,26 @@ - _decompose_and_process_inline() decomposes and processes facts inline - FactDecompositionTaskHandler uses ingest_fact() for per-fact pipeline """ -import pytest + from unittest.mock import AsyncMock, MagicMock, patch +import pytest from scitrera_app_framework import Variables from memorylayer_server.models.memory import ( - RememberInput, MemoryType, MemoryStatus, Memory, + Memory, + MemoryStatus, + MemoryType, + RememberInput, ) -from memorylayer_server.services.memory import MemoryService from memorylayer_server.services.deduplication import DeduplicationAction, DeduplicationResult - +from memorylayer_server.services.memory import MemoryService # --------------------------------------------------------------------------- # Fixtures # --------------------------------------------------------------------------- + @pytest.fixture def mock_v(): """Provide a Variables instance for test construction.""" @@ -37,6 +41,7 @@ def mock_v(): from memorylayer_server.services.memory.base import ( MEMORYLAYER_MEMORY_RECALL_OVERFETCH, ) + v = Variables() v.set(MEMORYLAYER_FACT_DECOMPOSITION_ENABLED, True) v.set(MEMORYLAYER_FACT_DECOMPOSITION_MIN_LENGTH, 20) @@ -69,10 +74,12 @@ def mock_embedding(): def 
mock_dedup(): """Mock deduplication service that always returns CREATE.""" service = AsyncMock() - service.check_duplicate = AsyncMock(return_value=DeduplicationResult( - action=DeduplicationAction.CREATE, - reason="New unique memory", - )) + service.check_duplicate = AsyncMock( + return_value=DeduplicationResult( + action=DeduplicationAction.CREATE, + reason="New unique memory", + ) + ) return service @@ -130,8 +137,14 @@ def _make_memory(memory_id="mem_test", content="test content", **kwargs): @pytest.fixture def memory_service_unit( - mock_v, mock_storage, mock_embedding, mock_dedup, - mock_task_service, mock_tier_gen, mock_contradiction, mock_association, + mock_v, + mock_storage, + mock_embedding, + mock_dedup, + mock_task_service, + mock_tier_gen, + mock_contradiction, + mock_association, ): """Construct a MemoryService with all mocked dependencies.""" mem = _make_memory() @@ -155,6 +168,7 @@ def memory_service_unit( # _post_store_pipeline tests # --------------------------------------------------------------------------- + class TestPostStorePipeline: """Tests for _post_store_pipeline().""" @@ -166,7 +180,7 @@ async def test_schedules_auto_association_background(self, memory_service_unit, mock_task_service.schedule_task.assert_called_once() call_args = mock_task_service.schedule_task.call_args - assert call_args[0][0] == 'auto_enrich' + assert call_args[0][0] == "auto_enrich" @pytest.mark.asyncio async def test_runs_inline_association(self, memory_service_unit, mock_task_service, mock_storage): @@ -179,7 +193,7 @@ async def test_runs_inline_association(self, memory_service_unit, mock_task_serv # Should NOT schedule auto_enrich task for call in mock_task_service.schedule_task.call_args_list: - assert call[0][0] != 'auto_enrich' + assert call[0][0] != "auto_enrich" @pytest.mark.asyncio async def test_calls_tier_generation(self, memory_service_unit, mock_tier_gen): @@ -218,7 +232,10 @@ async def test_handles_tier_gen_error_gracefully(self, memory_service_unit, 
mock @pytest.mark.asyncio async def test_handles_association_schedule_failure( - self, memory_service_unit, mock_task_service, mock_storage, + self, + memory_service_unit, + mock_task_service, + mock_storage, ): """If scheduling auto_enrich fails, should fall back to inline.""" mock_task_service.schedule_task = AsyncMock(side_effect=RuntimeError("task service down")) @@ -233,6 +250,7 @@ async def test_handles_association_schedule_failure( # ingest_fact tests # --------------------------------------------------------------------------- + class TestIngestFact: """Tests for ingest_fact().""" @@ -253,11 +271,13 @@ async def test_creates_new_fact(self, memory_service_unit, mock_storage, mock_de @pytest.mark.asyncio async def test_returns_none_on_skip(self, memory_service_unit, mock_dedup): """Should return None when dedup returns SKIP.""" - mock_dedup.check_duplicate = AsyncMock(return_value=DeduplicationResult( - action=DeduplicationAction.SKIP, - existing_memory_id="mem_existing", - reason="Exact duplicate", - )) + mock_dedup.check_duplicate = AsyncMock( + return_value=DeduplicationResult( + action=DeduplicationAction.SKIP, + existing_memory_id="mem_existing", + reason="Exact duplicate", + ) + ) input_data = RememberInput(content="duplicate", type=MemoryType.SEMANTIC) result = await memory_service_unit.ingest_fact("ws_test", input_data) @@ -268,12 +288,14 @@ async def test_returns_none_on_skip(self, memory_service_unit, mock_dedup): async def test_updates_on_dedup_update(self, memory_service_unit, mock_dedup, mock_storage): """Should update existing memory when dedup returns UPDATE.""" updated_mem = _make_memory(memory_id="mem_existing", content="updated") - mock_dedup.check_duplicate = AsyncMock(return_value=DeduplicationResult( - action=DeduplicationAction.UPDATE, - existing_memory_id="mem_existing", - similarity_score=0.96, - reason="Semantic duplicate", - )) + mock_dedup.check_duplicate = AsyncMock( + return_value=DeduplicationResult( + 
action=DeduplicationAction.UPDATE, + existing_memory_id="mem_existing", + similarity_score=0.96, + reason="Semantic duplicate", + ) + ) mock_storage.update_memory = AsyncMock(return_value=updated_mem) input_data = RememberInput(content="updated content", type=MemoryType.SEMANTIC) @@ -292,17 +314,23 @@ async def test_sets_source_memory_id(self, memory_service_unit, mock_storage): input_data = RememberInput(content="decomposed fact", type=MemoryType.SEMANTIC) await memory_service_unit.ingest_fact( - "ws_test", input_data, source_memory_id="mem_parent", + "ws_test", + input_data, + source_memory_id="mem_parent", ) # Should have an update_memory call with source_memory_id update_calls = mock_storage.update_memory.call_args_list - source_calls = [c for c in update_calls if c.kwargs.get('source_memory_id') == 'mem_parent'] + source_calls = [c for c in update_calls if c.kwargs.get("source_memory_id") == "mem_parent"] assert len(source_calls) >= 1 @pytest.mark.asyncio async def test_runs_post_store_pipeline( - self, memory_service_unit, mock_tier_gen, mock_contradiction, mock_storage, + self, + memory_service_unit, + mock_tier_gen, + mock_contradiction, + mock_storage, ): """Should run post-store pipeline after storing fact.""" fact_mem = _make_memory(memory_id="mem_fact") @@ -317,7 +345,10 @@ async def test_runs_post_store_pipeline( @pytest.mark.asyncio async def test_generates_embedding_when_not_provided( - self, memory_service_unit, mock_embedding, mock_storage, + self, + memory_service_unit, + mock_embedding, + mock_storage, ): """Should generate embedding if not provided.""" fact_mem = _make_memory(memory_id="mem_fact") @@ -331,7 +362,10 @@ async def test_generates_embedding_when_not_provided( @pytest.mark.asyncio async def test_uses_provided_embedding( - self, memory_service_unit, mock_embedding, mock_storage, + self, + memory_service_unit, + mock_embedding, + mock_storage, ): """Should use provided embedding without generating a new one.""" fact_mem = 
_make_memory(memory_id="mem_fact") @@ -349,12 +383,16 @@ async def test_uses_provided_embedding( # remember() conditional pipeline tests # --------------------------------------------------------------------------- + class TestRememberConditionalPipeline: """Tests verifying remember() routes correctly based on decomposition.""" @pytest.mark.asyncio async def test_non_decomposable_runs_post_store( - self, memory_service_unit, mock_tier_gen, mock_contradiction, + self, + memory_service_unit, + mock_tier_gen, + mock_contradiction, ): """Non-decomposable memory should run post-store pipeline directly.""" input_data = RememberInput(content="short", type=MemoryType.SEMANTIC) @@ -366,7 +404,12 @@ async def test_non_decomposable_runs_post_store( @pytest.mark.asyncio async def test_decomposable_schedules_decomposition( - self, memory_service_unit, mock_storage, mock_task_service, mock_tier_gen, mock_contradiction, + self, + memory_service_unit, + mock_storage, + mock_task_service, + mock_tier_gen, + mock_contradiction, ): """Decomposable memory should schedule decompose_facts, not run post-store.""" # Content that qualifies for decomposition (multiple sentences, long enough) @@ -381,10 +424,7 @@ async def test_decomposable_schedules_decomposition( await memory_service_unit.remember("ws_test", input_data) # Should have scheduled decompose_facts - decompose_calls = [ - c for c in mock_task_service.schedule_task.call_args_list - if c[0][0] == 'decompose_facts' - ] + decompose_calls = [c for c in mock_task_service.schedule_task.call_args_list if c[0][0] == "decompose_facts"] assert len(decompose_calls) == 1 # Should NOT have run post-store pipeline on the composite @@ -393,7 +433,10 @@ async def test_decomposable_schedules_decomposition( @pytest.mark.asyncio async def test_decomposable_no_auto_association_on_composite( - self, memory_service_unit, mock_storage, mock_task_service, + self, + memory_service_unit, + mock_storage, + mock_task_service, ): """Decomposable memory 
should NOT schedule auto_enrich on the composite.""" content = "First sentence here. Second sentence here. Third for good measure." @@ -407,15 +450,16 @@ async def test_decomposable_no_auto_association_on_composite( await memory_service_unit.remember("ws_test", input_data) # auto_enrich should NOT be scheduled - auto_assoc_calls = [ - c for c in mock_task_service.schedule_task.call_args_list - if c[0][0] == 'auto_enrich' - ] + auto_assoc_calls = [c for c in mock_task_service.schedule_task.call_args_list if c[0][0] == "auto_enrich"] assert len(auto_assoc_calls) == 0 @pytest.mark.asyncio async def test_decompose_failure_falls_back_to_post_store( - self, memory_service_unit, mock_task_service, mock_tier_gen, mock_contradiction, + self, + memory_service_unit, + mock_task_service, + mock_tier_gen, + mock_contradiction, ): """If scheduling decomposition fails, should fall back to post-store pipeline.""" content = "First sentence for decomp. Second sentence for decomp." @@ -423,7 +467,7 @@ async def test_decompose_failure_falls_back_to_post_store( # Make schedule_task fail for decompose_facts async def selective_fail(task_type, payload, **kwargs): - if task_type == 'decompose_facts': + if task_type == "decompose_facts": raise RuntimeError("task service down") return "task_123" @@ -444,7 +488,10 @@ async def test_remember_inline_parameter_accepted(self, memory_service_unit): @pytest.mark.asyncio async def test_working_memory_not_decomposed( - self, memory_service_unit, mock_task_service, mock_tier_gen, + self, + memory_service_unit, + mock_task_service, + mock_tier_gen, ): """WORKING type memories should never be decomposed.""" content = "Currently working on this task. Making good progress on it." 
@@ -453,10 +500,7 @@ async def test_working_memory_not_decomposed( await memory_service_unit.remember("ws_test", input_data) # Should NOT schedule decompose_facts - decompose_calls = [ - c for c in mock_task_service.schedule_task.call_args_list - if c[0][0] == 'decompose_facts' - ] + decompose_calls = [c for c in mock_task_service.schedule_task.call_args_list if c[0][0] == "decompose_facts"] assert len(decompose_calls) == 0 # Should have run post-store pipeline instead @@ -467,6 +511,7 @@ async def test_working_memory_not_decomposed( # FactDecompositionTaskHandler integration tests # --------------------------------------------------------------------------- + class TestFactDecompositionHandlerIntegration: """Tests for FactDecompositionTaskHandler using ingest_fact().""" @@ -492,10 +537,12 @@ async def test_handler_uses_ingest_fact(self): mock_storage.create_association = AsyncMock() mock_extraction = AsyncMock() - mock_extraction.decompose_to_facts = AsyncMock(return_value=[ - {"content": "Drew likes Python"}, - {"content": "Drew uses vim"}, - ]) + mock_extraction.decompose_to_facts = AsyncMock( + return_value=[ + {"content": "Drew likes Python"}, + {"content": "Drew uses vim"}, + ] + ) mock_memory_service = AsyncMock() fact1 = _make_memory(memory_id="mem_fact1", content="Drew likes Python") @@ -503,9 +550,10 @@ async def test_handler_uses_ingest_fact(self): mock_memory_service.ingest_fact = AsyncMock(side_effect=[fact1, fact2]) def get_ext(name, v): - from memorylayer_server.services.storage import EXT_STORAGE_BACKEND from memorylayer_server.services.extraction.base import EXT_EXTRACTION_SERVICE from memorylayer_server.services.memory import EXT_MEMORY_SERVICE + from memorylayer_server.services.storage import EXT_STORAGE_BACKEND + if name == EXT_STORAGE_BACKEND: return mock_storage elif name == EXT_EXTRACTION_SERVICE: @@ -514,27 +562,32 @@ def get_ext(name, v): return mock_memory_service return MagicMock() - with patch.object(handler, 'get_extension', 
side_effect=get_ext): - await handler.handle(mock_v, { - 'memory_id': 'mem_parent', - 'workspace_id': 'ws_test', - }) + with patch.object(handler, "get_extension", side_effect=get_ext): + await handler.handle( + mock_v, + { + "memory_id": "mem_parent", + "workspace_id": "ws_test", + }, + ) # ingest_fact should have been called twice (once per fact) assert mock_memory_service.ingest_fact.call_count == 2 # First call first_call = mock_memory_service.ingest_fact.call_args_list[0] - assert first_call.kwargs['workspace_id'] == 'ws_test' - assert first_call.kwargs['source_memory_id'] == 'mem_parent' - assert first_call.kwargs['input'].content == 'Drew likes Python' + assert first_call.kwargs["workspace_id"] == "ws_test" + assert first_call.kwargs["source_memory_id"] == "mem_parent" + assert first_call.kwargs["input"].content == "Drew likes Python" # PART_OF associations should be created assert mock_storage.create_association.call_count == 2 # Parent should be archived mock_storage.update_memory.assert_called_once_with( - 'ws_test', 'mem_parent', status=MemoryStatus.ARCHIVED.value, + "ws_test", + "mem_parent", + status=MemoryStatus.ARCHIVED.value, ) @pytest.mark.asyncio @@ -553,16 +606,19 @@ async def test_handler_skips_atomic_memory(self): mock_storage.get_memory = AsyncMock(return_value=parent) mock_extraction = AsyncMock() - mock_extraction.decompose_to_facts = AsyncMock(return_value=[ - {"content": "Single fact."}, - ]) + mock_extraction.decompose_to_facts = AsyncMock( + return_value=[ + {"content": "Single fact."}, + ] + ) mock_memory_service = AsyncMock() def get_ext(name, v): - from memorylayer_server.services.storage import EXT_STORAGE_BACKEND from memorylayer_server.services.extraction.base import EXT_EXTRACTION_SERVICE from memorylayer_server.services.memory import EXT_MEMORY_SERVICE + from memorylayer_server.services.storage import EXT_STORAGE_BACKEND + if name == EXT_STORAGE_BACKEND: return mock_storage elif name == EXT_EXTRACTION_SERVICE: @@ -571,11 +627,14 
@@ def get_ext(name, v): return mock_memory_service return MagicMock() - with patch.object(handler, 'get_extension', side_effect=get_ext): - await handler.handle(mock_v, { - 'memory_id': 'mem_atomic', - 'workspace_id': 'ws_test', - }) + with patch.object(handler, "get_extension", side_effect=get_ext): + await handler.handle( + mock_v, + { + "memory_id": "mem_atomic", + "workspace_id": "ws_test", + }, + ) # ingest_fact should NOT have been called mock_memory_service.ingest_fact.assert_not_called() @@ -602,10 +661,12 @@ async def test_handler_handles_dedup_skip_in_fact(self): mock_storage.create_association = AsyncMock() mock_extraction = AsyncMock() - mock_extraction.decompose_to_facts = AsyncMock(return_value=[ - {"content": "Drew likes Python"}, - {"content": "Drew likes Python"}, # Duplicate fact - ]) + mock_extraction.decompose_to_facts = AsyncMock( + return_value=[ + {"content": "Drew likes Python"}, + {"content": "Drew likes Python"}, # Duplicate fact + ] + ) mock_memory_service = AsyncMock() fact1 = _make_memory(memory_id="mem_f1", content="Drew likes Python") @@ -613,9 +674,10 @@ async def test_handler_handles_dedup_skip_in_fact(self): mock_memory_service.ingest_fact = AsyncMock(side_effect=[fact1, None]) def get_ext(name, v): - from memorylayer_server.services.storage import EXT_STORAGE_BACKEND from memorylayer_server.services.extraction.base import EXT_EXTRACTION_SERVICE from memorylayer_server.services.memory import EXT_MEMORY_SERVICE + from memorylayer_server.services.storage import EXT_STORAGE_BACKEND + if name == EXT_STORAGE_BACKEND: return mock_storage elif name == EXT_EXTRACTION_SERVICE: @@ -624,11 +686,14 @@ def get_ext(name, v): return mock_memory_service return MagicMock() - with patch.object(handler, 'get_extension', side_effect=get_ext): - await handler.handle(mock_v, { - 'memory_id': 'mem_parent2', - 'workspace_id': 'ws_test', - }) + with patch.object(handler, "get_extension", side_effect=get_ext): + await handler.handle( + mock_v, + { + 
"memory_id": "mem_parent2", + "workspace_id": "ws_test", + }, + ) # Only 1 PART_OF association (the non-None fact) assert mock_storage.create_association.call_count == 1 @@ -641,44 +706,45 @@ def get_ext(name, v): # classify_type flag tests # --------------------------------------------------------------------------- + class TestClassifyTypeFlag: """Tests for classify_type flag in the auto-enrich pipeline.""" @pytest.mark.asyncio async def test_classify_type_true_in_payload_when_type_auto( - self, memory_service_unit, mock_task_service, + self, + memory_service_unit, + mock_task_service, ): """When input.type is None, classify_type should be True in the task payload.""" input_data = RememberInput(content="short", type=None) await memory_service_unit.remember("ws_test", input_data) - enrich_calls = [ - c for c in mock_task_service.schedule_task.call_args_list - if c[0][0] == 'auto_enrich' - ] + enrich_calls = [c for c in mock_task_service.schedule_task.call_args_list if c[0][0] == "auto_enrich"] assert len(enrich_calls) == 1 payload = enrich_calls[0][0][1] - assert payload['classify_type'] is True + assert payload["classify_type"] is True @pytest.mark.asyncio async def test_classify_type_false_in_payload_when_type_explicit( - self, memory_service_unit, mock_task_service, + self, + memory_service_unit, + mock_task_service, ): """When input.type is explicitly set, classify_type should be False.""" input_data = RememberInput(content="short", type=MemoryType.EPISODIC) await memory_service_unit.remember("ws_test", input_data) - enrich_calls = [ - c for c in mock_task_service.schedule_task.call_args_list - if c[0][0] == 'auto_enrich' - ] + enrich_calls = [c for c in mock_task_service.schedule_task.call_args_list if c[0][0] == "auto_enrich"] assert len(enrich_calls) == 1 payload = enrich_calls[0][0][1] - assert payload['classify_type'] is False + assert payload["classify_type"] is False @pytest.mark.asyncio async def test_inline_classify_type_calls_extraction_service( - 
self, memory_service_unit, mock_storage, + self, + memory_service_unit, + mock_storage, ): """Inline auto-enrich with classify_type=True should call extraction_service.classify_content.""" mock_extraction = AsyncMock() @@ -692,14 +758,20 @@ async def test_inline_classify_type_calls_extraction_service( mem = _make_memory(type=MemoryType.SEMANTIC) await memory_service_unit._post_store_pipeline( - "ws_test", mem, [0.1] * 384, inline=True, classify_type=True, + "ws_test", + mem, + [0.1] * 384, + inline=True, + classify_type=True, ) mock_extraction.classify_content.assert_called_once_with(mem.content) @pytest.mark.asyncio async def test_inline_classify_type_false_skips_extraction( - self, memory_service_unit, mock_storage, + self, + memory_service_unit, + mock_storage, ): """Inline auto-enrich with classify_type=False should NOT call extraction service.""" mock_extraction = AsyncMock() @@ -711,14 +783,20 @@ async def test_inline_classify_type_false_skips_extraction( mem = _make_memory() await memory_service_unit._post_store_pipeline( - "ws_test", mem, [0.1] * 384, inline=True, classify_type=False, + "ws_test", + mem, + [0.1] * 384, + inline=True, + classify_type=False, ) mock_extraction.classify_content.assert_not_called() @pytest.mark.asyncio async def test_inline_classify_type_updates_when_different( - self, memory_service_unit, mock_storage, + self, + memory_service_unit, + mock_storage, ): """When LLM classifies a different type, memory should be updated.""" mock_extraction = AsyncMock() @@ -733,20 +811,23 @@ async def test_inline_classify_type_updates_when_different( mem = _make_memory(type=MemoryType.SEMANTIC) await memory_service_unit._post_store_pipeline( - "ws_test", mem, [0.1] * 384, inline=True, classify_type=True, + "ws_test", + mem, + [0.1] * 384, + inline=True, + classify_type=True, ) # Should have called update_memory with the new type update_calls = mock_storage.update_memory.call_args_list - type_updates = [ - c for c in update_calls - if 
c.kwargs.get('type') == MemoryType.PROCEDURAL.value - ] + type_updates = [c for c in update_calls if c.kwargs.get("type") == MemoryType.PROCEDURAL.value] assert len(type_updates) == 1 @pytest.mark.asyncio async def test_inline_classify_type_graceful_without_extraction_service( - self, memory_service_unit, mock_storage, + self, + memory_service_unit, + mock_storage, ): """classify_type=True should not fail when extraction_service is None.""" memory_service_unit.extraction_service = None @@ -757,5 +838,9 @@ async def test_inline_classify_type_graceful_without_extraction_service( mem = _make_memory() # Should not raise await memory_service_unit._post_store_pipeline( - "ws_test", mem, [0.1] * 384, inline=True, classify_type=True, + "ws_test", + mem, + [0.1] * 384, + inline=True, + classify_type=True, ) diff --git a/memorylayer-core-python/tests/unit/test_rrf_reranker.py b/memorylayer-core-python/tests/unit/test_rrf_reranker.py index fd38a9d..26bbc3b 100644 --- a/memorylayer-core-python/tests/unit/test_rrf_reranker.py +++ b/memorylayer-core-python/tests/unit/test_rrf_reranker.py @@ -6,22 +6,19 @@ from scitrera_app_framework import Variables from memorylayer_server.config import RerankerProviderType +from memorylayer_server.services.embedding import EXT_EMBEDDING_SERVICE from memorylayer_server.services.reranker.rrf.provider import ( RRFRerankerProvider, RRFRerankerProviderPlugin, - decompose_query, - compute_rrf_scores, _extract_keywords, _split_sentences, - DEFAULT_RRF_K, - DEFAULT_RRF_MIN_QUERIES, - MEMORYLAYER_RERANKER_RRF_K, + compute_rrf_scores, + decompose_query, ) -from memorylayer_server.services.embedding import EXT_EMBEDDING_SERVICE - # --- Fixtures --- + @pytest.fixture def mock_v(): """Provide a Variables instance for test provider construction.""" @@ -56,6 +53,7 @@ def _default_embed_batch(texts): # Larger batch: likely documents # Create a spread of vectors import math + angle = (2 * math.pi * i) / len(texts) result.append([math.cos(angle), 
math.sin(angle), 0.0]) return result @@ -72,6 +70,7 @@ def provider(mock_v, mock_embedding_service): # --- Query decomposition tests --- + class TestDecomposeQuery: def test_single_word_query(self): """Single word should produce at least the original query.""" @@ -93,10 +92,7 @@ def test_keywords_extracted(self): # Should have the full query + keywords variant at minimum assert len(result) >= 2 # Keywords variant should not contain stopwords - keywords_found = any( - 'authentication' in sq and 'the' not in sq.split() - for sq in result if sq != query - ) + keywords_found = any("authentication" in sq and "the" not in sq.split() for sq in result if sq != query) assert keywords_found def test_with_instruction(self): @@ -184,6 +180,7 @@ def test_whitespace_only(self): # --- RRF score computation tests --- + class TestComputeRRFScores: def test_single_ranking(self): """Single ranking should produce valid scores.""" @@ -248,13 +245,16 @@ def test_out_of_bounds_indices_ignored(self): # --- Provider tests --- + class TestRRFRerankerProvider: @pytest.mark.asyncio async def test_rerank_returns_correct_count(self, provider, mock_embedding_service): - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], # sub-query embeddings - [[0.9, 0.1, 0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], # doc embeddings - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], # sub-query embeddings + [[0.9, 0.1, 0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], # doc embeddings + ] + ) docs = ["doc one", "doc two", "doc three"] scores = await provider.rerank("test query", docs) assert len(scores) == 3 @@ -267,10 +267,12 @@ async def test_rerank_empty_documents(self, provider, mock_embedding_service): @pytest.mark.asyncio async def test_rerank_scores_in_range(self, provider, mock_embedding_service): - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], - [[0.9, 0.1, 
0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], + [[0.9, 0.1, 0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], + ] + ) docs = ["doc one", "doc two", "doc three"] scores = await provider.rerank("test query", docs) for score in scores: @@ -280,10 +282,12 @@ async def test_rerank_scores_in_range(self, provider, mock_embedding_service): async def test_rerank_similar_doc_scores_higher(self, provider, mock_embedding_service): """Document most similar to query embeddings should score highest.""" # Sub-queries both point toward [1, 0, 0] - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.9, 0.1, 0.0]], # sub-query embeddings - [[0.95, 0.05, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], # docs - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.9, 0.1, 0.0]], # sub-query embeddings + [[0.95, 0.05, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], # docs + ] + ) docs = ["similar doc", "orthogonal doc", "another orthogonal"] scores = await provider.rerank("test query", docs) assert scores[0] > scores[1] @@ -292,20 +296,24 @@ async def test_rerank_similar_doc_scores_higher(self, provider, mock_embedding_s @pytest.mark.asyncio async def test_rerank_calls_embed_batch_twice(self, provider, mock_embedding_service): """Should call embed_batch once for sub-queries, once for documents.""" - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], - [[0.9, 0.1, 0.0]], - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], + [[0.9, 0.1, 0.0]], + ] + ) await provider.rerank("test query", ["doc one"]) assert mock_embedding_service.embed_batch.call_count == 2 @pytest.mark.asyncio async def test_rerank_with_instruction(self, provider, mock_embedding_service): """Instruction should be included in sub-queries.""" - 
mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], - [[0.9, 0.1, 0.0]], - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], + [[0.9, 0.1, 0.0]], + ] + ) await provider.rerank("my query", ["doc"], instruction="Find scientific papers") # First embed_batch call is sub-queries sub_queries_arg = mock_embedding_service.embed_batch.call_args_list[0][0][0] @@ -324,12 +332,16 @@ async def test_rerank_embedding_failure_returns_uniform_scores(self, mock_v): @pytest.mark.asyncio async def test_custom_rrf_k(self, mock_v, mock_embedding_service): """Custom k value should be used in RRF computation.""" - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], - [[0.9, 0.1, 0.0], [0.0, 1.0, 0.0]], - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], + [[0.9, 0.1, 0.0], [0.0, 1.0, 0.0]], + ] + ) provider = RRFRerankerProvider( - v=mock_v, embedding_service=mock_embedding_service, rrf_k=1, + v=mock_v, + embedding_service=mock_embedding_service, + rrf_k=1, ) scores = await provider.rerank("test", ["doc1", "doc2"]) assert len(scores) == 2 @@ -339,10 +351,12 @@ async def test_custom_rrf_k(self, mock_v, mock_embedding_service): @pytest.mark.asyncio async def test_rerank_single_document(self, provider, mock_embedding_service): """Single document should get a valid score.""" - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], - [[0.9, 0.1, 0.0]], - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], + [[0.9, 0.1, 0.0]], + ] + ) scores = await provider.rerank("test query", ["only doc"]) assert len(scores) == 1 assert 0.0 <= scores[0] <= 1.0 @@ -350,13 +364,16 @@ async def test_rerank_single_document(self, provider, mock_embedding_service): # --- Integration-style test with 
rerank_with_indices --- + class TestRRFRerankerWithIndices: @pytest.mark.asyncio async def test_rerank_with_indices_sorted_by_score(self, provider, mock_embedding_service): - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], - [[0.9, 0.1, 0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], + [[0.9, 0.1, 0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], + ] + ) docs = ["similar doc", "orthogonal doc", "partial doc"] results = await provider.rerank_with_indices("test query", docs) scores = [score for _, score in results] @@ -364,10 +381,12 @@ async def test_rerank_with_indices_sorted_by_score(self, provider, mock_embeddin @pytest.mark.asyncio async def test_rerank_with_indices_top_k(self, provider, mock_embedding_service): - mock_embedding_service.embed_batch = AsyncMock(side_effect=[ - [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], - [[0.9, 0.1, 0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], - ]) + mock_embedding_service.embed_batch = AsyncMock( + side_effect=[ + [[1.0, 0.0, 0.0], [0.7, 0.7, 0.0]], + [[0.9, 0.1, 0.0], [0.0, 0.0, 1.0], [0.5, 0.5, 0.0]], + ] + ) docs = ["similar doc", "orthogonal doc", "partial doc"] results = await provider.rerank_with_indices("test query", docs, top_k=2) assert len(results) == 2 @@ -375,6 +394,7 @@ async def test_rerank_with_indices_top_k(self, provider, mock_embedding_service) # --- Plugin tests --- + class TestRRFRerankerPlugin: def test_provider_name(self): plugin = RRFRerankerProviderPlugin() @@ -394,6 +414,7 @@ def test_plugin_declares_embedding_dependency(self): def test_plugin_has_no_llm_dependency(self): """RRF should not depend on LLM service.""" from memorylayer_server.services.llm import EXT_LLM_SERVICE + plugin = RRFRerankerProviderPlugin() v = Variables() deps = plugin.get_dependencies(v) diff --git a/memorylayer-core-python/tests/unit/test_session_memory.py 
b/memorylayer-core-python/tests/unit/test_session_memory.py index f41ceae..94326d6 100644 --- a/memorylayer-core-python/tests/unit/test_session_memory.py +++ b/memorylayer-core-python/tests/unit/test_session_memory.py @@ -7,38 +7,42 @@ - Extraction trigger thresholds (init and growth) - SessionMemorySections model (sections, total_tokens, add_entry, budget enforcement) """ + +from datetime import UTC, datetime, timedelta +from unittest.mock import AsyncMock, MagicMock + import pytest -from unittest.mock import AsyncMock, MagicMock, patch -from datetime import datetime, timezone, timedelta from memorylayer_server.models.memory import ( - SessionMemorySections, - SESSION_MEMORY_SECTION_NAMES, DEFAULT_SESSION_SECTION_TOKEN_BUDGET, + SESSION_MEMORY_SECTION_NAMES, + SessionMemorySections, ) from memorylayer_server.services.session.persistent import PersistentSessionService - # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- + def _make_session(session_id="sess-1", workspace_id="ws-1", metadata=None): """Build a minimal Session-like mock.""" from memorylayer_server.models.session import Session + return Session( id=session_id, workspace_id=workspace_id, tenant_id="tenant-1", context_id="_default", metadata=metadata or {}, - expires_at=datetime.now(timezone.utc) + timedelta(hours=1), + expires_at=datetime.now(UTC) + timedelta(hours=1), ) def _make_working_memory(key, value): """Build a minimal WorkingMemory-like mock.""" from memorylayer_server.models.session import WorkingMemory + return WorkingMemory(session_id="sess-1", key=key, value=value) @@ -46,6 +50,7 @@ def _make_working_memory(key, value): # Token estimation # --------------------------------------------------------------------------- + class TestTokenEstimation: """Tests for PersistentSessionService._estimate_tokens().""" @@ -71,20 +76,24 @@ def test_long_content(self): # touch_session token 
tracking # --------------------------------------------------------------------------- + def _make_variables(token_trigger_init=10000, token_trigger_growth=5000): """Build a mock Variables that returns config values via environ().""" v = MagicMock() + def _environ(key, default=None, type_fn=None): from memorylayer_server.config import ( - MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT, MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH, + MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT, ) + mapping = { MEMORYLAYER_SESSION_TOKEN_TRIGGER_INIT: token_trigger_init, MEMORYLAYER_SESSION_TOKEN_TRIGGER_GROWTH: token_trigger_growth, } val = mapping.get(key, default) return type_fn(val) if type_fn and val is not None else val + v.environ = _environ return v @@ -120,9 +129,9 @@ async def test_cumulative_tokens_stored_in_metadata(self): # update_session should have been called with metadata containing cumulative_tokens call_kwargs = storage.update_session.call_args - metadata = call_kwargs.kwargs.get('metadata') or (call_kwargs.args[3] if len(call_kwargs.args) > 3 else None) + metadata = call_kwargs.kwargs.get("metadata") or (call_kwargs.args[3] if len(call_kwargs.args) > 3 else None) assert metadata is not None - assert metadata['cumulative_tokens'] == 100 + assert metadata["cumulative_tokens"] == 100 @pytest.mark.asyncio async def test_extraction_not_triggered_below_init_threshold(self): @@ -162,19 +171,21 @@ async def test_extraction_triggered_at_init_threshold(self): task_service.schedule_task.assert_called_once() call_args = task_service.schedule_task.call_args - assert call_args.args[0] == 'session_extraction' + assert call_args.args[0] == "session_extraction" payload = call_args.args[1] - assert payload['workspace_id'] == 'ws-1' - assert payload['session_id'] == 'sess-1' + assert payload["workspace_id"] == "ws-1" + assert payload["session_id"] == "sess-1" @pytest.mark.asyncio async def test_extraction_triggered_on_growth_threshold(self): """Extraction is re-triggered after growth threshold 
exceeded.""" # Session has already had one extraction at 1000 tokens - session = _make_session(metadata={ - 'cumulative_tokens': 1000, - 'last_extraction_tokens': 1000, - }) + session = _make_session( + metadata={ + "cumulative_tokens": 1000, + "last_extraction_tokens": 1000, + } + ) # New working memory totals 1600 tokens (600 more than last extraction) wm_entries = [_make_working_memory("key1", "a" * 6400)] # 1600 tokens @@ -191,15 +202,17 @@ async def test_extraction_triggered_on_growth_threshold(self): task_service.schedule_task.assert_called_once() call_args = task_service.schedule_task.call_args - assert call_args.args[0] == 'session_extraction' + assert call_args.args[0] == "session_extraction" @pytest.mark.asyncio async def test_extraction_not_retriggered_below_growth_threshold(self): """No extraction if growth since last extraction is below growth threshold.""" - session = _make_session(metadata={ - 'cumulative_tokens': 1000, - 'last_extraction_tokens': 1000, - }) + session = _make_session( + metadata={ + "cumulative_tokens": 1000, + "last_extraction_tokens": 1000, + } + ) # New total is only 1200 (200 more than last extraction, below 500 growth threshold) wm_entries = [_make_working_memory("key1", "a" * 4800)] # 1200 tokens @@ -250,15 +263,16 @@ async def test_last_extraction_tokens_updated_on_trigger(self): await svc.touch_session("ws-1", "sess-1") call_kwargs = storage.update_session.call_args - metadata = call_kwargs.kwargs.get('metadata') or (call_kwargs.args[3] if len(call_kwargs.args) > 3 else None) + metadata = call_kwargs.kwargs.get("metadata") or (call_kwargs.args[3] if len(call_kwargs.args) > 3 else None) assert metadata is not None - assert metadata.get('last_extraction_tokens') == 1000 + assert metadata.get("last_extraction_tokens") == 1000 # --------------------------------------------------------------------------- # SessionMemorySections model # --------------------------------------------------------------------------- + class 
TestSessionMemorySections: """Tests for the SessionMemorySections Pydantic model.""" @@ -273,14 +287,16 @@ def test_total_tokens_zero_when_empty(self): assert sms.total_tokens == 0 def test_total_tokens_computed_correctly(self): - sms = SessionMemorySections(sections={ - "context": ["a" * 400], # 100 tokens - "decisions": ["b" * 800], # 200 tokens - "learnings": [], - "errors": [], - "progress": [], - "open_items": [], - }) + sms = SessionMemorySections( + sections={ + "context": ["a" * 400], # 100 tokens + "decisions": ["b" * 800], # 200 tokens + "learnings": [], + "errors": [], + "progress": [], + "open_items": [], + } + ) assert sms.total_tokens == 300 def test_add_entry_to_valid_section(self): @@ -332,14 +348,16 @@ def test_custom_section_token_budget(self): assert sms.section_token_budget == 512 def test_sections_can_be_provided_at_construction(self): - sms = SessionMemorySections(sections={ - "context": ["Entry 1", "Entry 2"], - "decisions": [], - "learnings": [], - "errors": [], - "progress": [], - "open_items": [], - }) + sms = SessionMemorySections( + sections={ + "context": ["Entry 1", "Entry 2"], + "decisions": [], + "learnings": [], + "errors": [], + "progress": [], + "open_items": [], + } + ) assert sms.sections["context"] == ["Entry 1", "Entry 2"] def test_total_tokens_updates_after_add_entry(self): diff --git a/memorylayer-core-python/tests/unit/test_session_workspace.py b/memorylayer-core-python/tests/unit/test_session_workspace.py index 8a19e00..7823a9d 100644 --- a/memorylayer-core-python/tests/unit/test_session_workspace.py +++ b/memorylayer-core-python/tests/unit/test_session_workspace.py @@ -11,26 +11,28 @@ - ContextSettings inheritance - Memory modes (explicit vs auto_remember) """ + +from datetime import UTC, datetime, timedelta + import pytest -from datetime import datetime, timedelta, timezone + from memorylayer_server.models.session import ( + ActivitySummary, + Contradiction, + OpenThread, Session, - WorkingMemory, SessionBriefing, + 
WorkingMemory, WorkspaceSummary, - ActivitySummary, - OpenThread, - Contradiction, ) from memorylayer_server.models.workspace import ( - Workspace, Context, - WorkspaceSettings, ContextSettings, + Workspace, + WorkspaceSettings, ) - class TestSession: """Tests for Session model (Section 3.10, 5.5).""" @@ -48,7 +50,7 @@ def test_create_with_ttl_creates_correct_expiration(self): assert session.workspace_id == "ws_test" # Check expiration is approximately ttl_seconds from now - expected_expiration = datetime.now(timezone.utc) + timedelta(seconds=ttl_seconds) + expected_expiration = datetime.now(UTC) + timedelta(seconds=ttl_seconds) # Allow 5 second tolerance for test execution time assert abs((session.expires_at - expected_expiration).total_seconds()) < 5 @@ -200,15 +202,9 @@ def test_session_briefing_creation(self): "total_memories": 150, "recent_memories": 12, }, - recent_activity=[ - {"summary": "Added 5 new memories", "timestamp": datetime.now(timezone.utc).isoformat()} - ], - open_threads=[ - {"topic": "API refactoring", "status": "in_progress"} - ], - contradictions_detected=[ - {"memory_a": "mem_1", "memory_b": "mem_2"} - ], + recent_activity=[{"summary": "Added 5 new memories", "timestamp": datetime.now(UTC).isoformat()}], + open_threads=[{"topic": "API refactoring", "status": "in_progress"}], + contradictions_detected=[{"memory_a": "mem_1", "memory_b": "mem_2"}], ) assert briefing.workspace_summary["total_memories"] == 150 @@ -252,7 +248,7 @@ class TestActivitySummary: def test_activity_summary_creation(self): """Test ActivitySummary model creation.""" - timestamp = datetime.now(timezone.utc) + timestamp = datetime.now(UTC) activity = ActivitySummary( timestamp=timestamp, summary="User added authentication memories", @@ -276,7 +272,7 @@ def test_open_thread_with_status_values(self): thread = OpenThread( topic=f"Test thread {status}", status=status, - last_activity=datetime.now(timezone.utc), + last_activity=datetime.now(UTC), key_memories=["mem_1", "mem_2"], 
) assert thread.status == status @@ -286,7 +282,7 @@ def test_open_thread_with_key_memories(self): thread = OpenThread( topic="Feature implementation", status="in_progress", - last_activity=datetime.now(timezone.utc), + last_activity=datetime.now(UTC), key_memories=["mem_123", "mem_456", "mem_789"], ) diff --git a/memorylayer-core-python/tests/unit/test_storage_backend.py b/memorylayer-core-python/tests/unit/test_storage_backend.py index 2cc9a5d..a846edc 100644 --- a/memorylayer-core-python/tests/unit/test_storage_backend.py +++ b/memorylayer-core-python/tests/unit/test_storage_backend.py @@ -9,21 +9,22 @@ - Workspace storage - Session persistence storage """ + import hashlib +from datetime import UTC, datetime + import pytest -from datetime import datetime, timedelta, timezone -from memorylayer_server.services.storage.sqlite import SQLiteStorageBackend -from memorylayer_server.models.memory import RememberInput, MemoryType, MemorySubtype from memorylayer_server.models.association import AssociateInput +from memorylayer_server.models.memory import MemorySubtype, MemoryType, RememberInput +from memorylayer_server.models.session import Session from memorylayer_server.models.workspace import Workspace -from memorylayer_server.models.session import Session, WorkingMemory +from memorylayer_server.services.storage.sqlite import SQLiteStorageBackend # Embedding dimension constant (must match mock embedding provider) EMBEDDING_DIM = 384 - # ============================================================================ # SQLite Backend Tests # ============================================================================ @@ -72,7 +73,7 @@ async def test_create_memory_stores_correctly(self, storage_backend, workspace_i subtype=MemorySubtype.PREFERENCE, importance=0.8, tags=["programming", "languages"], - metadata={"source": "conversation", "confidence": 0.95} + metadata={"source": "conversation", "confidence": 0.95}, ) memory = await storage_backend.create_memory(workspace_id, 
input_data) @@ -94,11 +95,7 @@ async def test_create_memory_stores_correctly(self, storage_backend, workspace_i async def test_get_memory_retrieves_by_id(self, storage_backend, workspace_id): """Test get_memory() retrieves by ID.""" # Create a memory - input_data = RememberInput( - content="Test memory content", - type=MemoryType.EPISODIC, - importance=0.7 - ) + input_data = RememberInput(content="Test memory content", type=MemoryType.EPISODIC, importance=0.7) created = await storage_backend.create_memory(workspace_id, input_data) # Retrieve it @@ -146,20 +143,12 @@ async def test_get_memory_returns_none_for_deleted(self, storage_backend, worksp async def test_update_memory_modifies_fields(self, storage_backend, workspace_id): """Test update_memory() modifies fields correctly.""" # Create a memory - input_data = RememberInput( - content="Original content", - importance=0.5, - tags=["tag1"] - ) + input_data = RememberInput(content="Original content", importance=0.5, tags=["tag1"]) created = await storage_backend.create_memory(workspace_id, input_data) # Update multiple fields updated = await storage_backend.update_memory( - workspace_id, - created.id, - importance=0.9, - tags=["tag1", "tag2", "tag3"], - metadata={"updated": True} + workspace_id, created.id, importance=0.9, tags=["tag1", "tag2", "tag3"], metadata={"updated": True} ) assert updated is not None @@ -180,11 +169,7 @@ async def test_update_memory_with_embedding(self, storage_backend, workspace_id) # Add embedding embedding = ([0.1, 0.2, 0.3, 0.4, 0.5] * 77)[:EMBEDDING_DIM] # 384-dim vector - updated = await storage_backend.update_memory( - workspace_id, - created.id, - embedding=embedding - ) + updated = await storage_backend.update_memory(workspace_id, created.id, embedding=embedding) assert updated.embedding is not None assert len(updated.embedding) == EMBEDDING_DIM @@ -265,41 +250,18 @@ async def test_search_memories_with_embedding_similarity(self, storage_backend, """Test search_memories() with 
embedding similarity.""" workspace_id = class_workspace_id # Local alias for cleaner code # Create memories with embeddings - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Python programming", importance=0.8) - ) - await storage_backend.update_memory( - workspace_id, mem1.id, - embedding=[1.0] + [0.0] * (EMBEDDING_DIM - 1) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Python programming", importance=0.8)) + await storage_backend.update_memory(workspace_id, mem1.id, embedding=[1.0] + [0.0] * (EMBEDDING_DIM - 1)) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="JavaScript development", importance=0.7) - ) - await storage_backend.update_memory( - workspace_id, mem2.id, - embedding=[0.9, 0.1] + [0.0] * (EMBEDDING_DIM - 2) - ) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="JavaScript development", importance=0.7)) + await storage_backend.update_memory(workspace_id, mem2.id, embedding=[0.9, 0.1] + [0.0] * (EMBEDDING_DIM - 2)) - mem3 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Database design", importance=0.6) - ) - await storage_backend.update_memory( - workspace_id, mem3.id, - embedding=[0.0, 0.0, 1.0] + [0.0] * (EMBEDDING_DIM - 3) - ) + mem3 = await storage_backend.create_memory(workspace_id, RememberInput(content="Database design", importance=0.6)) + await storage_backend.update_memory(workspace_id, mem3.id, embedding=[0.0, 0.0, 1.0] + [0.0] * (EMBEDDING_DIM - 3)) # Search with query embedding similar to mem1 query_embedding = [1.0] + [0.0] * (EMBEDDING_DIM - 1) - results = await storage_backend.search_memories( - workspace_id, - query_embedding, - limit=10, - min_relevance=0.5 - ) + results = await storage_backend.search_memories(workspace_id, query_embedding, limit=10, min_relevance=0.5) # Should return memories ordered by relevance assert len(results) > 0 @@ -312,22 +274,12 @@ 
async def test_search_memories_respects_limit(self, storage_backend, class_works workspace_id = class_workspace_id # Create 5 memories with embeddings for i in range(5): - mem = await storage_backend.create_memory( - workspace_id, - RememberInput(content=f"Memory {i}", importance=0.5) - ) - await storage_backend.update_memory( - workspace_id, mem.id, - embedding=[0.5] * EMBEDDING_DIM - ) + mem = await storage_backend.create_memory(workspace_id, RememberInput(content=f"Memory {i}", importance=0.5)) + await storage_backend.update_memory(workspace_id, mem.id, embedding=[0.5] * EMBEDDING_DIM) # Search with limit=3 query_embedding = [0.5] * EMBEDDING_DIM - results = await storage_backend.search_memories( - workspace_id, - query_embedding, - limit=3 - ) + results = await storage_backend.search_memories(workspace_id, query_embedding, limit=3) assert len(results) == 3 @@ -335,22 +287,18 @@ async def test_search_memories_respects_min_relevance(self, storage_backend, cla """Test search_memories() respects min_relevance threshold.""" workspace_id = class_workspace_id # Create memories with different similarity levels - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="High relevance", importance=0.8) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="High relevance", importance=0.8)) await storage_backend.update_memory( - workspace_id, mem1.id, - embedding=[1.0] + [0.0] * (EMBEDDING_DIM - 1) # Very similar - ) - - mem2 = await storage_backend.create_memory( workspace_id, - RememberInput(content="Low relevance", importance=0.5) + mem1.id, + embedding=[1.0] + [0.0] * (EMBEDDING_DIM - 1), # Very similar ) + + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Low relevance", importance=0.5)) await storage_backend.update_memory( - workspace_id, mem2.id, - embedding=[0.0, 1.0] + [0.0] * (EMBEDDING_DIM - 2) # Very different + workspace_id, + mem2.id, + embedding=[0.0, 1.0] + [0.0] * 
(EMBEDDING_DIM - 2), # Very different ) # Search with high min_relevance @@ -359,7 +307,7 @@ async def test_search_memories_respects_min_relevance(self, storage_backend, cla workspace_id, query_embedding, limit=10, - min_relevance=0.9 # High threshold + min_relevance=0.9, # High threshold ) # Should only return high relevance memory @@ -373,39 +321,18 @@ async def test_search_memories_filters_by_types(self, storage_backend, class_wor workspace_id = class_workspace_id # Create memories of different types mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput( - content="Episodic memory", - type=MemoryType.EPISODIC, - importance=0.7 - ) - ) - await storage_backend.update_memory( - workspace_id, mem1.id, - embedding=[0.5] * EMBEDDING_DIM + workspace_id, RememberInput(content="Episodic memory", type=MemoryType.EPISODIC, importance=0.7) ) + await storage_backend.update_memory(workspace_id, mem1.id, embedding=[0.5] * EMBEDDING_DIM) mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput( - content="Semantic memory", - type=MemoryType.SEMANTIC, - importance=0.7 - ) - ) - await storage_backend.update_memory( - workspace_id, mem2.id, - embedding=[0.5] * EMBEDDING_DIM + workspace_id, RememberInput(content="Semantic memory", type=MemoryType.SEMANTIC, importance=0.7) ) + await storage_backend.update_memory(workspace_id, mem2.id, embedding=[0.5] * EMBEDDING_DIM) # Search filtered by type query_embedding = [0.5] * EMBEDDING_DIM - results = await storage_backend.search_memories( - workspace_id, - query_embedding, - limit=10, - types=["episodic"] - ) + results = await storage_backend.search_memories(workspace_id, query_embedding, limit=10, types=["episodic"]) # Should only return episodic memories assert len(results) == 1 @@ -417,39 +344,18 @@ async def test_search_memories_filters_by_subtypes(self, storage_backend, class_ workspace_id = class_workspace_id # Create memories with different subtypes mem1 = await storage_backend.create_memory( 
- workspace_id, - RememberInput( - content="Preference memory", - subtype=MemorySubtype.PREFERENCE, - importance=0.7 - ) - ) - await storage_backend.update_memory( - workspace_id, mem1.id, - embedding=[0.5] * EMBEDDING_DIM + workspace_id, RememberInput(content="Preference memory", subtype=MemorySubtype.PREFERENCE, importance=0.7) ) + await storage_backend.update_memory(workspace_id, mem1.id, embedding=[0.5] * EMBEDDING_DIM) mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput( - content="Solution memory", - subtype=MemorySubtype.SOLUTION, - importance=0.7 - ) - ) - await storage_backend.update_memory( - workspace_id, mem2.id, - embedding=[0.5] * EMBEDDING_DIM + workspace_id, RememberInput(content="Solution memory", subtype=MemorySubtype.SOLUTION, importance=0.7) ) + await storage_backend.update_memory(workspace_id, mem2.id, embedding=[0.5] * EMBEDDING_DIM) # Search filtered by subtype query_embedding = [0.5] * EMBEDDING_DIM - results = await storage_backend.search_memories( - workspace_id, - query_embedding, - limit=10, - subtypes=["preference"] - ) + results = await storage_backend.search_memories(workspace_id, query_embedding, limit=10, subtypes=["preference"]) # Should only return preference memories assert len(results) == 1 @@ -461,39 +367,18 @@ async def test_search_memories_filters_by_tags(self, storage_backend, class_work workspace_id = class_workspace_id # Create memories with different tags mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput( - content="Python memory", - tags=["python", "programming"], - importance=0.7 - ) - ) - await storage_backend.update_memory( - workspace_id, mem1.id, - embedding=[0.5] * EMBEDDING_DIM + workspace_id, RememberInput(content="Python memory", tags=["python", "programming"], importance=0.7) ) + await storage_backend.update_memory(workspace_id, mem1.id, embedding=[0.5] * EMBEDDING_DIM) mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput( - 
content="JavaScript memory", - tags=["javascript", "programming"], - importance=0.7 - ) - ) - await storage_backend.update_memory( - workspace_id, mem2.id, - embedding=[0.5] * EMBEDDING_DIM + workspace_id, RememberInput(content="JavaScript memory", tags=["javascript", "programming"], importance=0.7) ) + await storage_backend.update_memory(workspace_id, mem2.id, embedding=[0.5] * EMBEDDING_DIM) # Search filtered by tag query_embedding = [0.5] * EMBEDDING_DIM - results = await storage_backend.search_memories( - workspace_id, - query_embedding, - limit=10, - tags=["python"] - ) + results = await storage_backend.search_memories(workspace_id, query_embedding, limit=10, tags=["python"]) # Should only return python-tagged memories assert len(results) == 1 @@ -517,18 +402,9 @@ async def test_full_text_search_finds_content_matches(self, storage_backend, cla """Test full_text_search() finds content matches.""" workspace_id = class_workspace_id # Create memories with searchable content - await storage_backend.create_memory( - workspace_id, - RememberInput(content="Python is great for backend development", importance=0.7) - ) - await storage_backend.create_memory( - workspace_id, - RememberInput(content="JavaScript is used for frontend", importance=0.6) - ) - await storage_backend.create_memory( - workspace_id, - RememberInput(content="Database design patterns", importance=0.5) - ) + await storage_backend.create_memory(workspace_id, RememberInput(content="Python is great for backend development", importance=0.7)) + await storage_backend.create_memory(workspace_id, RememberInput(content="JavaScript is used for frontend", importance=0.6)) + await storage_backend.create_memory(workspace_id, RememberInput(content="Database design patterns", importance=0.5)) # Search for "Python" results = await storage_backend.full_text_search(workspace_id, "Python", limit=10) @@ -541,10 +417,7 @@ async def test_full_text_search_respects_limit(self, storage_backend, class_work workspace_id = 
class_workspace_id # Create multiple memories with common word for i in range(5): - await storage_backend.create_memory( - workspace_id, - RememberInput(content=f"Programming language {i}", importance=0.5) - ) + await storage_backend.create_memory(workspace_id, RememberInput(content=f"Programming language {i}", importance=0.5)) # Search with limit results = await storage_backend.full_text_search(workspace_id, "programming", limit=3) @@ -555,10 +428,7 @@ async def test_full_text_search_case_insensitive(self, storage_backend, class_wo """Test full_text_search() is case insensitive.""" workspace_id = class_workspace_id # Use a unique word to avoid collision with other tests in same class - await storage_backend.create_memory( - workspace_id, - RememberInput(content="UPPERCASE xyzuniqueterm LOWERCASE", importance=0.5) - ) + await storage_backend.create_memory(workspace_id, RememberInput(content="UPPERCASE xyzuniqueterm LOWERCASE", importance=0.5)) # Search with different cases results_lower = await storage_backend.full_text_search(workspace_id, "xyzuniqueterm", limit=10) @@ -574,8 +444,7 @@ async def test_full_text_search_excludes_deleted(self, storage_backend, class_wo workspace_id = class_workspace_id # Create and delete a memory mem = await storage_backend.create_memory( - workspace_id, - RememberInput(content="This will be deleted uniqueftsexcluded", importance=0.5) + workspace_id, RememberInput(content="This will be deleted uniqueftsexcluded", importance=0.5) ) await storage_backend.delete_memory(workspace_id, mem.id, hard=False) @@ -596,22 +465,12 @@ class TestAssociationStorage: async def test_create_association_stores_correctly(self, storage_backend, workspace_id): """Test create_association() stores correctly.""" # Create two memories to associate - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 1", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 2", 
importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 1", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 2", importance=0.5)) # Create association assoc_input = AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="solves", - strength=0.8, - metadata={"reason": "test association"} + source_id=mem1.id, target_id=mem2.id, relationship="solves", strength=0.8, metadata={"reason": "test association"} ) association = await storage_backend.create_association(workspace_id, assoc_input) @@ -628,45 +487,20 @@ async def test_create_association_stores_correctly(self, storage_backend, worksp async def test_get_associations_retrieves_by_memory_id(self, storage_backend, workspace_id): """Test get_associations() retrieves by memory ID.""" # Create memories - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Source memory", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Target memory 1", importance=0.5) - ) - mem3 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Target memory 2", importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Source memory", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Target memory 1", importance=0.5)) + mem3 = await storage_backend.create_memory(workspace_id, RememberInput(content="Target memory 2", importance=0.5)) # Create associations await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="related_to", - strength=0.7 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="related_to", strength=0.7) ) await storage_backend.create_association( - workspace_id, - AssociateInput( - 
source_id=mem1.id, - target_id=mem3.id, - relationship="solves", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem3.id, relationship="solves", strength=0.9) ) # Get all associations for mem1 - associations = await storage_backend.get_associations( - workspace_id, - mem1.id, - direction="both" - ) + associations = await storage_backend.get_associations(workspace_id, mem1.id, direction="both") assert len(associations) == 2 assert all(assoc.source_id == mem1.id for assoc in associations) @@ -674,42 +508,20 @@ async def test_get_associations_retrieves_by_memory_id(self, storage_backend, wo async def test_get_associations_filters_by_direction_outgoing(self, storage_backend, workspace_id): """Test get_associations() filters by direction (outgoing).""" # Create memories and bidirectional associations - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 1", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 2", importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 1", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 2", importance=0.5)) # mem1 -> mem2 await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="leads_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="leads_to", strength=0.8) ) # mem2 -> mem1 await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem2.id, - target_id=mem1.id, - relationship="builds_on", - strength=0.6 - ) + workspace_id, AssociateInput(source_id=mem2.id, target_id=mem1.id, relationship="builds_on", strength=0.6) ) # Get outgoing associations from mem1 - outgoing = await storage_backend.get_associations( - workspace_id, - mem1.id, - 
direction="outgoing" - ) + outgoing = await storage_backend.get_associations(workspace_id, mem1.id, direction="outgoing") assert len(outgoing) == 1 assert outgoing[0].source_id == mem1.id @@ -717,42 +529,20 @@ async def test_get_associations_filters_by_direction_outgoing(self, storage_back async def test_get_associations_filters_by_direction_incoming(self, storage_backend, workspace_id): """Test get_associations() filters by direction (incoming).""" - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 1", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 2", importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 1", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 2", importance=0.5)) # mem1 -> mem2 await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="leads_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="leads_to", strength=0.8) ) # mem2 -> mem1 await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem2.id, - target_id=mem1.id, - relationship="builds_on", - strength=0.6 - ) + workspace_id, AssociateInput(source_id=mem2.id, target_id=mem1.id, relationship="builds_on", strength=0.6) ) # Get incoming associations to mem1 - incoming = await storage_backend.get_associations( - workspace_id, - mem1.id, - direction="incoming" - ) + incoming = await storage_backend.get_associations(workspace_id, mem1.id, direction="incoming") assert len(incoming) == 1 assert incoming[0].source_id == mem2.id @@ -760,45 +550,20 @@ async def test_get_associations_filters_by_direction_incoming(self, storage_back async def test_get_associations_filters_by_relationship_type(self, storage_backend, workspace_id): 
"""Test get_associations() filters by relationship type.""" - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Source", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Target 1", importance=0.5) - ) - mem3 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Target 2", importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Source", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Target 1", importance=0.5)) + mem3 = await storage_backend.create_memory(workspace_id, RememberInput(content="Target 2", importance=0.5)) # Different relationship types await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="solves", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="solves", strength=0.8) ) await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem3.id, - relationship="related_to", - strength=0.6 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem3.id, relationship="related_to", strength=0.6) ) # Filter by SOLVES relationship - solves_assocs = await storage_backend.get_associations( - workspace_id, - mem1.id, - relationships=["solves"] - ) + solves_assocs = await storage_backend.get_associations(workspace_id, mem1.id, relationships=["solves"]) assert len(solves_assocs) == 1 assert solves_assocs[0].relationship == "solves" @@ -807,59 +572,24 @@ async def test_get_associations_filters_by_relationship_type(self, storage_backe async def test_traverse_graph_multi_hop_queries(self, storage_backend, workspace_id): """Test traverse_graph() multi-hop queries.""" # Create a chain: mem1 -> mem2 -> mem3 -> mem4 - mem1 = await storage_backend.create_memory( - workspace_id, - 
RememberInput(content="Memory 1", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 2", importance=0.5) - ) - mem3 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 3", importance=0.5) - ) - mem4 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Memory 4", importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 1", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 2", importance=0.5)) + mem3 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 3", importance=0.5)) + mem4 = await storage_backend.create_memory(workspace_id, RememberInput(content="Memory 4", importance=0.5)) # Create chain await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="leads_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="leads_to", strength=0.8) ) await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem2.id, - target_id=mem3.id, - relationship="leads_to", - strength=0.9 - ) + workspace_id, AssociateInput(source_id=mem2.id, target_id=mem3.id, relationship="leads_to", strength=0.9) ) await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem3.id, - target_id=mem4.id, - relationship="leads_to", - strength=0.7 - ) + workspace_id, AssociateInput(source_id=mem3.id, target_id=mem4.id, relationship="leads_to", strength=0.7) ) # Traverse from mem1 with max_depth=3 - result = await storage_backend.traverse_graph( - workspace_id, - mem1.id, - max_depth=3, - direction="outgoing" - ) + result = await storage_backend.traverse_graph(workspace_id, mem1.id, max_depth=3, direction="outgoing") # Should find 3 paths: mem1->mem2 (depth 1), 
mem1->mem2->mem3 (depth 2), mem1->mem2->mem3->mem4 (depth 3) assert result.total_paths == 3 @@ -879,30 +609,16 @@ async def test_traverse_graph_respects_max_depth(self, storage_backend, workspac # Create a long chain memories = [] for i in range(5): - mem = await storage_backend.create_memory( - workspace_id, - RememberInput(content=f"Memory {i}", importance=0.5) - ) + mem = await storage_backend.create_memory(workspace_id, RememberInput(content=f"Memory {i}", importance=0.5)) memories.append(mem) if i > 0: await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=memories[i-1].id, - target_id=mem.id, - relationship="leads_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=memories[i - 1].id, target_id=mem.id, relationship="leads_to", strength=0.8) ) # Traverse with max_depth=2 - result = await storage_backend.traverse_graph( - workspace_id, - memories[0].id, - max_depth=2, - direction="outgoing" - ) + result = await storage_backend.traverse_graph(workspace_id, memories[0].id, max_depth=2, direction="outgoing") # Should only reach depth 2 (paths to memories[1] and memories[2]) max_path_depth = max(path.depth for path in result.paths) if result.paths else 0 @@ -920,45 +636,19 @@ async def test_traverse_graph_respects_max_depth(self, storage_backend, workspac async def test_traverse_graph_filters_by_relationship(self, storage_backend, workspace_id): """Test traverse_graph() filters by relationship types.""" # Create memories with different relationships - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Start", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Solves path", importance=0.5) - ) - mem3 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Related path", importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Start", importance=0.5)) + mem2 = await 
storage_backend.create_memory(workspace_id, RememberInput(content="Solves path", importance=0.5)) + mem3 = await storage_backend.create_memory(workspace_id, RememberInput(content="Related path", importance=0.5)) await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="solves", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="solves", strength=0.8) ) await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem3.id, - relationship="related_to", - strength=0.6 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem3.id, relationship="related_to", strength=0.6) ) # Traverse with SOLVES filter - result = await storage_backend.traverse_graph( - workspace_id, - mem1.id, - max_depth=2, - relationships=["solves"] - ) + result = await storage_backend.traverse_graph(workspace_id, mem1.id, max_depth=2, relationships=["solves"]) # Should only find mem2 assert mem2.id in result.unique_nodes @@ -981,8 +671,8 @@ async def test_create_workspace_and_get_workspace(self, storage_backend): tenant_id="tenant_abc", name="Test Workspace", settings={"auto_remember": True}, - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc) + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) # Create @@ -1004,8 +694,6 @@ async def test_get_workspace_returns_none_for_nonexistent(self, storage_backend) assert result is None - - @pytest.mark.asyncio class TestWorkspaceStats: """Test workspace statistics. 
@@ -1018,36 +706,15 @@ async def test_get_workspace_stats(self, storage_backend, unique_workspace_id): """Test get_workspace_stats() returns correct counts.""" workspace_id = unique_workspace_id # Create memories of different types - await storage_backend.create_memory( - workspace_id, - RememberInput(content="Episodic 1", type=MemoryType.EPISODIC, importance=0.5) - ) - await storage_backend.create_memory( - workspace_id, - RememberInput(content="Episodic 2", type=MemoryType.EPISODIC, importance=0.5) - ) - await storage_backend.create_memory( - workspace_id, - RememberInput(content="Semantic 1", type=MemoryType.SEMANTIC, importance=0.5) - ) + await storage_backend.create_memory(workspace_id, RememberInput(content="Episodic 1", type=MemoryType.EPISODIC, importance=0.5)) + await storage_backend.create_memory(workspace_id, RememberInput(content="Episodic 2", type=MemoryType.EPISODIC, importance=0.5)) + await storage_backend.create_memory(workspace_id, RememberInput(content="Semantic 1", type=MemoryType.SEMANTIC, importance=0.5)) # Create associations - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Source", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Target", importance=0.5) - ) + mem1 = await storage_backend.create_memory(workspace_id, RememberInput(content="Source", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Target", importance=0.5)) await storage_backend.create_association( - workspace_id, - AssociateInput( - source_id=mem1.id, - target_id=mem2.id, - relationship="related_to", - strength=0.8 - ) + workspace_id, AssociateInput(source_id=mem1.id, target_id=mem2.id, relationship="related_to", strength=0.8) ) # Get stats @@ -1063,14 +730,8 @@ async def test_get_workspace_stats_excludes_deleted(self, storage_backend, uniqu """Test get_workspace_stats() excludes soft-deleted memories.""" workspace_id = 
unique_workspace_id # Create and delete a memory - mem1 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Active memory stats", importance=0.5) - ) - mem2 = await storage_backend.create_memory( - workspace_id, - RememberInput(content="Deleted memory stats", importance=0.5) - ) + await storage_backend.create_memory(workspace_id, RememberInput(content="Active memory stats", importance=0.5)) + mem2 = await storage_backend.create_memory(workspace_id, RememberInput(content="Deleted memory stats", importance=0.5)) await storage_backend.delete_memory(workspace_id, mem2.id, hard=False) # Stats should only count active memory @@ -1095,8 +756,8 @@ async def _ensure_workspace(self, sqlite_storage, workspace_id: str): id=workspace_id, tenant_id="test_tenant", name=f"Test Workspace {workspace_id}", - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) await sqlite_storage.create_workspace(workspace) @@ -1208,8 +869,8 @@ async def _ensure_workspace(self, sqlite_storage, workspace_id: str): id=workspace_id, tenant_id="test_tenant", name=f"Test Workspace {workspace_id}", - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) await sqlite_storage.create_workspace(workspace) @@ -1293,12 +954,8 @@ async def test_get_working_memory_retrieves_by_key(self, storage_backend, unique await storage_backend.create_session(workspace_id, session) # Set multiple contexts - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_get", "key1", "value1" - ) - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_get", "key2", "value2" - ) + await storage_backend.set_working_memory(workspace_id, "sess_ctx_get", "key1", "value1") + await storage_backend.set_working_memory(workspace_id, "sess_ctx_get", "key2", "value2") # Get specific key ctx = await 
storage_backend.get_working_memory(workspace_id, "sess_ctx_get", "key1") @@ -1307,9 +964,7 @@ async def test_get_working_memory_retrieves_by_key(self, storage_backend, unique assert ctx.key == "key1" assert ctx.value == "value1" - async def test_get_working_memory_returns_none_for_nonexistent_key( - self, storage_backend, unique_workspace_id - ): + async def test_get_working_memory_returns_none_for_nonexistent_key(self, storage_backend, unique_workspace_id): """Test get_working_memory() returns None for non-existent key.""" workspace_id = unique_workspace_id await self._ensure_workspace(storage_backend, workspace_id) @@ -1322,9 +977,7 @@ async def test_get_working_memory_returns_none_for_nonexistent_key( ) await storage_backend.create_session(workspace_id, session) - result = await storage_backend.get_working_memory( - workspace_id, "sess_ctx_nokey", "nonexistent_key" - ) + result = await storage_backend.get_working_memory(workspace_id, "sess_ctx_nokey", "nonexistent_key") assert result is None async def test_get_all_working_memory_retrieves_all_keys(self, storage_backend, unique_workspace_id): @@ -1341,15 +994,9 @@ async def test_get_all_working_memory_retrieves_all_keys(self, storage_backend, await storage_backend.create_session(workspace_id, session) # Set multiple contexts - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_all", "topic", "Python" - ) - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_all", "mode", "learning" - ) - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_all", "preferences", {"dark_mode": True} - ) + await storage_backend.set_working_memory(workspace_id, "sess_ctx_all", "topic", "Python") + await storage_backend.set_working_memory(workspace_id, "sess_ctx_all", "mode", "learning") + await storage_backend.set_working_memory(workspace_id, "sess_ctx_all", "preferences", {"dark_mode": True}) # Get all all_ctx = await storage_backend.get_all_working_memory(workspace_id, "sess_ctx_all") 
@@ -1358,9 +1005,7 @@ async def test_get_all_working_memory_retrieves_all_keys(self, storage_backend, keys = {ctx.key for ctx in all_ctx} assert keys == {"topic", "mode", "preferences"} - async def test_get_all_working_memory_returns_empty_for_no_entries( - self, storage_backend, unique_workspace_id - ): + async def test_get_all_working_memory_returns_empty_for_no_entries(self, storage_backend, unique_workspace_id): """Test get_all_working_memory() returns empty list when no entries exist.""" workspace_id = unique_workspace_id await self._ensure_workspace(storage_backend, workspace_id) @@ -1390,29 +1035,17 @@ async def test_working_memory_stores_various_value_types(self, storage_backend, await storage_backend.create_session(workspace_id, session) # String - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_types", "string_key", "string_value" - ) + await storage_backend.set_working_memory(workspace_id, "sess_ctx_types", "string_key", "string_value") # Number - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_types", "number_key", 42.5 - ) + await storage_backend.set_working_memory(workspace_id, "sess_ctx_types", "number_key", 42.5) # Boolean - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_types", "bool_key", True - ) + await storage_backend.set_working_memory(workspace_id, "sess_ctx_types", "bool_key", True) # List - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_types", "list_key", [1, 2, 3, "four"] - ) + await storage_backend.set_working_memory(workspace_id, "sess_ctx_types", "list_key", [1, 2, 3, "four"]) # Dict - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_types", "dict_key", {"nested": {"deep": "value"}} - ) + await storage_backend.set_working_memory(workspace_id, "sess_ctx_types", "dict_key", {"nested": {"deep": "value"}}) # None/null - await storage_backend.set_working_memory( - workspace_id, "sess_ctx_types", "null_key", None - ) + await 
storage_backend.set_working_memory(workspace_id, "sess_ctx_types", "null_key", None) # Verify all values ctx_str = await storage_backend.get_working_memory(workspace_id, "sess_ctx_types", "string_key") @@ -1446,8 +1079,8 @@ async def _ensure_workspace(self, sqlite_storage, workspace_id: str): id=workspace_id, tenant_id="test_tenant", name=f"Test Workspace {workspace_id}", - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc), + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), ) await sqlite_storage.create_workspace(workspace) @@ -1485,9 +1118,7 @@ async def test_cleanup_expired_sessions_removes_expired(self, storage_backend, u active = await storage_backend.get_session(workspace_id, "sess_active_cleanup") assert active is not None - async def test_cleanup_expired_sessions_returns_zero_when_none_expired( - self, storage_backend, unique_workspace_id - ): + async def test_cleanup_expired_sessions_returns_zero_when_none_expired(self, storage_backend, unique_workspace_id): """Test cleanup_expired_sessions() returns 0 when no sessions are expired.""" workspace_id = unique_workspace_id await self._ensure_workspace(storage_backend, workspace_id) @@ -1520,12 +1151,8 @@ async def test_delete_session_cascades_to_context(self, storage_backend, unique_ await storage_backend.create_session(workspace_id, session) # Add context entries - await storage_backend.set_working_memory( - workspace_id, "sess_cascade_delete", "key1", "value1" - ) - await storage_backend.set_working_memory( - workspace_id, "sess_cascade_delete", "key2", "value2" - ) + await storage_backend.set_working_memory(workspace_id, "sess_cascade_delete", "key1", "value1") + await storage_backend.set_working_memory(workspace_id, "sess_cascade_delete", "key2", "value2") # Verify context exists ctx_before = await storage_backend.get_all_working_memory(workspace_id, "sess_cascade_delete") diff --git a/memorylayer-core-python/tests/unit/test_tier_generation.py 
b/memorylayer-core-python/tests/unit/test_tier_generation.py index 063072a..a815588 100644 --- a/memorylayer-core-python/tests/unit/test_tier_generation.py +++ b/memorylayer-core-python/tests/unit/test_tier_generation.py @@ -1,7 +1,9 @@ """Unit tests for DefaultTierGenerationService.""" -import pytest + from unittest.mock import AsyncMock, MagicMock +import pytest + from memorylayer_server.models.llm import LLMRole from memorylayer_server.services.semantic_tiering.default import DefaultSemanticTieringService diff --git a/memorylayer-core-python/tests/unit/test_tier_generation_service.py b/memorylayer-core-python/tests/unit/test_tier_generation_service.py index 9018eb1..2e85778 100644 --- a/memorylayer-core-python/tests/unit/test_tier_generation_service.py +++ b/memorylayer-core-python/tests/unit/test_tier_generation_service.py @@ -7,19 +7,21 @@ - TierGenerationTaskHandler: background task handler - Integration with remember() flow """ -import pytest + from unittest.mock import AsyncMock, MagicMock, patch +import pytest + from memorylayer_server.models.memory import Memory, MemoryType -from memorylayer_server.services.semantic_tiering.default import DefaultSemanticTieringService from memorylayer_server.services.semantic_tiering.base import SemanticTieringService +from memorylayer_server.services.semantic_tiering.default import DefaultSemanticTieringService from memorylayer_server.tasks.semantic_tiering_task_handler import TierGenerationTaskHandler - # --------------------------------------------------------------------------- # Fixtures # --------------------------------------------------------------------------- + @pytest.fixture def mock_llm_service(): """Mock LLM service that returns canned responses.""" @@ -111,6 +113,7 @@ def tier_service_no_task_service(mock_llm_service, mock_storage, mock_variables) # request_tier_generation() Tests # --------------------------------------------------------------------------- + class TestRequestTierGeneration: """Tests for 
request_tier_generation() dispatch logic.""" @@ -121,8 +124,8 @@ async def test_schedules_background_task_when_enabled(self, tier_service_enabled assert task_id == "task_001" mock_task_service.schedule_task.assert_called_once_with( - task_type='generate_tiers', - payload={'memory_id': 'mem_123', 'workspace_id': 'ws_test'}, + task_type="generate_tiers", + payload={"memory_id": "mem_123", "workspace_id": "ws_test"}, ) @pytest.mark.asyncio @@ -134,9 +137,7 @@ async def test_returns_none_when_disabled(self, tier_service_disabled, mock_task mock_task_service.schedule_task.assert_not_called() @pytest.mark.asyncio - async def test_falls_back_to_inline_without_task_service( - self, tier_service_no_task_service, mock_storage, mock_llm_service - ): + async def test_falls_back_to_inline_without_task_service(self, tier_service_no_task_service, mock_storage, mock_llm_service): """Without task service, should generate tiers inline.""" result = await tier_service_no_task_service.request_tier_generation("mem_test123", "ws_test") @@ -171,6 +172,7 @@ async def generate_tiers_for_content(self, content): # Config Toggle Tests # --------------------------------------------------------------------------- + class TestTierGenerationConfig: """Tests for MEMORYLAYER_TIER_GENERATION_ENABLED config behavior.""" @@ -188,22 +190,14 @@ async def test_enabled_false_prevents_scheduling(self, tier_service_disabled, mo def test_service_stores_enabled_flag(self, mock_llm_service, mock_storage, mock_variables): """Verify the enabled flag is properly stored on the service.""" - service_on = DefaultSemanticTieringService( - llm_service=mock_llm_service, storage=mock_storage, - v=mock_variables, enabled=True - ) - service_off = DefaultSemanticTieringService( - llm_service=mock_llm_service, storage=mock_storage, - v=mock_variables, enabled=False - ) + service_on = DefaultSemanticTieringService(llm_service=mock_llm_service, storage=mock_storage, v=mock_variables, enabled=True) + service_off = 
DefaultSemanticTieringService(llm_service=mock_llm_service, storage=mock_storage, v=mock_variables, enabled=False) assert service_on.enabled is True assert service_off.enabled is False def test_service_defaults_to_enabled(self, mock_llm_service, mock_storage, mock_variables): """Default constructor should have enabled=True.""" - service = DefaultSemanticTieringService( - llm_service=mock_llm_service, storage=mock_storage, v=mock_variables - ) + service = DefaultSemanticTieringService(llm_service=mock_llm_service, storage=mock_storage, v=mock_variables) assert service.enabled is True @@ -211,13 +205,14 @@ def test_service_defaults_to_enabled(self, mock_llm_service, mock_storage, mock_ # TierGenerationTaskHandler Tests # --------------------------------------------------------------------------- + class TestTierGenerationTaskHandler: """Tests for the background task handler.""" def test_task_type_is_generate_tiers(self): """Handler should register for 'generate_tiers' task type.""" handler = TierGenerationTaskHandler() - assert handler.get_task_type() == 'generate_tiers' + assert handler.get_task_type() == "generate_tiers" def test_schedule_returns_none(self): """Handler is on-demand only, no recurring schedule.""" @@ -232,13 +227,16 @@ async def test_handle_delegates_to_tier_service(self): mock_tier_service = AsyncMock() mock_v = MagicMock() - with patch.object(handler, 'get_extension', return_value=mock_tier_service): - await handler.handle(mock_v, { - 'memory_id': 'mem_abc', - 'workspace_id': 'ws_xyz', - }) + with patch.object(handler, "get_extension", return_value=mock_tier_service): + await handler.handle( + mock_v, + { + "memory_id": "mem_abc", + "workspace_id": "ws_xyz", + }, + ) - mock_tier_service.generate_tiers.assert_called_once_with('mem_abc', 'ws_xyz') + mock_tier_service.generate_tiers.assert_called_once_with("mem_abc", "ws_xyz") def test_initialize_returns_self(self): """initialize() should return handler instance.""" @@ -255,6 +253,7 @@ def 
test_initialize_returns_self(self): # Integration with remember() flow # --------------------------------------------------------------------------- + class TestRememberTierGenerationIntegration: """Tests verifying remember() delegates to tier generation service.""" @@ -285,7 +284,7 @@ async def test_remember_calls_request_tier_generation( mock_request.assert_called_once_with(memory.id, unique_workspace_id) finally: # Restore original if we patched it - if original_service and hasattr(original_service, 'request_tier_generation'): + if original_service and hasattr(original_service, "request_tier_generation"): # The base class method will be restored since we only patched the instance pass @@ -323,9 +322,7 @@ async def test_remember_handles_tier_generation_error( original_service = memory_service.tier_generation_service try: if original_service: - original_service.request_tier_generation = AsyncMock( - side_effect=RuntimeError("LLM unavailable") - ) + original_service.request_tier_generation = AsyncMock(side_effect=RuntimeError("LLM unavailable")) input_data = RememberInput( content="Test content with tier error", @@ -343,17 +340,20 @@ async def test_remember_handles_tier_generation_error( # RememberInput model tests # --------------------------------------------------------------------------- + class TestRememberInputModel: """Verify generate_tiers field has been removed from RememberInput.""" def test_no_generate_tiers_field(self): """RememberInput should not have a generate_tiers field.""" from memorylayer_server.models.memory import RememberInput + fields = RememberInput.model_fields - assert 'generate_tiers' not in fields + assert "generate_tiers" not in fields def test_remember_input_ignores_generate_tiers_kwarg(self): """RememberInput should not store generate_tiers even if passed.""" from memorylayer_server.models.memory import RememberInput + input_data = RememberInput(content="test") - assert not hasattr(input_data, 'generate_tiers') or 'generate_tiers' not 
in input_data.model_fields + assert not hasattr(input_data, "generate_tiers") or "generate_tiers" not in input_data.model_fields diff --git a/memorylayer-explorer/package-lock.json b/memorylayer-explorer/package-lock.json new file mode 100644 index 0000000..0affb4e --- /dev/null +++ b/memorylayer-explorer/package-lock.json @@ -0,0 +1,7882 @@ +{ + "name": "@scitrera/memorylayer-explorer", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@scitrera/memorylayer-explorer", + "version": "0.1.0", + "license": "Apache-2.0", + "dependencies": { + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-dropdown-menu": "^2.1.16", + "@radix-ui/react-scroll-area": "^1.2.10", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slider": "^1.3.6", + "@radix-ui/react-slot": "^1.2.4", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-tooltip": "^1.2.8", + "@scitrera/memorylayer-sdk": "file:../memorylayer-sdk-typescript", + "@tanstack/react-query": "^5.0.0", + "@xyflow/react": "^12.0.0", + "class-variance-authority": "^0.7.0", + "clsx": "^2.0.0", + "cmdk": "^1.1.1", + "d3-force": "^3.0.0", + "dagre": "^0.8.5", + "date-fns": "^3.0.0", + "lucide-react": "^0.400.0", + "next": "^15.0.0", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "recharts": "^2.12.0", + "sonner": "^1.0.0", + "tailwind-merge": "^2.0.0" + }, + "devDependencies": { + "@tailwindcss/postcss": "^4.0.0", + "@types/d3-force": "^3.0.0", + "@types/dagre": "^0.7.0", + "@types/node": "^20.0.0", + "@types/react": "^19.0.0", + "@types/react-dom": "^19.0.0", + "eslint": "^9.0.0", + "eslint-config-next": "^15.0.0", + "postcss": "^8.4.0", + "tailwindcss": "^4.0.0", + "typescript": "^5.5.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "../memorylayer-sdk-typescript": { + "name": "@scitrera/memorylayer-sdk", + "version": "0.0.3", + "license": "Apache-2.0", + "devDependencies": { + "@types/node": "^20.0.0", + "typescript": 
"^5.3.0", + "vitest": "^1.0.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.6.tgz", + "integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@emnapi/core": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.8.1.tgz", + "integrity": "sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.1.0", + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.8.1.tgz", + "integrity": "sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/wasi-threads": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.1.0.tgz", + "integrity": "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": 
"https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": 
"https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + 
"integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.4.tgz", + "integrity": "sha512-C3HlIdsBxszvm5McXlB8PeOEWfBhcGBTZGkGlWc2U0KFY5IwG5OQEuQ8rq52DZmcHDlPLd+YFBK+cZcytwIFWg==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.5.tgz", + "integrity": "sha512-N0bD2kIPInNHUHehXhMke1rBGs1dwqvC9O9KYMyyjK7iXt7GAhnro7UlcuYcGdS/yYOlq0MAVgrow8IbWJwyqg==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.4", + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.7.tgz", + "integrity": "sha512-0tLRojf/1Go2JgEVm+3Frg9A3IW8bJgKgdO0BN5RkF//ufuz2joZM63Npau2ff3J6lUVYgDSNzNkR+aH3IVfjg==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.7.5" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": 
"sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + "license": "MIT" + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@img/colour": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@img/colour/-/colour-1.0.0.tgz", + "integrity": 
"sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz", + "integrity": "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz", + "integrity": "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz", + "integrity": "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.2.4", + "resolved": 
"https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz", + "integrity": "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz", + "integrity": "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==", + "cpu": [ + "arm" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz", + "integrity": "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-ppc64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.4.tgz", + "integrity": "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==", + "cpu": [ + "ppc64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-riscv64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-riscv64/-/sharp-libvips-linux-riscv64-1.2.4.tgz", + 
"integrity": "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==", + "cpu": [ + "riscv64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.4.tgz", + "integrity": "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==", + "cpu": [ + "s390x" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz", + "integrity": "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz", + "integrity": "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz", + "integrity": 
"sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz", + "integrity": "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==", + "cpu": [ + "arm" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz", + "integrity": "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-ppc64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.5.tgz", + "integrity": "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==", + "cpu": [ + "ppc64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + 
"@img/sharp-libvips-linux-ppc64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-riscv64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-riscv64/-/sharp-linux-riscv64-0.34.5.tgz", + "integrity": "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==", + "cpu": [ + "riscv64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-riscv64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.5.tgz", + "integrity": "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==", + "cpu": [ + "s390x" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz", + "integrity": "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz", + 
"integrity": "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz", + "integrity": "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.5.tgz", + "integrity": "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==", + "cpu": [ + "wasm32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.7.0" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz", + "integrity": "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": 
"^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.5.tgz", + "integrity": "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==", + "cpu": [ + "ia32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz", + "integrity": "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + 
"node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@napi-rs/wasm-runtime": { + "version": "0.2.12", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.12.tgz", + "integrity": "sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.4.3", + "@emnapi/runtime": "^1.4.3", + "@tybys/wasm-util": "^0.10.0" + } + }, + "node_modules/@next/env": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.12.tgz", + "integrity": "sha512-pUvdJN1on574wQHjaBfNGDt9Mz5utDSZFsIIQkMzPgNS8ZvT4H2mwOrOIClwsQOb6EGx5M76/CZr6G8i6pSpLg==", + "license": "MIT" + }, + "node_modules/@next/eslint-plugin-next": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-15.5.12.tgz", + "integrity": 
"sha512-+ZRSDFTv4aC96aMb5E41rMjysx8ApkryevnvEYZvPZO52KvkqP5rNExLUXJFr9P4s0f3oqNQR6vopCZsPWKDcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-glob": "3.3.1" + } + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.12.tgz", + "integrity": "sha512-RnRjBtH8S8eXCpUNkQ+543DUc7ys8y15VxmFU9HRqlo9BG3CcBUiwNtF8SNoi2xvGCVJq1vl2yYq+3oISBS0Zg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.12.tgz", + "integrity": "sha512-nqa9/7iQlboF1EFtNhWxQA0rQstmYRSBGxSM6g3GxvxHxcoeqVXfGNr9stJOme674m2V7r4E3+jEhhGvSQhJRA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.12.tgz", + "integrity": "sha512-dCzAjqhDHwmoB2M4eYfVKqXs99QdQxNQVpftvP1eGVppamXh/OkDAwV737Zr0KPXEqRUMN4uCjh6mjO+XtF3Mw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.12.tgz", + "integrity": "sha512-+fpGWvQiITgf7PUtbWY1H7qUSnBZsPPLyyq03QuAKpVoTy/QUx1JptEDTQMVvQhvizCEuNLEeghrQUyXQOekuw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "15.5.12", + "resolved": 
"https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.12.tgz", + "integrity": "sha512-jSLvgdRRL/hrFAPqEjJf1fFguC719kmcptjNVDJl26BnJIpjL3KH5h6mzR4mAweociLQaqvt4UyzfbFjgAdDcw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.12.tgz", + "integrity": "sha512-/uaF0WfmYqQgLfPmN6BvULwxY0dufI2mlN2JbOKqqceZh1G4hjREyi7pg03zjfyS6eqNemHAZPSoP84x17vo6w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.12.tgz", + "integrity": "sha512-xhsL1OvQSfGmlL5RbOmU+FV120urrgFpYLq+6U8C6KIym32gZT6XF/SDE92jKzzlPWskkbjOKCpqk5m4i8PEfg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.12.tgz", + "integrity": "sha512-Z1Dh6lhFkxvBDH1FoW6OU/L6prYwPSlwjLiZkExIAh8fbP6iI/M7iGTQAJPYJ9YFlWobCZ1PHbchFhFYb2ADkw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" 
+ } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nolyfill/is-core-module": { + "version": "1.0.39", + "resolved": "https://registry.npmjs.org/@nolyfill/is-core-module/-/is-core-module-1.0.39.tgz", + "integrity": "sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.4.0" + } + }, + "node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, + "node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + 
"@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": 
"sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz", + "integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-dropdown-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.16.tgz", + "integrity": "sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 
|| ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.16.tgz", + "integrity": "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + 
}, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": 
"sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.10.tgz", + "integrity": "sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + 
"@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select": { + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", + "integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || 
^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", + "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || 
^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.3.6.tgz", + "integrity": "sha512-JPYb1GuM1bxfjMRlNLE+BcmBC8onfCi60Blk7OBqi2MLTFdS+8401U4uFjnwkOr49BLmXxLC6JHkvAsx5OJvHw==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.13.tgz", + "integrity": 
"sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz", + "integrity": "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-effect-event": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz", + "integrity": 
"sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-use-rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + "license": "MIT", + "dependencies": { + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": 
"sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + "license": "MIT" + }, + "node_modules/@rtsao/scc": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz", + "integrity": "sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rushstack/eslint-patch": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.15.0.tgz", + "integrity": "sha512-ojSshQPKwVvSMR8yT2L/QtUkV5SXi/IfDiJ4/8d6UbTPjiHVmxZzUAzGD8Tzks1b9+qQkZa0isUOvYObedITaw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@scitrera/memorylayer-sdk": { + "resolved": "../memorylayer-sdk-typescript", + "link": true + }, + "node_modules/@swc/helpers": { + "version": "0.5.15", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", + "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.8.0" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.18.tgz", + "integrity": "sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.4", + "enhanced-resolve": "^5.18.3", + "jiti": "^2.6.1", + "lightningcss": "1.30.2", + "magic-string": "^0.30.21", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.18.tgz", + "integrity": "sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" 
+ }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-x64": "4.1.18", + "@tailwindcss/oxide-freebsd-x64": "4.1.18", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.18", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.18", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-x64-musl": "4.1.18", + "@tailwindcss/oxide-wasm32-wasi": "4.1.18", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.18", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.18.tgz", + "integrity": "sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.18.tgz", + "integrity": "sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.18.tgz", + "integrity": "sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.18.tgz", + "integrity": "sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.18.tgz", + "integrity": "sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.18.tgz", + "integrity": "sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.18.tgz", + "integrity": "sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.18", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.18.tgz", + "integrity": "sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.18.tgz", + "integrity": "sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.18.tgz", + "integrity": "sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@emnapi/wasi-threads": "^1.1.0", + "@napi-rs/wasm-runtime": "^1.1.0", + "@tybys/wasm-util": "^0.10.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz", + "integrity": "sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.18.tgz", + "integrity": "sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/postcss": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.18.tgz", + "integrity": "sha512-Ce0GFnzAOuPyfV5SxjXGn0CubwGcuDB0zcdaPuCSzAa/2vII24JTkH+I6jcbXLb1ctjZMZZI6OjDaLPJQL1S0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "@tailwindcss/node": "4.1.18", + "@tailwindcss/oxide": "4.1.18", + "postcss": "^8.4.41", + "tailwindcss": "4.1.18" + } + }, + "node_modules/@tanstack/query-core": { + "version": "5.90.20", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.20.tgz", + "integrity": "sha512-OMD2HLpNouXEfZJWcKeVKUgQ5n+n3A2JFmBaScpNDUqSrQSjiveC7dKMe53uJUg1nDG16ttFPz2xfilz6i2uVg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/react-query": { + "version": "5.90.21", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.21.tgz", + "integrity": "sha512-0Lu6y5t+tvlTJMTO7oh5NSpJfpg/5D41LlThfepTixPYkJ0sE2Jj0m0f6yYqujBwIXlId87e234+MxG3D3g7kg==", + "license": "MIT", + "dependencies": { + "@tanstack/query-core": "5.90.20" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^18 || ^19" + } + }, + "node_modules/@tybys/wasm-util": { + "version": "0.10.1", + "resolved": 
"https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz", + "integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-force": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz", + "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": 
"sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-shape": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.8.tgz", + "integrity": "sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": 
"3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/dagre": { + "version": "0.7.53", + "resolved": "https://registry.npmjs.org/@types/dagre/-/dagre-0.7.53.tgz", + "integrity": "sha512-f4gkWqzPZvYmKhOsDnhq/R8mO4UMcKdxZo+i5SCkOU1wvGeHJeUXGIHeE9pnwGyPMDof1Vx5ZQo4nxpeg2TTVQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.19.33", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.33.tgz", + "integrity": 
"sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/react": { + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", + "devOptional": true, + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "devOptional": true, + "license": "MIT", + "peer": true, + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.55.0.tgz", + "integrity": "sha512-1y/MVSz0NglV1ijHC8OT49mPJ4qhPYjiK08YUQVbIOyu+5k862LKUHFkpKHWu//zmr7hDR2rhwUm6gnCGNmGBQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.12.2", + "@typescript-eslint/scope-manager": "8.55.0", + "@typescript-eslint/type-utils": "8.55.0", + "@typescript-eslint/utils": "8.55.0", + "@typescript-eslint/visitor-keys": "8.55.0", + "ignore": "^7.0.5", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.55.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + 
"resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.55.0.tgz", + "integrity": "sha512-4z2nCSBfVIMnbuu8uinj+f0o4qOeggYJLbjpPHka3KH1om7e+H9yLKTYgksTaHcGco+NClhhY2vyO3HsMH1RGw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@typescript-eslint/scope-manager": "8.55.0", + "@typescript-eslint/types": "8.55.0", + "@typescript-eslint/typescript-estree": "8.55.0", + "@typescript-eslint/visitor-keys": "8.55.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.55.0.tgz", + "integrity": "sha512-zRcVVPFUYWa3kNnjaZGXSu3xkKV1zXy8M4nO/pElzQhFweb7PPtluDLQtKArEOGmjXoRjnUZ29NjOiF0eCDkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.55.0", + "@typescript-eslint/types": "^8.55.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.55.0.tgz", + "integrity": 
"sha512-fVu5Omrd3jeqeQLiB9f1YsuK/iHFOwb04bCtY4BSCLgjNbOD33ZdV6KyEqplHr+IlpgT0QTZ/iJ+wT7hvTx49Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.55.0", + "@typescript-eslint/visitor-keys": "8.55.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.55.0.tgz", + "integrity": "sha512-1R9cXqY7RQd7WuqSN47PK9EDpgFUK3VqdmbYrvWJZYDd0cavROGn+74ktWBlmJ13NXUQKlZ/iAEQHI/V0kKe0Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.55.0.tgz", + "integrity": "sha512-x1iH2unH4qAt6I37I2CGlsNs+B9WGxurP2uyZLRz6UJoZWDBx9cJL1xVN/FiOmHEONEg6RIufdvyT0TEYIgC5g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.55.0", + "@typescript-eslint/typescript-estree": "8.55.0", + "@typescript-eslint/utils": "8.55.0", + "debug": "^4.4.3", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.55.0.tgz", + "integrity": 
"sha512-ujT0Je8GI5BJWi+/mMoR0wxwVEQaxM+pi30xuMiJETlX80OPovb2p9E8ss87gnSVtYXtJoU9U1Cowcr6w2FE0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.55.0.tgz", + "integrity": "sha512-EwrH67bSWdx/3aRQhCoxDaHM+CrZjotc2UCCpEDVqfCE+7OjKAGWNY2HsCSTEVvWH2clYQK8pdeLp42EVs+xQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.55.0", + "@typescript-eslint/tsconfig-utils": "8.55.0", + "@typescript-eslint/types": "8.55.0", + "@typescript-eslint/visitor-keys": "8.55.0", + "debug": "^4.4.3", + "minimatch": "^9.0.5", + "semver": "^7.7.3", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, 
+ "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.55.0.tgz", + "integrity": "sha512-BqZEsnPGdYpgyEIkDC1BadNY8oMwckftxBT+C8W0g1iKPdeqKZBtTfnvcq0nf60u7MkjFO8RBvpRGZBPw4L2ow==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.9.1", + "@typescript-eslint/scope-manager": "8.55.0", + "@typescript-eslint/types": "8.55.0", + "@typescript-eslint/typescript-estree": "8.55.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.55.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.55.0.tgz", + "integrity": "sha512-AxNRwEie8Nn4eFS1FzDMJWIISMGoXMb037sgCBJ3UR6o0fQTzr2tqN9WT+DkWJPhIdQCfV7T6D387566VtnCJA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.55.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@unrs/resolver-binding-android-arm-eabi": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-android-arm-eabi/-/resolver-binding-android-arm-eabi-1.11.1.tgz", + "integrity": "sha512-ppLRUgHVaGRWUx0R0Ut06Mjo9gBaBkg3v/8AxusGLhsIotbBLuRk51rAzqLC8gq6NyyAojEXglNjzf6R948DNw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@unrs/resolver-binding-android-arm64": { + 
"version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-android-arm64/-/resolver-binding-android-arm64-1.11.1.tgz", + "integrity": "sha512-lCxkVtb4wp1v+EoN+HjIG9cIIzPkX5OtM03pQYkG+U5O/wL53LC4QbIeazgiKqluGeVEeBlZahHalCaBvU1a2g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@unrs/resolver-binding-darwin-arm64": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-arm64/-/resolver-binding-darwin-arm64-1.11.1.tgz", + "integrity": "sha512-gPVA1UjRu1Y/IsB/dQEsp2V1pm44Of6+LWvbLc9SDk1c2KhhDRDBUkQCYVWe6f26uJb3fOK8saWMgtX8IrMk3g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@unrs/resolver-binding-darwin-x64": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-x64/-/resolver-binding-darwin-x64-1.11.1.tgz", + "integrity": "sha512-cFzP7rWKd3lZaCsDze07QX1SC24lO8mPty9vdP+YVa3MGdVgPmFc59317b2ioXtgCMKGiCLxJ4HQs62oz6GfRQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@unrs/resolver-binding-freebsd-x64": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-freebsd-x64/-/resolver-binding-freebsd-x64-1.11.1.tgz", + "integrity": "sha512-fqtGgak3zX4DCB6PFpsH5+Kmt/8CIi4Bry4rb1ho6Av2QHTREM+47y282Uqiu3ZRF5IQioJQ5qWRV6jduA+iGw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm-gnueabihf": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-gnueabihf/-/resolver-binding-linux-arm-gnueabihf-1.11.1.tgz", + "integrity": "sha512-u92mvlcYtp9MRKmP+ZvMmtPN34+/3lMHlyMj7wXJDeXxuM0Vgzz0+PPJNsro1m3IZPYChIkn944wW8TYgGKFHw==", + "cpu": [ + "arm" + 
], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm-musleabihf": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-musleabihf/-/resolver-binding-linux-arm-musleabihf-1.11.1.tgz", + "integrity": "sha512-cINaoY2z7LVCrfHkIcmvj7osTOtm6VVT16b5oQdS4beibX2SYBwgYLmqhBjA1t51CarSaBuX5YNsWLjsqfW5Cw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm64-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-gnu/-/resolver-binding-linux-arm64-gnu-1.11.1.tgz", + "integrity": "sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm64-musl": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-musl/-/resolver-binding-linux-arm64-musl-1.11.1.tgz", + "integrity": "sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-ppc64-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-ppc64-gnu/-/resolver-binding-linux-ppc64-gnu-1.11.1.tgz", + "integrity": "sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-riscv64-gnu": { + "version": "1.11.1", + "resolved": 
"https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-gnu/-/resolver-binding-linux-riscv64-gnu-1.11.1.tgz", + "integrity": "sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-riscv64-musl": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-musl/-/resolver-binding-linux-riscv64-musl-1.11.1.tgz", + "integrity": "sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-s390x-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-s390x-gnu/-/resolver-binding-linux-s390x-gnu-1.11.1.tgz", + "integrity": "sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-x64-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-gnu/-/resolver-binding-linux-x64-gnu-1.11.1.tgz", + "integrity": "sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-x64-musl": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-musl/-/resolver-binding-linux-x64-musl-1.11.1.tgz", + "integrity": "sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA==", + "cpu": [ + "x64" + ], 
+ "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-wasm32-wasi": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-wasm32-wasi/-/resolver-binding-wasm32-wasi-1.11.1.tgz", + "integrity": "sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ==", + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@napi-rs/wasm-runtime": "^0.2.11" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@unrs/resolver-binding-win32-arm64-msvc": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-arm64-msvc/-/resolver-binding-win32-arm64-msvc-1.11.1.tgz", + "integrity": "sha512-nRcz5Il4ln0kMhfL8S3hLkxI85BXs3o8EYoattsJNdsX4YUU89iOkVn7g0VHSRxFuVMdM4Q1jEpIId1Ihim/Uw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@unrs/resolver-binding-win32-ia32-msvc": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-ia32-msvc/-/resolver-binding-win32-ia32-msvc-1.11.1.tgz", + "integrity": "sha512-DCEI6t5i1NmAZp6pFonpD5m7i6aFrpofcp4LA2i8IIq60Jyo28hamKBxNrZcyOwVOZkgsRp9O2sXWBWP8MnvIQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@unrs/resolver-binding-win32-x64-msvc": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-x64-msvc/-/resolver-binding-win32-x64-msvc-1.11.1.tgz", + "integrity": "sha512-lrW200hZdbfRtztbygyaq/6jP6AKE8qQN2KvPcJ+x7wiD038YtnYtZ82IMNJ69GJibV7bwL3y9FgK+5w/pYt6g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@xyflow/react": { + "version": "12.10.0", + "resolved": 
"https://registry.npmjs.org/@xyflow/react/-/react-12.10.0.tgz", + "integrity": "sha512-eOtz3whDMWrB4KWVatIBrKuxECHqip6PfA8fTpaS2RUGVpiEAe+nqDKsLqkViVWxDGreq0lWX71Xth/SPAzXiw==", + "license": "MIT", + "dependencies": { + "@xyflow/system": "0.0.74", + "classcat": "^5.0.3", + "zustand": "^4.4.0" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@xyflow/system": { + "version": "0.0.74", + "resolved": "https://registry.npmjs.org/@xyflow/system/-/system-0.0.74.tgz", + "integrity": "sha512-7v7B/PkiVrkdZzSbL+inGAo6tkR/WQHHG0/jhSvLQToCsfa8YubOGmBYd1s08tpKpihdHDZFwzQZeR69QSBb4Q==", + "license": "MIT", + "dependencies": { + "@types/d3-drag": "^3.0.7", + "@types/d3-interpolate": "^3.0.4", + "@types/d3-selection": "^3.0.10", + "@types/d3-transition": "^3.0.8", + "@types/d3-zoom": "^3.0.8", + "d3-drag": "^3.0.0", + "d3-interpolate": "^3.0.1", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + 
"fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/aria-hidden": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.6.tgz", + "integrity": "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/aria-query": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", + "integrity": "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + 
"is-array-buffer": "^3.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.9", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz", + "integrity": "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.0", + "es-object-atoms": "^1.1.1", + "get-intrinsic": "^1.3.0", + "is-string": "^1.1.1", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.6.tgz", + "integrity": "sha512-F/TKATkzseUExPlfvmwQKGITM3DGTK+vkAsCZoDc5daVygbJBnjEUCbgkAvVFsgfXfX4YIqZ/27G3k3tdXrTxQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "es-shim-unscopables": "^1.1.0" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.3.tgz", + "integrity": "sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz", + "integrity": "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", + "integrity": 
"sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ast-types-flow": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", + "integrity": "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/async-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", + "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axe-core": { + "version": "4.11.1", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.11.1.tgz", + "integrity": "sha512-BASOg+YwO2C+346x3LZOeoovTIoTrRqEsqMa6fmfAV0P+U9mFr9NsyOEpiYvFjbc64NMrSswhV50WdXzdb/Z5A==", + "dev": true, + "license": "MPL-2.0", + "engines": { + "node": ">=4" + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001769", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001769.tgz", + "integrity": "sha512-BCfFL1sHijQlBGWBMuJyhZUhzo7wer5sVj9hqekB/7xn0Ypy+pER/edCYQm4exbXj4WiySGp40P8UuTh6w1srg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/classcat": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==", + "license": "MIT" + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cmdk": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.1.1.tgz", + "integrity": "sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "^1.1.1", + "@radix-ui/react-dialog": "^1.1.6", + "@radix-ui/react-id": "^1.1.0", + "@radix-ui/react-primitive": "^2.0.2" + }, + "peerDependencies": { + "react": "^18 || ^19 || ^19.0.0-rc", + "react-dom": "^18 || ^19 || ^19.0.0-rc" + } + }, + 
"node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + 
"node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-force": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", + "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-quadtree": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.2.tgz", + "integrity": "sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + 
"node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-quadtree": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", + "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "peer": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", 
+ "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + 
"d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/dagre": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/dagre/-/dagre-0.8.5.tgz", + "integrity": "sha512-/aTqmnRta7x7MCCpExk7HQL2O4owCT2h8NT//9I1OQ9vt29Pa0BzSAkR5lwFUcQ7491yVi/3CXU9jQ5o0Mn2Sw==", + "license": "MIT", + "dependencies": { + "graphlib": "^2.1.8", + "lodash": "^4.17.15" + } + }, + "node_modules/damerau-levenshtein": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", + "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/data-view-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", + "integrity": "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz", + "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/inspect-js" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz", + "integrity": 
"sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/date-fns": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz", + "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/kossnocorp" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js-light": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz", + "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", + "license": "MIT" + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "devOptional": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/dom-helpers": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", + "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==", + "license": 
"MIT", + "dependencies": { + "@babel/runtime": "^7.8.7", + "csstype": "^3.0.2" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/enhanced-resolve": { + "version": "5.19.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.19.0.tgz", + "integrity": "sha512-phv3E1Xl4tQOShqSte26C7Fl84EwUdZsyOuSSk9qtAGyyQs2s3jJzComh+Abf4g187lUUAvH+H26omrqia2aGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.3.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/es-abstract": { + "version": "1.24.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.1.tgz", + "integrity": "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.2", + "arraybuffer.prototype.slice": "^1.0.4", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "data-view-buffer": "^1.0.2", + "data-view-byte-length": "^1.0.2", + "data-view-byte-offset": "^1.0.1", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "es-set-tostringtag": "^2.1.0", + "es-to-primitive": "^1.3.0", + "function.prototype.name": 
"^1.1.8", + "get-intrinsic": "^1.3.0", + "get-proto": "^1.0.1", + "get-symbol-description": "^1.1.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "internal-slot": "^1.1.0", + "is-array-buffer": "^3.0.5", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.2", + "is-negative-zero": "^2.0.3", + "is-regex": "^1.2.1", + "is-set": "^2.0.3", + "is-shared-array-buffer": "^1.0.4", + "is-string": "^1.1.1", + "is-typed-array": "^1.1.15", + "is-weakref": "^1.1.1", + "math-intrinsics": "^1.1.0", + "object-inspect": "^1.13.4", + "object-keys": "^1.1.1", + "object.assign": "^4.1.7", + "own-keys": "^1.0.1", + "regexp.prototype.flags": "^1.5.4", + "safe-array-concat": "^1.1.3", + "safe-push-apply": "^1.0.0", + "safe-regex-test": "^1.1.0", + "set-proto": "^1.0.0", + "stop-iteration-iterator": "^1.1.0", + "string.prototype.trim": "^1.2.10", + "string.prototype.trimend": "^1.0.9", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.3", + "typed-array-byte-length": "^1.0.3", + "typed-array-byte-offset": "^1.0.4", + "typed-array-length": "^1.0.7", + "unbox-primitive": "^1.1.0", + "which-typed-array": "^1.1.19" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">= 0.4" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.2.tgz", + "integrity": "sha512-BrUQ0cPTB/IwXj23HtwHjS9n7O4h9FX94b4xc5zlTHxeLgTAdzYUDyy6KdExAl9lbN5rtfe44xpjpmj9grxs5w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.1", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.1.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.3.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "iterator.prototype": "^1.1.5", + "safe-array-concat": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz", + "integrity": "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==", 
+ "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-to-primitive": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", + "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-symbol": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", 
+ "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-config-next": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-15.5.12.tgz", + "integrity": "sha512-ktW3XLfd+ztEltY5scJNjxjHwtKWk6vU2iwzZqSN09UsbBmMeE/cVlJ1yESg6Yx5LW7p/Z8WzUAgYXGLEmGIpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@next/eslint-plugin-next": "15.5.12", + "@rushstack/eslint-patch": "^1.10.3", + "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-import-resolver-typescript": "^3.5.2", + "eslint-plugin-import": "^2.31.0", + "eslint-plugin-jsx-a11y": "^6.10.0", + "eslint-plugin-react": "^7.37.0", + "eslint-plugin-react-hooks": "^5.0.0" + }, + "peerDependencies": { + "eslint": "^7.23.0 || ^8.0.0 || ^9.0.0", + "typescript": ">=3.3.1" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-import-resolver-node": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "debug": "^3.2.7", + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" + } + }, + "node_modules/eslint-import-resolver-node/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-import-resolver-typescript": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.10.1.tgz", + "integrity": "sha512-A1rHYb06zjMGAxdLSkN2fXPBwuSaQ0iO5M/hdyS0Ajj1VBaRp0sPD3dn1FhME3c/JluGFbwSxyCfqdSbtQLAHQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@nolyfill/is-core-module": "1.0.39", + "debug": "^4.4.0", + "get-tsconfig": "^4.10.0", + "is-bun-module": "^2.0.0", + "stable-hash": "^0.0.5", + "tinyglobby": "^0.2.13", + "unrs-resolver": "^1.6.2" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint-import-resolver-typescript" + }, + "peerDependencies": { + "eslint": "*", + "eslint-plugin-import": "*", + "eslint-plugin-import-x": "*" + }, + "peerDependenciesMeta": { + "eslint-plugin-import": { + "optional": true + }, + "eslint-plugin-import-x": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.12.1.tgz", + "integrity": "sha512-L8jSWTze7K2mTg0vos/RuLRS5soomksDPoJLXIslC7c8Wmut3bx7CPpJijDcBZtxQ5lrbUdM+s0OlNbz0DCDNw==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^3.2.7" + }, + "engines": { + "node": ">=4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils/node_modules/debug": { + "version": "3.2.7", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import": { + "version": "2.32.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.32.0.tgz", + "integrity": "sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@rtsao/scc": "^1.1.0", + "array-includes": "^3.1.9", + "array.prototype.findlastindex": "^1.2.6", + "array.prototype.flat": "^1.3.3", + "array.prototype.flatmap": "^1.3.3", + "debug": "^3.2.7", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.9", + "eslint-module-utils": "^2.12.1", + "hasown": "^2.0.2", + "is-core-module": "^2.16.1", + "is-glob": "^4.0.3", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "object.groupby": "^1.0.3", + "object.values": "^1.2.1", + "semver": "^6.3.1", + "string.prototype.trimend": "^1.0.9", + "tsconfig-paths": "^3.15.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9" + } + }, + "node_modules/eslint-plugin-import/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": 
"bin/semver.js" + } + }, + "node_modules/eslint-plugin-jsx-a11y": { + "version": "6.10.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.2.tgz", + "integrity": "sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "aria-query": "^5.3.2", + "array-includes": "^3.1.8", + "array.prototype.flatmap": "^1.3.2", + "ast-types-flow": "^0.0.8", + "axe-core": "^4.10.0", + "axobject-query": "^4.1.0", + "damerau-levenshtein": "^1.0.8", + "emoji-regex": "^9.2.2", + "hasown": "^2.0.2", + "jsx-ast-utils": "^3.3.5", + "language-tags": "^1.0.9", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "safe-regex-test": "^1.0.3", + "string.prototype.includes": "^2.0.1" + }, + "engines": { + "node": ">=4.0" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9" + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.37.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.5.tgz", + "integrity": "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.3", + "array.prototype.tosorted": "^1.1.4", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.2.1", + "estraverse": "^5.3.0", + "hasown": "^2.0.2", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.9", + "object.fromentries": "^2.0.8", + "object.values": "^1.2.1", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.12", + "string.prototype.repeat": "^1.0.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" + } + }, + 
"node_modules/eslint-plugin-react-hooks": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.2.0.tgz", + "integrity": "sha512-+f15FfK64YQwZdJNELETdn5ibXEUQmW1DZL6KXhNnc2heoy/sg9VJJeT7n8TlMWouzWqSWavFkIhHyIbIAEapg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/resolve": { + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/eslint-plugin-react/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": 
"https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { 
+ "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-equals": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.4.0.tgz", + "integrity": "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/fast-glob": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz", + "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + 
"is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": 
"sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz", + "integrity": 
"sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "functions-have-names": "^1.2.3", + "hasown": "^2.0.2", + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", + "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/generator-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", + "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + 
"integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-symbol-description": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", + "integrity": "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-tsconfig": { + "version": "4.13.6", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.6.tgz", + "integrity": "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": 
"https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globalthis": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.1", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/graphlib": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/graphlib/-/graphlib-2.1.8.tgz", + "integrity": "sha512-jcLLfkpoVGmH7/InMC/1hIvOPSUh38oJtGhvrOFGzioE1DZ+0YW16RgmOJhHiuWTvGiJQ9Z1Ik43JvkRPRvE+A==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.15" + } + }, + "node_modules/has-bigints": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", + "integrity": 
"sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", + "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": 
"sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/internal-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", + "integrity": 
"sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", + "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-async-function": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", + "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "async-function": "^1.0.0", + "call-bound": "^1.0.3", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", + "integrity": "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.2" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-boolean-object": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", + "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bun-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-bun-module/-/is-bun-module-2.0.0.tgz", + "integrity": "sha512-gNCGbnnnnFAUGKeZ9PdbyeGYJqewpmc2aKHUEMO5nQPWU9lOmv7jcmQIv+qHD8fXW6W7qfuCwX4rY9LNRjXrkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.7.1" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", + "integrity": 
"sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz", + "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz", + "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz", + "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.4", + 
"generator-function": "^2.0.0", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-map": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz", + "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz", + "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-string": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz", + "integrity": "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz", + "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-symbols": "^1.1.0", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz", + "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz", + "integrity": 
"sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/iterator.prototype": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz", + "integrity": "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "get-proto": "^1.0.0", + "has-symbols": "^1.1.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/jiti": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + 
"node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", + "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/language-subtag-registry": { + "version": "0.3.23", + "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", + "integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/language-tags": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", + "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", + "dev": true, + "license": "MIT", + "dependencies": { + "language-subtag-registry": "^0.3.20" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lightningcss": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.2.tgz", + "integrity": "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + 
"type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.30.2", + "lightningcss-darwin-arm64": "1.30.2", + "lightningcss-darwin-x64": "1.30.2", + "lightningcss-freebsd-x64": "1.30.2", + "lightningcss-linux-arm-gnueabihf": "1.30.2", + "lightningcss-linux-arm64-gnu": "1.30.2", + "lightningcss-linux-arm64-musl": "1.30.2", + "lightningcss-linux-x64-gnu": "1.30.2", + "lightningcss-linux-x64-musl": "1.30.2", + "lightningcss-win32-arm64-msvc": "1.30.2", + "lightningcss-win32-x64-msvc": "1.30.2" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.30.2.tgz", + "integrity": "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.2.tgz", + "integrity": "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.2.tgz", + "integrity": "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==", + "cpu": [ + "x64" + ], + "dev": true, + 
"license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.2.tgz", + "integrity": "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.2.tgz", + "integrity": "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.2.tgz", + "integrity": "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.30.2", + "resolved": 
"https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.2.tgz", + "integrity": "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.2.tgz", + "integrity": "sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.2.tgz", + "integrity": "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.2.tgz", + "integrity": "sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 
12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.2.tgz", + "integrity": "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + 
"loose-envify": "cli.js" + } + }, + "node_modules/lucide-react": { + "version": "0.400.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.400.0.tgz", + "integrity": "sha512-rpp7pFHh3Xd93KHixNgB0SqThMHpYNzsGUu69UaQbSZ75Q/J3m5t6EhKyMT3m4w2WOxmJ2mY0tD3vebnXqQryQ==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": 
"sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/napi-postinstall": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/napi-postinstall/-/napi-postinstall-0.3.4.tgz", + "integrity": "sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==", + "dev": true, + "license": "MIT", + "bin": { + "napi-postinstall": "lib/cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/napi-postinstall" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": 
"sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/next": { + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/next/-/next-15.5.12.tgz", + "integrity": "sha512-Fi/wQ4Etlrn60rz78bebG1i1SR20QxvV8tVp6iJspjLUSHcZoeUXCt+vmWoEcza85ElZzExK/jJ/F6SvtGktjA==", + "license": "MIT", + "dependencies": { + "@next/env": "15.5.12", + "@swc/helpers": "0.5.15", + "caniuse-lite": "^1.0.30001579", + "postcss": "8.4.31", + "styled-jsx": "5.1.6" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "15.5.12", + "@next/swc-darwin-x64": "15.5.12", + "@next/swc-linux-arm64-gnu": "15.5.12", + "@next/swc-linux-arm64-musl": "15.5.12", + "@next/swc-linux-x64-gnu": "15.5.12", + "@next/swc-linux-x64-musl": "15.5.12", + "@next/swc-win32-arm64-msvc": "15.5.12", + "@next/swc-win32-x64-msvc": "15.5.12", + "sharp": "^0.34.3" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.51.1", + "babel-plugin-react-compiler": "*", + "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "babel-plugin-react-compiler": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { 
+ "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.9", + "resolved": 
"https://registry.npmjs.org/object.entries/-/object.entries-1.1.9.tgz", + "integrity": "sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.groupby": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz", + "integrity": "sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.values": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.1.tgz", + "integrity": "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + 
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/own-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", + "integrity": "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.6", + "object-keys": "^1.1.1", + "safe-push-apply": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": 
"sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": 
"sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": 
"https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/react-remove-scroll": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.2.tgz", + "integrity": "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==", + "license": "MIT", + "dependencies": { + "react-remove-scroll-bar": "^2.3.7", + "react-style-singleton": "^2.2.3", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.3", + "use-sidecar": "^1.1.3" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": 
"^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.8.tgz", + "integrity": "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", + "license": "MIT", + "dependencies": { + "react-style-singleton": "^2.2.2", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-smooth": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/react-smooth/-/react-smooth-4.0.4.tgz", + "integrity": "sha512-gnGKTpYwqL0Iii09gHobNolvX4Kiq4PKx6eWBCYYix+8cdw+cGo3do906l1NBPKkSWx1DghC1dlWG9L2uGd61Q==", + "license": "MIT", + "dependencies": { + "fast-equals": "^5.0.1", + "prop-types": "^15.8.1", + "react-transition-group": "^4.4.5" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", + "integrity": "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", + "license": "MIT", + "dependencies": { + "get-nonce": "^1.0.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-transition-group": { + "version": "4.4.5", + "resolved": 
"https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz", + "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==", + "license": "BSD-3-Clause", + "dependencies": { + "@babel/runtime": "^7.5.5", + "dom-helpers": "^5.0.1", + "loose-envify": "^1.4.0", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": ">=16.6.0", + "react-dom": ">=16.6.0" + } + }, + "node_modules/recharts": { + "version": "2.15.4", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.15.4.tgz", + "integrity": "sha512-UT/q6fwS3c1dHbXv2uFgYJ9BMFHu3fwnd7AYZaEQhXuYQ4hgsxLvsUXzGdKeZrW5xopzDCvuA2N41WJ88I7zIw==", + "license": "MIT", + "dependencies": { + "clsx": "^2.0.0", + "eventemitter3": "^4.0.1", + "lodash": "^4.17.21", + "react-is": "^18.3.1", + "react-smooth": "^4.0.4", + "recharts-scale": "^0.4.4", + "tiny-invariant": "^1.3.1", + "victory-vendor": "^36.6.8" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "react": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/recharts-scale": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/recharts-scale/-/recharts-scale-0.4.5.tgz", + "integrity": "sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==", + "license": "MIT", + "dependencies": { + "decimal.js-light": "^2.4.1" + } + }, + "node_modules/recharts/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", + "integrity": 
"sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.1", + "which-builtin-type": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", + "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + 
"node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-array-concat": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", + "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "has-symbols": "^1.1.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-push-apply": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", + "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "devOptional": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": 
"^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-proto": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz", + "integrity": "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/sharp": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.5.tgz", + "integrity": "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==", + "hasInstallScript": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "@img/colour": "^1.0.0", + "detect-libc": "^2.1.2", + "semver": "^7.7.3" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-darwin-arm64": "0.34.5", + "@img/sharp-darwin-x64": "0.34.5", + "@img/sharp-libvips-darwin-arm64": "1.2.4", + "@img/sharp-libvips-darwin-x64": "1.2.4", + "@img/sharp-libvips-linux-arm": "1.2.4", + "@img/sharp-libvips-linux-arm64": "1.2.4", + "@img/sharp-libvips-linux-ppc64": "1.2.4", + "@img/sharp-libvips-linux-riscv64": "1.2.4", + "@img/sharp-libvips-linux-s390x": "1.2.4", + 
"@img/sharp-libvips-linux-x64": "1.2.4", + "@img/sharp-libvips-linuxmusl-arm64": "1.2.4", + "@img/sharp-libvips-linuxmusl-x64": "1.2.4", + "@img/sharp-linux-arm": "0.34.5", + "@img/sharp-linux-arm64": "0.34.5", + "@img/sharp-linux-ppc64": "0.34.5", + "@img/sharp-linux-riscv64": "0.34.5", + "@img/sharp-linux-s390x": "0.34.5", + "@img/sharp-linux-x64": "0.34.5", + "@img/sharp-linuxmusl-arm64": "0.34.5", + "@img/sharp-linuxmusl-x64": "0.34.5", + "@img/sharp-wasm32": "0.34.5", + "@img/sharp-win32-arm64": "0.34.5", + "@img/sharp-win32-ia32": "0.34.5", + "@img/sharp-win32-x64": "0.34.5" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/sonner": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/sonner/-/sonner-1.7.4.tgz", + "integrity": "sha512-DIS8z4PfJRbIyfVFDVnK9rO3eYDtse4Omcm6bt0oEr5/jtLgysmjuBl1frJ9E/EQZrFmKx2A8m/s5s9CRXIzhw==", + "license": "MIT", + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": 
"https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stable-hash": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/stable-hash/-/stable-hash-0.0.5.tgz", + "integrity": "sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA==", + "dev": true, + "license": "MIT" + }, + "node_modules/stop-iteration-iterator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz", + "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "internal-slot": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/string.prototype.includes": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz", + "integrity": "sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", + "integrity": "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.6", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": 
"^1.2.6", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "regexp.prototype.flags": "^1.5.3", + "set-function-name": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.repeat": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", + "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", + "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-data-property": "^1.1.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-object-atoms": "^1.0.0", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", + "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + 
"version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", + "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwind-merge": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.6.1.tgz", + "integrity": "sha512-Oo6tHdpZsGpkKG88HJ8RR1rg/RdnEkQEfMoEk2x1XRI3F1AxeU+ijRXpiVUF4UbLfcxxRGw6TbUINKYdWVsQTQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz", + "integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tapable": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + 
"license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz", + "integrity": 
"sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/tsconfig-paths": { + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", + "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz", + "integrity": 
"sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz", + "integrity": "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.15", + "reflect.getprototypeof": "^1.0.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", + "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0", + "reflect.getprototypeof": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + 
"tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unbox-primitive": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz", + "integrity": "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-bigints": "^1.0.2", + "has-symbols": "^1.1.0", + "which-boxed-primitive": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unrs-resolver": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/unrs-resolver/-/unrs-resolver-1.11.1.tgz", + "integrity": "sha512-bSjt9pjaEBnNiGgc9rUiHGKv5l4/TGzDmYw3RhnkJGtLhbnnA/5qJj7x3dNDCRx/PJxu774LlH8lCOlB4hEfKg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "napi-postinstall": "^0.3.0" + }, + "funding": { + "url": "https://opencollective.com/unrs-resolver" + }, + "optionalDependencies": { + "@unrs/resolver-binding-android-arm-eabi": "1.11.1", + "@unrs/resolver-binding-android-arm64": "1.11.1", + "@unrs/resolver-binding-darwin-arm64": "1.11.1", + "@unrs/resolver-binding-darwin-x64": "1.11.1", + "@unrs/resolver-binding-freebsd-x64": "1.11.1", + "@unrs/resolver-binding-linux-arm-gnueabihf": "1.11.1", + "@unrs/resolver-binding-linux-arm-musleabihf": "1.11.1", + "@unrs/resolver-binding-linux-arm64-gnu": "1.11.1", + "@unrs/resolver-binding-linux-arm64-musl": "1.11.1", + "@unrs/resolver-binding-linux-ppc64-gnu": "1.11.1", + "@unrs/resolver-binding-linux-riscv64-gnu": "1.11.1", + 
"@unrs/resolver-binding-linux-riscv64-musl": "1.11.1", + "@unrs/resolver-binding-linux-s390x-gnu": "1.11.1", + "@unrs/resolver-binding-linux-x64-gnu": "1.11.1", + "@unrs/resolver-binding-linux-x64-musl": "1.11.1", + "@unrs/resolver-binding-wasm32-wasi": "1.11.1", + "@unrs/resolver-binding-win32-arm64-msvc": "1.11.1", + "@unrs/resolver-binding-win32-ia32-msvc": "1.11.1", + "@unrs/resolver-binding-win32-x64-msvc": "1.11.1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", + "integrity": "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", + "integrity": "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", + "license": "MIT", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": 
"https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/victory-vendor": { + "version": "36.9.2", + "resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-36.9.2.tgz", + "integrity": "sha512-PnpQQMuxlwYdocC8fIJqVXvkeViHYzotI+NJrCuav0ZYFoq912ZHBk3mCeuj+5/VpodOjPe1z0Fk2ihgzlXqjQ==", + "license": "MIT AND ISC", + "dependencies": { + "@types/d3-array": "^3.0.3", + "@types/d3-ease": "^3.0.0", + "@types/d3-interpolate": "^3.0.1", + "@types/d3-scale": "^4.0.2", + "@types/d3-shape": "^3.1.0", + "@types/d3-time": "^3.0.0", + "@types/d3-timer": "^3.0.0", + "d3-array": "^3.1.6", + "d3-ease": "^3.0.1", + "d3-interpolate": "^3.0.1", + "d3-scale": "^4.0.2", + "d3-shape": "^3.1.0", + "d3-time": "^3.0.0", + "d3-timer": "^3.0.1" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz", + "integrity": "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-bigint": "^1.1.0", + "is-boolean-object": "^1.2.1", + "is-number-object": "^1.1.1", + "is-string": "^1.1.1", + "is-symbol": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } 
+ }, + "node_modules/which-builtin-type": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz", + "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.1.0", + "is-finalizationregistry": "^1.1.0", + "is-generator-function": "^1.0.10", + "is-regex": "^1.2.1", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.1.0", + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.20", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.20.tgz", + "integrity": "sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + 
} + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + } + } +} diff --git a/memorylayer-mcp-typescript/package-lock.json b/memorylayer-mcp-typescript/package-lock.json new file mode 100644 index 0000000..4478378 --- /dev/null +++ b/memorylayer-mcp-typescript/package-lock.json @@ -0,0 +1,2655 @@ +{ + "name": "@scitrera/memorylayer-mcp-server", + "version": "0.0.4", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@scitrera/memorylayer-mcp-server", + "version": "0.0.4", + "license": "Apache-2.0", + "dependencies": { + "@modelcontextprotocol/sdk": "^1.26.0", + "@scitrera/memorylayer-sdk": "file:../memorylayer-sdk-typescript" + }, + "bin": { + "memorylayer-mcp": 
"dist/bin/memorylayer-mcp.js" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "typescript": "^5.9.3", + "vitest": "^4.0.18" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "../memorylayer-sdk-typescript": { + "name": "@scitrera/memorylayer-sdk", + "version": "0.0.4", + "license": "Apache-2.0", + "devDependencies": { + "@types/node": "^20.0.0", + "typescript": "^5.3.0", + "vitest": "^4.0.18" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", + "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz", + "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz", + "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz", + "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==", + "cpu": [ + "x64" + ], + 
"dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz", + "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz", + "integrity": "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz", + "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz", + "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz", + "integrity": 
"sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz", + "integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz", + "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz", + "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz", + "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.3", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz", + "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz", + "integrity": "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz", + "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz", + "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz", + "integrity": "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + 
}, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz", + "integrity": "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz", + "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz", + "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz", + "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz", + "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==", + "cpu": [ + "x64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz", + "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz", + "integrity": "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz", + "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@hono/node-server": { + "version": "1.19.9", + "resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.9.tgz", + "integrity": "sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw==", + "license": "MIT", + "engines": { + "node": ">=18.14.1" + }, + "peerDependencies": { + "hono": "^4" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + 
"dev": true, + "license": "MIT" + }, + "node_modules/@modelcontextprotocol/sdk": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.26.0.tgz", + "integrity": "sha512-Y5RmPncpiDtTXDbLKswIJzTqu2hyBKxTNsgKqKclDbhIgg1wgtf1fRuvxgTnRfcnxtvvgbIEcqUOzZrJ6iSReg==", + "license": "MIT", + "dependencies": { + "@hono/node-server": "^1.19.9", + "ajv": "^8.17.1", + "ajv-formats": "^3.0.1", + "content-type": "^1.0.5", + "cors": "^2.8.5", + "cross-spawn": "^7.0.5", + "eventsource": "^3.0.2", + "eventsource-parser": "^3.0.0", + "express": "^5.2.1", + "express-rate-limit": "^8.2.1", + "hono": "^4.11.4", + "jose": "^6.1.3", + "json-schema-typed": "^8.0.2", + "pkce-challenge": "^5.0.0", + "raw-body": "^3.0.0", + "zod": "^3.25 || ^4.0", + "zod-to-json-schema": "^3.25.1" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@cfworker/json-schema": "^4.1.1", + "zod": "^3.25 || ^4.0" + }, + "peerDependenciesMeta": { + "@cfworker/json-schema": { + "optional": true + }, + "zod": { + "optional": false + } + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", + "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", + "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.57.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", + "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", + "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", + "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", + "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", + "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": 
"4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", + "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", + "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", + "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", + "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", + "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", + "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", + "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", + "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", + "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", + "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "cpu": [ + "s390x" + ], 
+ "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", + "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", + "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", + "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", + "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", + "integrity": 
"sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", + "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", + "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", + "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@scitrera/memorylayer-sdk": { + "resolved": "../memorylayer-sdk-typescript", + "link": true + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": 
"sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.19.30", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.30.tgz", + "integrity": "sha512-WJtwWJu7UdlvzEAUm484QNg5eAoq5QR08KDNx7g45Usrs2NtOPiX8ugDqmKdXkyL03rBqU5dYNYVQetEpBHq2g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@vitest/expect": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.18.tgz", + "integrity": "sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "chai": "^6.2.1", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.18.tgz", + "integrity": "sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@vitest/spy": "4.0.18", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.18.tgz", + "integrity": "sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.18.tgz", + "integrity": "sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.0.18", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.18.tgz", + "integrity": "sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.18.tgz", + "integrity": "sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==", + "dev": true, + "license": "MIT", + "funding": { + "url": 
"https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.18.tgz", + "integrity": "sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/accepts": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz", + "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==", + "license": "MIT", + "dependencies": { + "mime-types": "^3.0.0", + "negotiator": "^1.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz", + "integrity": "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": 
"sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/body-parser": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.2.tgz", + "integrity": "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==", + "license": "MIT", + "dependencies": { + "bytes": "^3.1.2", + "content-type": "^1.0.5", + "debug": "^4.4.3", + "http-errors": "^2.0.0", + "iconv-lite": "^0.7.0", + "on-finished": "^2.4.1", + "qs": "^6.14.1", + "raw-body": "^3.0.1", + "type-is": "^2.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + 
}, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/content-disposition": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz", + "integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz", + "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==", + "license": "MIT", + "engines": { + "node": ">=6.6.0" + } + }, + "node_modules/cors": { + "version": "2.8.6", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.6.tgz", + "integrity": "sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==", + "license": "MIT", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + 
"node": ">= 0.10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz", + "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.3", + 
"@esbuild/android-arm": "0.27.3", + "@esbuild/android-arm64": "0.27.3", + "@esbuild/android-x64": "0.27.3", + "@esbuild/darwin-arm64": "0.27.3", + "@esbuild/darwin-x64": "0.27.3", + "@esbuild/freebsd-arm64": "0.27.3", + "@esbuild/freebsd-x64": "0.27.3", + "@esbuild/linux-arm": "0.27.3", + "@esbuild/linux-arm64": "0.27.3", + "@esbuild/linux-ia32": "0.27.3", + "@esbuild/linux-loong64": "0.27.3", + "@esbuild/linux-mips64el": "0.27.3", + "@esbuild/linux-ppc64": "0.27.3", + "@esbuild/linux-riscv64": "0.27.3", + "@esbuild/linux-s390x": "0.27.3", + "@esbuild/linux-x64": "0.27.3", + "@esbuild/netbsd-arm64": "0.27.3", + "@esbuild/netbsd-x64": "0.27.3", + "@esbuild/openbsd-arm64": "0.27.3", + "@esbuild/openbsd-x64": "0.27.3", + "@esbuild/openharmony-arm64": "0.27.3", + "@esbuild/sunos-x64": "0.27.3", + "@esbuild/win32-arm64": "0.27.3", + "@esbuild/win32-ia32": "0.27.3", + "@esbuild/win32-x64": "0.27.3" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eventsource": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.7.tgz", + "integrity": 
"sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==", + "license": "MIT", + "dependencies": { + "eventsource-parser": "^3.0.1" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/eventsource-parser": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.6.tgz", + "integrity": "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==", + "license": "MIT", + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/express": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", + "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", + "license": "MIT", + "peer": true, + "dependencies": { + "accepts": "^2.0.0", + "body-parser": "^2.2.1", + "content-disposition": "^1.0.0", + "content-type": "^1.0.5", + "cookie": "^0.7.1", + "cookie-signature": "^1.2.1", + "debug": "^4.4.0", + "depd": "^2.0.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "finalhandler": "^2.1.0", + "fresh": "^2.0.0", + "http-errors": "^2.0.0", + "merge-descriptors": "^2.0.0", + "mime-types": "^3.0.0", + "on-finished": "^2.4.1", + "once": "^1.4.0", + "parseurl": "^1.3.3", + "proxy-addr": "^2.0.7", + "qs": "^6.14.0", + "range-parser": "^1.2.1", + "router": "^2.2.0", + "send": "^1.1.0", + "serve-static": "^2.2.0", + "statuses": "^2.0.1", + "type-is": "^2.0.1", + "vary": "^1.1.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/express" + } + }, + "node_modules/express-rate-limit": { + "version": "8.2.1", + "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.2.1.tgz", + "integrity": "sha512-PCZEIEIxqwhzw4KF0n7QF4QqruVTcF73O5kFKUnGOyjbCCgizBBiFaYpd/fnBLUMPw/BWw9OsiN7GgrNYr7j6g==", + "license": "MIT", + "dependencies": { + "ip-address": "10.0.1" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/express-rate-limit" + }, + "peerDependencies": { + "express": ">= 4.11" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/finalhandler": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz", + "integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==", + "license": "MIT", + 
"dependencies": { + "debug": "^4.4.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "on-finished": "^2.4.1", + "parseurl": "^1.3.3", + "statuses": "^2.0.1" + }, + "engines": { + "node": ">= 18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz", + "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", 
+ "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hono": { + "version": "4.11.7", + "resolved": "https://registry.npmjs.org/hono/-/hono-4.11.7.tgz", + "integrity": 
"sha512-l7qMiNee7t82bH3SeyUCt9UF15EVmaBvsppY2zQtrbIhl/yzBTny+YUxsVjSjQ6gaqaeVtZmGocom8TzBlA4Yw==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=16.9.0" + } + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/iconv-lite": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.2.tgz", + "integrity": "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ip-address": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.0.1.tgz", + "integrity": "sha512-NWv9YLW4PoW2B7xtzaS3NCot75m6nK7Icdv0o3lfMceJVRfSoQwqD4wEH5rLwoKJwUiZ/rfpiVBhnaF0FK4HoA==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": 
"sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-promise": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", + "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/jose": { + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/jose/-/jose-6.1.3.tgz", + "integrity": "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, + "node_modules/json-schema-typed": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/json-schema-typed/-/json-schema-typed-8.0.2.tgz", + "integrity": "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==", + "license": "BSD-2-Clause" + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/math-intrinsics": { + 
"version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz", + "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/merge-descriptors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz", + "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + 
"node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + 
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-to-regexp": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": 
"sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkce-challenge": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-5.0.1.tgz", + "integrity": "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==", + "license": "MIT", + "engines": { + "node": ">=16.20.0" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.14.2", + 
"resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", + "integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz", + "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==", + "license": "MIT", + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.7.0", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", + "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + 
"@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + "@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", + "@rollup/rollup-linux-arm-musleabihf": "4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + "@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/router": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz", + "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.0", + "depd": "^2.0.0", + "is-promise": "^4.0.0", + "parseurl": "^1.3.3", + "path-to-regexp": "^8.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": 
"sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.3", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "fresh": "^2.0.0", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", + "ms": "^2.1.3", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "statuses": "^2.0.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/serve-static": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz", + "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "parseurl": "^1.3.3", + "send": "^1.2.0" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + 
"engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + 
"object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyrainbow": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", + "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/type-is": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", + "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==", + "license": "MIT", + "dependencies": { + "content-type": "^1.0.5", + "media-typer": "^1.1.0", + "mime-types": "^3.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": 
"sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": 
">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vitest": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.18.tgz", + "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.0.18", + "@vitest/mocker": "4.0.18", + "@vitest/pretty-format": "4.0.18", + "@vitest/runner": "4.0.18", + "@vitest/snapshot": "4.0.18", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "es-module-lexer": "^1.7.0", + "expect-type": "^1.2.2", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + "pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^3.10.0", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.0.3", + "vite": "^6.0.0 || ^7.0.0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.0.18", + "@vitest/browser-preview": "4.0.18", + "@vitest/browser-webdriverio": "4.0.18", + "@vitest/ui": "4.0.18", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + 
"@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "license": "ISC" + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.25.1.tgz", + 
"integrity": "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==", + "license": "ISC", + "peerDependencies": { + "zod": "^3.25 || ^4" + } + } + } +} diff --git a/memorylayer-sdk-python/examples/basic_usage.py b/memorylayer-sdk-python/examples/basic_usage.py index d4d5aeb..b41137a 100644 --- a/memorylayer-sdk-python/examples/basic_usage.py +++ b/memorylayer-sdk-python/examples/basic_usage.py @@ -41,9 +41,7 @@ async def basic_example(): print(f" - {mem.content}") # Synthesize memories - reflection = await client.reflect( - query="summarize user's technology preferences", max_tokens=300 - ) + reflection = await client.reflect(query="summarize user's technology preferences", max_tokens=300) print(f"\nReflection:\n{reflection.reflection}") @@ -104,15 +102,11 @@ async def session_example(): {"description": "Debugging auth", "file": "auth.py", "line": 42}, ) - await client.set_context( - session.id, "user_intent", "Fix token refresh issue" - ) + await client.set_context(session.id, "user_intent", "Fix token refresh issue") # Retrieve context - context = await client.get_context( - session.id, ["current_task", "user_intent"] - ) - print(f"\nSession context:") + context = await client.get_context(session.id, ["current_task", "user_intent"]) + print("\nSession context:") print(f" Task: {context.get('current_task')}") print(f" Intent: {context.get('user_intent')}") @@ -167,9 +161,7 @@ async def memory_lifecycle_example(): print(f"Created: {memory.id}") # Update memory - updated = await client.update_memory( - memory.id, importance=0.5, tags=["testing", "temporary"] - ) + updated = await client.update_memory(memory.id, importance=0.5, tags=["testing", "temporary"]) print(f"Updated importance: {updated.importance}") # Get specific memory diff --git a/memorylayer-sdk-python/examples/sync_client_example.py b/memorylayer-sdk-python/examples/sync_client_example.py index e46f375..0ff734d 100644 --- 
a/memorylayer-sdk-python/examples/sync_client_example.py +++ b/memorylayer-sdk-python/examples/sync_client_example.py @@ -1,6 +1,6 @@ """Example usage of synchronous MemoryLayer client.""" -from memorylayer import SyncMemoryLayerClient, sync_client, MemoryType, MemorySubtype +from memorylayer import MemorySubtype, MemoryType, SyncMemoryLayerClient, sync_client def example_with_context_manager(): diff --git a/memorylayer-sdk-python/src/memorylayer/client.py b/memorylayer-sdk-python/src/memorylayer/client.py index e348c2b..669376a 100644 --- a/memorylayer-sdk-python/src/memorylayer/client.py +++ b/memorylayer-sdk-python/src/memorylayer/client.py @@ -2,7 +2,8 @@ import json import logging -from typing import Any, AsyncGenerator, Optional, Union +from collections.abc import AsyncGenerator +from typing import Any import httpx from pydantic import TypeAdapter @@ -22,7 +23,6 @@ ChatMessage, ChatThread, ChatThreadWithMessages, - DatasetColumn, DatasetInfo, DatasetJobInfo, DatasetSliceResult, @@ -81,9 +81,9 @@ class MemoryLayerClient: def __init__( self, base_url: str = "http://localhost:61001", - api_key: Optional[str] = None, - workspace_id: Optional[str] = None, - session_id: Optional[str] = None, + api_key: str | None = None, + workspace_id: str | None = None, + session_id: str | None = None, timeout: float = 30.0, ): """ @@ -101,7 +101,7 @@ def __init__( self.workspace_id = workspace_id self.session_id = session_id self.timeout = timeout - self._client: Optional[httpx.AsyncClient] = None + self._client: httpx.AsyncClient | None = None async def __aenter__(self) -> "MemoryLayerClient": """Async context manager entry.""" @@ -143,7 +143,7 @@ def clear_session(self) -> None: if self._client and "X-Session-ID" in self._client.headers: del self._client.headers["X-Session-ID"] - def get_session_id(self) -> Optional[str]: + def get_session_id(self) -> str | None: """ Get the current session ID, if any. 
@@ -168,9 +168,9 @@ async def _request( method: str, path: str, *, - json: Optional[dict[str, Any]] = None, - params: Optional[dict[str, Any]] = None, - enterprise_feature: Optional[str] = None, + json: dict[str, Any] | None = None, + params: dict[str, Any] | None = None, + enterprise_feature: str | None = None, ) -> dict[str, Any]: """ Make HTTP request with error handling. @@ -244,13 +244,13 @@ async def _request( async def remember( self, content: str, - type: Optional[Union[str, MemoryType]] = None, - subtype: Optional[Union[str, MemorySubtype]] = None, + type: str | MemoryType | None = None, + subtype: str | MemorySubtype | None = None, importance: float = 0.5, - tags: Optional[list[str]] = None, - metadata: Optional[dict[str, Any]] = None, - context_id: Optional[str] = None, - user_id: Optional[str] = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + context_id: str | None = None, + user_id: str | None = None, ) -> Memory: """ Store a new memory. @@ -299,20 +299,20 @@ async def remember( async def recall( self, query: str, - types: Optional[list[Union[str, MemoryType]]] = None, - subtypes: Optional[list[Union[str, MemorySubtype]]] = None, - tags: Optional[list[str]] = None, - mode: Optional[Union[str, RecallMode]] = None, + types: list[str | MemoryType] | None = None, + subtypes: list[str | MemorySubtype] | None = None, + tags: list[str] | None = None, + mode: str | RecallMode | None = None, limit: int = 10, - min_relevance: Optional[float] = None, - recency_weight: Optional[float] = None, - tolerance: Optional[Union[str, SearchTolerance]] = None, - include_associations: Optional[bool] = None, - traverse_depth: Optional[int] = None, - max_expansion: Optional[int] = None, - created_after: Optional[str] = None, - created_before: Optional[str] = None, - user_id: Optional[str] = None, + min_relevance: float | None = None, + recency_weight: float | None = None, + tolerance: str | SearchTolerance | None = None, + 
include_associations: bool | None = None, + traverse_depth: int | None = None, + max_expansion: int | None = None, + created_after: str | None = None, + created_before: str | None = None, + user_id: str | None = None, ) -> RecallResult: """ Search memories by semantic query. @@ -459,10 +459,10 @@ async def get_memory(self, memory_id: str) -> Memory: async def update_memory( self, memory_id: str, - content: Optional[str] = None, - importance: Optional[float] = None, - tags: Optional[list[str]] = None, - metadata: Optional[dict[str, Any]] = None, + content: str | None = None, + importance: float | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, ) -> Memory: """ Update an existing memory. @@ -503,9 +503,9 @@ async def associate( self, source_id: str, target_id: str, - relationship: Union[str, RelationshipType], + relationship: str | RelationshipType, strength: float = 0.5, - metadata: Optional[dict[str, Any]] = None, + metadata: dict[str, Any] | None = None, ) -> Association: """ Link two memories with a relationship. @@ -569,8 +569,8 @@ async def get_associations( async def create_session( self, ttl_seconds: int = 3600, - workspace_id: Optional[str] = None, - context_id: Optional[str] = None, + workspace_id: str | None = None, + context_id: str | None = None, auto_set_session: bool = True, ) -> Session: """ @@ -629,7 +629,7 @@ async def set_context( session_id: str, key: str, value: Any, - ttl_seconds: Optional[int] = None, + ttl_seconds: int | None = None, ) -> None: """ Set a context value in a session. 
@@ -683,7 +683,7 @@ async def get_context( async def get_briefing( self, - lookback_hours: Optional[int] = None, + lookback_hours: int | None = None, lookback_minutes: int = 60, detail_level: str = "abstract", limit: int = 10, @@ -736,7 +736,7 @@ async def create_workspace(self, name: str) -> Workspace: data = await self._request("POST", "/workspaces", json=payload) return Workspace(**data) - async def get_workspace(self, workspace_id: Optional[str] = None) -> Workspace: + async def get_workspace(self, workspace_id: str | None = None) -> Workspace: """ Get workspace details. @@ -759,8 +759,8 @@ async def get_workspace(self, workspace_id: Optional[str] = None) -> Workspace: async def update_workspace( self, workspace_id: str, - name: Optional[str] = None, - settings: Optional[dict[str, Any]] = None, + name: str | None = None, + settings: dict[str, Any] | None = None, ) -> Workspace: """ Update an existing workspace. @@ -793,8 +793,8 @@ async def create_context( self, workspace_id: str, name: str, - description: Optional[str] = None, - settings: Optional[dict[str, Any]] = None, + description: str | None = None, + settings: dict[str, Any] | None = None, ) -> dict[str, Any]: """ Create a context within a workspace. 
@@ -860,7 +860,7 @@ async def get_workspace_schema(self, workspace_id: str) -> dict[str, Any]: async def export_workspace( self, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, include_associations: bool = True, offset: int = 0, limit: int = 0, @@ -911,8 +911,9 @@ async def export_workspace( # Parse NDJSON response import json + text = response.text - lines = [line.strip() for line in text.strip().split('\n') if line.strip()] + lines = [line.strip() for line in text.strip().split("\n") if line.strip()] header = None memories = [] @@ -956,15 +957,12 @@ async def import_workspace( result = await client.import_workspace("ws_123", export_data) print(f"Imported {result['imported']} memories") """ - response = await self._request( - "POST", f"/workspaces/{workspace_id}/import", - json={"data": data} - ) + response = await self._request("POST", f"/workspaces/{workspace_id}/import", json={"data": data}) return response async def export_workspace_stream( self, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, include_associations: bool = True, offset: int = 0, limit: int = 0, @@ -1015,7 +1013,7 @@ async def export_workspace_stream( response.raise_for_status() text = response.text - lines = [line.strip() for line in text.strip().split('\n') if line.strip()] + lines = [line.strip() for line in text.strip().split("\n") if line.strip()] for line in lines: yield json.loads(line) @@ -1043,13 +1041,11 @@ async def import_workspace_stream( print(f"Imported {result['imported']} memories") """ # Serialize to NDJSON - ndjson_body = '\n'.join(json.dumps(line) for line in ndjson_lines) + ndjson_body = "\n".join(json.dumps(line) for line in ndjson_lines) client = self._ensure_client() response = await client.post( - f"/workspaces/{workspace_id}/import", - content=ndjson_body, - headers={"Content-Type": "application/x-ndjson"} + f"/workspaces/{workspace_id}/import", content=ndjson_body, headers={"Content-Type": "application/x-ndjson"} ) if 
response.status_code >= 400: @@ -1144,8 +1140,8 @@ async def batch_memories( async def list_sessions( self, - workspace_id: Optional[str] = None, - context_id: Optional[str] = None, + workspace_id: str | None = None, + context_id: str | None = None, include_expired: bool = False, ) -> list[dict[str, Any]]: """ @@ -1194,7 +1190,7 @@ async def commit_session( session_id: str, min_importance: float = 0.5, deduplicate: bool = True, - categories: Optional[list[str]] = None, + categories: list[str] | None = None, max_memories: int = 50, ) -> dict[str, Any]: """ @@ -1248,7 +1244,7 @@ async def touch_session(self, session_id: str) -> dict[str, Any]: async def context_exec( self, code: str, - result_var: Optional[str] = None, + result_var: str | None = None, return_result: bool = True, max_return_chars: int = 10_000, ) -> dict[str, Any]: @@ -1283,7 +1279,7 @@ async def context_exec( async def context_inspect( self, - variable: Optional[str] = None, + variable: str | None = None, preview_chars: int = 200, ) -> dict[str, Any]: """ @@ -1313,9 +1309,9 @@ async def context_load( var: str, query: str, limit: int = 50, - types: Optional[list[str]] = None, - tags: Optional[list[str]] = None, - min_relevance: Optional[float] = None, + types: list[str] | None = None, + tags: list[str] | None = None, + min_relevance: float | None = None, include_embeddings: bool = False, ) -> dict[str, Any]: """ @@ -1391,8 +1387,8 @@ async def context_query( self, prompt: str, variables: list[str], - max_context_chars: Optional[int] = None, - result_var: Optional[str] = None, + max_context_chars: int | None = None, + result_var: str | None = None, ) -> dict[str, Any]: """ Send sandbox variables and a prompt to the LLM. 
@@ -1429,11 +1425,11 @@ async def context_query( async def context_rlm( self, goal: str, - memory_query: Optional[str] = None, + memory_query: str | None = None, memory_limit: int = 100, max_iterations: int = 10, - variables: Optional[list[str]] = None, - result_var: Optional[str] = None, + variables: list[str] | None = None, + result_var: str | None = None, detail_level: str = "standard", ) -> dict[str, Any]: """ @@ -1524,15 +1520,15 @@ async def context_cleanup(self) -> None: async def create_thread( self, *, - workspace_id: Optional[str] = None, - thread_id: Optional[str] = None, - user_id: Optional[str] = None, - context_id: Optional[str] = None, - observer_id: Optional[str] = None, - subject_id: Optional[str] = None, - title: Optional[str] = None, - metadata: Optional[dict[str, Any]] = None, - expires_at: Optional[str] = None, + workspace_id: str | None = None, + thread_id: str | None = None, + user_id: str | None = None, + context_id: str | None = None, + observer_id: str | None = None, + subject_id: str | None = None, + title: str | None = None, + metadata: dict[str, Any] | None = None, + expires_at: str | None = None, ) -> ChatThread: """ Create a new chat thread. @@ -1581,8 +1577,8 @@ async def create_thread( async def list_threads( self, *, - workspace_id: Optional[str] = None, - user_id: Optional[str] = None, + workspace_id: str | None = None, + user_id: str | None = None, limit: int = 50, offset: int = 0, ) -> list[ChatThread]: @@ -1616,7 +1612,7 @@ async def get_thread( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> ChatThread: """ Get thread metadata. 
@@ -1643,7 +1639,7 @@ async def get_thread_full( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, limit: int = 100, offset: int = 0, order: str = "asc", @@ -1676,7 +1672,7 @@ async def delete_thread( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> None: """ Delete a thread and its messages. @@ -1700,7 +1696,7 @@ async def append_messages( thread_id: str, messages: list[dict[str, Any]], *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> list[ChatMessage]: """ Append messages to a thread. @@ -1726,8 +1722,10 @@ async def append_messages( payload: dict[str, Any] = {"messages": messages} data = await self._request( - "POST", f"/threads/{thread_id}/messages", - json=payload, params=params or None, + "POST", + f"/threads/{thread_id}/messages", + json=payload, + params=params or None, ) messages_adapter = TypeAdapter(list[ChatMessage]) return messages_adapter.validate_python(data.get("messages", data if isinstance(data, list) else [])) @@ -1736,10 +1734,10 @@ async def get_messages( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, limit: int = 100, offset: int = 0, - after_index: Optional[int] = None, + after_index: int | None = None, order: str = "asc", ) -> list[ChatMessage]: """ @@ -1774,7 +1772,7 @@ async def decompose_thread( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> DecompositionResult: """ Trigger memory decomposition for unprocessed messages. 
@@ -1796,7 +1794,8 @@ async def decompose_thread( params["workspace_id"] = ws_id data = await self._request( - "POST", f"/threads/{thread_id}/decompose", + "POST", + f"/threads/{thread_id}/decompose", params=params or None, ) return DecompositionResult(**data) @@ -1865,13 +1864,14 @@ async def upload_document( raise except httpx.HTTPStatusError as exc: raise MemoryLayerError( - str(exc), status_code=exc.response.status_code, + str(exc), + status_code=exc.response.status_code, ) async def list_documents( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, offset: int = 0, ) -> tuple[list[DocumentInfo], int]: @@ -1886,7 +1886,9 @@ async def list_documents( params["status"] = status data = await self._request( - "GET", "/documents", params=params, + "GET", + "/documents", + params=params, enterprise_feature="Document management", ) docs = [DocumentInfo(**d) for d in data.get("documents", [])] @@ -1895,17 +1897,22 @@ async def list_documents( async def get_document(self, document_id: str) -> DocumentInfo: """Get document metadata and processing status.""" data = await self._request( - "GET", f"/documents/{document_id}", + "GET", + f"/documents/{document_id}", enterprise_feature="Document management", ) return DocumentInfo(**data) async def delete_document( - self, document_id: str, *, delete_memories: bool = False, + self, + document_id: str, + *, + delete_memories: bool = False, ) -> None: """Delete a document and optionally its extracted memories.""" await self._request( - "DELETE", f"/documents/{document_id}", + "DELETE", + f"/documents/{document_id}", params={"delete_memories": str(delete_memories).lower()}, enterprise_feature="Document management", ) @@ -1915,7 +1922,7 @@ async def search_document_pages( query: str, *, limit: int = 10, - doc_ids: Optional[list[str]] = None, + doc_ids: list[str] | None = None, ) -> PageSearchResult: """ Search document pages using ColPali MaxSim visual similarity. 
@@ -1935,7 +1942,9 @@ async def search_document_pages( payload["doc_ids"] = doc_ids data = await self._request( - "POST", "/documents/search", json=payload, + "POST", + "/documents/search", + json=payload, enterprise_feature="Document page search", ) return PageSearchResult( @@ -1947,7 +1956,8 @@ async def search_document_pages( async def get_document_pages(self, document_id: str) -> list[DocumentPage]: """Get all pages for a document.""" data = await self._request( - "GET", f"/documents/{document_id}/pages", + "GET", + f"/documents/{document_id}/pages", enterprise_feature="Document pages", ) return [DocumentPage(**p) for p in data.get("pages", [])] @@ -1972,13 +1982,15 @@ async def get_page_image(self, document_id: str, page_id: str) -> bytes: raise except httpx.HTTPStatusError as exc: raise MemoryLayerError( - str(exc), status_code=exc.response.status_code, + str(exc), + status_code=exc.response.status_code, ) async def get_job(self, job_id: str) -> JobInfo: """Get ingestion job status and progress.""" data = await self._request( - "GET", f"/documents/jobs/{job_id}", + "GET", + f"/documents/jobs/{job_id}", enterprise_feature="Document ingestion jobs", ) return JobInfo(**data) @@ -1986,7 +1998,7 @@ async def get_job(self, job_id: str) -> JobInfo: async def list_jobs( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, ) -> list[JobInfo]: """List ingestion jobs in the workspace.""" @@ -1995,7 +2007,9 @@ async def list_jobs( params["status"] = status data = await self._request( - "GET", "/documents/jobs", params=params, + "GET", + "/documents/jobs", + params=params, enterprise_feature="Document ingestion jobs", ) return [JobInfo(**j) for j in data.get("jobs", [])] @@ -2003,7 +2017,8 @@ async def list_jobs( async def cancel_job(self, job_id: str) -> None: """Cancel a queued or running ingestion job.""" await self._request( - "POST", f"/documents/jobs/{job_id}/cancel", + "POST", + f"/documents/jobs/{job_id}/cancel", 
enterprise_feature="Document ingestion jobs", ) @@ -2011,11 +2026,11 @@ async def reprocess_document( self, document_id: str, *, - target_context_id: Optional[str] = None, - chunking_strategy: Optional[str] = None, - chunk_size: Optional[int] = None, - chunk_overlap: Optional[int] = None, - importance: Optional[float] = None, + target_context_id: str | None = None, + chunking_strategy: str | None = None, + chunk_size: int | None = None, + chunk_overlap: int | None = None, + importance: float | None = None, ) -> JobInfo: """Reprocess a document with optionally different extraction options.""" payload: dict[str, Any] = {} @@ -2031,7 +2046,8 @@ async def reprocess_document( payload["importance"] = importance data = await self._request( - "POST", f"/documents/{document_id}/reprocess", + "POST", + f"/documents/{document_id}/reprocess", json=payload if payload else None, enterprise_feature="Document reprocessing", ) @@ -2046,7 +2062,7 @@ async def upload_dataset( file_data: bytes, filename: str, *, - name: Optional[str] = None, + name: str | None = None, target_context_id: str = "_default", importance: float = 0.5, sample_rows: int = 1000, @@ -2104,13 +2120,14 @@ async def upload_dataset( raise except httpx.HTTPStatusError as exc: raise MemoryLayerError( - str(exc), status_code=exc.response.status_code, + str(exc), + status_code=exc.response.status_code, ) async def list_datasets( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, offset: int = 0, ) -> tuple[list[DatasetInfo], int]: @@ -2125,7 +2142,9 @@ async def list_datasets( params["status"] = status data = await self._request( - "GET", "/datasets", params=params, + "GET", + "/datasets", + params=params, enterprise_feature="Dataset management", ) datasets = [DatasetInfo(**d) for d in data.get("datasets", [])] @@ -2134,23 +2153,29 @@ async def list_datasets( async def get_dataset(self, dataset_id: str) -> DatasetInfo: """Get dataset metadata, schema, and profile.""" data = await 
self._request( - "GET", f"/datasets/{dataset_id}", + "GET", + f"/datasets/{dataset_id}", enterprise_feature="Dataset management", ) return DatasetInfo(**data) async def delete_dataset( - self, dataset_id: str, *, delete_memories: bool = False, + self, + dataset_id: str, + *, + delete_memories: bool = False, ) -> None: """Delete a dataset and optionally its extracted memories.""" await self._request( - "DELETE", f"/datasets/{dataset_id}", + "DELETE", + f"/datasets/{dataset_id}", params={"delete_memories": str(delete_memories).lower()}, enterprise_feature="Dataset management", ) async def get_dataset_memories( - self, dataset_id: str, + self, + dataset_id: str, ) -> list[dict[str, Any]]: """ Get memories extracted from a dataset. @@ -2159,7 +2184,8 @@ async def get_dataset_memories( List of memory dicts with id, content, type, importance, tags, created_at. """ data = await self._request( - "GET", f"/datasets/{dataset_id}/memories", + "GET", + f"/datasets/{dataset_id}/memories", enterprise_feature="Dataset management", ) return data.get("memories", []) @@ -2168,10 +2194,10 @@ async def query_dataset_slice( self, dataset_id: str, *, - sql: Optional[str] = None, - columns: Optional[list[str]] = None, - filters: Optional[list[dict[str, Any]]] = None, - order_by: Optional[str] = None, + sql: str | None = None, + columns: list[str] | None = None, + filters: list[dict[str, Any]] | None = None, + order_by: str | None = None, descending: bool = False, limit: int = 100, offset: int = 0, @@ -2210,7 +2236,8 @@ async def query_dataset_slice( payload["order_by"] = order_by data = await self._request( - "POST", f"/datasets/{dataset_id}/slice", + "POST", + f"/datasets/{dataset_id}/slice", json=payload, enterprise_feature="Dataset management", ) @@ -2219,7 +2246,8 @@ async def query_dataset_slice( async def get_dataset_job(self, job_id: str) -> DatasetJobInfo: """Get dataset processing job status and progress.""" data = await self._request( - "GET", f"/datasets/jobs/{job_id}", + 
"GET", + f"/datasets/jobs/{job_id}", enterprise_feature="Dataset processing jobs", ) return DatasetJobInfo(**data) @@ -2227,7 +2255,7 @@ async def get_dataset_job(self, job_id: str) -> DatasetJobInfo: async def list_dataset_jobs( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, ) -> list[DatasetJobInfo]: """List dataset processing jobs in the workspace.""" @@ -2236,7 +2264,9 @@ async def list_dataset_jobs( params["status"] = status data = await self._request( - "GET", "/datasets/jobs", params=params, + "GET", + "/datasets/jobs", + params=params, enterprise_feature="Dataset processing jobs", ) return [DatasetJobInfo(**j) for j in data.get("jobs", [])] @@ -2244,6 +2274,7 @@ async def list_dataset_jobs( async def cancel_dataset_job(self, job_id: str) -> None: """Cancel a queued or running dataset processing job.""" await self._request( - "POST", f"/datasets/jobs/{job_id}/cancel", + "POST", + f"/datasets/jobs/{job_id}/cancel", enterprise_feature="Dataset processing jobs", ) diff --git a/memorylayer-sdk-python/src/memorylayer/exceptions.py b/memorylayer-sdk-python/src/memorylayer/exceptions.py index 5f9f5a7..e1b4de3 100644 --- a/memorylayer-sdk-python/src/memorylayer/exceptions.py +++ b/memorylayer-sdk-python/src/memorylayer/exceptions.py @@ -43,10 +43,7 @@ def __init__( feature: str = "This feature", message: str | None = None, ) -> None: - msg = message or ( - f"{feature} requires MemoryLayer Enterprise. " - "See https://memorylayer.ai for upgrade options." - ) + msg = message or (f"{feature} requires MemoryLayer Enterprise. 
See https://memorylayer.ai for upgrade options.") super().__init__(msg, status_code=404) self.feature = feature diff --git a/memorylayer-sdk-python/src/memorylayer/sync_client.py b/memorylayer-sdk-python/src/memorylayer/sync_client.py index 579dfad..22baa1f 100644 --- a/memorylayer-sdk-python/src/memorylayer/sync_client.py +++ b/memorylayer-sdk-python/src/memorylayer/sync_client.py @@ -2,8 +2,9 @@ import json import logging +from collections.abc import Generator from contextlib import contextmanager -from typing import Any, Generator, Optional, Union +from typing import Any import httpx from pydantic import TypeAdapter @@ -22,7 +23,6 @@ ChatMessage, ChatThread, ChatThreadWithMessages, - DatasetColumn, DatasetInfo, DatasetJobInfo, DatasetSliceResult, @@ -81,9 +81,9 @@ class SyncMemoryLayerClient: def __init__( self, base_url: str = "http://localhost:61001", - api_key: Optional[str] = None, - workspace_id: Optional[str] = None, - session_id: Optional[str] = None, + api_key: str | None = None, + workspace_id: str | None = None, + session_id: str | None = None, timeout: float = 30.0, ): """ @@ -101,7 +101,7 @@ def __init__( self.workspace_id = workspace_id self.session_id = session_id self.timeout = timeout - self._client: Optional[httpx.Client] = None + self._client: httpx.Client | None = None def __enter__(self) -> "SyncMemoryLayerClient": """Context manager entry.""" @@ -148,7 +148,7 @@ def clear_session(self) -> None: if self._client and "X-Session-ID" in self._client.headers: del self._client.headers["X-Session-ID"] - def get_session_id(self) -> Optional[str]: + def get_session_id(self) -> str | None: """ Get the current session ID, if any. 
@@ -173,9 +173,9 @@ def _request( method: str, path: str, *, - json: Optional[dict[str, Any]] = None, - params: Optional[dict[str, Any]] = None, - enterprise_feature: Optional[str] = None, + json: dict[str, Any] | None = None, + params: dict[str, Any] | None = None, + enterprise_feature: str | None = None, ) -> dict[str, Any]: """ Make HTTP request with error handling. @@ -247,13 +247,13 @@ def _request( def remember( self, content: str, - type: Optional[Union[str, MemoryType]] = None, - subtype: Optional[Union[str, MemorySubtype]] = None, + type: str | MemoryType | None = None, + subtype: str | MemorySubtype | None = None, importance: float = 0.5, - tags: Optional[list[str]] = None, - metadata: Optional[dict[str, Any]] = None, - context_id: Optional[str] = None, - user_id: Optional[str] = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + context_id: str | None = None, + user_id: str | None = None, ) -> Memory: """ Store a new memory. @@ -304,20 +304,20 @@ def remember( def recall( self, query: str, - types: Optional[list[Union[str, MemoryType]]] = None, - subtypes: Optional[list[Union[str, MemorySubtype]]] = None, - tags: Optional[list[str]] = None, - mode: Optional[Union[str, RecallMode]] = None, + types: list[str | MemoryType] | None = None, + subtypes: list[str | MemorySubtype] | None = None, + tags: list[str] | None = None, + mode: str | RecallMode | None = None, limit: int = 10, - min_relevance: Optional[float] = None, - recency_weight: Optional[float] = None, - tolerance: Optional[Union[str, SearchTolerance]] = None, - include_associations: Optional[bool] = None, - traverse_depth: Optional[int] = None, - max_expansion: Optional[int] = None, - created_after: Optional[str] = None, - created_before: Optional[str] = None, - user_id: Optional[str] = None, + min_relevance: float | None = None, + recency_weight: float | None = None, + tolerance: str | SearchTolerance | None = None, + include_associations: bool | None = None, + 
traverse_depth: int | None = None, + max_expansion: int | None = None, + created_after: str | None = None, + created_before: str | None = None, + user_id: str | None = None, ) -> RecallResult: """ Search memories by semantic query. @@ -463,10 +463,10 @@ def get_memory(self, memory_id: str) -> Memory: def update_memory( self, memory_id: str, - content: Optional[str] = None, - importance: Optional[float] = None, - tags: Optional[list[str]] = None, - metadata: Optional[dict[str, Any]] = None, + content: str | None = None, + importance: float | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, ) -> Memory: """ Update an existing memory. @@ -507,9 +507,9 @@ def associate( self, source_id: str, target_id: str, - relationship: Union[str, RelationshipType], + relationship: str | RelationshipType, strength: float = 0.5, - metadata: Optional[dict[str, Any]] = None, + metadata: dict[str, Any] | None = None, ) -> Association: """ Link two memories with a relationship. @@ -661,8 +661,8 @@ def create_session(self, ttl_seconds: int = 3600) -> Session: def list_sessions( self, - workspace_id: Optional[str] = None, - context_id: Optional[str] = None, + workspace_id: str | None = None, + context_id: str | None = None, include_expired: bool = False, ) -> list[dict[str, Any]]: """ @@ -708,7 +708,7 @@ def set_context( session_id: str, key: str, value: Any, - ttl_seconds: Optional[int] = None, + ttl_seconds: int | None = None, ) -> None: """ Set a context value in a session. 
@@ -762,7 +762,7 @@ def get_context( def get_briefing( self, - lookback_hours: Optional[int] = None, + lookback_hours: int | None = None, lookback_minutes: int = 60, detail_level: str = "abstract", limit: int = 10, @@ -817,7 +817,7 @@ def commit_session( session_id: str, min_importance: float = 0.5, deduplicate: bool = True, - categories: Optional[list[str]] = None, + categories: list[str] | None = None, max_memories: int = 50, ) -> dict[str, Any]: """ @@ -885,7 +885,7 @@ def create_workspace(self, name: str) -> Workspace: data = self._request("POST", "/workspaces", json=payload) return Workspace(**data) - def get_workspace(self, workspace_id: Optional[str] = None) -> Workspace: + def get_workspace(self, workspace_id: str | None = None) -> Workspace: """ Get workspace details. @@ -908,8 +908,8 @@ def get_workspace(self, workspace_id: Optional[str] = None) -> Workspace: def update_workspace( self, workspace_id: str, - name: Optional[str] = None, - settings: Optional[dict[str, Any]] = None, + name: str | None = None, + settings: dict[str, Any] | None = None, ) -> Workspace: """ Update an existing workspace. @@ -942,8 +942,8 @@ def create_context( self, workspace_id: str, name: str, - description: Optional[str] = None, - settings: Optional[dict[str, Any]] = None, + description: str | None = None, + settings: dict[str, Any] | None = None, ) -> dict[str, Any]: """ Create a context within a workspace. 
@@ -1012,7 +1012,7 @@ def get_workspace_schema(self, workspace_id: str) -> dict[str, Any]: def context_exec( self, code: str, - result_var: Optional[str] = None, + result_var: str | None = None, return_result: bool = True, max_return_chars: int = 10_000, ) -> dict[str, Any]: @@ -1028,7 +1028,7 @@ def context_exec( def context_inspect( self, - variable: Optional[str] = None, + variable: str | None = None, preview_chars: int = 200, ) -> dict[str, Any]: """Inspect sandbox state or a specific variable.""" @@ -1042,9 +1042,9 @@ def context_load( var: str, query: str, limit: int = 50, - types: Optional[list[str]] = None, - tags: Optional[list[str]] = None, - min_relevance: Optional[float] = None, + types: list[str] | None = None, + tags: list[str] | None = None, + min_relevance: float | None = None, include_embeddings: bool = False, ) -> dict[str, Any]: """Load memories into the sandbox as a variable.""" @@ -1080,8 +1080,8 @@ def context_query( self, prompt: str, variables: list[str], - max_context_chars: Optional[int] = None, - result_var: Optional[str] = None, + max_context_chars: int | None = None, + result_var: str | None = None, ) -> dict[str, Any]: """Send sandbox variables and a prompt to the LLM.""" payload: dict[str, Any] = { @@ -1097,11 +1097,11 @@ def context_query( def context_rlm( self, goal: str, - memory_query: Optional[str] = None, + memory_query: str | None = None, memory_limit: int = 100, max_iterations: int = 10, - variables: Optional[list[str]] = None, - result_var: Optional[str] = None, + variables: list[str] | None = None, + result_var: str | None = None, detail_level: str = "standard", ) -> dict[str, Any]: """Run a Recursive Language Model (RLM) loop.""" @@ -1138,15 +1138,15 @@ def context_cleanup(self) -> None: def create_thread( self, *, - workspace_id: Optional[str] = None, - thread_id: Optional[str] = None, - user_id: Optional[str] = None, - context_id: Optional[str] = None, - observer_id: Optional[str] = None, - subject_id: Optional[str] = 
None, - title: Optional[str] = None, - metadata: Optional[dict[str, Any]] = None, - expires_at: Optional[str] = None, + workspace_id: str | None = None, + thread_id: str | None = None, + user_id: str | None = None, + context_id: str | None = None, + observer_id: str | None = None, + subject_id: str | None = None, + title: str | None = None, + metadata: dict[str, Any] | None = None, + expires_at: str | None = None, ) -> ChatThread: """ Create a new chat thread. @@ -1195,8 +1195,8 @@ def create_thread( def list_threads( self, *, - workspace_id: Optional[str] = None, - user_id: Optional[str] = None, + workspace_id: str | None = None, + user_id: str | None = None, limit: int = 50, offset: int = 0, ) -> list[ChatThread]: @@ -1230,7 +1230,7 @@ def get_thread( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> ChatThread: """ Get thread metadata. @@ -1257,7 +1257,7 @@ def get_thread_full( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, limit: int = 100, offset: int = 0, order: str = "asc", @@ -1290,7 +1290,7 @@ def delete_thread( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> None: """ Delete a thread and its messages. @@ -1314,7 +1314,7 @@ def append_messages( thread_id: str, messages: list[dict[str, Any]], *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> list[ChatMessage]: """ Append messages to a thread. 
@@ -1340,8 +1340,10 @@ def append_messages( payload: dict[str, Any] = {"messages": messages} data = self._request( - "POST", f"/threads/{thread_id}/messages", - json=payload, params=params or None, + "POST", + f"/threads/{thread_id}/messages", + json=payload, + params=params or None, ) messages_adapter = TypeAdapter(list[ChatMessage]) return messages_adapter.validate_python(data.get("messages", data if isinstance(data, list) else [])) @@ -1350,10 +1352,10 @@ def get_messages( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, limit: int = 100, offset: int = 0, - after_index: Optional[int] = None, + after_index: int | None = None, order: str = "asc", ) -> list[ChatMessage]: """ @@ -1388,7 +1390,7 @@ def decompose_thread( self, thread_id: str, *, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, ) -> DecompositionResult: """ Trigger memory decomposition for unprocessed messages. @@ -1410,7 +1412,8 @@ def decompose_thread( params["workspace_id"] = ws_id data = self._request( - "POST", f"/threads/{thread_id}/decompose", + "POST", + f"/threads/{thread_id}/decompose", params=params or None, ) return DecompositionResult(**data) @@ -1461,13 +1464,14 @@ def upload_document( raise except httpx.HTTPStatusError as exc: raise MemoryLayerError( - str(exc), status_code=exc.response.status_code, + str(exc), + status_code=exc.response.status_code, ) def list_documents( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, offset: int = 0, ) -> tuple[list[DocumentInfo], int]: @@ -1477,7 +1481,9 @@ def list_documents( params["status"] = status data = self._request( - "GET", "/documents", params=params, + "GET", + "/documents", + params=params, enterprise_feature="Document management", ) docs = [DocumentInfo(**d) for d in data.get("documents", [])] @@ -1486,17 +1492,22 @@ def list_documents( def get_document(self, document_id: str) -> DocumentInfo: """Get document metadata and 
processing status.""" data = self._request( - "GET", f"/documents/{document_id}", + "GET", + f"/documents/{document_id}", enterprise_feature="Document management", ) return DocumentInfo(**data) def delete_document( - self, document_id: str, *, delete_memories: bool = False, + self, + document_id: str, + *, + delete_memories: bool = False, ) -> None: """Delete a document and optionally its extracted memories.""" self._request( - "DELETE", f"/documents/{document_id}", + "DELETE", + f"/documents/{document_id}", params={"delete_memories": str(delete_memories).lower()}, enterprise_feature="Document management", ) @@ -1506,7 +1517,7 @@ def search_document_pages( query: str, *, limit: int = 10, - doc_ids: Optional[list[str]] = None, + doc_ids: list[str] | None = None, ) -> PageSearchResult: """Search document pages using ColPali MaxSim (Enterprise).""" payload: dict[str, Any] = {"query": query, "limit": limit} @@ -1514,7 +1525,9 @@ def search_document_pages( payload["doc_ids"] = doc_ids data = self._request( - "POST", "/documents/search", json=payload, + "POST", + "/documents/search", + json=payload, enterprise_feature="Document page search", ) return PageSearchResult( @@ -1526,7 +1539,8 @@ def search_document_pages( def get_document_pages(self, document_id: str) -> list[DocumentPage]: """Get all pages for a document.""" data = self._request( - "GET", f"/documents/{document_id}/pages", + "GET", + f"/documents/{document_id}/pages", enterprise_feature="Document pages", ) return [DocumentPage(**p) for p in data.get("pages", [])] @@ -1547,13 +1561,15 @@ def get_page_image(self, document_id: str, page_id: str) -> bytes: raise except httpx.HTTPStatusError as exc: raise MemoryLayerError( - str(exc), status_code=exc.response.status_code, + str(exc), + status_code=exc.response.status_code, ) def get_job(self, job_id: str) -> JobInfo: """Get ingestion job status and progress.""" data = self._request( - "GET", f"/documents/jobs/{job_id}", + "GET", + f"/documents/jobs/{job_id}", 
enterprise_feature="Document ingestion jobs", ) return JobInfo(**data) @@ -1561,7 +1577,7 @@ def get_job(self, job_id: str) -> JobInfo: def list_jobs( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, ) -> list[JobInfo]: """List ingestion jobs in the workspace.""" @@ -1570,7 +1586,9 @@ def list_jobs( params["status"] = status data = self._request( - "GET", "/documents/jobs", params=params, + "GET", + "/documents/jobs", + params=params, enterprise_feature="Document ingestion jobs", ) return [JobInfo(**j) for j in data.get("jobs", [])] @@ -1578,7 +1596,8 @@ def list_jobs( def cancel_job(self, job_id: str) -> None: """Cancel a queued or running ingestion job.""" self._request( - "POST", f"/documents/jobs/{job_id}/cancel", + "POST", + f"/documents/jobs/{job_id}/cancel", enterprise_feature="Document ingestion jobs", ) @@ -1586,11 +1605,11 @@ def reprocess_document( self, document_id: str, *, - target_context_id: Optional[str] = None, - chunking_strategy: Optional[str] = None, - chunk_size: Optional[int] = None, - chunk_overlap: Optional[int] = None, - importance: Optional[float] = None, + target_context_id: str | None = None, + chunking_strategy: str | None = None, + chunk_size: int | None = None, + chunk_overlap: int | None = None, + importance: float | None = None, ) -> JobInfo: """Reprocess a document with different extraction options.""" payload: dict[str, Any] = {} @@ -1606,7 +1625,8 @@ def reprocess_document( payload["importance"] = importance data = self._request( - "POST", f"/documents/{document_id}/reprocess", + "POST", + f"/documents/{document_id}/reprocess", json=payload if payload else None, enterprise_feature="Document reprocessing", ) @@ -1621,7 +1641,7 @@ def upload_dataset( file_data: bytes, filename: str, *, - name: Optional[str] = None, + name: str | None = None, target_context_id: str = "_default", importance: float = 0.5, sample_rows: int = 1000, @@ -1661,13 +1681,14 @@ def upload_dataset( raise except 
httpx.HTTPStatusError as exc: raise MemoryLayerError( - str(exc), status_code=exc.response.status_code, + str(exc), + status_code=exc.response.status_code, ) def list_datasets( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, offset: int = 0, ) -> tuple[list[DatasetInfo], int]: @@ -1677,7 +1698,9 @@ def list_datasets( params["status"] = status data = self._request( - "GET", "/datasets", params=params, + "GET", + "/datasets", + params=params, enterprise_feature="Dataset management", ) datasets = [DatasetInfo(**d) for d in data.get("datasets", [])] @@ -1686,27 +1709,34 @@ def list_datasets( def get_dataset(self, dataset_id: str) -> DatasetInfo: """Get dataset metadata, schema, and profile.""" data = self._request( - "GET", f"/datasets/{dataset_id}", + "GET", + f"/datasets/{dataset_id}", enterprise_feature="Dataset management", ) return DatasetInfo(**data) def delete_dataset( - self, dataset_id: str, *, delete_memories: bool = False, + self, + dataset_id: str, + *, + delete_memories: bool = False, ) -> None: """Delete a dataset and optionally its extracted memories.""" self._request( - "DELETE", f"/datasets/{dataset_id}", + "DELETE", + f"/datasets/{dataset_id}", params={"delete_memories": str(delete_memories).lower()}, enterprise_feature="Dataset management", ) def get_dataset_memories( - self, dataset_id: str, + self, + dataset_id: str, ) -> list[dict[str, Any]]: """Get memories extracted from a dataset.""" data = self._request( - "GET", f"/datasets/{dataset_id}/memories", + "GET", + f"/datasets/{dataset_id}/memories", enterprise_feature="Dataset management", ) return data.get("memories", []) @@ -1715,10 +1745,10 @@ def query_dataset_slice( self, dataset_id: str, *, - sql: Optional[str] = None, - columns: Optional[list[str]] = None, - filters: Optional[list[dict[str, Any]]] = None, - order_by: Optional[str] = None, + sql: str | None = None, + columns: list[str] | None = None, + filters: list[dict[str, Any]] | None = None, + 
order_by: str | None = None, descending: bool = False, limit: int = 100, offset: int = 0, @@ -1739,7 +1769,8 @@ def query_dataset_slice( payload["order_by"] = order_by data = self._request( - "POST", f"/datasets/{dataset_id}/slice", + "POST", + f"/datasets/{dataset_id}/slice", json=payload, enterprise_feature="Dataset management", ) @@ -1748,7 +1779,8 @@ def query_dataset_slice( def get_dataset_job(self, job_id: str) -> DatasetJobInfo: """Get dataset processing job status and progress.""" data = self._request( - "GET", f"/datasets/jobs/{job_id}", + "GET", + f"/datasets/jobs/{job_id}", enterprise_feature="Dataset processing jobs", ) return DatasetJobInfo(**data) @@ -1756,7 +1788,7 @@ def get_dataset_job(self, job_id: str) -> DatasetJobInfo: def list_dataset_jobs( self, *, - status: Optional[str] = None, + status: str | None = None, limit: int = 50, ) -> list[DatasetJobInfo]: """List dataset processing jobs in the workspace.""" @@ -1765,7 +1797,9 @@ def list_dataset_jobs( params["status"] = status data = self._request( - "GET", "/datasets/jobs", params=params, + "GET", + "/datasets/jobs", + params=params, enterprise_feature="Dataset processing jobs", ) return [DatasetJobInfo(**j) for j in data.get("jobs", [])] @@ -1773,13 +1807,14 @@ def list_dataset_jobs( def cancel_dataset_job(self, job_id: str) -> None: """Cancel a queued or running dataset processing job.""" self._request( - "POST", f"/datasets/jobs/{job_id}/cancel", + "POST", + f"/datasets/jobs/{job_id}/cancel", enterprise_feature="Dataset processing jobs", ) def export_workspace( self, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, include_associations: bool = True, offset: int = 0, limit: int = 0, @@ -1830,7 +1865,7 @@ def export_workspace( # Parse NDJSON response text = response.text - lines = [line.strip() for line in text.strip().split('\n') if line.strip()] + lines = [line.strip() for line in text.strip().split("\n") if line.strip()] header = None memories = [] @@ -1874,15 
+1909,12 @@ def import_workspace( result = client.import_workspace("ws_123", export_data) print(f"Imported {result['imported']} memories") """ - response = self._request( - "POST", f"/workspaces/{workspace_id}/import", - json={"data": data} - ) + response = self._request("POST", f"/workspaces/{workspace_id}/import", json={"data": data}) return response def export_workspace_stream( self, - workspace_id: Optional[str] = None, + workspace_id: str | None = None, include_associations: bool = True, offset: int = 0, limit: int = 0, @@ -1933,7 +1965,7 @@ def export_workspace_stream( response.raise_for_status() text = response.text - lines = [line.strip() for line in text.strip().split('\n') if line.strip()] + lines = [line.strip() for line in text.strip().split("\n") if line.strip()] for line in lines: yield json.loads(line) @@ -1961,14 +1993,10 @@ def import_workspace_stream( print(f"Imported {result['imported']} memories") """ # Serialize to NDJSON - ndjson_body = '\n'.join(json.dumps(line) for line in ndjson_lines) + ndjson_body = "\n".join(json.dumps(line) for line in ndjson_lines) client = self._ensure_client() - response = client.post( - f"/workspaces/{workspace_id}/import", - content=ndjson_body, - headers={"Content-Type": "application/x-ndjson"} - ) + response = client.post(f"/workspaces/{workspace_id}/import", content=ndjson_body, headers={"Content-Type": "application/x-ndjson"}) if response.status_code >= 400: if response.status_code == 401: @@ -1990,9 +2018,9 @@ def import_workspace_stream( @contextmanager def sync_client( base_url: str = "http://localhost:61001", - api_key: Optional[str] = None, - workspace_id: Optional[str] = None, - session_id: Optional[str] = None, + api_key: str | None = None, + workspace_id: str | None = None, + session_id: str | None = None, timeout: float = 30.0, ) -> Generator[SyncMemoryLayerClient, None, None]: """ diff --git a/memorylayer-sdk-python/src/memorylayer/types.py b/memorylayer-sdk-python/src/memorylayer/types.py index 
37c6bc3..1469e8e 100644 --- a/memorylayer-sdk-python/src/memorylayer/types.py +++ b/memorylayer-sdk-python/src/memorylayer/types.py @@ -1,9 +1,9 @@ """Type definitions and enums for MemoryLayer.ai SDK.""" -from enum import Enum +from enum import StrEnum -class MemoryType(str, Enum): +class MemoryType(StrEnum): """Cognitive memory types - how memory is structured.""" EPISODIC = "episodic" # Specific events/interactions @@ -12,7 +12,7 @@ class MemoryType(str, Enum): WORKING = "working" # Current task context -class MemorySubtype(str, Enum): +class MemorySubtype(StrEnum): """Domain subtypes - what the memory is about.""" SOLUTION = "solution" # Working fixes to problems @@ -30,7 +30,7 @@ class MemorySubtype(str, Enum): INFERENCE = "inference" # Inferred/derived knowledge -class RecallMode(str, Enum): +class RecallMode(StrEnum): """Retrieval strategy for recall queries.""" RAG = "rag" # Fast vector similarity search @@ -38,7 +38,7 @@ class RecallMode(str, Enum): HYBRID = "hybrid" # Combine both strategies -class SearchTolerance(str, Enum): +class SearchTolerance(StrEnum): """Search precision level.""" LOOSE = "loose" # Fuzzy matching, broader results @@ -46,7 +46,7 @@ class SearchTolerance(str, Enum): STRICT = "strict" # Exact matching, high relevance -class RelationshipCategory(str, Enum): +class RelationshipCategory(StrEnum): """High-level relationship categories. These match the server's category names from the unified ontology. @@ -64,7 +64,7 @@ class RelationshipCategory(str, Enum): QUALITY = "quality" # Quality assessments -class RelationshipType(str, Enum): +class RelationshipType(StrEnum): """Specific relationship types between memories. These match the server's relationship type strings (snake_case). 
diff --git a/memorylayer-sdk-python/tests/test_client.py b/memorylayer-sdk-python/tests/test_client.py index a734272..d49df0a 100644 --- a/memorylayer-sdk-python/tests/test_client.py +++ b/memorylayer-sdk-python/tests/test_client.py @@ -37,18 +37,20 @@ def client(base_url: str) -> MemoryLayerClient: async def test_remember(client: MemoryLayerClient, base_url: str) -> None: """Test storing a memory.""" # Mock response (server wraps in MemoryResponse envelope) - mock_response = {"memory": { - "id": "mem_123", - "workspace_id": "ws_test", - "content": "User prefers Python", - "type": "semantic", - "importance": 0.8, - "tags": ["preferences"], - "metadata": {}, - "access_count": 0, - "created_at": "2026-01-26T10:00:00Z", - "updated_at": "2026-01-26T10:00:00Z", - }} + mock_response = { + "memory": { + "id": "mem_123", + "workspace_id": "ws_test", + "content": "User prefers Python", + "type": "semantic", + "importance": 0.8, + "tags": ["preferences"], + "metadata": {}, + "access_count": 0, + "created_at": "2026-01-26T10:00:00Z", + "updated_at": "2026-01-26T10:00:00Z", + } + } respx.post(f"{base_url}/v1/memories").mock(return_value=Response(200, json=mock_response)) @@ -137,18 +139,20 @@ async def test_reflect(client: MemoryLayerClient, base_url: str) -> None: async def test_get_memory(client: MemoryLayerClient, base_url: str) -> None: """Test getting a specific memory.""" # Mock response (server wraps in MemoryResponse envelope) - mock_response = {"memory": { - "id": "mem_123", - "workspace_id": "ws_test", - "content": "User prefers Python", - "type": "semantic", - "importance": 0.8, - "tags": ["preferences"], - "metadata": {}, - "access_count": 0, - "created_at": "2026-01-26T10:00:00Z", - "updated_at": "2026-01-26T10:00:00Z", - }} + mock_response = { + "memory": { + "id": "mem_123", + "workspace_id": "ws_test", + "content": "User prefers Python", + "type": "semantic", + "importance": 0.8, + "tags": ["preferences"], + "metadata": {}, + "access_count": 0, + "created_at": 
"2026-01-26T10:00:00Z", + "updated_at": "2026-01-26T10:00:00Z", + } + } respx.get(f"{base_url}/v1/memories/mem_123").mock(return_value=Response(200, json=mock_response)) diff --git a/memorylayer-sdk-python/tests/test_sync_client.py b/memorylayer-sdk-python/tests/test_sync_client.py index b9997c0..ced04d2 100644 --- a/memorylayer-sdk-python/tests/test_sync_client.py +++ b/memorylayer-sdk-python/tests/test_sync_client.py @@ -37,18 +37,20 @@ def client(base_url: str) -> SyncMemoryLayerClient: def test_remember(client: SyncMemoryLayerClient, base_url: str) -> None: """Test storing a memory.""" # Mock response (server wraps in MemoryResponse envelope) - mock_response = {"memory": { - "id": "mem_123", - "workspace_id": "ws_test", - "content": "User prefers Python", - "type": "semantic", - "importance": 0.8, - "tags": ["preferences"], - "metadata": {}, - "access_count": 0, - "created_at": "2026-01-26T10:00:00Z", - "updated_at": "2026-01-26T10:00:00Z", - }} + mock_response = { + "memory": { + "id": "mem_123", + "workspace_id": "ws_test", + "content": "User prefers Python", + "type": "semantic", + "importance": 0.8, + "tags": ["preferences"], + "metadata": {}, + "access_count": 0, + "created_at": "2026-01-26T10:00:00Z", + "updated_at": "2026-01-26T10:00:00Z", + } + } respx.post(f"{base_url}/v1/memories").mock(return_value=Response(200, json=mock_response)) @@ -134,18 +136,20 @@ def test_reflect(client: SyncMemoryLayerClient, base_url: str) -> None: def test_get_memory(client: SyncMemoryLayerClient, base_url: str) -> None: """Test getting a specific memory.""" # Mock response (server wraps in MemoryResponse envelope) - mock_response = {"memory": { - "id": "mem_123", - "workspace_id": "ws_test", - "content": "User prefers Python", - "type": "semantic", - "importance": 0.8, - "tags": ["preferences"], - "metadata": {}, - "access_count": 0, - "created_at": "2026-01-26T10:00:00Z", - "updated_at": "2026-01-26T10:00:00Z", - }} + mock_response = { + "memory": { + "id": "mem_123", + 
"workspace_id": "ws_test", + "content": "User prefers Python", + "type": "semantic", + "importance": 0.8, + "tags": ["preferences"], + "metadata": {}, + "access_count": 0, + "created_at": "2026-01-26T10:00:00Z", + "updated_at": "2026-01-26T10:00:00Z", + } + } respx.get(f"{base_url}/v1/memories/mem_123").mock(return_value=Response(200, json=mock_response)) @@ -279,18 +283,20 @@ def test_session_context(client: SyncMemoryLayerClient, base_url: str) -> None: @respx.mock def test_sync_client_helper(base_url: str) -> None: """Test sync_client() context manager helper.""" - # Mock response + # Mock response (server wraps in MemoryResponse envelope) mock_response = { - "id": "mem_456", - "workspace_id": "ws_test", - "content": "Test memory", - "type": "working", - "importance": 0.5, - "tags": [], - "metadata": {}, - "access_count": 0, - "created_at": "2026-01-26T10:00:00Z", - "updated_at": "2026-01-26T10:00:00Z", + "memory": { + "id": "mem_456", + "workspace_id": "ws_test", + "content": "Test memory", + "type": "working", + "importance": 0.5, + "tags": [], + "metadata": {}, + "access_count": 0, + "created_at": "2026-01-26T10:00:00Z", + "updated_at": "2026-01-26T10:00:00Z", + } } respx.post(f"{base_url}/v1/memories").mock(return_value=Response(200, json=mock_response)) diff --git a/memorylayer-sdk-typescript/package-lock.json b/memorylayer-sdk-typescript/package-lock.json new file mode 100644 index 0000000..42de57c --- /dev/null +++ b/memorylayer-sdk-typescript/package-lock.json @@ -0,0 +1,1505 @@ +{ + "name": "@scitrera/memorylayer-sdk", + "version": "0.0.4", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@scitrera/memorylayer-sdk", + "version": "0.0.4", + "license": "Apache-2.0", + "devDependencies": { + "@types/node": "^20.0.0", + "typescript": "^5.3.0", + "vitest": "^4.0.18" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.3", + "resolved": 
"https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", + "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz", + "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz", + "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz", + "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz", + "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + 
}, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz", + "integrity": "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz", + "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz", + "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz", + "integrity": "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz", + "integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + 
"optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz", + "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz", + "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz", + "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz", + "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz", + "integrity": 
"sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz", + "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz", + "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz", + "integrity": "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz", + "integrity": "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.3", + "resolved": 
"https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz", + "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz", + "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz", + "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz", + "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz", + "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + 
"node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz", + "integrity": "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz", + "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", + "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", + "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + 
"node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", + "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", + "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", + "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", + "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", + "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", + "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", + "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", + "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", + "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", + "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "cpu": [ + 
"loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", + "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", + "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", + "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", + "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", + "integrity": 
"sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", + "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", + "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", + "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", + "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.57.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", + "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", + "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", + "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", + "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": 
"sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.19.30", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.30.tgz", + "integrity": "sha512-WJtwWJu7UdlvzEAUm484QNg5eAoq5QR08KDNx7g45Usrs2NtOPiX8ugDqmKdXkyL03rBqU5dYNYVQetEpBHq2g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@vitest/expect": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.18.tgz", + "integrity": "sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "chai": "^6.2.1", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.18.tgz", + "integrity": "sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@vitest/spy": "4.0.18", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.18.tgz", + "integrity": "sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.18.tgz", + "integrity": "sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.0.18", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.18.tgz", + "integrity": "sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.18.tgz", + "integrity": "sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==", + "dev": true, + "license": "MIT", + "funding": { + "url": 
"https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.18.tgz", + "integrity": "sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz", + "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.3", + "@esbuild/android-arm": "0.27.3", + "@esbuild/android-arm64": "0.27.3", + "@esbuild/android-x64": "0.27.3", + 
"@esbuild/darwin-arm64": "0.27.3", + "@esbuild/darwin-x64": "0.27.3", + "@esbuild/freebsd-arm64": "0.27.3", + "@esbuild/freebsd-x64": "0.27.3", + "@esbuild/linux-arm": "0.27.3", + "@esbuild/linux-arm64": "0.27.3", + "@esbuild/linux-ia32": "0.27.3", + "@esbuild/linux-loong64": "0.27.3", + "@esbuild/linux-mips64el": "0.27.3", + "@esbuild/linux-ppc64": "0.27.3", + "@esbuild/linux-riscv64": "0.27.3", + "@esbuild/linux-s390x": "0.27.3", + "@esbuild/linux-x64": "0.27.3", + "@esbuild/netbsd-arm64": "0.27.3", + "@esbuild/netbsd-x64": "0.27.3", + "@esbuild/openbsd-arm64": "0.27.3", + "@esbuild/openbsd-x64": "0.27.3", + "@esbuild/openharmony-arm64": "0.27.3", + "@esbuild/sunos-x64": "0.27.3", + "@esbuild/win32-arm64": "0.27.3", + "@esbuild/win32-ia32": "0.27.3", + "@esbuild/win32-x64": "0.27.3" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": 
"https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": 
"sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/rollup": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", + "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + "@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + "@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": 
"4.57.1", + "@rollup/rollup-linux-arm-musleabihf": "4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + "@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": 
"sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyrainbow": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", + "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": 
"https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vitest": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.18.tgz", + "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.0.18", + "@vitest/mocker": 
"4.0.18", + "@vitest/pretty-format": "4.0.18", + "@vitest/runner": "4.0.18", + "@vitest/snapshot": "4.0.18", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "es-module-lexer": "^1.7.0", + "expect-type": "^1.2.2", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + "pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^3.10.0", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.0.3", + "vite": "^6.0.0 || ^7.0.0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.0.18", + "@vitest/browser-preview": "4.0.18", + "@vitest/browser-webdriverio": "4.0.18", + "@vitest/ui": "4.0.18", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + } + } +} diff --git a/scripts/ci-local.sh 
#!/usr/bin/env bash
# Local CI — mirrors .github/workflows/ci.yml
# Usage: ./scripts/ci-local.sh [--fix] [component...]
#   --fix       Auto-fix lint/format issues instead of just checking
#   component   One or more of: server, sdk, typescript (default: all)
#
# Examples:
#   ./scripts/ci-local.sh                # Run everything
#   ./scripts/ci-local.sh server         # Server only
#   ./scripts/ci-local.sh sdk --fix      # SDK with auto-fix
#   ./scripts/ci-local.sh server sdk     # Server + SDK, skip TypeScript

set -euo pipefail
cd "$(dirname "$0")/.."

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BOLD='\033[1m'
NC='\033[0m'

FIX=false
COMPONENTS=()

for arg in "$@"; do
  case "$arg" in
    --fix) FIX=true ;;
    server|sdk|typescript|ts) COMPONENTS+=("$arg") ;;
    *) echo -e "${RED}Unknown argument: $arg${NC}"; exit 1 ;;
  esac
done

# Default: run all
if [ ${#COMPONENTS[@]} -eq 0 ]; then
  COMPONENTS=(server sdk typescript)
fi

FAILURES=()

# Run a labelled check; on nonzero exit, record the label in FAILURES
# instead of aborting so the remaining checks still run.
# IMPORTANT: must be called from the top-level shell, never inside a
# subshell — an append to FAILURES inside "( … )" is lost when the
# subshell exits, and the failure would never reach the summary.
run_step() {
  local label="$1"
  shift
  echo -e "${BOLD}  → $label${NC}"
  if "$@"; then
    echo -e "    ${GREEN}✓ passed${NC}"
  else
    echo -e "    ${RED}✗ failed${NC}"
    FAILURES+=("$label")
  fi
}

# Run a command inside a directory without changing the caller's cwd.
# The subshell is confined to the command under test, so run_step's
# FAILURES bookkeeping still happens in the top-level shell.
run_in_dir() {
  local dir="$1"
  shift
  (cd "$dir" && "$@")
}

# Run a command inside a directory with that directory's .venv activated.
run_in_venv() {
  local dir="$1"
  shift
  # shellcheck disable=SC1091
  (cd "$dir" && source .venv/bin/activate && "$@")
}

# ──────────────────────────────────────────────────────────────────
# Python server (memorylayer-core-python)
# ──────────────────────────────────────────────────────────────────
run_server() {
  echo -e "\n${YELLOW}━━━ Python: memorylayer-server ━━━${NC}"
  local dir="memorylayer-core-python"

  if [ "$FIX" = true ]; then
    run_step "server: ruff fix" python3 -m ruff check --fix --unsafe-fixes "$dir"
    run_step "server: ruff format" python3 -m ruff format "$dir"
  else
    run_step "server: ruff check" python3 -m ruff check "$dir"
    run_step "server: ruff format" python3 -m ruff format --check "$dir"
  fi

  if [ -d "$dir/.venv" ]; then
    # Fix: run_step previously ran INSIDE the (cd … && source …) subshell,
    # so a failing pytest never reached FAILURES and the script exited 0.
    # The subshell now wraps only the command under test (run_in_venv).
    run_step "server: pytest" run_in_venv "$dir" \
      python3 -m pytest tests/ -m "not slow and not integration and not llm and not llm_quality" -x -q
  else
    echo -e "    ${RED}No .venv found in $dir — run: cd $dir && python3 -m venv .venv && pip install -e '.[dev]'${NC}"
    FAILURES+=("server: pytest (no venv)")
  fi
}

# ──────────────────────────────────────────────────────────────────
# Python SDK (memorylayer-sdk-python)
# ──────────────────────────────────────────────────────────────────
run_sdk() {
  echo -e "\n${YELLOW}━━━ Python: memorylayer-client ━━━${NC}"
  local dir="memorylayer-sdk-python"

  if [ "$FIX" = true ]; then
    run_step "sdk: ruff fix" python3 -m ruff check --fix --unsafe-fixes "$dir"
    run_step "sdk: ruff format" python3 -m ruff format "$dir"
  else
    run_step "sdk: ruff check" python3 -m ruff check "$dir"
    run_step "sdk: ruff format" python3 -m ruff format --check "$dir"
  fi

  if [ -d "$dir/.venv" ]; then
    # Same subshell fix as run_server: keep run_step in the parent shell.
    run_step "sdk: pytest" run_in_venv "$dir" python3 -m pytest tests/ -x -q
  else
    echo -e "    ${RED}No .venv found in $dir — run: cd $dir && python3 -m venv .venv && pip install -e '.[dev]'${NC}"
    FAILURES+=("sdk: pytest (no venv)")
  fi
}

# ──────────────────────────────────────────────────────────────────
# TypeScript packages
# ──────────────────────────────────────────────────────────────────
run_typescript() {
  echo -e "\n${YELLOW}━━━ TypeScript: memorylayer-sdk ━━━${NC}"
  run_step "ts-sdk: npm ci" run_in_dir memorylayer-sdk-typescript npm ci
  run_step "ts-sdk: build" run_in_dir memorylayer-sdk-typescript npm run build

  echo -e "\n${YELLOW}━━━ TypeScript: memorylayer-mcp-server ━━━${NC}"
  run_step "ts-mcp: npm ci" run_in_dir memorylayer-mcp-typescript npm ci
  run_step "ts-mcp: build" run_in_dir memorylayer-mcp-typescript npm run build
}

# ──────────────────────────────────────────────────────────────────
# Run selected components
# ──────────────────────────────────────────────────────────────────
for component in "${COMPONENTS[@]}"; do
  case "$component" in
    server) run_server ;;
    sdk) run_sdk ;;
    typescript|ts) run_typescript ;;
  esac
done

# ──────────────────────────────────────────────────────────────────
# Summary
# ──────────────────────────────────────────────────────────────────
echo ""
if [ ${#FAILURES[@]} -eq 0 ]; then
  echo -e "${GREEN}${BOLD}All checks passed!${NC}"
  exit 0
else
  echo -e "${RED}${BOLD}${#FAILURES[@]} check(s) failed:${NC}"
  for f in "${FAILURES[@]}"; do
    echo -e "  ${RED}✗ $f${NC}"
  done
  exit 1
fi