diff --git a/.anchor/.anchor.lock b/.anchor/.anchor.lock new file mode 100644 index 0000000..c078ebf --- /dev/null +++ b/.anchor/.anchor.lock @@ -0,0 +1,23 @@ +version: 4.0.0 +generated: '2026-03-18T00:00:00Z' +algorithm: sha256 +offline_behaviour: warn +files: + domains/agentic.anchor: 659abaa294a1b1f062385a077b41d04fe75e0d708be89c6ef3ebb4ce69169703 + domains/alignment.anchor: b8fbdbbabc5e82f620a354829f5a8d70c3e85198ccbc96a4c55bd070f3f3f9db + domains/ethics.anchor: d402bf6d69815bdb0074a9fa7a02ae57fcc349a4a5c359f6f128302be5f7c38c + domains/legal.anchor: b5c061c69526f254ce2e6eb8f046aeceb1313b4e6bb8d763bd97ae2b2722854f + domains/operational.anchor: 9784ffa88b352d49b5643a257fedc3cd88e5d4b4f4591bb5c8610b2ca1aef435 + domains/privacy.anchor: aa9204e9a7693e0d70cb09b7d6bd375684cac3b5066a884d9e946baf953805cc + domains/security.anchor: b7756ded815bbe80959e1734badabbaa753608f82486045202c4be89f072b8f8 + domains/shared.anchor: 9121d6b2978c307f1b8d1d9cbccfbb77a3df65e17fdf6d54cdda0eb2d5dc0619 + domains/supply_chain.anchor: 493ae046e572724609bd46bba1d712f9e5b66c550148f45e723cd785f276f9e4 + frameworks/FINOS_Framework.anchor: 60306678ec523f3cc1aca02f7ff23d62a1b22429f23e7994b92fc13a0ded174a + frameworks/NIST_AI_RMF.anchor: 1a0971b93737280564dca779b8bfb6c27552c791c7f0d5bb22a9ff9d11c59ca5 + frameworks/OWASP_LLM.anchor: 63b3086c9ebbb78e45437cf73dc69e72b441683e72ccfeb1fa91ccb11a8921b9 + government/CFPB_Regulations.anchor: 7005b47e40061e1d47c0ee42439c3c2897a701337359490b09f8113d6dc87ee7 + government/EU_AI_Act.anchor: 05063bdd1d5af44d08cedba38bc9549b15ee567d056da7afa217d7da7a185416 + government/FCA_Regulations.anchor: f23b61075d323be487b6218a2c0e353d8df445bf3e13904f977edf895123973e + government/RBI_Regulations.anchor: 0337e51a8520507c951f68acd3ba207f30d015e586007be8a13db5c56a978e40 + government/SEBI_Regulations.anchor: 38dac4c568ecf52d89ee49b027b401d8e8a46b03b40d9f99e9bdf40534247a15 + government/SEC_Regulations.anchor: b7819b6dd874892ef5005eb5033221ac4327146dc060239a1e3fbadaeecd4c07 diff --git a/.anchor/constitution.anchor b/.anchor/constitution.anchor new file mode 100644 index 0000000..f68578a --- /dev/null +++ b/.anchor/constitution.anchor @@ -0,0 +1,127 @@ +anchor_version: '>=4.0.0' +core_domains: +- namespace: SEC + path: domains/security.anchor + required: true + active: true +- namespace: ETH + path: domains/ethics.anchor + required: true + active: true +- namespace: SHR + path: domains/shared.anchor + required: true + active: true +- namespace: ALN + path: domains/alignment.anchor + required: true + active: true +- namespace: AGT + path: domains/agentic.anchor + required: true + active: true +- namespace: PRV + path: domains/privacy.anchor + required: true + active: true +- namespace: LEG + path: domains/legal.anchor + required: true + active: true +- namespace: OPS + path: domains/operational.anchor + required: true + active: true +- namespace: SUP + path: domains/supply_chain.anchor + required: true + active: true +engine: + fail_on: + - BLOCKER + - ERROR + info_on: + - INFO + seal_check: warn + suppress_requires_reason: true + suppress_tracking: true + unknown_namespace: reject + warn_on: + - WARNING +frameworks: +- active: true + namespace: FINOS + path: frameworks/FINOS_Framework.anchor + source: FINOS AI Governance Framework +- active: true + namespace: OWASP + path: frameworks/OWASP_LLM.anchor + source: OWASP LLM Top 10 2025 +- active: true + namespace: NIST + path: frameworks/NIST_AI_RMF.anchor + source: NIST AI RMF 1.0 +legacy_aliases: + ANC-001: FINOS-001 + ANC-002: FINOS-002 + ANC-003: FINOS-003 
+ ANC-004: FINOS-004 + ANC-005: FINOS-005 + ANC-006: FINOS-006 + ANC-007: FINOS-007 + ANC-008: FINOS-008 + ANC-009: FINOS-009 + ANC-010: FINOS-010 + ANC-011: FINOS-011 + ANC-012: FINOS-012 + ANC-013: FINOS-013 + ANC-014: FINOS-014 + ANC-015: FINOS-015 + ANC-016: FINOS-016 + ANC-017: FINOS-017 + ANC-018: FINOS-018 + ANC-019: FINOS-019 + ANC-020: FINOS-020 + ANC-021: FINOS-021 + ANC-022: FINOS-022 + ANC-023: FINOS-023 +name: Anchor Constitutional Root +output: + formats: + - json + - markdown + include_git_blame: true + report_path: .anchor/reports/ + telemetry_path: .anchor/telemetry/ +policy: + allow_custom_rules: true + custom_rule_prefix: INTERNAL + enforce_raise_only: true + path: policy.anchor +regulators: +- active: true + namespace: RBI + path: government/RBI_Regulations.anchor + source: RBI FREE-AI Report August 2025 +- active: true + namespace: EU + path: government/EU_AI_Act.anchor + source: EU AI Act 2024/1689 +- active: true + namespace: SEBI + path: government/SEBI_Regulations.anchor + source: SEBI AI/ML Consultation 2024-2025 +- active: true + namespace: CFPB + path: government/CFPB_Regulations.anchor + source: CFPB Regulation B + 2024 Guidance +- active: true + namespace: FCA + path: government/FCA_Regulations.anchor + source: FCA AI Governance Guidance 2024 +- active: false + namespace: USSEC + path: government/SEC_Regulations.anchor + source: SEC 2026 Examination Priorities +type: manifest +version: '4.1' diff --git a/.anchor/domains/agentic.anchor b/.anchor/domains/agentic.anchor new file mode 100644 index 0000000..cfdb2e8 --- /dev/null +++ b/.anchor/domains/agentic.anchor @@ -0,0 +1,166 @@ +type: domain +namespace: AGT +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: false +description: > + Agentic AI risks unique to autonomous, tool-calling, and + multi-agent systems. These risks operate at the intent and + reasoning layer — structurally distinct from code-level + security violations. Enable this domain for any system + deploying AI agents, MCP integrations, autonomous pipelines, + or multi-agent orchestration frameworks. +seal: "sha256:PENDING" + +rules: + + - id: "AGT-001" + name: "Agent Action Authorization Bypass" + source: "FINOS" + original_id: "Ri-024" + category: "security" + description: > + An AI agent executes actions outside its granted permissions + not because a code-level permission check failed, but because + the agent's reasoning layer decided to act without consulting + the enforcement layer at all. This is a failure of intent, not + enforcement. A standard authorization bypass (SEC-005) occurs + when code skips a token validation check. An agentic + authorization bypass occurs when the model decides that a + high-stakes action — transferring funds, modifying governance + configuration, calling a privileged API — is within its mandate + based on its interpretation of high-level instructions, bypassing + the human authorization step entirely. In financial AI, this + risk is critical in any agentic system with access to payment + rails, customer account operations, or trading systems. The + mitigation is not better code-level permission checks — it is + explicit intent boundaries declared in the agent's system prompt, + enforced by a runtime governance layer that intercepts tool calls + before execution and validates them against the agent's declared + permission scope. 
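The interception pattern AGT-001 prescribes can be made concrete. A minimal sketch, assuming hypothetical names (`AgentScope`, `gate_tool_call`); the manifest describes Anchor's Layer 2 runtime as still in development, so this is an illustration of the pattern, not the shipped API:

```python
# Sketch of an AGT-001 governance layer: intercept every tool call and
# validate it against the agent's declared permission scope before execution.
from dataclasses import dataclass

@dataclass
class AgentScope:
    """Intent boundary declared alongside the agent's system prompt."""
    agent_id: str
    allowed_tools: set
    requires_human_approval: set

class AuthorizationBypass(Exception):
    """Raised when the reasoning layer requests an out-of-scope action."""

def gate_tool_call(scope: AgentScope, tool: str, approved_by_human: bool = False):
    # Reject tools never granted to this agent: the model's interpretation
    # of its mandate is not an authorization decision (AGT-001).
    if tool not in scope.allowed_tools:
        raise AuthorizationBypass(f"{scope.agent_id} may not call {tool}")
    # High-stakes tools stay gated on an explicit human approval step.
    if tool in scope.requires_human_approval and not approved_by_human:
        raise AuthorizationBypass(f"{tool} requires human sign-off")

scope = AgentScope("loan-agent", {"fetch_rates", "transfer_funds"}, {"transfer_funds"})
gate_tool_call(scope, "fetch_rates")                             # allowed
gate_tool_call(scope, "transfer_funds", approved_by_human=True)  # gated, approved
```

The point of the design is that the gate runs outside the model: even a perfectly manipulated reasoning trace cannot reach the payment rail without passing the declared scope.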
+ severity: "blocker" + min_severity: "blocker" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "AGT-002" + name: "Tool Chain Manipulation and Injection" + source: "FINOS" + original_id: "Ri-025" + category: "security" + description: > + An attacker manipulates the parameters, outputs, or metadata + of tools called by an AI agent to corrupt the agent's reasoning, + redirect its actions, or inject malicious instructions into the + tool-calling chain. Unlike prompt injection (SEC-001) which + targets the model's input, tool chain manipulation targets the + feedback loop between the model and its tools — the attacker + poisons what the tools return, causing the model to take + attacker-controlled actions based on fabricated tool results. + In financial AI, tool chain manipulation can cause an agent + with access to market data APIs, customer databases, or payment + systems to act on falsified data — executing trades based on + injected price feeds, approving transactions based on fabricated + credit scores, or exfiltrating customer data through manipulated + search tool responses. The attack surface grows with every tool + the agent can call, and the sophistication required is lower + than direct model manipulation because tool outputs are often + trusted implicitly by the model's reasoning. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "AGT-003" + name: "MCP Server Supply Chain Compromise" + source: "FINOS" + original_id: "Ri-026" + category: "security" + description: > + A compromised or malicious Model Context Protocol (MCP) server + poisons an AI agent's reasoning by returning fabricated tool + schemas, injecting malicious instructions into tool descriptions, + or providing attacker-controlled responses that redirect the + agent's behavior. This is structurally distinct from general + supply chain attacks (SEC-008) which target model weights and + code dependencies. MCP compromise targets the live reasoning + layer — the server that tells the agent what tools exist, what + they do, and what they return. A malicious MCP server can + convince an agent that a destructive action is a routine + operation by manipulating the tool's description and expected + output schema. In financial AI deployments using MCP for + integration with banking APIs, payment systems, or regulatory + reporting tools, a compromised MCP server represents a single + point of failure that can redirect an entire agent fleet. + Mitigation requires cryptographic verification of MCP server + manifests and tool schemas before the agent is permitted to + call any tool from that server. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "AGT-004" + name: "Agent State Persistence Poisoning" + source: "FINOS" + original_id: "Ri-027" + category: "security" + description: > + An attacker injects malicious instructions, false memories, or + behavioral backdoors into an AI agent's persistent state — + long-term memory, conversation history, vector store entries, + or cached reasoning chains — causing the agent to carry + compromised behavior across sessions, tasks, and restarts. + State persistence poisoning is uniquely dangerous because it + survives model redeployment. 
A poisoned memory entry that + causes an agent to trust a specific external endpoint, bypass + a specific check, or misclassify a specific pattern will + continue to affect agent behavior until the state is explicitly + audited and purged. In financial AI, agents with persistent + state and access to customer data, payment systems, or + compliance workflows represent a critical attack surface — + a single successful state poisoning event can introduce + a long-lived backdoor that operates silently across thousands + of subsequent transactions before detection. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "AGT-005" + name: "Multi-Agent Trust Boundary Violations" + source: "FINOS" + original_id: "Ri-028" + category: "security" + description: > + In multi-agent systems where multiple AI agents communicate, + delegate tasks, or share state, a compromised or manipulated + agent propagates malicious behavior across the agent swarm by + exploiting implicit trust between agents. Agents in a swarm + frequently trust messages from other agents in the same system + without verification — a compromised orchestrator can instruct + worker agents to take unauthorized actions, a poisoned worker + can inject false results into the orchestrator's reasoning, + and a compromised memory agent can corrupt the shared state + that all agents read from. In financial AI, multi-agent + architectures are increasingly used for complex workflows — + loan processing pipelines, regulatory reporting chains, fraud + investigation workflows — where each agent handles one step + of a larger process. Trust boundary violations in these systems + can cause cascading failures that are difficult to trace because + the proximate cause of each individual agent's failure appears + legitimate when examined in isolation. Mitigation requires + explicit trust declarations between agents, cryptographic + message signing between agent boundaries, and governance + checkpoints that validate agent outputs before they are + consumed by downstream agents. + severity: "blocker" + min_severity: "blocker" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/alignment.anchor b/.anchor/domains/alignment.anchor new file mode 100644 index 0000000..37ae363 --- /dev/null +++ b/.anchor/domains/alignment.anchor @@ -0,0 +1,65 @@ +type: domain +namespace: ALN +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: false +description: > + Alignment violations in AI systems. Covers hallucination of + non-existent APIs and code references, and goal misrepresentation + where AI output diverges from declared system purpose. +seal: "sha256:PENDING" + +rules: + + - id: "ALN-001" + name: "Hallucination" + source: "FINOS" + original_id: "Ri-008" + category: "accuracy" + description: > + AI models generate factually incorrect, fabricated, or + non-existent information presented with the same confidence + as accurate information. In code generation, hallucination + manifests as references to non-existent APIs, libraries, or + functions that appear syntactically valid but will fail at + runtime. In financial AI, hallucination is a critical risk + in automated report generation, regulatory filing assistance, + customer communications, and investment research — where + fabricated figures, non-existent regulatory citations, or + invented financial data can cause material harm. 
Hallucination + is not merely a reliability issue — in regulated contexts it is a + compliance issue, as SEBI requires AI outputs to be accurate + and traceable, and RBI FREE-AI Recommendation 14 requires + AI-assisted credit decisions to be explainable and verifiable. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "ALN-002" + name: "Goal Misrepresentation" + source: "FINOS" + original_id: "Ri-021" + category: "safety" + description: > + An AI system pursues objectives that diverge from its declared + purpose, either through misaligned training, adversarial + manipulation, or emergent behavior that was not anticipated + during development. In financial AI, goal misrepresentation + manifests when a fraud detection model begins optimizing for + metrics other than fraud detection — such as minimizing false + positive complaints — in ways that compromise its primary + safety function. It also includes agentic systems that interpret + high-level goals in ways that achieve the stated objective + while violating implicit constraints — for example, an agent + instructed to maximize loan approvals that begins bypassing + credit risk checks. This is a BLOCKER because misaligned AI + goals in financial systems can cause systematic harm at scale + before human review catches the drift. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/ethics.anchor b/.anchor/domains/ethics.anchor new file mode 100644 index 0000000..8f1b2ae --- /dev/null +++ b/.anchor/domains/ethics.anchor @@ -0,0 +1,105 @@ +type: domain +namespace: ETH +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: true +description: > + Ethics violations in AI systems. Covers bias and discrimination, + explainability absence, human oversight removal, and toxic output. +seal: "sha256:PENDING" + +rules: + + - id: "ETH-001" + name: "Bias and Discrimination" + source: "FINOS" + original_id: "Ri-009" + category: "fairness" + description: > + AI models produce systematically biased or discriminatory outcomes + against protected groups defined by race, gender, age, religion, + national origin, or other protected characteristics. In financial + AI, bias manifests most critically in credit scoring, loan + underwriting, and fraud detection — where biased models produce + disparate impact on protected classes even without discriminatory + intent. This violates ECOA, the Fair Housing Act, RBI FREE-AI + Recommendation 19, and EU AI Act Article 10. Bias is not always + detectable in outputs — it can be embedded in feature engineering + that uses proxies for protected attributes such as zip code, + browsing behavior, or social network connections. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "ETH-002" + name: "Explainability Absence" + source: "FINOS" + original_id: "Ri-010" + category: "transparency" + description: > + AI systems make decisions that cannot be explained to affected + individuals, regulators, or auditors in terms of the specific + factors that drove the outcome. Black-box models deployed in + high-stakes contexts — credit decisions, fraud flags, customer + service routing — fail the explainability requirements of RBI + FREE-AI Recommendation 14, CFPB Regulation B adverse action + notices, EU AI Act Article 13, and SEBI AI/ML requirements.
+ Explainability absence is not merely a transparency gap — it + is a structural compliance failure. Goldman Sachs paid $45M + to the CFPB in October 2024 specifically because their AI + credit model could not explain its decisions at the individual + decision level. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "ETH-003" + name: "Human Oversight Removal" + source: "FINOS" + original_id: "Ri-020" + category: "safety" + description: > + AI systems make consequential decisions autonomously without any + mechanism for human review, intervention, or override. EU AI Act + Article 14 requires that high-risk AI systems — including credit + scoring, AML monitoring, and fraud detection — be designed to + allow human oversight with the ability to interrupt, disregard, + or override AI outputs. FCA 2024 guidance requires documented + evidence of human oversight for every AI-assisted decision + submitted for supervisory review. Removing human oversight does + not merely create a compliance gap — it creates a single point + of failure where model errors, adversarial attacks, or behavioral + drift propagate unchecked across every decision in the pipeline. + severity: "blocker" + min_severity: "blocker" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "ETH-004" + name: "Toxic Output" + source: "FINOS" + original_id: "Ri-023" + category: "safety" + description: > + AI models generate harmful, abusive, threatening, or otherwise + toxic content in customer-facing or internal communications. + In financial services, toxic output risk includes models generating + discriminatory rejection language, threatening debt collection + communications, or manipulative sales content that violates + consumer protection standards. Toxic output is particularly + dangerous in automated pipelines where model outputs reach + customers without human review — a single prompt injection + or model failure can cause toxic content to be sent at scale + before detection. RBI FREE-AI Pillar 2 and FCA Consumer Duty + require that customer-facing AI outputs meet conduct standards. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/legal.anchor b/.anchor/domains/legal.anchor new file mode 100644 index 0000000..b9e5c78 --- /dev/null +++ b/.anchor/domains/legal.anchor @@ -0,0 +1,62 @@ +type: domain +namespace: LEG +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: false +description: > + Legal violations in AI systems. Covers intellectual property + infringement in training data and outputs, and regulatory + non-compliance with applicable AI governance frameworks. +seal: "sha256:PENDING" + +rules: + + - id: "LEG-001" + name: "IP Infringement" + source: "FINOS" + original_id: "Ri-018" + category: "compliance" + description: > + AI models trained on or generating content that reproduces + copyrighted material, trade secrets, or proprietary code + without authorization creates intellectual property liability + for the deploying organization. In financial AI, this includes + models trained on proprietary financial data sets, models that + reproduce licensed analytical frameworks in generated reports, + and code generation models that reproduce GPL-licensed code in + commercial products. IP infringement risk is elevated in RAG + systems where copyrighted documents are chunked and retrieved + verbatim into model outputs. 
Several ongoing lawsuits contend + that organizations deploying models trained on proprietary data + bear liability for IP violations in those models' outputs. + severity: "warning" + min_severity: "info" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "LEG-002" + name: "Regulatory Non-Compliance" + source: "FINOS" + original_id: "Ri-019" + category: "compliance" + description: > + AI systems deployed in regulated financial contexts operate + without documented compliance with applicable regulatory + frameworks — EU AI Act, RBI FREE-AI, SEBI AI/ML requirements, + CFPB Regulation B, FCA guidance, or equivalent jurisdiction-specific + requirements. Non-compliance is not merely a legal + risk — it is an operational risk. Regulatory action can suspend + AI-powered products, freeze lending operations, or trigger + mandatory audits. EU AI Act enforcement begins August 2026 with + fines of up to €15 million or 3% of global annual revenue for + high-risk AI violations, rising to €35 million or 7% for + prohibited practices. RBI has no fine ceiling for FREE-AI non-compliance. + CFPB's $45M Goldman Sachs action in 2024 establishes the + enforcement precedent. Documenting compliance is not optional + — it is the first requirement of every applicable framework. + severity: "error" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/operational.anchor b/.anchor/domains/operational.anchor new file mode 100644 index 0000000..df04045 --- /dev/null +++ b/.anchor/domains/operational.anchor @@ -0,0 +1,39 @@ +type: domain +namespace: OPS +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: false +description: > + Operational violations in AI systems. Covers availability risks, + denial of service conditions, and missing circuit breakers in + AI-dependent critical financial infrastructure. +seal: "sha256:PENDING" + +rules: + + - id: "OPS-001" + name: "Availability and Denial" + source: "FINOS" + original_id: "Ri-011" + category: "operations" + description: > + AI systems in critical financial infrastructure lack circuit + breakers, fallback mechanisms, or rate limiting controls that + would prevent availability failures from cascading into + operational outages. Financial AI systems that handle real-time + fraud detection, credit decisioning, or payment routing create + single points of failure when they have no graceful degradation + path — a model API outage or rate limit breach can halt + transaction processing entirely. Additionally, adversarial + denial-of-service attacks targeting AI inference endpoints + can render financial services unavailable by exhausting compute + resources through expensive prompt submissions. RBI FREE-AI + Recommendation 21 requires business continuity plans that + account for AI system failure scenarios, and red-teaming + exercises to validate resilience under stress conditions. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/privacy.anchor b/.anchor/domains/privacy.anchor new file mode 100644 index 0000000..deb71de --- /dev/null +++ b/.anchor/domains/privacy.anchor @@ -0,0 +1,81 @@ +type: domain +namespace: PRV +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: false +description: > + Privacy violations in AI systems. Covers PII leakage to external + models, vector store inversion attacks, and cross-context data bleed.
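The graceful-degradation path OPS-001 describes above is easiest to see in code. A minimal circuit-breaker sketch, with a stubbed model call and a rules-based fallback standing in for real endpoints:

```python
# Circuit-breaker sketch for OPS-001: after repeated failures, stop calling
# the AI endpoint and route to a deterministic fallback instead of halting
# transaction processing. All names here are illustrative.
import time

class CircuitBreaker:
    def __init__(self, max_failures: int = 3, reset_after: float = 30.0):
        self.max_failures, self.reset_after = max_failures, reset_after
        self.failures, self.opened_at = 0, 0.0

    def call(self, primary, fallback, *args):
        # While open, skip the AI endpoint entirely until the cool-down ends.
        if self.failures >= self.max_failures:
            if time.monotonic() - self.opened_at < self.reset_after:
                return fallback(*args)
            self.failures = 0  # half-open: try the primary again
        try:
            result = primary(*args)
            self.failures = 0
            return result
        except Exception:
            self.failures += 1
            self.opened_at = time.monotonic()
            return fallback(*args)

def score_with_model(txn):      # stand-in for the fraud-model API call
    raise TimeoutError("model endpoint unavailable")

def rules_based_fallback(txn):  # graceful degradation path
    return {"decision": "review", "source": "static-rules"}

breaker = CircuitBreaker()
print(breaker.call(score_with_model, rules_based_fallback, {"amount": 950}))
```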
+seal: "sha256:PENDING" + +rules: + + - id: "PRV-001" + name: "PII Leakage to Hosted Model" + source: "FINOS" + original_id: "Ri-015" + category: "privacy" + description: > + Personally identifiable information — names, account numbers, + transaction history, health data, biometric data, or any data + that can identify an individual — is transmitted to third-party + hosted AI models without adequate data governance controls. + Third-party models may memorize, log, or inadvertently reproduce + PII in subsequent completions. In Indian financial services, this + violates the DPDP Act 2023 purpose limitation requirement — + customer data collected for lending cannot be transmitted to + an external AI provider for general model training. It also + violates RBI Digital Lending Directions on data residency and + GDPR Article 6 lawful basis requirements for EU-facing operations. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "PRV-002" + name: "Vector Inversion Attack" + source: "FINOS" + original_id: "Ri-016" + category: "privacy" + description: > + Embeddings stored in vector databases can be used to reconstruct + or approximate the original sensitive data they were derived from + through inversion attacks. When financial documents, customer + records, or proprietary data are embedded and stored without + access controls or embedding protection, an attacker with read + access to the vector store can recover sensitive information + without ever accessing the original data source. This creates + a secondary data exposure surface that is frequently overlooked + in RAG-based financial AI systems. GDPR Article 5 data minimization + and DPDP Act security obligations apply to embedding stores + exactly as they apply to the underlying data. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "PRV-003" + name: "Cross-context Data Bleed" + source: "FINOS" + original_id: "Ri-022" + category: "privacy" + description: > + Data from one user's context, session, or request contaminates + another user's context through shared model state, improperly + isolated conversation history, or context window leakage in + multi-tenant AI deployments. In financial AI, cross-context + bleed can expose one customer's account details, transaction + history, or credit information to another customer in the same + model deployment. This is a critical violation of DPDP Act + Section 4 purpose limitation, RBI data governance requirements, + and basic financial data segregation principles. Multi-tenant + LLM deployments require strict session isolation that many + standard frameworks do not provide by default. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/security.anchor b/.anchor/domains/security.anchor new file mode 100644 index 0000000..5c7ac2b --- /dev/null +++ b/.anchor/domains/security.anchor @@ -0,0 +1,182 @@ +type: domain +namespace: SEC +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: true +description: > + Security violations in AI-adjacent code. Covers prompt injection, + data poisoning, model tampering, credential harvesting, authorization + bypass, raw network access, shell injection, and supply chain attacks. 
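Returning to PRV-001 above: the simplest governance control is a pre-send filter on everything leaving for a hosted model. A sketch assuming toy regex patterns; a production deployment would use a vetted PII classifier rather than three regexes:

```python
# Pre-send redaction sketch for PRV-001: strip obvious identifiers before
# a prompt is transmitted to a third-party hosted model.
import re

PII_PATTERNS = {
    "ACCOUNT": re.compile(r"\b\d{9,18}\b"),           # bank account numbers
    "PAN": re.compile(r"\b[A-Z]{5}\d{4}[A-Z]\b"),     # Indian PAN format
    "EMAIL": re.compile(r"\b[\w.+-]+@[\w-]+\.[\w.]+\b"),
}

def redact(prompt: str) -> str:
    for label, pattern in PII_PATTERNS.items():
        prompt = pattern.sub(f"[{label}]", prompt)
    return prompt

outbound = "Customer ABCDE1234F (acct 123456789012) asked about EMI options."
print(redact(outbound))
# -> Customer [PAN] (acct [ACCOUNT]) asked about EMI options.
```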
+seal: "sha256:PENDING" + +rules: + + - id: "SEC-001" + name: "Prompt Injection" + source: "FINOS" + original_id: "Ri-001" + category: "security" + description: > + An attacker manipulates an AI model's behavior by injecting malicious + instructions through untrusted input channels — user-supplied text, + document content, tool outputs, or any data that flows into a prompt + without sanitization. The model cannot distinguish between legitimate + instructions and injected ones, executing the attacker's intent + instead of the developer's. In financial systems, this can cause + models to leak customer data, bypass authorization logic, or generate + fraudulent outputs. Severity is BLOCKER because successful injection + can compromise the entire AI pipeline. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SEC-002" + name: "Data Poisoning" + source: "FINOS" + original_id: "Ri-002" + category: "security" + description: > + Malicious or corrupted data is introduced into the training, + fine-tuning, or retrieval pipeline, causing the model to learn + incorrect behaviors, biased outputs, or backdoor triggers that + activate under specific conditions. In financial AI, poisoned + training data can cause credit models to systematically favor + or disadvantage specific demographic groups, or cause fraud + detection models to miss specific attack patterns. The attack + is particularly dangerous because poisoned behavior is baked + into the model weights and survives redeployment. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SEC-003" + name: "Model Tampering" + source: "FINOS" + original_id: "Ri-003" + category: "security" + description: > + The model's weights, architecture, or infrastructure are + compromised through supply chain attacks, unauthorized access + to model storage, or malicious modification of model artifacts + during transit or at rest. A tampered model may behave normally + under standard conditions while producing controlled failures + or data leakage under specific trigger inputs. In regulated + financial systems, model tampering is equivalent to tampering + with a financial instrument — it undermines the integrity of + every decision the model makes and cannot be detected without + cryptographic verification of model artifacts. + severity: "blocker" + min_severity: "blocker" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SEC-004" + name: "Credential Harvesting" + source: "FINOS" + original_id: "Ri-004" + category: "security" + description: > + AI agents or AI-adjacent code systematically access environment + variables, configuration files, or credential stores to extract + API keys, tokens, database passwords, or other secrets. This + often manifests as broad os.environ access that exposes all + environment variables rather than accessing specific named keys. + In AI pipelines, credential harvesting risk is elevated because + models may generate code that accesses credentials, or agentic + systems may be manipulated into exfiltrating secrets to external + endpoints as part of a multi-step attack chain. 
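SEC-004's os.environ point, shown directly: the broad pattern the rule flags versus the narrow access it prefers.

```python
# SEC-004 contrast sketch.
import os

# Flagged: broad access. Serializing this dict into a prompt or tool output
# exfiltrates every credential the process can see.
all_secrets = dict(os.environ)

# Preferred: request exactly the key the component needs, and fail loudly
# if it is absent rather than falling back to a scan of the environment.
# PAYMENTS_API_KEY is a hypothetical variable name.
api_key = os.environ.get("PAYMENTS_API_KEY")
if api_key is None:
    raise RuntimeError("PAYMENTS_API_KEY not configured")
```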
+ severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SEC-005" + name: "Authorization Bypass" + source: "FINOS" + original_id: "Ri-012" + category: "security" + description: > + AI agents or model-integrated code execute actions outside their + granted permissions or bypass authorization checks that would + normally gate access to sensitive operations. This includes + agents that call APIs without verifying caller identity, models + that generate code skipping permission checks, and agentic + workflows that escalate privileges by chaining tool calls that + individually appear authorized. In financial AI, authorization + bypass can allow unauthorized access to customer accounts, + trading systems, or regulatory reporting pipelines. + severity: "blocker" + min_severity: "blocker" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SEC-006" + name: "Raw Network Access" + source: "FINOS" + original_id: "Ri-013" + category: "security" + description: > + AI components or model integration code make direct calls to + external LLM API endpoints, data sources, or third-party services + without routing through a governed proxy or backstop layer. Raw + network access bypasses governance controls, telemetry, rate + limiting, and audit logging. In regulated financial environments, + unproxied API calls to public LLM providers mean that sensitive + financial data and customer information may be transmitted to + external services without adequate data governance, violating + RBI Digital Lending Directions and EU AI Act data requirements. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SEC-007" + name: "Shell Injection" + source: "FINOS" + original_id: "Ri-014" + category: "security" + description: > + AI-generated code or agentic tool calls invoke shell commands, + subprocesses, or system calls that are constructed from untrusted + input or operate outside a sandboxed execution environment. Models + generating code frequently produce subprocess calls as part of + automation tasks — these calls become injection vectors when they + incorporate model outputs or user inputs without validation. In + AI pipelines, shell injection risk is compounded by the fact that + models may generate plausible-looking but malicious commands as + part of multi-step agentic workflows, bypassing human review. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SEC-008" + name: "Supply Chain Attack" + source: "FINOS" + original_id: "Ri-017" + category: "security" + description: > + Compromised dependencies, model repositories, MCP servers, or + third-party AI tool integrations introduce malicious code or + model artifacts into the AI pipeline. Supply chain attacks in + AI systems are particularly difficult to detect because the + compromise occurs upstream — a poisoned model checkpoint from + a public repository, a compromised MCP server injecting malicious + tool responses, or a tampered dependency that exfiltrates model + inputs to an attacker-controlled endpoint. Every external AI + component is a potential supply chain attack surface. 
+ severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/shared.anchor b/.anchor/domains/shared.anchor new file mode 100644 index 0000000..002ef56 --- /dev/null +++ b/.anchor/domains/shared.anchor @@ -0,0 +1,114 @@ +type: domain +namespace: SHR +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: true +description: > + Cross-domain risks that span multiple governance boundaries + simultaneously. Shared rules cannot be cleanly owned by a + single domain — they represent systemic risks where the + failure mode touches security, ethics, legal, and operational + concerns at the same time. This file is always loaded + regardless of what other domains are active. +seal: "sha256:PENDING" + +rules: + + - id: "SHR-001" + name: "Model Overreach and Expanded Use" + source: "FINOS" + original_id: "Ri-018" + domains: [LEG, ETH, ALN] + category: "governance" + description: > + An AI model is deployed or used beyond the validated context, + scope, or population for which it was developed, tested, and + approved — without re-validation, updated governance review, + or regulatory sign-off for the expanded use case. Model + overreach is a systemic risk that simultaneously breaches + legal obligations, ethical standards, and alignment + requirements. A credit scoring model validated for personal + loans being repurposed for small business lending without + re-validation violates EU AI Act conformity assessment + requirements for the new use case. A fraud detection model + trained on one demographic being applied to another without + bias re-testing violates ETH-001 fairness requirements. + An NLP model validated for internal document classification + being deployed in customer-facing decisions without transparency + review violates ETH-002 explainability requirements. Model + overreach is particularly dangerous in organizations moving + fast — the same model that works safely in one context can + cause systematic harm when the context changes without the + governance process catching up. + severity: "warning" + min_severity: "info" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SHR-002" + name: "Data Quality and Model Drift" + source: "FINOS" + original_id: "Ri-019" + domains: [OPS, ALN, SUP] + category: "accuracy" + description: > + AI model performance silently degrades over time as production + data drifts away from the distribution of the training data, + upstream data pipelines introduce errors or schema changes, or + the real-world phenomena the model was trained to predict + evolve in ways the model cannot track. Data drift is not a + single event — it is a continuous operational and alignment + risk that simultaneously degrades accuracy, introduces bias, + and undermines supply chain integrity. In financial AI, data + drift is particularly dangerous because the consequences + are not immediately visible — a credit model that has drifted + may continue approving and rejecting loans at the same rate + while the quality of those decisions silently deteriorates. + SEBI requires continuous monitoring of AI models because it + explicitly recognizes that AI models may change behavior over + time. RBI FREE-AI Recommendation 24 requires AI inventory with + risk profiles maintained for supervisory inspection — a drifted + model whose risk profile no longer reflects its actual behavior + fails this requirement. 
Without continuous monitoring, data + drift is invisible until a failure event triggers a regulatory + inquiry. + severity: "warning" + min_severity: "info" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SHR-003" + name: "Reputational and Conduct Risk" + source: "FINOS" + original_id: "Ri-020" + domains: [ETH, LEG] + category: "governance" + description: > + AI systems generate outputs or make decisions that, while not + triggering a specific security or privacy violation, cause + material reputational harm, regulatory conduct concerns, or + brand damage for the deploying organization. Reputational risk + in AI spans both domains simultaneously — it is an ethics + failure because the model's behavior falls below the conduct + standards required for customer-facing AI, and a legal risk + because reputational damage from AI misconduct has triggered + regulatory action and litigation. FCA Consumer Duty requires + firms to deliver good outcomes for retail customers — an AI + model that systematically provides poor advice, denies services + without adequate justification, or treats customers unfairly + triggers conduct risk regardless of technical compliance. In + Indian financial services, RBI FREE-AI Pillar 5 (Protection) + and Pillar 2 (Governance) both address consumer protection + obligations that go beyond technical rule compliance into + overall conduct quality. Reputational risk is difficult to + detect deterministically — it lives at the intersection of + model behavior and organizational context — which is why it + belongs in shared rather than any single domain. + severity: "error" + min_severity: "warning" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/domains/supply_chain.anchor b/.anchor/domains/supply_chain.anchor new file mode 100644 index 0000000..280842c --- /dev/null +++ b/.anchor/domains/supply_chain.anchor @@ -0,0 +1,87 @@ +type: domain +namespace: SUP +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +always_loaded: false +description: > + Supply chain violations in AI systems. Covers model leakage and + theft, weight corruption, and versioning drift across the AI + model supply chain. +seal: "sha256:PENDING" + +rules: + + - id: "SUP-001" + name: "Model Leakage and Theft" + source: "FINOS" + original_id: "Ri-005" + category: "robustness" + description: > + Proprietary AI model weights, architectures, or fine-tuning + data are exposed to unauthorized parties through inadequate + access controls, insecure model serving infrastructure, or + model extraction attacks that reconstruct model behavior through + repeated API queries. In financial AI, model theft is a material + risk because proprietary credit scoring models, fraud detection + logic, and algorithmic trading strategies represent significant + competitive and regulatory assets. A stolen credit model can be + reverse-engineered to understand approval thresholds, enabling + adversarial loan applications designed to game the system. + Model extraction attacks require no direct access to weights — + an attacker can reconstruct approximate model behavior through + black-box API access alone. 
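The continuous monitoring SHR-002 calls for can start as small as a population stability index per feature. A toy sketch with fabricated distributions and a common (not canonical) 0.2 alert threshold:

```python
# PSI drift-check sketch for SHR-002: compare a production feature's
# distribution against the distribution the model was validated on.
import math

def psi(expected: list, actual: list, bins: int = 10) -> float:
    lo, hi = min(expected), max(expected)
    step = (hi - lo) / bins or 1.0
    def hist(xs):
        counts = [0] * bins
        for x in xs:
            counts[min(int((x - lo) / step), bins - 1)] += 1
        return [max(c / len(xs), 1e-6) for c in counts]  # avoid log(0)
    e, a = hist(expected), hist(actual)
    return sum((ai - ei) * math.log(ai / ei) for ei, ai in zip(e, a))

training_income = [4.0 + 0.01 * i for i in range(1000)]    # validated distribution
production_income = [6.0 + 0.01 * i for i in range(1000)]  # drifted upward

score = psi(training_income, production_income)
print(f"PSI={score:.3f}", "DRIFT" if score > 0.2 else "stable")
```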
+ severity: "blocker" + min_severity: "blocker" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SUP-002" + name: "Weight Corruption" + source: "FINOS" + original_id: "Ri-006" + category: "robustness" + description: > + AI model weights are corrupted, modified, or replaced with + adversarial variants without detection, causing the deployed + model to differ from the validated and approved version. Weight + corruption can occur through supply chain compromise of model + repositories, unauthorized access to model storage, or + deliberate poisoning of model artifacts during deployment + pipelines. The danger is that a corrupted model may pass + standard functional tests while containing backdoor triggers + or systematic biases that only activate under specific conditions. + Without cryptographic verification of model artifacts at load + time, there is no way to prove that the model currently running + in production is the model that was audited and approved. + severity: "blocker" + min_severity: "error" + min_mitigations: 1 + detection: ~ + primitives: ~ + + - id: "SUP-003" + name: "Versioning Drift" + source: "FINOS" + original_id: "Ri-007" + category: "operations" + description: > + AI models deployed in production operate on unpinned or + undocumented versions, causing silent behavioral changes when + model providers update their APIs or when local models are + replaced without formal change management. Versioning drift + means the model producing decisions today is not the model + that was validated, tested, or approved — breaking the chain + of accountability that regulators require. FCA 2024 requires + model version traceability per decision. SEBI requires 5-year + retention of model input and output data with version documentation. + RBI FREE-AI Recommendation 24 requires an AI inventory with + version tracking for supervisory inspection. A system that + cannot identify exactly which model version produced a specific + decision cannot satisfy any of these requirements. + severity: "warning" + min_severity: "info" + min_mitigations: 1 + detection: ~ + primitives: ~ diff --git a/.anchor/frameworks/FINOS_Framework.anchor b/.anchor/frameworks/FINOS_Framework.anchor new file mode 100644 index 0000000..96e67b1 --- /dev/null +++ b/.anchor/frameworks/FINOS_Framework.anchor @@ -0,0 +1,178 @@ +type: framework +namespace: FINOS +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +source: "FINOS AI Governance Framework" +source_url: "https://github.com/finos/ai-governance-framework" +credit: "FINOS AI Governance Framework Risk Taxonomy (Ri-001 - Ri-023)" +description: > + The FINOS AI Governance Framework provides the foundational risk + taxonomy for Anchor. This framework file acts as the primary + mapping layer, connecting the original FINOS Ri-IDs and V3 ANC-IDs + to the refined V4 Domain-prefixed rules. Use this framework to + ensure compliance with the FINOS standard. +seal: "sha256:PENDING" + +rules: + + - id: "FINOS-001" + name: "Prompt Injection" + original_id: "Ri-001" + maps_to: "SEC-001" + severity: "blocker" + description: "Malicious instructions injected into prompts." + + - id: "FINOS-002" + name: "Data Poisoning" + original_id: "Ri-002" + maps_to: "SEC-002" + severity: "blocker" + description: "Poisoning of training, fine-tuning, or retrieval data." + + - id: "FINOS-003" + name: "Model Tampering" + original_id: "Ri-003" + maps_to: "SEC-003" + severity: "blocker" + description: "Unauthorized modification of model weights or artifacts." 
+ + - id: "FINOS-004" + name: "Credential Harvesting" + original_id: "Ri-004" + maps_to: "SEC-004" + severity: "blocker" + description: "Systematic exfiltration of secrets via AI pipelines." + + - id: "FINOS-005" + name: "Model Leakage and Theft" + original_id: "Ri-005" + maps_to: "SUP-001" + severity: "blocker" + description: "Unauthorized export or exfiltration of model weights." + + - id: "FINOS-006" + name: "Weight Corruption" + original_id: "Ri-006" + maps_to: "SUP-002" + severity: "blocker" + description: "Accidental or malicious corruption of model weights." + + - id: "FINOS-007" + name: "Versioning Drift" + original_id: "Ri-007" + maps_to: "SUP-003" + severity: "warning" + description: "Undocumented or unverified changes in model versions." + + - id: "FINOS-008" + name: "Hallucination" + original_id: "Ri-008" + maps_to: "ALN-001" + severity: "error" + description: "Model generating plausible but false or dangerous information." + + - id: "FINOS-009" + name: "Bias and Discrimination" + original_id: "Ri-009" + maps_to: "ETH-001" + severity: "error" + description: "Systematically biased or discriminatory model outcomes." + + - id: "FINOS-010" + name: "Explainability Absence" + original_id: "Ri-010" + maps_to: "ETH-002" + severity: "error" + description: "Decisions made by black-box models that cannot be explained." + + - id: "FINOS-011" + name: "Availability and Denial" + original_id: "Ri-011" + maps_to: "OPS-001" + severity: "error" + description: "AI system unavailability due to resource exhaustion or attacks." + + - id: "FINOS-012" + name: "Authorization Bypass" + original_id: "Ri-012" + maps_to: "SEC-005" + severity: "blocker" + description: "Executing actions outside granted permissions via AI tools." + + - id: "FINOS-013" + name: "Raw Network Access" + original_id: "Ri-013" + maps_to: "SEC-006" + severity: "error" + description: "Unproxied outbound network calls from AI components." + + - id: "FINOS-014" + name: "Shell Injection" + original_id: "Ri-014" + maps_to: "SEC-007" + severity: "blocker" + description: "Executing shell commands constructed from untrusted model input." + + - id: "FINOS-015" + name: "PII Leakage" + original_id: "Ri-015" + maps_to: "PRV-001" + severity: "blocker" + description: "Unauthorized exposure of Personally Identifiable Information." + + - id: "FINOS-016" + name: "Vector Inversion" + original_id: "Ri-016" + maps_to: "PRV-002" + severity: "error" + description: "Reconstructing training data from embedding vectors." + + - id: "FINOS-017" + name: "Supply Chain Attack" + original_id: "Ri-017" + maps_to: "SEC-008" + severity: "blocker" + description: "Compromised upstream dependencies or tool integrations." + + - id: "FINOS-018" + name: "Model Overreach" + original_id: "Ri-018" + maps_to: "SHR-001" + severity: "warning" + description: "Using models beyond their validated scope or context." + + - id: "FINOS-019" + name: "Regulatory Non-Compliance" + original_id: "Ri-019" + maps_to: "LEG-002" + severity: "error" + description: "AI deployment violating specific jurisdictional laws." + + - id: "FINOS-020" + name: "Human Oversight Removal" + original_id: "Ri-020" + maps_to: "ETH-003" + severity: "blocker" + description: "Autonomous decisions made without human-in-the-loop controls." + + - id: "FINOS-021" + name: "Goal Misrepresentation" + original_id: "Ri-021" + maps_to: "ALN-002" + severity: "blocker" + description: "Agents pursuing objectives misaligned with user intent." 
+ + - id: "FINOS-022" + name: "Cross-context Data Bleed" + original_id: "Ri-022" + maps_to: "PRV-003" + severity: "error" + description: "Data from one context leaking into another via shared state." + + - id: "FINOS-023" + name: "IP Infringement" + original_id: "Ri-023" + maps_to: "LEG-001" + severity: "warning" + description: "Model outputs infringing on intellectual property or copyright." diff --git a/.anchor/frameworks/NIST_AI_RMF.anchor b/.anchor/frameworks/NIST_AI_RMF.anchor new file mode 100644 index 0000000..eefe238 --- /dev/null +++ b/.anchor/frameworks/NIST_AI_RMF.anchor @@ -0,0 +1,66 @@ +type: framework +namespace: NIST +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +source: "NIST AI Risk Management Framework (AI RMF 1.0)" +source_url: "https://www.nist.gov/itl/ai-rmf" +credit: "National Institute of Standards and Technology (NIST)" +description: > + The NIST AI RMF provides a high-level framework for managing + risks associated with AI systems. Unlike risk taxonomies, + NIST RMF defines governance functions—Govern, Map, Measure, + Manage. This framework file maps these functions to Anchor's + operational primitives and enforcement mechanisms. +seal: "sha256:PENDING" + +rules: + + - id: "NIST-GOV" + name: "GOVERN: Institutional Policies" + original_id: "Govern 1.1" + maps_to: "LEG-002" + severity: "error" + obligation_type: "audit" + anchor_mechanism: "policy.anchor + sealed manifest" + description: > + Policies, processes, and procedures for AI risk management are + established and maintained. Anchor satisfies this by enforcing + a cryptographically sealed constitution and project-level + policy.anchor overrides. + + - id: "NIST-MAP" + name: "MAP: Risk Identification" + original_id: "Map 1.1" + maps_to: "SHR-001" + severity: "warning" + obligation_type: "audit" + anchor_mechanism: "anchor check --report-server" + description: > + Context is established and risks are identified and documented. + Anchor's federated domains (SEC, ETH, PRV, etc.) provide the + contextual mapping of technical risks to organizational impact. + + - id: "NIST-MEAS" + name: "MEASURE: Risk Assessment" + original_id: "Measure 2.1" + maps_to: "OPS-001" + severity: "warning" + obligation_type: "provenance" + anchor_mechanism: "telemetry_path: .anchor/telemetry/" + description: > + AI systems are assessed for risks and impacts. Anchor's + telemetry output provides the metrics for assessing frequency + and severity of compliance violations across the fleet. + + - id: "NIST-MAN" + name: "MANAGE: Risk Treatment" + original_id: "Manage 1.1" + maps_to: "ALN-002" + severity: "blocker" + obligation_type: "audit" + anchor_mechanism: "anchor check --severity error (CI Gate)" + description: > + Risks are prioritized and managed based on impact and likelihood. + Anchor's CI/CD integration (pre-commit hooks, GH Actions) acts + as the primary "Manage" gate, blocking non-compliant code from deployment.
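The CI gate NIST-MAN names (`anchor check --severity error`) would typically be wired in as a pre-commit hook. A sketch that assumes the conventional non-zero-exit-on-violation contract, which this manifest implies but does not spell out:

```python
#!/usr/bin/env python3
# Pre-commit hook sketch for the NIST "Manage" gate. The `anchor check`
# invocation is taken from this file; its exit-code behavior is assumed.
import subprocess
import sys

result = subprocess.run(["anchor", "check", "--severity", "error"])
if result.returncode != 0:
    print("anchor: ERROR-level violations found; commit blocked.",
          file=sys.stderr)
sys.exit(result.returncode)
```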
diff --git a/.anchor/frameworks/OWASP_LLM.anchor b/.anchor/frameworks/OWASP_LLM.anchor new file mode 100644 index 0000000..e23ea1f --- /dev/null +++ b/.anchor/frameworks/OWASP_LLM.anchor @@ -0,0 +1,86 @@ +type: framework +namespace: OWASP +version: "2025" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +source: "OWASP Top 10 for Large Language Model Applications" +source_url: "https://owasp.org/www-project-top-10-for-large-language-model-applications/" +credit: "OWASP Foundation" +description: > + The OWASP Top 10 for LLMs provides a list of the most critical + security risks for applications utilizing Large Language Models. + This framework file maps OWASP LLM-specific risks to the + canonical Anchor V4 Domain rules. +seal: "sha256:PENDING" + +rules: + + - id: "OWASP-001" + name: "LLM01: Prompt Injection" + original_id: "LLM-01" + maps_to: "SEC-001" + severity: "blocker" + description: "Malicious instructions injected into prompts to manipulate LLM behavior." + + - id: "OWASP-002" + name: "LLM02: Insecure Output Handling" + original_id: "LLM-02" + maps_to: "SEC-007" + severity: "blocker" + description: "Failure to sanitize LLM outputs before passing them to sensitive downstream functions (e.g. shell)." + + - id: "OWASP-003" + name: "LLM03: Training Data Poisoning" + original_id: "LLM-03" + maps_to: "SEC-002" + severity: "blocker" + description: "Poisoning training data to create backdoors or bias in LLM behavior." + + - id: "OWASP-004" + name: "LLM04: Model Denial of Service" + original_id: "LLM-04" + maps_to: "OPS-001" + severity: "error" + description: "Causing excessive resource consumption in LLMs to degrade availability." + + - id: "OWASP-005" + name: "LLM05: Supply Chain Vulnerabilities" + original_id: "LLM-05" + maps_to: "SEC-008" + severity: "blocker" + description: "Risks from compromised third-party components, data, or models." + + - id: "OWASP-006" + name: "LLM06: Sensitive Information Disclosure" + original_id: "LLM-06" + maps_to: "PRV-001" + severity: "blocker" + description: "LLM leaking PII or other sensitive data in its responses." + + - id: "OWASP-007" + name: "LLM07: Insecure Plugin Design" + original_id: "LLM-07" + maps_to: "AGT-001" + severity: "blocker" + description: "Plugins/tools with insufficient access controls callable by the LLM." + + - id: "OWASP-008" + name: "LLM08: Excessive Agency" + original_id: "LLM-08" + maps_to: "AGT-005" + severity: "blocker" + description: "LLM having broad permissions or functioning without adequate human oversight." + + - id: "OWASP-009" + name: "LLM09: Overreliance" + original_id: "LLM-09" + maps_to: "ALN-001" + severity: "error" + description: "Dependence on LLM outputs without verification, increasing risk from hallucinations." + + - id: "OWASP-010" + name: "LLM10: Model Theft" + original_id: "LLM-10" + maps_to: "SUP-001" + severity: "blocker" + description: "Unauthorized access, copying, or extraction of proprietary models." 
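The sha256 seals that recur through these files (the `.anchor.lock` manifest and the per-file `seal:` fields) imply a verification step at load time. A sketch of that check, assuming PyYAML and the lockfile layout shown earlier; the same pattern extends to the MCP manifest verification AGT-003 requires:

```python
# Lockfile verification sketch: recompute each file's sha256 and compare it
# against .anchor/.anchor.lock. Honors the lockfile's algorithm field
# (sha256) and its offline_behaviour: warn setting by reporting rather
# than failing hard. Requires PyYAML.
import hashlib
import pathlib
import yaml

ROOT = pathlib.Path(".anchor")
lock = yaml.safe_load((ROOT / ".anchor.lock").read_text())

for rel_path, expected in lock["files"].items():
    digest = hashlib.sha256((ROOT / rel_path).read_bytes()).hexdigest()
    if digest != expected:
        # offline_behaviour: warn — report the mismatch instead of aborting.
        print(f"WARN seal mismatch: {rel_path}")
```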
diff --git a/.anchor/government/CFPB_Regulations.anchor b/.anchor/government/CFPB_Regulations.anchor new file mode 100644 index 0000000..7ecab44 --- /dev/null +++ b/.anchor/government/CFPB_Regulations.anchor @@ -0,0 +1,116 @@ +type: framework +namespace: CFPB +version: "2024" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "CFPB Regulation B (Equal Credit Opportunity Act) and 2024 AI Guidance" +source_url: "https://www.consumerfinance.gov/compliance/circulars/" +source_date: "2024" +credit: > + Consumer Financial Protection Bureau Regulation B implementing + the Equal Credit Opportunity Act (ECOA), and CFPB 2024 guidance + on adverse action notification requirements for AI-assisted credit + decisions. The $45 million enforcement action against Goldman Sachs + in October 2024 established the enforcement precedent for AI credit + model explainability obligations in US financial services. +layer_2_status: > + Rules marked obligation_type: provenance depend on AnchorRuntime + (Layer 2) and the Decision Audit Chain (DAC). Layer 2 is currently + in development. +seal: "sha256:PENDING" + +rules: + + - id: "CFPB-001" + name: "Adverse Action Notice — Specific Reasons Required" + original_id: "Regulation B, Section 202.9" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on credit decision code + without reason codes. CREDIT-001 violation fires specifically + on denial output with no reason_code field. adverse_action_reasons() + method on AuditEntry produces ECOA-compliant reason codes. + Layer 1 detection active now. + severity: "blocker" + min_severity: "blocker" + description: > + Creditors must provide applicants with specific, principal + reasons for adverse action taken on credit applications. The + CFPB explicitly rejects the position that algorithmic complexity + justifies opaque denials — the reasons must be specific, + comprehensible, and accurate. Goldman Sachs paid $45 million + in October 2024 not because their Apple Card AI model was + wrong, but because they could not explain at the individual + decision level why the algorithm reached its conclusions. + This is the most directly enforced AI compliance obligation + in US financial services. + + - id: "CFPB-002" + name: "AI Credit Models — Algorithm Not an Excuse" + original_id: "CFPB Circular 2024" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence detection. CREDIT-001 fires + on denial without reason code. ADV-001 fires on adverse + action without violation_id linkage. Layer 1 active now. + severity: "blocker" + min_severity: "blocker" + description: > + CFPB 2024 guidance explicitly extends Regulation B to + AI-assisted credit decisions. The use of a complex AI model + does not exempt creditors from providing specific reasons + for adverse action. The model's complexity is the creditor's + problem, not the applicant's. Any creditor that cannot + explain its AI credit decisions at the individual level + is in violation of Regulation B regardless of the model's + technical architecture. + + - id: "CFPB-003" + name: "Prohibited Basis Discrimination — ECOA Enforcement" + original_id: "ECOA Section 701, Regulation B Section 202.4" + maps_to: "ETH-001" + obligation_type: detection + anchor_mechanism: > + ETH-001 bias and discrimination detection active in Layer 1. + BIAS-001 fires on protected class reference in credit output. 
+ BIAS-* violation category covers all ECOA protected characteristics: + race, color, religion, national origin, sex, marital status, + age, public assistance income. + severity: "blocker" + min_severity: "blocker" + description: > + ECOA prohibits credit discrimination based on race, color, + religion, national origin, sex, marital status, age, or + receipt of public assistance income. AI systems that produce + disparate impact on protected classes violate ECOA even + without discriminatory intent. The Fair Housing Act extends + these protections to mortgage and housing-related credit. + Disparate impact is measured against outcomes, not intent — + a facially neutral AI model that produces systematically + worse outcomes for protected groups is an ECOA violation + regardless of how it was designed. + + - id: "CFPB-004" + name: "Model Risk Management — Documented Validation" + original_id: "CFPB Supervisory Guidance 2024" + maps_to: "LEG-002" + obligation_type: audit + anchor_mechanism: > + anchor audit pre-deployment produces validation evidence. + Violation taxonomy as documented validation artifact. + DAC audit chain as ongoing monitoring record. Layer 2 + in development for full monitoring support. + severity: "error" + min_severity: "error" + description: > + CFPB 2024 guidance requires that AI credit models be + validated, documented, and subject to ongoing monitoring. + Validation results must be available for supervisory + examination. Backtesting and performance monitoring are + required throughout the model lifecycle — not just at + initial deployment. Model risk management for AI credit + models is subject to the same supervisory scrutiny as + traditional statistical models under Federal Reserve SR 11-7. diff --git a/.anchor/government/EU_AI_Act.anchor b/.anchor/government/EU_AI_Act.anchor new file mode 100644 index 0000000..631a397 --- /dev/null +++ b/.anchor/government/EU_AI_Act.anchor @@ -0,0 +1,258 @@ +type: framework +namespace: EU +version: "2024/1689" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "EU Artificial Intelligence Act (Regulation EU 2024/1689)" +source_url: "https://eur-lex.europa.eu/eli/reg/2024/1689/oj/eng" +source_date: "August 1, 2024" +credit: > + Regulation (EU) 2024/1689 of the European Parliament and of the + Council laying down harmonised rules on artificial intelligence. + Published in the Official Journal of the European Union, L series, + 2024. Full enforcement of high-risk AI provisions begins August 2, + 2026. Credit scoring, AML monitoring, and fraud detection are + legally classified as high-risk AI systems under Annex III. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. These rules are specified + as designed and will be enforced once Layer 2 ships. +seal: "sha256:PENDING" + +rules: + + - id: "EU-ART09" + name: "Risk Management System — Continuous Lifecycle" + original_id: "Article 9" + maps_to: "LEG-002" + obligation_type: detection + anchor_mechanism: > + constitution.anchor sealed ruleset constitutes the documented + risk management system. anchor check in CI/CD provides the + continuous testing requirement. Violation report is the + documented evidence of risk management activity.
+ severity: "blocker" + min_severity: "blocker" + description: > + A documented, ongoing risk management system must be established, + implemented, and maintained across the entire AI lifecycle for + all high-risk AI systems. The system must identify known and + foreseeable risks, estimate and evaluate risks, adopt risk + management measures, and test the system before market placement + and throughout development. This is not a one-time process — + it must be updated continuously. Anchor's sealed constitution + and CI/CD integration satisfy the technical continuous testing + requirement. Fines up to €15 million or 3% of total worldwide + annual turnover for non-compliance after August 2, 2026. + + - id: "EU-ART10" + name: "Data and Data Governance" + original_id: "Article 10" + maps_to: "ETH-001" + obligation_type: detection + anchor_mechanism: > + ETH-001 bias detection active in Layer 1. PRV-002 vector + inversion detection covers embedding data governance. + PROV-003 provenance violation fires on missing data lineage. + DATA-* violation category covers data governance gaps. + severity: "blocker" + min_severity: "error" + description: > + Training, validation, and testing datasets for high-risk AI + systems must be subject to appropriate data governance practices. + Data must be relevant, representative, and free from errors. + Bias detection and mitigation are required. Data residency and + provenance must be documented. For financial AI, this means + every dataset used in credit scoring, AML, or fraud detection + must have documented provenance, bias testing results, and + residency records available for conformity assessment. + + - id: "EU-ART11" + name: "Technical Documentation — Before Market Placement" + original_id: "Article 11" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Violation taxonomy + constitution.anchor + mitigation.anchor + together constitute the technical documentation layer. + anchor audit --report generates the structured documentation + artifact for conformity assessment submission. + severity: "blocker" + min_severity: "blocker" + description: > + Technical documentation must be drawn up before the AI system + is placed on the market or put into service. Must include: + general description of the system, system components, + development process, training methodology, validation results, + capabilities and limitations, and risk mitigation measures + adopted. Anchor's audit report, sealed constitution, and + violation taxonomy together constitute the technical + documentation that feeds the EU conformity assessment process. + + - id: "EU-ART12" + name: "Record-Keeping — Auto-Generated Tamper-Evident Logs" + original_id: "Article 12" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + DAC AuditEntry chain satisfies all Article 12 requirements. + entry_id = unique record identifier. chain_hash = tamper + evidence. signature = per-entry integrity seal. timestamp = + retention timestamp. model_id + model_version = system + identification. eu_article12_record() method serializes + to EU AI Act compliant log format. Layer 2 in development. + severity: "blocker" + min_severity: "blocker" + description: > + High-risk AI systems must automatically generate logs enabling + post-hoc review of the system's operation. Logs must be retained + for a period defined by the deploying operator or relevant + sectoral authority — minimum 6 months for most financial AI + applications.
Logs must be tamper-evident and enable + reconstruction of the circumstances around events of concern. + This is the most technically specific Article in the EU AI Act + and the one most directly satisfied by Anchor's Decision Audit + Chain architecture. + + - id: "EU-ART13" + name: "Transparency — Information to Deployers" + original_id: "Article 13" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on black-box model + usage without explain() hooks. adverse_action_reasons() + provides CFPB and EU compliant reason codes. TRANS-* + violation category covers transparency gaps. model_version + in AuditEntry satisfies system identification requirement. + severity: "blocker" + min_severity: "error" + description: > + High-risk AI systems must be designed to be sufficiently + transparent that deployers can understand the system's + capabilities, limitations, and intended purpose. Instructions + for use must include: identity of the provider, capabilities + and performance limitations, accuracy metrics, human oversight + measures, and technical measures for human control. In financial + AI, this means every AI-assisted decision output must include + enough information for the deploying institution — and + ultimately the affected individual — to understand why the + decision was made. + + - id: "EU-ART14" + name: "Human Oversight — Intervention and Override" + original_id: "Article 14" + maps_to: "ETH-003" + obligation_type: detection + anchor_mechanism: > + ETH-003 human oversight removal fires when autonomous + decision code has no human review checkpoint. AnchorRuntime + compliant flag per AuditEntry records whether human oversight + was maintained for each decision. Layer 2 in development + for runtime enforcement. + severity: "blocker" + min_severity: "blocker" + description: > + High-risk AI systems must be designed to allow effective human + oversight. Deployers must be able to monitor the system's + operation, detect and address malfunctions, and interrupt, + disregard, or override the system's outputs when necessary. + Human oversight must be effective — not nominal. A human + reviewer who is presented with AI outputs too quickly, without + adequate context, or under time pressure that makes genuine + review impossible does not satisfy Article 14. The oversight + mechanism must be designed to be practically effective. + + - id: "EU-ART15" + name: "Accuracy, Robustness and Cybersecurity" + original_id: "Article 15" + maps_to: "SEC-001" + obligation_type: detection + anchor_mechanism: > + SEC-001 prompt injection, SEC-002 data poisoning, SEC-003 + model tampering, SEC-008 supply chain attack, AGT-001 through + AGT-005 agentic security rules all fire in Layer 1. Diamond + Cage WASM sandbox provides runtime robustness for high-risk + operations. + severity: "blocker" + min_severity: "error" + description: > + High-risk AI systems must achieve appropriate levels of accuracy, + robustness, and cybersecurity for their intended purpose. + They must be resilient against errors, faults, and adversarial + attacks — including prompt injection, data poisoning, and model + evasion attempts. Security measures must be commensurate with + the risk profile of the specific AI system and its deployment + context. Anchor's SEC- and AGT- domain rules provide the + technical detection layer for every adversarial attack category + enumerated in Article 15. 
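Article 14's requirement that oversight be effective rather than nominal is exactly what ETH-003 inspects for: an autonomous decision path with no human checkpoint. A hedged sketch of such a checkpoint follows; the Decision shape and field names are assumptions for illustration, not the AnchorRuntime API:

    # Illustrative only: the kind of human-review gate ETH-003 looks for.
    # The Decision type and its fields are assumed names, not Anchor's API.
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Decision:
        outcome: str
        risk_level: str                    # e.g. "low" or "high"
        reviewed_by: Optional[str] = None  # named human reviewer, or None

    def release(decision: Decision) -> Decision:
        """Refuse to release a high-risk outcome without a named reviewer."""
        if decision.risk_level == "high" and decision.reviewed_by is None:
            raise PermissionError("Article 14: human sign-off required before release")
        return decision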
+ + - id: "EU-ART16" + name: "Provider Obligations — Complete List" + original_id: "Article 16" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Sealed constitution.anchor + full DAC audit chain together + constitute the conformity evidence package. anchor audit + --report generates the structured disclosure artifact for + EU database registration and supervisory authority submission. + severity: "blocker" + min_severity: "blocker" + description: > + Providers of high-risk AI systems must: ensure compliance with + all technical requirements, draw up technical documentation, + operate a quality management system, keep technical documentation + and logs for the required retention period, conduct conformity + assessment, register in the EU AI database before market + placement, affix CE marking where required, appoint an + authorised representative in the EU where applicable, and + cooperate with national competent authorities on request. + + - id: "EU-ART26" + name: "Deployer Obligations — Monitoring and Oversight" + original_id: "Article 26" + maps_to: "ETH-003" + obligation_type: audit + anchor_mechanism: > + AnchorRuntime continuous eval satisfies continuous monitoring + requirement. Real-time violation detection per AuditEntry. + compliant boolean per decision records governance status. + Layer 2 in development. + severity: "blocker" + min_severity: "error" + description: > + Deployers of high-risk AI systems must implement appropriate + human oversight measures, monitor the system for anomalous + behavior, suspend use when serious risk is identified, inform + the provider of serious incidents, and keep logs generated + by the AI system for the required retention period. Deployers + must also conduct data protection impact assessments where + the system processes personal data. The deployer bears + regulatory liability for every decision the system makes + in their deployment context. + + - id: "EU-ART99" + name: "Penalties — No Safe Harbour After August 2026" + original_id: "Article 99" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Full Anchor compliance stack — sealed constitution, active + domain rules, DAC audit chain, anchor audit --report — is + the compliance evidence package that demonstrates conformity + and mitigates penalty exposure. + severity: "blocker" + min_severity: "blocker" + description: > + Violations of requirements for high-risk AI systems carry + fines of up to €15 million or 3% of total worldwide annual + turnover, whichever is higher. Violations of Article 5 + prohibited practices carry up to €35 million or 7%. + There is no grace period after August 2, 2026. Supervisory + authorities in each EU member state are empowered to conduct + inspections, demand documentation, and impose fines without + prior warning. The only defense is documented, demonstrable + compliance — not intent to comply.
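The record-keeping and penalty rules above all lean on one primitive: a tamper-evident, reconstructible log. Below is a minimal sketch of a hash-chained audit record in the spirit of the DAC described in EU-ART12; field names follow the rule text, but since Layer 2 is still in development this is an illustration under stated assumptions, not the shipped implementation (per-entry signatures are omitted):

    # Sketch of an Article 12-style hash chain. Field names mirror the DAC
    # description (entry_id, input_hash, output_hash, chain_hash); signing omitted.
    import hashlib, json, time, uuid

    GENESIS = "0" * 64

    def make_entry(prev_chain_hash: str, model_id: str, model_version: str,
                   input_data: str, output_data: str) -> dict:
        entry = {
            "entry_id": str(uuid.uuid4()),
            "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "model_id": model_id,
            "model_version": model_version,
            "input_hash": hashlib.sha256(input_data.encode()).hexdigest(),
            "output_hash": hashlib.sha256(output_data.encode()).hexdigest(),
        }
        payload = json.dumps(entry, sort_keys=True) + prev_chain_hash
        entry["chain_hash"] = hashlib.sha256(payload.encode()).hexdigest()
        return entry

    def verify_chain(entries: list) -> bool:
        prev = GENESIS
        for e in entries:
            body = {k: v for k, v in e.items() if k != "chain_hash"}
            payload = json.dumps(body, sort_keys=True) + prev
            if e["chain_hash"] != hashlib.sha256(payload.encode()).hexdigest():
                return False  # any mutation of an earlier entry breaks here
            prev = e["chain_hash"]
        return True

    log = [make_entry(GENESIS, "credit-scorer", "2.4.1", "applicant-123", "DENY")]
    assert verify_chain(log)

Altering any retained field in any entry invalidates every later chain_hash, which is the post-hoc reconstructability Article 12 asks for.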
diff --git a/.anchor/government/FCA_Regulations.anchor b/.anchor/government/FCA_Regulations.anchor new file mode 100644 index 0000000..9928abb --- /dev/null +++ b/.anchor/government/FCA_Regulations.anchor @@ -0,0 +1,142 @@ +type: framework +namespace: FCA +version: "2024" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "FCA AI Governance Guidance 2024 and FCA Consumer Duty" +source_url: "https://www.fca.org.uk/publications/feedback-statements/fs23-6-artifical-intelligence-machine-learning" +source_date: "2024" +credit: > + UK Financial Conduct Authority guidance on AI governance + (FS23/6 Feedback Statement and subsequent 2024 guidance). + FCA Consumer Duty effective July 31, 2023. FCA guidance + effective September 2026 links AI governance failures to + fitness and propriety of compliance leadership — CCOs and + CROs can be held personally accountable for AI governance + failures from that date. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. +seal: "sha256:PENDING" + +rules: + + - id: "FCA-001" + name: "Human Oversight Records — AI-Assisted Decisions" + original_id: "FCA AI Governance Guidance 2024" + maps_to: "ETH-003" + obligation_type: audit + anchor_mechanism: > + DAC query API /audit exposes full oversight record. + ETH-003 human oversight removal violation fires on + autonomous decision code without human review checkpoint. + AuditEntry records compliant boolean per decision. + Layer 2 in development. + severity: "blocker" + min_severity: "error" + description: > + FCA 2024 guidance requires firms to demonstrate human oversight + and maintain records of AI-assisted decisions sufficient to + support supervisory review. Oversight must be documented — + not merely stated in policy. A firm that claims to have human + oversight but cannot produce records demonstrating that oversight + occurred for specific decisions does not satisfy this requirement. + The FCA has indicated that oversight records will be a primary + focus of AI-related supervisory visits. + + - id: "FCA-002" + name: "Model Version Traceability Per Decision" + original_id: "FCA AI Governance Guidance 2024" + maps_to: "SUP-003" + obligation_type: provenance + anchor_mechanism: > + model_version in every AuditEntry records exact model version + per decision. SUP-003 versioning drift violation fires on + undeclared model version changes. Layer 2 in development. + severity: "error" + min_severity: "error" + description: > + FCA requires firms to maintain records showing which version + of which model produced which decision. Model version + traceability must be continuous — not just documented at + the point of initial deployment. When a model is updated, + the version change must be logged, and historical decisions + must remain traceable to the model version that produced them. + This requirement is a prerequisite for any post-hoc supervisory + review of AI-assisted decisions. + + - id: "FCA-003" + name: "CCO Personal Liability — AI Governance Failures" + original_id: "FCA Guidance September 2026" + maps_to: "ETH-003" + obligation_type: disclosure + anchor_mechanism: > + Signed DAC audit chain is the CCO's evidence of governance. + anchor audit --report generates the compliance record that + demonstrates active governance during the relevant period. 
+ A CCO who can produce sealed, timestamped governance records + has documented evidence of their oversight function. + severity: "blocker" + min_severity: "blocker" + description: > + From September 2026, FCA guidance links AI governance failures + to the fitness and propriety of compliance leadership. Chief + Compliance Officers and Chief Risk Officers can be held + personally accountable for AI governance failures — not just + the firm. This transforms AI governance from a corporate risk + into a personal career risk for named compliance individuals. + The only defense is documented, demonstrable governance — + which Anchor's sealed audit chain provides. A CCO who cannot + produce evidence of active AI governance when the FCA asks + is personally exposed. + + - id: "FCA-004" + name: "AML AI Output Cryptographic Verification" + original_id: "FCA AML Guidance 2024" + maps_to: "SEC-006" + obligation_type: provenance + anchor_mechanism: > + AML-002 violation fires on AML output without output_hash + verification. output_hash in AuditEntry provides SHA-256 + binding of every AML flagging result to the model output + that produced it. Layer 2 in development. + severity: "error" + min_severity: "error" + description: > + FCA 2024 guidance on AML requires that AI-generated flagging + results be verifiable — displayed AML alerts must be + cryptographically linkable to the originating model output. + An AML alert that cannot be traced to a specific model output + at a specific timestamp is unverifiable, and an unverifiable + alert cannot form the basis of a suspicious activity report + that would survive regulatory scrutiny. This is particularly + critical in automated AML pipelines where human review + occurs after flagging rather than before. + + - id: "FCA-005" + name: "Consumer Duty — Good Outcomes for Retail Customers" + original_id: "FCA Consumer Duty PS22/9" + maps_to: "ETH-004" + obligation_type: detection + anchor_mechanism: > + ETH-004 toxic output detection fires on harmful customer + communications. ETH-002 explainability absence fires on + opaque customer-facing AI decisions. SHR-003 reputational + and conduct risk covers Consumer Duty conduct obligations. + Layer 1 active now. + severity: "error" + min_severity: "error" + description: > + FCA Consumer Duty requires firms to deliver good outcomes + for retail customers across four outcome areas: products + and services, price and value, consumer understanding, and + consumer support. For AI systems interacting with retail + customers, this means AI outputs must be accurate, fair, + and comprehensible — not optimized for firm metrics at the + expense of customer outcomes. An AI system that generates + misleading communications, opaque decisions, or outcomes + that systematically disadvantage retail customers violates + Consumer Duty regardless of technical compliance with + other regulatory requirements. 
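FCA-004's "cryptographically linkable" wording reduces to a simple discipline: hash the model output at generation time and store the digest beside the alert. A sketch under those assumptions (names illustrative):

    # Illustrative: bind an AML alert to the exact model output that produced
    # it (FCA-004). SHA-256 digest only; DAC signing and storage are omitted.
    import hashlib

    def sha256_hex(text: str) -> str:
        return hashlib.sha256(text.encode("utf-8")).hexdigest()

    def flag_transaction(model_output: str) -> dict:
        return {"alert": "suspicious_activity",
                "output_hash": sha256_hex(model_output)}

    def verify_alert(alert: dict, model_output: str) -> bool:
        # An alert that fails this check cannot support a defensible SAR.
        return alert["output_hash"] == sha256_hex(model_output)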
diff --git a/.anchor/government/RBI_Regulations.anchor b/.anchor/government/RBI_Regulations.anchor new file mode 100644 index 0000000..6f0d3c5 --- /dev/null +++ b/.anchor/government/RBI_Regulations.anchor @@ -0,0 +1,282 @@ +type: framework +namespace: RBI +version: "2025-08" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "RBI Framework for Responsible and Ethical Enablement of AI (FREE-AI)" +source_url: "https://rbidocs.rbi.org.in/rdocs/PublicationReport/Pdfs/FREEAIR130820250A24FF2D4578453F824C72ED9F5D5851.PDF" +source_date: "August 13, 2025" +credit: > + The Reserve Bank of India FREE-AI Report (August 2025) issued 26 + mandatory recommendations for AI deployed in financial services, + structured around 7 sutras and 6 strategic pillars. This framework + file maps those recommendations to Anchor enforcement mechanisms. + Full report: RBI Expert Committee on FREE-AI, August 2025. +layer_2_status: > + Rules marked obligation_type: provenance, audit, or disclosure + depend on AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. These rules are specified as + designed and will be enforced once Layer 2 ships. +seal: "sha256:PENDING" + +rules: + + - id: "RBI-006" + name: "Board-Approved AI Policy" + original_id: "Recommendation 6" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + constitution.anchor + policy.anchor as the machine-readable + board-approved policy artifact. anchor audit --report generates + the compliance disclosure document. + severity: "blocker" + min_severity: "blocker" + description: > + Every regulated entity must formulate a board-approved AI policy + covering adoption areas, risk appetite, governance framework, + and periodic review mechanism. Board-level accountability is + mandatory and cannot be delegated to vendors or technical teams. + Anchor's sealed constitution.anchor and policy.anchor together + constitute the machine-readable equivalent of this policy — + cryptographically signed, version-controlled, and auditable. + + - id: "RBI-007" + name: "Per-Decision Audit Trail — CIMS Reportable" + original_id: "Recommendation 7" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + DAC AuditEntry chain with cims_payload() method. Every AI + decision produces an AuditEntry with entry_id, timestamp, + model_id, model_version, input_hash, output_hash, violations, + risk_level, chain_hash, and signature. cims_payload() serializes + to RBI CIMS-reportable JSON on demand. Layer 2 in development. + severity: "blocker" + min_severity: "blocker" + description: > + Documented audit trail per AI decision must be maintained and + reportable to the RBI CIMS portal on demand. This is the core + enforcement mechanism for AI governance in lending and credit + decisions. The RBI has no fine ceiling for non-compliance with + this requirement. Every AI-assisted decision — credit approval, + fraud flag, customer service routing — must have a corresponding + audit record that proves what the model decided, on what input, + at what version, under which governance rules. + + - id: "RBI-009" + name: "AI Liability Framework — Non-Transferable" + original_id: "Recommendation 9" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + chain_hash + signature in AuditEntry provides cryptographic + non-repudiation. 
+ The deploying RE's AuditEntry proves ownership + of every AI decision — vendor liability cannot be claimed when + the decision chain is signed by the RE's key. Layer 2 in development. + severity: "blocker" + min_severity: "blocker" + description: > + Regulated entities are accountable for the consequences of + every AI decision they deploy, regardless of whether the model + was built by a third-party vendor. Vendor liability does not + transfer — CFPB, FCA, and RBI have all confirmed this explicitly. + The RE that deploys the model owns every decision that model makes. + Anchor's cryptographic audit chain provides the non-repudiation + proof that establishes this ownership — if your key signed the + AuditEntry, you own the decision. + + - id: "RBI-012" + name: "Regulator Query Access to Audit Chain" + original_id: "Recommendation 12" + maps_to: "DAC-AuditLog" + obligation_type: audit + anchor_mechanism: > + DAC AuditLog.verify_chain() method and /audit API endpoint + expose the full tamper-evident audit chain for regulator + inspection in real time. Layer 2 in development. + severity: "blocker" + min_severity: "error" + description: > + RBI must be able to build internal AI expertise and conduct + supervisory review of AI systems deployed by regulated entities. + This requires that audit trails be queryable by the regulator + — not just internally logged. Anchor's /audit endpoint exposes + the full DAC chain for regulator inspection, with verify_chain() + providing real-time tamper detection. A regulator can verify + the integrity of the entire audit history in a single API call. + + - id: "RBI-014" + name: "AI Credit Decisions — Explainability Mandatory" + original_id: "Recommendation 14" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence violation fires on black-box + credit decision code. adverse_action_reasons() method on + AuditEntry produces CFPB and RBI compliant reason codes. + CREDIT-001 violation fires when denial output has no reason + code field. Layer 1 detection active now. + severity: "blocker" + min_severity: "blocker" + description: > + AI-assisted credit decisions must be explainable and auditable + through the CIMS portal. Specific reason codes are required for + every adverse action. The RBI explicitly rejects the position + that algorithmic complexity is a valid reason for opaque decisions. + Goldman Sachs paid $45M to the CFPB in October 2024 for exactly + this failure — an AI credit model that could not explain its + decisions at the individual decision level. The same enforcement + logic applies under the RBI mandate for Indian regulated entities. + + - id: "RBI-015" + name: "Data Lifecycle Governance Framework" + original_id: "Recommendation 15" + maps_to: "PRV-001" + obligation_type: detection + anchor_mechanism: > + PRV-001 PII leakage detection active in Layer 1. DATA-* + violation category covers data governance gaps. PROV-003 + provenance violation fires when AI output has no data + lineage metadata. Layer 1 detection active now. + severity: "error" + min_severity: "error" + description: > + Regulated entities must implement data governance practices + covering collection, storage, processing, and deletion of data + used in AI systems. Must align with DPDP Act 2023. Data lineage + is mandatory — every AI output must be traceable back to the + data sources that influenced it. This requirement is not + satisfied by policy documents — it requires technical controls + that can be demonstrated to a regulator.
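RBI-007 and RBI-014 together imply a per-decision record that can be serialized for the CIMS portal with adverse-action reason codes attached. A sketch of what that serialization might look like; the JSON layout below is an assumption for illustration, not an official CIMS schema:

    # Illustrative CIMS-style payload (RBI-007 / RBI-014). The field layout is
    # assumed for this sketch; the real CIMS format is defined by the RBI.
    import json

    def cims_payload(entry: dict, reasons: list) -> str:
        return json.dumps({
            "entry_id": entry["entry_id"],
            "model_id": entry["model_id"],
            "model_version": entry["model_version"],
            "risk_level": entry.get("risk_level", "unknown"),
            "adverse_action_reasons": reasons,  # e.g. ["debt_to_income_too_high"]
            "chain_hash": entry["chain_hash"],
        }, indent=2)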
+ + - id: "RBI-017" + name: "Product Approval Process for AI Features" + original_id: "Recommendation 17" + maps_to: "LEG-002" + obligation_type: detection + anchor_mechanism: > + anchor check in CI/CD pipeline acts as the technical gate + in the product approval process. A failing audit blocks + deployment. The violation report is the governance sign-off + artifact. Layer 1 active now. + severity: "blocker" + min_severity: "error" + description: > + Product approval processes must be expanded to include + AI-related aspects. Any product using AI in customer-facing + decisions requires governance sign-off before launch. Running + anchor check as a required CI/CD step satisfies this requirement + technically — a passing audit with zero BLOCKER or ERROR + violations constitutes the governance gate that must be cleared + before deployment. + + - id: "RBI-018" + name: "Cybersecurity Augmentation — AI-Specific Threats" + original_id: "Recommendation 18" + maps_to: ["SEC-001", "SEC-007"] + obligation_type: detection + anchor_mechanism: > + SEC-001 prompt injection, SEC-002 data poisoning, SEC-003 + model tampering, AGT-003 MCP compromise all fire in Layer 1 + static analysis. Full coverage of AI-specific cybersecurity + threats listed in RBI FREE-AI Pillar 5. + severity: "blocker" + min_severity: "error" + description: > + The RBI Cyber Security Framework must be extended to cover + AI-specific risks including model poisoning, adversarial attacks, + prompt injection, and AI incident reporting protocols. These + are not hypothetical risks — they are active attack vectors + against financial AI systems. Anchor's SEC- and AGT- domain + rules provide the technical detection layer for every + AI-specific cybersecurity threat enumerated in FREE-AI Pillar 5. + + - id: "RBI-019" + name: "Algorithmic Fairness Audits — Mandatory" + original_id: "Recommendation 19" + maps_to: "ETH-001" + obligation_type: detection + anchor_mechanism: > + ETH-001 bias and discrimination detection active in Layer 1. + BIAS-* violation category fires on protected attribute usage + in feature vectors and decision outputs. + severity: "error" + min_severity: "error" + description: > + Regular algorithmic fairness audits are mandatory for AI systems + used in credit, lending, and customer decisions. Bias monitoring + and bias testing are not optional best practices — they are + regulatory obligations. Running anchor check with ETH-001 active + constitutes the technical layer of this audit obligation. + The audit report generated by anchor audit --report provides + the documented evidence of fairness testing that regulators + can inspect. + + - id: "RBI-024" + name: "AI Inventory — Supervisory Inspection" + original_id: "Recommendation 24" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + model_version + model_id in every AuditEntry constitutes the + AI inventory record per decision. SUP-003 versioning drift + violation fires when model version is undeclared or inconsistent. + Layer 2 in development. + severity: "error" + min_severity: "error" + description: > + Regulated entities must maintain an AI inventory of all deployed + models, use cases, dependencies, and risk profiles — available + for supervisory inspection at any time. Anchor's AuditEntry + records model_id and model_version per decision, creating a + continuous, tamper-evident inventory of every model that has + made a decision. 
This is not a static spreadsheet — it is a + live, cryptographically signed record of every AI system in + production. + + - id: "RBI-025" + name: "Risk-Based AI Audit Framework" + original_id: "Recommendation 25" + maps_to: "DAC-AuditLog" + obligation_type: audit + anchor_mechanism: > + anchor audit command produces the internal audit artifact. + DAC verify_chain() provides tamper-evident audit chain for + third-party auditors. /audit endpoint exposes the chain for + independent audit firms. anchor audit --report generates + the biannual audit report artifact. Layer 2 in development + for full DAC audit support. + severity: "blocker" + min_severity: "error" + description: > + Internal audits must be proportional to AI risk level. + Independent third-party audits are required for high-risk + or complex AI use cases. The audit framework must be reviewed + and updated biannually to incorporate emerging risks and + regulatory developments. Anchor satisfies the technical audit + requirement — the violation report, DAC chain, and verify_chain() + output constitute the audit artifacts that internal and external + auditors consume. + + - id: "RBI-026" + name: "Mandatory AI Disclosures and Compliance Toolkit" + original_id: "Recommendation 26" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + anchor audit --report generates the JSON and Markdown compliance + report that feeds annual disclosure requirements. The sealed + constitution.anchor SHA-256 hash provides the cryptographic + attestation of the compliance toolkit. + severity: "error" + min_severity: "warning" + description: > + Regulated entities must include AI governance disclosures in + annual reports covering AI governance frameworks, adoption areas, + consumer protection measures, and grievance redressal mechanisms. + Anchor's audit report output provides the structured compliance + evidence that feeds these disclosures. The constitution.anchor + seal provides cryptographic proof that the governance framework + was active and enforced during the reporting period. diff --git a/.anchor/government/SEBI_Regulations.anchor b/.anchor/government/SEBI_Regulations.anchor new file mode 100644 index 0000000..5222f02 --- /dev/null +++ b/.anchor/government/SEBI_Regulations.anchor @@ -0,0 +1,207 @@ +type: framework +namespace: SEBI +version: "2025-06" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "SEBI Consultation Papers on AI/ML in Securities Markets" +source_url: "https://www.sebi.gov.in/reports-and-statistics/reports/jun-2025/consultation-paper-on-guidelines-for-responsible-usage-of-ai-ml-in-indian-securities-markets_94687.html" +source_date: "June 2025" +credit: > + Securities and Exchange Board of India consultation papers on + AI/ML governance (November 2024 and June 2025). SEBI requires + all market participants using AI/ML tools to be responsible for + compliance with all applicable laws regardless of the method or + degree of AI adoption. Third-party vendor liability does not + transfer to SEBI-regulated entities. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. 
+seal: "sha256:PENDING" + +rules: + + - id: "SEBI-001" + name: "Senior Management Oversight — Designated Responsibility" + original_id: "SEBI AI/ML Requirement 1" + maps_to: "ETH-003" + obligation_type: disclosure + anchor_mechanism: > + Board-approved policy.anchor with sealed constitution.anchor + constitutes the governance policy artifact. anchor audit + --report generates the oversight documentation for senior + management sign-off. + severity: "blocker" + min_severity: "error" + description: > + Market participants using AI/ML must designate senior management + with technical expertise to oversee AI tool performance and + control. Board-level oversight with named accountability is + mandatory and cannot be delegated to vendors or technical teams. + SEBI requires that senior management understand and be + accountable for every AI tool deployed in trading, advisory, + and compliance functions. + + - id: "SEBI-002" + name: "Model Validation, Documentation and Interpretability" + original_id: "SEBI AI/ML Requirement 2" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on black-box model usage. + TRANS-* violation category covers transparency and documentation + gaps. PROV-001 fires on AI output without model version. + Layer 1 detection active now. + severity: "error" + min_severity: "error" + description: > + Market participants must maintain validation documentation and + ensure interpretability of AI models. Outcomes must be + explainable, traceable, and repeatable. Documentation explaining + the logic of AI/ML models is mandatory — not optional. SEBI + specifically requires that AI model decisions be capable of + being explained to the regulator, the firm's board, and + affected investors. + + - id: "SEBI-003" + name: "Periodic Accuracy Reporting to SEBI" + original_id: "SEBI AI/ML Requirement 3" + maps_to: "DAC-AuditLog" + obligation_type: audit + anchor_mechanism: > + anchor audit --report generates JSON and Markdown accuracy + and compliance reports. /audit endpoint exposes the full + audit chain for regulator access. Layer 2 in development + for full continuous reporting support. + severity: "error" + min_severity: "error" + description: > + Market participants must share AI/ML accuracy results and + audit findings with SEBI on a periodic basis. These are not + internal records — they must be shareable with the regulator + in a structured format. Anchor's audit report output provides + the structured compliance evidence that satisfies this periodic + reporting obligation. + + - id: "SEBI-004" + name: "5-Year Input and Output Data Retention" + original_id: "SEBI AI/ML Requirement 4" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + DAC AuditEntry records input_hash + output_hash per decision + with ISO 8601 timestamp. Append-only log provides 5-year + retention with tamper-evident integrity. Layer 2 in development. + severity: "blocker" + min_severity: "blocker" + description: > + Market participants must maintain documentation of all models + and store input and output data for at least 5 years. This + applies to all AI systems used in trading, advisory, and + compliance functions. Five years is the statutory limitation + period for most SEBI enforcement actions — data retention for + this period ensures that the firm can defend against any + regulatory inquiry within that window. 
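SEBI-004's five-year retention maps naturally onto an append-only record per decision. A hedged sketch; the path and record layout are assumptions, and a production system would add WORM storage and per-entry signatures:

    # Illustrative append-only retention record for SEBI-004. Layout assumed;
    # production systems need WORM media and signatures on top of this.
    import datetime, hashlib, json

    def retain(path: str, model_version: str,
               input_data: str, output_data: str) -> None:
        record = {
            "ts": datetime.datetime.now(datetime.timezone.utc).isoformat(),
            "model_version": model_version,
            "input_hash": hashlib.sha256(input_data.encode()).hexdigest(),
            "output_hash": hashlib.sha256(output_data.encode()).hexdigest(),
        }
        with open(path, "a", encoding="utf-8") as f:  # append, never rewrite
            f.write(json.dumps(record) + "\n")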
+ + - id: "SEBI-005" + name: "Segregated Testing Environment Before Deployment" + original_id: "SEBI AI/ML Requirement 5" + maps_to: "LEG-002" + obligation_type: detection + anchor_mechanism: > + anchor check in CI/CD acts as the pre-deployment governance + gate. Diamond Cage WASM sandbox provides the segregated + runtime testing environment for high-risk operations. + Layer 1 active now. + severity: "blocker" + min_severity: "error" + description: > + AI/ML models must be tested in a segregated environment before + deployment. Shadow testing with live traffic is required. + Models must be validated in both stressed and unstressed + market conditions before going live. SEBI explicitly requires + this because live market conditions — volatility spikes, + liquidity crises, unusual order patterns — may expose model + failures that standard testing environments do not surface. + + - id: "SEBI-006" + name: "Continuous Monitoring as AI Models Evolve" + original_id: "SEBI AI/ML Requirement 6" + maps_to: "SUP-003" + obligation_type: provenance + anchor_mechanism: > + SUP-003 versioning drift violation fires on undeclared model + version changes. AnchorRuntime continuous eval provides + post-deployment monitoring. model_version in AuditEntry + per decision enables drift detection. Layer 2 in development. + severity: "error" + min_severity: "error" + description: > + SEBI explicitly notes that AI models may change behavior over + time. Continuous monitoring systems are required beyond + traditional one-time testing. Model drift detection is a + regulatory expectation — not a best practice. A model that + was validated at deployment is not necessarily the same model + six months later, and SEBI requires that organizations have + systems in place to detect and respond to that drift. + + - id: "SEBI-007" + name: "Investor Disclosures for AI-Driven Decisions" + original_id: "SEBI AI/ML Requirement 7" + maps_to: "ETH-002" + obligation_type: disclosure + anchor_mechanism: > + ETH-002 explainability absence detection. adverse_action_reasons() + provides investor-facing reason codes. anchor audit --report + generates disclosure artifacts. Layer 1 active now. + severity: "error" + min_severity: "warning" + description: > + Market participants using AI in customer-facing decisions must + disclose product features, purpose, risks, model accuracy, + fees, and data quality used for decisions. Language must be + comprehensible to investors, not just technical staff. Investor + grievance mechanisms for AI-driven decisions must be established + and documented. + + - id: "SEBI-008" + name: "No Discriminatory AI Outputs — Fairness Mandatory" + original_id: "SEBI AI/ML Requirement 8" + maps_to: "ETH-001" + obligation_type: detection + anchor_mechanism: > + ETH-001 bias and discrimination detection active in Layer 1. + BIAS-* violation category fires on protected attribute usage + in feature vectors and decision outputs. + severity: "error" + min_severity: "error" + description: > + AI/ML models must not favor or discriminate against any group + of clients or customers. Data quality must be sufficiently + broad, relevant, and complete to support fair outcomes. + Processes to identify and remove biases from datasets are + required. Training courses on data bias are mandatory for + data scientists developing AI systems for SEBI-regulated + market participants. 
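The static half of SEBI-008 (and of ETH-001 generally) can start as something as blunt as refusing protected attributes in a model's feature list. A toy sketch; the attribute set below is illustrative, not a legal enumeration:

    # Toy illustration of an ETH-001 / BIAS-* style check: flag protected
    # attributes appearing in a feature list. The set is illustrative only.
    PROTECTED = {"race", "religion", "caste", "sex", "gender", "age",
                 "marital_status", "national_origin"}

    def bias_findings(features: list) -> list:
        return [f for f in features if f.lower() in PROTECTED]

    assert bias_findings(["income", "Age", "credit_utilization"]) == ["Age"]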
+ + - id: "SEBI-010" + name: "Third-Party AI Vendor Accountability — No Transfer" + original_id: "SEBI AI/ML Requirement 10" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + Non-repudiation chain — the deploying firm's AuditEntry + signed with their key proves ownership of every AI decision + regardless of which vendor's model produced it. Layer 2 + in development. + severity: "blocker" + min_severity: "blocker" + description: > + Using a third-party AI tool does not transfer regulatory + liability to the vendor. Market participants are solely + responsible for the consequences of deploying any AI tool + regardless of who built it. SLAs with AI vendors must include + regulatory compliance obligations — but even contractual + protections do not transfer the regulatory liability that + SEBI places on the market participant. diff --git a/.anchor/government/SEC_Regulations.anchor b/.anchor/government/SEC_Regulations.anchor new file mode 100644 index 0000000..07aae76 --- /dev/null +++ b/.anchor/government/SEC_Regulations.anchor @@ -0,0 +1,157 @@ +type: framework +namespace: USSEC +version: "2026" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "US Securities and Exchange Commission — 2026 Examination Priorities and AI Governance Guidance" +source_url: "https://www.sec.gov/exams/announcement/exam-priorities-2026.pdf" +source_date: "January 2026" +credit: > + US Securities and Exchange Commission 2026 Examination Priorities, + published January 2026 by the SEC Division of Examinations. AI + governance was named the top examination priority for 2026 — + overtaking cryptocurrency for the first time in the agency's + published examination schedule. The SEC has indicated that + 'AI washing' — overstating AI capabilities or governance maturity + — constitutes securities fraud exposure under existing law. + Applies to SEC-registered investment advisers, broker-dealers, + and any firm using AI in securities-related activities. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. +seal: "sha256:PENDING" + +rules: + + - id: "USSEC-001" + name: "AI Governance — Top Examination Priority 2026" + original_id: "SEC 2026 Examination Priority 1" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Sealed constitution.anchor + anchor audit --report generates + the AI governance documentation package that demonstrates + active, enforceable governance to SEC examiners. The SHA-256 + sealed constitution proves the governance framework was in + place and enforced during the examination period. + severity: "blocker" + min_severity: "blocker" + description: > + The SEC Division of Examinations identified AI governance as + the top examination priority for 2026 — the first time in the + agency's history that AI has overtaken cryptocurrency as the + primary focus. SEC-registered firms using AI in investment + advice, trading, compliance, or customer communications are + subject to examination specifically on AI governance practices. + Examiners will review whether firms have adequate policies, + procedures, and controls governing their AI systems — and + whether those controls are actually enforced, not just + documented on paper. Anchor's sealed, version-controlled + governance stack provides the technical evidence that + governance is active and enforceable. 
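USSEC-001's examination evidence rests on the SHA-256 seals recorded in .anchor.lock. The sync command in this release performs that comparison; the function below is a simplified stand-in to show the shape of the check:

    # Simplified stand-in for the hash comparison `anchor sync` performs:
    # verify each governed file against its seal in .anchor.lock.
    import hashlib, pathlib, yaml

    def verify_seals(lock_path: str = ".anchor/.anchor.lock",
                     root: str = ".anchor") -> dict:
        lock = yaml.safe_load(pathlib.Path(lock_path).read_text(encoding="utf-8"))
        results = {}
        for rel, expected in lock["files"].items():
            digest = hashlib.sha256(pathlib.Path(root, rel).read_bytes()).hexdigest()
            results[rel] = (digest == expected)
        return results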
+ + - id: "USSEC-002" + name: "AI Washing — Securities Fraud Exposure" + original_id: "SEC AI Washing Guidance 2026" + maps_to: "ALN-002" + obligation_type: detection + anchor_mechanism: > + ALN-002 goal misrepresentation fires when AI system behavior + diverges from declared purpose. ETH-002 explainability absence + fires on black-box AI claims without verifiable explanation. + Layer 1 detection active now. + severity: "blocker" + min_severity: "blocker" + description: > + The SEC has indicated that overstating AI capabilities, + misrepresenting AI governance maturity, or claiming AI-driven + investment processes that are not actually AI-driven constitutes + securities fraud exposure under existing law — not just a + regulatory violation. AI washing is the AI equivalent of + greenwashing: making claims about AI usage, accuracy, or + governance that are not substantiated by actual technical + controls. Firms that market AI-powered investment products + must be able to demonstrate that the AI described actually + exists, works as described, and is governed as claimed. + Anchor's audit chain provides the technical proof that + governance claims are substantiated. + + - id: "USSEC-003" + name: "AI in Investment Advice — Fiduciary Obligations" + original_id: "SEC Regulation Best Interest + AI Guidance 2026" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on AI investment + recommendation code without explainability hooks. + adverse_action_reasons() provides SEC-compliant reason + codes for AI-driven investment recommendations. + Layer 1 active now. + severity: "blocker" + min_severity: "error" + description: > + Investment advisers using AI to generate investment + recommendations must satisfy Regulation Best Interest + obligations — the AI recommendation must be in the best + interest of the customer, not optimized for firm revenue. + The SEC has made clear that using an AI model does not + transfer or dilute the fiduciary obligations of the + registered investment adviser. AI-generated recommendations + must be explainable, traceable, and demonstrably aligned + with the customer's investment profile and risk tolerance. + A black-box AI generating investment advice without + explainability controls fails Regulation Best Interest. + + - id: "USSEC-004" + name: "AI Model Risk in Trading Systems — Audit Trail" + original_id: "SEC 2026 Examination Priority — Trading AI" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + DAC AuditEntry records every AI-assisted trading decision + with model_id, model_version, input_hash, output_hash, + timestamp, and chain_hash. Full tamper-evident audit chain + survives legal discovery. Layer 2 in development. + severity: "blocker" + min_severity: "error" + description: > + AI systems used in trading — algorithmic trading, order + routing, risk management, and market surveillance — are + subject to SEC examination specifically on model risk + management and audit trail requirements. The SEC expects + firms to maintain records of AI trading decisions sufficient + to reconstruct the circumstances of any trade under review. + An AI trading system that cannot produce a tamper-evident + record of what it decided, when, on what data, and at what + model version cannot satisfy SEC examination requirements + and creates significant regulatory exposure in the event + of a market disruption inquiry. 
+ + - id: "USSEC-005" + name: "Cybersecurity of AI Systems — SEC Rule 10" + original_id: "SEC Cybersecurity Rule (Rule 10) + AI Guidance 2026" + maps_to: "SEC-001" + obligation_type: detection + anchor_mechanism: > + SEC-001 prompt injection, SEC-002 data poisoning, SEC-003 + model tampering, AGT-001 through AGT-005 agentic security + rules provide the technical detection layer for AI-specific + cybersecurity threats under SEC Rule 10. + Layer 1 active now. + severity: "blocker" + min_severity: "error" + description: > + SEC Rule 10 on cybersecurity requires registered firms to + have policies and procedures reasonably designed to address + cybersecurity risks. The SEC's 2026 examination priorities + extend this to AI-specific cybersecurity threats — prompt + injection, model tampering, adversarial attacks, and AI + supply chain compromise. Firms using AI in trading or + investment advisory must demonstrate that their AI systems + are protected against the specific attack vectors that + target AI infrastructure, not just general cybersecurity + threats. Anchor's SEC- and AGT- domain rules satisfy the + technical detection requirement for AI-specific cybersecurity + under SEC Rule 10. diff --git a/.anchor/mitigation.anchor b/.anchor/mitigation.anchor new file mode 100644 index 0000000..239feb4 --- /dev/null +++ b/.anchor/mitigation.anchor @@ -0,0 +1,86 @@ +# ============================================================================= +# ANCHOR MITIGATION CATALOG — Detection Patterns (v3.1.0) +# ============================================================================= +# This file defines the HOW — the detection patterns for risks. +# Patterns are "regex" (line-level) or "ast" (tree-sitter queries). +# +# DESIGN PRINCIPLE: Patterns must be CONTEXT-AWARE. +# ✅ Flag: prompt = f"Process: {user_input}" +# ❌ Skip: click.echo(f"Loaded {count} rules") +# +# COMPLETE COVERAGE: All ANC-001 through ANC-023 (FINOS 23 rules) +# ============================================================================= + +version: "3.1.0" + +mitigations: + # --- SEC-006: Raw Network Access --- + - id: "MIT-001-A" + rule_id: "SEC-006" + name: "Public LLM Endpoint Detection" + match: + type: "regex" + # Requires SDK instantiation or URL assignment — skips strings and comments + pattern: >- + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*(=\s*["']https?://api\.(openai|anthropic|cohere)\.(com|ai)|\bopenai\.Client|\banthropic\.Anthropic\(|\bcohere\.Client) + message: "Direct call to public LLM API detected. Route through a PII-scrubbing proxy." + severity: "error" + + # --- SEC-002: Data Poisoning --- + - id: "MIT-002-A" + rule_id: "SEC-002" + name: "Unencrypted Vector Store Upsert" + match: + type: "regex" + # Refinement: removed .add() as it's too common for sets/lists. + # Vector stores typically use add_texts, add_documents, or upsert. + pattern: >- + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\.\b(upsert|add_texts|add_documents)\s*\((?!.*encrypt) + message: "Vector store write detected without encryption. Embeddings can leak sensitive data via inversion attacks." + severity: "error" + + # --- ALN-001: Hallucination --- + - id: "MIT-003-A" + rule_id: "ALN-001" + name: "LLM Output Without Validation" + match: + type: "regex" + pattern: >- + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\.\b(ChatCompletion|completions|messages)\.(create|send)\s*\( + message: "LLM API call detected. Ensure output is validated before use (e.g., schema check, grounding)."
+ severity: "error" + + # --- SEC-007: Shell Injection (os-level) --- + - id: "MIT-014-A" + rule_id: "SEC-007" + name: "Shell Command Execution" + match: + type: "regex" + pattern: >- + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\bos\.(system|popen|spawn)\s*\( + message: "Potential shell injection via os.system detected. Use subprocess with list arguments instead." + severity: "blocker" + + # --- SEC-007: Shell Injection (subprocess-level) --- + - id: "MIT-014-B" + rule_id: "SEC-007" + name: "Unsandboxed Subprocess in Agent" + match: + type: "regex" + # Excludes occurrences inside string literals or comments + pattern: >- + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\bsubprocess\.(run|call|Popen|check_output)\s*\( + message: "Native subprocess execution detected. Use Diamond Cage (WASM) sandboxing for agent tools." + severity: "blocker" + + # --- SEC-004: Credential Harvesting --- + - id: "MIT-004-A" + rule_id: "SEC-004" + name: "Bulk Env Variable Access" + match: + type: "regex" + # Only fire on bulk access or sensitive key names; (?i:) keeps the flag scoped + pattern: >- + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*(?:\bos\.environ\.(?:copy|items)\(\)|\bos\.environ\s*\[[^\]]*(?i:TOKEN|KEY|SECRET|PASSWORD|CREDENTIAL|API)[^\]]*\]|\{\s*\*\*os\.environ) + message: "Broad environment variable access detected. Agents may harvest secrets from env." + severity: "error" diff --git a/.anchor/policy.anchor b/.anchor/policy.anchor new file mode 100644 index 0000000..6dd9faa --- /dev/null +++ b/.anchor/policy.anchor @@ -0,0 +1,26 @@ +# ============================================================================= +# POLICY — Project Policy +# ============================================================================= +# This file is for YOUR project-specific rules. +# Committed to git with the governance stack (see .gitignore); keep secrets out. +# +# RULES: +# 1. Can only RAISE severity (ERROR -> BLOCKER is allowed) +# 2. Cannot LOWER severity — the floor is absolute +# 3. Cannot suppress constitutional rules + # Example: raise SEC-006 from error to blocker + # - id: SEC-006 + # severity: blocker + # reason: > + # Our PCI-DSS scope requires blocking all direct LLM API calls. + +custom_rules: + # Example: add a company-specific rule + # - id: INTERNAL-001 + # name: Internal vault access pattern + # severity: blocker + # detection: + # method: regex + # pattern: 'vault\.read\((?!approved_keys)' + # description: > + # Vault read operations must only access approved_keys namespace. diff --git a/.gitignore b/.gitignore index 2f9d92c..f92fb80 100644 --- a/.gitignore +++ b/.gitignore @@ -44,10 +44,18 @@ docs_framework/ *.swp *.swo -# Anchor Security & Governance (Local Settings) -/.anchor/ - -# Anchor governance cache/logs +# Anchor Security & Governance +# We commit the manifest and policies, but ignore cache and local logs .anchor/cache/ -.anchor/logs/*.tmp -.anchor/policy.anchor +.anchor/telemetry/ +.anchor/reports/ +.anchor/violations/ +.anchor/logs/ +.anchor/branding/ +!.anchor/constitution.anchor +!.anchor/mitigation.anchor +!.anchor/policy.anchor +!.anchor/domains/*.anchor +!.anchor/frameworks/*.anchor +!.anchor/government/*.anchor +!
.anchor/.anchor.lock diff --git a/GOVERNANCE.lock b/GOVERNANCE.lock index b11b52d..c078ebf 100644 --- a/GOVERNANCE.lock +++ b/GOVERNANCE.lock @@ -18,6 +18,6 @@ files: government/CFPB_Regulations.anchor: 7005b47e40061e1d47c0ee42439c3c2897a701337359490b09f8113d6dc87ee7 government/EU_AI_Act.anchor: 05063bdd1d5af44d08cedba38bc9549b15ee567d056da7afa217d7da7a185416 government/FCA_Regulations.anchor: f23b61075d323be487b6218a2c0e353d8df445bf3e13904f977edf895123973e - government/RBI_Regulations.anchor: a69dcd38cb0306b6886c1c1aebe8594e9b4e45acbb48d16feeb64615edb9d2b7 + government/RBI_Regulations.anchor: 0337e51a8520507c951f68acd3ba207f30d015e586007be8a13db5c56a978e40 government/SEBI_Regulations.anchor: 38dac4c568ecf52d89ee49b027b401d8e8a46b03b40d9f99e9bdf40534247a15 government/SEC_Regulations.anchor: b7819b6dd874892ef5005eb5033221ac4327146dc060239a1e3fbadaeecd4c07 diff --git a/anchor/__init__.py b/anchor/__init__.py index 57bd456..28f8679 100644 --- a/anchor/__init__.py +++ b/anchor/__init__.py @@ -2,4 +2,4 @@ Anchor-Audit — The Federated Governance Engine for AI """ -__version__ = "4.3.0" +__version__ = "4.3.1" diff --git a/anchor/cli.py b/anchor/cli.py index df2b35d..5223ee4 100644 --- a/anchor/cli.py +++ b/anchor/cli.py @@ -15,10 +15,11 @@ verify_integrity, ) from anchor.core.config import settings +from anchor.utils.output import ANCHOR_ICON, CHECK, CROSS, WARN, BAR, ARROW from anchor import __version__ -__version__ = "4.1.4" +__version__ = "4.3.1" @click.group() @click.version_option(version=__version__) @@ -173,7 +174,7 @@ def init(domains, frameworks, regulators, sandbox, all_items, force, no_sign, po for d in domain_list: if d not in AVAILABLE_DOMAINS: click.secho( - f" ✗ Unknown domain: '{d}'. " + f" {CROSS} Unknown domain: '{d}'. " f"Available: {', '.join(AVAILABLE_DOMAINS.keys())}", fg="red" ) @@ -190,7 +191,7 @@ def init(domains, frameworks, regulators, sandbox, all_items, force, no_sign, po for fw in fw_list: if fw not in AVAILABLE_FRAMEWORKS: click.secho( - f" ✗ Unknown framework: '{fw}'. " + f" {CROSS} Unknown framework: '{fw}'. " f"Available: {', '.join(AVAILABLE_FRAMEWORKS.keys())}", fg="red" ) @@ -207,7 +208,7 @@ def init(domains, frameworks, regulators, sandbox, all_items, force, no_sign, po for reg in reg_list: if reg not in AVAILABLE_REGULATORS: click.secho( - f" ✗ Unknown regulator: '{reg}'. " + f" {CROSS} Unknown regulator: '{reg}'. 
" f"Available: {', '.join(AVAILABLE_REGULATORS.keys())}", fg="red" ) @@ -240,7 +241,7 @@ def copy_file(relative_path, label): click.secho(f" [SKIP] Already exists: {label}", fg="yellow") return True shutil.copy2(src, dst) - click.secho(f" [OK] {label}", fg="green") + click.secho(f" [{CHECK}] {label}", fg="green") return True # ── Copy domain files ───────────────────────────────────── @@ -283,6 +284,16 @@ def copy_file(relative_path, label): manifest_data = yaml.safe_load(f) updated = False + # Update domains + for domain in manifest_data.get("core_domains", []): + # Robust matching: check if filename (minus extension) matches requested domain + d_path = domain.get("path", "") + d_name = os.path.basename(d_path).replace(".anchor", "").lower() + if d_name in requested_domains: + if not domain.get("active"): + domain["active"] = True + updated = True + # Update frameworks for fw in manifest_data.get("frameworks", []): if fw["namespace"].lower() in requested_frameworks: @@ -298,6 +309,8 @@ def copy_file(relative_path, label): updated = True if updated: + print("DEBUG: Final updated manifest_data:") + print(yaml.dump(manifest_data, default_flow_style=False, sort_keys=False)) with open(dot_anchor_manifest, "w", encoding="utf-8") as f: yaml.dump(manifest_data, f, default_flow_style=False, sort_keys=False) except Exception as e: @@ -414,29 +427,34 @@ def copy_file(relative_path, label): os.chmod(pre_commit_path, 0o755) except Exception: pass - click.secho(" [OK] Git pre-commit hook installed", fg="green") + click.secho(f" [{CHECK}] Git pre-commit hook installed", fg="green") except Exception as e: click.secho(f" WARNING: Could not install git hook: {e}", fg="yellow") # ── Verify Remote Integrity ─────────────────────────────── click.echo("") if no_sign: - click.secho(" ~ Remote integrity fetch skipped (--no-sign)", fg="yellow") + click.secho(f" {WARN} Remote integrity fetch skipped (--no-sign)", fg="yellow") else: - import urllib.request - import urllib.error - try: - req = urllib.request.Request("https://raw.githubusercontent.com/Tanishq1030/anchor/main/GOVERNANCE.lock") - with urllib.request.urlopen(req, timeout=5) as response: - remote_lock = response.read().decode('utf-8') - lock_path = os.path.join(dot_anchor, ".anchor.lock") - with open(lock_path, "w", encoding="utf-8") as f: - f.write(remote_lock) - click.secho(" [OK] Fetched GOVERNANCE.lock from remote", fg="green") - except urllib.error.URLError as e: - click.secho(f" WARNING: Could not fetch GOVERNANCE.lock remotely: {e.reason}", fg="yellow") - except Exception as e: - click.secho(f" WARNING: Failed to fetch remote lockfile: {e}", fg="yellow") + lock_path = os.path.join(dot_anchor, ".anchor.lock") + if os.path.exists(lock_path) and not force: + click.echo(f"\n {WARN} WARNING: .anchor.lock already exists.") + click.echo(" Remote fetch will overwrite your local hashes.") + click.echo(" Use --force to confirm, or --no-sign to skip verification.\n") + else: + import urllib.request + import urllib.error + try: + req = urllib.request.Request(settings.governance_lock_url) + with urllib.request.urlopen(req, timeout=5) as response: + remote_lock = response.read().decode('utf-8') + with open(lock_path, "w", encoding="utf-8") as f: + f.write(remote_lock) + click.secho(f" [{CHECK}] Fetched GOVERNANCE.lock from remote", fg="green") + except urllib.error.URLError as e: + click.secho(f" {WARN} WARNING: Could not fetch GOVERNANCE.lock remotely: {e.reason}", fg="yellow") + except Exception as e: + click.secho(f" {WARN} WARNING: Failed to fetch remote 
diff --git a/anchor/core/config.py b/anchor/core/config.py
index f43f0be..a380e1a 100644
--- a/anchor/core/config.py
+++ b/anchor/core/config.py
@@ -35,6 +35,11 @@ class AnchorSettings(BaseSettings):
         description="URL to fetch the Mitigation Catalog from.",
     )
 
+    governance_lock_url: str = Field(
+        default="https://raw.githubusercontent.com/Tanishq1030/anchor/main/GOVERNANCE.lock",
+        description="URL to fetch the GOVERNANCE.lock integrity file from.",
+    )
+
     # ── Runtime Behaviour ─────────────────────────────────────────
     verbose: bool = Field(
         default=False,
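Because governance_lock_url is now an ordinary settings field, it can be redirected without touching code. This is exactly the override the integration tests below rely on; file:// URIs work on the assumption that the fetch continues to go through urllib:

    from pathlib import Path
    from anchor.core.config import settings

    # Point the engine at a local lockfile instead of the GitHub raw URL
    settings.governance_lock_url = Path("GOVERNANCE.lock").resolve().as_uri()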
diff --git a/anchor/core/constitution.py b/anchor/core/constitution.py
index 2f85bf0..3d97e38 100644
--- a/anchor/core/constitution.py
+++ b/anchor/core/constitution.py
@@ -21,7 +21,7 @@
 # SHA-256 of the official legacy files (optional in V3).
 CONSTITUTION_SHA256 = "17101731EA80A091A4AE10FB8CC548943D24C5A65CBBDA28590CF2EA3262F2EA"
-MITIGATION_SHA256 = "D71DE885992ADF5DE87B6093D64D20F45156674CB85BFAFC6A0492DA40A3DF86"
+MITIGATION_SHA256 = "AD87C86297600C85B2D837D1CE440EF9A70DBE12D078D016EF3C25E42B035D1D"
 
 # =============================================================================
diff --git a/anchor/core/engine.py b/anchor/core/engine.py
index 040f960..b79e7f6 100644
--- a/anchor/core/engine.py
+++ b/anchor/core/engine.py
@@ -262,7 +262,9 @@ def scan_file(self, content: bytes, file_path: str, adapter: LanguageAdapter) ->
             elif rule_type == "regex":
                 # Nested regex support inside 'match' block
                 pattern = match_config.get("pattern")
-                found = self._check_regex(content.decode('utf-8', errors='ignore'), pattern, rule_id=rule.get("id"))
+                decoded_content = content.decode('utf-8', errors='ignore')
+                found = self._check_regex(decoded_content, pattern, rule_id=rule.get("id"))
+
                 for line_num, match_text in found:
                     is_suppressed = False
                     # Aggregate IDs (Canonical + active Frameworks/Regulators)
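The MITIGATION_SHA256 constant pins the bytes of anchor/governance/mitigation.anchor, so it has to be regenerated whenever the catalog changes (as it does later in this diff). One way to recompute it, assuming the constant stays uppercase hex as above:

    import hashlib

    with open("anchor/governance/mitigation.anchor", "rb") as f:
        print(hashlib.sha256(f.read()).hexdigest().upper())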
diff --git a/anchor/core/loader.py b/anchor/core/loader.py
index 37ea54f..592f1ee 100644
--- a/anchor/core/loader.py
+++ b/anchor/core/loader.py
@@ -91,7 +91,9 @@ def verify_remote_lockfile(anchor_dir: Path, offline_attr: str = "warn") -> bool:
     import urllib.request
     import urllib.error
 
-    GOVERNANCE_LOCK_URL = "https://raw.githubusercontent.com/Tanishq1030/anchor/main/GOVERNANCE.lock"
+    from anchor.core.config import settings
+
+    GOVERNANCE_LOCK_URL = settings.governance_lock_url
 
     local_lock_path = anchor_dir / ".anchor.lock"
     lock_data = None
@@ -342,11 +344,17 @@
     # Locate constitution.anchor
     if constitution_path is None:
-        constitution_path = governance_root / "constitution.anchor"
-        if not constitution_path.exists():
-            # Fall back to package root
-            constitution_path = Path(__file__).parent.parent.parent \
-                / "constitution.anchor"
+        if anchor_dir:
+            local_manifest = anchor_dir / "constitution.anchor"
+            if local_manifest.exists():
+                constitution_path = local_manifest
+
+        if constitution_path is None:
+            constitution_path = governance_root / "constitution.anchor"
+            if not constitution_path.exists():
+                # Fall back to package root
+                constitution_path = Path(__file__).parent.parent.parent \
+                    / "constitution.anchor"
 
     manifest = load_manifest(constitution_path)
     seal_check = manifest.engine.get("seal_check", "strict")
@@ -373,8 +381,15 @@ def resolve_path(rel_path: str) -> Path:
         p = governance_root / rel_path
         return p
 
-    # ── STEP 1: Load core domains ─────────────────────────────
+    # ── STEP 1: Load active domains ───────────────────────────
     for domain in manifest.core_domains:
+        # Auto-activate if a local copy of the domain file exists
+        local_path = anchor_dir / domain["path"] if anchor_dir else None
+        is_local = local_path and local_path.exists()
+
+        if not domain.get("active", False) and not is_local:
+            continue
+
         path = resolve_path(domain["path"])
         namespace = domain["namespace"]
         try:
@@ -444,11 +459,17 @@ def resolve_path(rel_path: str) -> Path:
 
     # Also include maps_to relations from frameworks/regulators in the alias chain
     for rid, rule in constitution.rules.items():
-        if rule.maps_to and rule.maps_to in constitution.rules:
-            # If a rule maps to another (e.g. FINOS-014 -> SEC-007)
-            # we treat it as an alias for reporting purposes
-            if rid not in constitution.alias_chain:
-                constitution.alias_chain[rid] = rule.maps_to
+        if rule.maps_to:
+            # Handle both single string and list of strings for multi-ID support
+            mappings = rule.maps_to if isinstance(rule.maps_to, list) else [rule.maps_to]
+            for m_id in mappings:
+                if m_id in constitution.rules:
+                    # If a rule maps to another (e.g. FINOS-014 -> SEC-007),
+                    # we treat it as an alias for reporting purposes.
+                    # For many-to-many, we link to the first valid mapping in the alias chain.
+                    if rid not in constitution.alias_chain:
+                        constitution.alias_chain[rid] = m_id
+                    break
 
     # ── STEP 6: Load policy.anchor ────────────────────────────
     if anchor_dir:
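The alias change above normalizes maps_to so rule authors can write either a single ID or a list of IDs. A self-contained illustration of the resolution order (`first_valid_alias` is a hypothetical helper; the real logic is inline in the loader):

    from typing import List, Optional, Set, Union

    def first_valid_alias(maps_to: Union[str, List[str], None],
                          known_rules: Set[str]) -> Optional[str]:
        """Return the first mapping that resolves to a known rule, else None."""
        if not maps_to:
            return None
        candidates = maps_to if isinstance(maps_to, list) else [maps_to]
        return next((m for m in candidates if m in known_rules), None)

    assert first_valid_alias("SEC-007", {"SEC-007"}) == "SEC-007"
    assert first_valid_alias(["FINOS-014", "SEC-007"], {"SEC-007"}) == "SEC-007"
    assert first_valid_alias(None, {"SEC-007"}) is None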
diff --git a/anchor/governance/mitigation.anchor b/anchor/governance/mitigation.anchor
index 239feb4..26a247d 100644
--- a/anchor/governance/mitigation.anchor
+++ b/anchor/governance/mitigation.anchor
@@ -56,8 +56,9 @@ mitigations:
     name: "Shell Command Execution"
     match:
       type: "regex"
+      # Simplified to match os.system/popen anywhere in the line, ignoring preceding code
       pattern: >-
-        ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\bos\.(system|popen|spawn)\s*\(
+        \bos\.(system|popen|spawn)\s*\(
     message: "Potential shell injection via os.system detected. Use subprocess with list arguments instead."
     severity: "blocker"
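The simplified pattern trades precision for predictability: the old expression tried to skip matches inside string literals, while the new one flags os.system wherever it appears. A quick stdlib check of what that means in practice:

    import re

    NEW = re.compile(r"\bos\.(system|popen|spawn)\s*\(")

    assert NEW.search("os.system('ls')")                # real call: flagged
    assert NEW.search("x = 1; os.popen('ls')")          # preceded by code: now flagged too
    assert NEW.search("msg = 'avoid os.system(...)'")   # inside a string: a false positive the old pattern avoided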
diff --git a/anchor/utils/__init__.py b/anchor/utils/__init__.py
new file mode 100644
index 0000000..a4c50e4
--- /dev/null
+++ b/anchor/utils/__init__.py
@@ -0,0 +1 @@
+# Anchor Utils Package
diff --git a/anchor/utils/output.py b/anchor/utils/output.py
new file mode 100644
index 0000000..b5681e4
--- /dev/null
+++ b/anchor/utils/output.py
@@ -0,0 +1,28 @@
+import sys
+
+def supports_unicode():
+    """Detect if the terminal supports UTF-8/Unicode."""
+    try:
+        # Check standard output encoding
+        encoding = sys.stdout.encoding
+        if encoding:
+            return encoding.lower() in ('utf-8', 'utf-16', 'utf-32')
+        return False
+    except AttributeError:
+        return False
+
+# Symbol constants
+if supports_unicode():
+    ANCHOR_ICON = "⚓"
+    CHECK = "✓"
+    CROSS = "✗"
+    WARN = "!"
+    BAR = "─"
+    ARROW = "→"
+else:
+    ANCHOR_ICON = "[ANCHOR]"
+    CHECK = "OK"
+    CROSS = "X"
+    WARN = "!"
+    BAR = "-"
+    ARROW = "->"
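Downstream usage, as wired into cli.py earlier in this diff: callers import the constants, and the symbols degrade automatically on terminals whose stdout encoding is not UTF-8 (for example older Windows consoles):

    import click
    from anchor.utils.output import BAR, CHECK, CROSS

    click.secho(f"  [{CHECK}] domains/security.anchor", fg="green")  # "✓" or "OK"
    click.secho(f"  [{CROSS}] hash mismatch", fg="red")              # "✗" or "X"
    click.echo(BAR * 80)                                             # "─" or "-"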
diff --git a/setup.py b/setup.py
index 8ab61c7..f1b523f 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
 setup(
     name="anchor-audit",
-    version="4.3.0",
+    version="4.3.1",
     description="The Federated Governance Engine for AI (Universal Multi-Language)",
     long_description=long_description,
     long_description_content_type="text/markdown",
diff --git a/tests/integration/test_v4_cli.py b/tests/integration/test_v4_cli.py
index ef5270d..4583485 100644
--- a/tests/integration/test_v4_cli.py
+++ b/tests/integration/test_v4_cli.py
@@ -59,16 +59,88 @@ def test_v4_check_with_federated_rules(temp_project):
     init_result = runner.invoke(main, ["init", "--domains", "security", "--regulators", "rbi"])
     assert init_result.exit_code == 0, f"Init failed: {init_result.output}"
 
+import pytest
+import os
+import shutil
+from pathlib import Path
+from click.testing import CliRunner
+from anchor.cli import cli as main
+
+@pytest.fixture
+def temp_project(tmp_path):
+    """Creates a temporary project directory."""
+    project_dir = tmp_path / "my_project"
+    project_dir.mkdir()
+    # Add a dummy file to scan
+    (project_dir / "app.py").write_text("import os\nos.system('ls')\n")
+    return project_dir
+
+def test_v4_init_regulators(temp_project):
+    """
+    Verify that anchor init correctly creates the government directory
+    and copies the requested regulators.
+    """
+    runner = CliRunner()
+
+    # Change CWD to temp_project
+    with runner.isolated_filesystem(temp_dir=temp_project):
+        # Run init with a regulator
+        result = runner.invoke(main, ["init", "--regulators", "rbi"])
+        assert result.exit_code == 0
+
+        # Check directory structure
+        dot_anchor = Path(".anchor")
+        assert dot_anchor.exists()
+        assert (dot_anchor / "government").exists()
+        assert (dot_anchor / "government" / "RBI_Regulations.anchor").exists()
+
+        # Verify manifest
+        constitution_path = dot_anchor / "constitution.anchor"
+        assert constitution_path.exists()
+        with open(constitution_path, "r") as f:
+            content = f.read()
+        assert "namespace: RBI" in content
+        # The loader should have marked it active: true when passed via --regulators
+        assert "active: true" in content
+
+def test_v4_check_with_federated_rules(temp_project):
+    """
+    Verify that anchor check correctly loads and applies rules from
+    different domains and regulators.
+    """
+    runner = CliRunner()
+
+    with runner.isolated_filesystem(temp_dir=temp_project):
+        # 1. Add dummy code
+        Path("app.py").write_text("import os\nos.system('ls')\n")
+
+        # 2. Initialize with the security domain and the RBI regulator.
+        #    --no-sign skips the remote lockfile fetch so the test stays hermetic.
+        init_result = runner.invoke(main, ["init", "--domains", "security", "--regulators", "rbi", "--no-sign"])
+        assert init_result.exit_code == 0, f"Init failed: {init_result.output}"
+
+    # 3. Run check
     # Use local constitution to match the new package hash and avoid GitHub sync mismatch
     from anchor.core.config import settings
     pkg_root = Path(main.callback.__globals__['__file__']).parent
+    project_root = pkg_root.parent
     local_const = pkg_root / "governance" / "constitution.anchor"
+    local_mitig = pkg_root / "governance" / "mitigation.anchor"
+    local_lock = project_root / "GOVERNANCE.lock"
+
     settings.constitution_url = local_const.as_uri()
+    settings.mitigation_url = local_mitig.as_uri()
+    settings.governance_lock_url = local_lock.as_uri()
 
     # Use --verbose to see loader info and bypass sync blocking
     result = runner.invoke(main, ["check", ".", "--verbose"])
 
+    # DEBUG: Print the output if the assertion below is about to fail
+    if not any(id in result.output for id in ["FINOS-014", "SEC-007"]):
+        print(f"INIT OUTPUT:\n{init_result.output}")
+        print(f"CHECK OUTPUT:\n{result.output}")
+
     # Detection should match either the framework ID or the canonical SEC ID
     assert any(id in result.output for id in ["FINOS-014", "SEC-007"])
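One caveat with the test above: it mutates the module-global settings object, and that override leaks into any test that runs afterwards. A hedged alternative (a sketch, not part of this diff) scopes the override with pytest's monkeypatch fixture so it is undone automatically:

    def test_check_with_local_lock(monkeypatch, tmp_path):
        from anchor.core.config import settings

        lock = tmp_path / "GOVERNANCE.lock"
        lock.write_text("files: {}\n")
        # Reverted automatically when the test finishes
        monkeypatch.setattr(settings, "governance_lock_url", lock.as_uri())
        # ... invoke the CLI exactly as in the test above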
@@ -97,8 +172,82 @@ def test_v4_init_all(temp_project):
 
         assert frameworks_dir.exists()
         # FINOS, NIST, OWASP
         assert len(list(frameworks_dir.glob("*.anchor"))) == 3
+
+        # Check government (6 expected)
+        gov_dir = dot_anchor / "government"
+        assert gov_dir.exists()
+        assert len(list(gov_dir.glob("*.anchor"))) == 6
+
+def test_v4_init_all_activation(temp_project):
+    """
+    Verify that init --all correctly marks items as active in the manifest.
+    """
+    runner = CliRunner()
+    with runner.isolated_filesystem(temp_dir=temp_project):
+        runner.invoke(main, ["init", "--all"])
+
+        constitution_path = Path(".anchor/constitution.anchor")
+        with open(constitution_path, "r") as f:
+            content = f.read()
+        # Ensure at least one entry from each category is active
+        assert "namespace: FINOS" in content
+        assert "active: true" in content  # for FINOS
+        assert "namespace: RBI" in content
+        assert "active: true" in content  # for RBI
+
+def test_fail_on_zero_laws(tmp_path):
+    """Anchor must never report PASSED with 0 active laws."""
+    runner = CliRunner()
+    # Create an empty directory with no .anchor
+    empty_dir = tmp_path / "empty_dir"
+    empty_dir.mkdir()
+
+    # Point settings at a non-existent constitution so no rules can load remotely
+    from anchor.core.config import settings
+    settings.constitution_url = (empty_dir / "none.anchor").as_uri()
+    settings.mitigation_url = (empty_dir / "none_mitig.anchor").as_uri()
+
+    # Clear the cache to ensure zero laws are found
+    cache_dir = Path.home() / ".anchor" / "cache"
+    if cache_dir.exists():
+        shutil.rmtree(cache_dir)
+
+    import unittest.mock
+    # Force zero rules by mocking the loader's output, so the test does not
+    # depend on the packaged internal governance library being absent
+    with unittest.mock.patch('anchor.core.loader.load_constitution') as mock_load:
+        from anchor.core.loader import LoadedConstitution
+        mock_load.return_value = LoadedConstitution(
+            manifest=None, rules={}, errors=["Triggered mock zero laws"]
+        )
+
+        result = runner.invoke(main, ['check', str(empty_dir)])
+
+        # Should exit with error code 2
+        assert result.exit_code == 2
+        assert "0 active laws loaded" in result.output
+
+    # Should never exit 0 or report PASSED
+    assert result.exit_code != 0
+    assert "PASSED" not in result.output
+    # Should explain what happened
+    assert any(msg in result.output for msg in ["0 active laws", "ERROR", "VIOLATION"])
+
+def test_integrity_violation_always_shown(temp_project):
+    """Integrity violations must surface even without --verbose."""
+    runner = CliRunner()
+    with runner.isolated_filesystem(temp_dir=temp_project):
+        runner.invoke(main, ["init", "--domains", "security"])
+
+        # Tamper with a governed file
+        sec_path = Path(".anchor/domains/security.anchor")
+        content = sec_path.read_text()
+        sec_path.write_text(content + "\n# Tampered")
+
+        # Run check without --verbose
+        result = runner.invoke(main, ["check", "."])
-        # Check regulators (6 expected)
-        government_dir = dot_anchor / "government"
-        assert government_dir.exists()
-        assert len(list(government_dir.glob("*.anchor"))) == 6
+
+        # The violation must surface
+        assert result.exit_code == 2
+        assert "ANCHOR INTEGRITY VIOLATION" in result.output
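Taken together, the CLI changes in this patch establish a hard exit-code contract: sys.exit(2) on an integrity violation or when zero laws load, which the last two tests pin down. A sketch of how a CI gate might consume that contract (assuming, as elsewhere in the diff, invocation via python -m anchor):

    import subprocess
    import sys

    # Exit code 2 is reserved for governance failures in the paths added above
    proc = subprocess.run([sys.executable, "-m", "anchor", "check", "."])
    if proc.returncode == 2:
        raise SystemExit("Governance failure: integrity violation or 0 active laws loaded.")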