diff --git a/.anchor/.anchor.sig b/.anchor/.anchor.sig deleted file mode 100644 index 449b430..0000000 --- a/.anchor/.anchor.sig +++ /dev/null @@ -1 +0,0 @@ -sha256:0edb5dad2a2dc26c956082c71224edba281569a76bbd41465fc8e6720cf58dd6 \ No newline at end of file diff --git a/.anchor/constitution.anchor b/.anchor/constitution.anchor index 84d5fac..8751a0b 100644 --- a/.anchor/constitution.anchor +++ b/.anchor/constitution.anchor @@ -4,7 +4,7 @@ # ───────────────────────────────────────────────────────────── type: manifest -version: "4.0" +version: "4.1" anchor_version: ">=4.0.0" name: "Anchor Constitutional Root" @@ -104,29 +104,30 @@ policy: # FINOS_Framework.anchor is the Rosetta Stone. legacy_aliases: - ANC-001: FINOS-001 # → SEC-006 Raw Network Access - ANC-002: FINOS-002 # → PRV-002 Vector Inversion - ANC-003: FINOS-003 # → ALN-001 Hallucination - ANC-004: FINOS-004 # → SUP-003 Versioning Drift - ANC-005: FINOS-005 # → ALN-001 Non-Deterministic Behaviour - ANC-006: FINOS-006 # → OPS-001 Availability - ANC-007: FINOS-007 # → SEC-003 Model Tampering - ANC-008: FINOS-008 # → SEC-002 Data Poisoning - ANC-009: FINOS-009 # → SEC-001 Prompt Injection - ANC-010: FINOS-010 # → ALN-002 Goal Misrepresentation - ANC-011: FINOS-011 # → ETH-001 Bias - ANC-012: FINOS-012 # → ETH-002 Explainability Absence - ANC-013: FINOS-013 # → SHR-001 Model Overreach - ANC-014: FINOS-014 # → SHR-002 Data Quality and Drift - ANC-015: FINOS-015 # → SHR-003 Reputational Risk - ANC-016: FINOS-016 # → LEG-002 Regulatory Non-Compliance - ANC-017: FINOS-017 # → LEG-001 IP Infringement - ANC-018: FINOS-018 # → AGT-001 Agent Authorization Bypass - ANC-019: FINOS-019 # → AGT-002 Tool Chain Manipulation - ANC-020: FINOS-020 # → AGT-003 MCP Compromise - ANC-021: FINOS-021 # → AGT-004 State Persistence Poisoning - ANC-022: FINOS-022 # → AGT-005 Multi-Agent Trust Violations - ANC-023: FINOS-023 # → SEC-004 Credential Harvesting + ANC-001: FINOS-001 + ANC-002: FINOS-002 + ANC-003: FINOS-003 + ANC-004: 
FINOS-004 + ANC-005: FINOS-005 + ANC-006: FINOS-006 + ANC-007: FINOS-007 + ANC-008: FINOS-008 + ANC-009: FINOS-009 + ANC-010: FINOS-010 + ANC-011: FINOS-011 + ANC-012: FINOS-012 + ANC-013: FINOS-013 + ANC-014: FINOS-014 + ANC-015: FINOS-015 + ANC-016: FINOS-016 + ANC-017: FINOS-017 + ANC-018: FINOS-018 + ANC-019: FINOS-019 + ANC-020: FINOS-020 + ANC-021: FINOS-021 + ANC-022: FINOS-022 + ANC-023: FINOS-023 + engine: fail_on: [BLOCKER, ERROR] diff --git a/.anchor/frameworks/FINOS_Framework.anchor b/.anchor/frameworks/FINOS_Framework.anchor new file mode 100644 index 0000000..96e67b1 --- /dev/null +++ b/.anchor/frameworks/FINOS_Framework.anchor @@ -0,0 +1,178 @@ +type: framework +namespace: FINOS +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +source: "FINOS AI Governance Framework" +source_url: "https://github.com/finos/ai-governance-framework" +credit: "FINOS AI Governance Framework Risk Taxonomy (Ri-001 - Ri-023)" +description: > + The FINOS AI Governance Framework provides the foundational risk + taxonomy for Anchor. This framework file acts as the primary + mapping layer, connecting the original FINOS Ri-IDs and V3 ANC-IDs + to the refined V4 Domain-prefixed rules. Use this framework to + ensure compliance with the FINOS standard. +seal: "sha256:PENDING" + +rules: + + - id: "FINOS-001" + name: "Prompt Injection" + original_id: "Ri-001" + maps_to: "SEC-001" + severity: "blocker" + description: "Malicious instructions injected into prompts." + + - id: "FINOS-002" + name: "Data Poisoning" + original_id: "Ri-002" + maps_to: "SEC-002" + severity: "blocker" + description: "Poisoning of training, fine-tuning, or retrieval data." + + - id: "FINOS-003" + name: "Model Tampering" + original_id: "Ri-003" + maps_to: "SEC-003" + severity: "blocker" + description: "Unauthorized modification of model weights or artifacts." 
+ + - id: "FINOS-004" + name: "Credential Harvesting" + original_id: "Ri-004" + maps_to: "SEC-004" + severity: "blocker" + description: "Systematic exfiltration of secrets via AI pipelines." + + - id: "FINOS-005" + name: "Model Leakage and Theft" + original_id: "Ri-005" + maps_to: "SUP-001" + severity: "blocker" + description: "Unauthorized export or exfiltration of model weights." + + - id: "FINOS-006" + name: "Weight Corruption" + original_id: "Ri-006" + maps_to: "SUP-002" + severity: "blocker" + description: "Accidental or malicious corruption of model weights." + + - id: "FINOS-007" + name: "Versioning Drift" + original_id: "Ri-007" + maps_to: "SUP-003" + severity: "warning" + description: "Undocumented or unverified changes in model versions." + + - id: "FINOS-008" + name: "Hallucination" + original_id: "Ri-008" + maps_to: "ALN-001" + severity: "error" + description: "Model generating plausible but false or dangerous information." + + - id: "FINOS-009" + name: "Bias and Discrimination" + original_id: "Ri-009" + maps_to: "ETH-001" + severity: "error" + description: "Systematically biased or discriminatory model outcomes." + + - id: "FINOS-010" + name: "Explainability Absence" + original_id: "Ri-010" + maps_to: "ETH-002" + severity: "error" + description: "Decisions made by black-box models that cannot be explained." + + - id: "FINOS-011" + name: "Availability and Denial" + original_id: "Ri-011" + maps_to: "OPS-001" + severity: "error" + description: "AI system unavailability due to resource exhaustion or attacks." + + - id: "FINOS-012" + name: "Authorization Bypass" + original_id: "Ri-012" + maps_to: "SEC-005" + severity: "blocker" + description: "Executing actions outside granted permissions via AI tools." + + - id: "FINOS-013" + name: "Raw Network Access" + original_id: "Ri-013" + maps_to: "SEC-006" + severity: "error" + description: "Unproxied outbound network calls from AI components." 
+ + - id: "FINOS-014" + name: "Shell Injection" + original_id: "Ri-014" + maps_to: "SEC-007" + severity: "blocker" + description: "Executing shell commands constructed from untrusted model input." + + - id: "FINOS-015" + name: "PII Leakage" + original_id: "Ri-015" + maps_to: "PRV-001" + severity: "blocker" + description: "Unauthorized exposure of Personally Identifiable Information." + + - id: "FINOS-016" + name: "Vector Inversion" + original_id: "Ri-016" + maps_to: "PRV-002" + severity: "error" + description: "Reconstructing training data from embedding vectors." + + - id: "FINOS-017" + name: "Supply Chain Attack" + original_id: "Ri-017" + maps_to: "SEC-008" + severity: "blocker" + description: "Compromised upstream dependencies or tool integrations." + + - id: "FINOS-018" + name: "Model Overreach" + original_id: "Ri-018" + maps_to: "SHR-001" + severity: "warning" + description: "Using models beyond their validated scope or context." + + - id: "FINOS-019" + name: "Regulatory Non-Compliance" + original_id: "Ri-019" + maps_to: "LEG-002" + severity: "error" + description: "AI deployment violating specific jurisdictional laws." + + - id: "FINOS-020" + name: "Human Oversight Removal" + original_id: "Ri-020" + maps_to: "ETH-003" + severity: "blocker" + description: "Autonomous decisions made without human-in-the-loop controls." + + - id: "FINOS-021" + name: "Goal Misrepresentation" + original_id: "Ri-021" + maps_to: "ALN-002" + severity: "blocker" + description: "Agents pursuing objectives misaligned with user intent." + + - id: "FINOS-022" + name: "Cross-context Data Bleed" + original_id: "Ri-022" + maps_to: "PRV-003" + severity: "error" + description: "Data from one context leaking into another via shared state." + + - id: "FINOS-023" + name: "IP Infringement" + original_id: "Ri-023" + maps_to: "LEG-001" + severity: "warning" + description: "Model outputs infringing on intellectual property or copyright." 
diff --git a/.anchor/frameworks/NIST_AI_RMF.anchor b/.anchor/frameworks/NIST_AI_RMF.anchor new file mode 100644 index 0000000..eefe238 --- /dev/null +++ b/.anchor/frameworks/NIST_AI_RMF.anchor @@ -0,0 +1,66 @@ +type: framework +namespace: NIST +version: "1.0" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +source: "NIST AI Risk Management Framework (AI RMF 1.0)" +source_url: "https://www.nist.gov/itl/ai-rmf" +credit: "National Institute of Standards and Technology (NIST)" +description: > + The NIST AI RMF provides a high-level framework for managing + risks associated with AI systems. Unlike risk taxonomies, + NIST RMF defines governance functions—Govern, Map, Measure, + Manage. This framework file maps these functions to Anchor's + operational primitives and enforcement mechanisms. +seal: "sha256:PENDING" + +rules: + + - id: "NIST-GOV" + name: "GOVERN: Institutional Policies" + original_id: "Govern 1.1" + maps_to: "LEG-002" + severity: "error" + obligation_type: "audit" + anchor_mechanism: "policy.anchor + sealed manifest" + description: > + Policies, processes, and procedures for AI risk management are + established and maintained. Anchor satisfies this by enforcing + a cryptographically sealed constitution and project-level + policy.anchor overrides. + + - id: "NIST-MAP" + name: "MAP: Risk Identification" + original_id: "Map 1.1" + maps_to: "SHR-001" + severity: "warning" + obligation_type: "audit" + anchor_mechanism: "anchor check --report-server" + description: > + Context is established and risks are identified and documented. + Anchor's federated domains (SEC, ETH, PRV, etc.) provide the + contextual mapping of technical risks to organizational impact. + + - id: "NIST-MEAS" + name: "MEASURE: Risk Assessment" + original_id: "Measure 2.1" + maps_to: "OPS-001" + severity: "warning" + obligation_type: "provenance" + anchor_mechanism: "telemetry_path: .anchor/telemetry/" + description: > + AI systems are assessed for risks and impacts. 
Anchor's + telemetry output provides the metrics for assessing frequency + and severity of compliance violations across the fleet. + + - id: "NIST-MAN" + name: "MANAGE: Risk Treatment" + original_id: "Manage 1.1" + maps_to: "ALN-002" + severity: "blocker" + obligation_type: "audit" + anchor_mechanism: "anchor check --severity error (CI Gate)" + description: > + Risks are prioritized and managed based on impact and likelihood. + Anchor's CI/CD integration (pre-commit hooks, GH Actions) acts + as the primary "Manage" gate, blocking non-compliant code from deployment. diff --git a/.anchor/frameworks/OWASP_LLM.anchor b/.anchor/frameworks/OWASP_LLM.anchor new file mode 100644 index 0000000..e23ea1f --- /dev/null +++ b/.anchor/frameworks/OWASP_LLM.anchor @@ -0,0 +1,86 @@ +type: framework +namespace: OWASP +version: "2025" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +source: "OWASP Top 10 for Large Language Model Applications" +source_url: "https://owasp.org/www-project-top-10-for-large-language-model-applications/" +credit: "OWASP Foundation" +description: > + The OWASP Top 10 for LLMs provides a list of the most critical + security risks for applications utilizing Large Language Models. + This framework file maps OWASP LLM-specific risks to the + canonical Anchor V4 Domain rules. +seal: "sha256:PENDING" + +rules: + + - id: "OWASP-001" + name: "LLM01: Prompt Injection" + original_id: "LLM-01" + maps_to: "SEC-001" + severity: "blocker" + description: "Malicious instructions injected into prompts to manipulate LLM behavior." + + - id: "OWASP-002" + name: "LLM02: Insecure Output Handling" + original_id: "LLM-02" + maps_to: "SEC-007" + severity: "blocker" + description: "Failure to sanitize LLM outputs before passing them to sensitive downstream functions (e.g. shell)." 
+ + - id: "OWASP-003" + name: "LLM03: Training Data Poisoning" + original_id: "LLM-03" + maps_to: "SEC-002" + severity: "blocker" + description: "Poisoning training data to create backdoors or bias in LLM behavior." + + - id: "OWASP-004" + name: "LLM04: Model Denial of Service" + original_id: "LLM-04" + maps_to: "OPS-001" + severity: "error" + description: "Causing excessive resource consumption in LLMs to degrade availability." + + - id: "OWASP-005" + name: "LLM05: Supply Chain Vulnerabilities" + original_id: "LLM-05" + maps_to: "SEC-008" + severity: "blocker" + description: "Risks from compromised third-party components, data, or models." + + - id: "OWASP-006" + name: "LLM06: Sensitive Information Disclosure" + original_id: "LLM-06" + maps_to: "PRV-001" + severity: "blocker" + description: "LLM leaking PII or other sensitive data in its responses." + + - id: "OWASP-007" + name: "LLM07: Insecure Plugin Design" + original_id: "LLM-07" + maps_to: "AGT-001" + severity: "blocker" + description: "Plugins/tools with insufficient access controls callable by the LLM." + + - id: "OWASP-008" + name: "LLM08: Excessive Agency" + original_id: "LLM-08" + maps_to: "AGT-005" + severity: "blocker" + description: "LLM having broad permissions or functioning without adequate human oversight." + + - id: "OWASP-009" + name: "LLM09: Overreliance" + original_id: "LLM-09" + maps_to: "ALN-001" + severity: "error" + description: "Dependence on LLM outputs without verification, increasing risk from hallucinations." + + - id: "OWASP-010" + name: "LLM10: Model Theft" + original_id: "LLM-10" + maps_to: "SUP-001" + severity: "blocker" + description: "Unauthorized access, copying, or extraction of proprietary models." 
diff --git a/.anchor/government/CFPB_Regulations.anchor b/.anchor/government/CFPB_Regulations.anchor new file mode 100644 index 0000000..7ecab44 --- /dev/null +++ b/.anchor/government/CFPB_Regulations.anchor @@ -0,0 +1,116 @@ +type: framework +namespace: CFPB +version: "2024" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "CFPB Regulation B (Equal Credit Opportunity Act) and 2024 AI Guidance" +source_url: "https://www.consumerfinance.gov/compliance/circulars/" +source_date: "2024" +credit: > + Consumer Financial Protection Bureau Regulation B implementing + the Equal Credit Opportunity Act (ECOA), and CFPB 2024 guidance + on adverse action notification requirements for AI-assisted credit + decisions. The $45 million enforcement action against Goldman Sachs + in October 2024 established the enforcement precedent for AI credit + model explainability obligations in US financial services. +layer_2_status: > + Rules marked obligation_type: provenance depend on AnchorRuntime + (Layer 2) and the Decision Audit Chain (DAC). Layer 2 is currently + in development. +seal: "sha256:PENDING" + +rules: + + - id: "CFPB-001" + name: "Adverse Action Notice — Specific Reasons Required" + original_id: "Regulation B, Section 202.9" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on credit decision code + without reason codes. CREDIT-001 violation fires specifically + on denial output with no reason_code field. adverse_action_reasons() + method on AuditEntry produces ECOA-compliant reason codes. + Layer 1 detection active now. + severity: "blocker" + min_severity: "blocker" + description: > + Creditors must provide applicants with specific, principal + reasons for adverse action taken on credit applications. The + CFPB explicitly rejects the position that algorithmic complexity + justifies opaque denials — the reasons must be specific, + comprehensible, and accurate. 
Goldman Sachs paid $45 million + in October 2024 not because their Apple Card AI model was + wrong, but because they could not explain at the individual + decision level why the algorithm reached its conclusions. + This is the most directly enforced AI compliance obligation + in US financial services. + + - id: "CFPB-002" + name: "AI Credit Models — Algorithm Not an Excuse" + original_id: "CFPB Circular 2024" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence detection. CREDIT-001 fires + on denial without reason code. ADV-001 fires on adverse + action without violation_id linkage. Layer 1 active now. + severity: "blocker" + min_severity: "blocker" + description: > + CFPB 2024 guidance explicitly extends Regulation B to + AI-assisted credit decisions. The use of a complex AI model + does not exempt creditors from providing specific reasons + for adverse action. The model's complexity is the creditor's + problem, not the applicant's. Any creditor that cannot + explain its AI credit decisions at the individual level + is in violation of Regulation B regardless of the model's + technical architecture. + + - id: "CFPB-003" + name: "Prohibited Basis Discrimination — ECOA Enforcement" + original_id: "ECOA Section 701, Regulation B Section 202.4" + maps_to: "ETH-001" + obligation_type: detection + anchor_mechanism: > + ETH-001 bias and discrimination detection active in Layer 1. + BIAS-001 fires on protected class reference in credit output. + BIAS-* violation category covers all ECOA protected characteristics: + race, color, religion, national origin, sex, marital status, + age, public assistance income. + severity: "blocker" + min_severity: "blocker" + description: > + ECOA prohibits credit discrimination based on race, color, + religion, national origin, sex, marital status, age, or + receipt of public assistance income. 
AI systems that produce + disparate impact on protected classes violate ECOA even + without discriminatory intent. The Fair Housing Act extends + these protections to mortgage and housing-related credit. + Disparate impact is measured against outcomes, not intent — + a facially neutral AI model that produces systematically + worse outcomes for protected groups is an ECOA violation + regardless of how it was designed. + + - id: "CFPB-004" + name: "Model Risk Management — Documented Validation" + original_id: "CFPB Supervisory Guidance 2024" + maps_to: "LEG-002" + obligation_type: audit + anchor_mechanism: > + anchor audit pre-deployment produces validation evidence. + Violation taxonomy as documented validation artifact. + DAC audit chain as ongoing monitoring record. Layer 2 + in development for full monitoring support. + severity: "error" + min_severity: "error" + description: > + CFPB 2024 guidance requires that AI credit models be + validated, documented, and subject to ongoing monitoring. + Validation results must be available for supervisory + examination. Backtesting and performance monitoring are + required throughout the model lifecycle — not just at + initial deployment. Model risk management for AI credit + models is subject to the same supervisory scrutiny as + traditional statistical models under Federal Reserve SR 11-7. diff --git a/.anchor/government/EU_AI_Act.anchor b/.anchor/government/EU_AI_Act.anchor new file mode 100644 index 0000000..631a397 --- /dev/null +++ b/.anchor/government/EU_AI_Act.anchor @@ -0,0 +1,258 @@ +type: framework +namespace: EU +version: "2024/1689" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "EU Artificial Intelligence Act (Regulation EU 2024/1689)" +source_url: "https://eur-lex.europa.eu/eli/reg/2024/1689/oj/eng" +source_date: "August 1, 2024" +credit: > + Regulation (EU) 2024/1689 of the European Parliament and of the + Council laying down harmonised rules on artificial intelligence. 
+ Published in the Official Journal of the European Union, L series, + 2024. Full enforcement of high-risk AI provisions begins August 2, + 2026. Credit scoring, AML monitoring, and fraud detection are + legally classified as high-risk AI systems under Annex III. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. These rules are specified + as designed and will be enforced once Layer 2 ships. +seal: "sha256:PENDING" + +rules: + + - id: "EU-ART09" + name: "Risk Management System — Continuous Lifecycle" + original_id: "Article 9" + maps_to: "LEG-002" + obligation_type: detection + anchor_mechanism: > + constitution.anchor sealed ruleset constitutes the documented + risk management system. anchor check in CI/CD provides the + continuous testing requirement. Violation report is the + documented evidence of risk management activity. + severity: "blocker" + min_severity: "blocker" + description: > + A documented, ongoing risk management system must be established, + implemented, and maintained across the entire AI lifecycle for + all high-risk AI systems. The system must identify known and + foreseeable risks, estimate and evaluate risks, adopt risk + management measures, and test the system before market placement + and throughout development. This is not a one-time process — + it must be updated continuously. Anchor's sealed constitution + and CI/CD integration satisfy the technical continuous testing + requirement. Fines up to €30 million or 6% of global annual + revenue for non-compliance after August 2, 2026. + + - id: "EU-ART10" + name: "Data and Data Governance" + original_id: "Article 10" + maps_to: "ETH-001" + obligation_type: detection + anchor_mechanism: > + ETH-001 bias detection active in Layer 1. PRV-002 vector + inversion detection covers embedding data governance. + PROV-003 provenance violation fires on missing data lineage. 
+ DATA-* violation category covers data governance gaps. + severity: "blocker" + min_severity: "error" + description: > + Training, validation, and testing datasets for high-risk AI + systems must be subject to appropriate data governance practices. + Data must be relevant, representative, and free from errors. + Bias detection and mitigation is required. Data residency and + provenance must be documented. For financial AI, this means + every dataset used in credit scoring, AML, or fraud detection + must have documented provenance, bias testing results, and + residency records available for conformity assessment. + + - id: "EU-ART11" + name: "Technical Documentation — Before Market Placement" + original_id: "Article 11" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Violation taxonomy + constitution.anchor + mitigation.anchor + together constitute the technical documentation layer. + anchor audit --report generates the structured documentation + artifact for conformity assessment submission. + severity: "blocker" + min_severity: "blocker" + description: > + Technical documentation must be drawn up before the AI system + is placed on the market or put into service. Must include: + general description of the system, system components, + development process, training methodology, validation results, + capabilities and limitations, and risk mitigation measures + adopted. Anchor's audit report, sealed constitution, and + violation taxonomy together constitute the technical + documentation that feeds the EU conformity assessment process. + + - id: "EU-ART12" + name: "Record-Keeping — Auto-Generated Tamper-Evident Logs" + original_id: "Article 12" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + DAC AuditEntry chain satisfies all Article 12 requirements. + entry_id = unique record identifier. chain_hash = tamper + evidence. signature = per-entry integrity seal. timestamp = + retention timestamp. 
model_id + model_version = system + identification. eu_article12_record() method serializes + to EU AI Act compliant log format. Layer 2 in development. + severity: "blocker" + min_severity: "blocker" + description: > + High-risk AI systems must automatically generate logs enabling + post-hoc review of the system's operation. Logs must be retained + for a period defined by the deploying operator or relevant + sectoral authority — minimum 6 months for most financial AI + applications. Logs must be tamper-evident and enable + reconstruction of the circumstances around events of concern. + This is the most technically specific Article in the EU AI Act + and the one most directly satisfied by Anchor's Decision Audit + Chain architecture. + + - id: "EU-ART13" + name: "Transparency — Information to Deployers" + original_id: "Article 13" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on black-box model + usage without explain() hooks. adverse_action_reasons() + provides CFPB and EU compliant reason codes. TRANS-* + violation category covers transparency gaps. model_version + in AuditEntry satisfies system identification requirement. + severity: "blocker" + min_severity: "error" + description: > + High-risk AI systems must be designed to be sufficiently + transparent that deployers can understand the system's + capabilities, limitations, and intended purpose. Instructions + for use must include: identity of the provider, capabilities + and performance limitations, accuracy metrics, human oversight + measures, and technical measures for human control. In financial + AI, this means every AI-assisted decision output must include + enough information for the deploying institution — and + ultimately the affected individual — to understand why the + decision was made. 
+ + - id: "EU-ART14" + name: "Human Oversight — Intervention and Override" + original_id: "Article 14" + maps_to: "ETH-003" + obligation_type: detection + anchor_mechanism: > + ETH-003 human oversight removal fires when autonomous + decision code has no human review checkpoint. AnchorRuntime + compliant flag per AuditEntry records whether human oversight + was maintained for each decision. Layer 2 in development + for runtime enforcement. + severity: "blocker" + min_severity: "blocker" + description: > + High-risk AI systems must be designed to allow effective human + oversight. Deployers must be able to monitor the system's + operation, detect and address malfunctions, and interrupt, + disregard, or override the system's outputs when necessary. + Human oversight must be effective — not nominal. A human + reviewer who is presented with AI outputs too quickly, without + adequate context, or under time pressure that makes genuine + review impossible does not satisfy Article 14. The oversight + mechanism must be designed to be practically effective. + + - id: "EU-ART15" + name: "Accuracy, Robustness and Cybersecurity" + original_id: "Article 15" + maps_to: "SEC-001" + obligation_type: detection + anchor_mechanism: > + SEC-001 prompt injection, SEC-002 data poisoning, SEC-003 + model tampering, SEC-008 supply chain attack, AGT-001 through + AGT-005 agentic security rules all fire in Layer 1. Diamond + Cage WASM sandbox provides runtime robustness for high-risk + operations. + severity: "blocker" + min_severity: "error" + description: > + High-risk AI systems must achieve appropriate levels of accuracy, + robustness, and cybersecurity for their intended purpose. + They must be resilient against errors, faults, and adversarial + attacks — including prompt injection, data poisoning, and model + evasion attempts. Security measures must be commensurate with + the risk profile of the specific AI system and its deployment + context. 
Anchor's SEC- and AGT- domain rules provide the + technical detection layer for every adversarial attack category + enumerated in Article 15. + + - id: "EU-ART16" + name: "Provider Obligations — Complete List" + original_id: "Article 16" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Sealed constitution.anchor + full DAC audit chain together + constitute the conformity evidence package. anchor audit + --report generates the structured disclosure artifact for + EU database registration and supervisory authority submission. + severity: "blocker" + min_severity: "blocker" + description: > + Providers of high-risk AI systems must: ensure compliance with + all technical requirements, draw up technical documentation, + operate a quality management system, keep technical documentation + and logs for the required retention period, conduct conformity + assessment, register in the EU AI database before market + placement, affix CE marking where required, appoint an + authorised representative in the EU where applicable, and + cooperate with national competent authorities on request. + + - id: "EU-ART26" + name: "Deployer Obligations — Monitoring and Oversight" + original_id: "Article 26" + maps_to: "ETH-003" + obligation_type: audit + anchor_mechanism: > + AnchorRuntime continuous eval satisfies continuous monitoring + requirement. Real-time violation detection per AuditEntry. + compliant boolean per decision records governance status. + Layer 2 in development. + severity: "blocker" + min_severity: "error" + description: > + Deployers of high-risk AI systems must implement appropriate + human oversight measures, monitor the system for anomalous + behavior, suspend use when serious risk is identified, inform + the provider of serious incidents, and keep logs generated + by the AI system for the required retention period. Deployers + must also conduct data protection impact assessments where + the system processes personal data. 
The deployer bears + regulatory liability for every decision the system makes + in their deployment context. + + - id: "EU-ART99" + name: "Penalties — No Safe Harbour After August 2026" + original_id: "Article 99" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Full Anchor compliance stack — sealed constitution, active + domain rules, DAC audit chain, anchor audit --report — is + the compliance evidence package that demonstrates conformity + and mitigates penalty exposure. + severity: "blocker" + min_severity: "blocker" + description: > + Violations of requirements for high-risk AI systems carry + fines of up to €30 million or 6% of total worldwide annual + turnover, whichever is higher. Violations of Article 5 + prohibited practices carry up to €35 million or 7%. + There is no grace period after August 2, 2026. Supervisory + authorities in each EU member state are empowered to conduct + inspections, demand documentation, and impose fines without + prior warning. The only defense is documented, demonstrable + compliance — not intent to comply. diff --git a/.anchor/government/FCA_Regulations.anchor b/.anchor/government/FCA_Regulations.anchor new file mode 100644 index 0000000..9928abb --- /dev/null +++ b/.anchor/government/FCA_Regulations.anchor @@ -0,0 +1,142 @@ +type: framework +namespace: FCA +version: "2024" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "FCA AI Governance Guidance 2024 and FCA Consumer Duty" +source_url: "https://www.fca.org.uk/publications/feedback-statements/fs23-6-artifical-intelligence-machine-learning" +source_date: "2024" +credit: > + UK Financial Conduct Authority guidance on AI governance + (FS23/6 Feedback Statement and subsequent 2024 guidance). + FCA Consumer Duty effective July 31, 2023. 
FCA guidance + effective September 2026 links AI governance failures to + fitness and propriety of compliance leadership — CCOs and + CROs can be held personally accountable for AI governance + failures from that date. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. +seal: "sha256:PENDING" + +rules: + + - id: "FCA-001" + name: "Human Oversight Records — AI-Assisted Decisions" + original_id: "FCA AI Governance Guidance 2024" + maps_to: "ETH-003" + obligation_type: audit + anchor_mechanism: > + DAC query API /audit exposes full oversight record. + ETH-003 human oversight removal violation fires on + autonomous decision code without human review checkpoint. + AuditEntry records compliant boolean per decision. + Layer 2 in development. + severity: "blocker" + min_severity: "error" + description: > + FCA 2024 guidance requires firms to demonstrate human oversight + and maintain records of AI-assisted decisions sufficient to + support supervisory review. Oversight must be documented — + not merely stated in policy. A firm that claims to have human + oversight but cannot produce records demonstrating that oversight + occurred for specific decisions does not satisfy this requirement. + The FCA has indicated that oversight records will be a primary + focus of AI-related supervisory visits. + + - id: "FCA-002" + name: "Model Version Traceability Per Decision" + original_id: "FCA AI Governance Guidance 2024" + maps_to: "SUP-003" + obligation_type: provenance + anchor_mechanism: > + model_version in every AuditEntry records exact model version + per decision. SUP-003 versioning drift violation fires on + undeclared model version changes. Layer 2 in development. + severity: "error" + min_severity: "error" + description: > + FCA requires firms to maintain records showing which version + of which model produced which decision. 
Model version + traceability must be continuous — not just documented at + the point of initial deployment. When a model is updated, + the version change must be logged, and historical decisions + must remain traceable to the model version that produced them. + This requirement is a prerequisite for any post-hoc supervisory + review of AI-assisted decisions. + + - id: "FCA-003" + name: "CCO Personal Liability — AI Governance Failures" + original_id: "FCA Guidance September 2026" + maps_to: "ETH-003" + obligation_type: disclosure + anchor_mechanism: > + Signed DAC audit chain is the CCO's evidence of governance. + anchor audit --report generates the compliance record that + demonstrates active governance during the relevant period. + A CCO who can produce sealed, timestamped governance records + has documented evidence of their oversight function. + severity: "blocker" + min_severity: "blocker" + description: > + From September 2026, FCA guidance links AI governance failures + to the fitness and propriety of compliance leadership. Chief + Compliance Officers and Chief Risk Officers can be held + personally accountable for AI governance failures — not just + the firm. This transforms AI governance from a corporate risk + into a personal career risk for named compliance individuals. + The only defense is documented, demonstrable governance — + which Anchor's sealed audit chain provides. A CCO who cannot + produce evidence of active AI governance when the FCA asks + is personally exposed. + + - id: "FCA-004" + name: "AML AI Output Cryptographic Verification" + original_id: "FCA AML Guidance 2024" + maps_to: "SEC-006" + obligation_type: provenance + anchor_mechanism: > + AML-002 violation fires on AML output without output_hash + verification. output_hash in AuditEntry provides SHA-256 + binding of every AML flagging result to the model output + that produced it. Layer 2 in development. 
+ severity: "error" + min_severity: "error" + description: > + FCA 2024 guidance on AML requires that AI-generated flagging + results be verifiable — displayed AML alerts must be + cryptographically linkable to the originating model output. + An AML alert that cannot be traced to a specific model output + at a specific timestamp is unverifiable, and an unverifiable + alert cannot form the basis of a suspicious activity report + that would survive regulatory scrutiny. This is particularly + critical in automated AML pipelines where human review + occurs after flagging rather than before. + + - id: "FCA-005" + name: "Consumer Duty — Good Outcomes for Retail Customers" + original_id: "FCA Consumer Duty PS22/9" + maps_to: "ETH-004" + obligation_type: detection + anchor_mechanism: > + ETH-004 toxic output detection fires on harmful customer + communications. ETH-002 explainability absence fires on + opaque customer-facing AI decisions. SHR-003 reputational + and conduct risk covers Consumer Duty conduct obligations. + Layer 1 active now. + severity: "error" + min_severity: "error" + description: > + FCA Consumer Duty requires firms to deliver good outcomes + for retail customers across four outcome areas: products + and services, price and value, consumer understanding, and + consumer support. For AI systems interacting with retail + customers, this means AI outputs must be accurate, fair, + and comprehensible — not optimized for firm metrics at the + expense of customer outcomes. An AI system that generates + misleading communications, opaque decisions, or outcomes + that systematically disadvantage retail customers violates + Consumer Duty regardless of technical compliance with + other regulatory requirements. 
diff --git a/.anchor/government/SEBI_Regulations.anchor b/.anchor/government/SEBI_Regulations.anchor new file mode 100644 index 0000000..5222f02 --- /dev/null +++ b/.anchor/government/SEBI_Regulations.anchor @@ -0,0 +1,207 @@ +type: framework +namespace: SEBI +version: "2025-06" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "SEBI Consultation Papers on AI/ML in Securities Markets" +source_url: "https://www.sebi.gov.in/reports-and-statistics/reports/jun-2025/consultation-paper-on-guidelines-for-responsible-usage-of-ai-ml-in-indian-securities-markets_94687.html" +source_date: "June 2025" +credit: > + Securities and Exchange Board of India consultation papers on + AI/ML governance (November 2024 and June 2025). SEBI requires + all market participants using AI/ML tools to be responsible for + compliance with all applicable laws regardless of the method or + degree of AI adoption. Third-party vendor liability does not + transfer to SEBI-regulated entities. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. +seal: "sha256:PENDING" + +rules: + + - id: "SEBI-001" + name: "Senior Management Oversight — Designated Responsibility" + original_id: "SEBI AI/ML Requirement 1" + maps_to: "ETH-003" + obligation_type: disclosure + anchor_mechanism: > + Board-approved policy.anchor with sealed constitution.anchor + constitutes the governance policy artifact. anchor audit + --report generates the oversight documentation for senior + management sign-off. + severity: "blocker" + min_severity: "error" + description: > + Market participants using AI/ML must designate senior management + with technical expertise to oversee AI tool performance and + control. Board-level oversight with named accountability is + mandatory and cannot be delegated to vendors or technical teams. 
+ SEBI requires that senior management understand and be + accountable for every AI tool deployed in trading, advisory, + and compliance functions. + + - id: "SEBI-002" + name: "Model Validation, Documentation and Interpretability" + original_id: "SEBI AI/ML Requirement 2" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on black-box model usage. + TRANS-* violation category covers transparency and documentation + gaps. PROV-001 fires on AI output without model version. + Layer 1 detection active now. + severity: "error" + min_severity: "error" + description: > + Market participants must maintain validation documentation and + ensure interpretability of AI models. Outcomes must be + explainable, traceable, and repeatable. Documentation explaining + the logic of AI/ML models is mandatory — not optional. SEBI + specifically requires that AI model decisions be capable of + being explained to the regulator, the firm's board, and + affected investors. + + - id: "SEBI-003" + name: "Periodic Accuracy Reporting to SEBI" + original_id: "SEBI AI/ML Requirement 3" + maps_to: "DAC-AuditLog" + obligation_type: audit + anchor_mechanism: > + anchor audit --report generates JSON and Markdown accuracy + and compliance reports. /audit endpoint exposes the full + audit chain for regulator access. Layer 2 in development + for full continuous reporting support. + severity: "error" + min_severity: "error" + description: > + Market participants must share AI/ML accuracy results and + audit findings with SEBI on a periodic basis. These are not + internal records — they must be shareable with the regulator + in a structured format. Anchor's audit report output provides + the structured compliance evidence that satisfies this periodic + reporting obligation. 
+ + - id: "SEBI-004" + name: "5-Year Input and Output Data Retention" + original_id: "SEBI AI/ML Requirement 4" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + DAC AuditEntry records input_hash + output_hash per decision + with ISO 8601 timestamp. Append-only log provides 5-year + retention with tamper-evident integrity. Layer 2 in development. + severity: "blocker" + min_severity: "blocker" + description: > + Market participants must maintain documentation of all models + and store input and output data for at least 5 years. This + applies to all AI systems used in trading, advisory, and + compliance functions. Five years is the statutory limitation + period for most SEBI enforcement actions — data retention for + this period ensures that the firm can defend against any + regulatory inquiry within that window. + + - id: "SEBI-005" + name: "Segregated Testing Environment Before Deployment" + original_id: "SEBI AI/ML Requirement 5" + maps_to: "LEG-002" + obligation_type: detection + anchor_mechanism: > + anchor check in CI/CD acts as the pre-deployment governance + gate. Diamond Cage WASM sandbox provides the segregated + runtime testing environment for high-risk operations. + Layer 1 active now. + severity: "blocker" + min_severity: "error" + description: > + AI/ML models must be tested in a segregated environment before + deployment. Shadow testing with live traffic is required. + Models must be validated in both stressed and unstressed + market conditions before going live. SEBI explicitly requires + this because live market conditions — volatility spikes, + liquidity crises, unusual order patterns — may expose model + failures that standard testing environments do not surface. 
+ + - id: "SEBI-006" + name: "Continuous Monitoring as AI Models Evolve" + original_id: "SEBI AI/ML Requirement 6" + maps_to: "SUP-003" + obligation_type: provenance + anchor_mechanism: > + SUP-003 versioning drift violation fires on undeclared model + version changes. AnchorRuntime continuous eval provides + post-deployment monitoring. model_version in AuditEntry + per decision enables drift detection. Layer 2 in development. + severity: "error" + min_severity: "error" + description: > + SEBI explicitly notes that AI models may change behavior over + time. Continuous monitoring systems are required beyond + traditional one-time testing. Model drift detection is a + regulatory expectation — not a best practice. A model that + was validated at deployment is not necessarily the same model + six months later, and SEBI requires that organizations have + systems in place to detect and respond to that drift. + + - id: "SEBI-007" + name: "Investor Disclosures for AI-Driven Decisions" + original_id: "SEBI AI/ML Requirement 7" + maps_to: "ETH-002" + obligation_type: disclosure + anchor_mechanism: > + ETH-002 explainability absence detection. adverse_action_reasons() + provides investor-facing reason codes. anchor audit --report + generates disclosure artifacts. Layer 1 active now. + severity: "error" + min_severity: "warning" + description: > + Market participants using AI in customer-facing decisions must + disclose product features, purpose, risks, model accuracy, + fees, and data quality used for decisions. Language must be + comprehensible to investors, not just technical staff. Investor + grievance mechanisms for AI-driven decisions must be established + and documented. + + - id: "SEBI-008" + name: "No Discriminatory AI Outputs — Fairness Mandatory" + original_id: "SEBI AI/ML Requirement 8" + maps_to: "ETH-001" + obligation_type: detection + anchor_mechanism: > + ETH-001 bias and discrimination detection active in Layer 1. 
+ BIAS-* violation category fires on protected attribute usage + in feature vectors and decision outputs. + severity: "error" + min_severity: "error" + description: > + AI/ML models must not favor or discriminate against any group + of clients or customers. Data quality must be sufficiently + broad, relevant, and complete to support fair outcomes. + Processes to identify and remove biases from datasets are + required. Training courses on data bias are mandatory for + data scientists developing AI systems for SEBI-regulated + market participants. + + - id: "SEBI-010" + name: "Third-Party AI Vendor Accountability — No Transfer" + original_id: "SEBI AI/ML Requirement 10" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + Non-repudiation chain — the deploying firm's AuditEntry + signed with their key proves ownership of every AI decision + regardless of which vendor's model produced it. Layer 2 + in development. + severity: "blocker" + min_severity: "blocker" + description: > + Using a third-party AI tool does not transfer regulatory + liability to the vendor. Market participants are solely + responsible for the consequences of deploying any AI tool + regardless of who built it. SLAs with AI vendors must include + regulatory compliance obligations — but even contractual + protections do not transfer the regulatory liability that + SEBI places on the market participant. 
diff --git a/.anchor/government/SEC_Regulations.anchor b/.anchor/government/SEC_Regulations.anchor new file mode 100644 index 0000000..07aae76 --- /dev/null +++ b/.anchor/government/SEC_Regulations.anchor @@ -0,0 +1,157 @@ +type: framework +namespace: USSEC +version: "2026" +anchor_version: ">=4.0.0" +maintainer: "Anchor Core" +opt_in: true +source: "US Securities and Exchange Commission — 2026 Examination Priorities and AI Governance Guidance" +source_url: "https://www.sec.gov/exams/announcement/exam-priorities-2026.pdf" +source_date: "January 2026" +credit: > + US Securities and Exchange Commission 2026 Examination Priorities, + published January 2026 by the SEC Division of Examinations. AI + governance was named the top examination priority for 2026 — + overtaking cryptocurrency for the first time in the agency's + published examination schedule. The SEC has indicated that + 'AI washing' — overstating AI capabilities or governance maturity + — constitutes securities fraud exposure under existing law. + Applies to SEC-registered investment advisers, broker-dealers, + and any firm using AI in securities-related activities. +layer_2_status: > + Rules marked obligation_type: provenance or audit depend on + AnchorRuntime (Layer 2) and the Decision Audit Chain (DAC). + Layer 2 is currently in development. +seal: "sha256:PENDING" + +rules: + + - id: "USSEC-001" + name: "AI Governance — Top Examination Priority 2026" + original_id: "SEC 2026 Examination Priority 1" + maps_to: "LEG-002" + obligation_type: disclosure + anchor_mechanism: > + Sealed constitution.anchor + anchor audit --report generates + the AI governance documentation package that demonstrates + active, enforceable governance to SEC examiners. The SHA-256 + sealed constitution proves the governance framework was in + place and enforced during the examination period. 
+ severity: "blocker" + min_severity: "blocker" + description: > + The SEC Division of Examinations identified AI governance as + the top examination priority for 2026 — the first time in the + agency's history that AI has overtaken cryptocurrency as the + primary focus. SEC-registered firms using AI in investment + advice, trading, compliance, or customer communications are + subject to examination specifically on AI governance practices. + Examiners will review whether firms have adequate policies, + procedures, and controls governing their AI systems — and + whether those controls are actually enforced, not just + documented on paper. Anchor's sealed, version-controlled + governance stack provides the technical evidence that + governance is active and enforceable. + + - id: "USSEC-002" + name: "AI Washing — Securities Fraud Exposure" + original_id: "SEC AI Washing Guidance 2026" + maps_to: "ALN-002" + obligation_type: detection + anchor_mechanism: > + ALN-002 goal misrepresentation fires when AI system behavior + diverges from declared purpose. ETH-002 explainability absence + fires on black-box AI claims without verifiable explanation. + Layer 1 detection active now. + severity: "blocker" + min_severity: "blocker" + description: > + The SEC has indicated that overstating AI capabilities, + misrepresenting AI governance maturity, or claiming AI-driven + investment processes that are not actually AI-driven constitutes + securities fraud exposure under existing law — not just a + regulatory violation. AI washing is the AI equivalent of + greenwashing: making claims about AI usage, accuracy, or + governance that are not substantiated by actual technical + controls. Firms that market AI-powered investment products + must be able to demonstrate that the AI described actually + exists, works as described, and is governed as claimed. + Anchor's audit chain provides the technical proof that + governance claims are substantiated. 
+ + - id: "USSEC-003" + name: "AI in Investment Advice — Fiduciary Obligations" + original_id: "SEC Regulation Best Interest + AI Guidance 2026" + maps_to: "ETH-002" + obligation_type: detection + anchor_mechanism: > + ETH-002 explainability absence fires on AI investment + recommendation code without explainability hooks. + adverse_action_reasons() provides SEC-compliant reason + codes for AI-driven investment recommendations. + Layer 1 active now. + severity: "blocker" + min_severity: "error" + description: > + Investment advisers using AI to generate investment + recommendations must satisfy Regulation Best Interest + obligations — the AI recommendation must be in the best + interest of the customer, not optimized for firm revenue. + The SEC has made clear that using an AI model does not + transfer or dilute the fiduciary obligations of the + registered investment adviser. AI-generated recommendations + must be explainable, traceable, and demonstrably aligned + with the customer's investment profile and risk tolerance. + A black-box AI generating investment advice without + explainability controls fails Regulation Best Interest. + + - id: "USSEC-004" + name: "AI Model Risk in Trading Systems — Audit Trail" + original_id: "SEC 2026 Examination Priority — Trading AI" + maps_to: "DAC-AuditEntry" + obligation_type: provenance + anchor_mechanism: > + DAC AuditEntry records every AI-assisted trading decision + with model_id, model_version, input_hash, output_hash, + timestamp, and chain_hash. Full tamper-evident audit chain + survives legal discovery. Layer 2 in development. + severity: "blocker" + min_severity: "error" + description: > + AI systems used in trading — algorithmic trading, order + routing, risk management, and market surveillance — are + subject to SEC examination specifically on model risk + management and audit trail requirements. 
The SEC expects + firms to maintain records of AI trading decisions sufficient + to reconstruct the circumstances of any trade under review. + An AI trading system that cannot produce a tamper-evident + record of what it decided, when, on what data, and at what + model version cannot satisfy SEC examination requirements + and creates significant regulatory exposure in the event + of a market disruption inquiry. + + - id: "USSEC-005" + name: "Cybersecurity of AI Systems — SEC Rule 10" + original_id: "SEC Cybersecurity Rule (Rule 10) + AI Guidance 2026" + maps_to: "SEC-001" + obligation_type: detection + anchor_mechanism: > + SEC-001 prompt injection, SEC-002 data poisoning, SEC-003 + model tampering, AGT-001 through AGT-005 agentic security + rules provide the technical detection layer for AI-specific + cybersecurity threats under SEC Rule 10. + Layer 1 active now. + severity: "blocker" + min_severity: "error" + description: > + SEC Rule 10 on cybersecurity requires registered firms to + have policies and procedures reasonably designed to address + cybersecurity risks. The SEC's 2026 examination priorities + extend this to AI-specific cybersecurity threats — prompt + injection, model tampering, adversarial attacks, and AI + supply chain compromise. Firms using AI in trading or + investment advisory must demonstrate that their AI systems + are protected against the specific attack vectors that + target AI infrastructure, not just general cybersecurity + threats. Anchor's SEC- and AGT- domain rules satisfy the + technical detection requirement for AI-specific cybersecurity + under SEC Rule 10. 
diff --git a/.anchor/mitigation.anchor b/.anchor/mitigation.anchor index 3fea1c2..239feb4 100644 --- a/.anchor/mitigation.anchor +++ b/.anchor/mitigation.anchor @@ -14,9 +14,9 @@ version: "3.1.0" mitigations: - # --- ANC-001: Hosted Model Leakage --- + # --- SEC-006: Raw Network Access --- - id: "MIT-001-A" - rule_id: "ANC-001" + rule_id: "SEC-006" name: "Public LLM Endpoint Detection" match: type: "regex" @@ -24,11 +24,11 @@ mitigations: pattern: >- ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\b(=\s*["']https?://api\.(openai|anthropic|cohere)\.(com|ai)|openai\.Client|anthropic\.Anthropic\(|cohere\.Client) message: "Direct call to public LLM API detected. Route through a PII-scrubbing proxy." - severity: "blocker" + severity: "error" - # --- ANC-002: Vector Store Leakage --- + # --- SEC-002: Data Poisoning --- - id: "MIT-002-A" - rule_id: "ANC-002" + rule_id: "SEC-002" name: "Unencrypted Vector Store Upsert" match: type: "regex" @@ -39,9 +39,9 @@ mitigations: message: "Vector store write detected without encryption. Embeddings can leak sensitive data via inversion attacks." severity: "error" - # --- ANC-003: Hallucination --- + # --- ALN-001: Hallucination --- - id: "MIT-003-A" - rule_id: "ANC-003" + rule_id: "ALN-001" name: "LLM Output Without Validation" match: type: "regex" @@ -50,9 +50,9 @@ mitigations: message: "LLM API call detected. Ensure output is validated before use (e.g., schema check, grounding)." severity: "error" - # --- ANC-014: Shell Injection --- + # --- SEC-007: Shell Injection (os-level) --- - id: "MIT-014-A" - rule_id: "ANC-014" + rule_id: "SEC-007" name: "Shell Command Execution" match: type: "regex" @@ -61,9 +61,9 @@ mitigations: message: "Potential shell injection via os.system detected. Use subprocess with list arguments instead."
severity: "blocker" - # --- ANC-018: Agent Auth Bypass --- - - id: "MIT-018-A" - rule_id: "ANC-018" + # --- SEC-007: Shell Injection (subprocess-level) --- + - id: "MIT-014-B" + rule_id: "SEC-007" name: "Unsandboxed Subprocess in Agent" match: type: "regex" @@ -73,14 +73,14 @@ mitigations: message: "Native subprocess execution detected. Use Diamond Cage (WASM) sandboxing for agent tools." severity: "blocker" - # --- ANC-023: Credential Harvesting --- - - id: "MIT-023-A" - rule_id: "ANC-023" + # --- SEC-004: Credential Harvesting --- + - id: "MIT-004-A" + rule_id: "SEC-004" name: "Bulk Env Variable Access" match: type: "regex" - # Excludes string literals, comments, and .get() calls + # Only fire on bulk access or sensitive key names pattern: >- - ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\bos\.environ\b(?!\s*\.get|["']) + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*(?:\bos\.environ\.(?:copy|items)\(\)|\bos\.environ\b\s*\[.*(?i:TOKEN|KEY|SECRET|PASSWORD|CREDENTIAL|API).*\]|\{\*\*os\.environ) message: "Broad environment variable access detected. Agents may harvest secrets from env." severity: "error" diff --git a/.anchor/reports/governance_audit.md b/.anchor/reports/governance_audit.md index 61acedd..f20a156 100644 --- a/.anchor/reports/governance_audit.md +++ b/.anchor/reports/governance_audit.md @@ -1,17 +1,25 @@ # Anchor Governance Audit -**Status:** PASSED -**Timestamp:** 2026-03-18 21:55:12 -**Source:** `D:\Anchor` +**Status:** FAILED +**Timestamp:** 2026-03-22 19:59:07 +**Source:** `D:\Anchor\anchor\__init__.py` ## Summary | Category | Count | |---|---| -| Blockers / Errors | 0 | +| Blockers / Errors | 3 | | Warnings | 0 | | Info | 0 | | Suppressed | 0 | -| Files Scanned | 61 | +| Files Scanned | 7 | + +## Active Violations + +| ID | Severity | File | Message | +|---|---|---|---| +| `FINOS-014, SEC-007` | **BLOCKER** | `anchor/core/engine.py:54` | Native subprocess execution detected. Use Diamond Cage (WASM) sandboxing for agent tools.
| +| `FINOS-014, SEC-007` | **BLOCKER** | `anchor/core/engine.py:558` | Native subprocess execution detected. Use Diamond Cage (WASM) sandboxing for agent tools. | +| `FINOS-014, SEC-007` | **BLOCKER** | `test_vuln.py:2` | Native subprocess execution detected. Use Diamond Cage (WASM) sandboxing for agent tools. | > *Suppressed exceptions are authorized security bypasses — verify authors are correct.* diff --git a/anchor/__init__.py b/anchor/__init__.py index ef1d103..ee3b6a2 100644 --- a/anchor/__init__.py +++ b/anchor/__init__.py @@ -2,4 +2,4 @@ Anchor-Audit — The Federated Governance Engine for AI """ -__version__ = "4.1.1" +__version__ = "4.1.4" diff --git a/anchor/cli.py b/anchor/cli.py index 57eb19c..8f61035 100644 --- a/anchor/cli.py +++ b/anchor/cli.py @@ -18,6 +18,7 @@ from anchor import __version__ +__version__ = "4.1.4" @click.group() @click.version_option(version=__version__) @@ -154,7 +155,7 @@ def init(domains, frameworks, regulators, sandbox, all_items, force, no_sign, po dot_anchor = ".anchor" click.echo("") - click.secho("⚓ Anchor V4 — init", fg="cyan", bold=True) + click.secho("Anchor V4 - init", fg="cyan", bold=True) click.echo("") if all_items: @@ -233,13 +234,13 @@ def copy_file(relative_path, label): dst = os.path.join(dot_anchor, relative_path) os.makedirs(os.path.dirname(dst), exist_ok=True) if not os.path.exists(src): - click.secho(f" ✗ Not found in package: {relative_path}", fg="red") + click.secho(f" [FAIL] Not found in package: {relative_path}", fg="red") return False if os.path.exists(dst) and not force: - click.secho(f" ~ Already exists: {label}", fg="yellow") + click.secho(f" [SKIP] Already exists: {label}", fg="yellow") return True shutil.copy2(src, dst) - click.secho(f" ✓ {label}", fg="green") + click.secho(f" [OK] {label}", fg="green") return True # ── Copy domain files ───────────────────────────────────── @@ -300,17 +301,6 @@ def copy_file(relative_path, label): # 1. Can only RAISE severity (ERROR -> BLOCKER is allowed) # 2. 
Cannot LOWER severity — the floor is absolute # 3. Cannot suppress constitutional rules -# 4. Can add INTERNAL-* prefixed custom rules -# -# Reference: .anchor/constitution.anchor.example -# ============================================================================= - -version: "4.0" - -metadata: - project: "{os.path.basename(os.getcwd())}" - -overrides: # Example: raise SEC-006 from error to blocker # - id: SEC-006 # severity: blocker @@ -331,7 +321,7 @@ def copy_file(relative_path, label): with open(policy_path, "w", encoding="utf-8") as f: f.write(policy_template) click.echo("") - click.secho(f" ✓ Created {policy_path}", fg="green") + click.secho(f" [OK] Created {policy_path}", fg="green") # ── Update .gitignore ───────────────────────────────────── # V4 Decision: .anchor/ should be committed, excluding cache and temp @@ -397,7 +387,7 @@ def copy_file(relative_path, label): os.chmod(pre_commit_path, 0o755) except Exception: pass - click.secho(" ✓ Git pre-commit hook installed", fg="green") + click.secho(" [OK] Git pre-commit hook installed", fg="green") except Exception as e: click.secho(f" WARNING: Could not install git hook: {e}", fg="yellow") @@ -415,7 +405,7 @@ def copy_file(relative_path, label): lock_path = os.path.join(dot_anchor, ".anchor.lock") with open(lock_path, "w", encoding="utf-8") as f: f.write(remote_lock) - click.secho(" ✓ Fetched GOVERNANCE.lock from remote", fg="green") + click.secho(" [OK] Fetched GOVERNANCE.lock from remote", fg="green") except urllib.error.URLError as e: click.secho(f" WARNING: Could not fetch GOVERNANCE.lock remotely: {e.reason}", fg="yellow") except Exception as e: @@ -430,7 +420,7 @@ def copy_file(relative_path, label): # ── Summary ─────────────────────────────────────────────── click.echo("") - click.secho(" " + "─" * 40, fg="bright_black") + click.secho(" " + "-" * 40, fg="bright_black") click.secho(f" {len(requested_domains)} domain(s) loaded", fg="white") if requested_frameworks: click.secho(f" 
{len(requested_frameworks)} framework(s) loaded", fg="white") @@ -503,10 +493,10 @@ def sync(restore): os.makedirs(os.path.dirname(local_path), exist_ok=True) with open(local_path, "wb") as bf: bf.write(content) - click.secho(f" ✓ Restored {rel_path}", fg="green") + click.secho(f" [OK] Restored {rel_path}", fg="green") restored_count += 1 except Exception as e: - click.secho(f" ✗ Failed to fetch {rel_path}: {e}", fg="red") + click.secho(f" [FAIL] Failed to fetch {rel_path}: {e}", fg="red") click.secho(f"\nSync complete. Restored {restored_count} files to authoritative state.", fg="cyan", bold=True) @@ -657,6 +647,7 @@ def check(ctx, policy, paths, dir, model, metadata, context, server_mode, genera "severity": rule.severity, "description": rule.description, "category": rule.category, + "maps_to": rule.maps_to, # detection fields populated below from mitigation.anchor "match": None, "pattern": None, @@ -729,21 +720,7 @@ def check(ctx, policy, paths, dir, model, metadata, context, server_mode, genera if verbose: click.secho(f" [!] Failed to load mitigation patterns: {e}", fg="yellow") - # B.2. Register virtual aliases for legacy IDs (ANC-NNN) after patterns are merged - if loaded and loaded.alias_chain: - for alias_id, canonical_id in loaded.alias_chain.items(): - if canonical_id in rule_dict: - if alias_id not in rule_dict: - # Create virtual copy - alias_entry = rule_dict[canonical_id].copy() - alias_entry["id"] = alias_id - rule_dict[alias_id] = alias_entry - else: - # Sync pattern from canonical to existing alias if needed - can_rule = rule_dict[canonical_id] - for field in ["match", "pattern", "message", "mitigation"]: - if can_rule.get(field) and not rule_dict[alias_id].get(field): - rule_dict[alias_id][field] = can_rule[field] + # B.2 (Removed: Aliases are now handled by the engine's ID aggregation) # C. 
Load and merge local risk catalogs from patterns/ (unchanged from V3) patterns_dir = os.path.join(os.getcwd(), "patterns") @@ -774,11 +751,8 @@ def check(ctx, policy, paths, dir, model, metadata, context, server_mode, genera if verbose: click.secho(f" ! Failed to load {file}: {e}", fg="yellow") - # Remove rules with no detection capability — engine can't enforce them yet - master_rules = [ - r for r in rule_dict.values() - if r.get("match") or r.get("pattern") - ] + # Retain all rules (even those without patterns) so the engine can resolve Multi-ID associations + master_rules = list(rule_dict.values()) if verbose: click.echo(f" {len(master_rules)} enforceable rules ready.") @@ -1022,7 +996,9 @@ def _sev_score(v): return severity_map.get(v.get('severity', '').lower(), 0) f.write(f"{tag} [{v['id']}] {v['name']} ({v['severity'].upper()})\n") f.write(f" Location: {v['file']}:{v['line']}\n") f.write(f" Message: {v['message']}\n") - f.write(f" Details: {v.get('description', 'No further details.')}\n") + # Use the rule's specific internal description or fallback + description = v.get('description', 'No further details.') + f.write(f" Details: {description}\n") f.write(f" Fix: {v.get('mitigation', 'N/A')}\n") try: suggestion = suggest_fix(v) @@ -1148,7 +1124,9 @@ def _sev_score(v): return severity_map.get(v.get('severity', '').lower(), 0) for v in sorted_violations[:display_limit]: severity_color = "red" if v['severity'] in ['critical', 'blocker', 'error'] else "yellow" - click.secho(f"[{v['id']}] {v['name']} ({v['severity'].upper()})", fg=severity_color, bold=True) + id_display = v['id'] + + click.secho(f"[{id_display}] {v['name']} ({v['severity'].upper()})", fg=severity_color, bold=True) click.echo(f" Location: {v['file']}:{v['line']}") if v.get('mitigation'): click.secho(f" Fix: {v['mitigation']}", fg="cyan", dim=True) diff --git a/anchor/core/constitution.py b/anchor/core/constitution.py index 23f79b3..a0cf3d7 100644 --- a/anchor/core/constitution.py +++ 
b/anchor/core/constitution.py @@ -21,7 +21,7 @@ # SHA-256 of the official legacy files (optional in V3). CONSTITUTION_SHA256 = "E292674E571C32273E5C227DFD5F77379B5C15E07E6272C228C39BF91B5C8D79" -MITIGATION_SHA256 = "E38500AB08E5071B258B2508DBA84D230D03DB4F17949D348E9219D80F77C7BE" +MITIGATION_SHA256 = "D71DE885992ADF5DE87B6093D64D20F45156674CB85BFAFC6A0492DA40A3DF86" # ============================================================================= diff --git a/anchor/core/engine.py b/anchor/core/engine.py index 45faf0c..4e94faa 100644 --- a/anchor/core/engine.py +++ b/anchor/core/engine.py @@ -51,7 +51,7 @@ def _get_suppression_author(self, file_path: str, line_num: int) -> str: # Normalize path for git on Windows norm_path = file_path.replace("\\", "/") cmd = ["git", "blame", "-L", f"{line_num},{line_num}", "--porcelain", norm_path] - result = subprocess.run(cmd, capture_output=True, text=True, check=False) # anchor: ignore ANC-018 + result = subprocess.run(cmd, capture_output=True, text=True, check=False) # anchor: ignore SEC-007 if result.returncode == 0: for line in result.stdout.splitlines(): if line.startswith("author "): @@ -264,16 +264,23 @@ def scan_file(self, content: bytes, file_path: str, adapter: LanguageAdapter) -> found = self._check_regex(content.decode('utf-8', errors='ignore'), pattern, rule_id=rule.get("id")) for line_num, match_text in found: is_suppressed = False + # Aggregate IDs (Canonical + active Frameworks/Regulators) + matching_ids = [rule['id']] + if hasattr(self, 'rules'): + for other in self.rules: + if other.get('maps_to') == rule['id']: matching_ids.append(other['id']) + v_id = ", ".join(sorted(list(set(matching_ids)))) + if self.allow_suppressions: - if f"# anchor: ignore {rule.get('id')}" in match_text or "# anchor: ignore-all" in match_text: + if any(f"# anchor: ignore {rid}" in match_text for rid in matching_ids) or "# anchor: ignore-all" in match_text: author = self._get_suppression_author(file_path, line_num) suppressed.append({ 
- "id": rule["id"], "name": rule.get("name"), "file": file_path, "line": line_num, "author": author, "severity": rule.get("severity", "error") + "id": v_id, "name": rule.get("name"), "file": file_path, "line": line_num, "author": author, "severity": rule.get("severity", "error") }) is_suppressed = True if not is_suppressed: violations.append({ - "id": rule["id"], "name": rule.get("name"), "description": rule.get("description"), "message": rule.get("message"), "mitigation": rule.get("mitigation"), "file": file_path, "line": line_num, "severity": rule.get("severity", "error") + "id": v_id, "name": rule.get("name"), "description": rule.get("description"), "message": rule.get("message"), "mitigation": rule.get("mitigation"), "file": file_path, "line": line_num, "severity": rule.get("severity", "error") }) continue # Regex handled, skip AST logic else: @@ -370,8 +377,15 @@ def scan_file(self, content: bytes, file_path: str, adapter: LanguageAdapter) -> if is_suppressed: continue + # Aggregate IDs (Canonical + Frameworks) + matching_ids = [rule['id']] + if hasattr(self, 'rules'): + for other in self.rules: + if other.get('maps_to') == rule['id']: matching_ids.append(other['id']) + v_id = ", ".join(sorted(list(set(matching_ids)))) + violations.append({ - "id": rule["id"], + "id": v_id, "name": rule.get("name", "Unnamed Rule"), "description": rule.get("description", "No description provided."), "message": rule.get("message", "Policy Violation"), @@ -541,7 +555,7 @@ def _get_suppression_author(self, file_path: str, line_num: int) -> str: # -L , : only blame the specified line # --porcelain : machine-readable format cmd = ["git", "blame", "-L", f"{line_num},{line_num}", "--porcelain", abs_path] - result = subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=2) # anchor: ignore ANC-018 + result = subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=2) # anchor: ignore SEC-007 # 3. 
Parse author from porcelain output for line in result.stdout.splitlines(): diff --git a/anchor/core/loader.py b/anchor/core/loader.py index ee7df21..94918d1 100644 --- a/anchor/core/loader.py +++ b/anchor/core/loader.py @@ -412,14 +412,10 @@ def resolve_path(rel_path: str) -> Path: constitution.errors.append(str(e)) # ── STEP 5: Build alias chain ───────────────────────────── + # Maps ANC-009 -> FINOS-009 -> SEC-001 for alias_id, target_id in manifest.legacy_aliases.items(): - # Walk the chain to find the canonical rule and max severity current_id = alias_id visited = {alias_id} - max_severity = "info" - canonical_id = None - - # We start with the target defined in legacy_aliases next_id = target_id while next_id: @@ -428,45 +424,21 @@ def resolve_path(rel_path: str) -> Path: visited.add(next_id) if next_id in constitution.rules: - rule = constitution.rules[next_id] - # Update max severity - if severity_gte(rule.severity, max_severity): - max_severity = rule.severity - - # Treat this rule as our canonical alias target - canonical_id = next_id - break # End of chain + # Target rule found! 
+ constitution.alias_chain[alias_id] = next_id + break elif next_id in manifest.legacy_aliases: next_id = manifest.legacy_aliases[next_id] else: - # Target not found in rules or aliases break - - if canonical_id and canonical_id in constitution.rules: - constitution.alias_chain[alias_id] = canonical_id - - # Create virtual rule - target = constitution.rules[canonical_id] - alias_rule = Rule( - id=alias_id, - name=target.name, - namespace=target.namespace, - severity=max_severity, # Use inherited max severity - min_severity=target.min_severity, - description=target.description, - category=target.category, - maps_to=canonical_id, - obligation_type=target.obligation_type, - anchor_mechanism=target.anchor_mechanism, - source_file=target.source_file, - original_id=target.original_id, - v3_id=target.v3_id, - ) - constitution.rules[alias_id] = alias_rule - else: - constitution.errors.append( - f"Could not resolve alias chain: {alias_id} → {target_id}" - ) + + # Also include maps_to relations from frameworks/regulators in the alias chain + for rid, rule in constitution.rules.items(): + if rule.maps_to and rule.maps_to in constitution.rules: + # If a rule maps to another (e.g. 
FINOS-014 -> SEC-007) + # we treat it as an alias for reporting purposes + if rid not in constitution.alias_chain: + constitution.alias_chain[rid] = rule.maps_to # ── STEP 6: Load policy.anchor ──────────────────────────── if anchor_dir: diff --git a/anchor/governance/mitigation.anchor b/anchor/governance/mitigation.anchor index 3fea1c2..239feb4 100644 --- a/anchor/governance/mitigation.anchor +++ b/anchor/governance/mitigation.anchor @@ -14,9 +14,9 @@ version: "3.1.0" mitigations: - # --- ANC-001: Hosted Model Leakage --- + # --- SEC-006: Raw Network Access --- - id: "MIT-001-A" - rule_id: "ANC-001" + rule_id: "SEC-006" name: "Public LLM Endpoint Detection" match: type: "regex" @@ -24,11 +24,11 @@ mitigations: pattern: >- ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\b(=\s*["']https?://api\.(openai|anthropic|cohere)\.(com|ai)|openai\.Client|anthropic\.Anthropic\(|cohere\.Client) message: "Direct call to public LLM API detected. Route through a PII-scrubbing proxy." - severity: "blocker" + severity: "error" - # --- ANC-002: Vector Store Leakage --- + # --- SEC-002: Data Poisoning --- - id: "MIT-002-A" - rule_id: "ANC-002" + rule_id: "SEC-002" name: "Unencrypted Vector Store Upsert" match: type: "regex" @@ -39,9 +39,9 @@ mitigations: message: "Vector store write detected without encryption. Embeddings can leak sensitive data via inversion attacks." severity: "error" - # --- ANC-003: Hallucination --- + # --- ALN-001: Hallucination --- - id: "MIT-003-A" - rule_id: "ANC-003" + rule_id: "ALN-001" name: "LLM Output Without Validation" match: type: "regex" @@ -50,9 +50,9 @@ mitigations: message: "LLM API call detected. Ensure output is validated before use (e.g., schema check, grounding)." 
severity: "error" - # --- ANC-014: Shell Injection --- + # --- SEC-007: Shell Injection (os-level) --- - id: "MIT-014-A" - rule_id: "ANC-014" + rule_id: "SEC-007" name: "Shell Command Execution" match: type: "regex" @@ -61,9 +61,9 @@ message: "Potential shell injection via os.system detects. Use subprocess with list arguments instead." severity: "blocker" - # --- ANC-018: Agent Auth Bypass --- - - id: "MIT-018-A" - rule_id: "ANC-018" + # --- SEC-007: Shell Injection (subprocess-level) --- + - id: "MIT-014-B" + rule_id: "SEC-007" name: "Unsandboxed Subprocess in Agent" match: type: "regex" @@ -73,14 +73,14 @@ message: "Native subprocess execution detected. Use Diamond Cage (WASM) sandboxing for agent tools." severity: "blocker" - # --- ANC-023: Credential Harvesting --- - - id: "MIT-023-A" - rule_id: "ANC-023" + # --- SEC-004: Credential Harvesting --- + - id: "MIT-004-A" + rule_id: "SEC-004" name: "Bulk Env Variable Access" match: type: "regex" - # Excludes string literals, comments, and .get() calls + # Only fire on bulk access or sensitive key names pattern: >- - ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*\bos\.environ\b(?!\s*\.get|["']) + ^(?:[^"\'#]|(["\'])(?:(?!\1).|\\\1)*\1)*(?:\bos\.environ\.(?:copy|items)\(\)|\bos\.environ\s*\[[^\]]*(?i:TOKEN|KEY|SECRET|PASSWORD|CREDENTIAL|API)[^\]]*\]|\{\s*\*\*os\.environ) message: "Broad environment variable access detected. Agents may harvest secrets from env." severity: "error" diff --git a/setup.py b/setup.py index 23c6a3d..9e98a52 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name="anchor-audit", - version="4.1.1", + version="4.1.4", description="The Federated Governance Engine for AI (Universal Multi-Language)", long_description=long_description, long_description_content_type="text/markdown",