tractatus/public/locales/en/leader.json
TheFlow 2238547344 refactor: Rename "Home AI" → "Village AI" across entire codebase
- 57 files modified, 5 files renamed (home-ai → village-ai)
- HTML pages: all user-facing text, data-i18n attributes, anchor IDs, CSS classes
- i18n JSON: keys (home_ai → village_ai) and values across en/de/fr/mi
- Locale files renamed: home-ai.json → village-ai.json (4 languages)
- Main page renamed: home-ai.html → village-ai.html
- Research downloads: translated terms updated (French "IA domestique",
  Māori "AI ā-whare"/"AI kāinga" → "Village AI" per brand name rule)
- JavaScript: navbar component, blog post scripts
- Markdown: research timeline, steering vectors paper, taonga paper

Aligns with community codebase rename (commit 21ab7bc0).
"Village" is a brand name — stays untranslated in all languages.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-23 22:09:44 +13:00

255 lines
22 KiB
JSON

{
"page": {
"title": "For Decision-Makers | Tractatus AI Safety Framework",
"description": "Structural AI governance for organisations deploying LLM systems at scale. Research framework addressing architectural gaps in AI safety."
},
"header": {
"badge": "Research Framework • Production Validated",
"title": "Tractatus: Architectural Governance for LLM Systems",
"subtitle": "Architectural governance for organisations where AI governance failure triggers regulatory consequences. If your deployment is low-risk, architectural enforcement is likely unnecessary."
},
"sections": {
"governance_gap": {
"heading": "The Governance Gap",
"intro": "Current AI governance approaches—policy documents, training programmes, ethical guidelines—rely on voluntary compliance. LLM systems can bypass these controls simply by not invoking them. When an AI agent needs to check a policy, it must choose to do so. When it should escalate a decision to human oversight, it must recognise that obligation.",
"problem": "This creates a structural problem: governance exists only insofar as the AI acknowledges it. For organisations subject to EU AI Act Article 14 (human oversight requirements) or deploying AI in high-stakes domains, this voluntary model is inadequate.",
"solution": "Tractatus explores whether governance can be made architecturally external—difficult to bypass not through better prompts, but through system design that places control points outside the AI's discretion."
},
"architectural_approach": {
"heading": "Architectural Approach",
"three_layer_title": "Three-Layer Architecture",
"services_title": "Six Governance Services",
"arch_layers": {
"layer_1_title": "Agent Runtime Layer",
"layer_1_desc": "Any LLM system (Claude Code, Copilot, custom agents, LangChain, CrewAI). The AI system being governed.",
"layer_2_title": "Governance Layer",
"layer_2_desc": "Six autonomous services that intercept, validate, and document AI operations. External to the AI runtime.",
"layer_3_title": "Persistent Storage Layer",
"layer_3_desc": "Immutable audit logs, governance rules, instruction history. Cannot be altered by AI prompts."
},
"services": {
"service_1_title": "BoundaryEnforcer",
"service_1_desc": "Blocks AI from making values decisions without human approval. Enforces decision boundaries through architectural controls.",
"service_2_title": "InstructionPersistenceClassifier",
"service_2_desc": "Prevents pattern bias from overriding explicit instructions. Stores organisational directives external to AI context.",
"service_3_title": "CrossReferenceValidator",
"service_3_desc": "Validates AI actions against stored policies before execution. Detects conflicts with established rules.",
"service_4_title": "ContextPressureMonitor",
"service_4_desc": "Tracks session complexity, token usage, conversation length. Detects degradation in decision quality.",
"service_5_title": "MetacognitiveVerifier",
"service_5_desc": "Validates reasoning quality before complex operations. Self-checks alignment, coherence, alternatives.",
"service_6_title": "PluralisticDeliberationOrchestrator",
"service_6_desc": "Facilitates multi-stakeholder deliberation for values conflicts. Non-hierarchical engagement with documented dissent."
}
},
"governance_capabilities": {
"heading": "Governance Capabilities",
"intro": "Three interactive demonstrations showing governance infrastructure in operation. These show mechanisms, not fictional scenarios.",
"audit_trail_title": "Audit Trail & Compliance Evidence Generation",
"audit_trail_desc": "Immutable logging, evidence extraction, regulatory reporting",
"continuous_improvement_title": "Continuous Improvement: Incident → Rule Creation",
"continuous_improvement_desc": "Learning from failures, automated rule generation, validation",
"pluralistic_deliberation_title": "Pluralistic Deliberation: Values Conflict Resolution",
"pluralistic_deliberation_desc": "Multi-stakeholder engagement, non-hierarchical process, moral remainder documentation",
"sample_heading": "Sample Audit Log Structure",
"immutability_label": "Immutability:",
"immutability_text": "Audit logs stored in append-only database. AI cannot modify or delete entries.",
"compliance_label": "Compliance Evidence:",
"compliance_text": "Automatic tagging with regulatory requirements (EU AI Act Article 14, GDPR Article 22, etc.)",
"export_label": "Export Capabilities:",
"export_text": "Generate compliance reports for regulators showing human oversight enforcement",
"footer_text": "When a regulator asks \"How do you prove effective human oversight at scale?\", this audit trail provides structural evidence independent of AI cooperation.",
"flow_heading": "Incident Learning Flow",
"step_1_desc": "CrossReferenceValidator flags policy violation",
"step_2_desc": "Automated analysis of instruction history, context state",
"step_3_desc": "Proposed governance rule to prevent recurrence",
"step_4_desc": "Governance board reviews and approves new rule",
"step_5_desc": "Rule added to persistent storage, active immediately",
"example_heading": "Example Generated Rule",
"learning_label": "Organisational Learning:",
"learning_text": "When one team encounters governance failure, entire organisation benefits from automatically generated preventive rules. Scales governance knowledge without manual documentation.",
"conflict_label": "Conflict Detection:",
"conflict_text": "AI system identifies competing values in decision context (e.g., efficiency vs. transparency, cost vs. risk mitigation, innovation vs. regulatory compliance). BoundaryEnforcer blocks autonomous decision, escalates to PluralisticDeliberationOrchestrator.",
"stakeholder_heading": "Stakeholder Identification Process",
"stakeholder_1": "Automatic Detection: System identifies which values frameworks are in tension (utilitarian, deontological, virtue ethics, contractarian, etc.)",
"stakeholder_2": "Stakeholder Mapping: Identifies parties with legitimate interest in decision (affected parties, domain experts, governance authorities, community representatives)",
"stakeholder_3": "Human Approval: Governance board reviews stakeholder list, adds/removes as appropriate (TRA-OPS-0002)",
"deliberation_heading": "Non-Hierarchical Deliberation",
"equal_voice_title": "Equal Voice",
"equal_voice_text": "All stakeholders present perspectives without hierarchical weighting. Technical experts do not automatically override community concerns.",
"dissent_title": "Documented Dissent",
"dissent_text": "Minority positions recorded in full. Dissenting stakeholders can document why consensus fails their values framework.",
"moral_title": "Moral Remainder",
"moral_text": "System documents unavoidable value trade-offs. Even correct decision creates documented harm to other legitimate values.",
"precedent_title": "Precedent (Not Binding)",
"precedent_text": "Decision becomes informative precedent for similar conflicts. But context differences mean precedents guide, not dictate.",
"record_heading": "Deliberation Record Structure",
"key_principle": "Key Principle: When legitimate values conflict, no algorithm can determine the \"correct\" answer. Tractatus provides architecture for decisions to be made through inclusive deliberation with full documentation of trade-offs, rather than AI imposing single values framework or decision-maker dismissing stakeholder concerns."
},
"development_status": {
"heading": "Development Status",
"warning_title": "Production-Validated Research Framework",
"warning_text": "Tractatus has been in active development for 11+ months (April 2025 to present) with production deployment at Village Home Trust, sovereign language model governance through Village AI, and over 171,800 audit decisions recorded. Independent validation and red-team testing remain outstanding research needs.",
"validation_title": "Validated vs. Not Validated",
"validated_label": "Validated:",
"validated_text": "Framework successfully governs Claude Code in development workflows. User reports order-of-magnitude improvement in productivity for non-technical operators building production systems.",
"not_validated_label": "Not Validated:",
"not_validated_text": "Performance at enterprise scale, integration complexity with existing systems, effectiveness against adversarial prompts, cross-platform consistency.",
"limitation_label": "Known Limitation:",
"limitation_text": "Framework can be bypassed if AI simply chooses not to use governance tools. Voluntary invocation remains a structural weakness requiring external enforcement mechanisms."
},
"village_ai": {
"heading": "Sovereign AI: Governance Embedded in Locally-Trained Models",
"intro": "Village AI demonstrates what it means to have governance embedded directly in locally-trained language models — not as an external compliance layer, but as part of the model serving architecture itself.",
"architecture_title": "Two-Model Architecture",
"arch_fast": "<strong>Fast model (3B parameters):</strong> Routine queries with governance pre-screening",
"arch_deep": "<strong>Deep model (8B parameters):</strong> Complex reasoning with full governance pipeline",
"arch_local": "<strong>Fully local:</strong> Training data never leaves the infrastructure",
"strategic_title": "Strategic Value",
"strat_sovereignty": "<strong>Data sovereignty:</strong> No cloud dependency for model training or inference",
"strat_governance": "<strong>Governance by design:</strong> Constraints are architectural, not retroactive compliance",
"strat_regulatory": "<strong>Regulatory positioning:</strong> Structurally stronger than bolt-on governance approaches",
"status": "<strong>Current status:</strong> Inference governance operational. Training pipeline installation in progress. First non-Claude deployment surface for Tractatus governance.",
"cta": "Learn about Village AI →"
},
"taonga": {
"heading": "Polycentric Governance for Indigenous Data Sovereignty",
"intro": "For organisations with indigenous stakeholder obligations or multi-jurisdictional operations, Tractatus is developing a polycentric governance architecture where communities maintain architectural co-governance — not just consultation rights, but structural authority over how their data is used.",
"status_label": "Status:",
"status_text": "Draft paper (STO-RES-0010 v0.1) in indigenous peer review. Written without Māori co-authorship — presented transparently as a starting point for collaboration. This approach requires further peer review before implementation.",
"relevance": "<strong>Relevant for:</strong> Organisations operating in Aotearoa New Zealand, Australia, Canada, or other jurisdictions with indigenous data sovereignty obligations. Also applicable to any multi-stakeholder governance context where different parties require different levels of control over shared AI systems.",
"research_link": "Research details →",
"paper_link": "Read the draft paper"
},
"steering_vectors": {
"heading": "Inference-Time Bias Correction (Steering Vectors)",
"summary": "New research (STO-RES-0009, published February 2026) demonstrates techniques for correcting bias at inference time without model retraining. For organisations concerned about bias in deployed AI systems, steering vectors offer the ability to respond to bias concerns without model downtime — corrections are applied as mathematical adjustments during inference, not through expensive retraining cycles.",
"link": "Technical details on the researcher page →"
},
"eu_ai_act": {
"heading": "EU AI Act Considerations",
"article_14_title": "Regulation 2024/1689, Article 14: Human Oversight",
"intro": "The EU AI Act (Regulation 2024/1689) establishes human oversight requirements for high-risk AI systems (Article 14). Organisations must ensure AI systems are effectively overseen by natural persons with authority to interrupt or disregard AI outputs.",
"addresses": "Tractatus addresses this through architectural controls that:",
"bullet_1": "Generate immutable audit trails documenting AI decision-making processes",
"bullet_2": "Enforce human approval requirements for values-based decisions",
"bullet_3": "Provide evidence of oversight mechanisms independent of AI cooperation",
"bullet_4": "Document compliance with transparency and record-keeping obligations",
"disclaimer": "This does not constitute legal compliance advice. Organisations should evaluate whether these architectural patterns align with their specific regulatory obligations in consultation with legal counsel.",
"penalties": "Maximum penalties under EU AI Act: 35 million euros or 7 percent of global annual turnover (whichever is higher) for prohibited AI practices; 15 million euros or 3 percent for other violations."
},
"research_foundations": {
"heading": "Research Foundations",
"org_theory_title": "Organisational Theory & Philosophical Basis",
"intro": "Tractatus draws on 40+ years of organisational theory research: time-based organisation (Bluedorn, Ancona), knowledge orchestration (Crossan), post-bureaucratic authority (Laloux), structural inertia (Hannan & Freeman).",
"premise": "Core premise: When knowledge becomes ubiquitous through AI, authority must derive from appropriate time horizon and domain expertise rather than hierarchical position. Governance systems must orchestrate decision-making across strategic, operational, and tactical timescales.",
"view_pdf": "View complete organisational theory foundations (PDF)",
"ai_safety_title": "AI Safety Research: Architectural Safeguards Against LLM Hierarchical Dominance",
"ai_safety_desc": "How Tractatus protects pluralistic values from AI pattern bias while maintaining safety boundaries.",
"pdf_link": "PDF",
"read_online": "Read online"
},
"scope_limitations": {
"heading": "Scope & Limitations",
"title": "What This Is Not • What It Offers",
"not_title": "Tractatus is not:",
"offers_title": "What it offers:",
"not_1": "An AI safety solution for all contexts",
"not_2": "Independently validated or security-audited",
"not_3": "Tested against adversarial attacks",
"not_4": "Validated across multiple organisations",
"not_5": "A substitute for legal compliance review",
"not_6": "A commercial product (research framework, Apache 2.0 licence)",
"offers_1": "Architectural patterns for external governance controls",
"offers_2": "Reference implementation demonstrating feasibility",
"offers_3": "Foundation for organisational pilots and validation studies",
"offers_4": "Evidence that structural approaches to AI safety merit investigation"
},
"target_audience": {
"heading": "Target Audience",
"primary": "Organisations with high-consequence AI deployments facing regulatory obligations: EU AI Act Article 14 (human oversight), GDPR Article 22 (automated decision-making), SOC 2 CC6.1 (logical access controls), sector-specific regulations.",
"disclaimer": "If AI governance failure in your context is low-consequence and easily reversible, architectural enforcement adds complexity without commensurate benefit. Policy-based governance may be more appropriate."
},
"governance_assessment": {
"heading": "Governance Theatre vs. Enforcement",
"intro": "Many organisations have AI governance but lack enforcement. The diagnostic question:",
"question": "\"What structurally prevents your AI from executing values decisions without human approval?\"",
"answer_theatre": "If your answer is \"policies\" or \"training\" or \"review processes\": You have governance theatre (voluntary compliance)",
"answer_enforcement": "If your answer is \"architectural blocking mechanism with audit trail\": You have enforcement (Tractatus is one implementation)",
"consequence": "Theatre may be acceptable if governance failures are low-consequence. Enforcement becomes relevant when failures trigger regulatory exposure, safety incidents, or existential business risk.",
"template_link": "Assessment Framework: Business Case Template (PDF)"
}
},
"footer": {
"assessment_resources": "Assessment Resources",
"intro": "If your regulatory context or risk profile suggests architectural governance may be relevant, these resources support self-evaluation:",
"business_case": "Business Case Template",
"business_case_desc": "Assessment framework for evaluating whether architectural governance addresses your regulatory obligations",
"leadership_questions": "Common Leadership Questions",
"leadership_questions_desc": "Governance theatre vs enforcement, investment justification, risk assessment frameworks",
"technical_docs": "Technical Documentation",
"technical_docs_desc": "Organisational theory basis, empirical observations, validation studies",
"research_foundations": "Research Foundations",
"research_foundations_desc": "Organisational theory basis, empirical observations, validation studies",
"evaluation_note": "Evaluation Process: Organisations assessing Tractatus typically follow: (1) Technical review of architectural patterns, (2) Pilot deployment in development environment, (3) Context-specific validation with legal counsel, (4) Decision whether patterns address specific regulatory/risk requirements.",
"contact_note": "Project information and contact details: About page"
},
"share_cta": {
"heading": "Help us reach the right people.",
"description": "If you know researchers, implementers, or leaders who need structural AI governance solutions, share this with them.",
"copy_link": "Copy Link",
"email": "Email",
"linkedin": "LinkedIn"
},
"alexander_leadership": {
"heading": "Why Architectural Governance Matters",
"subtitle": "Built on living systems principles from Christopher Alexander—governance that evolves with your organisation",
"differentiator": {
"heading": "Strategic Differentiator: Not Compliance Theatre",
"compliance_theatre": "Compliance theatre relies on documented policies, training programmes, and post-execution reviews. AI can bypass controls, enforcement is voluntary, and audit trails show what should happen, not what did happen.",
"architectural_enforcement": "Architectural enforcement (Tractatus) weaves governance into deployment architecture. Services intercept actions before execution in the critical path—bypasses require explicit --no-verify flags and are logged. Audit trails prove real-time enforcement, not aspirational policy."
},
"principles_heading": "Five Principles for Competitive Advantage",
"principles": {
"deep_interlock": {
"title": "Deep Interlock",
"description": "Six governance services coordinate in real-time. When one detects risk, others reinforce—resilient enforcement through mutual validation, not isolated checks.",
"business_value": "Business Value: Single service failure doesn't compromise governance. Redundant enforcement layer."
},
"structure_preserving": {
"title": "Structure-Preserving",
"description": "Framework changes maintain audit continuity. Historical governance decisions remain interpretable—institutional memory preserved across evolution.",
"business_value": "Business Value: Regulatory audit trail remains valid. No \"governance migration\" breaking compliance records."
},
"gradients": {
"title": "Gradients Not Binary",
"description": "Governance operates on intensity levels (NORMAL/ELEVATED/HIGH/CRITICAL)—nuanced response to risk, not mechanical yes/no.",
"business_value": "Business Value: Avoids alert fatigue and over-enforcement. Matches governance intensity to actual risk level."
},
"living_process": {
"title": "Living Process",
"description": "Framework evolves from operational failures, not predetermined plans. Adaptive resilience—learns from real incidents.",
"business_value": "Business Value: Continuous improvement without governance migration. System gets smarter through use."
},
"not_separateness": {
"title": "Not-Separateness",
"description": "Governance woven into deployment architecture, integrated into the critical execution path. Not bolt-on compliance layer—enforcement is structural.",
"business_value": "Business Value: Bypasses require explicit flags and are logged. Enforcement happens before actions execute, not after."
}
},
"regulatory": {
"heading": "Regulatory Positioning",
"intro": "These architectural characteristics position organisations ahead of the \"we have policies\" baseline when demonstrating governance to regulators:",
"evidence_heading": "Audit Evidence Provided by Tractatus:",
"evidence_items": [
"Audit trail showing governance enforcement before actions executed (not aspirational policy documentation)",
"Verifiable service coordination patterns (deep interlock logs proving mutual validation)",
"Continuous structural enforcement (not periodic compliance reviews)",
"Historical governance decision continuity (structure-preserving changes maintain audit validity)"
],
"conclusion": "This positions regulated entities to demonstrate actual enforcement, not just documented intent—a material advantage when regulators increasingly expect proof of operational governance."
},
"architecture_link": "See Technical Architecture →",
"values_link": "Values & Principles →"
}
}