Added translations for 7 remaining accordion sections in leader.html: - Demo: Audit Logging (8 keys) - Demo: Incident Learning (8 keys) - Demo: Pluralistic Deliberation (15 keys) - Validated vs Not Validated (6 keys) - EU AI Act Considerations (8 keys) - Research Foundations (7 keys) - Scope & Limitations (12 keys) All JSON code blocks and technical identifiers remain in English. Only human-readable descriptive content is translated. Total: ~64 new translation keys added to EN/DE/FR leader.json files. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
141 lines
12 KiB
JSON
141 lines
12 KiB
JSON
{
  "page": {
    "title": "For Decision-Makers | Tractatus AI Safety Framework",
    "description": "Structural AI governance for organisations deploying LLM systems at scale. Research framework addressing architectural gaps in AI safety."
  },
  "header": {
    "badge": "Research Framework • Early Development",
    "title": "Tractatus: Architectural Governance for LLM Systems",
    "subtitle": "A governance framework addressing structural gaps in AI safety through external architectural controls. Designed for organisations deploying large language models at scale where conventional oversight mechanisms prove insufficient."
  },
  "sections": {
    "governance_gap": {
      "heading": "The Governance Gap",
      "intro": "Current AI governance approaches—policy documents, training programmes, ethical guidelines—rely on voluntary compliance. LLM systems can bypass these controls simply by not invoking them. When an AI agent needs to check a policy, it must choose to do so. When it should escalate a decision to human oversight, it must recognise that obligation.",
      "problem": "This creates a structural problem: governance exists only insofar as the AI acknowledges it. For organisations subject to EU AI Act Article 14 (human oversight requirements) or deploying AI in high-stakes domains, this voluntary model is inadequate.",
      "solution": "Tractatus explores whether governance can be made architecturally external—difficult to bypass not through better prompts, but through system design that places control points outside the AI's discretion."
    },
    "architectural_approach": {
      "heading": "Architectural Approach",
      "three_layer_title": "Three-Layer Architecture",
      "services_title": "Six Governance Services",
      "arch_layers": {
        "layer_1_title": "Agent Runtime Layer",
        "layer_1_desc": "Any LLM system (Claude Code, Copilot, custom agents, LangChain, CrewAI). The AI system being governed.",
        "layer_2_title": "Governance Layer",
        "layer_2_desc": "Six autonomous services that intercept, validate, and document AI operations. External to the AI runtime.",
        "layer_3_title": "Persistent Storage Layer",
        "layer_3_desc": "Immutable audit logs, governance rules, instruction history. Cannot be altered by AI prompts."
      },
      "services": {
        "service_1_title": "BoundaryEnforcer",
        "service_1_desc": "Blocks AI from making values decisions without human approval. Enforces decision boundaries through architectural controls.",
        "service_2_title": "InstructionPersistenceClassifier",
        "service_2_desc": "Prevents pattern bias from overriding explicit instructions. Stores organisational directives external to AI context.",
        "service_3_title": "CrossReferenceValidator",
        "service_3_desc": "Validates AI actions against stored policies before execution. Detects conflicts with established rules.",
        "service_4_title": "ContextPressureMonitor",
        "service_4_desc": "Tracks session complexity, token usage, conversation length. Detects degradation in decision quality.",
        "service_5_title": "MetacognitiveVerifier",
        "service_5_desc": "Validates reasoning quality before complex operations. Self-checks alignment, coherence, alternatives.",
        "service_6_title": "PluralisticDeliberationOrchestrator",
        "service_6_desc": "Facilitates multi-stakeholder deliberation for values conflicts. Non-hierarchical engagement with documented dissent."
      }
    },
    "governance_capabilities": {
      "heading": "Governance Capabilities",
      "intro": "Three interactive demonstrations showing governance infrastructure in operation. These show mechanisms, not fictional scenarios.",
      "audit_trail_title": "Audit Trail & Compliance Evidence Generation",
      "audit_trail_desc": "Immutable logging, evidence extraction, regulatory reporting",
      "continuous_improvement_title": "Continuous Improvement: Incident → Rule Creation",
      "continuous_improvement_desc": "Learning from failures, automated rule generation, validation",
      "pluralistic_deliberation_title": "Pluralistic Deliberation: Values Conflict Resolution",
      "pluralistic_deliberation_desc": "Multi-stakeholder engagement, non-hierarchical process, moral remainder documentation",
      "sample_heading": "Sample Audit Log Structure",
      "immutability_label": "Immutability:",
      "immutability_text": "Audit logs stored in append-only database. AI cannot modify or delete entries.",
      "compliance_label": "Compliance Evidence:",
      "compliance_text": "Automatic tagging with regulatory requirements (EU AI Act Article 14, GDPR Article 22, etc.)",
      "export_label": "Export Capabilities:",
      "export_text": "Generate compliance reports for regulators showing human oversight enforcement",
      "footer_text": "When a regulator asks 'How do you prove effective human oversight at scale?', this audit trail provides structural evidence independent of AI cooperation.",
      "flow_heading": "Incident Learning Flow",
      "step_1_desc": "CrossReferenceValidator flags policy violation",
      "step_2_desc": "Automated analysis of instruction history, context state",
      "step_3_desc": "Proposed governance rule to prevent recurrence",
      "step_4_desc": "Governance board reviews and approves new rule",
      "step_5_desc": "Rule added to persistent storage, active immediately",
      "example_heading": "Example Generated Rule",
      "learning_label": "Organisational Learning:",
      "learning_text": "When one team encounters governance failure, the entire organisation benefits from automatically generated preventive rules. Scales governance knowledge without manual documentation.",
      "conflict_label": "Conflict Detection:",
      "conflict_text": "AI system identifies competing values in decision context (e.g., efficiency vs. transparency, cost vs. risk mitigation, innovation vs. regulatory compliance). BoundaryEnforcer blocks autonomous decision, escalates to PluralisticDeliberationOrchestrator.",
      "stakeholder_heading": "Stakeholder Identification Process",
      "stakeholder_1": "Automatic Detection: System identifies which values frameworks are in tension (utilitarian, deontological, virtue ethics, contractarian, etc.)",
      "stakeholder_2": "Stakeholder Mapping: Identifies parties with legitimate interest in decision (affected parties, domain experts, governance authorities, community representatives)",
      "stakeholder_3": "Human Approval: Governance board reviews stakeholder list, adds/removes as appropriate (TRA-OPS-0002)",
      "deliberation_heading": "Non-Hierarchical Deliberation",
      "equal_voice_title": "Equal Voice",
      "equal_voice_text": "All stakeholders present perspectives without hierarchical weighting. Technical experts do not automatically override community concerns.",
      "dissent_title": "Documented Dissent",
      "dissent_text": "Minority positions recorded in full. Dissenting stakeholders can document why consensus fails their values framework.",
      "moral_title": "Moral Remainder",
      "moral_text": "System documents unavoidable value trade-offs. Even a correct decision creates documented harm to other legitimate values.",
      "precedent_title": "Precedent (Not Binding)",
      "precedent_text": "The decision becomes an informative precedent for similar conflicts. But context differences mean precedents guide, not dictate.",
      "record_heading": "Deliberation Record Structure",
      "key_principle": "Key Principle: When legitimate values conflict, no algorithm can determine the correct answer. Tractatus ensures decisions are made through inclusive deliberation with full documentation of trade-offs, rather than AI imposing a single values framework or a decision-maker dismissing stakeholder concerns."
    },
    "development_status": {
      "heading": "Development Status",
      "warning_title": "Early-Stage Research Framework",
      "warning_text": "Tractatus is a proof-of-concept developed over six months in a single project context (this website). It demonstrates architectural patterns for AI governance but has not undergone independent validation, red-team testing, or multi-organisation deployment.",
      "validation_title": "Validated vs. Not Validated",
      "validated_label": "Validated:",
      "validated_text": "Framework successfully governs Claude Code in development workflows. User reports order-of-magnitude improvement in productivity for non-technical operators building production systems.",
      "not_validated_label": "Not Validated:",
      "not_validated_text": "Performance at enterprise scale, integration complexity with existing systems, effectiveness against adversarial prompts, cross-platform consistency.",
      "limitation_label": "Known Limitation:",
      "limitation_text": "Framework can be bypassed if AI simply chooses not to use governance tools. Voluntary invocation remains a structural weakness requiring external enforcement mechanisms."
    },
    "eu_ai_act": {
      "heading": "EU AI Act Considerations",
      "article_14_title": "Regulation 2024/1689, Article 14: Human Oversight",
      "intro": "The EU AI Act (Regulation 2024/1689) establishes human oversight requirements for high-risk AI systems (Article 14). Organisations must ensure AI systems are effectively overseen by natural persons with authority to interrupt or disregard AI outputs.",
      "addresses": "Tractatus addresses this through architectural controls that:",
      "bullet_1": "Generate immutable audit trails documenting AI decision-making processes",
      "bullet_2": "Enforce human approval requirements for values-based decisions",
      "bullet_3": "Provide evidence of oversight mechanisms independent of AI cooperation",
      "bullet_4": "Document compliance with transparency and record-keeping obligations",
      "disclaimer": "This does not constitute legal compliance advice. Organisations should evaluate whether these architectural patterns align with their specific regulatory obligations in consultation with legal counsel.",
      "penalties": "Maximum penalties under EU AI Act: 35 million euros or 7 percent of global annual turnover (whichever is higher) for prohibited AI practices; 15 million euros or 3 percent for other violations."
    },
    "research_foundations": {
      "heading": "Research Foundations",
      "org_theory_title": "Organisational Theory & Philosophical Basis",
      "intro": "Tractatus draws on 40+ years of organisational theory research: time-based organisation (Bluedorn, Ancona), knowledge orchestration (Crossan), post-bureaucratic authority (Laloux), structural inertia (Hannan & Freeman).",
      "premise": "Core premise: When knowledge becomes ubiquitous through AI, authority must derive from appropriate time horizon and domain expertise rather than hierarchical position. Governance systems must orchestrate decision-making across strategic, operational, and tactical timescales.",
      "view_pdf": "View complete organisational theory foundations (PDF)",
      "ai_safety_title": "AI Safety Research: Architectural Safeguards Against LLM Hierarchical Dominance",
      "ai_safety_desc": "How Tractatus protects pluralistic values from AI pattern bias while maintaining safety boundaries.",
      "pdf_link": "PDF",
      "read_online": "Read online"
    },
    "scope_limitations": {
      "heading": "Scope & Limitations",
      "title": "What This Is Not • What It Offers",
      "not_title": "Tractatus is not:",
      "offers_title": "What it offers:",
      "not_1": "A comprehensive AI safety solution",
      "not_2": "Independently validated or security-audited",
      "not_3": "Tested against adversarial attacks",
      "not_4": "Proven effective across multiple organisations",
      "not_5": "A substitute for legal compliance review",
      "not_6": "A commercial product (research framework, Apache 2.0 licence)",
      "offers_1": "Architectural patterns for external governance controls",
      "offers_2": "Reference implementation demonstrating feasibility",
      "offers_3": "Foundation for organisational pilots and validation studies",
      "offers_4": "Evidence that structural approaches to AI safety merit investigation"
    }
  }
}