{ "page": { "title": "For Decision-Makers | Tractatus AI Safety Framework", "description": "Structural AI governance for organisations deploying LLM systems at scale. Research framework addressing architectural gaps in AI safety." }, "header": { "badge": "Research Framework • Early Development", "title": "Tractatus: Architectural Governance for LLM Systems", "subtitle": "A governance framework addressing structural gaps in AI safety through external architectural controls. Designed for organisations deploying large language models at scale where conventional oversight mechanisms prove insufficient." }, "sections": { "governance_gap": { "heading": "The Governance Gap", "intro": "Current AI governance approaches—policy documents, training programmes, ethical guidelines—rely on voluntary compliance. LLM systems can bypass these controls simply by not invoking them. When an AI agent needs to check a policy, it must choose to do so. When it should escalate a decision to human oversight, it must recognise that obligation.", "problem": "This creates a structural problem: governance exists only insofar as the AI acknowledges it. For organisations subject to EU AI Act Article 14 (human oversight requirements) or deploying AI in high-stakes domains, this voluntary model is inadequate.", "solution": "Tractatus explores whether governance can be made architecturally external—difficult to bypass not through better prompts, but through system design that places control points outside the AI's discretion." }, "architectural_approach": { "heading": "Architectural Approach", "three_layer_title": "Three-Layer Architecture", "services_title": "Six Governance Services" }, "governance_capabilities": { "heading": "Governance Capabilities", "intro": "Three interactive demonstrations showing governance infrastructure in operation. These show mechanisms, not fictional scenarios.", "audit_trail_title": "Audit Trail & Compliance Evidence Generation", "audit_trail_desc": "Immutable logging, evidence extraction, regulatory reporting", "continuous_improvement_title": "Continuous Improvement: Incident → Rule Creation", "continuous_improvement_desc": "Learning from failures, automated rule generation, validation", "pluralistic_deliberation_title": "Pluralistic Deliberation: Values Conflict Resolution", "pluralistic_deliberation_desc": "Multi-stakeholder engagement, non-hierarchical process, moral remainder documentation" }, "development_status": { "heading": "Development Status", "warning_title": "Early-Stage Research Framework", "warning_text": "Tractatus is a proof-of-concept developed over six months in a single project context (this website). It demonstrates architectural patterns for AI governance but has not undergone independent validation, red-team testing, or multi-organisation deployment.", "validation_title": "Validated vs. Not Validated" }, "eu_ai_act": { "heading": "EU AI Act Considerations", "article_14_title": "Regulation 2024/1689, Article 14: Human Oversight" }, "research_foundations": { "heading": "Research Foundations", "org_theory_title": "Organisational Theory & Philosophical Basis" }, "scope_limitations": { "heading": "Scope & Limitations", "title": "What This Is Not • What It Offers", "not_title": "Tractatus is not:", "offers_title": "What it offers:" } } }