Added i18n support for two accordion sections:

- Three-Layer Architecture (arch_layers): 3 layers with titles and descriptions
- Six Governance Services (services): 6 services with titles and descriptions

Changes:

- Added arch_layers and services objects to EN/DE/FR leader.json
- Translated all content using DeepL API (component names kept in English)
- Added data-i18n attributes to leader.html for both accordions

This is phase 1 — the remaining accordions (demos, validation, EU AI Act, research, scope) contain extensive content, including code samples, and will be translated in subsequent phases.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
76 lines
5.2 KiB
JSON
{
  "page": {
    "title": "For Decision-Makers | Tractatus AI Safety Framework",
    "description": "Structural AI governance for organisations deploying LLM systems at scale. Research framework addressing architectural gaps in AI safety."
  },
  "header": {
    "badge": "Research Framework • Early Development",
    "title": "Tractatus: Architectural Governance for LLM Systems",
    "subtitle": "A governance framework addressing structural gaps in AI safety through external architectural controls. Designed for organisations deploying large language models at scale where conventional oversight mechanisms prove insufficient."
  },
  "sections": {
    "governance_gap": {
      "heading": "The Governance Gap",
      "intro": "Current AI governance approaches—policy documents, training programmes, ethical guidelines—rely on voluntary compliance. LLM systems can bypass these controls simply by not invoking them. When an AI agent needs to check a policy, it must choose to do so. When it should escalate a decision to human oversight, it must recognise that obligation.",
      "problem": "This creates a structural problem: governance exists only insofar as the AI acknowledges it. For organisations subject to EU AI Act Article 14 (human oversight requirements) or deploying AI in high-stakes domains, this voluntary model is inadequate.",
      "solution": "Tractatus explores whether governance can be made architecturally external—difficult to bypass not through better prompts, but through system design that places control points outside the AI's discretion."
    },
    "architectural_approach": {
      "heading": "Architectural Approach",
      "three_layer_title": "Three-Layer Architecture",
      "services_title": "Six Governance Services",
      "arch_layers": {
        "layer_1_title": "Agent Runtime Layer",
        "layer_1_desc": "Any LLM system (Claude Code, Copilot, custom agents, LangChain, CrewAI). The AI system being governed.",
        "layer_2_title": "Governance Layer",
        "layer_2_desc": "Six autonomous services that intercept, validate, and document AI operations. External to the AI runtime.",
        "layer_3_title": "Persistent Storage Layer",
        "layer_3_desc": "Immutable audit logs, governance rules, instruction history. Cannot be altered by AI prompts."
      },
      "services": {
        "service_1_title": "BoundaryEnforcer",
        "service_1_desc": "Blocks AI from making values decisions without human approval. Enforces decision boundaries through architectural controls.",
        "service_2_title": "InstructionPersistenceClassifier",
        "service_2_desc": "Prevents pattern bias from overriding explicit instructions. Stores organisational directives external to AI context.",
        "service_3_title": "CrossReferenceValidator",
        "service_3_desc": "Validates AI actions against stored policies before execution. Detects conflicts with established rules.",
        "service_4_title": "ContextPressureMonitor",
        "service_4_desc": "Tracks session complexity, token usage, conversation length. Detects degradation in decision quality.",
        "service_5_title": "MetacognitiveVerifier",
        "service_5_desc": "Validates reasoning quality before complex operations. Self-checks alignment, coherence, alternatives.",
        "service_6_title": "PluralisticDeliberationOrchestrator",
        "service_6_desc": "Facilitates multi-stakeholder deliberation for values conflicts. Non-hierarchical engagement with documented dissent."
      }
    },
    "governance_capabilities": {
      "heading": "Governance Capabilities",
      "intro": "Three interactive demonstrations showing governance infrastructure in operation. These show mechanisms, not fictional scenarios.",
      "audit_trail_title": "Audit Trail & Compliance Evidence Generation",
      "audit_trail_desc": "Immutable logging, evidence extraction, regulatory reporting",
      "continuous_improvement_title": "Continuous Improvement: Incident → Rule Creation",
      "continuous_improvement_desc": "Learning from failures, automated rule generation, validation",
      "pluralistic_deliberation_title": "Pluralistic Deliberation: Values Conflict Resolution",
      "pluralistic_deliberation_desc": "Multi-stakeholder engagement, non-hierarchical process, moral remainder documentation"
    },
    "development_status": {
      "heading": "Development Status",
      "warning_title": "Early-Stage Research Framework",
      "warning_text": "Tractatus is a proof-of-concept developed over six months in a single project context (this website). It demonstrates architectural patterns for AI governance but has not undergone independent validation, red-team testing, or multi-organisation deployment.",
      "validation_title": "Validated vs. Not Validated"
    },
    "eu_ai_act": {
      "heading": "EU AI Act Considerations",
      "article_14_title": "Regulation 2024/1689, Article 14: Human Oversight"
    },
    "research_foundations": {
      "heading": "Research Foundations",
      "org_theory_title": "Organisational Theory & Philosophical Basis"
    },
    "scope_limitations": {
      "heading": "Scope & Limitations",
      "title": "What This Is Not • What It Offers",
      "not_title": "Tractatus is not:",
      "offers_title": "What it offers:"
    }
  }
}