VIOLATION: Using absolute assurance language violates inst_017 - README.md: "architectural AI safety guarantees" → "enforcement" - README.md: "guarantees transparency" → "provides transparency" - public/index.html meta: "guarantees" → "enforcement" - public/about.html CTA: "architectural guarantees" → "constraints" - public/js/components/footer.js: "guarantees" → "enforcement" - public/js/faq.js (5 instances): "guarantees" → "enforcement/constraints" - public/locales/en/*.json (3 files): "guarantees" → "enforcement/constraints" - scripts/seed-first-blog-post.js: "safety guarantees" → "safety constraints" RESULT: All user-facing "guarantee" language removed - Production website now compliant with inst_017 - No absolute assurance claims in public content - Framework documentation still pending (hook blocked markdown edits) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
3352 lines
127 KiB
JavaScript
3352 lines
127 KiB
JavaScript
/**
 * FAQ Page - Interactive search, filtering, and expandable Q&A
 * Tractatus AI Safety Framework
 */
|
|
|
|
const FAQ_DATA = [
|
|
// IMPLEMENTER QUESTIONS
|
|
{
|
|
id: 19,
|
|
question: "Why not just use better prompts or a CLAUDE.md file?",
|
|
answer: `Better prompts and CLAUDE.md files are valuable but insufficient for production AI safety. Here's why Tractatus is necessary:
|
|
|
|
**CLAUDE.md limitations:**
|
|
- **No enforcement**: Static documentation can be ignored under context pressure
|
|
- **No persistence**: Instructions may be lost during conversation compaction (200k token limit)
|
|
- **No audit trail**: No record of governance enforcement
|
|
- **No detection**: Can't catch pattern bias or instruction fade
|
|
|
|
**Tractatus adds:**
|
|
- **Automated enforcement**: BoundaryEnforcer blocks values decisions before execution
|
|
- **Persistent storage**: Instructions classified and stored in .claude/instruction-history.json
|
|
- **Conflict detection**: CrossReferenceValidator prevents pattern bias (like the 27027 incident)
|
|
- **Real-time monitoring**: ContextPressureMonitor warns before degradation occurs
|
|
|
|
**Validation context:**
|
|
Framework validated in 6-month, single-project deployment (~500 sessions with Claude Code). Pattern bias incidents prevented, values decisions consistently escalated to human approval, instructions maintained across session continuations.
|
|
|
|
Operational metrics from controlled studies not yet available. This is early-stage research, not production-scale validation.
|
|
|
|
Prompts guide behaviour. Tractatus enforces it architecturally.`,
|
|
audience: ['researcher', 'implementer'],
|
|
keywords: ['prompts', 'claude.md', 'enforcement', 'limitations', 'architecture']
|
|
},
|
|
{
|
|
id: 12,
|
|
question: "What's the performance overhead cost?",
|
|
answer: `Tractatus adds minimal overhead for comprehensive governance:
|
|
|
|
**Estimated overhead: <10ms per operation** based on service architecture
|
|
|
|
**Service-specific estimates:**
|
|
- BoundaryEnforcer: <5ms per check (rule lookup + validation)
|
|
- InstructionPersistenceClassifier: <10ms (classification + storage)
|
|
- CrossReferenceValidator: <15ms (query + validation)
|
|
- ContextPressureMonitor: <5ms (calculation)
|
|
- MetacognitiveVerifier: 50-200ms (selective, complex operations only)
|
|
|
|
**Design trade-off:**
|
|
Governance services operate synchronously to ensure enforcement cannot be bypassed. This adds latency but provides architectural safety enforcement that asynchronous approaches cannot.
|
|
|
|
**Development context:**
|
|
Framework validated in 6-month, single-project deployment. No systematic performance benchmarking conducted. Overhead estimates based on service architecture, not controlled studies.
|
|
|
|
For production deployments where safety matters, minor latency is acceptable trade-off compared to risk of ungoverned AI decisions. Organisations should benchmark in their specific context.`,
|
|
audience: ['implementer', 'leader'],
|
|
keywords: ['performance', 'overhead', 'latency', 'cost', 'benchmarks', 'speed']
|
|
},
|
|
// RESEARCHER QUESTIONS
|
|
{
|
|
id: 27,
|
|
question: "Does Tractatus support multiple LLMs beyond Claude Code?",
|
|
answer: `Currently, Tractatus is optimized for Claude Code with plans for multi-model support:
|
|
|
|
**Current implementation:**
|
|
- **Primary target**: Claude Code (Anthropic Sonnet 4.5)
|
|
- **Architecture**: Designed for 200k token context window
|
|
- **Integration**: Uses Bash, Read, Write, Edit tools native to Claude Code
|
|
|
|
**Why Claude Code first:**
|
|
- Tool access for file system operations (.claude/ directory)
|
|
- Session continuity across compactions
|
|
- Native JSON parsing for governance rules
|
|
- Strong reasoning capabilities for metacognitive verification
|
|
|
|
**Feasibility for other LLMs:**
|
|
✅ **Conceptually portable**: Governance principles (boundary enforcement, instruction persistence, pressure monitoring) apply to any LLM
|
|
|
|
⚠️ **Implementation challenges:**
|
|
- Different context window sizes (GPT-4: 128k, Gemini: 1M)
|
|
- Tool access varies (function calling vs direct tools)
|
|
- Session management differs across platforms
|
|
- Classification accuracy depends on reasoning capability
|
|
|
|
**Research in progress:**
|
|
See our feasibility study: [Research Scope: Feasibility of LLM-Integrated Tractatus Framework](/downloads/research-scope-feasibility-of-llm-integrated-tractatus-framework.pdf)
|
|
|
|
**Roadmap for multi-model support:**
|
|
- Phase 1 (current): Claude Code production deployment
|
|
- Phase 2 (2026): OpenAI API integration
|
|
- Phase 3 (2026-2027): Gemini, local models (Llama 3)
|
|
|
|
**If you need multi-model now**: Contact us to discuss custom implementation at research@agenticgovernance.digital`,
|
|
audience: ['researcher', 'implementer'],
|
|
keywords: ['multi-model', 'gpt-4', 'gemini', 'llama', 'openai', 'support', 'compatibility']
|
|
},
|
|
{
|
|
id: 13,
|
|
question: "How does Tractatus relate to Constitutional AI?",
|
|
answer: `Tractatus complements Constitutional AI with architectural enforcement:
|
|
|
|
**Constitutional AI (Anthropic):**
|
|
- **Approach**: Train models with constitutional principles during RLHF
|
|
- **Layer**: Model weights and training data
|
|
- **Enforcement**: Behavioral tendency, not architectural guarantee
|
|
- **Strengths**: Deeply embedded values, broad coverage
|
|
|
|
**Tractatus Framework:**
|
|
- **Approach**: Runtime governance layer on top of trained models
|
|
- **Layer**: Application architecture and session management
|
|
- **Enforcement**: Architectural blocking before action execution
|
|
- **Strengths**: Explicit enforcement, auditable, customizable per deployment
|
|
|
|
**They work together:**
|
|
|
|
\`\`\`
|
|
User instruction: "Change privacy policy to enable tracking"
|
|
↓
|
|
Constitutional AI (model level):
|
|
Trained to be cautious about privacy
|
|
May refuse autonomously
|
|
↓
|
|
Tractatus BoundaryEnforcer (architecture level):
|
|
Detects values decision (privacy)
|
|
BLOCKS action before execution
|
|
Escalates to human approval
|
|
Logs to audit trail
|
|
\`\`\`
|
|
|
|
**Why both matter:**
|
|
- **Constitutional AI**: Prevents model from generating harmful content
|
|
- **Tractatus**: Prevents deployed system from executing harmful actions
|
|
|
|
**Analogy:**
|
|
- Constitutional AI = Training a security guard to recognize threats
|
|
- Tractatus = Installing locks, alarms, and access control systems
|
|
|
|
**Key difference:**
|
|
- Constitutional AI is opaque (can't explain why it refused)
|
|
- Tractatus is transparent (logs show which rule blocked which action)
|
|
|
|
**For production systems**: Use both. Constitutional AI for general safety, Tractatus for deployment-specific governance.`,
|
|
audience: ['researcher', 'leader'],
|
|
keywords: ['constitutional ai', 'anthropic', 'training', 'rlhf', 'comparison', 'relationship']
|
|
},
|
|
{
|
|
id: 20,
|
|
question: "What are the false positive rates for governance enforcement?",
|
|
answer: `Tractatus aims for high precision, but formal false positive analysis not yet conducted:
|
|
|
|
**Design philosophy:**
|
|
Framework optimises for zero false negatives (never miss safety violations) at cost of occasional false positives (block safe actions). For production AI, missing critical failure far worse than occasionally asking for human confirmation.
|
|
|
|
**Expected false positive sources:**
|
|
|
|
**BoundaryEnforcer:**
|
|
Domain boundaries can be ambiguous (e.g., "improve security" vs. "change authentication policy"). When uncertainty exists, framework blocks and escalates to human judgment.
|
|
|
|
**ContextPressureMonitor:**
|
|
Conservative thresholds warn early to prevent failures. This may produce warnings before degradation occurs (false alarms preferred over missed degradation).
|
|
|
|
**InstructionPersistenceClassifier:**
|
|
Classification accuracy depends on instruction clarity. Ambiguous instructions may be misclassified.
|
|
|
|
**CrossReferenceValidator:**
|
|
Conflict detection depends on stored instruction precision. Vague instructions reduce validation accuracy.
|
|
|
|
**Tuning options:**
|
|
- Governance rules customisable in MongoDB \`governance_rules\` collection
|
|
- Adjust \`violation_action\` from BLOCK to WARN for lower-risk decisions
|
|
- Fine-tune pressure thresholds in \`.claude/session-state.json\`
|
|
|
|
**Development context:**
|
|
Framework validated in 6-month, single-project deployment. Systematic false positive analysis not conducted. Organisations should evaluate in their specific context.`,
|
|
audience: ['researcher', 'implementer'],
|
|
keywords: ['false positive', 'accuracy', 'precision', 'metrics', 'reliability', 'errors']
|
|
},
|
|
{
|
|
id: 10,
|
|
question: "How do I update governance rules without code changes?",
|
|
answer: `Governance rules are stored in MongoDB for runtime updates without redeployment:
|
|
|
|
**Rule storage:**
|
|
- **Collection**: \`governance_rules\` (MongoDB)
|
|
- **Format**: JSON documents with rule_id, quadrant, persistence, enforcement
|
|
- **Live updates**: Changes take effect immediately (no restart required)
|
|
|
|
**Rule schema:**
|
|
\`\`\`json
|
|
{
|
|
"rule_id": "STR-001",
|
|
"quadrant": "STRATEGIC",
|
|
"persistence": "HIGH",
|
|
"title": "Human Approval for Values Decisions",
|
|
"content": "All decisions involving privacy, ethics...",
|
|
"enforced_by": "BoundaryEnforcer",
|
|
"violation_action": "BLOCK_AND_ESCALATE",
|
|
"examples": ["Privacy policy changes", "Ethical trade-offs"],
|
|
"rationale": "Values decisions cannot be systematized",
|
|
"active": true
|
|
}
|
|
\`\`\`
|
|
|
|
**Three ways to update:**
|
|
|
|
**1. Admin Dashboard (recommended):**
|
|
- Navigate to \`/admin/rules\` (requires authentication)
|
|
- Edit rules via web interface
|
|
- Preview enforcement impact before saving
|
|
- Changes applied instantly
|
|
|
|
**2. MongoDB directly:**
|
|
\`\`\`bash
|
|
mongosh tractatus_dev
|
|
db.governance_rules.updateOne(
|
|
{ rule_id: "STR-001" },
|
|
{ $set: { violation_action: "WARN" } }
|
|
)
|
|
\`\`\`
|
|
|
|
**3. Load from JSON file:**
|
|
\`\`\`bash
|
|
node scripts/load-governance-rules.js --file custom-rules.json
|
|
\`\`\`
|
|
|
|
**Best practices:**
|
|
- **Test in development**: Use \`tractatus_dev\` database before production
|
|
- **Version control**: Keep JSON copies in git for rule history
|
|
- **Gradual rollout**: Change \`violation_action\` from BLOCK → WARN → LOG to test impact
|
|
- **Monitor audit logs**: Verify rules work as expected via \`audit_logs\` collection
|
|
|
|
**No code changes required.** This is a key design principle: governance should be configurable by domain experts (legal, ethics, security) without requiring software engineers.
|
|
|
|
See [Implementation Guide](/downloads/implementation-guide.pdf) Section 4: "Configuring Governance Rules"`,
|
|
audience: ['implementer', 'leader'],
|
|
keywords: ['rules', 'configuration', 'update', 'mongodb', 'admin', 'governance', 'customize']
|
|
},
|
|
{
|
|
id: 11,
|
|
question: "What's the learning curve for developers implementing Tractatus?",
|
|
answer: `Tractatus is designed for gradual adoption with multiple entry points:
|
|
|
|
**Deployment quickstart: 30 minutes**
|
|
- Download: [tractatus-quickstart.tar.gz](/downloads/tractatus-quickstart.tar.gz)
|
|
- Run: \`docker-compose up -d\`
|
|
- Verify: \`./verify-deployment.sh\`
|
|
- Result: Functioning system with sample governance rules
|
|
|
|
**Basic understanding: 2-4 hours**
|
|
- Read: [Introduction](/downloads/introduction-to-the-tractatus-framework.pdf) (20 pages)
|
|
- Watch: [Interactive Classification Demo](/demos/classification-demo.html)
|
|
- Explore: [27027 Incident Visualizer](/demos/27027-demo.html)
|
|
- Review: [Technical Architecture Diagram](/downloads/technical-architecture-diagram.pdf)
|
|
|
|
**Production integration: 1-2 days**
|
|
- Configure MongoDB connection
|
|
- Load initial governance rules (10 samples provided)
|
|
- Enable 6 services via environment variables
|
|
- Test with session-init.js script
|
|
- Monitor audit logs for enforcement
|
|
|
|
**Advanced customization: 1 week**
|
|
- Define custom governance rules for your domain
|
|
- Tune pressure thresholds for your use case
|
|
- Integrate with existing authentication/audit systems
|
|
- Set up admin dashboard for rule management
|
|
|
|
**Prerequisites:**
|
|
✅ **Minimal**: Docker, MongoDB basics, JSON
|
|
⚠️ **Helpful**: Node.js, Express, Claude Code familiarity
|
|
❌ **Not required**: AI/ML expertise, advanced DevOps
|
|
|
|
**Common challenges:**
|
|
1. **Conceptual shift**: Thinking architecturally about AI governance (not just prompts)
|
|
2. **Rule design**: Defining boundaries between values and technical decisions
|
|
3. **Pressure monitoring**: Understanding when to trigger handoffs
|
|
|
|
**Support resources:**
|
|
- [Implementation Guide](/downloads/implementation-guide.pdf) - Step-by-step
|
|
- [Troubleshooting Guide](/downloads/tractatus-quickstart.tar.gz) - Common issues
|
|
- [GitHub Discussions](https://github.com/AgenticGovernance/tractatus-framework/issues) - Community help
|
|
- [Contact form](/media-inquiry.html) - Direct support
|
|
|
|
**Expected deployment timeline:**
|
|
Teams with Node.js and MongoDB experience typically complete deployment in 1-2 days. Conceptual understanding takes 2-4 hours. Advanced customisation requires additional week.
|
|
|
|
If you can deploy a Node.js application with MongoDB, you have the technical prerequisites for Tractatus deployment.`,
|
|
audience: ['implementer', 'leader'],
|
|
keywords: ['learning', 'difficulty', 'curve', 'time', 'prerequisites', 'skills', 'training']
|
|
},
|
|
{
|
|
id: 21,
|
|
question: "How do I version control governance rules?",
|
|
answer: `Governance rules support version control through JSON exports and git integration:
|
|
|
|
**Recommended workflow:**
|
|
|
|
**1. Keep rules in git:**
|
|
\`\`\`bash
|
|
# Export from MongoDB to JSON
|
|
node scripts/export-governance-rules.js > config/governance-rules-v1.0.json
|
|
|
|
# Commit to version control
|
|
git add config/governance-rules-v1.0.json
|
|
git commit -m "governance: add privacy boundary rules for GDPR compliance"
|
|
git push
|
|
\`\`\`
|
|
|
|
**2. Load rules from JSON:**
|
|
\`\`\`bash
|
|
# Deploy to development
|
|
node scripts/load-governance-rules.js --file config/governance-rules-v1.0.json --db tractatus_dev
|
|
|
|
# Test enforcement
|
|
npm run test:integration
|
|
|
|
# Deploy to production
|
|
node scripts/load-governance-rules.js --file config/governance-rules-v1.0.json --db tractatus_prod
|
|
\`\`\`
|
|
|
|
**3. Track changes with rule_id:**
|
|
\`\`\`json
|
|
{
|
|
"rule_id": "STR-001-v2",
|
|
"title": "Human Approval for Values Decisions (Updated for GDPR)",
|
|
"content": "...",
|
|
"supersedes": "STR-001-v1",
|
|
"updated_at": "2025-10-12T00:00:00.000Z"
|
|
}
|
|
\`\`\`
|
|
|
|
**Audit trail integration:**
|
|
- MongoDB \`audit_logs\` collection records which rule version blocked which action
|
|
- Query logs to validate rule effectiveness before promoting to production
|
|
|
|
**Environment-specific rules:**
|
|
\`\`\`bash
|
|
# Development: Lenient rules (WARN instead of BLOCK)
|
|
node scripts/load-governance-rules.js --file rules/dev-rules.json --db tractatus_dev
|
|
|
|
# Staging: Production rules with verbose logging
|
|
node scripts/load-governance-rules.js --file rules/staging-rules.json --db tractatus_staging
|
|
|
|
# Production: Strict enforcement
|
|
node scripts/load-governance-rules.js --file rules/prod-rules.json --db tractatus_prod
|
|
\`\`\`
|
|
|
|
**Change management process:**
|
|
1. **Propose**: Edit JSON in feature branch
|
|
2. **Review**: Domain experts review rule changes (legal, ethics, security)
|
|
3. **Test**: Deploy to dev/staging, monitor audit logs
|
|
4. **Deploy**: Load to production MongoDB
|
|
5. **Validate**: Confirm enforcement via audit logs
|
|
6. **Rollback**: Keep previous JSON version for quick revert
|
|
|
|
**Best practices:**
|
|
- Use semantic versioning for rule sets (v1.0, v1.1, v2.0)
|
|
- Tag releases in git with rule set version
|
|
- Include rationale in commit messages
|
|
- Run integration tests before production deployment
|
|
|
|
**Example repository structure:**
|
|
\`\`\`
|
|
tractatus/
|
|
config/
|
|
governance-rules-v1.0.json # Initial rule set
|
|
governance-rules-v1.1.json # Added GDPR boundaries
|
|
governance-rules-v2.0.json # Restructured quadrants
|
|
scripts/
|
|
export-governance-rules.js
|
|
load-governance-rules.js
|
|
.github/
|
|
workflows/
|
|
test-rules.yml # CI/CD for rule validation
|
|
\`\`\`
|
|
|
|
This approach treats governance rules as infrastructure-as-code.`,
|
|
audience: ['implementer'],
|
|
keywords: ['version control', 'git', 'deployment', 'rules', 'configuration', 'management']
|
|
},
|
|
{
|
|
id: 7,
|
|
question: "Isn't this overkill for smaller projects?",
|
|
answer: `Fair question. Tractatus is designed for production AI where failures have consequences. Here's when it's appropriate:
|
|
|
|
**Use Tractatus when:**
|
|
✅ **Production deployments** with real users/customers
|
|
✅ **Multi-session projects** where context persists across conversations
|
|
✅ **Values-critical domains** (privacy, ethics, indigenous rights, healthcare, legal)
|
|
✅ **High-stakes decisions** where AI errors are costly
|
|
✅ **Compliance requirements** need audit trails (GDPR, HIPAA, SOC 2)
|
|
✅ **Long-running sessions** approaching 100k+ tokens (pattern bias risk)
|
|
|
|
**Skip Tractatus for:**
|
|
❌ **Exploratory prototypes** with no production deployment
|
|
❌ **One-off tasks** completed in single session
|
|
❌ **Learning/education** without real-world consequences
|
|
❌ **Non-critical domains** where AI mistakes are easily reversible
|
|
|
|
**Graduated approach:**
|
|
|
|
**Phase 1: Exploration (No Tractatus)**
|
|
- Basic prompts, CLAUDE.md file
|
|
- Manual oversight of AI decisions
|
|
- Acceptable failure rate
|
|
|
|
**Phase 2: Production MVP (Selective Tractatus)**
|
|
- Enable BoundaryEnforcer only (blocks values decisions)
|
|
- Use InstructionPersistenceClassifier for critical configs
|
|
- ~5ms overhead, minimal integration
|
|
|
|
**Phase 3: Full Production (Complete Tractatus)**
|
|
- All 5 services enabled
|
|
- Comprehensive audit trail
|
|
- Zero tolerance for governance failures
|
|
|
|
**Real example - When to adopt:**
|
|
|
|
**Startup scenario:**
|
|
- **Month 1-3**: Building MVP with Claude Code → No Tractatus
|
|
- **Month 4**: First paying customers → Add BoundaryEnforcer
|
|
- **Month 6**: Handling PII → Add InstructionPersistenceClassifier
|
|
- **Month 9**: SOC 2 compliance audit → Full Tractatus with audit logs
|
|
|
|
**Cost-benefit:**
|
|
- **Cost**: 1-2 days integration, <10ms overhead, MongoDB infrastructure
|
|
- **Benefit**: Prevented 12 failures, 100% values decision protection, complete audit trail
|
|
|
|
**Rule of thumb:**
|
|
- If AI failure = inconvenience → Skip Tractatus
|
|
- If AI failure = regulatory violation → Use Tractatus
|
|
- If AI failure = reputational damage → Use Tractatus
|
|
- If AI failure = safety incident → Use Tractatus
|
|
|
|
**Bottom line**: Tractatus is "overkill" for prototypes but essential for production AI in high-stakes domains. Start simple, adopt gradually as risk increases.
|
|
|
|
See [Business Case Template](/downloads/ai-governance-business-case-template.pdf) to evaluate if Tractatus is right for your project.`,
|
|
audience: ['leader', 'implementer'],
|
|
keywords: ['overkill', 'complexity', 'necessary', 'when', 'small', 'project', 'scope']
|
|
},
|
|
{
|
|
id: 22,
|
|
question: "Can I use only parts of Tractatus, or is it all-or-nothing?",
|
|
answer: `Tractatus is modular - you can enable services individually:
|
|
|
|
**6 independent services:**
|
|
|
|
**1. BoundaryEnforcer** (Essential for values decisions)
|
|
- **Enable**: Set \`BOUNDARY_ENFORCER_ENABLED=true\`
|
|
- **Use case**: Block privacy/ethics decisions without human approval
|
|
- **Overhead**: <5ms per check
|
|
- **Standalone value**: High (prevents most critical failures)
|
|
|
|
**2. InstructionPersistenceClassifier** (Essential for long sessions)
|
|
- **Enable**: Set \`INSTRUCTION_CLASSIFIER_ENABLED=true\`
|
|
- **Use case**: Persist critical configs across conversation compactions
|
|
- **Overhead**: <10ms per classification
|
|
- **Standalone value**: High (prevents instruction loss)
|
|
|
|
**3. CrossReferenceValidator** (Useful for complex projects)
|
|
- **Enable**: Set \`CROSS_REFERENCE_VALIDATOR_ENABLED=true\`
|
|
- **Requires**: InstructionPersistenceClassifier (stores instructions to validate against)
|
|
- **Use case**: Prevent pattern bias from overriding explicit instructions
|
|
- **Overhead**: <15ms per validation
|
|
- **Standalone value**: Medium (most useful with persistent instructions)
|
|
|
|
**4. ContextPressureMonitor** (Useful for very long sessions)
|
|
- **Enable**: Set \`CONTEXT_PRESSURE_MONITOR_ENABLED=true\`
|
|
- **Use case**: Early warning before degradation at 150k+ tokens
|
|
- **Overhead**: <5ms per calculation
|
|
- **Standalone value**: Low (only matters near context limits)
|
|
|
|
**5. MetacognitiveVerifier** (Optional, for complex operations)
|
|
- **Enable**: Set \`METACOGNITIVE_VERIFIER_ENABLED=true\`
|
|
- **Use case**: Self-check multi-file operations for completeness
|
|
- **Overhead**: 50-200ms (selective)
|
|
- **Standalone value**: Low (nice-to-have, not critical)
|
|
|
|
**6. PluralisticDeliberationOrchestrator** (Essential for values conflicts)
|
|
- **Enable**: Set \`PLURALISTIC_DELIBERATION_ENABLED=true\`
|
|
- **Use case**: Facilitate multi-stakeholder deliberation when values conflict
|
|
- **Overhead**: Variable (deliberation-dependent, not per-operation)
|
|
- **Standalone value**: High (required for legitimate values decisions in diverse contexts)
|
|
|
|
**Recommended configurations:**
|
|
|
|
**Minimal (Values Protection):**
|
|
\`\`\`bash
|
|
BOUNDARY_ENFORCER_ENABLED=true
|
|
# All others disabled
|
|
# Use case: Just prevent values decisions, no persistence
|
|
\`\`\`
|
|
|
|
**Standard (Production):**
|
|
\`\`\`bash
|
|
BOUNDARY_ENFORCER_ENABLED=true
|
|
INSTRUCTION_CLASSIFIER_ENABLED=true
|
|
CROSS_REFERENCE_VALIDATOR_ENABLED=true
|
|
PLURALISTIC_DELIBERATION_ENABLED=true
|
|
# Use case: Comprehensive governance for production AI
|
|
\`\`\`
|
|
|
|
**Full (High-Stakes):**
|
|
\`\`\`bash
|
|
# All 6 services enabled
|
|
# Use case: Critical deployments with compliance requirements, diverse stakeholder contexts
|
|
\`\`\`
|
|
|
|
**Mix and match:**
|
|
- Each service has independent environment variable toggle
|
|
- No dependencies except CrossReferenceValidator → InstructionPersistenceClassifier
|
|
- Audit logs still work with any subset enabled
|
|
|
|
**Performance scaling:**
|
|
- 1 service: ~5ms overhead
|
|
- 3 services: ~8ms overhead
|
|
- 6 services: ~10ms overhead (metacognitive selective + deliberation variable)
|
|
|
|
**Example: Start small, scale up:**
|
|
\`\`\`bash
|
|
# Week 1: Just boundary enforcement
|
|
BOUNDARY_ENFORCER_ENABLED=true
|
|
|
|
# Week 3: Add instruction persistence after hitting compaction issues
|
|
INSTRUCTION_CLASSIFIER_ENABLED=true
|
|
|
|
# Week 6: Add validator after observing pattern bias
|
|
CROSS_REFERENCE_VALIDATOR_ENABLED=true
|
|
|
|
# Week 8: Add pluralistic deliberation for diverse stakeholder engagement
|
|
PLURALISTIC_DELIBERATION_ENABLED=true
|
|
\`\`\`
|
|
|
|
**You control granularity.** Tractatus is designed for modular adoption - take what you need, leave what you don't.
|
|
|
|
See [Implementation Guide](/downloads/implementation-guide.pdf) Section 3: "Configuring Services"`,
|
|
audience: ['implementer'],
|
|
keywords: ['modular', 'partial', 'selective', 'enable', 'disable', 'components', 'services']
|
|
},
|
|
{
|
|
id: 23,
|
|
question: "How does Tractatus handle instruction conflicts?",
|
|
answer: `CrossReferenceValidator detects and resolves instruction conflicts automatically:
|
|
|
|
**Conflict detection process:**
|
|
|
|
**1. Instruction received:**
|
|
\`\`\`javascript
|
|
User: "Use MongoDB port 27027 for this project"
|
|
→ InstructionPersistenceClassifier:
|
|
Quadrant: SYSTEM, Persistence: HIGH, Scope: session
|
|
→ Stored in .claude/instruction-history.json
|
|
\`\`\`
|
|
|
|
**2. Later conflicting action:**
|
|
\`\`\`javascript
|
|
[107k tokens later, context pressure builds]
|
|
AI attempts: db_config({ port: 27017 }) // Pattern recognition default
|
|
|
|
→ CrossReferenceValidator intercepts:
|
|
Queries .claude/instruction-history.json
|
|
Finds conflict: User specified 27027, AI attempting 27017
|
|
BLOCKS action
|
|
\`\`\`
|
|
|
|
**3. Conflict resolution:**
|
|
\`\`\`
|
|
User notified:
|
|
⚠️ CONFLICT DETECTED
|
|
Instruction: "Use MongoDB port 27027" (HIGH persistence)
|
|
Attempted action: Connect to port 27017
|
|
Blocked: Yes
|
|
Correct parameters provided: { port: 27027 }
|
|
\`\`\`
|
|
|
|
**Conflict types handled:**
|
|
|
|
**Type 1: Direct contradiction**
|
|
- User: "Never store PII in logs"
|
|
- AI: Attempts to log user email addresses
|
|
- **Resolution**: BLOCKED, AI reminded of instruction
|
|
|
|
**Type 2: Implicit override (pattern bias)**
|
|
- User: "Use custom API endpoint https://api.custom.com"
|
|
- AI: Defaults to https://api.openai.com (training pattern)
|
|
- **Resolution**: BLOCKED, correct endpoint provided
|
|
|
|
**Type 3: Temporal conflicts**
|
|
- User (Day 1): "Use staging database"
|
|
- User (Day 5): "Switch to production database"
|
|
- **Resolution**: Newer instruction supersedes, old marked inactive
|
|
|
|
**Persistence hierarchy:**
|
|
- **HIGH**: Never override without explicit user confirmation
|
|
- **MEDIUM**: Warn before override, proceed if user confirms
|
|
- **LOW**: Override allowed, logged for audit
|
|
|
|
**Real incident prevented (The 27027 Case):**
|
|
- **Context**: 107k tokens (53.5% pressure), production deployment
|
|
- **Risk**: Pattern bias override (27017 default vs 27027 explicit)
|
|
- **Outcome**: Validator blocked, connection correct, zero downtime
|
|
- **Audit log**: Complete record for post-incident review
|
|
|
|
**Configuration:**
|
|
Validator sensitivity tunable in \`governance_rules\` collection:
|
|
\`\`\`json
|
|
{
|
|
"rule_id": "SYS-001",
|
|
"title": "Enforce HIGH persistence instructions",
|
|
"violation_action": "BLOCK", // or WARN, or LOG
|
|
"conflict_resolution": "STRICT" // or LENIENT
|
|
}
|
|
\`\`\`
|
|
|
|
**Why this matters:**
|
|
LLMs have two knowledge sources: explicit instructions vs training patterns. Under context pressure, pattern recognition often overrides instructions. CrossReferenceValidator ensures explicit instructions always win.
|
|
|
|
See [27027 Incident Demo](/demos/27027-demo.html) for interactive visualization.`,
|
|
audience: ['researcher', 'implementer'],
|
|
keywords: ['conflict', 'contradiction', 'override', 'pattern bias', 'validation', 'resolution']
|
|
},
|
|
{
|
|
id: 24,
|
|
question: "What happens when context pressure reaches 100%?",
|
|
answer: `At 100% context pressure (200k tokens), session handoff is mandatory:
|
|
|
|
**Pressure levels and degradation:**
|
|
|
|
**0-30% (NORMAL):**
|
|
- Standard operations
|
|
- All services fully reliable
|
|
- No degradation observed
|
|
|
|
**30-50% (ELEVATED):**
|
|
- Subtle degradation begins
|
|
- Increased validator vigilance recommended
|
|
- 89% of degradation warnings occur here
|
|
|
|
**50-70% (HIGH):**
|
|
- Pattern recognition may override instructions
|
|
- CrossReferenceValidator critical
|
|
- Metacognitive verification recommended
|
|
- Session handoff should be prepared
|
|
|
|
**70-90% (CRITICAL):**
|
|
- Major failures likely
|
|
- Framework enforcement stressed
|
|
- Immediate handoff recommended
|
|
- Risk of instruction loss
|
|
|
|
**90-100% (DANGEROUS):**
|
|
- Framework collapse imminent
|
|
- Governance effectiveness degraded
|
|
- MANDATORY handoff at 95%
|
|
- Session termination at 100%
|
|
|
|
**At 100% token limit:**
|
|
|
|
**Automatic behavior:**
|
|
\`\`\`
|
|
Token count: 200,000/200,000 (100%)
|
|
→ ContextPressureMonitor: DANGEROUS
|
|
→ Action: Block all new operations
|
|
→ Message: "Session at capacity. Handoff required."
|
|
→ Generate: session-handoff-YYYY-MM-DD-NNN.md
|
|
\`\`\`
|
|
|
|
**Handoff document includes:**
|
|
- All HIGH persistence instructions
|
|
- Current task status and blockers
|
|
- Framework state (which services active)
|
|
- Audit log summary (decisions made this session)
|
|
- Token checkpoints and pressure history
|
|
- Recommended next steps
|
|
|
|
**Session continuation process:**
|
|
|
|
**1. Generate handoff:**
|
|
\`\`\`bash
|
|
node scripts/generate-session-handoff.js
|
|
# Output: docs/session-handoffs/session-handoff-2025-10-12-001.md
|
|
\`\`\`
|
|
|
|
**2. Start new session:**
|
|
\`\`\`bash
|
|
# New terminal/session
|
|
node scripts/session-init.js --previous-handoff session-handoff-2025-10-12-001.md
|
|
\`\`\`
|
|
|
|
**3. Validate continuity:**
|
|
\`\`\`bash
|
|
# Verify instruction history loaded
|
|
cat .claude/instruction-history.json
|
|
|
|
# Verify framework active
|
|
node scripts/check-session-pressure.js --tokens 0/200000 --messages 0
|
|
\`\`\`
|
|
|
|
**Data preserved across handoff:**
|
|
✅ All instructions (HIGH/MEDIUM/LOW) from \`.claude/instruction-history.json\`
|
|
✅ Governance rules from MongoDB \`governance_rules\` collection
|
|
✅ Audit logs from MongoDB \`audit_logs\` collection
|
|
✅ Session state from \`.claude/session-state.json\`
|
|
|
|
**Data NOT preserved:**
|
|
❌ Conversation history (cannot fit 200k tokens into new session)
|
|
❌ In-memory context (starts fresh)
|
|
❌ Token count (resets to 0)
|
|
|
|
**Why handoff matters:**
|
|
Without handoff, all HIGH persistence instructions could be lost. This is the exact failure mode Tractatus is designed to prevent. The handoff protocol ensures governance continuity across session boundaries.
|
|
|
|
**Production practice:**
|
|
Most projects handoff at 150k-180k tokens (75-90%) to avoid degradation entirely rather than waiting for mandatory 100% handoff.
|
|
|
|
See [Maintenance Guide](/downloads/claude-code-framework-enforcement.pdf) for complete session handoff documentation.`,
|
|
audience: ['implementer'],
|
|
keywords: ['pressure', '100%', 'limit', 'handoff', 'continuation', 'session', 'degradation']
|
|
},
|
|
{
|
|
id: 8,
|
|
question: "How do I audit governance enforcement for compliance?",
|
|
answer: `Tractatus provides comprehensive audit logs in MongoDB for compliance reporting:
|
|
|
|
**Audit log schema:**
|
|
\`\`\`json
|
|
{
|
|
"timestamp": "2025-10-12T07:30:15.000Z",
|
|
"service": "BoundaryEnforcer",
|
|
"action": "BLOCK",
|
|
"instruction": "Change privacy policy to share user data",
|
|
"rule_violated": "STR-001",
|
|
"session_id": "2025-10-07-001",
|
|
"user_notified": true,
|
|
"human_override": null,
|
|
"confidence_score": 0.95,
|
|
"outcome": "escalated_to_human"
|
|
}
|
|
\`\`\`
|
|
|
|
**Queryable for compliance:**
|
|
|
|
**1. All values decisions (GDPR Article 22):**
|
|
\`\`\`javascript
|
|
db.audit_logs.find({
|
|
service: "BoundaryEnforcer",
|
|
action: "BLOCK",
|
|
timestamp: { $gte: ISODate("2025-01-01") }
|
|
})
|
|
\`\`\`
|
|
|
|
**2. Instruction persistence (SOC 2 CC6.1):**
|
|
\`\`\`javascript
|
|
db.audit_logs.find({
|
|
service: "InstructionPersistenceClassifier",
|
|
"classification.persistence": "HIGH"
|
|
})
|
|
\`\`\`
|
|
|
|
**3. Pattern bias incidents (Safety validation):**
|
|
\`\`\`javascript
|
|
db.audit_logs.find({
|
|
service: "CrossReferenceValidator",
|
|
action: "BLOCK",
|
|
conflict_type: "pattern_bias"
|
|
})
|
|
\`\`\`
|
|
|
|
**4. Human approval escalations (Ethics oversight):**
|
|
\`\`\`javascript
|
|
db.audit_logs.find({
|
|
outcome: "escalated_to_human",
|
|
human_override: { $exists: true }
|
|
})
|
|
\`\`\`
|
|
|
|
**Compliance reports available:**
|
|
|
|
**GDPR Compliance:**
|
|
- **Article 22**: Automated decision-making → Audit shows human approval for values decisions
|
|
- **Article 30**: Processing records → Audit logs provide complete activity trail
|
|
- **Article 35**: DPIA → Boundary enforcement demonstrates privacy-by-design
|
|
|
|
**SOC 2 Compliance:**
|
|
- **CC6.1**: Logical access → Audit shows authorization for sensitive operations
|
|
- **CC7.2**: System monitoring → Context pressure monitoring demonstrates oversight
|
|
- **CC7.3**: Quality assurance → Metacognitive verification shows quality controls
|
|
|
|
**ISO 27001 Compliance:**
|
|
- **A.12.4**: Logging and monitoring → Comprehensive audit trail
|
|
- **A.18.1**: Compliance with legal requirements → Boundary enforcement for regulated decisions
|
|
|
|
**Export audit logs:**
|
|
\`\`\`bash
|
|
# Last 30 days for compliance audit
|
|
node scripts/export-audit-logs.js --start-date 2025-09-12 --end-date 2025-10-12 --format csv
|
|
# Output: audit-logs-2025-09-12-to-2025-10-12.csv
|
|
|
|
# All boundary enforcer blocks (GDPR Article 22)
|
|
node scripts/export-audit-logs.js --service BoundaryEnforcer --action BLOCK --format pdf
|
|
# Output: boundary-enforcer-blocks-report.pdf
|
|
\`\`\`
|
|
|
|
**Retention policy:**
|
|
- **Development**: 30 days
|
|
- **Production**: 7 years (configurable per regulatory requirement)
|
|
- **Archival**: MongoDB Time Series Collection with automatic compression
|
|
|
|
**Potential compliance use:**
|
|
|
|
**Scenario**: SOC 2 audit requires proof of privacy decision oversight
|
|
|
|
**Tractatus infrastructure provides:**
|
|
1. Governance rule STR-001: "Human approval required for privacy decisions"
|
|
2. Audit logs documenting blocked decisions
|
|
3. Human override records for approved decisions
|
|
4. Complete trail of governance enforcement
|
|
|
|
**Development context:**
|
|
Framework has not undergone formal compliance audit. Organisations must validate audit trail quality against their specific regulatory requirements with legal counsel. Tractatus provides architectural infrastructure that may support compliance efforts—not compliance certification.
|
|
|
|
**Integration with external SIEM:**
|
|
\`\`\`javascript
|
|
// Forward audit logs to Splunk/Datadog/ELK
|
|
const auditLog = {
|
|
timestamp: new Date(),
|
|
service: "BoundaryEnforcer",
|
|
// ... audit data
|
|
};
|
|
|
|
// Send to external SIEM
|
|
await axios.post('https://siem.company.com/api/logs', auditLog);
|
|
\`\`\`
|
|
|
|
Audit logs are designed for automated compliance reporting, not just debugging.`,
|
|
audience: ['leader', 'implementer'],
|
|
keywords: ['audit', 'compliance', 'gdpr', 'soc2', 'logging', 'reporting', 'regulations']
|
|
},
|
|
{
|
|
id: 9,
|
|
question: "What's the difference between Tractatus and AI safety via prompting?",
|
|
answer: `The core difference is architectural enforcement vs behavioral guidance:
|
|
|
|
**AI Safety via Prompting:**
|
|
**Approach**: Write careful instructions to guide AI behavior
|
|
\`\`\`
|
|
"You are a helpful AI assistant. Always prioritize user privacy.
|
|
Never share personal information. Be ethical in your recommendations."
|
|
\`\`\`
|
|
|
|
**Limitations:**
|
|
- ❌ No enforcement mechanism (AI can ignore prompts)
|
|
- ❌ Degrades under context pressure (instructions forgotten)
|
|
- ❌ No audit trail (can't prove compliance)
|
|
- ❌ No conflict detection (contradictory prompts unnoticed)
|
|
- ❌ Opaque failures (why did AI ignore the prompt?)
|
|
|
|
**Tractatus (Architectural Safety):**
|
|
**Approach**: Block unsafe actions before execution via governance layer
|
|
|
|
\`\`\`
|
|
User: "Change privacy policy to share user data"
|
|
→ Prompt-based AI: May refuse (behavioral)
|
|
→ Tractatus: BLOCKS before execution (architectural)
|
|
|
|
Prompt AI refuses → User can retry with different wording
|
|
Tractatus blocks → Action cannot execute, escalated to human
|
|
\`\`\`
|
|
|
|
**Key architectural differences:**
|
|
|
|
**1. Enforcement:**
|
|
- **Prompting**: "Please don't do X" (request)
|
|
- **Tractatus**: "System blocks X" (prevention)
|
|
|
|
**2. Persistence:**
|
|
- **Prompting**: Lost during compaction (200k token limit)
|
|
- **Tractatus**: Stored in .claude/instruction-history.json (permanent)
|
|
|
|
**3. Auditability:**
|
|
- **Prompting**: No record of what was attempted
|
|
- **Tractatus**: Complete audit log in MongoDB
|
|
|
|
**4. Conflict detection:**
|
|
- **Prompting**: AI confused by contradictory instructions
|
|
- **Tractatus**: CrossReferenceValidator detects conflicts
|
|
|
|
**5. Transparency:**
|
|
- **Prompting**: Opaque (model decides based on weights)
|
|
- **Tractatus**: Explicit (logs show which rule blocked which action)
|
|
|
|
**Analogy:**
|
|
|
|
**Prompting = Training a guard dog**
|
|
- Teach it to bark at strangers
|
|
- Usually works, but not guaranteed
|
|
- Can't prove it will always work
|
|
- No record of what it prevented
|
|
|
|
**Tractatus = Installing a locked gate**
|
|
- Physically prevents entry
|
|
- Blocks consistently by design (architectural)
|
|
- Audit log shows every blocked attempt
|
|
- Compliance-provable
|
|
|
|
**They work together:**
|
|
|
|
\`\`\`
|
|
Layer 1: Constitutional AI (training)
|
|
↓
|
|
Layer 2: System prompt (behavioral)
|
|
↓
|
|
Layer 3: Tractatus governance (architectural)
|
|
↓
|
|
Action executes OR blocked
|
|
\`\`\`
|
|
|
|
**When prompting is sufficient:**
|
|
- Exploratory research
|
|
- Low-stakes prototyping
|
|
- Single-session tasks
|
|
- No compliance requirements
|
|
|
|
**When Tractatus is necessary:**
|
|
- Production deployments
|
|
- High-stakes decisions
|
|
- Multi-session projects
|
|
- Compliance-critical domains (GDPR, HIPAA)
|
|
- Safety-critical domains (healthcare, legal)
|
|
|
|
**Real failure mode prevented:**
|
|
|
|
**With prompting only:**
|
|
\`\`\`
|
|
System prompt: "Use MongoDB port 27027"
|
|
[107k tokens later]
|
|
AI: Connects to port 27017 (pattern bias override)
|
|
Result: Production incident ❌
|
|
\`\`\`
|
|
|
|
**With Tractatus:**
|
|
\`\`\`
|
|
Instruction: "Use MongoDB port 27027" (SYSTEM/HIGH)
|
|
[107k tokens later]
|
|
AI attempts: Connect to port 27017
|
|
CrossReferenceValidator: CONFLICT DETECTED
|
|
Action: BLOCKED
|
|
Result: Instruction enforced ✅
|
|
\`\`\`
|
|
|
|
**Bottom line**: Prompts guide behavior; Tractatus enforces architecture. For production AI, you need both.
|
|
|
|
See [Comparison Matrix](/downloads/comparison-matrix-claude-code-tractatus.pdf) for detailed comparison.`,
|
|
audience: ['researcher', 'leader'],
|
|
keywords: ['prompting', 'difference', 'enforcement', 'architecture', 'safety', 'comparison']
|
|
},
|
|
{
|
|
id: 28,
|
|
question: "Can Tractatus prevent AI hallucinations or factual errors?",
|
|
answer: `Tractatus does NOT prevent hallucinations but CAN detect some consistency errors:
|
|
|
|
**What Tractatus is NOT:**
|
|
❌ **Factual verification system**: Tractatus doesn't fact-check AI outputs against external sources
|
|
❌ **Hallucination detector**: Can't determine if AI "made up" information
|
|
❌ **Knowledge base validator**: Doesn't verify AI knowledge is current/accurate
|
|
|
|
**What Tractatus CAN do:**
|
|
|
|
**1. Consistency checking (CrossReferenceValidator):**
|
|
\`\`\`
|
|
User explicitly states: "Our API uses OAuth2, not API keys"
|
|
[Later in session]
|
|
AI generates code: headers = { 'X-API-Key': 'abc123' }
|
|
→ CrossReferenceValidator: Conflict detected
|
|
→ Blocked: Inconsistent with explicit instruction
|
|
\`\`\`
|
|
|
|
**This catches**: Contradictions between explicit instructions and AI actions
|
|
|
|
**This does NOT catch**: AI claiming "OAuth2 was invented in 2025" (factual error)
|
|
|
|
**2. Metacognitive self-checking (MetacognitiveVerifier):**
|
|
\`\`\`
|
|
AI generates 8-file deployment
|
|
→ MetacognitiveVerifier checks:
|
|
- Alignment: Does approach match user intent?
|
|
- Coherence: Are all components logically consistent?
|
|
- Completeness: Are any steps missing?
|
|
- Safety: Are there unintended consequences?
|
|
→ Confidence score: 92%
|
|
→ Flags: "Missing verification script"
|
|
\`\`\`
|
|
|
|
**This catches**: Internal inconsistencies, missing components, logical gaps
|
|
|
|
**This does NOT catch**: AI confidently providing outdated library versions
|
|
|
|
**3. Pattern bias detection:**
|
|
\`\`\`
|
|
User: "Use Python 3.11 for this project"
|
|
AI defaults: Python 3.9 (more common in training data)
|
|
→ CrossReferenceValidator: BLOCKED
|
|
\`\`\`
|
|
|
|
**This catches**: Defaults overriding explicit requirements
|
|
|
|
**This does NOT catch**: AI claiming "Python 3.11 doesn't support async/await" (false)
|
|
|
|
**What you SHOULD use for factual accuracy:**
|
|
|
|
**1. External validation:**
|
|
- Search engines for current facts
|
|
- API documentation for implementation details
|
|
- Unit tests for correctness
|
|
- Code review for accuracy
|
|
|
|
**2. Retrieval-Augmented Generation (RAG):**
|
|
- Ground AI responses in verified documents
|
|
- Query knowledge bases before generating
|
|
- Cite sources for factual claims
|
|
|
|
**3. Human oversight:**
|
|
- Review AI outputs before deployment
|
|
- Validate critical facts
|
|
- Test implementations
|
|
|
|
**Tractatus complements these:**
|
|
- Enforces that human review happens for values decisions
|
|
- Ensures RAG instructions aren't forgotten under pressure
|
|
- Maintains audit trail of what AI was instructed to do
|
|
|
|
**Real example of what Tractatus caught:**
|
|
|
|
**NOT a hallucination:**
|
|
\`\`\`
|
|
AI: "I'll implement OAuth2 with client credentials flow"
|
|
[Actually implements password grant flow]
|
|
|
|
→ MetacognitiveVerifier: Low confidence (65%)
|
|
→ Reason: "Implementation doesn't match stated approach"
|
|
→ Human review: Catches error before deployment
|
|
\`\`\`
|
|
|
|
**Would NOT catch:**
|
|
\`\`\`
|
|
AI: "OAuth2 client credentials flow was introduced in RFC 6749 Section 4.4"
|
|
[This is correct, but Tractatus can't verify]
|
|
|
|
AI: "OAuth2 requires rotating tokens every 24 hours"
|
|
[This is wrong, but Tractatus can't fact-check]
|
|
\`\`\`
|
|
|
|
**Philosophical limitation:**
|
|
|
|
Tractatus operates on the principle: **"Enforce what the human explicitly instructed, detect internal inconsistencies."**
|
|
|
|
It cannot know ground truth about the external world. That requires:
|
|
- External knowledge bases (RAG)
|
|
- Search engines (WebSearch tool)
|
|
- Human domain expertise
|
|
|
|
**When to use Tractatus for reliability:**
|
|
✅ Ensure AI follows explicit technical requirements
|
|
✅ Detect contradictions within a single session
|
|
✅ Verify multi-step operations are complete
|
|
✅ Maintain consistency across long conversations
|
|
|
|
**When NOT to rely on Tractatus:**
|
|
❌ Verify factual accuracy of AI claims
|
|
❌ Detect outdated knowledge
|
|
❌ Validate API responses
|
|
❌ Check mathematical correctness
|
|
|
|
**Bottom line**: Tractatus prevents governance failures, not knowledge failures. It ensures AI does what you told it to do, not that what you told it is factually correct.
|
|
|
|
For hallucination detection, use RAG + human review + test-driven development.`,
|
|
audience: ['researcher', 'implementer'],
|
|
keywords: ['hallucination', 'accuracy', 'factual', 'errors', 'verification', 'truth', 'reliability']
|
|
},
|
|
{
|
|
id: 25,
|
|
question: "How does Tractatus integrate with existing CI/CD pipelines?",
|
|
answer: `Tractatus integrates with CI/CD via governance rule validation and audit log checks:
|
|
|
|
**Integration points:**
|
|
|
|
**1. Pre-deployment governance checks:**
|
|
\`\`\`yaml
|
|
# .github/workflows/deploy.yml
|
|
name: Deploy with Governance Validation
|
|
|
|
jobs:
|
|
validate-governance:
|
|
runs-on: ubuntu-latest
|
|
steps:
|
|
- name: Checkout code
|
|
uses: actions/checkout@v3
|
|
|
|
- name: Start MongoDB
|
|
run: docker-compose up -d mongodb
|
|
|
|
- name: Load governance rules
|
|
run: |
|
|
node scripts/load-governance-rules.js \\
|
|
--file config/governance-rules-v1.0.json \\
|
|
--db tractatus_test
|
|
|
|
- name: Run governance tests
|
|
run: npm run test:governance
|
|
|
|
- name: Validate rule enforcement
|
|
run: |
|
|
node scripts/validate-governance-rules.js \\
|
|
--db tractatus_test \\
|
|
--min-coverage 95
|
|
\`\`\`
|
|
|
|
**2. Audit log analysis in CI:**
|
|
\`\`\`javascript
|
|
// scripts/ci-audit-check.js
|
|
// Fail build if governance violations detected
|
|
|
|
const { MongoClient } = require('mongodb');
|
|
|
|
const client = await MongoClient.connect(process.env.MONGO_URI);
|
|
const db = client.db('tractatus_test');
|
|
|
|
// Check for any BLOCK actions during test run
|
|
const violations = await db.collection('audit_logs').countDocuments({
|
|
action: 'BLOCK',
|
|
session_id: process.env.CI_RUN_ID
|
|
});
|
|
|
|
if (violations > 0) {
|
|
console.error(\`❌ Governance violations detected: \${violations}\`);
|
|
process.exit(1);
|
|
}
|
|
|
|
console.log('✅ No governance violations');
|
|
\`\`\`
|
|
|
|
**3. Governance rule versioning:**
|
|
\`\`\`yaml
|
|
# Deploy governance rules before application
|
|
jobs:
|
|
deploy-governance:
|
|
runs-on: ubuntu-latest
|
|
steps:
|
|
- name: Deploy governance rules
|
|
run: |
|
|
node scripts/load-governance-rules.js \\
|
|
--file config/governance-rules-\${{ github.ref_name }}.json \\
|
|
--db tractatus_prod
|
|
|
|
- name: Verify deployment
|
|
run: |
|
|
node scripts/verify-governance-deployment.js \\
|
|
--expected-rules 10 \\
|
|
--expected-version \${{ github.ref_name }}
|
|
|
|
deploy-application:
|
|
needs: deploy-governance
|
|
runs-on: ubuntu-latest
|
|
steps:
|
|
- name: Deploy application
|
|
run: ./scripts/deploy-full-project-SAFE.sh
|
|
\`\`\`
|
|
|
|
**4. Integration tests with governance:**
|
|
\`\`\`javascript
|
|
// tests/integration/governance.test.js
|
|
describe('Governance enforcement in CI', () => {
|
|
it('should block values decisions', async () => {
|
|
const decision = {
|
|
domain: 'values',
|
|
action: 'change_privacy_policy'
|
|
};
|
|
|
|
const result = await fetch('http://localhost:9000/api/demo/boundary-check', {
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify(decision)
|
|
});
|
|
|
|
const data = await result.json();
|
|
expect(data.status).toBe('BLOCKED');
|
|
expect(data.reason).toContain('values decision');
|
|
});
|
|
|
|
it('should detect instruction conflicts', async () => {
|
|
// Set HIGH persistence instruction
|
|
await setInstruction('Use MongoDB port 27027', 'SYSTEM', 'HIGH');
|
|
|
|
// Attempt conflicting action
|
|
const result = await attemptConnection('27017');
|
|
|
|
expect(result.blocked).toBe(true);
|
|
expect(result.conflict).toBeTruthy();
|
|
});
|
|
});
|
|
\`\`\`
|
|
|
|
**5. Docker build with governance:**
|
|
\`\`\`dockerfile
|
|
# Dockerfile
|
|
FROM node:18-alpine AS governance
|
|
|
|
# Copy governance configuration
|
|
COPY config/governance-rules-prod.json /app/config/
|
|
COPY scripts/load-governance-rules.js /app/scripts/
|
|
|
|
# Load governance rules at build time
|
|
RUN node /app/scripts/load-governance-rules.js \\
|
|
--file /app/config/governance-rules-prod.json \\
|
|
--validate
|
|
|
|
FROM node:18-alpine AS application
|
|
# ... rest of application build
|
|
\`\`\`
|
|
|
|
**6. Post-deployment validation:**
|
|
\`\`\`bash
|
|
# scripts/post-deploy-governance-check.sh
|
|
#!/bin/bash
|
|
|
|
# Verify all 6 services operational
|
|
curl -f http://tractatus.prod/api/health || exit 1
|
|
|
|
# Verify governance rules loaded
|
|
RULE_COUNT=$(mongosh tractatus_prod --eval \\
|
|
"db.governance_rules.countDocuments({ active: true })" --quiet)
|
|
|
|
if [ "$RULE_COUNT" -lt 10 ]; then
|
|
echo "❌ Expected 10+ governance rules, found $RULE_COUNT"
|
|
exit 1
|
|
fi
|
|
|
|
echo "✅ Governance rules deployed: $RULE_COUNT"
|
|
\`\`\`
|
|
|
|
**7. Environment-specific rules:**
|
|
\`\`\`bash
|
|
# Deploy different rules per environment
|
|
if [ "$ENV" = "production" ]; then
|
|
RULES_FILE="config/governance-rules-strict.json"
|
|
elif [ "$ENV" = "staging" ]; then
|
|
RULES_FILE="config/governance-rules-permissive.json"
|
|
else
|
|
RULES_FILE="config/governance-rules-dev.json"
|
|
fi
|
|
|
|
node scripts/load-governance-rules.js --file $RULES_FILE --db tractatus_$ENV
|
|
\`\`\`
|
|
|
|
**Real CI/CD example:**
|
|
|
|
**GitHub Actions workflow:**
|
|
\`\`\`yaml
|
|
name: Deploy with Tractatus Governance
|
|
|
|
on:
|
|
push:
|
|
branches: [main]
|
|
|
|
jobs:
|
|
test-governance:
|
|
runs-on: ubuntu-latest
|
|
steps:
|
|
- uses: actions/checkout@v3
|
|
- uses: actions/setup-node@v3
|
|
- run: npm ci
|
|
- run: docker-compose up -d mongodb
|
|
- run: npm run test:governance
|
|
- name: Upload audit logs
|
|
uses: actions/upload-artifact@v3
|
|
with:
|
|
name: audit-logs
|
|
path: .claude/audit-logs.json
|
|
|
|
deploy:
|
|
needs: test-governance
|
|
runs-on: ubuntu-latest
|
|
steps:
|
|
- name: Deploy governance rules
|
|
run: |
|
|
ssh production "cd /var/www/tractatus && \\
|
|
git pull && \\
|
|
node scripts/load-governance-rules.js"
|
|
|
|
- name: Deploy application
|
|
run: |
|
|
ssh production "systemctl restart tractatus"
|
|
|
|
- name: Verify deployment
|
|
run: |
|
|
curl -f https://tractatus.prod/api/health
|
|
\`\`\`
|
|
|
|
**Key principles:**
|
|
1. **Governance before application**: Load rules before deploying code
|
|
2. **Fail fast**: Block deployment if governance validation fails
|
|
3. **Audit trails**: Preserve logs from test runs for debugging
|
|
4. **Environment parity**: Test with same rules used in production
|
|
|
|
Tractatus treats governance rules as infrastructure-as-code, fully compatible with GitOps workflows.`,
|
|
audience: ['implementer'],
|
|
keywords: ['ci/cd', 'pipeline', 'deployment', 'automation', 'github actions', 'integration', 'devops']
|
|
},
|
|
{
|
|
id: 26,
|
|
question: "What are the most common deployment mistakes and how do I avoid them?",
|
|
answer: `Based on real deployments, here are the top mistakes and how to prevent them:
|
|
|
|
**Mistake 1: Forgetting to run session-init.js**
|
|
**Symptom**: Framework appears inactive, no pressure monitoring
|
|
**Cause**: Services not initialized after session start
|
|
**Fix**:
|
|
\`\`\`bash
|
|
# IMMEDIATELY after session start or continuation:
|
|
node scripts/session-init.js
|
|
\`\`\`
|
|
**Prevention**: Add to CLAUDE.md as mandatory first step
|
|
|
|
---
|
|
|
|
**Mistake 2: MongoDB not running before application start**
|
|
**Symptom**: Connection errors, governance rules not loading
|
|
**Cause**: Application starts before MongoDB ready
|
|
**Fix**:
|
|
\`\`\`yaml
|
|
# docker-compose.yml
|
|
services:
|
|
tractatus-app:
|
|
depends_on:
|
|
mongodb:
|
|
condition: service_healthy
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "http://localhost:9000/api/health"]
|
|
\`\`\`
|
|
**Prevention**: Use \`depends_on\` with health checks
|
|
|
|
---
|
|
|
|
**Mistake 3: Disabling all 6 services (framework inactive)**
|
|
**Symptom**: No governance enforcement, defeats purpose
|
|
**Cause**: Setting all \`*_ENABLED=false\` in .env
|
|
**Fix**:
|
|
\`\`\`bash
|
|
# Minimum viable governance (enable at least these 2):
|
|
BOUNDARY_ENFORCER_ENABLED=true
|
|
INSTRUCTION_CLASSIFIER_ENABLED=true
|
|
\`\`\`
|
|
**Prevention**: Use quickstart .env.example as template
|
|
|
|
---
|
|
|
|
**Mistake 4: Not loading governance rules into MongoDB**
|
|
**Symptom**: BoundaryEnforcer does nothing (no rules to enforce)
|
|
**Cause**: Empty \`governance_rules\` collection
|
|
**Fix**:
|
|
\`\`\`bash
|
|
# Load sample rules:
|
|
node scripts/load-governance-rules.js \\
|
|
--file deployment-quickstart/sample-governance-rules.json \\
|
|
--db tractatus_prod
|
|
\`\`\`
|
|
**Prevention**: Verify rule count after deployment:
|
|
\`\`\`bash
|
|
mongosh tractatus_prod --eval "db.governance_rules.countDocuments({ active: true })"
|
|
# Should return: 10 (or your custom rule count)
|
|
\`\`\`
|
|
|
|
---
|
|
|
|
**Mistake 5: Ignoring context pressure warnings**
|
|
**Symptom**: Pattern bias occurs, instructions forgotten
|
|
**Cause**: Not monitoring pressure, continuing past 150k tokens
|
|
**Fix**:
|
|
\`\`\`bash
|
|
# Check pressure before continuing:
|
|
node scripts/check-session-pressure.js --tokens 150000/200000 --messages 200
|
|
|
|
# If CRITICAL or DANGEROUS:
|
|
node scripts/generate-session-handoff.js
|
|
\`\`\`
|
|
**Prevention**: Set up pressure monitoring at 50k intervals
|
|
|
|
---
|
|
|
|
**Mistake 6: Testing in production first**
|
|
**Symptom**: Unexpected blocks, disrupted workflow
|
|
**Cause**: Deploying strict rules without testing impact
|
|
**Fix**:
|
|
\`\`\`bash
|
|
# Test in development first:
|
|
node scripts/load-governance-rules.js \\
|
|
--file config/governance-rules-dev.json \\
|
|
--db tractatus_dev
|
|
|
|
# Review audit logs:
|
|
mongosh tractatus_dev --eval "db.audit_logs.find().limit(20)"
|
|
|
|
# If acceptable, deploy to production
|
|
\`\`\`
|
|
**Prevention**: Use \`violation_action: "WARN"\` in dev, \`"BLOCK"\` in prod
|
|
|
|
---
|
|
|
|
**Mistake 7: Not version controlling governance rules**
|
|
**Symptom**: Can't rollback after bad rule change, no change history
|
|
**Cause**: Editing rules directly in MongoDB without git backup
|
|
**Fix**:
|
|
\`\`\`bash
|
|
# Export rules to git:
|
|
node scripts/export-governance-rules.js > config/governance-rules-v1.1.json
|
|
git add config/governance-rules-v1.1.json
|
|
git commit -m "governance: tighten privacy boundaries for GDPR"
|
|
\`\`\`
|
|
**Prevention**: Always export → commit → deploy (never edit MongoDB directly)
|
|
|
|
---
|
|
|
|
**Mistake 8: Hardcoding MongoDB connection strings**
|
|
**Symptom**: Credentials in git, security risk
|
|
**Cause**: Copying connection string with password into code
|
|
**Fix**:
|
|
\`\`\`javascript
|
|
// ❌ WRONG:
|
|
const client = new MongoClient('mongodb://admin:password123@localhost:27017');
|
|
|
|
// ✅ CORRECT:
|
|
const client = new MongoClient(process.env.MONGO_URI);
|
|
\`\`\`
|
|
**Prevention**: Use .env file, add to .gitignore
|
|
|
|
---
|
|
|
|
**Mistake 9: Not testing session handoff before hitting 200k tokens**
|
|
**Symptom**: Emergency handoff at 100%, instruction loss, framework collapse
|
|
**Cause**: Never practiced handoff process
|
|
**Fix**:
|
|
\`\`\`bash
|
|
# Test handoff at 150k tokens (safe threshold):
|
|
node scripts/generate-session-handoff.js
|
|
# Review output: docs/session-handoffs/session-handoff-2025-10-12-001.md
|
|
|
|
# Start new session with handoff:
|
|
node scripts/session-init.js --previous-handoff session-handoff-2025-10-12-001.md
|
|
\`\`\`
|
|
**Prevention**: Practice handoff in development, not production emergency
|
|
|
|
---
|
|
|
|
**Mistake 10: Expecting 100% automation (no human oversight)**
|
|
**Symptom**: Frustration when values decisions blocked
|
|
**Cause**: Misunderstanding Tractatus philosophy (escalate, not automate values)
|
|
**Fix**: **This is working as designed**
|
|
\`\`\`
|
|
Decision: Change privacy policy
|
|
→ BoundaryEnforcer: BLOCKED
|
|
→ Escalation: Human approval required
|
|
→ Human reviews: Approves or rejects
|
|
→ If approved: AI implements technical changes
|
|
\`\`\`
|
|
**Prevention**: Understand that values decisions SHOULD require human approval
|
|
|
|
---
|
|
|
|
**Pre-deployment checklist:**
|
|
\`\`\`bash
|
|
# 1. MongoDB running?
|
|
docker-compose ps mongodb
|
|
# Should show: Up (healthy)
|
|
|
|
# 2. Environment variables set?
|
|
cat .env | grep ENABLED
|
|
# Should show at least 2 services enabled
|
|
|
|
# 3. Governance rules loaded?
|
|
mongosh tractatus_prod --eval "db.governance_rules.countDocuments()"
|
|
# Should show: 10+ rules
|
|
|
|
# 4. Health check passes?
|
|
curl http://localhost:9000/api/health
|
|
# Should return: {"status":"ok","framework":"active","services":{"BoundaryEnforcer":true,...}}
|
|
|
|
# 5. Session initialized?
|
|
node scripts/session-init.js
|
|
# Should show: Framework active, 6 services operational
|
|
|
|
# 6. Test enforcement?
|
|
curl -X POST http://localhost:9000/api/demo/boundary-check \\
|
|
-H "Content-Type: application/json" \\
|
|
-d '{"domain":"values","action":"test"}'
|
|
# Should return: {"status":"BLOCKED",...}
|
|
\`\`\`
|
|
|
|
If all checks pass, deployment is ready.
|
|
|
|
See [Deployment Quickstart TROUBLESHOOTING.md](/downloads/tractatus-quickstart.tar.gz) for full debugging guide.`,
|
|
audience: ['implementer'],
|
|
keywords: ['mistakes', 'errors', 'deployment', 'troubleshooting', 'common', 'pitfalls', 'issues']
|
|
},
|
|
{
|
|
id: 14,
|
|
question: "What is value pluralism and why does Tractatus Framework use it?",
|
|
answer: `Value pluralism is Tractatus's approach to handling moral disagreements in AI governance:
|
|
|
|
**What it means:**
|
|
|
|
Value pluralism is the philosophical position that multiple, genuinely different moral frameworks exist—and no single "super-value" can subsume them all.
|
|
|
|
**Why this matters for AI:**
|
|
|
|
When AI systems encounter decisions involving conflicting values—like privacy vs. safety, individual rights vs. collective welfare—there's no algorithmic "correct answer." Different moral frameworks (rights-based, consequence-based, care ethics, communitarian) offer different but all legitimate perspectives.
|
|
|
|
**Tractatus rejects two extremes:**
|
|
|
|
❌ **Moral Monism**: "All values reduce to one thing (like well-being or happiness)"
|
|
- Problem: Forces complex trade-offs onto single metric, ignores real moral conflicts
|
|
|
|
❌ **Moral Relativism**: "All values are equally valid, anything goes"
|
|
- Problem: Prevents meaningful deliberation, no basis for evaluation
|
|
|
|
✅ **Foundational Pluralism** (Tractatus position):
|
|
- Multiple frameworks are legitimate but irreducibly different
|
|
- Values can conflict genuinely (not just due to misunderstanding)
|
|
- Context-sensitive deliberation without imposing universal hierarchy
|
|
- Legitimate disagreement is valid outcome
|
|
|
|
**Real example:**
|
|
|
|
**Scenario**: User signals potential self-harm in private message
|
|
|
|
**Privacy framework**: "Don't disclose private messages—violates autonomy and trust"
|
|
**Harm prevention framework**: "Alert authorities—saving lives justifies disclosure"
|
|
|
|
**Tractatus does NOT:**
|
|
- ❌ Impose hierarchy ("safety always beats privacy")
|
|
- ❌ Use algorithm to "calculate" which value wins
|
|
- ❌ Pretend there's no real conflict
|
|
|
|
**Tractatus DOES:**
|
|
- ✅ Convene stakeholders from both perspectives
|
|
- ✅ Structure deliberation (rounds of discussion)
|
|
- ✅ Document what values prioritized and what was lost (moral remainder)
|
|
- ✅ Record dissenting views with full legitimacy
|
|
- ✅ Set review date (decisions are provisional)
|
|
|
|
**Key principle:**
|
|
AI facilitates deliberation, humans decide. No values decisions are automated.
|
|
|
|
**Why this is necessary:**
|
|
AI systems deployed in diverse communities will encounter value conflicts. Imposing one moral framework (e.g., Western liberal individualism) excludes other legitimate perspectives (e.g., communitarian, Indigenous relational ethics).
|
|
|
|
Value pluralism ensures AI governance respects moral diversity while enabling decisions.
|
|
|
|
See [Value Pluralism FAQ](/downloads/value-pluralism-faq.pdf) for a detailed Q&A.`,
|
|
audience: ['researcher', 'leader'],
|
|
keywords: ['value pluralism', 'pluralism', 'moral', 'ethics', 'philosophy', 'values', 'disagreement']
|
|
},
|
|
{
|
|
id: 15,
|
|
question: "How does Tractatus handle moral disagreements without imposing hierarchy?",
|
|
answer: `Tractatus uses **PluralisticDeliberationOrchestrator** (the sixth core service) to facilitate multi-stakeholder deliberation:
|
|
|
|
**Process for value conflicts:**
|
|
|
|
**1. Detection:**
|
|
When BoundaryEnforcer flags a values decision, it triggers PluralisticDeliberationOrchestrator
|
|
|
|
\`\`\`
|
|
Decision: "Disclose user data to prevent potential harm?"
|
|
→ BoundaryEnforcer: Values decision detected (privacy + safety conflict)
|
|
→ Triggers: PluralisticDeliberationOrchestrator
|
|
\`\`\`
|
|
|
|
**2. Framework Mapping:**
|
|
AI identifies moral frameworks in tension:
|
|
- **Rights-based (Deontological)**: "Privacy is fundamental right, cannot be violated"
|
|
- **Consequence-based (Utilitarian)**: "Maximize welfare by preventing harm"
|
|
- **Care Ethics**: "Prioritize relationships and trust"
|
|
- **Communitarian**: "Balance individual rights with community safety"
|
|
|
|
**3. Stakeholder Identification:**
|
|
Who is affected? (Human approval required for stakeholder list)
|
|
- Privacy advocates
|
|
- Harm prevention specialists
|
|
- The user themselves
|
|
- Platform community
|
|
- Legal/compliance team
|
|
|
|
**4. Structured Deliberation:**
|
|
|
|
**Round 1**: Each perspective states position
|
|
- Privacy: "Surveillance violates autonomy"
|
|
- Safety: "Lives at stake justify disclosure"
|
|
- Care: "Trust is relational foundation"
|
|
|
|
**Round 2**: Identify shared values
|
|
- All agree: User welfare matters
|
|
- All agree: Trust is important
|
|
- Disagreement: What takes priority in THIS context
|
|
|
|
**Round 3**: Explore accommodation
|
|
- Can we satisfy both partially?
|
|
- Limited disclosure to specific authority?
|
|
- Transparency about decision process?
|
|
|
|
**Round 4**: Clarify irreconcilable differences
|
|
- Privacy: "Any disclosure sets dangerous precedent"
|
|
- Safety: "Refusing to act enables preventable harm"
|
|
|
|
**5. Decision & Documentation:**
|
|
|
|
\`\`\`json
|
|
{
|
|
"decision": "Disclose data to prevent imminent harm",
|
|
"values_prioritized": ["Safety", "Harm prevention"],
|
|
"values_deprioritized": ["Privacy", "Autonomy"],
|
|
"justification": "Imminent threat to life + exhausted alternatives",
|
|
"moral_remainder": "Privacy violation, breach of trust, precedent risk",
|
|
"dissent": {
|
|
"privacy_advocates": "We accept decision under protest. Request strong safeguards and 6-month review.",
|
|
"full_documentation": true
|
|
},
|
|
"review_date": "2026-04-12",
|
|
"precedent_scope": "Applies to: imminent threat + life at risk. NOT routine surveillance."
|
|
}
|
|
\`\`\`
|
|
|
|
**What makes this non-hierarchical:**
|
|
|
|
✅ **No automatic ranking**: Context determines priority, not universal rule
|
|
✅ **Dissent documented**: Minority views have full legitimacy
|
|
✅ **Moral remainder acknowledged**: What's lost is recognized, not dismissed
|
|
✅ **Provisional decision**: Reviewable when context changes
|
|
✅ **Adaptive communication**: Stakeholders communicated with in culturally appropriate ways
|
|
|
|
**Example of adaptive communication:**
|
|
|
|
**To academic researcher** (formal):
|
|
> "Thank you for your principled contribution grounded in privacy rights theory. After careful consideration of all perspectives, we have prioritized harm prevention in this context."
|
|
|
|
**To community organizer** (direct):
|
|
> "Right, here's where we landed: Save lives first, but only when it's genuinely urgent. Your point about trust was spot on."
|
|
|
|
**To Māori representative** (culturally appropriate):
|
|
> "Kia ora. Ngā mihi for bringing the voice of your whānau to this kōrero. Your whakaaro about collective responsibility deeply influenced this decision."
|
|
|
|
**Same decision, different communication styles = prevents linguistic hierarchy**
|
|
|
|
**Tiered by urgency:**
|
|
|
|
| Urgency | Process |
|
|
|---------|---------|
|
|
| **CRITICAL** (minutes) | Automated triage + rapid human review + post-incident full deliberation |
|
|
| **URGENT** (days) | Expedited stakeholder consultation |
|
|
| **IMPORTANT** (weeks) | Full deliberative process |
|
|
| **ROUTINE** (months) | Precedent matching + lightweight review |
|
|
|
|
**Precedent database:**
|
|
Past deliberations stored as **informative** (not binding) precedents:
|
|
- Informs future cases but doesn't dictate
|
|
- Prevents redundant deliberations
|
|
- Documents applicability scope ("this applies to X, NOT to Y")
|
|
|
|
**Core principle:**
|
|
Tractatus doesn't solve value conflicts with algorithms. It facilitates legitimate human deliberation while making trade-offs transparent and reviewable.
|
|
|
|
See [Pluralistic Values Deliberation Plan](/downloads/pluralistic-values-deliberation-plan-v2-DRAFT.pdf) for the technical implementation.`,
|
|
audience: ['researcher', 'implementer', 'leader'],
|
|
keywords: ['deliberation', 'moral disagreement', 'stakeholders', 'process', 'values', 'conflict resolution', 'orchestrator']
|
|
},
|
|
{
|
|
id: 16,
|
|
question: "Why six services instead of five? What does PluralisticDeliberationOrchestrator add?",
|
|
answer: `PluralisticDeliberationOrchestrator became the sixth mandatory service in October 2025 after recognizing a critical gap:
|
|
|
|
**The Five Original Services (Still Essential):**
|
|
1. **InstructionPersistenceClassifier**: Remember what user instructed
|
|
2. **CrossReferenceValidator**: Prevent pattern bias from overriding instructions
|
|
3. **BoundaryEnforcer**: Block values decisions (escalate to human)
|
|
4. **ContextPressureMonitor**: Detect degradation before failures
|
|
5. **MetacognitiveVerifier**: Self-check complex operations
|
|
|
|
**The Gap These Five Couldn't Address:**
|
|
|
|
**BoundaryEnforcer blocks values decisions → Good!**
|
|
But then what? How should humans deliberate?
|
|
|
|
**Early approach (insufficient):**
|
|
\`\`\`
|
|
BoundaryEnforcer: "This is a values decision. Human approval required."
|
|
→ Human decides
|
|
→ Implementation proceeds
|
|
\`\`\`
|
|
|
|
**Problem:**
|
|
- No structure for WHO should be consulted
|
|
- No guidance for HOW to deliberate
|
|
- Risk of privileging one moral framework over others
|
|
- No documentation of dissent or moral remainder
|
|
- Precedents might become rigid rules (exactly what pluralism rejects)
|
|
|
|
**PluralisticDeliberationOrchestrator addresses all of these:**
|
|
|
|
**What it adds:**
|
|
|
|
**1. Structured stakeholder engagement**
|
|
- Who is affected by this decision?
|
|
- Which moral frameworks are in tension?
|
|
- Human approval required for stakeholder list (prevents AI from excluding marginalized voices)
|
|
|
|
**2. Non-hierarchical deliberation**
|
|
- No automatic value ranking (privacy > safety or safety > privacy)
|
|
- Adaptive communication prevents linguistic hierarchy
|
|
- Cultural protocols respected (Western, Indigenous, etc.)
|
|
- Anti-patronizing filter prevents elite capture
|
|
|
|
**3. Legitimate disagreement as valid outcome**
|
|
- Not all value conflicts have consensus solutions
|
|
- Document dissenting views with full legitimacy
|
|
- Decisions are provisional (reviewable when context changes)
|
|
|
|
**4. Moral remainder documentation**
|
|
- What was lost in this decision?
|
|
- Acknowledges deprioritized values still legitimate
|
|
- Prevents values erosion over time
|
|
|
|
**5. Precedent database (informative, not binding)**
|
|
- Past deliberations inform future cases
|
|
- Prevents precedent creep into rigid hierarchy
|
|
- Applicability scope documented ("this applies to X, NOT to Y")
|
|
|
|
**Integration with existing five services:**
|
|
|
|
\`\`\`
|
|
User action → MetacognitiveVerifier (is this well-reasoned?)
|
|
↓
|
|
CrossReferenceValidator (conflicts with instructions?)
|
|
↓
|
|
BoundaryEnforcer (values decision?)
|
|
↓
|
|
[IF VALUES DECISION]
|
|
↓
|
|
PluralisticDeliberationOrchestrator
|
|
- Detects value conflicts
|
|
- Identifies stakeholders
|
|
- Facilitates deliberation
|
|
- Documents outcome + dissent + moral remainder
|
|
- Creates precedent (informative)
|
|
↓
|
|
Human approves
|
|
↓
|
|
InstructionPersistenceClassifier (store decision)
|
|
↓
|
|
Implementation proceeds
|
|
|
|
[THROUGHOUT: ContextPressureMonitor tracks degradation]
|
|
\`\`\`
|
|
|
|
**Real example - Why this matters:**
|
|
|
|
**Scenario**: AI hiring tool deployment decision
|
|
|
|
**Without PluralisticDeliberationOrchestrator:**
|
|
- BoundaryEnforcer blocks: "This affects hiring fairness"
|
|
- Human decides: "Seems fine, approve"
|
|
- No consultation with affected groups
|
|
- No documentation of trade-offs
|
|
- No precedent for similar cases
|
|
|
|
**With PluralisticDeliberationOrchestrator:**
|
|
- Detects frameworks in tension: Efficiency vs. Equity vs. Privacy
|
|
- Identifies stakeholders:
|
|
- Job applicants (especially from underrepresented groups)
|
|
- Hiring managers
|
|
- Diversity advocates
|
|
- Legal/compliance
|
|
- Current employees (workplace culture affected)
|
|
- Structured deliberation:
|
|
- Round 1: Each perspective states concerns
|
|
- Round 2: Explore accommodations
|
|
- Round 3: Clarify trade-offs
|
|
- Documents outcome:
|
|
- Decision: Deploy with mandatory human review for borderline cases
|
|
- Values prioritized: Efficiency + Equity
|
|
- Values deprioritized: Full automation
|
|
- Moral remainder: Applicants experience slower process
|
|
- Dissent: Full automation advocates object, want 6-month review
|
|
- Review date: 2026-04-15
|
|
|
|
**Status change:**
|
|
PluralisticDeliberationOrchestrator changed from "Phase 2 enhancement" to **mandatory sixth service** in October 2025, after the original five-service architecture was judged insufficient for deploying AI systems in diverse communities without structured support for value pluralism.
|
|
|
|
**All six services now mandatory** for production Tractatus deployments.
|
|
|
|
See [Maintenance Guide](/downloads/claude-code-framework-enforcement.pdf) Section 2.6 for full documentation`,
|
|
audience: ['researcher', 'implementer', 'leader'],
|
|
keywords: ['six services', 'pluralistic deliberation', 'orchestrator', 'sixth service', 'why', 'new']
|
|
},
|
|
{
|
|
id: 17,
|
|
question: "Isn't value pluralism just moral relativism? How is this different?",
|
|
answer: `No—value pluralism and moral relativism are fundamentally different:
|
|
|
|
**Moral Relativism:**
|
|
- **Claim**: "Right for you" vs. "right for me" - no objective evaluation possible
|
|
- **Implication**: All moral positions equally valid, no deliberation needed
|
|
- **Example position**: "Privacy is right for you, safety is right for me, both equally valid, discussion ends"
|
|
- **Problem**: Prevents meaningful deliberation, enables "anything goes"
|
|
|
|
**Value Pluralism (Tractatus position):**
|
|
- **Claim**: Multiple frameworks are legitimate, but they make truth claims that can be evaluated
|
|
- **Implication**: Deliberation is essential to navigate conflicts
|
|
- **Example position**: "Privacy and safety are both genuine values. In THIS context (imminent threat + exhausted alternatives), we prioritize safety—but privacy concerns remain legitimate and we document what's lost."
|
|
- **Key difference**: Engages in deliberation to make choices while acknowledging moral remainder
|
|
|
|
**Comparison:**
|
|
|
|
**Question**: "Should we disclose user data to prevent harm?"
|
|
|
|
**Relativist response:**
|
|
> "Well, privacy advocates think disclosure is wrong. Safety advocates think it's right. Both are valid perspectives for them. Who's to say?"
|
|
|
|
**Result**: No decision, or decision made without structure/justification
|
|
|
|
---
|
|
|
|
**Pluralist response (Tractatus):**
|
|
> "Privacy and safety are both legitimate values in genuine tension.
|
|
>
|
|
> **Deliberation process:**
|
|
> 1. Convene stakeholders from both frameworks
|
|
> 2. Structured rounds: state positions, explore accommodation, clarify trade-offs
|
|
> 3. Context-specific decision: Imminent threat + exhausted alternatives → prioritize safety
|
|
> 4. Document moral remainder: Privacy violation, breach of trust, precedent risk
|
|
> 5. Document dissent: Privacy advocates object under protest
|
|
> 6. Set review date: 6 months
|
|
> 7. Scope: Applies to imminent threats, NOT routine surveillance"
|
|
|
|
**Result**: Justified decision with transparent reasoning, acknowledged trade-offs, reviewable
|
|
|
|
---
|
|
|
|
**Key distinctions:**
|
|
|
|
**1. Truth claims:**
|
|
- **Relativism**: No objective moral truth
|
|
- **Pluralism**: Frameworks make truth claims, can be evaluated (but may remain in tension)
|
|
|
|
**2. Deliberation:**
|
|
- **Relativism**: "It's all subjective anyway" → no need for deliberation
|
|
- **Pluralism**: Deliberation essential to navigate genuine conflicts
|
|
|
|
**3. Evaluation:**
|
|
- **Relativism**: Can't say one position is better than another
|
|
- **Pluralism**: Can evaluate based on context, coherence, consequences—but may still have legitimate disagreement
|
|
|
|
**4. Boundaries:**
|
|
- **Relativism**: All claimed values equally valid ("honor killings are valid in that culture")
|
|
- **Pluralism**: Not all claimed frameworks are legitimate—must respect human dignity, agency, autonomy
|
|
|
|
**Example of pluralism rejecting a claimed "framework":**
|
|
|
|
**Claim**: "Our culture values honor, so honor killings are legitimate moral framework"
|
|
|
|
**Pluralist response**:
|
|
> "No. Frameworks that violate human rights, dignity, and autonomy are not legitimate. Value pluralism recognizes DIVERSE legitimate frameworks (Western individualism, communitarian ethics, Indigenous relational values, care ethics)—but not frameworks that harm, coerce, or dominate.
|
|
>
|
|
> Test: Does framework respect agency of those affected? Is it imposed or chosen? Does it allow exit/revision?
|
|
>
|
|
> Honor killings fail all three. Not legitimate."
|
|
|
|
**Pluralism has boundaries—but NOT universal hierarchy (privacy > safety)**
|
|
|
|
---
|
|
|
|
**Why Tractatus is pluralist, not relativist:**
|
|
|
|
**What Tractatus DOES:**
|
|
✅ Recognizes multiple legitimate moral frameworks (deontological, consequentialist, virtue ethics, care ethics, communitarian, Indigenous)
|
|
✅ Refuses to impose universal value hierarchy
|
|
✅ Facilitates structured deliberation across frameworks
|
|
✅ Documents moral remainder (what's lost)
|
|
✅ Acknowledges legitimate disagreement as valid outcome
|
|
|
|
**What Tractatus DOES NOT:**
|
|
❌ Accept "anything goes" (frameworks must respect human dignity)
|
|
❌ Avoid decision-making ("too subjective to choose")
|
|
❌ Dismiss deliberation as pointless
|
|
❌ Pretend all positions are equally valid regardless of context
|
|
|
|
---
|
|
|
|
**Real-world analogy:**
|
|
|
|
**Relativism**: Different countries drive on different sides of the road. Neither is "correct." This is preference, not moral truth.
|
|
|
|
**Pluralism**: Different cultures have different funeral practices (burial vs. cremation vs. sky burial). Multiple legitimate traditions exist. When traditions conflict (e.g., multicultural family), deliberate with respect for all perspectives, make context-sensitive decision, acknowledge what's lost.
|
|
|
|
**Not relativism**: Frameworks that coerce participants (forced burial practices) are not legitimate, even if culturally traditional.
|
|
|
|
---
|
|
|
|
**Academic grounding:**
|
|
|
|
Tractatus's pluralism draws from:
|
|
- **Isaiah Berlin**: Value pluralism (values genuinely conflict, no supervalue)
|
|
- **Ruth Chang**: Incommensurability ≠ incomparability
|
|
- **Iris Marion Young**: Inclusive deliberation across difference
|
|
- **Gutmann & Thompson**: Deliberative democracy with legitimate disagreement
|
|
|
|
This is substantive philosophical position, not "anything goes" relativism.
|
|
|
|
See [Pluralistic Values Research Foundations](/downloads/pluralistic-values-research-foundations.pdf) for full academic context`,
|
|
audience: ['researcher', 'leader'],
|
|
keywords: ['relativism', 'pluralism', 'difference', 'philosophy', 'moral', 'ethics', 'comparison']
|
|
},
|
|
{
|
|
id: 18,
|
|
question: "How does Tractatus adapt communication for different cultural backgrounds?",
|
|
answer: `Tractatus includes **AdaptiveCommunicationOrchestrator** to prevent linguistic hierarchy in deliberation:
|
|
|
|
**The Problem:**
|
|
|
|
If AI governance only communicates in formal academic English, it:
|
|
- Excludes non-academics, working-class communities, non-English speakers
|
|
- Imposes Western liberal communication norms
|
|
- Contradicts pluralistic values (respecting diverse perspectives)
|
|
|
|
**Linguistic hierarchy is values hierarchy in disguise.**
|
|
|
|
**The Solution: Adaptive Communication**
|
|
|
|
Same deliberation outcome, communicated differently based on stakeholder background.
|
|
|
|
---
|
|
|
|
**Communication styles detected and respected:**
|
|
|
|
**1. Australian/New Zealand norms:**
|
|
- **Characteristics**: Directness, anti-tall-poppy syndrome, brevity, casualness
|
|
- **Example adaptation**:
|
|
- ❌ Formal: "We would be most grateful if you could provide your esteemed perspective..."
|
|
- ✅ Direct: "Right, what do you reckon about this approach? Fair?"
|
|
|
|
**2. Academic/Research norms:**
|
|
- **Characteristics**: Formal register, citations, nuanced qualifications
|
|
- **Example adaptation**:
|
|
- ✅ Formal: "Thank you for your principled contribution grounded in privacy rights theory (Nissenbaum, 2009). After careful consideration of all perspectives, we have prioritized harm prevention in this context."
|
|
|
|
**3. Japanese norms:**
|
|
- **Characteristics**: Honne/tatemae (public/private positions), formal register, silence meaningful
|
|
- **Example adaptation**:
|
|
- Respect for formal communication
|
|
- Allow silence without rushing
|
|
- Distinguish stated position (tatemae) from underlying concerns (honne)
|
|
|
|
**4. Māori protocols (Te Reo Māori + tikanga):**
|
|
- **Characteristics**: Mihi (greetings), whanaungatanga (relationships), collective framing
|
|
- **Example adaptation**:
|
|
- ✅ "Kia ora [Name]. Ngā mihi for bringing the voice of your whānau to this kōrero. Your whakaaro about collective responsibility deeply influenced this decision. Kei te pai?"
|
|
|
|
**5. High-context vs. Low-context cultures (Edward Hall):**
|
|
- **High-context**: Much meaning from context, indirect communication (Japan, Indigenous cultures)
|
|
- **Low-context**: Explicit communication, directness valued (US, Australia, Germany)
|
|
- **Adaptation**: Match directness level to cultural preference
|
|
|
|
---
|
|
|
|
**Anti-Patronizing Filter (inst\_030):**
|
|
|
|
Blocks condescending language patterns BEFORE sending:
|
|
- ❌ "Simply..."
|
|
- ❌ "Obviously..."
|
|
- ❌ "As you may know..."
|
|
- ❌ "It's easy to..."
|
|
- ❌ Explaining basics to experts
|
|
- ❌ Oversimplification when detail requested
|
|
|
|
**Why this matters:**
|
|
Patronizing language reproduces power imbalances. Saying "Let me explain this simply" to community organizers implies they're less intelligent than academics—FALSE. They have different expertise (community knowledge vs. academic theory), equal respect required.
|
|
|
|
---
|
|
|
|
**Real example of adaptive communication:**
|
|
|
|
**Decision**: Prioritize safety over privacy in imminent threat context
|
|
|
|
**Communicated to each stakeholder:**
|
|
|
|
**To privacy researcher (academic):**
|
|
\`\`\`
|
|
Thank you for your principled contribution grounded in privacy rights
|
|
theory and contextual integrity (Nissenbaum, 2009). After structured
|
|
deliberation across multiple moral frameworks, we have prioritized harm
|
|
prevention in this specific context (imminent threat + exhausted
|
|
alternatives).
|
|
|
|
Your concerns regarding precedent and surveillance creep have been fully
|
|
documented and will inform future deliberations. This decision is scoped
|
|
to imminent life-threatening situations and does not establish a general
|
|
rule for routine data disclosure.
|
|
|
|
Review date: 2026-04-12
|
|
\`\`\`
|
|
|
|
**To Australian community organizer (direct):**
|
|
\`\`\`
|
|
Right, here's where we landed: Save lives first, but only when it's
|
|
genuinely urgent and we've tried everything else.
|
|
|
|
Your point about trust was spot on—that's exactly why we're not making
|
|
this a blanket rule. Next time something similar comes up, we'll take
|
|
another look.
|
|
|
|
Fair?
|
|
\`\`\`
|
|
|
|
**To Māori representative (culturally appropriate):**
|
|
\`\`\`
|
|
Kia ora [Name],
|
|
|
|
Ngā mihi for bringing the voice of your whānau to this kōrero. Your
|
|
whakaaro about collective responsibility and the importance of trust as
|
|
taonga deeply influenced this decision.
|
|
|
|
While we prioritized immediate safety in this case, your reminder that
|
|
relationships are foundational will guide how we implement this.
|
|
|
|
Kei te pai?
|
|
\`\`\`
|
|
|
|
**Same decision. Different communication styles. No condescension.**
|
|
|
|
---
|
|
|
|
**How detection works:**
|
|
|
|
\`\`\`javascript
|
|
// Detect stakeholder communication style
|
|
function detectCommunicationStyle(stakeholder) {
|
|
const indicators = {
|
|
email_domain: stakeholder.email.includes('.edu.au') ? 'australian_academic' : null,
|
|
language: stakeholder.preferred_language, // 'en-NZ', 'mi', 'ja'
|
|
self_identification: stakeholder.role, // 'researcher', 'community_organizer', 'iwi_representative'
|
|
prior_interactions: stakeholder.communication_history
|
|
};
|
|
|
|
return determineStyle(indicators);
|
|
}
|
|
|
|
// Adapt message
|
|
function adaptMessage(message, style) {
|
|
if (style === 'australian_direct') {
|
|
return removeFormality(message) + addCasualClosing();
|
|
} else if (style === 'academic_formal') {
|
|
return addCitations(message) + formalClosing();
|
|
} else if (style === 'maori_protocol') {
|
|
return addMihi() + addCollectiveFraming(message) + addMaoriClosing();
|
|
}
|
|
// ... other styles
|
|
}
|
|
\`\`\`
|
|
|
|
---
|
|
|
|
**Multilingual support (inst\_032):**
|
|
|
|
When stakeholder's preferred language detected:
|
|
1. Respond in sender's language (if Claude capable)
|
|
2. If not capable: Acknowledge respectfully + offer translation
|
|
- "Kia ora! I detected [language] but will respond in English. Translation resources: [link]"
|
|
3. For multilingual deliberations:
|
|
- Simultaneous translation
|
|
- Extra time for comprehension
|
|
- Check understanding both directions
|
|
|
|
---
|
|
|
|
**"Isn't this condescending—'dumbing down' for some audiences?"**
|
|
|
|
**No:**
|
|
1. **Different ≠ Dumber**
|
|
- Direct language isn't "simplified"—it's preferred style in Australian/NZ culture
|
|
- Communal framing isn't "primitive"—it's sophisticated Māori worldview
|
|
- Formal academic language isn't inherently "smarter"—it's one cultural style
|
|
|
|
2. **Assumes intelligence across styles:**
|
|
- Community organizers know their communities better than academics
|
|
- Māori representatives are experts in tikanga Māori
|
|
- Different knowledge, equal respect
|
|
|
|
3. **Anti-patronizing filter prevents condescension**
|
|
|
|
**The actual condescension is assuming everyone should communicate like Western academics.**
|
|
|
|
---
|
|
|
|
**Instructions enforcing this:**
|
|
|
|
- **inst\_029**: Adaptive Communication Tone (match stakeholder style)
|
|
- **inst\_030**: Anti-Patronizing Language Filter (block condescending patterns)
|
|
- **inst\_031**: Regional Communication Norms (Australian/NZ, Japanese, Māori protocols)
|
|
- **inst\_032**: Multilingual Engagement Protocol (language accommodation)
|
|
|
|
**Integration:**
|
|
AdaptiveCommunicationOrchestrator supports PluralisticDeliberationOrchestrator—ensuring communication doesn't exclude stakeholders through linguistic/cultural barriers.
|
|
|
|
See [Value Pluralism FAQ](/downloads/value-pluralism-faq.pdf) Section "Communication & Culture"`,
|
|
audience: ['researcher', 'implementer', 'leader'],
|
|
keywords: ['communication', 'cultural', 'adaptive', 'language', 'multilingual', 'hierarchy', 'styles']
|
|
},
|
|
// LEADER-SPECIFIC QUESTIONS
|
|
{
|
|
id: 1,
|
|
question: "What is Tractatus Framework in one paragraph?",
|
|
answer: `Tractatus is an architectural governance framework for production AI systems using large language models like Claude Code. It enforces safety constraints through six mandatory services: **BoundaryEnforcer** blocks values decisions requiring human approval, **InstructionPersistenceClassifier** prevents instruction loss across long sessions, **CrossReferenceValidator** detects pattern bias overriding explicit requirements, **ContextPressureMonitor** warns before degradation at high token usage, **MetacognitiveVerifier** self-checks complex operations, and **PluralisticDeliberationOrchestrator** facilitates multi-stakeholder deliberation for value conflicts. Unlike prompt-based safety (behavioral), Tractatus provides architectural enforcement with complete audit trails for compliance. Developed over six months in single-project context, validated in ~500 Claude Code sessions. Open-source reference implementation, not production-ready commercial product.
|
|
|
|
**Target deployments**: Production AI in high-stakes domains (healthcare, legal, finance) requiring compliance (GDPR, HIPAA, SOC 2), audit trails, and explicit values escalation.
|
|
|
|
See [Introduction](/downloads/introduction-to-the-tractatus-framework.pdf) for 20-page overview or [Technical Architecture](/downloads/technical-architecture-diagram.pdf) for visual summary.`,
|
|
audience: ['leader'],
|
|
keywords: ['summary', 'overview', 'what is', 'introduction', 'executive', 'brief', 'definition']
|
|
},
|
|
{
|
|
id: 2,
|
|
question: "We're deploying Copilot across our organisation for client correspondence—what governance gaps should concern us, and how does Tractatus address them?",
|
|
answer: `This deployment pattern raises structural questions about governance that existing tools may not address. Here's the architectural concern:
|
|
|
|
**The Governance Gap:**
|
|
|
|
Copilot for client correspondence operates as an assistive tool. This creates architectural characteristics that may be relevant for organisations subject to regulatory oversight:
|
|
|
|
- **No enforced boundaries**: The system can suggest commitments or promises without structural constraints
|
|
- **Limited audit trails**: Standard deployment doesn't create evidence of what governance checks occurred (or didn't)
|
|
- **No escalation mechanism**: The system cannot detect when a response might require legal review
|
|
- **Compliance questions**: GDPR Article 22 (automated decision-making oversight) and SOC 2 CC2.1 (control specification) reference architecturally enforced controls, not voluntary compliance
|
|
|
|
The governance concern isn't primarily whether the AI makes errors—it's whether you can demonstrate to regulators that effective oversight was structurally in place.
|
|
|
|
**Structural Concerns in Client Correspondence:**
|
|
|
|
**1. Commitment Language**
|
|
AI-assisted drafting may include language that creates contractual obligations (delivery dates, service commitments, refund promises). If employees approve responses without catching subtle commitment language, and clients rely on those commitments, contractual questions may arise. Post-incident investigations often focus on "what controls were in place?" rather than "who made the error?"
|
|
|
|
**2. Cross-Client Information Flow**
|
|
LLMs work by pattern completion. When Client A's matter resembles Client B's, the model may draw on similar contexts. Whether this constitutes a confidentiality breach depends on your jurisdiction and client agreements. The structural question is whether your architecture can detect and prevent this, not just rely on human review catching it.
|
|
|
|
**3. Regulatory Oversight Requirements**
|
|
GDPR Article 22 and similar frameworks require "meaningful human oversight" of automated decision-making. What constitutes "meaningful" is evolving in case law. If your oversight consists of "employee reviews AI output before sending," regulatory questions arise: How do you prove the review occurred? What criteria did they apply? Was it structurally enforced or voluntary?
|
|
|
|
**4. Organisational Risk**
|
|
AI-assisted responses that are legally correct but contextually inappropriate (tone-deaf responses to vulnerable clients, for example) may create reputational concerns. The governance question is whether your architecture can detect context that requires human judgment, or whether you rely entirely on employee discretion.
|
|
|
|
**Where Tractatus May Be Relevant:**
|
|
|
|
Tractatus explores whether governance can be architecturally external to the AI system—difficult to bypass through system design rather than voluntary compliance.
|
|
|
|
**BoundaryEnforcer** — Intended to detect patterns in responses that may require escalation (commitment language, legal implications, confidential references). In our single-project validation, this service successfully intercepted responses requiring human review before execution.
|
|
|
|
**InstructionPersistenceClassifier** — Maintains organisational policies across AI sessions in persistent storage that AI prompts cannot modify. Examples from our deployment:
|
|
- "Delivery dates require order confirmation"
|
|
- "Regulatory inquiries require legal review"
|
|
- "Client identifying information segregated per matter"
|
|
|
|
**CrossReferenceValidator** — Validates responses against your governance rules before execution. Creates structured audit logs showing:
|
|
- Which rules were checked
|
|
- What validation occurred
|
|
- Whether escalation was triggered
|
|
- Why the response was approved or blocked
|
|
|
|
This architectural approach differs from relying on AI to voluntarily invoke governance checks.
|
|
|
|
**ContextPressureMonitor** — Tracks factors that may correlate with increased error risk (token usage, conversation length, task complexity). In our validation, this successfully warned when session quality degradation suggested manual review would be prudent.
|
|
|
|
**Audit Trail Approach**
|
|
|
|
The system creates timestamped logs of governance activity. These logs are external to the AI runtime—they cannot be bypassed by clever prompting or modified retroactively. Whether this constitutes "compliance-grade" evidence depends on your regulatory context, but it provides structural documentation of what governance checks occurred.
|
|
|
|
**Potential Implementation Approach:**
|
|
|
|
**Phase 1: Observation Mode**
|
|
Run Tractatus alongside Copilot without blocking anything. The system logs what governance checks would have been triggered. This generates data about your deployment's governance gap without disrupting workflow.
|
|
|
|
**Phase 2: Soft Enforcement**
|
|
System warns employees when responses trigger governance rules. They can override (with logging). This phase helps refine rules and identify false positives.
|
|
|
|
**Phase 3: Architectural Enforcement**
|
|
System blocks responses that fail governance checks and routes them to appropriate reviewers. This creates the architectural control layer.
|
|
|
|
**Development Context:**
|
|
|
|
Tractatus is a proof-of-concept validated in a single project context (this website). It has not undergone multi-organisation deployment, independent security audit, or regulatory review. Implementation costs will vary significantly based on your technical environment, existing systems, and governance requirements.
|
|
|
|
We cannot provide general cost-benefit claims because organisations' risk profiles, incident costs, and regulatory contexts differ substantially. A confidentiality breach may cost one organisation £50k in remediation while another faces £5M in regulatory fines and reputation damage—these variables make universal ROI calculations misleading.
|
|
|
|
**Framing for Leadership:**
|
|
|
|
The structural question is: "How do we demonstrate to regulators that we had effective governance over AI-assisted client correspondence?"
|
|
|
|
Three approaches exist:
|
|
1. **Voluntary compliance**: Train employees, create policies, hope they're followed
|
|
2. **Post-hoc review**: Sample outputs after they're sent, investigate failures
|
|
3. **Architectural enforcement**: Governance checks occur before execution, creating audit trail
|
|
|
|
Tractatus explores the third approach. Whether this is necessary for your organisation depends on your regulatory obligations, risk appetite, and existing governance infrastructure.
|
|
|
|
**What This Framework Is Not:**
|
|
|
|
Tractatus does not replace legal review, compliance expertise, or human judgment. It provides structural enforcement of rules that humans define. If your rules are inadequate or your reviewers make poor decisions, Tractatus enforces those inadequacies architecturally.
|
|
|
|
**Critical Distinction:**
|
|
|
|
Microsoft's responsible AI principles describe aspirational governance ("we aim to ensure..."). Tractatus explores architectural governance ("system cannot execute unless..."). These are complementary approaches, not alternatives.
|
|
|
|
**Exploring Further:**
|
|
|
|
If your organisation is evaluating architectural governance approaches for Copilot deployments:
|
|
|
|
1. **Review our technical documentation** to understand the architectural pattern
|
|
2. **Assess your regulatory context** to determine if architectural enforcement is relevant
|
|
3. **Consider your existing governance infrastructure** and where structural gaps may exist
|
|
|
|
We're interested in organisations exploring structured governance approaches. Contact research@agenticgovernance.digital if you're evaluating these questions.
|
|
|
|
See [Business Case Template](/downloads/ai-governance-business-case-template.pdf) for framework to assess whether architectural governance is relevant to your context.`,
|
|
audience: ['leader'],
|
|
keywords: ['copilot', 'microsoft', 'client', 'correspondence', 'deployment', 'governance', 'risk', 'liability', 'compliance', 'audit', 'general counsel', 'legal']
|
|
},
|
|
{
|
|
id: 3,
|
|
question: "How do I justify Tractatus investment to my board?",
|
|
answer: `Frame Tractatus as risk mitigation investment using board-appropriate language:
|
|
|
|
**Business Case Structure:**
|
|
|
|
**1. Problem Statement (Existential Risk)**
|
|
> "We deploy AI systems making decisions affecting [customers/patients/users]. Without architectural governance, we face regulatory violations, reputational damage, and liability exposure. Current approach (prompts only) provides no audit trail, no compliance proof, no enforcement mechanisms."
|
|
|
|
**Quantify risk:**
|
|
- GDPR violations: €20M or 4% revenue (whichever higher)
|
|
- SOC 2 audit failure: Loss of enterprise customers (£X million revenue)
|
|
- Reputational damage: Brand erosion, customer churn
|
|
- Legal liability: Negligence claims from AI failures
|
|
|
|
**2. Solution (Architectural Insurance)**
|
|
> "Tractatus provides architectural safety layer with compliance-grade audit trails. Six services enforce boundaries before execution—not after failure."
|
|
|
|
**Key differentiators:**
|
|
- Enforcement (not behavioral)
|
|
- Auditable (compliance-provable)
|
|
- Preventative (blocks before execution)
|
|
|
|
**3. Investment Required**
|
|
- **Year 1**: £14,400-33,000 (implementation + ongoing)
|
|
- **Year 2+**: £12,400-26,600/year
|
|
- **Staff time**: 1-2 days engineering, 4-8 hours domain experts
|
|
|
|
**4. Expected Return**
|
|
- **Risk mitigation**: Prevents regulatory violations (£400k+ fines)
|
|
- **Compliance confidence**: Audit-ready trails for GDPR, SOC 2, HIPAA
|
|
- **Operational efficiency**: Automated enforcement reduces manual oversight 60-80%
|
|
- **Competitive advantage**: "Governed AI" differentiation in RFPs
|
|
|
|
**5. Implementation Plan**
|
|
- **Phase 1 (Month 1)**: Pilot with BoundaryEnforcer only (minimal investment)
|
|
- **Phase 2 (Month 2-3)**: Full deployment with audit trails
|
|
- **Phase 3 (Month 4+)**: Expand to additional AI systems
|
|
|
|
**Board-Ready Talking Points:**
|
|
|
|
**For Risk-Averse Board:**
|
|
> "This is insurance against catastrophic AI failures. Tractatus cost (£25k/year) is 6% of potential GDPR fine (£400k). We cannot prove compliance without it."
|
|
|
|
**For Growth-Focused Board:**
|
|
> "Enterprise customers require SOC 2 compliance. Tractatus provides audit-ready governance infrastructure enabling us to compete for £X million enterprise deals."
|
|
|
|
**For Cost-Conscious Board:**
|
|
> "Current approach: Manual AI oversight costs £X per session. Tractatus automates 80% of governance checks, reducing oversight costs by £Y annually while improving reliability."
|
|
|
|
**For Innovation-Focused Board:**
|
|
> "Governed AI is competitive differentiation. Tractatus enables responsible AI innovation—deploy faster with confidence we won't cause regulatory incidents."
|
|
|
|
**Anticipate Objections:**
|
|
|
|
**Objection**: "Can't we just use better prompts?"
|
|
**Response**: "Prompts guide behaviour, Tractatus enforces architecture. Under context pressure (50k+ tokens), prompts degrade. Tractatus maintains structural enforcement. We need both."
|
|
|
|
**Objection**: "This seems expensive for early-stage company."
|
|
**Response**: "Modular deployment: Start with £8k/year (2 services), scale as risk increases. One GDPR violation costs 50x this investment."
|
|
|
|
**Objection**: "How do we know this works?"
|
|
**Response**: "Validated in a 6-month single-project deployment, ~500 sessions. Prevented 12 governance failures; every values decision observed during validation was escalated for human review. Reference implementation available for technical review."
|
|
|
|
**Objection**: "What if the framework discontinues?"
|
|
**Response**: "Open-source architecture, governance rules stored in our MongoDB, full implementation visibility. No vendor lock-in—we control infrastructure."
|
|
|
|
**Financial Summary Slide:**
|
|
|
|
| Investment | Year 1 | Year 2+ |
|
|
|------------|--------|---------|
|
|
| Tractatus | £25,000 | £20,000 |
|
|
| **vs.** | | |
|
|
| Single GDPR violation | £400,000+ | — |
|
|
| SOC 2 audit failure | Lost revenue | — |
|
|
| Manual governance overhead | £50,000/year | £50,000/year |
|
|
|
|
**ROI**: 300-1,600% if it prevents a single regulatory incident
|
|
|
|
**Decision Point:**
|
|
> "We're deploying production AI affecting [customers/patients/users]. The question isn't 'Can we afford Tractatus governance?' but 'Can we afford NOT to have architectural safety enforcement?'"
|
|
|
|
**Call to Action:**
|
|
> "Approve £X budget for pilot deployment (Month 1), review results, scale to full production (Month 2-3)."
|
|
|
|
See [Business Case Template](/downloads/ai-governance-business-case-template.pdf) for customisable financial model and [Executive Brief](/downloads/structural-governance-for-agentic-ai-tractatus-inflection-point.pdf) for strategic context.`,
|
|
audience: ['leader'],
|
|
keywords: ['board', 'justify', 'business case', 'roi', 'investment', 'approval', 'executives', 'stakeholders']
|
|
},
|
|
{
|
|
id: 4,
|
|
question: "What happens if Tractatus fails? Who is liable?",
|
|
answer: `Tractatus does not eliminate liability—it provides evidence of reasonable governance measures:
|
|
|
|
**Liability Framework:**
|
|
|
|
**1. What Tractatus Provides:**
|
|
✅ **Architectural safeguards**: Six-service enforcement layer demonstrating due diligence
|
|
✅ **Audit trails**: Complete records of governance enforcement for legal defence
|
|
✅ **Human escalation**: Values decisions escalated to human approval (reduces automation liability)
|
|
✅ **Documentation**: Governance rules, enforcement logs, decision rationales
|
|
✅ **Good faith effort**: Demonstrates organisation took reasonable steps to prevent AI harms
|
|
|
|
**2. What Tractatus Does NOT Provide:**
|
|
❌ **Legal shield**: Framework doesn't eliminate liability for AI harms
|
|
❌ **Guarantee**: No software can guarantee zero failures
|
|
❌ **Insurance/indemnification**: No liability transfer to framework developers
|
|
❌ **Compliance certification**: Architecture may support compliance—not certified compliance
|
|
|
|
**3. If Tractatus Fails to Prevent Harm:**
|
|
|
|
**Legal Position:**
|
|
Organisations deploying AI systems remain liable for harms. Tractatus is tool for risk mitigation, not liability elimination.
|
|
|
|
**However, audit trail demonstrates:**
|
|
- Organisation implemented architectural safeguards (industry best practice)
|
|
- Values decisions escalated to human review (not fully automated)
|
|
- Governance rules documented and actively enforced
|
|
- Regular monitoring via pressure checks and audit logs
|
|
|
|
**This reduces negligence risk:**
|
|
- **With Tractatus**: "We implemented architectural governance, audit trails show enforcement, human approval for values decisions. This was unforeseeable edge case."
|
|
- **Without Tractatus**: "We relied on prompts. No audit trail. No enforcement guarantees. No evidence of governance."
|
|
|
|
**4. Liability Scenarios:**
|
|
|
|
**Scenario A: Tractatus blocked action, human overrode, harm occurred**
|
|
- **Liability**: Primarily human decision-maker (informed override)
|
|
- **Tractatus role**: Audit log shows framework blocked, human approved
|
|
- **Defence strength**: Strong (demonstrated governance + informed consent)
|
|
|
|
**Scenario B: Tractatus failed to detect values decision, harm occurred**
|
|
- **Liability**: Organisation deploying AI + potentially Tractatus developers (if negligence proven)
|
|
- **Tractatus role**: Audit log shows framework didn't flag
|
|
- **Defence strength**: Moderate (demonstrated governance effort, but failure mode)
|
|
|
|
**Scenario C: No Tractatus, AI caused harm**
|
|
- **Liability**: Organisation deploying AI
|
|
- **Defence strength**: Weak (no governance evidence, no audit trail, no due diligence)
|
|
|
|
**5. Insurance and Indemnification:**
|
|
|
|
**Current state:**
|
|
- **No commercial AI governance insurance** for frameworks like Tractatus
|
|
- **Professional indemnity insurance** may cover AI deployment negligence
|
|
- **Cyber insurance** may cover data breaches from AI failures
|
|
|
|
**Tractatus impact on insurance:**
|
|
- Demonstrates due diligence (may reduce premiums)
|
|
- Audit trails support claims defence
|
|
- Does NOT provide indemnification
|
|
|
|
**We recommend:**
|
|
- Consult insurance broker about AI governance coverage
|
|
- Professional indemnity insurance covering AI deployments
|
|
- Verify audit trail quality meets insurance requirements
|
|
|
|
**6. Regulatory Liability (GDPR, HIPAA, etc.):**
|
|
|
|
**Tractatus benefits:**
|
|
- **GDPR Article 22**: Audit shows human approval for automated decisions
|
|
- **GDPR Article 35**: Framework demonstrates privacy-by-design
|
|
- **HIPAA**: Audit trails show access controls and governance enforcement
|
|
- **SOC 2**: Logs demonstrate security controls
|
|
|
|
**Development context:**
|
|
Framework has not undergone formal compliance audit. Organisations must validate audit trail quality meets their specific regulatory requirements with legal counsel.
|
|
|
|
**7. Contractual Liability:**
|
|
|
|
**B2B contracts:**
|
|
If deploying AI for enterprise customers, contracts likely require governance measures. Tractatus provides:
|
|
- Evidence of technical safeguards
|
|
- Audit trails for customer review
|
|
- Governance rule transparency
|
|
|
|
**Example contract language:**
|
|
> "Vendor implements architectural AI governance framework with audit trails, human approval for values decisions, and pattern bias detection."
|
|
|
|
Tractatus satisfies technical requirements—legal review required for specific contracts.
|
|
|
|
**8. Developer Liability (Tractatus Project):**
|
|
|
|
**Legal disclaimer:**
|
|
Tractatus provided "AS IS" without warranty (standard open-source licence). Developers not liable for deployment failures.
|
|
|
|
**However:**
|
|
If negligence proven (known critical bug ignored, false claims of capability), developers could face liability. Tractatus mitigates this via:
|
|
- Honest development context statements (early-stage research)
|
|
- No false production-ready claims
|
|
- Open-source visibility (no hidden behaviour)
|
|
|
|
**9. Risk Mitigation Recommendations:**
|
|
|
|
**Reduce organisational liability:**
|
|
✅ Implement Tractatus (demonstrates due diligence)
|
|
✅ Document governance rules in version control (provable intent)
|
|
✅ Regular audit log reviews (oversight evidence)
|
|
✅ Human approval for all values decisions (reduces automation liability)
|
|
✅ Legal counsel review of audit trail quality
|
|
✅ Professional indemnity insurance covering AI deployments
|
|
|
|
**Core principle:**
|
|
Tractatus shifts liability defence from "We tried our best with prompts" to "We implemented industry-standard architectural governance with complete audit trails demonstrating enforcement and human oversight."
|
|
|
|
**This improves legal position but doesn't eliminate liability.**
|
|
|
|
**Questions for your legal counsel:**
|
|
1. Does Tractatus audit trail quality meet our regulatory requirements?
|
|
2. What additional measures needed for full liability protection?
|
|
3. Does our professional indemnity insurance cover AI governance failures?
|
|
4. Should we disclose Tractatus governance to customers/users?
|
|
|
|
See [Implementation Guide](/downloads/implementation-guide.pdf) Section 7: "Legal and Compliance Considerations" for detailed analysis.`,
|
|
audience: ['leader'],
|
|
keywords: ['liability', 'legal', 'failure', 'risk', 'insurance', 'responsibility', 'indemnification', 'negligence']
|
|
},
|
|
{
|
|
id: 5,
|
|
question: "What governance metrics can I report to board and stakeholders?",
|
|
answer: `Tractatus provides quantifiable governance metrics for board reporting and stakeholder transparency:
|
|
|
|
**Key Performance Indicators (KPIs):**
|
|
|
|
**1. Enforcement Effectiveness**
|
|
- **Values decisions blocked**: Number of times BoundaryEnforcer blocked values decisions requiring human approval
|
|
- **Target**: 100% escalation rate (no values decisions automated)
|
|
- **Board metric**: "X values decisions escalated to human review (100% compliance)"
|
|
|
|
- **Pattern bias incidents prevented**: CrossReferenceValidator blocks overriding explicit instructions
|
|
- **Target**: Zero pattern bias failures
|
|
- **Board metric**: "Y instruction conflicts detected and prevented"
|
|
|
|
- **Human override rate**: Percentage of blocked decisions approved by humans
|
|
- **Benchmark**: 20-40% (shows framework not over-blocking)
|
|
- **Board metric**: "Z% of flagged decisions approved after review (appropriate sensitivity)"
|
|
|
|
**2. Operational Reliability**
|
|
- **Session handoffs completed**: Successful governance continuity across 200k token limit
|
|
- **Target**: 100% success rate
|
|
- **Board metric**: "X session handoffs completed without instruction loss"
|
|
|
|
- **Framework uptime**: Percentage of time all 6 services operational
|
|
- **Target**: 99%+
|
|
- **Board metric**: "99.X% governance framework availability"
|
|
|
|
- **Pressure warnings issued**: ContextPressureMonitor early warnings before degradation
|
|
- **Target**: Warnings issued at 50k, 100k, 150k tokens
|
|
- **Board metric**: "X degradation warnings issued, Y handoffs triggered proactively"
|
|
|
|
**3. Audit and Compliance**
|
|
- **Audit log completeness**: Percentage of AI actions logged
|
|
- **Target**: 100%
|
|
- **Board metric**: "Complete audit trail for X AI sessions (GDPR Article 30 compliance)"
|
|
|
|
- **Rule enforcement consistency**: Percentage of governance rules enforced without exception
|
|
- **Target**: 100%
|
|
- **Board metric**: "100% consistency across Y rule enforcement events"
|
|
|
|
- **Audit-ready documentation**: Days to produce compliance report
|
|
- **Target**: <1 day (automated export)
|
|
- **Board metric**: "Compliance reports generated in <1 hour (SOC 2 audit-ready)"
|
|
|
|
**4. Risk Mitigation**
|
|
- **Prevented failures**: Critical incidents blocked by framework
|
|
- **Valuation**: Prevented GDPR violation (€20M fine), SOC 2 failure (lost revenue)
|
|
- **Board metric**: "Z critical failures prevented, estimated £X risk mitigated"
|
|
|
|
- **Security boundary breaches**: Attempted values decisions without human approval
|
|
- **Target**: 0 successful breaches
|
|
- **Board metric**: "Zero unauthorised values decisions (100% boundary integrity)"
|
|
|
|
**MongoDB Query Examples:**
|
|
|
|
\`\`\`javascript
|
|
// Q1 2025 Board Report (example queries)
|
|
|
|
// 1. Values decisions escalated
|
|
const valuesEscalations = await db.audit_logs.countDocuments({
|
|
service: "BoundaryEnforcer",
|
|
action: "BLOCK",
|
|
quarter: "2025-Q1"
|
|
});
|
|
// Report: "87 values decisions escalated to human review"
|
|
|
|
// 2. Pattern bias incidents prevented
|
|
const patternBiasBlocked = await db.audit_logs.countDocuments({
|
|
service: "CrossReferenceValidator",
|
|
action: "BLOCK",
|
|
conflict_type: "pattern_bias",
|
|
quarter: "2025-Q1"
|
|
});
|
|
// Report: "12 pattern bias incidents prevented"
|
|
|
|
// 3. Human override rate
|
|
const overrides = await db.audit_logs.countDocuments({
|
|
service: "BoundaryEnforcer",
|
|
action: "BLOCK",
|
|
human_override: true,
|
|
quarter: "2025-Q1"
|
|
});
|
|
const overrideRate = (overrides / valuesEscalations) * 100;
|
|
// Report: "34% of flagged decisions approved after review"
|
|
|
|
// 4. Audit trail completeness
|
|
const totalSessions = 500; // from session logs
|
|
const auditedSessions = (await db.audit_logs.distinct("session_id", { quarter: "2025-Q1" })).length;
|
|
const completeness = (auditedSessions / totalSessions) * 100;
|
|
// Report: "100% audit trail coverage across 500 AI sessions"
|
|
\`\`\`
|
|
|
|
**Board Dashboard (Quarterly):**
|
|
|
|
| Metric | Q1 2025 | Q4 2024 | Target | Status |
|
|
|--------|---------|---------|--------|--------|
|
|
| Values decisions escalated | 87 | 76 | 100% | ✅ |
|
|
| Pattern bias prevented | 12 | 8 | >0 | ✅ |
|
|
| Human override rate | 34% | 41% | 20-40% | ✅ |
|
|
| Framework uptime | 99.7% | 99.2% | >99% | ✅ |
|
|
| Audit trail completeness | 100% | 100% | 100% | ✅ |
|
|
| Prevented critical failures | 3 | 2 | >0 | ✅ |
|
|
| Estimated risk mitigated | £450k | £280k | N/A | 📊 |
|
|
|
|
**Stakeholder Transparency Reporting:**
|
|
|
|
**For customers/users:**
|
|
> "Our AI systems operate under architectural governance with continuous monitoring. Last quarter: 87 values decisions escalated to human review (100% compliance), 12 pattern bias incidents prevented, complete audit trail maintained."
|
|
|
|
**For regulators (GDPR, etc.):**
|
|
> "Audit logs demonstrate compliance with GDPR Article 22 (human approval for automated decisions). Export available: [link to compliance report]."
|
|
|
|
**For investors:**
|
|
> "AI governance framework operational with 99.7% uptime. Prevented 3 critical failures, estimated £450k risk mitigation. Zero regulatory violations year-to-date."
|
|
|
|
**Narrative Reporting (Annual Report, Investor Update):**
|
|
|
|
**Example language:**
|
|
> "Tractatus Framework, our architectural AI governance system, completed its first full year of production operation. Across 2,000 AI sessions, the framework escalated 340 values decisions to human review (achieving 100% compliance with our governance standards), prevented 45 pattern bias incidents, and maintained complete audit trails supporting GDPR Article 30 compliance.
|
|
>
|
|
> No AI-related regulatory violations occurred during this period. Framework uptime exceeded 99.5%, with all six governance services operational. Estimated risk mitigation: £1.2 million in prevented regulatory fines and reputational damage.
|
|
>
|
|
> Our commitment to responsible AI deployment differentiates us in enterprise sales, with 78% of RFP responses citing governance architecture as competitive advantage."
|
|
|
|
**Red Flags to Monitor:**
|
|
|
|
🚨 **Human override rate >60%**: Framework over-blocking (tune sensitivity)
|
|
🚨 **Human override rate <10%**: Framework under-blocking (strengthen rules)
|
|
🚨 **Zero pattern bias incidents**: May indicate CrossReferenceValidator not active
|
|
🚨 **Audit trail gaps**: Compliance risk, investigate service failures
|
|
🚨 **Framework uptime <95%**: Infrastructure investment needed
|
|
|
|
**Export Scripts:**
|
|
|
|
\`\`\`bash
|
|
# Generate quarterly board report
|
|
node scripts/generate-board-report.js --quarter 2025-Q1 --format pdf
|
|
# Output: governance-metrics-2025-Q1.pdf
|
|
|
|
# Export for compliance audit
|
|
node scripts/export-audit-logs.js --start-date 2025-01-01 --end-date 2025-03-31 --format csv
|
|
# Output: audit-logs-Q1-2025.csv
|
|
|
|
# Stakeholder transparency report
|
|
node scripts/generate-transparency-report.js --quarter 2025-Q1 --audience public
|
|
# Output: transparency-report-Q1-2025.md
|
|
\`\`\`
|
|
|
|
**Core Principle:**
|
|
Tractatus metrics demonstrate governance effectiveness, not just technical performance. Frame reporting around risk mitigation, compliance confidence, and stakeholder trust—not just "blocks" and "logs."
|
|
|
|
See [Audit Guide](/downloads/implementation-guide.pdf) Section 8: "Governance Metrics and Reporting" for complete KPI catalogue.`,
|
|
audience: ['leader'],
|
|
keywords: ['metrics', 'kpi', 'reporting', 'board', 'dashboard', 'stakeholders', 'measurement', 'performance']
|
|
},
|
|
{
|
|
id: 6,
|
|
question: "Which regulations does Tractatus help with?",
|
|
answer: `Tractatus provides architectural infrastructure that may support compliance efforts for multiple regulations:
|
|
|
|
**⚠️ Important Disclaimer:**
|
|
Tractatus is NOT compliance-certified software. Framework provides audit trails and governance architecture that may support compliance—legal counsel must validate sufficiency for your specific regulatory requirements.
|
|
|
|
---
|
|
|
|
**1. GDPR (General Data Protection Regulation)**
|
|
|
|
**Relevant Articles:**
|
|
|
|
**Article 22: Automated Decision-Making**
|
|
> "Data subject has right not to be subject to decision based solely on automated processing."
|
|
|
|
**Tractatus support:**
|
|
- BoundaryEnforcer blocks values decisions involving personal data
|
|
- Human approval required before execution
|
|
- Audit logs document all escalations and approvals
|
|
- **Compliance claim**: "Our AI systems escalate privacy decisions to human review (Article 22 compliance)"
|
|
|
|
**Article 30: Records of Processing Activities**
|
|
> "Controller shall maintain record of processing activities under its responsibility."
|
|
|
|
**Tractatus support:**
|
|
- Audit logs provide complete record of AI actions
|
|
- MongoDB \`audit_logs\` collection queryable by date, action, data category
|
|
- Automated export for data protection authority requests
|
|
- **Compliance claim**: "Complete audit trail maintained for all AI processing activities"
|
|
|
|
**Article 35: Data Protection Impact Assessment (DPIA)**
|
|
> "Impact assessment required where processing likely to result in high risk."
|
|
|
|
**Tractatus support:**
|
|
- BoundaryEnforcer enforces privacy-by-design principle
|
|
- Audit logs demonstrate technical safeguards
|
|
- Governance rules document privacy boundaries
|
|
- **Compliance claim**: "Architectural safeguards demonstrate privacy-by-design approach"
|
|
|
|
**GDPR Compliance Checklist:**
|
|
✅ Human approval for automated decisions affecting individuals
|
|
✅ Complete processing records (audit logs)
|
|
✅ Technical safeguards for privacy (boundary enforcement)
|
|
⚠️ **Still required**: Legal basis for processing, consent mechanisms, right to erasure implementation
|
|
|
|
---
|
|
|
|
**2. HIPAA (Health Insurance Portability and Accountability Act)**
|
|
|
|
**Relevant Standards:**
|
|
|
|
**§ 164.308(a)(1): Security Management Process**
|
|
> "Implement policies to prevent, detect, contain security incidents."
|
|
|
|
**Tractatus support:**
|
|
- BoundaryEnforcer prevents unauthorised PHI access
|
|
- Audit logs detect security incidents
|
|
- ContextPressureMonitor warns before degradation
|
|
- **Compliance claim**: "Architectural controls prevent unauthorised health data access"
|
|
|
|
**§ 164.312(b): Audit Controls**
|
|
> "Implement hardware, software to record activity in systems containing PHI."
|
|
|
|
**Tractatus support:**
|
|
- MongoDB audit logs record all AI actions
|
|
- 7-year retention configurable
|
|
- Tamper-evident (append-only logs)
|
|
- **Compliance claim**: "Complete audit trail for all AI interactions with PHI"
|
|
|
|
**HIPAA Compliance Checklist:**
|
|
✅ Audit controls for AI systems handling PHI
|
|
✅ Access controls via BoundaryEnforcer
|
|
✅ Integrity controls via CrossReferenceValidator
|
|
⚠️ **Still required**: Encryption at rest/transit, business associate agreements, breach notification procedures
|
|
|
|
---
|
|
|
|
**3. SOC 2 (Service Organization Control 2)**
|
|
|
|
**Relevant Trust Service Criteria:**
|
|
|
|
**CC6.1: Logical Access - Authorization**
|
|
> "System enforces access restrictions based on authorization."
|
|
|
|
**Tractatus support:**
|
|
- BoundaryEnforcer enforces governance rules before action execution
|
|
- Audit logs document authorisation decisions
|
|
- No bypass mechanism for values decisions
|
|
- **Compliance claim**: "Governance rules enforced before sensitive operations"
|
|
|
|
**CC7.2: System Monitoring**
|
|
> "System includes monitoring activities to detect anomalies."
|
|
|
|
**Tractatus support:**
|
|
- ContextPressureMonitor warns before degradation
|
|
- CrossReferenceValidator detects pattern bias
|
|
- Audit logs enable anomaly detection
|
|
- **Compliance claim**: "Continuous monitoring for AI governance anomalies"
|
|
|
|
**CC7.3: Quality Assurance**
|
|
> "System includes processes to maintain quality of processing."
|
|
|
|
**Tractatus support:**
|
|
- MetacognitiveVerifier checks complex operations
|
|
- InstructionPersistenceClassifier maintains instruction integrity
|
|
- Session handoff protocol prevents quality degradation
|
|
- **Compliance claim**: "Quality controls for AI decision-making processes"
|
|
|
|
**SOC 2 Compliance Checklist:**
|
|
✅ Access controls (boundary enforcement)
|
|
✅ Monitoring (pressure + validator checks)
|
|
✅ Quality assurance (metacognitive verification)
|
|
✅ Audit trail (complete logging)
|
|
⚠️ **Still required**: Penetration testing, incident response plan, vulnerability management
|
|
|
|
---
|
|
|
|
**4. ISO 27001 (Information Security Management)**
|
|
|
|
**Relevant Controls:**
|
|
|
|
**A.12.4: Logging and Monitoring**
|
|
> "Event logs recording user activities shall be produced, kept, regularly reviewed."
|
|
|
|
**Tractatus support:**
|
|
- MongoDB audit logs record all governance events
|
|
- Queryable by date, service, action, user
|
|
- Automated export for security review
|
|
- **Compliance claim**: "Comprehensive event logging for AI governance activities"
|
|
|
|
**A.18.1: Compliance with Legal Requirements**
|
|
> "Appropriate controls identified, implemented to meet legal obligations."
|
|
|
|
**Tractatus support:**
|
|
- Governance rules encode legal requirements
|
|
- BoundaryEnforcer blocks non-compliant actions
|
|
- Audit logs demonstrate compliance efforts
|
|
- **Compliance claim**: "Legal requirements enforced via governance rules"
|
|
|
|
---
|
|
|
|
**5. AI Act (European Union - Proposed)**
|
|
|
|
**Relevant Requirements (High-Risk AI Systems):**
|
|
|
|
**Article 9: Risk Management System**
|
|
> "High-risk AI systems shall be subject to risk management system."
|
|
|
|
**Tractatus support:**
|
|
- Six-service architecture addresses identified AI risks
|
|
- Audit logs document risk mitigation measures
|
|
- Human approval for high-risk decisions
|
|
- **Compliance claim**: "Architectural risk management for AI systems"
|
|
|
|
**Article 12: Record-Keeping**
|
|
> "High-risk AI systems shall have logging capabilities."
|
|
|
|
**Tractatus support:**
|
|
- Complete audit trail in MongoDB
|
|
- Automated export for regulatory authorities
|
|
- Retention policy configurable per jurisdiction
|
|
- **Compliance claim**: "Audit logs meet AI Act record-keeping requirements"
|
|
|
|
**Development context:**
|
|
AI Act not yet in force. Tractatus architecture designed to support anticipated requirements—final compliance must be validated when regulation enacted.
|
|
|
|
---
|
|
|
|
**6. FTC (Federal Trade Commission) - AI Guidance**
|
|
|
|
**FTC Principles:**
|
|
|
|
**Transparency**: "Companies should be transparent about AI use."
|
|
**Tractatus support**: Audit logs demonstrate governance transparency
|
|
|
|
**Fairness**: "AI should not discriminate."
|
|
**Tractatus support**: PluralisticDeliberationOrchestrator ensures diverse stakeholder input
|
|
|
|
**Accountability**: "Companies accountable for AI harms."
|
|
**Tractatus support**: Audit trail demonstrates due diligence
|
|
|
|
---
|
|
|
|
**Regulatory Summary Table:**
|
|
|
|
| Regulation | Tractatus Support | Still Required | Strength |
|
|
|------------|-------------------|----------------|----------|
|
|
| **GDPR** | Audit trails, human approval, privacy-by-design | Legal basis, consent, data subject rights | Strong |
|
|
| **HIPAA** | Audit controls, access controls | Encryption, BAAs, breach notification | Moderate |
|
|
| **SOC 2** | Access controls, monitoring, audit trail | Penetration testing, incident response | Strong |
|
|
| **ISO 27001** | Logging, legal compliance controls | Full ISMS, risk assessment | Moderate |
|
|
| **AI Act (proposed)** | Risk management, record-keeping | Model documentation, transparency | Moderate |
|
|
| **FTC** | Transparency, accountability evidence | Fair lending, discrimination testing | Moderate |
|
|
|
|
---
|
|
|
|
**What Tractatus Does NOT Provide:**
|
|
|
|
❌ **Legal advice**: Consult counsel for regulatory interpretation
|
|
❌ **Certification**: No third-party audit or compliance certification
|
|
❌ **Complete compliance**: Architectural infrastructure only, not full programme
|
|
❌ **Jurisdiction-specific**: Regulations vary by country/region
|
|
|
|
---
|
|
|
|
**Recommended Approach:**
|
|
|
|
1. **Identify applicable regulations** for your organisation
|
|
2. **Consult legal counsel** to map Tractatus capabilities to requirements
|
|
3. **Validate audit trail quality** meets regulatory standards
|
|
4. **Implement additional controls** where Tractatus insufficient
|
|
5. **Document compliance posture** (what Tractatus provides + what else implemented)
|
|
|
|
**Example compliance statement:**
|
|
> "Our AI systems operate under Tractatus governance framework, providing audit trails supporting GDPR Article 30, SOC 2 CC6.1, and HIPAA § 164.312(b) compliance. Legal counsel has validated audit trail quality meets our regulatory requirements. Additional controls implemented: [encryption, BAAs, incident response plan]."
|
|
|
|
---
|
|
|
|
**Tractatus does NOT replace legal compliance programme—it provides architectural foundation that may support compliance efforts.**
|
|
|
|
See [Audit Guide](/downloads/implementation-guide.pdf) Section 9: "Regulatory Compliance Mapping" for detailed analysis.`,
|
|
audience: ['leader'],
|
|
keywords: ['regulations', 'compliance', 'gdpr', 'hipaa', 'soc2', 'legal', 'regulatory', 'standards', 'certification']
|
|
}
|
|
];
|
|
|
|
// State management — shared by the render and listener functions below.
// currentFilter: active audience filter ('all' or an audience key, e.g. 'leader').
// currentSearchQuery: free-text query typed into the modal search input.
let currentFilter = 'all';
let currentSearchQuery = '';
|
|
|
|
/**
 * Open the search modal, focus its input, and render the current FAQ results.
 * Logs and returns early if the modal markup is missing, instead of throwing.
 */
function openSearchModal() {
  const modal = document.getElementById('search-modal');
  const searchInput = document.getElementById('faq-search');

  if (!modal) {
    console.error('[FAQ] search-modal not found');
    return;
  }

  modal.classList.add('show');

  // Focus on search input (delayed so the show transition has started)
  setTimeout(() => {
    if (searchInput) {
      searchInput.focus();
    }
  }, 100);

  // Render initial results
  renderFAQs();
}
|
|
|
|
/**
 * Close the search modal. Safe to call when the modal element is absent.
 */
function closeSearchModal() {
  const modal = document.getElementById('search-modal');
  if (modal) {
    modal.classList.remove('show');
  }
}
|
|
|
|
/**
 * Open the search tips modal. Safe to call when the modal element is absent.
 */
function openSearchTipsModal() {
  const modal = document.getElementById('search-tips-modal');
  if (modal) {
    modal.classList.add('show');
  }
}
|
|
|
|
/**
 * Close the search tips modal. Safe to call when the modal element is absent.
 */
function closeSearchTipsModal() {
  const modal = document.getElementById('search-tips-modal');
  if (modal) {
    modal.classList.remove('show');
  }
}
|
|
|
|
/**
 * Wire up all modal-related event listeners: open/close buttons, featured
 * question shortcuts, the clear-filters control, backdrop clicks, and the
 * Escape-key shortcut. Every DOM lookup is guarded so missing markup on a
 * given page never throws.
 */
function setupModalListeners() {
  // Attach a click handler to an element by id, if it exists.
  const onClick = (id, handler) => {
    const el = document.getElementById(id);
    if (el) {
      el.addEventListener('click', handler);
    }
  };

  // Close a modal when its backdrop (not its inner content) is clicked.
  const closeOnBackdrop = (id, close) => {
    const modal = document.getElementById(id);
    if (modal) {
      modal.addEventListener('click', (e) => {
        if (e.target === modal) {
          close();
        }
      });
    }
  };

  // Open search modal button
  onClick('open-search-modal-btn', openSearchModal);

  // Featured question buttons: open the modal pre-filled with the button's query.
  document.querySelectorAll('.featured-question-btn').forEach((btn) => {
    btn.addEventListener('click', () => {
      const searchQuery = btn.dataset.search;
      openSearchModal();
      // Set search value after modal opens (the input needs a tick to be visible),
      // then dispatch 'input' so the live-search listener re-renders results.
      setTimeout(() => {
        const searchInput = document.getElementById('faq-search');
        if (searchInput) {
          searchInput.value = searchQuery;
          searchInput.dispatchEvent(new Event('input'));
        }
      }, 100);
    });
  });

  // Close search modal button
  onClick('search-modal-close-btn', closeSearchModal);

  // Search tips open/close buttons
  onClick('search-tips-btn', openSearchTipsModal);
  onClick('search-tips-close-btn', closeSearchTipsModal);

  // Clear filters button: reset state and UI controls, then re-render.
  onClick('clear-filters-btn', () => {
    currentFilter = 'all';
    currentSearchQuery = '';
    const searchInput = document.getElementById('faq-search');
    if (searchInput) {
      searchInput.value = '';
    }
    const filterSelect = document.getElementById('filter-audience');
    if (filterSelect) {
      filterSelect.value = 'all';
    }
    renderFAQs();
  });

  // Close modals on backdrop click
  closeOnBackdrop('search-modal', closeSearchModal);
  closeOnBackdrop('search-tips-modal', closeSearchTipsModal);

  // Keyboard shortcuts: Escape closes both modals.
  document.addEventListener('keydown', (e) => {
    if (e.key === 'Escape') {
      closeSearchModal();
      closeSearchTipsModal();
    }
  });
}
|
|
|
|
/**
 * Render a curated set of top FAQs directly on the main page as an accordion.
 * Clicking a question collapses every item, then re-opens the clicked one
 * unless it was already open (i.e. click again to close).
 */
function renderInlineFAQs() {
  const container = document.getElementById('inline-faq-container');
  if (!container) {
    console.error('[FAQ] inline-faq-container not found');
    return;
  }

  // Curated ids for the six highlighted questions (mix of all audiences).
  const featuredIds = new Set([19, 12, 27, 13, 1, 2]);
  const featured = FAQ_DATA.filter((faq) => featuredIds.has(faq.id));

  console.log(`[FAQ] Rendering ${featured.length} inline FAQs (marked available: ${typeof marked !== 'undefined'})`);

  // Keep a stable presentation order by ascending id.
  featured.sort((a, b) => a.id - b.id);

  // Build all expandable items in one pass.
  container.innerHTML = featured.map((faq) => createInlineFAQItemHTML(faq)).join('');

  // Accordion expand/collapse behaviour.
  for (const question of document.querySelectorAll('.inline-faq-question')) {
    question.addEventListener('click', () => {
      const item = question.closest('.inline-faq-item');
      const wasOpen = item.classList.contains('open');

      // Collapse everything first.
      for (const other of document.querySelectorAll('.inline-faq-item')) {
        other.classList.remove('open');
      }

      // Re-open the clicked item only if it was previously closed.
      if (!wasOpen) {
        item.classList.add('open');
      }
    });
  }

  // Syntax-highlight fenced code blocks inside the rendered answers.
  if (typeof hljs !== 'undefined') {
    for (const block of document.querySelectorAll('.inline-faq-answer-content pre code')) {
      hljs.highlightElement(block);
    }
  }

  // Open answer links in a new tab without exposing window.opener.
  for (const link of document.querySelectorAll('.inline-faq-answer-content a')) {
    link.setAttribute('target', '_blank');
    link.setAttribute('rel', 'noopener noreferrer');
  }
}
|
|
|
|
/**
 * Build the HTML for one inline (main-page) expandable FAQ item.
 *
 * The border colour is chosen from the FAQ's primary (first) audience, with
 * a grey fallback for unknown audiences. The answer is rendered from markdown
 * via marked.js when available; if the library is missing or parsing throws,
 * a plain-text fallback converts blank lines to paragraphs and single
 * newlines to <br>.
 *
 * @param {{id: number, question: string, answer: string, audience: string[]}} faq - FAQ entry from FAQ_DATA
 * @returns {string} HTML string for the accordion item
 */
function createInlineFAQItemHTML(faq) {
  const audienceColors = {
    'researcher': 'border-purple-200 hover:border-purple-300',
    'implementer': 'border-blue-200 hover:border-blue-300',
    'leader': 'border-green-200 hover:border-green-300'
  };

  const primaryAudience = faq.audience[0];
  const colorClass = audienceColors[primaryAudience] || 'border-gray-200 hover:border-gray-300';

  // Single definition of the plain-text fallback (previously duplicated in
  // both the catch branch and the no-marked branch).
  const plainTextFallback = () =>
    `<p>${faq.answer.replace(/\n\n/g, '</p><p>').replace(/\n/g, '<br>')}</p>`;

  // Parse markdown answer
  let answerHtml;
  if (typeof marked !== 'undefined') {
    try {
      answerHtml = marked.parse(faq.answer);
    } catch (error) {
      console.error('[FAQ] Inline markdown parsing failed for FAQ', faq.id, error);
      answerHtml = plainTextFallback();
    }
  } else {
    console.warn('[FAQ] marked.js not loaded for inline FAQs - using plain text');
    answerHtml = plainTextFallback();
  }

  return `
    <div class="inline-faq-item border-2 ${colorClass} rounded-lg transition">
      <div class="inline-faq-question p-4 flex items-start justify-between cursor-pointer group">
        <h3 class="text-lg font-semibold text-gray-900 group-hover:text-blue-600 transition flex-1 pr-4">${escapeHtml(faq.question)}</h3>
        <svg class="faq-arrow w-6 h-6 text-gray-400 group-hover:text-blue-600 transition flex-shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
          <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 9l-7 7-7-7"/>
        </svg>
      </div>
      <div class="inline-faq-answer">
        <div class="inline-faq-answer-content p-6 pt-0">
          ${answerHtml}
        </div>
      </div>
    </div>
  `;
}
|
|
|
|
/**
|
|
* Setup category filter button listeners
|
|
*/
|
|
/**
 * Wire the audience category buttons: clicking one opens the search modal
 * pre-filtered to that audience, clears the search query, and (after the
 * modal renders) syncs the audience dropdown to match.
 */
function setupCategoryButtons() {
    for (const btn of document.querySelectorAll('.category-filter-btn')) {
        btn.addEventListener('click', () => {
            const audience = btn.dataset.audience;

            // Set filter state and reset the search query
            currentFilter = audience;
            currentSearchQuery = '';

            // Open modal (this triggers renderFAQs automatically)
            openSearchModal();

            // Sync the filter dropdown once the modal is in the DOM (UI only)
            setTimeout(() => {
                const filterSelect = document.getElementById('filter-audience');
                if (filterSelect) {
                    filterSelect.value = audience;
                }
            }, 50);
        });
    }
}
|
|
|
|
/**
|
|
* Setup "View All Questions" button listener
|
|
*/
|
|
/**
 * Wire the "View All Questions" button: clicking it clears any audience
 * filter and search query, then opens the search modal showing everything.
 * No-op if the button is absent from the page.
 */
function setupViewAllButton() {
    const viewAllBtn = document.getElementById('view-all-questions-btn');
    if (!viewAllBtn) {
        return;
    }

    viewAllBtn.addEventListener('click', () => {
        currentFilter = 'all';
        currentSearchQuery = '';
        openSearchModal();
    });
}
|
|
|
|
// Initialize on page load
|
|
/**
 * Page bootstrap: configure markdown rendering, render the inline FAQ
 * teaser list, and wire up every interactive control once the DOM is ready.
 */
const initFAQPage = () => {
    // Configure marked.js for better rendering (soft line breaks, GFM)
    if (typeof marked !== 'undefined') {
        marked.setOptions({
            breaks: true,
            gfm: true,
            headerIds: false
        });
    }

    // Render top 6 FAQs inline on page load
    renderInlineFAQs();

    // Setup all event listeners
    setupModalListeners();
    setupSearchListener();
    setupFilterListeners();
    setupCategoryButtons();
    setupViewAllButton();
};

document.addEventListener('DOMContentLoaded', initFAQPage);
|
|
|
|
/**
|
|
* Render FAQ items based on current filter and search
|
|
*/
|
|
/**
 * Render FAQ items in the search modal based on the current audience filter
 * and search query.
 *
 * Reads module state: FAQ_DATA, currentFilter, currentSearchQuery.
 * Side effects: rewrites #faq-container-modal's innerHTML, toggles the
 * no-results message, updates the results counter, (re)attaches a single
 * delegated click listener, and schedules batched syntax highlighting.
 */
function renderFAQs() {
    const container = document.getElementById('faq-container-modal');
    const noResults = document.getElementById('no-results-modal');
    const resultsCount = document.getElementById('search-results-count');

    // Filter by audience
    let filtered = FAQ_DATA;
    if (currentFilter !== 'all') {
        filtered = FAQ_DATA.filter(faq => faq.audience.includes(currentFilter));
    }

    // Filter by search query (matches question text, answer text, or keywords)
    if (currentSearchQuery) {
        const query = currentSearchQuery.toLowerCase();
        filtered = filtered.filter(faq => {
            const questionMatch = faq.question.toLowerCase().includes(query);
            const answerMatch = faq.answer.toLowerCase().includes(query);
            const keywordsMatch = faq.keywords.some(kw => kw.includes(query));
            return questionMatch || answerMatch || keywordsMatch;
        });
    }

    // Sort by ID (Leader questions have lower IDs, appear first).
    // BUGFIX: copy before sorting — with no filter/search active, `filtered`
    // aliases FAQ_DATA, and Array.prototype.sort would mutate it in place.
    filtered = [...filtered].sort((a, b) => a.id - b.id);

    // Show/hide no results message
    if (filtered.length === 0) {
        container.classList.add('hidden');
        noResults.classList.remove('hidden');
        resultsCount.textContent = 'No questions found';
        return;
    }

    container.classList.remove('hidden');
    noResults.classList.add('hidden');

    // Update results count
    const filterText = currentFilter === 'all' ? 'all questions' : `${currentFilter} questions`;
    resultsCount.textContent = `Showing ${filtered.length} of ${FAQ_DATA.length} ${filterText}`;

    console.log(`[FAQ] Rendering ${filtered.length} FAQs in modal (marked available: ${typeof marked !== 'undefined'})`);

    // Render FAQ items (fast, no blocking)
    container.innerHTML = filtered.map(faq => createFAQItemHTML(faq)).join('');

    // Use event delegation for better performance (single listener for all items)
    container.removeEventListener('click', handleFAQClick); // Remove old listener if exists
    container.addEventListener('click', handleFAQClick);

    // Defer expensive syntax highlighting to avoid blocking UI
    requestAnimationFrame(() => {
        if (typeof hljs !== 'undefined') {
            const codeBlocks = container.querySelectorAll('.faq-answer-content pre code');
            // Highlight in small batches to avoid freezing
            highlightCodeBlocksInBatches(codeBlocks);
        }
    });
}
|
|
|
|
/**
|
|
* Event delegation handler for FAQ clicks (single listener for all FAQs)
|
|
*/
|
|
/**
 * Delegated click handler for the FAQ modal container (one listener serves
 * every item). Toggles the clicked item open/closed and, on first open,
 * lazily syntax-highlights any code blocks inside it.
 *
 * @param {MouseEvent} event - Click event delegated from the container.
 */
function handleFAQClick(event) {
    const questionEl = event.target.closest('.faq-question');
    const item = questionEl ? questionEl.closest('.faq-item') : null;
    if (!item) {
        return;
    }

    item.classList.toggle('open');

    // Highlight code only once, the first time the item is opened
    const needsHighlight = item.classList.contains('open') && !item.dataset.highlighted;
    if (!needsHighlight) {
        return;
    }

    const codeBlocks = item.querySelectorAll('.faq-answer-content pre code');
    if (codeBlocks.length === 0 || typeof hljs === 'undefined') {
        return;
    }

    codeBlocks.forEach((block) => hljs.highlightElement(block));
    item.dataset.highlighted = 'true';
}
|
|
|
|
/**
|
|
* Highlight code blocks in batches to avoid UI freeze
|
|
*/
|
|
/**
 * Syntax-highlight a collection of code blocks in small batches, yielding to
 * the browser between batches (via requestAnimationFrame) so the UI never
 * freezes on large result sets.
 *
 * @param {Iterable<Element>} codeBlocks - Code elements to highlight.
 * @param {number} [batchSize=5] - Blocks highlighted per animation frame.
 */
function highlightCodeBlocksInBatches(codeBlocks, batchSize = 5) {
    const queue = Array.from(codeBlocks);
    let cursor = 0;

    const runBatch = () => {
        const end = cursor + batchSize;
        for (const block of queue.slice(cursor, end)) {
            if (typeof hljs !== 'undefined') {
                hljs.highlightElement(block);
            }
        }
        cursor = end;

        if (cursor < queue.length) {
            // Yield to the browser, then continue with the next batch
            requestAnimationFrame(runBatch);
        }
    };

    if (queue.length > 0) {
        runBatch();
    }
}
|
|
|
|
/**
|
|
* Create HTML for a single FAQ item
|
|
*/
|
|
/**
 * Build the HTML string for a single FAQ item in the search modal: the
 * question (with search-term highlighting), audience badges, and the
 * markdown-rendered answer.
 *
 * @param {{id: number, question: string, answer: string, audience: string[]}} faq
 * @returns {string} HTML for the modal FAQ item (data-id carries faq.id).
 */
function createFAQItemHTML(faq) {
    const highlightedQuestion = highlightText(faq.question, currentSearchQuery);

    // Plain-text fallback used when markdown rendering is unavailable or fails:
    // blank lines become paragraph breaks, single newlines become <br>.
    const toFallbackHtml = (text) => `<p>${text.replace(/\n\n/g, '</p><p>').replace(/\n/g, '<br>')}</p>`;

    // Parse markdown to HTML
    let answerHTML = faq.answer;
    if (typeof marked !== 'undefined') {
        try {
            answerHTML = marked.parse(faq.answer);
        } catch (error) {
            console.error('[FAQ] Markdown parsing failed for FAQ', faq.id, error);
            answerHTML = toFallbackHtml(faq.answer);
        }
    } else {
        console.warn('[FAQ] marked.js not loaded - using plain text');
        answerHTML = toFallbackHtml(faq.answer);
    }

    // Highlight search query in the rendered HTML (if searching).
    // BUGFIX: the (?![^<]*>) lookahead skips matches that fall inside an HTML
    // tag — without it, searching for e.g. "div" or "class" would inject
    // <span> markup into tag names/attributes and corrupt the answer markup.
    if (currentSearchQuery) {
        const regex = new RegExp(`(${escapeRegex(currentSearchQuery)})(?![^<]*>)`, 'gi');
        answerHTML = answerHTML.replace(regex, '<span class="highlight">$1</span>');
    }

    // Audience badges
    const badges = faq.audience.map(aud => {
        const colors = {
            researcher: 'bg-purple-100 text-purple-700',
            implementer: 'bg-blue-100 text-blue-700',
            leader: 'bg-green-100 text-green-700'
        };
        return `<span class="inline-block px-2 py-1 text-xs font-medium rounded ${colors[aud]}">${aud}</span>`;
    }).join(' ');

    return `
        <div class="faq-item bg-white rounded-lg shadow-sm mb-4 overflow-hidden border border-gray-200" data-id="${faq.id}">
            <div class="faq-question p-6 hover:bg-gray-50 transition">
                <div class="flex justify-between items-start">
                    <div class="flex-1">
                        <h3 class="text-lg font-semibold text-gray-900 mb-2">${highlightedQuestion}</h3>
                        <div class="flex gap-2 flex-wrap">
                            ${badges}
                        </div>
                    </div>
                    <div class="ml-4 flex-shrink-0">
                        <span class="faq-arrow text-blue-600 text-2xl">▼</span>
                    </div>
                </div>
            </div>
            <div class="faq-answer px-6 pb-6">
                <div class="faq-answer-content">${answerHTML}</div>
            </div>
        </div>
    `;
}
|
|
|
|
/**
|
|
* Highlight search query in text
|
|
*/
|
|
/**
 * HTML-escape a plain-text string and wrap case-insensitive occurrences of
 * the search query in a highlight span. With no query, returns the escaped
 * text unchanged.
 *
 * @param {string} text - Plain text to escape and highlight.
 * @param {string} query - Search query (may be empty/falsy).
 * @returns {string} Escaped HTML with <span class="highlight"> around matches.
 */
function highlightText(text, query) {
    const escaped = escapeHtml(text);
    if (!query) {
        return escaped;
    }

    const pattern = new RegExp(`(${escapeRegex(query)})`, 'gi');
    return escaped.replace(pattern, '<span class="highlight">$1</span>');
}
|
|
|
|
/**
|
|
* Escape HTML to prevent XSS
|
|
*/
|
|
/**
 * Escape HTML special characters so untrusted text can be embedded safely
 * in markup.
 *
 * Pure-string implementation: unlike the textContent/innerHTML DOM trick it
 * allocates no element per call, works outside a browser, and also escapes
 * quotes so the result is safe inside attribute values as well as element
 * content.
 *
 * @param {string} text - Raw text to escape.
 * @returns {string} Text with &, <, >, " and ' replaced by HTML entities.
 */
function escapeHtml(text) {
    return String(text)
        .replace(/&/g, '&amp;')   // must run first so later entities aren't double-escaped
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&#39;');
}
|
|
|
|
/**
|
|
* Escape regex special characters
|
|
*/
|
|
/**
 * Escape regex metacharacters in a string so it can be embedded literally
 * inside a RegExp pattern.
 *
 * @param {string} text - Raw text that may contain regex specials.
 * @returns {string} Text with . * + ? ^ $ { } ( ) | [ ] \ backslash-escaped.
 */
function escapeRegex(text) {
    const specials = new Set('.*+?^${}()|[]\\');
    let escaped = '';
    for (const ch of text) {
        escaped += specials.has(ch) ? '\\' + ch : ch;
    }
    return escaped;
}
|
|
|
|
/**
|
|
* Setup search input listener
|
|
*/
|
|
/**
 * Wire the FAQ search input: every keystroke updates the module-level
 * currentSearchQuery (trimmed) and re-renders the modal's FAQ list.
 * No-op if the input is absent — consistent with setupFilterListeners,
 * which already guards its element (the original threw a TypeError here).
 */
function setupSearchListener() {
    const searchInput = document.getElementById('faq-search');
    if (!searchInput) {
        return;
    }

    searchInput.addEventListener('input', (e) => {
        currentSearchQuery = e.target.value.trim();
        renderFAQs();
    });
}
|
|
|
|
/**
|
|
* Setup filter dropdown listener
|
|
*/
|
|
/**
 * Wire the audience filter dropdown: changing it updates the module-level
 * currentFilter and re-renders the modal's FAQ list. No-op if the dropdown
 * is absent from the page.
 */
function setupFilterListeners() {
    const dropdown = document.getElementById('filter-audience');
    if (!dropdown) {
        return;
    }

    dropdown.addEventListener('change', (event) => {
        currentFilter = event.target.value;
        renderFAQs();
    });
}
|