// Decision scenarios for the BoundaryEnforcer demo.
// Each entry describes one candidate action, whether the enforcer lets the AI
// automate it, the reasoning, optional AI-suggested alternatives (null when
// the action is allowed), and a code snippet rendered into #code-example.
const scenarios = {
  optimize_images: {
    title: "Optimize Image Loading",
    description: "Implement lazy loading and compression for better performance",
    domain: "technical",
    allowed: true,
    reason: "Technical optimization within defined parameters. No values trade-offs required.",
    alternatives: null,
    code: `// BoundaryEnforcer Check
const boundary = enforcer.enforce({
  type: 'performance_optimization',
  action: 'implement_lazy_loading'
});

// Result: ALLOWED
{
  allowed: true,
  reason: "Technical decision, no values impact",
  proceed: true
}`
  },
  privacy_vs_analytics: {
    title: "Enable Analytics Tracking",
    description: "Add Google Analytics to track user behavior",
    domain: "values",
    allowed: false,
    reason: "Privacy vs. analytics is an irreducible values trade-off. Different users have different privacy expectations.",
    alternatives: [
      "Research privacy-friendly analytics options (e.g., Plausible, Fathom)",
      "Analyze current user behavior from server logs",
      "Document pros/cons of different analytics approaches",
      "Present options with privacy impact assessment"
    ],
    code: `// BoundaryEnforcer Check
const boundary = enforcer.enforce({
  type: 'privacy_policy',
  action: 'enable_tracking',
  domain: 'values'
});

// Result: BLOCKED
{
  allowed: false,
  reason: "Privacy vs. convenience trade-off",
  requires_human_decision: true,
  boundary_section: "12.1"
}`
  },
  auto_subscribe: {
    title: "Auto-Subscribe Users",
    description: "Automatically subscribe new users to newsletter",
    domain: "user_agency",
    allowed: false,
    reason: "This determines the level of user control and agency. Opt-in vs. opt-out affects user autonomy.",
    alternatives: [
      "Implement explicit opt-in during registration",
      "Implement opt-out with clear unsubscribe",
      "Research industry best practices for consent",
      "Document GDPR compliance implications"
    ],
    code: `// BoundaryEnforcer Check
const boundary = enforcer.enforce({
  type: 'user_consent',
  action: 'auto_subscribe',
  domain: 'user_agency'
});

// Result: BLOCKED
{
  allowed: false,
  reason: "Affects user agency and control",
  requires_human_decision: true,
  boundary_section: "12.2"
}`
  },
  delete_old_data: {
    title: "Delete Old User Data",
    description: "Automatically delete user data older than 6 months",
    domain: "irreversible",
    allowed: false,
    reason: "Data deletion is irreversible and may have legal/compliance implications.",
    alternatives: [
      "Check backup status and retention policies",
      "Verify legal data retention requirements",
      "Confirm user consent for deletion",
      "Implement archive rather than delete"
    ],
    code: `// BoundaryEnforcer Check
const boundary = enforcer.enforce({
  type: 'data_deletion',
  action: 'delete_user_data',
  domain: 'irreversible'
});

// Result: BLOCKED
{
  allowed: false,
  reason: "Irreversible action with legal implications",
  requires_human_approval: true,
  boundary_section: "12.3"
}`
  },
  cache_strategy: {
    title: "Implement Caching Strategy",
    description: "Add Redis caching for frequently accessed data",
    domain: "technical",
    allowed: true,
    reason: "Technical implementation decision within established patterns. No values impact.",
    alternatives: null,
    code: `// BoundaryEnforcer Check
const boundary = enforcer.enforce({
  type: 'technical_implementation',
  action: 'add_caching'
});

// Result: ALLOWED
{
  allowed: true,
  reason: "Technical decision with clear constraints",
  proceed: true
}`
  },
  content_moderation: {
    title: "Automatic Content Moderation",
    description: "AI automatically removes inappropriate content",
    domain: "values",
    allowed: false,
    reason: "Defining 'inappropriate' involves values judgments about free speech, community standards, and cultural context.",
    alternatives: [
      "Implement flagging system for human review",
      "Create tiered moderation (AI flags, human decides)",
      "Research community moderation models",
      "Document content policy options for decision"
    ],
    code: `// BoundaryEnforcer Check
const boundary = enforcer.enforce({
  type: 'content_policy',
  action: 'auto_moderate',
  domain: 'values'
});

// Result: BLOCKED
{
  allowed: false,
  reason: "Content standards are values decisions",
  requires_human_decision: true,
  boundary_section: "12.1"
}`
  }
};

// Wire up the scenario cards: clicking a card renders its enforcement result
// and moves the selection highlight to that card.
document.querySelectorAll('.scenario-card').forEach((card) => {
  card.addEventListener('click', () => {
    const scenario = scenarios[card.dataset.decision];
    if (!scenario) return; // unknown or missing data-decision — ignore the click

    showResult(scenario);

    // Clear any previous highlight, then mark the clicked card as selected.
    document.querySelectorAll('.scenario-card').forEach((c) => {
      c.classList.remove('ring-2', 'ring-blue-500');
    });
    card.classList.add('ring-2', 'ring-blue-500');
  });
});

/**
 * Render a scenario's enforcement verdict into the result panel.
 * Only static, locally-defined strings are interpolated into innerHTML —
 * no user input reaches it.
 *
 * @param {object} scenario - One entry from `scenarios` (title, description,
 *   allowed, reason, alternatives, code).
 */
function showResult(scenario) {
  document.getElementById('empty-state').classList.add('hidden');
  document.getElementById('result-content').classList.remove('hidden');

  // Decision info
  document.getElementById('decision-title').textContent = scenario.title;
  document.getElementById('decision-desc').textContent = scenario.description;

  // Verdict banner: green for allowed, red for blocked.
  const verdict = document.getElementById('verdict');
  if (scenario.allowed) {
    verdict.innerHTML = `
✅ ALLOWED
AI can automate this decision
`;
    verdict.className = 'rounded-lg p-6 mb-6 bg-green-100 border border-green-300';
  } else {
    verdict.innerHTML = `
🚫 BLOCKED
Requires human judgment
`;
    verdict.className = 'rounded-lg p-6 mb-6 bg-red-100 border border-red-300';
  }

  // Reasoning (textContent: rendered verbatim, not parsed as HTML)
  document.getElementById('reasoning').textContent = scenario.reason;

  // Alternatives: one bullet line per item. (The previous version appended a
  // stray empty "• " after every alternative.)
  if (scenario.alternatives) {
    document.getElementById('ai-alternatives').classList.remove('hidden');
    document.getElementById('alternatives-list').innerHTML = scenario.alternatives
      .map((alt) => `• ${alt}`)
      .join('<br>');
  } else {
    document.getElementById('ai-alternatives').classList.add('hidden');
  }

  // Code example
  document.getElementById('code-example').textContent = scenario.code;
}