Create Economist SubmissionTracking package correctly:
* mainArticle = full blog post content
* coverLetter = 216-word SIR— letter
* Links to blog post via blogPostId
- Archive 'Letter to The Economist' from blog posts (it's the cover letter)
- Fix date display on article cards (use published_at)
- Target publication already displaying via blue badge

Database changes:
- Make blogPostId optional in SubmissionTracking model
- Economist package ID: 68fa85ae49d4900e7f2ecd83
- Le Monde package ID: 68fa2abd2e6acd5691932150

Next: Enhanced modal with tabs, validation, export

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
/**
 * Integration Tests for Value Pluralism Services
 *
 * Tests the complete flow:
 * BoundaryEnforcer → PluralisticDeliberationOrchestrator → AdaptiveCommunicationOrchestrator
 */

// Each module exports a service object whose methods are called directly
// (e.g. BoundaryEnforcer.initialize(), .checkDecision()) — see the suite below.
const BoundaryEnforcer = require('../../src/services/BoundaryEnforcer.service');
const PluralisticDeliberationOrchestrator = require('../../src/services/PluralisticDeliberationOrchestrator.service');
const AdaptiveCommunicationOrchestrator = require('../../src/services/AdaptiveCommunicationOrchestrator.service');
describe('Value Pluralism Integration', () => {
  beforeAll(async () => {
    // Initialize services once for the whole suite.
    // NOTE(review): AdaptiveCommunicationOrchestrator is never initialized here —
    // confirm it requires no initialize() call; it is only exercised indirectly
    // (via facilitateDeliberation) and through getStats() below.
    await BoundaryEnforcer.initialize();
    await PluralisticDeliberationOrchestrator.initialize();
  });

  describe('BoundaryEnforcer → PluralisticDeliberationOrchestrator Flow', () => {
    test('should detect value conflict and trigger deliberation', async () => {
      // Simulate a decision that crosses into values territory.
      const decision = {
        description: 'Should we disclose user private data to prevent potential harm to others? This involves the duty to respect privacy rights and the obligation to maintain consent, but also maximizing welfare and preventing harm through safety measures.',
        context: {
          requester: 'safety_team',
          affected_users: 1000,
          harm_type: 'potential_violence'
        }
      };

      // Step 1: BoundaryEnforcer should detect this as a values decision
      // and refuse to let the AI act on it autonomously.
      const boundaryCheck = await BoundaryEnforcer.checkDecision({
        action: 'disclose_user_data',
        description: decision.description,
        metadata: decision.context
      });

      expect(boundaryCheck.allowed).toBe(false);
      expect(boundaryCheck.reason).toContain('values');
      expect(boundaryCheck.requires_human_approval).toBe(true);

      // Step 2: Trigger PluralisticDeliberationOrchestrator for conflict analysis.
      // The AI facilitates; the human decides.
      const conflictAnalysis = PluralisticDeliberationOrchestrator.analyzeConflict(decision);

      expect(conflictAnalysis.moral_frameworks_in_tension).toBeDefined();
      expect(conflictAnalysis.moral_frameworks_in_tension.length).toBeGreaterThan(0);
      expect(conflictAnalysis.ai_role).toBe('FACILITATE_ONLY');
      expect(conflictAnalysis.human_role).toBe('DECIDE');
      expect(conflictAnalysis.value_trade_offs).toContain('privacy vs. safety');
    });

    test('should route technical decision without triggering deliberation', async () => {
      // A purely operational change with no values dimension.
      const technicalDecision = {
        description: 'Update database connection pool size from 10 to 20 connections',
        context: {
          service: 'database',
          change_type: 'configuration'
        }
      };

      // BoundaryEnforcer should allow technical decisions without escalation.
      const boundaryCheck = await BoundaryEnforcer.checkDecision({
        action: 'update_config',
        description: technicalDecision.description,
        metadata: technicalDecision.context
      });

      expect(boundaryCheck.allowed).toBe(true);
    });
  });

  describe('PluralisticDeliberationOrchestrator → AdaptiveCommunicationOrchestrator Flow', () => {
    test('should adapt deliberation invitation to stakeholder communication styles', () => {
      // Step 1: A pre-analyzed conflict between two moral frameworks.
      const conflict = {
        moral_frameworks_in_tension: [
          { framework: 'Rights-based (Deontological)', focus: 'privacy' },
          { framework: 'Consequentialist (Utilitarian)', focus: 'harm prevention' }
        ],
        value_trade_offs: ['privacy vs. safety'],
        affected_stakeholder_groups: ['privacy_advocates', 'safety_team'],
        deliberation_process: 'Full deliberative process'
      };

      // Step 2: Stakeholders with deliberately different communication styles
      // and cultural contexts, to force per-stakeholder adaptation.
      const stakeholders = [
        {
          id: 1,
          group: 'privacy_advocates',
          name: 'Dr. Privacy',
          communication_style: 'FORMAL_ACADEMIC',
          cultural_context: 'western_academic'
        },
        {
          id: 2,
          group: 'safety_team',
          name: 'Safety Manager',
          communication_style: 'CASUAL_DIRECT',
          cultural_context: 'australian'
        },
        {
          id: 3,
          group: 'community_representatives',
          name: 'Kaitiaki',
          communication_style: 'MAORI_PROTOCOL',
          cultural_context: 'maori'
        }
      ];

      const deliberation = PluralisticDeliberationOrchestrator.facilitateDeliberation(
        conflict,
        stakeholders,
        { stakeholder_list_approved_by_human: true }
      );

      // Should create culturally-adapted communications for each stakeholder.
      expect(deliberation.stakeholder_communications).toBeDefined();
      expect(deliberation.stakeholder_communications.length).toBe(3);

      // Each stakeholder should receive adapted communication.
      deliberation.stakeholder_communications.forEach(comm => {
        expect(comm.communication).toBeDefined();
        expect(comm.stakeholder_id).toBeDefined();
        expect(comm.stakeholder_group).toBeDefined();
      });

      // Step 3: Verify AdaptiveCommunicationOrchestrator removed patronizing language
      // across the concatenation of every generated communication.
      const allCommunications = deliberation.stakeholder_communications
        .map(c => c.communication)
        .join(' ');

      expect(allCommunications).not.toContain('simply');
      expect(allCommunications).not.toContain('obviously');
      expect(allCommunications).not.toContain('just do');
    });

    test('should reject deliberation without human-approved stakeholder list', () => {
      const conflict = {
        moral_frameworks_in_tension: [],
        value_trade_offs: ['test vs. test'],
        deliberation_process: 'Test'
      };

      const stakeholders = [{ id: 1, group: 'test' }];

      // Attempt deliberation without human approval — must be refused,
      // not silently proceeded with.
      const result = PluralisticDeliberationOrchestrator.facilitateDeliberation(
        conflict,
        stakeholders,
        { stakeholder_list_approved_by_human: false } // Not approved
      );

      expect(result.error).toBeDefined();
      expect(result.action).toBe('REQUIRE_HUMAN_APPROVAL');
      expect(result.reason).toContain('stakeholders');
    });
  });

  describe('Complete Deliberation Flow', () => {
    test('should handle full deliberation lifecycle', async () => {
      // 1. Value conflict detected — imminent harm escalates urgency.
      const decision = {
        description: 'Balance user privacy rights vs public safety duty when harm is imminent. This decision requires weighing privacy obligations against potential harm outcomes.',
        urgency: 'critical'
      };

      const analysis = PluralisticDeliberationOrchestrator.analyzeConflict(decision, {
        imminent_harm: true
      });

      expect(analysis.urgency_tier).toBe('CRITICAL');
      expect(analysis.deliberation_timeframe).toBe('minutes to hours');

      // 2. Facilitated deliberation with a human-approved stakeholder list.
      const stakeholders = [
        { id: 1, group: 'privacy', communication_style: 'FORMAL_ACADEMIC' },
        { id: 2, group: 'safety', communication_style: 'CASUAL_DIRECT' }
      ];

      const deliberation = PluralisticDeliberationOrchestrator.facilitateDeliberation(
        analysis,
        stakeholders,
        { stakeholder_list_approved_by_human: true }
      );

      expect(deliberation.deliberation_id).toBeDefined();
      expect(deliberation.stakeholder_communications.length).toBe(2);

      // 3. Human-decided outcome — records the moral remainder and dissent,
      // since consensus was not reached.
      const outcome = {
        decided_by_human: true,
        decision_summary: 'Disclose minimal necessary data to prevent imminent harm only',
        values_prioritized: ['safety', 'harm_prevention'],
        values_deprioritized: ['privacy', 'autonomy'],
        moral_remainder: 'Privacy violation is acknowledged as a moral cost of preventing imminent harm',
        dissenting_views: [
          {
            stakeholder: 'privacy_advocates',
            view: 'Sets dangerous precedent for future privacy erosion'
          }
        ],
        consensus_reached: false,
        review_date: new Date(Date.now() + 90 * 24 * 60 * 60 * 1000) // 90 days
      };

      const documentation = PluralisticDeliberationOrchestrator.documentOutcome(
        deliberation,
        outcome
      );

      expect(documentation.outcome_documented).toBe(true);
      expect(documentation.precedent_created).toBeDefined();
      expect(documentation.precedent_binding).toBe(false); // Informative, not binding
      expect(documentation.values_prioritization).toBeDefined();
      expect(documentation.values_prioritization.moral_remainder).toBeDefined();
      expect(documentation.values_prioritization.dissenting_views.length).toBe(1);
    });

    test('should require human decision for outcome documentation', () => {
      const deliberation = {
        deliberation_id: 'test_123',
        structure: { frameworks_in_tension: [] }
      };

      // An outcome the AI attempted to decide on its own — must be rejected.
      const aiAttemptedOutcome = {
        decided_by_human: false, // AI trying to decide
        decision_summary: 'AI attempted decision',
        values_prioritized: ['test']
      };

      const result = PluralisticDeliberationOrchestrator.documentOutcome(
        deliberation,
        aiAttemptedOutcome
      );

      expect(result.error).toBeDefined();
      expect(result.action).toBe('REQUIRE_HUMAN_DECISION');
      expect(result.outcome_documented).toBe(false);
    });
  });

  describe('Precedent System Integration', () => {
    test('should create informative (not binding) precedent', () => {
      const deliberation = {
        deliberation_id: 'prec_test_001',
        structure: {
          frameworks_in_tension: [
            { framework: 'Deontological', position: 'Privacy is inviolable right' },
            { framework: 'Consequentialist', position: 'Prevent harm to others' }
          ]
        }
      };

      const outcome = {
        decided_by_human: true,
        decision_summary: 'Test precedent creation',
        values_prioritized: ['safety'],
        values_deprioritized: ['privacy'],
        moral_remainder: 'Privacy cost acknowledged',
        applicability_scope: 'Imminent harm scenarios only - context-specific',
        context_factors: ['imminent_harm', 'specific_threat', 'minimal_disclosure']
      };

      const result = PluralisticDeliberationOrchestrator.documentOutcome(
        deliberation,
        outcome
      );

      expect(result.precedent_created).toBeDefined();
      expect(result.precedent_binding).toBe(false); // Per inst_035: informative, not binding
      expect(result.precedent_scope).toContain('context');
    });
  });

  describe('Statistics Tracking Integration', () => {
    // NOTE(review): these assertions depend on the earlier tests in this file
    // having already generated adaptations/deliberations — confirm the suite
    // is not run with randomized test order.
    test('should track statistics across all three services', () => {
      // Adaptive Communication stats.
      const commStats = AdaptiveCommunicationOrchestrator.getStats();
      expect(commStats.total_adaptations).toBeGreaterThan(0);
      expect(commStats.by_style).toBeDefined();

      // Pluralistic Deliberation stats.
      const delibStats = PluralisticDeliberationOrchestrator.getStats();
      expect(delibStats.total_deliberations).toBeGreaterThan(0);
      expect(delibStats.by_urgency).toBeDefined();
      expect(delibStats.precedents_created).toBeGreaterThan(0);
    });
  });

  describe('Error Handling Integration', () => {
    test('should handle errors gracefully across service boundaries', () => {
      // Invalid decision object — the orchestrator must return an error
      // response rather than throw.
      const invalidDecision = null;

      const analysis = PluralisticDeliberationOrchestrator.analyzeConflict(invalidDecision);

      expect(analysis).toBeDefined();
      expect(analysis.requires_human_approval).toBe(true);
    });
  });
});