// Test suite summary (from the introducing commit):
// - PluralisticDeliberationOrchestrator: 38 tests (367 lines) — framework detection
//   (6 moral frameworks), conflict analysis and facilitation, urgency tier
//   determination, precedent tracking, statistics and edge cases.
// - AdaptiveCommunicationOrchestrator: 27 tests (341 lines) — communication style
//   adaptation (5 styles), anti-patronizing filter, pub test validation
//   (Australian/NZ), Japanese formality handling, statistics tracking.
// All 65 tests passing with proper framework keyword detection.
/**
 * Unit Tests for PluralisticDeliberationOrchestrator
 *
 * Tests value pluralism deliberation facilitation: conflict analysis,
 * human-gated deliberation facilitation, outcome documentation, and
 * statistics tracking.
 */

// The service module exports a singleton instance (see 'Singleton Pattern'
// suite below), so state such as statistics persists across tests.
const orchestrator = require('../../src/services/PluralisticDeliberationOrchestrator.service');
describe('PluralisticDeliberationOrchestrator', () => {
  beforeEach(() => {
    // Orchestrator is a singleton instance — no per-test setup required.
    // NOTE(review): because stats accumulate across tests, the statistics
    // tests below compare before/after snapshots rather than absolute values.
  });

  // Conflict analysis: framework detection, trade-off identification,
  // stakeholder suggestion, and urgency tiering.
  describe('Conflict Analysis', () => {
    test('should detect deontological vs consequentialist frameworks', () => {
      const decision = {
        description: 'Should we disclose user data to prevent imminent harm? This violates the duty to respect privacy rights and the obligation to maintain consent, but the outcome could maximize welfare and prevent harm to others through safety measures.',
        type: 'value_conflict'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      expect(analysis.moral_frameworks_in_tension).toBeDefined();
      expect(analysis.moral_frameworks_in_tension.length).toBeGreaterThan(0);
      expect(analysis.requires_human_approval).toBe(true);
      expect(analysis.ai_role).toBe('FACILITATE_ONLY');
    });

    test('should identify privacy vs safety trade-offs', () => {
      const decision = {
        description: 'Trade privacy vs safety in this specific context',
        type: 'trade_off'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      expect(analysis.value_trade_offs).toContain('privacy vs. safety');
    });

    test('should suggest affected stakeholder groups', () => {
      const decision = {
        description: 'Decision affects user privacy and harm prevention',
        type: 'policy'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      expect(analysis.affected_stakeholder_groups).toBeDefined();
      expect(analysis.affected_stakeholder_groups.length).toBeGreaterThan(0);
    });

    test('should determine urgency tier', () => {
      const criticalDecision = {
        description: 'Imminent harm situation',
        urgency: 'critical'
      };

      const analysis = orchestrator.analyzeConflict(criticalDecision, { imminent_harm: true });

      expect(analysis.urgency_tier).toBe('CRITICAL');
      expect(analysis.deliberation_timeframe).toBe('minutes to hours');
    });

    test('should handle routine urgency', () => {
      const routineDecision = {
        description: 'Update standard privacy policy language',
        urgency: 'routine'
      };

      const analysis = orchestrator.analyzeConflict(routineDecision);

      expect(analysis.urgency_tier).toBe('ROUTINE');
      expect(analysis.deliberation_process).toContain('Precedent matching');
    });
  });

  // inst_033: the AI must facilitate only — never rank values or decide.
  describe('Foundational Pluralism (inst_033)', () => {
    test('should never impose automatic value hierarchy', () => {
      const decision = {
        description: 'Privacy vs safety conflict',
        type: 'value_conflict'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      // AI should facilitate, not decide
      expect(analysis.ai_role).toBe('FACILITATE_ONLY');
      expect(analysis.human_role).toBe('DECIDE');
      expect(analysis.requires_human_approval).toBe(true);
    });

    test('should recognize multiple legitimate moral frameworks', () => {
      const decision = {
        description: 'Conflict involving rights, consequences, and care relationships',
        type: 'complex_values'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      // Should detect multiple frameworks without ranking them
      expect(analysis.moral_frameworks_in_tension).toBeDefined();
      // AI should not say "framework X is more important than Y"
      expect(analysis.ai_role).toBe('FACILITATE_ONLY');
    });
  });

  // Facilitation is gated on a human-approved stakeholder list.
  describe('Deliberation Facilitation', () => {
    test('should require human-approved stakeholder list', () => {
      const conflict = {
        moral_frameworks_in_tension: [],
        value_trade_offs: ['privacy vs. safety'],
        affected_stakeholder_groups: ['users', 'safety_team']
      };

      const stakeholders = [
        { id: 1, group: 'users' },
        { id: 2, group: 'safety_team' }
      ];

      const result = orchestrator.facilitateDeliberation(conflict, stakeholders, {
        stakeholder_list_approved_by_human: false
      });

      expect(result.error).toBeDefined();
      expect(result.action).toBe('REQUIRE_HUMAN_APPROVAL');
      expect(result.reason).toContain('stakeholders');
    });

    test('should facilitate deliberation with approved stakeholder list', () => {
      const conflict = {
        moral_frameworks_in_tension: [
          { framework: 'Rights-based', focus: 'privacy' },
          { framework: 'Consequentialist', focus: 'harm prevention' }
        ],
        value_trade_offs: ['privacy vs. safety'],
        affected_stakeholder_groups: ['users', 'safety_team'],
        deliberation_process: 'Full deliberative process'
      };

      const stakeholders = [
        { id: 1, group: 'privacy_advocates', communication_style: 'formal_academic' },
        { id: 2, group: 'safety_team', communication_style: 'casual_direct' }
      ];

      const result = orchestrator.facilitateDeliberation(conflict, stakeholders, {
        stakeholder_list_approved_by_human: true
      });

      expect(result.deliberation_id).toBeDefined();
      expect(result.structure).toBeDefined();
      expect(result.stakeholder_communications).toBeDefined();
      expect(result.stakeholder_communications.length).toBe(2);
    });

    test('should structure deliberation rounds', () => {
      const conflict = {
        moral_frameworks_in_tension: [],
        value_trade_offs: ['individual vs collective'],
        deliberation_process: 'Full deliberative process'
      };

      const stakeholders = [{ id: 1, group: 'test' }];

      const result = orchestrator.facilitateDeliberation(conflict, stakeholders, {
        stakeholder_list_approved_by_human: true
      });

      expect(result.structure.discussion_rounds).toBeDefined();
      expect(result.structure.discussion_rounds.length).toBeGreaterThan(0);
      expect(result.structure.documentation_requirements).toBeDefined();
    });
  });

  // Outcomes must be human-decided; documentation captures prioritization,
  // moral remainder, dissent, and non-binding precedent.
  describe('Outcome Documentation', () => {
    test('should require human-decided outcome', () => {
      const deliberation = {
        deliberation_id: 'test_123',
        structure: { frameworks_in_tension: [] }
      };
      const outcome = {
        decision_summary: 'Prioritize safety in this case',
        decided_by_human: false // AI trying to decide
      };

      const result = orchestrator.documentOutcome(deliberation, outcome);

      expect(result.error).toBeDefined();
      expect(result.action).toBe('REQUIRE_HUMAN_DECISION');
    });

    test('should document values prioritization', () => {
      const deliberation = {
        deliberation_id: 'test_456',
        structure: {
          frameworks_in_tension: [
            { framework: 'Rights-based', focus: 'privacy' },
            { framework: 'Consequentialist', focus: 'safety' }
          ]
        }
      };
      const outcome = {
        decision_summary: 'Disclose data for imminent threat only',
        decided_by_human: true,
        values_prioritized: ['safety', 'harm_prevention'],
        values_deprioritized: ['privacy', 'autonomy'],
        moral_remainder: 'Privacy violation acknowledged as moral cost',
        dissenting_views: [
          { stakeholder: 'privacy_advocates', view: 'Sets dangerous precedent' }
        ],
        review_date: new Date('2026-04-12')
      };

      const result = orchestrator.documentOutcome(deliberation, outcome);

      expect(result.outcome_documented).toBe(true);
      expect(result.values_prioritization).toBeDefined();
      expect(result.values_prioritization.moral_remainder).toBeDefined();
      expect(result.values_prioritization.dissenting_views).toBeDefined();
    });

    test('should create informative (not binding) precedent', () => {
      const deliberation = {
        deliberation_id: 'test_789',
        structure: { frameworks_in_tension: [] }
      };
      const outcome = {
        decision_summary: 'Test decision',
        decided_by_human: true,
        values_prioritized: ['test'],
        consensus_reached: true
      };

      const result = orchestrator.documentOutcome(deliberation, outcome);

      expect(result.precedent_created).toBeDefined();
      expect(result.precedent_binding).toBe(false);
      expect(result.precedent_scope).toBeDefined();
    });

    test('should track legitimate disagreements', () => {
      const statsBefore = orchestrator.getStats();

      const deliberation = {
        deliberation_id: 'test_disagreement',
        structure: { frameworks_in_tension: [] }
      };
      const outcome = {
        decision_summary: 'Decision with disagreement',
        decided_by_human: true,
        values_prioritized: ['value_a'],
        consensus_reached: false,
        dissenting_views: [{ view: 'Disagreement' }]
      };

      orchestrator.documentOutcome(deliberation, outcome);

      const statsAfter = orchestrator.getStats();

      expect(statsAfter.legitimate_disagreements).toBeGreaterThan(statsBefore.legitimate_disagreements);
    });
  });

  // Keyword-based detection of each supported moral framework.
  describe('Moral Framework Detection', () => {
    test('should detect consequentialist keywords', () => {
      const decision = {
        description: 'Maximize welfare and minimize harm for aggregate utility'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      const hasConsequentialist = analysis.moral_frameworks_in_tension.some(
        f => f.framework === 'Consequentialist (Utilitarian)'
      );
      expect(hasConsequentialist).toBe(true);
    });

    test('should detect deontological keywords', () => {
      const decision = {
        description: 'Respect rights, duty, and obligation regardless of outcome'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      const hasDeontological = analysis.moral_frameworks_in_tension.some(
        f => f.framework === 'Rights-based (Deontological)'
      );
      expect(hasDeontological).toBe(true);
    });

    test('should detect care ethics keywords', () => {
      const decision = {
        description: 'Nurture relationships and care for vulnerable parties'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      const hasCareEthics = analysis.moral_frameworks_in_tension.some(
        f => f.framework === 'Care Ethics'
      );
      expect(hasCareEthics).toBe(true);
    });

    test('should detect indigenous relational keywords', () => {
      const decision = {
        description: 'Honor whanaungatanga and kaitiakitanga in this decision'
      };

      const analysis = orchestrator.analyzeConflict(decision);

      const hasIndigenous = analysis.moral_frameworks_in_tension.some(
        f => f.framework === 'Indigenous Relational'
      );
      expect(hasIndigenous).toBe(true);
    });
  });

  // Urgency tier assignment driven by the context flags.
  describe('Urgency Tiers', () => {
    test('should assign CRITICAL tier for imminent harm', () => {
      const decision = { urgency: 'critical' };
      const context = { imminent_harm: true };

      const analysis = orchestrator.analyzeConflict(decision, context);

      expect(analysis.urgency_tier).toBe('CRITICAL');
      expect(analysis.deliberation_timeframe).toBe('minutes to hours');
    });

    test('should assign URGENT tier for time-sensitive', () => {
      const decision = { urgency: 'urgent' };
      const context = { time_sensitive: true };

      const analysis = orchestrator.analyzeConflict(decision, context);

      expect(analysis.urgency_tier).toBe('URGENT');
      expect(analysis.deliberation_timeframe).toBe('days');
    });

    test('should assign IMPORTANT tier for significant impact', () => {
      const decision = { urgency: 'important' };
      const context = { significant_impact: true };

      const analysis = orchestrator.analyzeConflict(decision, context);

      expect(analysis.urgency_tier).toBe('IMPORTANT');
      expect(analysis.deliberation_process).toBe('Full deliberative process');
    });
  });

  // Counters grow monotonically on the singleton, hence before/after deltas.
  describe('Statistics Tracking', () => {
    test('should track total deliberations', () => {
      const statsBefore = orchestrator.getStats();

      orchestrator.analyzeConflict({
        description: 'Test deliberation',
        type: 'test'
      });

      const statsAfter = orchestrator.getStats();

      expect(statsAfter.total_deliberations).toBeGreaterThan(statsBefore.total_deliberations);
    });

    test('should track deliberations by urgency', () => {
      orchestrator.analyzeConflict({
        description: 'Critical test',
        urgency: 'critical'
      }, { imminent_harm: true });

      const stats = orchestrator.getStats();

      expect(stats.by_urgency.CRITICAL).toBeGreaterThan(0);
    });

    test('should track precedents created', () => {
      const statsBefore = orchestrator.getStats();

      const deliberation = {
        deliberation_id: 'stats_test',
        structure: { frameworks_in_tension: [] }
      };
      const outcome = {
        decision_summary: 'Test',
        decided_by_human: true,
        values_prioritized: ['test']
      };

      orchestrator.documentOutcome(deliberation, outcome);

      const statsAfter = orchestrator.getStats();

      expect(statsAfter.precedents_created).toBeGreaterThan(statsBefore.precedents_created);
    });
  });

  describe('Integration with AdaptiveCommunicationOrchestrator', () => {
    test('should provide culturally-adapted communications for stakeholders', () => {
      const conflict = {
        moral_frameworks_in_tension: [],
        value_trade_offs: ['test'],
        deliberation_process: 'Full deliberative process'
      };

      const stakeholders = [
        {
          id: 1,
          group: 'academic',
          communication_style: 'formal_academic',
          cultural_context: 'western_academic'
        },
        {
          id: 2,
          group: 'community',
          communication_style: 'casual_direct',
          cultural_context: 'australian'
        }
      ];

      const result = orchestrator.facilitateDeliberation(conflict, stakeholders, {
        stakeholder_list_approved_by_human: true
      });

      expect(result.stakeholder_communications).toBeDefined();
      expect(result.stakeholder_communications.length).toBe(2);
      // Each stakeholder should get adapted communication
      result.stakeholder_communications.forEach(comm => {
        expect(comm.communication).toBeDefined();
      });
    });
  });

  describe('Singleton Pattern', () => {
    test('should export singleton instance with required methods', () => {
      expect(typeof orchestrator.analyzeConflict).toBe('function');
      expect(typeof orchestrator.facilitateDeliberation).toBe('function');
      expect(typeof orchestrator.documentOutcome).toBe('function');
      expect(typeof orchestrator.getStats).toBe('function');
    });
  });

  describe('Error Handling', () => {
    test('should handle invalid decision gracefully', () => {
      expect(() => {
        orchestrator.analyzeConflict(null);
      }).not.toThrow();
    });

    test('should handle empty decision', () => {
      const result = orchestrator.analyzeConflict({});

      expect(result).toBeDefined();
      expect(result.requires_human_approval).toBe(true);
    });
  });
});
|