tractatus/src/services/PluralisticDeliberationOrchestrator.service.js
TheFlow cb7d84e639 feat(services): add 6th core service - value pluralism deliberation
- Implement PluralisticDeliberationOrchestrator (433 lines)
  - 6 moral frameworks: deontological, consequentialist, virtue, care, communitarian, indigenous
  - 4 urgency tiers: critical, urgent, important, routine
  - Foundational pluralism without value hierarchy
  - Precedent tracking (informative, not binding)

- Implement AdaptiveCommunicationOrchestrator (346 lines)
  - 5 communication styles: formal, casual (pub test), Māori protocol, Japanese formal, plain
  - Anti-patronizing filter (removes "simply", "obviously", "clearly")
  - Cultural context adaptation

- Both services use singleton pattern with statistics tracking
- Implements TRA-OPS-0002: AI facilitates, humans decide
- Supports inst_029-inst_035 (value pluralism governance)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-12 16:35:15 +13:00

532 lines
17 KiB
JavaScript

/*
* Copyright 2025 John G Stroh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Pluralistic Deliberation Orchestrator Service
* Facilitates multi-stakeholder deliberation when values conflict without imposing hierarchy
*
* Core Tractatus Service #6: Implements foundational pluralism where moral frameworks
* (deontological, consequentialist, virtue ethics, care ethics, communitarian) are
* irreducibly different with no supervalue. AI facilitates deliberation, humans decide.
*
* Core Principles (inst_033):
* - Foundational Pluralism: No automatic value ranking (privacy > safety or safety > privacy)
* - Legitimate Disagreement: Valid outcome when values genuinely incommensurable
* - Moral Remainder: Document what's lost in decisions, not just what's gained
* - Provisional Decisions: Reviewable when context changes
*
* Integration:
* - Triggered by BoundaryEnforcer when value conflicts detected
* - Uses AdaptiveCommunicationOrchestrator for culturally appropriate communication
* - Stores precedents in precedent database (informative, not binding)
*/
const logger = require('../utils/logger.util');
const { getMemoryProxy } = require('./MemoryProxy.service');
const AdaptiveCommunicationOrchestrator = require('./AdaptiveCommunicationOrchestrator.service');
/**
 * Moral frameworks recognized by the orchestrator.
 * Each entry carries: display name, the framework's focus, keywords used for
 * detection in decision text (see _detectFrameworks), and typical concerns.
 * Frozen (container and entries) so no consumer can mutate this shared catalog.
 */
const MORAL_FRAMEWORKS = Object.freeze({
  DEONTOLOGICAL: Object.freeze({
    name: 'Rights-based (Deontological)',
    focus: 'Duties, rights, rules',
    keywords: Object.freeze(['duty', 'right', 'obligation', 'principle', 'rule', 'categorical']),
    typical_concerns: Object.freeze(['privacy', 'autonomy', 'dignity', 'consent'])
  }),
  CONSEQUENTIALIST: Object.freeze({
    name: 'Consequentialist (Utilitarian)',
    focus: 'Outcomes, aggregate welfare',
    keywords: Object.freeze(['outcome', 'benefit', 'harm', 'utility', 'maximize', 'welfare']),
    typical_concerns: Object.freeze(['safety', 'harm prevention', 'aggregate good'])
  }),
  VIRTUE_ETHICS: Object.freeze({
    name: 'Virtue Ethics',
    focus: 'Character, virtues, flourishing',
    keywords: Object.freeze(['virtue', 'character', 'flourish', 'excellence', 'integrity']),
    typical_concerns: Object.freeze(['honesty', 'courage', 'compassion', 'wisdom'])
  }),
  CARE_ETHICS: Object.freeze({
    name: 'Care Ethics',
    focus: 'Relationships, care, context',
    keywords: Object.freeze(['care', 'relationship', 'context', 'nurture', 'responsive']),
    typical_concerns: Object.freeze(['vulnerability', 'interdependence', 'particular needs'])
  }),
  COMMUNITARIAN: Object.freeze({
    name: 'Communitarian',
    focus: 'Community, tradition, shared goods',
    keywords: Object.freeze(['community', 'tradition', 'shared', 'collective', 'solidarity']),
    typical_concerns: Object.freeze(['social cohesion', 'common good', 'cultural values'])
  }),
  INDIGENOUS_RELATIONAL: Object.freeze({
    name: 'Indigenous Relational',
    focus: 'Interconnection, reciprocity, land',
    keywords: Object.freeze(['whanaungatanga', 'kaitiakitanga', 'reciprocity', 'interconnection']),
    typical_concerns: Object.freeze(['land stewardship', 'intergenerational responsibility', 'balance'])
  })
});
/**
 * Urgency tiers for deliberation timeframes.
 * Keys double as the tier identifiers returned by _determineUrgency and used
 * as counters in stats.by_urgency. Frozen so the shared catalog stays immutable.
 */
const URGENCY_TIERS = Object.freeze({
  CRITICAL: Object.freeze({
    timeframe: 'minutes to hours',
    process: 'Automated triage + rapid human review'
  }),
  URGENT: Object.freeze({
    timeframe: 'days',
    process: 'Expedited stakeholder consultation'
  }),
  IMPORTANT: Object.freeze({
    timeframe: 'weeks',
    process: 'Full deliberative process'
  }),
  ROUTINE: Object.freeze({
    timeframe: 'months',
    process: 'Precedent matching + lightweight review'
  })
});
class PluralisticDeliberationOrchestrator {
  /**
   * Facilitates multi-stakeholder deliberation when values conflict, without
   * imposing a value hierarchy. The AI only facilitates; humans decide
   * (TRA-OPS-0002). Statistics are tracked per instance.
   */
  constructor() {
    // Catalogs referenced via instance fields so tests can swap them per instance.
    this.moralFrameworks = MORAL_FRAMEWORKS;
    this.urgencyTiers = URGENCY_TIERS;
    this.communicationOrchestrator = AdaptiveCommunicationOrchestrator;

    // MemoryProxy backs the precedent store (inst_035). Obtained here but only
    // usable once initialize() has resolved (memoryProxyInitialized === true).
    this.memoryProxy = getMemoryProxy();
    this.memoryProxyInitialized = false;

    // Running counters, exposed (as copies) via getStats().
    this.stats = {
      total_deliberations: 0,
      frameworks_in_tension: {},
      legitimate_disagreements: 0,
      consensus_reached: 0,
      precedents_created: 0,
      by_urgency: {
        CRITICAL: 0,
        URGENT: 0,
        IMPORTANT: 0,
        ROUTINE: 0
      }
    };

    logger.info('PluralisticDeliberationOrchestrator initialized');
  }

  /**
   * Initialize MemoryProxy and load the precedent database configuration.
   * A missing inst_035 rule is non-fatal: precedent matching simply runs
   * without configuration.
   * @returns {Promise<Object>} { success, precedent_db_loaded } on success,
   *   { success: false, error } on failure (never throws).
   */
  async initialize() {
    try {
      await this.memoryProxy.initialize();

      // Precedent database configuration is stored in memory as inst_035.
      const precedentRule = await this.memoryProxy.getRule('inst_035');
      if (precedentRule) {
        logger.info('Precedent database configuration loaded');
      } else {
        logger.warn('Precedent database rule (inst_035) not found');
      }

      this.memoryProxyInitialized = true;
      return {
        success: true,
        precedent_db_loaded: !!precedentRule
      };
    } catch (error) {
      logger.error('Failed to initialize PluralisticDeliberationOrchestrator', {
        error: error.message
      });
      return {
        success: false,
        error: error.message
      };
    }
  }

  /**
   * Analyze a values conflict to identify moral frameworks in tension.
   * Never decides: the result always carries requires_human_approval: true
   * and ai_role: 'FACILITATE_ONLY' (TRA-OPS-0002), even on error.
   * @param {Object} decision - The decision involving value conflict
   *   (reads description/text, urgency, value_conflicts).
   * @param {Object} context - Decision context (reads imminent_harm,
   *   time_sensitive, significant_impact, affected_groups).
   * @returns {Object} Conflict analysis, or { error, ... } on failure.
   */
  analyzeConflict(decision, context = {}) {
    try {
      const decisionText = (decision.description || decision.text || '').toLowerCase();

      const frameworksInTension = this._detectFrameworks(decisionText);
      const valueTradeOffs = this._identifyTradeOffs(decisionText, decision);
      const affectedStakeholders = this._identifyStakeholders(decision, context);
      const urgency = this._determineUrgency(decision, context);
      const relevantPrecedents = this._findPrecedents(decision);

      this.stats.total_deliberations++;
      this.stats.by_urgency[urgency]++;

      return {
        moral_frameworks_in_tension: frameworksInTension,
        value_trade_offs: valueTradeOffs,
        affected_stakeholder_groups: affectedStakeholders,
        urgency_tier: urgency,
        deliberation_timeframe: this.urgencyTiers[urgency].timeframe,
        deliberation_process: this.urgencyTiers[urgency].process,
        relevant_precedents: relevantPrecedents,
        requires_human_approval: true, // Always true per TRA-OPS-0002
        ai_role: 'FACILITATE_ONLY',
        human_role: 'DECIDE',
        analysis_timestamp: new Date()
      };
    } catch (error) {
      logger.error('Conflict analysis error:', error);
      return {
        error: error.message,
        requires_human_approval: true,
        ai_role: 'FACILITATE_ONLY'
      };
    }
  }

  /**
   * Facilitate a deliberation round across stakeholder groups.
   * Refuses to proceed unless the stakeholder list was human-approved (inst_034).
   * @param {Object} conflict - Conflict analysis from analyzeConflict().
   * @param {Array} stakeholders - Stakeholder list; each entry may carry id,
   *   group, communication_style, cultural_context, preferred_language.
   * @param {Object} options - Must include stakeholder_list_approved_by_human.
   * @returns {Object} Deliberation facilitation result, or an error object.
   */
  facilitateDeliberation(conflict, stakeholders, options = {}) {
    try {
      // Hard gate: AI cannot determine affected stakeholders unilaterally.
      if (!options.stakeholder_list_approved_by_human) {
        return {
          error: 'Stakeholder list requires human approval (inst_034)',
          action: 'REQUIRE_HUMAN_APPROVAL',
          reason: 'AI cannot determine affected stakeholders unilaterally'
        };
      }

      const deliberationStructure = this._generateDeliberationStructure(
        conflict,
        stakeholders
      );

      // Culturally-adapted invitation for each stakeholder, defaulting to the
      // formal academic style when no preference is given.
      const stakeholderCommunications = stakeholders.map(stakeholder => {
        return {
          stakeholder_id: stakeholder.id,
          stakeholder_group: stakeholder.group,
          communication: this.communicationOrchestrator.adaptCommunication(
            deliberationStructure.invitation_message,
            {
              audience: stakeholder.communication_style || 'formal_academic',
              cultural_context: stakeholder.cultural_context,
              language: stakeholder.preferred_language
            }
          )
        };
      });

      return {
        // NOTE(review): Date.now()-based ids can collide under concurrent calls
        // within the same millisecond — consider crypto.randomUUID() if ids
        // must be globally unique.
        deliberation_id: `delib_${Date.now()}`,
        structure: deliberationStructure,
        stakeholder_communications: stakeholderCommunications,
        process: conflict.deliberation_process,
        timeframe: conflict.deliberation_timeframe,
        next_steps: [
          'Send invitations to stakeholders',
          'Collect initial positions from each framework',
          'Facilitate structured discussion rounds',
          'Document outcomes and dissent'
        ],
        ai_facilitation_role: 'Structure discussion, document positions, ensure all voices heard',
        human_decision_role: 'Make final decision, acknowledge moral remainder',
        timestamp: new Date()
      };
    } catch (error) {
      logger.error('Deliberation facilitation error:', error);
      return {
        error: error.message,
        requires_human_fallback: true
      };
    }
  }

  /**
   * Document a deliberation outcome (AI facilitates, humans decide).
   * Creates an informative (never binding) precedent and stores it
   * fire-and-forget when the MemoryProxy is initialized.
   * @param {Object} deliberation - The deliberation context.
   * @param {Object} outcome - The human-decided outcome; must carry
   *   decided_by_human: true or documentation is refused (TRA-OPS-0002).
   * @returns {Object} Documentation result, or an error object.
   */
  documentOutcome(deliberation, outcome) {
    try {
      if (!outcome.decided_by_human) {
        return {
          error: 'Outcome must be human-decided (TRA-OPS-0002)',
          action: 'REQUIRE_HUMAN_DECISION'
        };
      }

      // Record what was gained AND what was lost (moral remainder), plus dissent.
      const valuesPrioritization = {
        prioritized: outcome.values_prioritized || [],
        deprioritized: outcome.values_deprioritized || [],
        moral_remainder: outcome.moral_remainder || 'Not documented',
        dissenting_views: outcome.dissenting_views || [],
        review_date: outcome.review_date || null
      };

      // Precedent is informative, not binding, per inst_035.
      const precedent = this._createPrecedent(deliberation, outcome, valuesPrioritization);

      // Fire-and-forget: _storePrecedent logs its own failures, so an
      // unavailable store never blocks or fails documentation.
      if (this.memoryProxyInitialized) {
        void this._storePrecedent(precedent);
      }

      this.stats.precedents_created++;
      if (outcome.consensus_reached) {
        this.stats.consensus_reached++;
      } else {
        // Legitimate disagreement is a valid outcome, not a failure.
        this.stats.legitimate_disagreements++;
      }

      return {
        outcome_documented: true,
        precedent_created: precedent.id,
        precedent_scope: precedent.applicability_scope,
        precedent_binding: false, // Always informative, not binding
        values_prioritization: valuesPrioritization,
        review_date: outcome.review_date,
        documentation_timestamp: new Date()
      };
    } catch (error) {
      logger.error('Outcome documentation error:', error);
      return {
        error: error.message,
        outcome_documented: false
      };
    }
  }

  /**
   * Private helper methods
   */

  /**
   * Score each framework by keyword hits in the (lowercased) decision text.
   * A framework is "in tension" when at least two keywords match, which also
   * bumps its counter in stats.frameworks_in_tension.
   * @param {string} text - Lowercased decision text.
   * @returns {Array<Object>} Detected frameworks with match scores.
   */
  _detectFrameworks(text) {
    const detected = [];
    for (const [key, framework] of Object.entries(this.moralFrameworks)) {
      const matchScore = framework.keywords.filter((keyword) => text.includes(keyword)).length;
      // Threshold of 2 keyword hits reduces single-word false positives.
      if (matchScore >= 2) {
        detected.push({
          framework: framework.name,
          focus: framework.focus,
          typical_concerns: framework.typical_concerns,
          match_score: matchScore
        });
        this.stats.frameworks_in_tension[key] = (this.stats.frameworks_in_tension[key] || 0) + 1;
      }
    }
    return detected;
  }

  /**
   * Identify value trade-offs from "X vs/versus/or Y" patterns in the text
   * and from explicit decision.value_conflicts metadata.
   * @param {string} text - Lowercased decision text.
   * @param {Object} decision - Decision object (reads value_conflicts).
   * @returns {Array<string>} Deduplicated trade-off labels.
   */
  _identifyTradeOffs(text, decision) {
    const tradeoffs = [];

    // Common value conflicts, matched in either order.
    const conflicts = [
      { values: ['privacy', 'safety'], pattern: /privacy.*(?:vs|versus|or).*safety|safety.*(?:vs|versus|or).*privacy/i },
      { values: ['privacy', 'convenience'], pattern: /privacy.*(?:vs|versus|or).*convenience|convenience.*(?:vs|versus|or).*privacy/i },
      { values: ['individual', 'collective'], pattern: /individual.*(?:vs|versus|or).*collective|collective.*(?:vs|versus|or).*individual/i },
      { values: ['freedom', 'security'], pattern: /freedom.*(?:vs|versus|or).*security|security.*(?:vs|versus|or).*freedom/i }
    ];
    for (const conflict of conflicts) {
      if (conflict.pattern.test(text)) {
        tradeoffs.push(`${conflict.values[0]} vs. ${conflict.values[1]}`);
      }
    }

    // Explicit metadata supplements (and may duplicate) pattern matches.
    if (decision.value_conflicts) {
      tradeoffs.push(...decision.value_conflicts);
    }

    return [...new Set(tradeoffs)]; // Remove duplicates
  }

  /**
   * Suggest stakeholder groups from text cues and context.
   * AI can only suggest — a human must approve the list (inst_034).
   * @param {Object} decision - Decision object (reads description/text).
   * @param {Object} context - Context (reads affected_groups).
   * @returns {Array<string>} Deduplicated suggested stakeholder group ids.
   */
  _identifyStakeholders(decision, context) {
    const suggestedStakeholders = [];
    const text = (decision.description || decision.text || '').toLowerCase();

    if (text.includes('user') || text.includes('customer')) {
      suggestedStakeholders.push('affected_users');
    }
    if (text.includes('privacy')) {
      suggestedStakeholders.push('privacy_advocates');
    }
    if (text.includes('safety') || text.includes('harm')) {
      suggestedStakeholders.push('harm_prevention_specialists');
    }
    if (text.includes('legal') || text.includes('compliance')) {
      suggestedStakeholders.push('legal_team');
    }

    if (context.affected_groups) {
      suggestedStakeholders.push(...context.affected_groups);
    }

    return [...new Set(suggestedStakeholders)];
  }

  /**
   * Map decision/context signals to an urgency tier key (see URGENCY_TIERS).
   * Checks tiers from most to least urgent; defaults to ROUTINE.
   * @param {Object} decision - Decision object (reads urgency).
   * @param {Object} context - Context flags.
   * @returns {string} One of 'CRITICAL' | 'URGENT' | 'IMPORTANT' | 'ROUTINE'.
   */
  _determineUrgency(decision, context) {
    if (decision.urgency === 'critical' || context.imminent_harm) {
      return 'CRITICAL';
    }
    if (decision.urgency === 'urgent' || context.time_sensitive) {
      return 'URGENT';
    }
    if (decision.urgency === 'important' || context.significant_impact) {
      return 'IMPORTANT';
    }
    return 'ROUTINE';
  }

  /**
   * Placeholder for precedent matching; a full implementation would query the
   * precedent database via MemoryProxy.
   * @returns {Array} Always empty for now.
   */
  _findPrecedents(decision) {
    return [];
  }

  /**
   * Build the deliberation structure: invitation text, three discussion
   * rounds, and the documentation requirements for the outcome.
   * @param {Object} conflict - Conflict analysis (reads value_trade_offs).
   * @param {Array} stakeholders - Stakeholder list (currently unused here).
   * @returns {Object} Deliberation structure.
   */
  _generateDeliberationStructure(conflict, stakeholders) {
    // Guard: a missing trade-off list must not make message generation throw.
    const tradeOffs = conflict.value_trade_offs ?? [];
    return {
      invitation_message: `We are deliberating a decision involving ${tradeOffs.join(', ')}. ` +
        `Multiple moral frameworks are in tension, and we need diverse perspectives.`,
      discussion_rounds: [
        {
          round: 1,
          purpose: 'Initial positions from each moral framework',
          format: 'Written submissions'
        },
        {
          round: 2,
          purpose: 'Respond to other frameworks, explore accommodations',
          format: 'Structured discussion'
        },
        {
          round: 3,
          purpose: 'Identify areas of agreement and irreducible disagreement',
          format: 'Facilitated synthesis'
        }
      ],
      documentation_requirements: [
        'Values prioritized',
        'Values deprioritized',
        'Moral remainder (what is lost)',
        'Dissenting views with full legitimacy',
        'Review date for changed circumstances'
      ]
    };
  }

  /**
   * Assemble the precedent record from the deliberation and human outcome.
   * Precedents are informative, never binding (inst_035).
   * @returns {Object} Precedent record.
   */
  _createPrecedent(deliberation, outcome, valuesPrioritization) {
    return {
      id: `prec_${Date.now()}`,
      deliberation_id: deliberation.deliberation_id,
      decision_summary: outcome.decision_summary,
      values_prioritized: valuesPrioritization.prioritized,
      values_deprioritized: valuesPrioritization.deprioritized,
      moral_remainder: valuesPrioritization.moral_remainder,
      dissenting_views: valuesPrioritization.dissenting_views,
      // BUG FIX: _generateDeliberationStructure never produces a
      // frameworks_in_tension field, so the original unguarded read
      // (deliberation.structure.frameworks_in_tension) was always undefined
      // and threw when structure itself was absent. Accept an explicit field
      // on the deliberation first, then the structure, then default to [].
      frameworks_involved:
        deliberation.moral_frameworks_in_tension ??
        deliberation.structure?.frameworks_in_tension ??
        [],
      context_factors: outcome.context_factors || [],
      applicability_scope: outcome.applicability_scope || 'Similar cases with same context factors',
      binding_status: 'INFORMATIVE_NOT_BINDING', // Per inst_035
      review_date: outcome.review_date,
      created_at: new Date()
    };
  }

  /**
   * Persist a precedent via MemoryProxy. Best-effort: failures are logged,
   * never rethrown, so documentation always succeeds locally.
   * @param {Object} precedent - Precedent record from _createPrecedent.
   */
  async _storePrecedent(precedent) {
    try {
      await this.memoryProxy.storePrecedent(precedent);
      logger.info('Precedent stored', { precedent_id: precedent.id });
    } catch (error) {
      logger.error('Failed to store precedent', {
        error: error.message,
        precedent_id: precedent.id
      });
    }
  }

  /**
   * Get deliberation statistics.
   * Nested counter objects are copied so callers cannot mutate internal state.
   * @returns {Object} Statistics snapshot with a timestamp.
   */
  getStats() {
    return {
      ...this.stats,
      frameworks_in_tension: { ...this.stats.frameworks_in_tension },
      by_urgency: { ...this.stats.by_urgency },
      timestamp: new Date()
    };
  }
}
// Module-level singleton: all consumers share one orchestrator and its
// statistics. The class itself rides along as a named property so tests can
// construct isolated instances.
const singleton = new PluralisticDeliberationOrchestrator();
singleton.PluralisticDeliberationOrchestrator = PluralisticDeliberationOrchestrator;
module.exports = singleton;