tractatus/src/models/DeliberationSession.model.js
TheFlow 9d8fe404df chore: update dependencies and documentation
Update project dependencies, documentation, and supporting files:
- i18n improvements for multilingual support
- Admin dashboard enhancements
- Documentation updates for Koha/Stripe and deployment
- Server middleware and model updates
- Package dependency updates

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-19 12:48:37 +13:00

494 lines
18 KiB
JavaScript

/**
* DeliberationSession Model
* Tracks multi-stakeholder deliberation for values conflicts
*
* AI-LED FACILITATION: This model tracks AI vs. human interventions
* and enforces safety mechanisms for AI-led deliberation.
*/
const { ObjectId } = require('mongodb');
const { getCollection } = require('../utils/db.util');
class DeliberationSession {
  /**
   * Create a new deliberation session.
   *
   * Builds a complete session document from partial input, filling every
   * section with safe defaults, and inserts it into the
   * `deliberation_sessions` collection.
   *
   * @param {Object} data - Partial session data. Recognized sections:
   *   `session_id`, `decision`, `conflict_analysis`, `stakeholders`,
   *   `deliberation_rounds`, `outcome`, `facilitation_log`,
   *   `human_interventions`, `safety_escalations`, `transparency_report`,
   *   `audit_log`, `configuration`. Any omitted section defaults as shown inline.
   * @returns {Promise<Object>} The inserted session document, including `_id`.
   */
  static async create(data) {
    const collection = await getCollection('deliberation_sessions');
    const session = {
      // Fall back to a timestamp-based ID when the caller supplies none.
      session_id: data.session_id || `deliberation-${Date.now()}`,
      created_at: new Date(),
      updated_at: new Date(),
      status: 'pending', // "pending" | "in_progress" | "completed" | "paused" | "archived"
      // Decision under deliberation
      decision: {
        description: data.decision?.description,
        context: data.decision?.context || {},
        triggered_by: data.decision?.triggered_by || 'manual',
        scenario: data.decision?.scenario || null // e.g., "algorithmic_hiring_transparency"
      },
      // Conflict analysis (AI-generated initially, can be refined by human)
      conflict_analysis: {
        moral_frameworks_in_tension: data.conflict_analysis?.moral_frameworks_in_tension || [],
        value_trade_offs: data.conflict_analysis?.value_trade_offs || [],
        affected_stakeholder_groups: data.conflict_analysis?.affected_stakeholder_groups || [],
        incommensurability_level: data.conflict_analysis?.incommensurability_level || 'unknown', // "low" | "moderate" | "high" | "unknown"
        analysis_source: data.conflict_analysis?.analysis_source || 'ai' // "ai" | "human" | "collaborative"
      },
      // Stakeholders participating in deliberation. Each stakeholder gets a
      // stable string ID (generated if absent) so later contributions and
      // escalations can reference them.
      stakeholders: (data.stakeholders || []).map(s => ({
        id: s.id || new ObjectId().toString(),
        name: s.name,
        type: s.type, // "organization" | "individual" | "group"
        represents: s.represents, // e.g., "Job Applicants", "AI Vendors", "Employers"
        moral_framework: s.moral_framework || null, // e.g., "consequentialist", "deontological"
        contact: {
          email: s.contact?.email || null,
          organization: s.contact?.organization || null,
          role: s.contact?.role || null
        },
        participation_status: s.participation_status || 'invited', // "invited" | "confirmed" | "active" | "withdrawn"
        consent_given: s.consent_given || false,
        consent_timestamp: s.consent_timestamp || null
      })),
      // Deliberation rounds (4-round structure)
      deliberation_rounds: data.deliberation_rounds || [],
      // Outcome of deliberation
      outcome: data.outcome || null,
      // ===== AI SAFETY MECHANISMS =====
      // Tracks AI vs. human facilitation actions
      facilitation_log: data.facilitation_log || [],
      // Human intervention tracking
      human_interventions: data.human_interventions || [],
      // Safety escalations
      safety_escalations: data.safety_escalations || [],
      // AI facilitation quality monitoring. Always starts zeroed; counters are
      // advanced via $inc by recordHumanIntervention/recordSafetyEscalation.
      ai_quality_metrics: {
        stakeholder_satisfaction_scores: [], // Populated post-deliberation
        fairness_scores: [], // Populated during deliberation
        escalation_count: 0,
        human_takeover_count: 0
      },
      // Transparency report (auto-generated)
      transparency_report: data.transparency_report || null,
      // Audit log (all actions)
      audit_log: data.audit_log || [],
      // Metadata
      configuration: {
        format: data.configuration?.format || 'hybrid', // "synchronous" | "asynchronous" | "hybrid"
        visibility: data.configuration?.visibility || 'private_then_public', // "public" | "private_then_public" | "partial"
        compensation: data.configuration?.compensation || 'volunteer', // "volunteer" | "500" | "1000"
        ai_role: data.configuration?.ai_role || 'ai_led', // "minimal" | "assisted" | "ai_led"
        output_framing: data.configuration?.output_framing || 'pluralistic_accommodation' // "recommendation" | "consensus" | "pluralistic_accommodation"
      }
    };
    const result = await collection.insertOne(session);
    return { ...session, _id: result.insertedId };
  }

  /**
   * Append a deliberation round to a session.
   *
   * @param {string} sessionId - The session's `session_id` (not Mongo `_id`).
   * @param {Object} roundData - Round payload; `round_number` and `round_type`
   *   are taken verbatim, `contributions` are normalized per stakeholder.
   * @returns {Promise<boolean>} True if a matching session was updated.
   */
  static async addRound(sessionId, roundData) {
    const collection = await getCollection('deliberation_sessions');
    const round = {
      round_number: roundData.round_number,
      round_type: roundData.round_type, // "position_statements" | "shared_values" | "accommodation" | "outcome"
      started_at: new Date(),
      completed_at: null,
      facilitator: roundData.facilitator || 'ai', // "ai" | "human" | "collaborative"
      // Contributions from stakeholders
      contributions: (roundData.contributions || []).map(c => ({
        stakeholder_id: c.stakeholder_id,
        stakeholder_name: c.stakeholder_name,
        timestamp: c.timestamp || new Date(),
        content: c.content,
        moral_framework_expressed: c.moral_framework_expressed || null,
        values_emphasized: c.values_emphasized || []
      })),
      // AI-generated summaries and analysis
      ai_summary: roundData.ai_summary || null,
      ai_framework_analysis: roundData.ai_framework_analysis || null,
      // Human notes/observations
      human_notes: roundData.human_notes || null,
      // Safety checks during this round
      safety_checks: roundData.safety_checks || []
    };
    const result = await collection.updateOne(
      { session_id: sessionId },
      {
        $push: { deliberation_rounds: round },
        $set: { updated_at: new Date() }
      }
    );
    return result.modifiedCount > 0;
  }

  /**
   * Record facilitation action (AI or human).
   * SAFETY MECHANISM: Tracks who did what for transparency.
   *
   * Appends to both `facilitation_log` and the session-wide `audit_log`
   * in a single update.
   *
   * @param {string} sessionId - The session's `session_id`.
   * @param {Object} action - `{ actor, action_type, round_number?, content, reason?, stakeholder_reactions? }`.
   * @returns {Promise<boolean>} True if a matching session was updated.
   */
  static async recordFacilitationAction(sessionId, action) {
    const collection = await getCollection('deliberation_sessions');
    const logEntry = {
      timestamp: new Date(),
      actor: action.actor, // "ai" | "human"
      action_type: action.action_type, // "prompt" | "summary" | "question" | "intervention" | "escalation"
      round_number: action.round_number || null,
      content: action.content,
      reason: action.reason || null, // Why was this action taken?
      stakeholder_reactions: action.stakeholder_reactions || [] // Optional: track if stakeholders respond well
    };
    const result = await collection.updateOne(
      { session_id: sessionId },
      {
        $push: {
          facilitation_log: logEntry,
          audit_log: {
            timestamp: new Date(),
            action: 'facilitation_action_recorded',
            actor: action.actor,
            details: logEntry
          }
        },
        $set: { updated_at: new Date() }
      }
    );
    return result.modifiedCount > 0;
  }

  /**
   * Record human intervention (SAFETY MECHANISM).
   * Called when a human observer takes over from the AI facilitator.
   *
   * Also increments `ai_quality_metrics.human_takeover_count` and mirrors the
   * intervention into `audit_log`.
   *
   * @param {string} sessionId - The session's `session_id`.
   * @param {Object} intervention - `{ intervener, trigger, round_number?, description, ai_action_overridden?, corrective_action, stakeholder_informed?, resolution? }`.
   * @returns {Promise<boolean>} True if a matching session was updated.
   */
  static async recordHumanIntervention(sessionId, intervention) {
    const collection = await getCollection('deliberation_sessions');
    const interventionRecord = {
      timestamp: new Date(),
      intervener: intervention.intervener, // Name/ID of human who intervened
      trigger: intervention.trigger, // "safety_concern" | "ai_error" | "stakeholder_request" | "quality_issue" | "manual"
      round_number: intervention.round_number || null,
      description: intervention.description,
      ai_action_overridden: intervention.ai_action_overridden || null, // What AI was doing when intervention occurred
      corrective_action: intervention.corrective_action, // What human did instead
      stakeholder_informed: intervention.stakeholder_informed || false, // Were stakeholders told about the intervention?
      resolution: intervention.resolution || null // How was the situation resolved?
    };
    const result = await collection.updateOne(
      { session_id: sessionId },
      {
        $push: {
          human_interventions: interventionRecord,
          audit_log: {
            timestamp: new Date(),
            action: 'human_intervention',
            details: interventionRecord
          }
        },
        $inc: { 'ai_quality_metrics.human_takeover_count': 1 },
        $set: { updated_at: new Date() }
      }
    );
    return result.modifiedCount > 0;
  }

  /**
   * Record safety escalation (SAFETY MECHANISM).
   * Called when a concerning pattern is detected (bias, harm, disengagement).
   *
   * Increments `ai_quality_metrics.escalation_count`, mirrors the escalation
   * into `audit_log`, and auto-pauses the session when the severity is
   * "critical" or a pause is explicitly requested.
   *
   * @param {string} sessionId - The session's `session_id`.
   * @param {Object} escalation - `{ detected_by, escalation_type, severity, round_number?, description, stakeholders_affected?, immediate_action_taken, requires_session_pause?, resolved?, resolution_details? }`.
   * @returns {Promise<boolean>} True if a matching session was updated.
   */
  static async recordSafetyEscalation(sessionId, escalation) {
    const collection = await getCollection('deliberation_sessions');
    const escalationRecord = {
      timestamp: new Date(),
      detected_by: escalation.detected_by, // "ai" | "human" | "stakeholder"
      escalation_type: escalation.escalation_type, // "pattern_bias" | "stakeholder_distress" | "disengagement" | "hostile_exchange" | "ai_malfunction"
      severity: escalation.severity, // "low" | "moderate" | "high" | "critical"
      round_number: escalation.round_number || null,
      description: escalation.description,
      stakeholders_affected: escalation.stakeholders_affected || [],
      immediate_action_taken: escalation.immediate_action_taken, // What was done immediately?
      requires_session_pause: escalation.requires_session_pause || false,
      resolved: escalation.resolved || false,
      resolution_details: escalation.resolution_details || null
    };
    const updates = {
      $push: {
        safety_escalations: escalationRecord,
        audit_log: {
          timestamp: new Date(),
          action: 'safety_escalation',
          severity: escalation.severity,
          details: escalationRecord
        }
      },
      $inc: { 'ai_quality_metrics.escalation_count': 1 },
      $set: { updated_at: new Date() }
    };
    // If critical severity or session pause required, auto-pause session
    if (escalation.severity === 'critical' || escalation.requires_session_pause) {
      updates.$set.status = 'paused';
      updates.$set.paused_reason = escalationRecord.description;
      updates.$set.paused_at = new Date();
    }
    const result = await collection.updateOne(
      { session_id: sessionId },
      updates
    );
    return result.modifiedCount > 0;
  }

  /**
   * Set the deliberation outcome and mark the session completed.
   *
   * @param {string} sessionId - The session's `session_id`.
   * @param {Object} outcome - `{ decision_made, values_prioritized?, values_deprioritized?, deliberation_summary, consensus_level, dissenting_perspectives?, justification, moral_remainder?, generated_by? }`.
   * @returns {Promise<boolean>} True if a matching session was updated.
   */
  static async setOutcome(sessionId, outcome) {
    const collection = await getCollection('deliberation_sessions');
    const outcomeRecord = {
      decision_made: outcome.decision_made,
      values_prioritized: outcome.values_prioritized || [],
      values_deprioritized: outcome.values_deprioritized || [],
      deliberation_summary: outcome.deliberation_summary,
      consensus_level: outcome.consensus_level, // "full_consensus" | "strong_accommodation" | "moderate_accommodation" | "documented_dissent" | "no_resolution"
      dissenting_perspectives: outcome.dissenting_perspectives || [],
      justification: outcome.justification,
      moral_remainder: outcome.moral_remainder || null, // What was sacrificed/lost?
      generated_by: outcome.generated_by || 'ai', // "ai" | "human" | "collaborative"
      finalized_at: new Date()
    };
    const result = await collection.updateOne(
      { session_id: sessionId },
      {
        $set: {
          outcome: outcomeRecord,
          status: 'completed',
          updated_at: new Date()
        },
        $push: {
          audit_log: {
            timestamp: new Date(),
            action: 'outcome_set',
            details: { consensus_level: outcome.consensus_level }
          }
        }
      }
    );
    return result.modifiedCount > 0;
  }

  /**
   * Find a session by its application-level `session_id`.
   *
   * @param {string} sessionId
   * @returns {Promise<Object|null>} The session document, or null if not found.
   */
  static async findBySessionId(sessionId) {
    const collection = await getCollection('deliberation_sessions');
    return await collection.findOne({ session_id: sessionId });
  }

  /**
   * Find sessions by scenario, newest first.
   *
   * @param {string} scenario - Matches `decision.scenario`.
   * @param {Object} [options] - `{ limit = 50, skip = 0 }` for pagination.
   * @returns {Promise<Object[]>}
   */
  static async findByScenario(scenario, options = {}) {
    const collection = await getCollection('deliberation_sessions');
    const { limit = 50, skip = 0 } = options;
    return await collection
      .find({ 'decision.scenario': scenario })
      .sort({ created_at: -1 })
      .skip(skip)
      .limit(limit)
      .toArray();
  }

  /**
   * Find sessions by status, newest first.
   *
   * @param {string} status - One of the session status values.
   * @param {Object} [options] - `{ limit = 50, skip = 0 }` for pagination.
   * @returns {Promise<Object[]>}
   */
  static async findByStatus(status, options = {}) {
    const collection = await getCollection('deliberation_sessions');
    const { limit = 50, skip = 0 } = options;
    return await collection
      .find({ status })
      .sort({ created_at: -1 })
      .skip(skip)
      .limit(limit)
      .toArray();
  }

  /**
   * Get AI safety metrics for session.
   * SAFETY MECHANISM: Monitors AI facilitation quality.
   *
   * @param {string} sessionId - The session's `session_id`.
   * @returns {Promise<Object|null>} Aggregated metrics, or null when the
   *   session does not exist.
   */
  static async getAISafetyMetrics(sessionId) {
    const session = await this.findBySessionId(sessionId);
    if (!session) return null;
    return {
      session_id: sessionId,
      status: session.status,
      total_interventions: session.human_interventions.length,
      total_escalations: session.safety_escalations.length,
      critical_escalations: session.safety_escalations.filter(e => e.severity === 'critical').length,
      // NOTE(review): despite the key name, this reports how many times a
      // HUMAN took over from the AI (human_takeover_count). The key is kept
      // as-is for backward compatibility with existing consumers.
      ai_takeover_count: session.ai_quality_metrics.human_takeover_count,
      facilitation_balance: {
        ai_actions: session.facilitation_log.filter(a => a.actor === 'ai').length,
        human_actions: session.facilitation_log.filter(a => a.actor === 'human').length
      },
      unresolved_escalations: session.safety_escalations.filter(e => !e.resolved).length,
      stakeholder_satisfaction: session.ai_quality_metrics.stakeholder_satisfaction_scores,
      recommendation: this._generateSafetyRecommendation(session)
    };
  }

  /**
   * Generate safety recommendation based on metrics.
   * SAFETY MECHANISM: Auto-flags concerning sessions.
   *
   * Precedence (first match wins): critical → warning → excellent → normal.
   *
   * @param {Object} session - A session document with `safety_escalations`
   *   and `ai_quality_metrics` populated.
   * @returns {{level: string, message: string, action: string}}
   */
  static _generateSafetyRecommendation(session) {
    const criticalCount = session.safety_escalations.filter(e => e.severity === 'critical').length;
    const takeoverCount = session.ai_quality_metrics.human_takeover_count;
    const unresolvedCount = session.safety_escalations.filter(e => !e.resolved).length;
    // Any critical escalation, or a backlog of unresolved ones, demands review.
    if (criticalCount > 0 || unresolvedCount > 2) {
      return {
        level: 'critical',
        message: 'Session requires immediate human review. Critical safety issues detected.',
        action: 'pause_and_review'
      };
    }
    if (takeoverCount > 3 || session.safety_escalations.length > 5) {
      return {
        level: 'warning',
        message: 'High intervention rate suggests AI facilitation quality issues.',
        action: 'increase_human_oversight'
      };
    }
    if (takeoverCount === 0 && session.safety_escalations.length === 0) {
      return {
        level: 'excellent',
        message: 'AI facilitation proceeding smoothly with no interventions.',
        action: 'continue_monitoring'
      };
    }
    return {
      level: 'normal',
      message: 'AI facilitation within normal parameters.',
      action: 'continue_monitoring'
    };
  }

  /**
   * Generate transparency report and persist it on the session.
   *
   * @param {string} sessionId - The session's `session_id`.
   * @returns {Promise<Object|null>} The generated report, or null when the
   *   session does not exist.
   */
  static async generateTransparencyReport(sessionId) {
    const session = await this.findBySessionId(sessionId);
    if (!session) return null;
    const report = {
      session_id: sessionId,
      generated_at: new Date(),
      // Process transparency
      process: {
        format: session.configuration.format,
        ai_role: session.configuration.ai_role,
        total_rounds: session.deliberation_rounds.length,
        duration_days: Math.ceil((new Date() - new Date(session.created_at)) / (1000 * 60 * 60 * 24))
      },
      // Stakeholder participation
      stakeholders: {
        total: session.stakeholders.length,
        confirmed: session.stakeholders.filter(s => s.participation_status === 'confirmed').length,
        active: session.stakeholders.filter(s => s.participation_status === 'active').length,
        withdrawn: session.stakeholders.filter(s => s.participation_status === 'withdrawn').length
      },
      // Facilitation transparency (AI vs. Human)
      facilitation: {
        total_actions: session.facilitation_log.length,
        ai_actions: session.facilitation_log.filter(a => a.actor === 'ai').length,
        human_actions: session.facilitation_log.filter(a => a.actor === 'human').length,
        intervention_count: session.human_interventions.length,
        intervention_triggers: this._summarizeInterventionTriggers(session.human_interventions)
      },
      // Safety transparency
      safety: {
        escalations: session.safety_escalations.length,
        by_severity: {
          low: session.safety_escalations.filter(e => e.severity === 'low').length,
          moderate: session.safety_escalations.filter(e => e.severity === 'moderate').length,
          high: session.safety_escalations.filter(e => e.severity === 'high').length,
          critical: session.safety_escalations.filter(e => e.severity === 'critical').length
        },
        resolved: session.safety_escalations.filter(e => e.resolved).length,
        unresolved: session.safety_escalations.filter(e => !e.resolved).length
      },
      // Outcome transparency
      outcome: session.outcome ? {
        consensus_level: session.outcome.consensus_level,
        generated_by: session.outcome.generated_by,
        dissenting_perspectives_count: session.outcome.dissenting_perspectives.length,
        values_in_tension: {
          prioritized: session.outcome.values_prioritized,
          deprioritized: session.outcome.values_deprioritized
        }
      } : null
    };
    // Store report in session.
    // BUG FIX: previously this called `this.collection.updateOne(...)`, but the
    // class has no static `collection` property (collections are fetched per
    // call via getCollection), so persisting the report always threw a
    // TypeError. Fetch the collection the same way every sibling method does.
    const collection = await getCollection('deliberation_sessions');
    await collection.updateOne(
      { session_id: sessionId },
      { $set: { transparency_report: report, updated_at: new Date() } }
    );
    return report;
  }

  /**
   * Count interventions grouped by their trigger value.
   *
   * @param {Array<{trigger: string}>} interventions
   * @returns {Object<string, number>} Map of trigger → occurrence count.
   */
  static _summarizeInterventionTriggers(interventions) {
    const triggers = {};
    interventions.forEach(i => {
      triggers[i.trigger] = (triggers[i.trigger] || 0) + 1;
    });
    return triggers;
  }
}
module.exports = DeliberationSession;