tractatus/src/models/Precedent.model.js
TheFlow 6baa841e99 chore: update dependencies and documentation
Update project dependencies, documentation, and supporting files:
- i18n improvements for multilingual support
- Admin dashboard enhancements
- Documentation updates for Koha/Stripe and deployment
- Server middleware and model updates
- Package dependency updates

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-19 12:48:37 +13:00

503 lines
17 KiB
JavaScript

/**
* Precedent Model
* Stores completed deliberation sessions as searchable precedents
* for informing future values conflicts without dictating outcomes.
*
* PLURALISTIC PRINCIPLE: Precedents inform but don't mandate.
* Similar conflicts can be resolved differently based on context.
*/
const { ObjectId } = require('mongodb');
const { getCollection } = require('../utils/db.util');
class Precedent {
  /**
   * Create a precedent record from a completed deliberation session.
   *
   * @param {Object} sessionData - Completed session document. Must contain
   *   `decision`, `conflict_analysis`, `stakeholders`, `configuration`,
   *   `deliberation_rounds`, `outcome`, `human_interventions`,
   *   `safety_escalations` and `ai_quality_metrics` (shape assumed from the
   *   session model — confirm against Session schema).
   * @returns {Promise<Object>} The inserted precedent including its `_id`.
   */
  static async createFromSession(sessionData) {
    const collection = await getCollection('precedents');

    // Compute duration defensively: a missing/invalid finalized_at or
    // created_at previously produced NaN here; store null instead.
    const finalizedMs = new Date(sessionData.outcome.finalized_at).getTime();
    const createdMs = new Date(sessionData.created_at).getTime();
    const durationDays =
      Number.isFinite(finalizedMs) && Number.isFinite(createdMs)
        ? Math.ceil((finalizedMs - createdMs) / (1000 * 60 * 60 * 24))
        : null;

    const precedent = {
      // Timestamp prefix kept for backward compatibility; the random suffix
      // fixes ID collisions when two sessions finalize in the same millisecond.
      precedent_id: `precedent-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
      created_at: new Date(),
      // Link to original session
      source_session_id: sessionData.session_id,
      source_session_created: sessionData.created_at,
      // Conflict description (searchable)
      conflict: {
        description: sessionData.decision.description,
        scenario: sessionData.decision.scenario,
        moral_frameworks_in_tension: sessionData.conflict_analysis.moral_frameworks_in_tension,
        value_trade_offs: sessionData.conflict_analysis.value_trade_offs,
        incommensurability_level: sessionData.conflict_analysis.incommensurability_level
      },
      // Stakeholder composition (for pattern matching)
      stakeholder_pattern: {
        total_count: sessionData.stakeholders.length,
        types: this._extractStakeholderTypes(sessionData.stakeholders),
        represents: this._extractRepresentations(sessionData.stakeholders),
        moral_frameworks: this._extractMoralFrameworks(sessionData.stakeholders)
      },
      // Deliberation process (what worked, what didn't)
      process: {
        format: sessionData.configuration.format,
        ai_role: sessionData.configuration.ai_role,
        rounds_completed: sessionData.deliberation_rounds.length,
        duration_days: durationDays,
        // AI facilitation quality (for learning)
        ai_facilitation_quality: {
          intervention_count: sessionData.human_interventions.length,
          escalation_count: sessionData.safety_escalations.length,
          stakeholder_satisfaction_avg: this._calculateAverageSatisfaction(
            sessionData.ai_quality_metrics.stakeholder_satisfaction_scores
          )
        }
      },
      // Outcome (the accommodation reached)
      outcome: {
        decision_made: sessionData.outcome.decision_made,
        consensus_level: sessionData.outcome.consensus_level,
        values_prioritized: sessionData.outcome.values_prioritized,
        values_deprioritized: sessionData.outcome.values_deprioritized,
        moral_remainder: sessionData.outcome.moral_remainder,
        dissenting_count: sessionData.outcome.dissenting_perspectives.length
      },
      // Key insights (extracted from deliberation)
      insights: {
        shared_values_discovered: this._extractSharedValues(sessionData.deliberation_rounds),
        accommodation_strategies: this._extractAccommodationStrategies(sessionData.deliberation_rounds),
        unexpected_coalitions: this._extractCoalitions(sessionData.deliberation_rounds),
        framework_tensions_resolved: this._extractTensionResolutions(sessionData)
      },
      // Searchable metadata
      metadata: {
        domain: this._inferDomain(sessionData.decision.scenario), // "employment", "healthcare", "content_moderation", etc.
        decision_type: this._inferDecisionType(sessionData.conflict_analysis), // "transparency", "resource_allocation", "procedural", etc.
        geographic_context: sessionData.decision.context.geographic || 'unspecified',
        temporal_context: sessionData.decision.context.temporal || 'unspecified' // "emerging_issue", "established_issue", "crisis"
      },
      // Usage tracking
      usage: {
        times_referenced: 0,
        influenced_sessions: [], // Array of session_ids where this precedent was consulted
        last_referenced: null
      },
      // Searchability flags
      searchable: true,
      tags: this._generateTags(sessionData),
      // Archive metadata
      archived: false,
      archived_reason: null
    };

    const result = await collection.insertOne(precedent);
    return { ...precedent, _id: result.insertedId };
  }

  /**
   * Shared query runner: most-referenced first, then most recent.
   * All public search methods delegate here so sorting/paging stays consistent.
   *
   * @param {Object} filter - MongoDB filter document.
   * @param {Object} [options]
   * @param {number} [options.limit=10]
   * @param {number} [options.skip=0]
   * @returns {Promise<Object[]>}
   */
  static async _findRanked(filter, { limit = 10, skip = 0 } = {}) {
    const collection = await getCollection('precedents');
    return collection
      .find(filter)
      .sort({ 'usage.times_referenced': -1, created_at: -1 }) // Most-used first, then most recent
      .skip(skip)
      .limit(limit)
      .toArray();
  }

  /**
   * Search precedents by conflict pattern.
   * Returns similar past deliberations (not prescriptive, just informative).
   *
   * @param {Object} query - Optional fields: moral_frameworks (string[]),
   *   scenario, domain, decision_type, incommensurability_level.
   * @param {Object} [options] - { limit, skip }
   * @returns {Promise<Object[]>}
   */
  static async searchByConflict(query, options = {}) {
    const filter = { searchable: true, archived: false };
    if (query.moral_frameworks && query.moral_frameworks.length > 0) {
      filter['conflict.moral_frameworks_in_tension'] = { $in: query.moral_frameworks };
    }
    if (query.scenario) {
      filter['conflict.scenario'] = query.scenario;
    }
    if (query.domain) {
      filter['metadata.domain'] = query.domain;
    }
    if (query.decision_type) {
      filter['metadata.decision_type'] = query.decision_type;
    }
    if (query.incommensurability_level) {
      filter['conflict.incommensurability_level'] = query.incommensurability_level;
    }
    return this._findRanked(filter, options);
  }

  /**
   * Search precedents by stakeholder pattern.
   * Useful for "Has deliberation with similar stakeholders been done before?"
   *
   * @param {Object} pattern - Optional fields: types (string[], ALL must match),
   *   represents (string[], ANY may match), moral_frameworks (string[], ANY).
   * @param {Object} [options] - { limit, skip }
   * @returns {Promise<Object[]>}
   */
  static async searchByStakeholderPattern(pattern, options = {}) {
    const filter = { searchable: true, archived: false };
    if (pattern.types && pattern.types.length > 0) {
      filter['stakeholder_pattern.types'] = { $all: pattern.types };
    }
    if (pattern.represents && pattern.represents.length > 0) {
      filter['stakeholder_pattern.represents'] = { $in: pattern.represents };
    }
    if (pattern.moral_frameworks && pattern.moral_frameworks.length > 0) {
      filter['stakeholder_pattern.moral_frameworks'] = { $in: pattern.moral_frameworks };
    }
    return this._findRanked(filter, options);
  }

  /**
   * Search precedents by tags (free-text search).
   *
   * @param {string|string[]} tags - A tag or list of tags; any match qualifies.
   *   (A bare string used to throw, since `$in` requires an array.)
   * @param {Object} [options] - { limit, skip }
   * @returns {Promise<Object[]>}
   */
  static async searchByTags(tags, options = {}) {
    const tagList = Array.isArray(tags) ? tags : [tags];
    return this._findRanked(
      { searchable: true, archived: false, tags: { $in: tagList } },
      options
    );
  }

  /**
   * Get the most similar precedents using composite scoring across
   * conflict and stakeholder dimensions.
   *
   * @param {Object} querySession - In-progress session to match against.
   * @param {Object} [options] - { limit } (default 5)
   * @returns {Promise<Object[]>} Precedents annotated with similarity_score
   *   and match_reasons, best match first.
   */
  static async findMostSimilar(querySession, options = {}) {
    const { limit = 5 } = options;

    // The two candidate searches are independent — run them in parallel.
    const [conflictMatches, stakeholderMatches] = await Promise.all([
      this.searchByConflict({
        moral_frameworks: querySession.conflict_analysis.moral_frameworks_in_tension,
        scenario: querySession.decision.scenario,
        incommensurability_level: querySession.conflict_analysis.incommensurability_level
      }, { limit: 20 }),
      this.searchByStakeholderPattern({
        types: this._extractStakeholderTypes(querySession.stakeholders),
        represents: this._extractRepresentations(querySession.stakeholders),
        moral_frameworks: this._extractMoralFrameworks(querySession.stakeholders)
      }, { limit: 20 })
    ]);

    // Combine and score; a precedent found by both searches accumulates both scores.
    const candidateMap = new Map();
    conflictMatches.forEach(p => {
      const score = this._calculateSimilarityScore(querySession, p);
      candidateMap.set(p.precedent_id, { precedent: p, score, reasons: ['conflict_match'] });
    });
    stakeholderMatches.forEach(p => {
      const weighted = this._calculateSimilarityScore(querySession, p) * 0.5; // Weight stakeholder match lower
      const existing = candidateMap.get(p.precedent_id);
      if (existing) {
        existing.score += weighted;
        existing.reasons.push('stakeholder_match');
      } else {
        candidateMap.set(p.precedent_id, { precedent: p, score: weighted, reasons: ['stakeholder_match'] });
      }
    });

    return Array.from(candidateMap.values())
      .sort((a, b) => b.score - a.score)
      .slice(0, limit)
      .map(r => ({
        ...r.precedent,
        similarity_score: r.score,
        match_reasons: r.reasons
      }));
  }

  /**
   * Record that this precedent was referenced in a new session.
   *
   * @param {string} precedentId
   * @param {string} referencingSessionId
   * @returns {Promise<boolean>} True if the precedent was found and updated.
   */
  static async recordUsage(precedentId, referencingSessionId) {
    const collection = await getCollection('precedents');
    const result = await collection.updateOne(
      { precedent_id: precedentId },
      {
        $inc: { 'usage.times_referenced': 1 },
        // $addToSet (was $push) keeps influenced_sessions free of duplicate
        // session ids when the same session consults a precedent repeatedly.
        $addToSet: { 'usage.influenced_sessions': referencingSessionId },
        $set: { 'usage.last_referenced': new Date() }
      }
    );
    return result.modifiedCount > 0;
  }

  /**
   * Get statistics on precedent usage: overall summary plus breakdowns
   * by domain and by consensus level.
   *
   * @returns {Promise<{summary: Object, by_domain: Object[], by_consensus_level: Object[]}>}
   */
  static async getStatistics() {
    const collection = await getCollection('precedents');
    const activeMatch = { $match: { searchable: true, archived: false } };

    // The three aggregations are independent — run them in parallel.
    const [summaryResults, byDomain, byConsensusLevel] = await Promise.all([
      collection.aggregate([
        activeMatch,
        {
          $group: {
            _id: null,
            total_precedents: { $sum: 1 },
            avg_references: { $avg: '$usage.times_referenced' },
            total_references: { $sum: '$usage.times_referenced' },
            by_domain: { $push: '$metadata.domain' },
            by_scenario: { $push: '$conflict.scenario' }
          }
        }
      ]).toArray(),
      collection.aggregate([
        activeMatch,
        {
          $group: {
            _id: '$metadata.domain',
            count: { $sum: 1 },
            avg_satisfaction: { $avg: '$process.ai_facilitation_quality.stakeholder_satisfaction_avg' }
          }
        },
        { $sort: { count: -1 } }
      ]).toArray(),
      collection.aggregate([
        activeMatch,
        {
          $group: {
            _id: '$outcome.consensus_level',
            count: { $sum: 1 }
          }
        }
      ]).toArray()
    ]);

    const [stats] = summaryResults;
    return {
      summary: stats || { total_precedents: 0, avg_references: 0, total_references: 0 },
      by_domain: byDomain,
      by_consensus_level: byConsensusLevel
    };
  }

  /**
   * Archive a precedent (make unsearchable but retain for records).
   *
   * @param {string} precedentId
   * @param {string} reason - Stored as archived_reason for audit purposes.
   * @returns {Promise<boolean>} True if the precedent was found and updated.
   */
  static async archive(precedentId, reason) {
    const collection = await getCollection('precedents');
    const result = await collection.updateOne(
      { precedent_id: precedentId },
      {
        $set: {
          archived: true,
          archived_reason: reason,
          archived_at: new Date(),
          searchable: false
        }
      }
    );
    return result.modifiedCount > 0;
  }

  // ===== HELPER METHODS (private) =====

  /** Unique stakeholder `type` values, in first-seen order. */
  static _extractStakeholderTypes(stakeholders) {
    return [...new Set(stakeholders.map(s => s.type))];
  }

  /** Unique stakeholder `represents` values, in first-seen order. */
  static _extractRepresentations(stakeholders) {
    return [...new Set(stakeholders.map(s => s.represents))];
  }

  /** Unique non-empty stakeholder `moral_framework` values. */
  static _extractMoralFrameworks(stakeholders) {
    return [...new Set(stakeholders.map(s => s.moral_framework).filter(Boolean))];
  }

  /**
   * Mean of `score` across satisfaction entries, or null when no scores exist.
   * Assumes each entry is `{ score: number }` — confirm against the
   * ai_quality_metrics schema.
   */
  static _calculateAverageSatisfaction(scores) {
    if (!scores || scores.length === 0) return null;
    return scores.reduce((sum, s) => sum + s.score, 0) / scores.length;
  }

  /** Values emphasized across contributions in the 'shared_values' round. */
  static _extractSharedValues(rounds) {
    const sharedValuesRound = rounds.find(r => r.round_type === 'shared_values');
    if (!sharedValuesRound) return [];
    const values = [];
    sharedValuesRound.contributions.forEach(c => {
      if (c.values_emphasized) {
        values.push(...c.values_emphasized);
      }
    });
    return [...new Set(values)];
  }

  /** Strategies from the 'accommodation' round. Placeholder until the AI summary is parsed for real strategies. */
  static _extractAccommodationStrategies(rounds) {
    const accommodationRound = rounds.find(r => r.round_type === 'accommodation');
    if (!accommodationRound || !accommodationRound.ai_summary) return [];
    // This would ideally parse the summary for strategies.
    return ['tiered_approach', 'contextual_variation', 'temporal_adjustment'];
  }

  /** Unexpected stakeholder agreements. Placeholder — requires NLP analysis of contributions. */
  static _extractCoalitions(rounds) {
    return [];
  }

  /**
   * Match each value trade-off string against the outcome's prioritized and
   * deprioritized values (substring match) and classify how it was resolved.
   */
  static _extractTensionResolutions(sessionData) {
    if (!sessionData.outcome) return [];
    const resolutions = [];
    sessionData.conflict_analysis.value_trade_offs.forEach(tradeoff => {
      const prioritized = sessionData.outcome.values_prioritized.some(v => tradeoff.includes(v));
      const deprioritized = sessionData.outcome.values_deprioritized.some(v => tradeoff.includes(v));
      if (prioritized && deprioritized) {
        resolutions.push({ tension: tradeoff, resolution: 'balanced_accommodation' });
      } else if (prioritized) {
        resolutions.push({ tension: tradeoff, resolution: 'prioritized' });
      }
    });
    return resolutions;
  }

  /** Map a known scenario key to its domain; unknown scenarios fall back to 'general'. */
  static _inferDomain(scenario) {
    const domainMap = {
      'algorithmic_hiring_transparency': 'employment',
      'remote_work_pay': 'employment',
      'content_moderation': 'platform_governance',
      'healthcare_ai': 'healthcare',
      'ai_content_labeling': 'creative_rights'
    };
    return domainMap[scenario] || 'general';
  }

  /**
   * Classify the decision type by keyword search over the joined trade-off
   * text. Order matters: earlier keywords win when several appear.
   */
  static _inferDecisionType(conflictAnalysis) {
    const description = conflictAnalysis.value_trade_offs.join(' ').toLowerCase();
    if (description.includes('transparency')) return 'transparency';
    if (description.includes('resource') || description.includes('allocation')) return 'resource_allocation';
    if (description.includes('procedure') || description.includes('process')) return 'procedural';
    if (description.includes('privacy')) return 'privacy';
    if (description.includes('safety')) return 'safety';
    return 'unspecified';
  }

  /**
   * Build the deduplicated tag list: scenario, lowercased moral frameworks,
   * snake_cased stakeholder representations, consensus level, and a
   * facilitation-quality tag derived from the intervention count.
   */
  static _generateTags(sessionData) {
    const tags = [];
    if (sessionData.decision.scenario) {
      tags.push(sessionData.decision.scenario);
    }
    tags.push(...sessionData.conflict_analysis.moral_frameworks_in_tension.map(f => f.toLowerCase()));
    tags.push(...sessionData.stakeholders.map(s => s.represents.toLowerCase().replace(/ /g, '_')));
    if (sessionData.outcome) {
      tags.push(sessionData.outcome.consensus_level);
    }
    const interventions = sessionData.human_interventions.length;
    if (interventions === 0) tags.push('smooth_ai_facilitation');
    else if (interventions > 3) tags.push('challenging_ai_facilitation');
    return [...new Set(tags)];
  }

  /**
   * Composite similarity score (0-100) between a query session and a stored
   * precedent: scenario match 40, framework overlap up to 30,
   * incommensurability match 20, stakeholder-count proximity up to 10.
   */
  static _calculateSimilarityScore(querySession, precedent) {
    let score = 0;

    // Scenario match (high weight)
    if (querySession.decision.scenario === precedent.conflict.scenario) {
      score += 40;
    }

    // Moral frameworks overlap (medium weight). Guard the denominator:
    // when both sets are empty, 0/0 previously produced NaN and poisoned
    // the entire score.
    const queryFrameworks = new Set(querySession.conflict_analysis.moral_frameworks_in_tension);
    const precedentFrameworks = new Set(precedent.conflict.moral_frameworks_in_tension);
    const denominator = Math.max(queryFrameworks.size, precedentFrameworks.size);
    if (denominator > 0) {
      const frameworkOverlap = [...queryFrameworks].filter(f => precedentFrameworks.has(f)).length;
      score += (frameworkOverlap / denominator) * 30;
    }

    // Incommensurability match (medium weight)
    if (querySession.conflict_analysis.incommensurability_level === precedent.conflict.incommensurability_level) {
      score += 20;
    }

    // Stakeholder count similarity (low weight)
    const countDiff = Math.abs(querySession.stakeholders.length - precedent.stakeholder_pattern.total_count);
    score += Math.max(0, 10 - countDiff * 2);

    return score;
  }
}
// CommonJS export: the Precedent class is this module's sole public surface.
module.exports = Precedent;