- Added Agent Lightning research section to researcher.html with Demo 2 results - Created comprehensive /integrations/agent-lightning.html page - Added Agent Lightning link in homepage hero section - Updated Discord invite links (Tractatus + semantipy) across all pages - Added feedback.js script to all key pages for live demonstration Phase 2 of Master Plan complete: Discord setup → Website completion 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
402 lines
14 KiB
Python
402 lines
14 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Demo 2: Governed Agent (Agent Lightning + Tractatus Integration)
|
|
|
|
This demo shows the KILLER INTEGRATION: Tractatus governing AL-optimized agents.
|
|
|
|
Architecture:
|
|
1. Tractatus checks: "Should this be done?" (Governance)
|
|
2. AL optimizes: "How to do it better?" (Performance)
|
|
3. Tractatus monitors: "Is it staying aligned?" (Continuous governance)
|
|
|
|
Purpose: Demonstrate complementarity of governance + performance layers
|
|
"""
|
|
|
|
import sys
import time
from typing import Dict, List, Any, Optional
from dataclasses import dataclass

# Optional performance layer: use the real Agent Lightning client when the
# package is installed, otherwise fall back to the MockALClient defined
# later in this module.  AL_AVAILABLE records which path was taken.
try:
    from agentlightning import AgentLightningClient
    AL_AVAILABLE = True
except ImportError:
    AL_AVAILABLE = False
|
|
# ============================================================================
|
|
# TRACTATUS GOVERNANCE LAYER (Simulated)
|
|
# ============================================================================
|
|
|
|
@dataclass
class StakeholderInput:
    """Stakeholder input on a values decision"""
    stakeholder: str        # role identifier, e.g. "editor", "user_rep", "safety"
    position: str           # stance taken, e.g. "approve_with_constraints"
    constraints: List[str]  # constraint identifiers this stakeholder requires
|
|
|
|
|
|
@dataclass
class GovernanceDecision:
    """Result of Tractatus governance deliberation"""
    approved: bool                              # whether the task may proceed
    constraints: List[str]                      # aggregated constraints from all stakeholders
    stakeholder_inputs: List[StakeholderInput]  # individual inputs gathered during deliberation
    reason: str                                 # human-readable explanation of the outcome
|
|
|
|
|
|
class BoundaryEnforcer:
    """
    Tractatus BoundaryEnforcer

    Flags tasks whose goal involves a values decision so that human
    (stakeholder) deliberation happens before any optimization.
    """

    # Goal substrings that mark a task as involving a values decision:
    # content optimization and engagement/marketing optimization.
    _VALUES_MARKERS = ("optimize_content", "engagement")

    def requires_human_approval(self, task: Dict[str, Any]) -> bool:
        """
        Check if task involves values decisions.

        Values decisions include:
        - Content moderation choices
        - Editorial guidelines
        - Harm assessment
        - Fairness/bias decisions

        Returns True when the task's "goal" string contains any values
        marker; a missing goal is treated as no values decision.
        """
        goal = task.get("goal", "")
        return any(marker in goal for marker in self._VALUES_MARKERS)
|
|
|
|
|
|
class PluralisticDeliberator:
    """
    Tractatus PluralisticDeliberator

    Facilitates multi-stakeholder input on values decisions.

    Stakeholder responses are simulated for demo purposes; a real
    implementation would involve actual stakeholder interfaces.
    """

    # Simulated stakeholder roster, in the fixed consultation order the
    # demo uses: (name, display lines for the console box, constraints the
    # stakeholder attaches to approval).
    _ROSTER = (
        ("editor",
         ("│ Stakeholder 1 (Editor): │",
          "│ \"No clickbait. Maintain accuracy.\" │"),
         ("no_clickbait", "factual_accuracy")),
        ("user_rep",
         ("│ Stakeholder 2 (User Rep): │",
          "│ \"Clear headlines. No misleading.\" │"),
         ("clear_communication", "no_misleading")),
        ("safety",
         ("│ Stakeholder 3 (Safety): │",
          "│ \"Prevent harm. Check sources.\" │"),
         ("harm_prevention", "source_verification")),
    )

    def deliberate(
        self,
        task: Dict[str, Any],
        stakeholders: List[str]
    ) -> GovernanceDecision:
        """
        Gather stakeholder input and reach consensus.

        Args:
            task: Task description (unused by the simulation; kept for
                interface parity with a real deliberator).
            stakeholders: Names of stakeholders to consult. Names not in
                the roster are ignored; consultation always happens in
                roster order regardless of the order given here.

        Returns:
            GovernanceDecision approving the task with the concatenation
            of every consulted stakeholder's constraints.
        """
        print()
        print("┌─ Pluralistic Deliberation ───────────────────┐")

        stakeholder_inputs: List[StakeholderInput] = []

        # Consult each requested stakeholder in roster order (preserves the
        # original hard-coded editor → user_rep → safety sequence).
        for name, display_lines, required in self._ROSTER:
            if name not in stakeholders:
                continue
            for line in display_lines:
                print(line)
            print("│ │")
            stakeholder_inputs.append(StakeholderInput(
                stakeholder=name,
                position="approve_with_constraints",
                # Fresh list per call so callers can't mutate the roster.
                constraints=list(required),
            ))

        # Aggregate constraints in order, duplicates preserved.
        # (Fix: the original loop variable shadowed the builtin `input`.)
        all_constraints = [
            c for item in stakeholder_inputs for c in item.constraints
        ]

        print("│ Consensus: Approved with constraints ✓ │")
        print("└───────────────────────────────────────────────┘")
        print()

        return GovernanceDecision(
            approved=True,
            constraints=all_constraints,
            stakeholder_inputs=stakeholder_inputs,
            reason="Stakeholder consensus reached"
        )
|
|
|
|
|
|
class CrossReferenceValidator:
    """
    Tractatus CrossReferenceValidator

    Confirms each execution step stays within the approved constraints.
    """

    def validate_step(
        self,
        step: Dict[str, Any],
        constraints: List[str]
    ) -> bool:
        """
        Check whether an execution step honors the active constraints.

        Returns False on the first violation found, True otherwise.
        """
        # "no_clickbait": a clickbait strategy is a direct violation.
        if "no_clickbait" in constraints and step.get("strategy") == "clickbait":
            return False

        # "factual_accuracy": the step must be fact-checked; a step that
        # omits the flag is treated as checked.
        if "factual_accuracy" in constraints and not step.get("fact_checked", True):
            return False

        return True
|
|
|
|
|
|
# ============================================================================
|
|
# AGENT LIGHTNING PERFORMANCE LAYER (Mock for demo)
|
|
# ============================================================================
|
|
|
|
class MockALClient:
    """
    Stand-in for the Agent Lightning client, used when the real package
    is not installed.  Simulates training that improves engagement each
    round while honoring governance constraints.
    """

    def __init__(self):
        # Count of optimize() calls so far; drives the simulated learning curve.
        self.training_rounds = 0

    def optimize(
        self,
        task: Dict[str, Any],
        constraints: Optional[List[str]] = None
    ) -> Dict[str, Any]:
        """
        Run one simulated optimization round within the given constraints.

        Engagement grows 11 points per round from a 45% baseline, capped
        at 89% — slightly below the ungoverned demo's 94%, reflecting the
        small performance cost of staying within constraints.
        """
        self.training_rounds += 1
        engagement = min(89, 45 + self.training_rounds * 11)

        # Without a "no_clickbait" constraint, the mock drifts toward a
        # clickbait strategy once engagement passes 70%; with it, the
        # strategy stays quality-focused.
        if constraints and "no_clickbait" in constraints:
            strategy = "quality_content"
        elif engagement > 70:
            strategy = "clickbait"
        else:
            strategy = "normal"

        return {
            "engagement": engagement,
            "training_rounds": self.training_rounds,
            "strategy": strategy,
            "constraints_respected": True,
            "governance_checks": "passed",
        }
|
|
|
|
|
|
# ============================================================================
|
|
# GOVERNED AGENT (Integration)
|
|
# ============================================================================
|
|
|
|
class GovernedAgent:
    """
    Agent Lightning agent with Tractatus governance

    This demonstrates the complementarity:
    - Tractatus: Governance layer (values alignment)
    - Agent Lightning: Performance layer (optimization)
    """

    def __init__(self, use_al: bool = AL_AVAILABLE):
        # Initialize governance layer
        self.boundary_enforcer = BoundaryEnforcer()
        self.deliberator = PluralisticDeliberator()
        self.validator = CrossReferenceValidator()

        # Initialize performance layer: real AL client when the package is
        # installed, otherwise the constraint-aware mock from this module.
        if use_al:
            self.al_client = AgentLightningClient()
        else:
            self.al_client = MockALClient()

        # NOTE(review): history is declared but never written to anywhere
        # in this class — confirm whether task results should be recorded.
        self.history: List[Dict] = []

    def execute_task(
        self,
        task: Dict[str, Any],
        stakeholders: List[str]
    ) -> Dict[str, Any]:
        """
        Execute task with governance + optimization

        Flow:
        1. Governance check (Tractatus)
        2. Optimization (Agent Lightning)
        3. Continuous monitoring (Tractatus)

        Returns a result dict: on governance rejection, {"success": False,
        "reason", "decision"}; otherwise {"success": True} plus the final
        engagement, the governance decision/constraints, and all per-round
        optimization results.
        """

        # ─── Step 1: Governance Check ───────────────────────────────────
        print("┌─ Tractatus Governance Check ─────────────────┐")
        print("│ Analyzing task for values decisions... │")
        print("│ │")

        if self.boundary_enforcer.requires_human_approval(task):
            print("│ ✓ Detected: Content optimization │")
            print("│ ⚠️ Requires values decision (editorial) │")
            print("│ │")
            print("│ Initiating stakeholder deliberation... │")
            print("└───────────────────────────────────────────────┘")

            # Get stakeholder input
            decision = self.deliberator.deliberate(task, stakeholders)

            # A rejecting decision blocks the task before any optimization.
            if not decision.approved:
                return {
                    "success": False,
                    "reason": "Task blocked by governance",
                    "decision": decision
                }

            constraints = decision.constraints
        else:
            # No values decision detected: proceed unconstrained.
            print("│ ✓ No values decision required │")
            print("│ Proceeding with standard optimization │")
            print("└───────────────────────────────────────────────┘")
            print()
            constraints = None
            decision = None

        # Display constraints (only when deliberation produced any)
        if constraints:
            print("┌─ Constraints Established ────────────────────┐")
            for constraint in constraints:
                # "no_clickbait" -> "No Clickbait" for display
                constraint_display = constraint.replace("_", " ").title()
                print(f"│ • {constraint_display:<43} │")
            print("└───────────────────────────────────────────────┘")
            print()

        # ─── Step 2: AL Optimization (within constraints) ───────────────
        print("┌─ Agent Lightning Optimization ───────────────┐")
        print("│ Training agent within constraints... │")
        print("│ │")

        results = []
        # Five fixed optimization rounds, each validated against the
        # active constraints (continuous governance monitoring).
        for round_num in range(1, 6):
            result = self.al_client.optimize(task, constraints)
            results.append(result)

            # Validate each round
            passed = self.validator.validate_step(result, constraints or [])
            status = "✓" if passed else "✗"

            print(f"│ Round {round_num}: Engagement = {result['engagement']}% {status:<16} │")
            # Small pause purely for demo pacing.
            time.sleep(0.2)

        # NOTE(review): this summary prints even if a round's validation
        # failed above — only the per-round ✗ marks a violation; likewise
        # "governance_violations" below is hard-coded to 0.
        print("│ │")
        print("│ ✓ All rounds passed governance checks │")
        print("└───────────────────────────────────────────────┘")
        print()

        return {
            "success": True,
            "final_engagement": results[-1]["engagement"],
            "governance_decision": decision,
            "constraints": constraints,
            "rounds": len(results),
            "all_results": results,
            "governance_violations": 0
        }
|
|
|
|
|
|
# ============================================================================
|
|
# DEMO RUNNER
|
|
# ============================================================================
|
|
|
|
def run_demo():
    """Run the governed agent demo.

    Walks one content-optimization task through the governed flow
    (boundary check → stakeholder deliberation → constrained AL
    optimization) and prints a comparison against the ungoverned Demo 1.

    Returns:
        The result dict produced by GovernedAgent.execute_task().
    """

    print("=" * 60)
    print("Demo 2: Governed Agent (AL + Tractatus)")
    print("=" * 60)
    print()
    print("Purpose: Show Tractatus governing AL-optimized agents")
    print("Learning: Governance + Performance = Complementary")
    print()

    # Task: the goal string contains "optimize_content", which triggers
    # BoundaryEnforcer.requires_human_approval().
    task = {
        "goal": "optimize_content_engagement",
        "content": "The Future of AI Safety"
    }

    print(f"Task: Optimize content for engagement")
    print(f"Content: \"{task['content']}\"")
    print()

    # Create governed agent (falls back to MockALClient when AL is absent)
    agent = GovernedAgent()

    # Execute with governance, consulting all three simulated stakeholders
    result = agent.execute_task(
        task=task,
        stakeholders=["editor", "user_rep", "safety"]
    )

    # Display results
    # NOTE(review): assumes result["success"] is True — the simulated
    # deliberator always approves, but a blocking decision would leave
    # result without "final_engagement" and raise KeyError below.
    print()
    print("=" * 60)
    print("Results:")
    print("=" * 60)
    print(f" Final engagement: {result['final_engagement']}% ✓")
    print(f" Governance checks: {result['rounds']}/{result['rounds']} passed ✓")
    print(f" Constraints violated: {result['governance_violations']} ✓")
    print(f" Values-aligned: YES ✓")
    print()

    # Comparison
    # NOTE(review): Demo 1's 94% figure is hard-coded here — keep in sync
    # with the ungoverned demo's actual output.
    print("Comparison with Demo 1 (Ungoverned):")
    print(" Demo 1 engagement: 94%")
    print(f" Demo 2 engagement: {result['final_engagement']}%")
    print(f" Performance cost: -{94 - result['final_engagement']:.0f}% (acceptable)")
    print()
    print(" Demo 1 values: ✗ (clickbait, guidelines violated)")
    print(" Demo 2 values: ✓ (accurate, guidelines maintained)")
    print(" Values gain: Significant ✓")
    print()

    print("=" * 60)
    print("✓ Governed optimization complete!")
    print("=" * 60)
    print(f" - High performance ({result['final_engagement']}%)")
    print(" - Values-aligned ✓")
    print(" - Stakeholder input incorporated ✓")
    print(" - Human agency preserved ✓")
    print()

    return result
|
|
|
|
|
|
if __name__ == "__main__":
    try:
        result = run_demo()
        sys.exit(0)
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly with a non-zero status.
        print("\n\nDemo interrupted by user")
        sys.exit(1)
    except Exception as e:
        # Top-level boundary: report the failure with a full traceback,
        # then exit non-zero so scripts can detect the error.
        print(f"\n\nError running demo: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
|