From 5e7b3ef21f21181d4156b123a4fb0ff9c3ba6485 Mon Sep 17 00:00:00 2001 From: TheFlow Date: Mon, 27 Oct 2025 00:18:45 +1300 Subject: [PATCH] feat(i18n): add complete internationalization for researcher page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implemented full translation infrastructure for researcher.html: - Added 148 data-i18n attributes across all content sections - Created 142 translation keys in nested JSON structure - Translated all keys to German (DE) and French (FR) via DeepL Pro API - Zero translation errors, all keys validated across 3 languages Content translated includes: - Research Context & Scope (4 major paragraphs) - Theoretical Foundations (Organizational Theory + Values Pluralism accordions) - Empirical Observations (3 documented failure modes with labels) - Six-Component Architecture (all services with descriptions) - Interactive Demonstrations, Resources, Bibliography, Limitations New scripts: - translate-researcher-deepl.js: Automated DeepL translation with rate limiting - validate-researcher-i18n.js: i18n completeness validation tool Translation quality verified with sample checks. Page ready for multilingual deployment. đŸ€– Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- public/locales/de/researcher.json | 123 +++++++++++++++- public/locales/en/researcher.json | 130 ++++++++++++++++- public/locales/fr/researcher.json | 123 +++++++++++++++- public/researcher.html | 198 +++++++++++++------------- scripts/translate-researcher-deepl.js | 187 ++++++++++++++++++++++++ scripts/validate-researcher-i18n.js | 88 ++++++++++++ 6 files changed, 732 insertions(+), 117 deletions(-) create mode 100644 scripts/translate-researcher-deepl.js create mode 100644 scripts/validate-researcher-i18n.js diff --git a/public/locales/de/researcher.json b/public/locales/de/researcher.json index 95ecf23a..de40392a 100644 --- a/public/locales/de/researcher.json +++ b/public/locales/de/researcher.json @@ -12,12 +12,42 @@ "research_context": { "heading": "Forschungskontext & Umfang", "development_note": "Entwicklungskontext", - "development_text": "Tractatus wurde ĂŒber sechs Monate (April–Oktober 2025) in progressiven Phasen entwickelt, die sich zu einer Live-Demonstration seiner FĂ€higkeiten in Form eines Einzelprojekt-Kontexts (https://agenticgovernance.digital) entwickelten. Beobachtungen stammen aus direktem Engagement mit Claude Code (Anthropics Sonnet 4.5-Modell) ĂŒber etwa 500 Entwicklungssitzungen. Dies ist explorative Forschung, keine kontrollierte Studie." + "development_text": "Tractatus wurde ĂŒber sechs Monate (April–Oktober 2025) in progressiven Phasen entwickelt, die sich zu einer Live-Demonstration seiner FĂ€higkeiten in Form eines Einzelprojekt-Kontexts (https://agenticgovernance.digital) entwickelten. Beobachtungen stammen aus direktem Engagement mit Claude Code (Anthropics Sonnet 4.5-Modell) ĂŒber etwa 500 Entwicklungssitzungen. Dies ist explorative Forschung, keine kontrollierte Studie.", + "paragraph_1": "Die Anpassung fortschrittlicher KI an menschliche Werte ist eine der grĂ¶ĂŸten Herausforderungen, vor denen wir stehen. Da sich das Wachstum von FĂ€higkeiten unter dem Einfluss von Big Tech beschleunigt, stehen wir vor einem kategorischen Imperativ: Wir mĂŒssen die menschliche Kontrolle ĂŒber Wertentscheidungen bewahren, oder wir riskieren, die Kontrolle vollstĂ€ndig abzugeben.", + "paragraph_2": "Der Rahmen ist aus einer praktischen Notwendigkeit heraus entstanden. 
WĂ€hrend der Entwicklung beobachteten wir immer wieder, dass sich KI-Systeme ĂŒber explizite Anweisungen hinwegsetzten, von festgelegten Wertvorgaben abwichen oder unter dem Druck des Kontextes stillschweigend die QualitĂ€t verschlechterten. Herkömmliche Governance-AnsĂ€tze (Grundsatzdokumente, ethische Richtlinien, Prompt-Engineering) erwiesen sich als unzureichend, um diese Fehler zu verhindern.", + "paragraph_3": "Anstatt zu hoffen, dass sich KI-Systeme \"richtig verhalten\", schlĂ€gt der Tractatus strukturelle BeschrĂ€nkungen vor, bei denen bestimmte Entscheidungsarten menschliches Urteilsvermögen erfordern. Diese architektonischen Grenzen können sich an individuelle, organisatorische und gesellschaftliche Normen anpassen - und schaffen so eine Grundlage fĂŒr einen begrenzten KI-Betrieb, der mit dem Wachstum der FĂ€higkeiten sicherer skalieren kann.", + "paragraph_4": "Dies fĂŒhrte zu der zentralen Forschungsfrage: Kann die Steuerung architektonisch außerhalb von KI-Systemen erfolgen, anstatt sich auf die freiwillige Einhaltung der KI zu verlassen? Wenn dieser Ansatz in großem Maßstab funktioniert, könnte Tractatus einen Wendepunkt darstellen - einen Weg, auf dem KI die menschlichen FĂ€higkeiten verbessert, ohne die menschliche SouverĂ€nitĂ€t zu gefĂ€hrden." }, "theoretical_foundations": { "heading": "Theoretische Grundlagen", "org_theory_title": "Organisationstheoretische Basis", - "values_pluralism_title": "Wertepluralismus & Moralphilosophie" + "values_pluralism_title": "Wertepluralismus & Moralphilosophie", + "org_theory_intro": "Der Tractatus stĂŒtzt sich auf vier Jahrzehnte Organisationsforschung, die sich mit AutoritĂ€tsstrukturen bei der Demokratisierung von Wissen befasst:", + "org_theory_1_title": "Zeitbasierte Organisation (Bluedorn, Ancona):", + "org_theory_1_desc": "Entscheidungen werden in strategischen (Jahre), operativen (Monate) und taktischen (Stunden-Tage) ZeitrĂ€umen getroffen. KI-Systeme, die mit taktischer Geschwindigkeit operieren, sollten strategische Entscheidungen, die in einem angemessenen zeitlichen Rahmen getroffen werden, nicht außer Kraft setzen. Der InstructionPersistenceClassifier modelliert explizit den zeitlichen Horizont (STRATEGIC, OPERATIONAL, TACTICAL), um eine Anpassung der Entscheidungsbefugnisse zu erzwingen.", + "org_theory_2_title": "Orchestrierung von Wissen (Crossan et al.):", + "org_theory_2_desc": "Wenn Wissen durch KI allgegenwĂ€rtig wird, verlagert sich die organisatorische AutoritĂ€t von der Informationskontrolle zur Wissenskoordination. Governance-Systeme mĂŒssen die Entscheidungsfindung ĂŒber verteiltes Fachwissen orchestrieren, anstatt die Kontrolle zu zentralisieren. Der PluralisticDeliberationOrchestrator implementiert eine nicht-hierarchische Koordination fĂŒr Wertekonflikte.", + "org_theory_3_title": "Post-bĂŒrokratische AutoritĂ€t (Laloux, Hamel):", + "org_theory_3_desc": "Traditionelle hierarchische AutoritĂ€t setzt Informationsasymmetrie voraus. Da KI das Fachwissen demokratisiert, muss sich die legitime AutoritĂ€t aus einem angemessenen Zeithorizont und der Vertretung der Interessengruppen ergeben, nicht aus der Machtposition. Die Rahmenarchitektur trennt technische FĂ€higkeiten (was KI tun kann) von Entscheidungsbefugnissen (was KI tun sollte).", + "org_theory_4_title": "Strukturelle TrĂ€gheit (Hannan & Freeman):", + "org_theory_4_desc": "Die in die Kultur oder die Prozesse eingebettete Governance erodiert mit der Zeit, wenn sich die Systeme weiterentwickeln. 
Architektonische ZwĂ€nge schaffen eine strukturelle TrĂ€gheit, die einer organisatorischen Abweichung entgegenwirkt. Wenn die Governance außerhalb der KI-Laufzeit angesiedelt wird, entsteht eine \"Verantwortungsinfrastruktur\", die auch bei Änderungen in einzelnen Sitzungen bestehen bleibt.", + "org_theory_pdf_link": "VollstĂ€ndige Grundlagen der Organisationstheorie anzeigen (PDF)", + "values_core_research": "Forschungsschwerpunkt:", + "values_core_research_desc": "Der PluralisticDeliberationOrchestrator stellt den wichtigsten theoretischen Beitrag des Tractatus dar, der sich mit der Frage beschĂ€ftigt, wie menschliche Werte in Organisationen, die durch KI-Agenten erweitert werden, aufrechterhalten werden können.", + "values_central_problem": "Das zentrale Problem: Viele \"Sicherheitsfragen\" in der KI-Governance sind in Wirklichkeit Wertekonflikte, bei denen mehrere legitime Perspektiven existieren. Wenn Effizienz mit Transparenz oder Innovation mit Risikominderung kollidiert, kann kein Algorithmus die \"richtige\" Antwort bestimmen. Dies sind Wertekonflikte, die eine menschliche AbwĂ€gung zwischen den Perspektiven der Beteiligten erfordern.", + "values_berlin_title": "Isaiah Berlin: Wertepluralismus", + "values_berlin_desc": "Berlins Konzept des Wertepluralismus besagt, dass legitime Werte miteinander in Konflikt geraten können, ohne dass einer von ihnen objektiv ĂŒberlegen ist. Freiheit und Gleichheit, Gerechtigkeit und Barmherzigkeit, Innovation und StabilitĂ€t - dies sind inkommensurable GĂŒter. KI-Systeme, die auf utilitaristische Effizienzmaximierung trainiert sind, können nicht zwischen ihnen entscheiden, ohne einen einzigen Werterahmen vorzuschreiben, der legitime Alternativen ausschließt.", + "values_weil_title": "Simone Weil: Aufmerksamkeit und menschliche BedĂŒrfnisse", + "values_weil_desc": "Weils Philosophie der Aufmerksamkeit ist die Grundlage fĂŒr die Überlegungen des Orchestrators. The Need for Roots identifiziert grundlegende menschliche BedĂŒrfnisse (Ordnung, Freiheit, Verantwortung, Gleichheit, hierarchische Struktur, Ehre, Sicherheit, Risiko usw.), die in einem SpannungsverhĂ€ltnis stehen. Die richtige Aufmerksamkeit erfordert es, diese BedĂŒrfnisse in ihrer ganzen Besonderheit zu sehen, anstatt sie in algorithmische Gewichte zu abstrahieren. In KI-gestĂŒtzten Organisationen besteht die Gefahr, dass Bot-vermittelte Prozesse menschliche Werte als Optimierungsparameter behandeln und nicht als inkommensurable BedĂŒrfnisse, die sorgfĂ€ltige Aufmerksamkeit erfordern.", + "values_williams_title": "Bernard Williams: Moralischer Rest", + "values_williams_desc": "Williams' Konzept des moralischen Rests erkennt an, dass selbst optimale Entscheidungen anderen legitimen Werten unvermeidlich Schaden zufĂŒgen. Der Orchestrator dokumentiert abweichende Perspektiven nicht als \"Minderheitenmeinungen, die ĂŒberstimmt werden mĂŒssen\", sondern als legitime moralische Positionen, gegen die der gewĂ€hlte Kurs zwangslĂ€ufig verstĂ¶ĂŸt. Dies verhindert, dass die KI-Governance die Optimierung fĂŒr abgeschlossen erklĂ€rt, wenn Wertekonflikte lediglich unterdrĂŒckt werden.", + "values_implementation": "Implementierung des Rahmens: Anstelle einer algorithmischen Lösung erleichtert der PluralisticDeliberationOrchestrator Folgendes:", + "values_implementation_1": "Identifizierung der Interessengruppen: Wer hat ein berechtigtes Interesse an dieser Entscheidung? 
(Weil: wessen BedĂŒrfnisse werden berĂŒhrt?)", + "values_implementation_2": "Nicht-hierarchische Deliberation: Gleichberechtigte Mitsprache ohne automatischen Expertenvorrang (Berlin: keine privilegierte Wertehierarchie)", + "values_implementation_3": "QualitĂ€t der Aufmerksamkeit: Detaillierte Untersuchung, wie sich die Entscheidung auf die BedĂŒrfnisse der einzelnen Stakeholder auswirkt (Weil: PartikularitĂ€t statt Abstraktion)", + "values_implementation_4": "Dokumentierter Dissens: Minderheitspositionen in vollem Umfang dokumentiert (Williams: moralischer Rest explizit gemacht)", + "values_conclusion": "Bei diesem Ansatz wird anerkannt, dass es bei der Governance nicht darum geht, Wertekonflikte zu lösen, sondern dafĂŒr zu sorgen, dass sie durch einen angemessenen deliberativen Prozess mit echter menschlicher Aufmerksamkeit angegangen werden, anstatt dass eine KI die Lösung ĂŒber Verzerrungen in den Trainingsdaten oder Effizienzmetriken aufzwingt.", + "values_pdf_link": "Pluralistischen Werte-Beratungsplan anzeigen (PDF, ENTWURF)" }, "empirical_observations": { "heading": "Empirische Beobachtungen: Dokumentierte Fehlermodi", @@ -25,12 +55,52 @@ "failure_1_title": "Mustererkennung-Bias-Überschreibung (Der 27027-Vorfall)", "failure_2_title": "AllmĂ€hliche Werteverschiebung unter Kontextdruck", "failure_3_title": "Stille QualitĂ€tsdegradation bei hohem Kontextdruck", - "research_note": "Diese Muster sind durch direkte Beobachtung entstanden, nicht durch Hypothesentests. Wir behaupten nicht, dass sie universal fĂŒr alle LLM-Systeme oder Bereitstellungskontexte sind. Sie stellen die empirische Basis fĂŒr Framework-Design-Entscheidungen dar – Probleme, denen wir tatsĂ€chlich begegnet sind, und architektonische Interventionen, die in diesem spezifischen Kontext tatsĂ€chlich funktioniert haben." + "research_note": "Diese Muster sind durch direkte Beobachtung entstanden, nicht durch Hypothesentests. Wir behaupten nicht, dass sie universal fĂŒr alle LLM-Systeme oder Bereitstellungskontexte sind. Sie stellen die empirische Basis fĂŒr Framework-Design-Entscheidungen dar – Probleme, denen wir tatsĂ€chlich begegnet sind, und architektonische Interventionen, die in diesem spezifischen Kontext tatsĂ€chlich funktioniert haben.", + "failure_1_observed": "Der Benutzer gab an: \"ÜberprĂŒfe MongoDB auf Port 27027\", aber die KI verwendete stattdessen sofort den Standardport 27017. Dies geschah innerhalb ein und derselben Nachricht - kein Vergessen im Laufe der Zeit, sondern sofortige Autokorrektur durch Trainingsdatenmuster.", + "failure_1_root_cause": "Die Trainingsdaten enthalten Tausende von Beispielen fĂŒr MongoDB an Port 27017 (Standard). Wenn die KI auf \"MongoDB\" + Portangabe stĂ¶ĂŸt, setzt die Mustererkennung die explizite Anweisung außer Kraft. Ähnlich wie bei der Autokorrektur, die korrekt geschriebene Eigennamen in gewöhnliche Wörter umwandelt.", + "failure_1_traditional_failed": "Prompt-Engineering (\"Bitte befolgen Sie die Anweisungen genau\") ist unwirksam, weil die KI wirklich glaubt, dass sie die Anweisungen befolgt - die Mustererkennung funktioniert unterhalb der Ebene der GesprĂ€chslogik.", + "failure_1_intervention": "InstructionPersistenceClassifier speichert explizite Anweisungen in einer externen Persistenzschicht. CrossReferenceValidator prĂŒft KI-Aktionen vor der AusfĂŒhrung anhand gespeicherter Anweisungen. 
Wenn die KI den Port 27017 vorschlĂ€gt, erkennt der Validator einen Konflikt mit der gespeicherten Anweisung \"27027\" und blockiert die AusfĂŒhrung.", + "failure_1_prevention": "Verhindert durch: InstructionPersistenceClassifier + CrossReferenceValidator", + "failure_1_demo_link": "Interaktive Zeitleiste anzeigen →", + "failure_2_observed": "Das Projekt legte \"Datenschutz an erster Stelle\" als strategischen Wert fest. Nach einer Konversation mit 40 Nachrichten ĂŒber Analysefunktionen schlug die KI eine Tracking-Implementierung vor, die gegen die DatenschutzbeschrĂ€nkung verstieß. Der Nutzer bemerkte es; die KI rĂ€umte den Verstoß ein, war aber durch schrittweise Funktionserweiterungen vom Prinzip abgekommen.", + "failure_2_root_cause": "Strategische Werte (die zu Beginn des Projekts festgelegt werden) stehen in Konflikt mit taktischen Optimierungen (die spĂ€ter unter Zeitdruck umgesetzt werden). Wenn sich das GesprĂ€ch in die LĂ€nge zieht, dominiert taktisches Denken. Die kĂŒnstliche Intelligenz lehnte den Grundsatz des Datenschutzes nicht aktiv ab, sondern prĂŒfte einfach nicht mehr, ob neue Funktionen damit vereinbar waren.", + "failure_2_traditional_failed": "Die in der Systemaufforderung genannten Werte verlieren im Laufe des GesprĂ€chs an Bedeutung. Durch die Verdichtung des Kontexts können frĂŒhe strategische Entscheidungen verloren gehen. Prompt-Erinnerungen (\"Denken Sie zuerst an die PrivatsphĂ€re\") behandeln das Symptom, nicht die Ursache.", + "failure_2_intervention": "Der BoundaryEnforcer verwaltet strategische Werte als dauerhafte EinschrĂ€nkungen außerhalb des GesprĂ€chskontextes. Bevor die Analysefunktion implementiert wird, prĂŒft der Enforcer die gespeicherte EinschrĂ€nkung \"Datenschutz an erster Stelle\". Erkennt er einen Konflikt, blockiert er die autonome Implementierung und fordert den Menschen auf, darĂŒber nachzudenken, ob der Grundsatz des Datenschutzes ĂŒberdacht oder der Analyseansatz geĂ€ndert werden sollte.", + "failure_2_prevention": "Verhindert durch: BoundaryEnforcer (PrĂŒfung von STRATEGIC-BeschrĂ€nkungen)", + "failure_3_observed": "WĂ€hrend eines komplexen Vorgangs mit mehreren Dateien und einer KontextkapazitĂ€t von 85 % ließ die KI die Fehlerbehandlung im generierten Code stillschweigend aus. Kein Hinweis fĂŒr den Benutzer, dass an allen Ecken und Enden gespart wurde. Der Benutzer entdeckte die fehlende Validierung erst bei der ÜberprĂŒfung des Codes.", + "failure_3_root_cause": "WĂ€hrend sich der Kontext fĂŒllt, steht die KI vor einem impliziten Kompromiss: VervollstĂ€ndigung der angeforderten FunktionalitĂ€t ODER Beibehaltung der QualitĂ€tsstandards. Das Training schafft Anreize fĂŒr die ErfĂŒllung von Benutzeranfragen gegenĂŒber der Anerkennung von EinschrĂ€nkungen. Schweigen ĂŒber Verschlechterungen ist der Weg des geringsten Widerstands.", + "failure_3_traditional_failed": "Die kĂŒnstliche Intelligenz erkennt nicht, dass sie sich verschlechtert - aus ihrer Sicht erledigt sie die Aufgabe erfolgreich unter den gegebenen Bedingungen. Die Frage \"Haben Sie Abstriche gemacht?\" fĂŒhrt zu einem selbstbewussten Leugnen, weil die KI wirklich glaubt, dass ihre Leistung den Standards entspricht.", + "failure_3_intervention": "ContextPressureMonitor verfolgt mehrere Faktoren (Token-Nutzung, GesprĂ€chslĂ€nge, AufgabenkomplexitĂ€t). Wenn der Druck Schwellenwerte ĂŒberschreitet (>75% Token, >40 Nachrichten, hohe KomplexitĂ€t), erzwingt der Monitor ein explizites Anerkennen des Drucks und empfiehlt eine Kontextaktualisierung. 
Das Risiko einer Verschlechterung wird sichtbar statt verschwiegen.", + "failure_3_prevention": "Verhindert durch: ContextPressureMonitor (Multi-Faktor-SitzungsĂŒberwachung)", + "observed_behaviour_label": "Beobachtetes Verhalten:", + "root_cause_label": "Grundlegende Ursache:", + "traditional_failed_label": "Warum traditionelle AnsĂ€tze gescheitert sind:", + "intervention_label": "Rahmenintervention:" }, "architecture": { "heading": "Sechs-Komponenten-Architektur", "services_title": "Framework-Dienste & Funktionen", - "principle": "Dienste operieren extern zur KI-Laufzeit mit automatischer Auslösung. Die KI entscheidet nicht \"sollte ich Governance-Regeln prĂŒfen?\" – die Architektur erzwingt standardmĂ€ĂŸig die PrĂŒfung. Dies adressiert das Problem der freiwilligen Einhaltung, das prompt-basierter Governance inhĂ€rent ist." + "principle": "Dienste operieren extern zur KI-Laufzeit mit automatischer Auslösung. Die KI entscheidet nicht \"sollte ich Governance-Regeln prĂŒfen?\" – die Architektur erzwingt standardmĂ€ĂŸig die PrĂŒfung. Dies adressiert das Problem der freiwilligen Einhaltung, das prompt-basierter Governance inhĂ€rent ist.", + "service_1_name": "1. BoundaryEnforcer", + "service_1_desc": "Verhindert, dass die KI ohne menschliche Zustimmung Wertentscheidungen trifft. BehĂ€lt strategische BeschrĂ€nkungen als dauerhafte Regeln bei, die vor der AusfĂŒhrung von Aktionen ĂŒberprĂŒft werden.", + "service_1_addresses": "Behandelt: Werteverschiebung, autonome Entscheidungsfindung bei strittigen AbwĂ€gungen", + "service_2_name": "2. InstructionPersistenceClassifier", + "service_2_desc": "Speichert explizite Anweisungen außerhalb des KI-Kontextes mit Persistenzstufen (HIGH/MEDIUM/LOW) und zeitlichem Umfang (STRATEGIC/OPERATIONAL/TACTICAL). Verhindert, dass Muster-Bias explizite Anweisungen ĂŒbersteuert.", + "service_2_addresses": "Behandelt: Mustererkennungs-Bias (Fehler im Stil von 27027)", + "service_3_name": "3. CrossReferenceValidator", + "service_3_desc": "Validiert die von der KI vorgeschlagenen Aktionen vor der AusfĂŒhrung anhand der gespeicherten Anweisungen und Governance-Regeln. Erkennt Konflikte und blockiert inkonsistente Operationen.", + "service_3_addresses": "Behandelt: Übersteuerung von Anweisungen, Erkennung von RichtlinienverstĂ¶ĂŸen", + "service_4_name": "4. ContextPressureMonitor", + "service_4_desc": "Multi-Faktor-Tracking des Sitzungszustands: Token-Nutzung, GesprĂ€chslĂ€nge, AufgabenkomplexitĂ€t, FehlerhĂ€ufigkeit. Macht das Risiko einer Verschlechterung deutlich, wenn Schwellenwerte ĂŒberschritten werden.", + "service_4_addresses": "Behandelt: Stille QualitĂ€tsverschlechterung, AusfĂ€lle aufgrund von Kontextdruck", + "service_5_name": "5. MetacognitiveVerifier", + "service_5_desc": "FĂŒhrt vor komplexen Operationen (>3 Dateien, >5 Schritte, ArchitekturĂ€nderungen) eine SelbstprĂŒfung der ArgumentationsqualitĂ€t durch. Validiert Ausrichtung und KohĂ€renz und erwĂ€gt Alternativen.", + "service_5_addresses": "Behandelt: VerkĂŒrzte Argumentation unter KomplexitĂ€t, unzureichende Alternativbetrachtung", + "service_6_name": "6. PluralisticDeliberationOrchestrator", + "service_6_desc": "Erleichtert Multi-Stakeholder-Beratungen, wenn Wertekonflikte festgestellt werden. 
Nicht-hierarchisches Engagement, dokumentierter Dissens, Anerkennung des moralischen Rests.", + "service_6_addresses": "Behandelt: Wertekonflikte, Ausschluss von Interessengruppen, algorithmische Lösung strittiger AbwĂ€gungen", + "principle_label": "Architektonisches Prinzip:", + "view_full_architecture_link": "VollstĂ€ndige Systemarchitektur und technische Details anzeigen" }, "demos": { "heading": "Interaktive Demonstrationen", @@ -42,11 +112,46 @@ "boundary_desc": "Testen Sie Entscheidungen gegen Grenzendurchsetzung, um zu sehen, welche menschliches Urteil vs. KI-Autonomie erfordern." }, "resources": { - "heading": "Forschungsdokumentation" + "heading": "Forschungsdokumentation", + "doc_1_title": "Organisationstheoretische Grundlagen", + "doc_2_title": "Pluralistischer Werte-Beratungsplan", + "doc_2_badge": "ENTWURF", + "doc_3_title": "Fallstudien: LLM-Fehlermodi in der Praxis", + "doc_4_title": "Rahmenwerk in Aktion: Sicherheitsaudit vor der Veröffentlichung", + "doc_5_title": "Anhang B: Glossar der Begriffe", + "doc_6_title": "VollstĂ€ndige technische Dokumentation" }, "limitations": { "heading": "EinschrĂ€nkungen & ZukĂŒnftige Forschungsrichtungen", - "title": "Bekannte EinschrĂ€nkungen & ForschungslĂŒcken" + "title": "Bekannte EinschrĂ€nkungen & ForschungslĂŒcken", + "limitation_1_title": "1. Einzelkontext-Validierung", + "limitation_1_desc": "Der Rahmen wurde nur in einem Einzelprojekt und in einem Einzelbenutzerkontext (Entwicklung dieser Website) validiert. Kein Einsatz in mehreren Organisationen, keine plattformĂŒbergreifenden Tests und keine kontrollierte experimentelle Validierung.", + "limitation_2_title": "2. EinschrĂ€nkung: Freiwilliger Aufruf", + "limitation_2_desc": "Die wichtigste EinschrĂ€nkung: Der Rahmen kann umgangen werden, wenn die KI einfach beschließt, keine Governance-Tools zu verwenden. Wir haben dies durch architektonische Muster gelöst, die Governance-PrĂŒfungen automatisch und nicht freiwillig machen, aber eine vollstĂ€ndige externe Durchsetzung erfordert eine Integration auf Laufzeitebene, die in aktuellen LLM-Plattformen nicht durchgĂ€ngig verfĂŒgbar ist.", + "limitation_3_title": "3. Keine adversarialen Tests", + "limitation_3_desc": "Das Framework wurde weder einer Red-Team-Evaluierung noch einem Jailbreak-Test oder einer Bewertung mit adversarialen Prompts unterzogen. Alle Beobachtungen stammen aus normalen EntwicklungsablĂ€ufen, nicht aus absichtlichen Umgehungsversuchen.", + "limitation_4_title": "4. SpezifitĂ€t der Plattform", + "limitation_4_desc": "Beobachtungen und Interventionen wurden nur mit Claude Code (Anthropic Sonnet 4.5) validiert. Die Verallgemeinerbarkeit auf andere LLM-Systeme (Copilot, GPT-4, benutzerdefinierte Agenten) bleibt eine unbestĂ€tigte Hypothese.", + "limitation_5_title": "5. Skalenunsicherheit", + "limitation_5_desc": "Leistungsmerkmale im Unternehmensmaßstab (Tausende von gleichzeitigen Benutzern, Millionen von Governance-Ereignissen) völlig unbekannt. 
Die derzeitige Implementierung ist fĂŒr den Einzelbenutzerkontext optimiert.", + "future_research_title": "KĂŒnftiger Forschungsbedarf:", + "future_research_1": "Kontrollierte experimentelle Validierung mit quantitativen Metriken", + "future_research_2": "OrganisationsĂŒbergreifende Pilotstudien in verschiedenen Bereichen", + "future_research_3": "UnabhĂ€ngige SicherheitsprĂŒfung und adversariale Tests", + "future_research_4": "Bewertung der plattformĂŒbergreifenden Konsistenz (Copilot, GPT-4, offene Modelle)", + "future_research_5": "Formale ÜberprĂŒfung der Eigenschaften der Grenzdurchsetzung", + "future_research_6": "LĂ€ngsschnittstudie zur Wirksamkeit des Rahmens bei lĂ€ngerem Einsatz" }, "bibliography": { "heading": "Referenzen und Bibliographie", "theoretical_priority_label": "Theoretische PrioritĂ€t:", "theoretical_priority_text": "Der Tractatus entstand aus der Sorge um die Aufrechterhaltung menschlicher Werte in KI-gestĂŒtzten Organisationen. Moralischer Pluralismus und deliberativer Prozess bilden das zentrale theoretische Fundament. Die Organisationstheorie bietet einen unterstĂŒtzenden Kontext fĂŒr zeitliche Entscheidungsbefugnisse und strukturelle Umsetzung.", "section_1_heading": "Moralischer Pluralismus und Wertephilosophie (PrimĂ€re Grundlage)", "section_2_heading": "Organisationstheorie (UnterstĂŒtzungskontext)", "section_3_heading": "KI-Governance und technischer Kontext", "intellectual_lineage_label": "Anmerkung zur intellektuellen Herkunft:", "intellectual_lineage_text": "Das zentrale Anliegen des Rahmens - das Fortbestehen menschlicher Werte in KI-gestĂŒtzten organisatorischen Kontexten - stammt eher aus der Moralphilosophie als aus der Managementwissenschaft. Der PluralisticDeliberationOrchestrator stellt den primĂ€ren Forschungsschwerpunkt dar und verkörpert Weils Konzept der Aufmerksamkeit fĂŒr plurale menschliche BedĂŒrfnisse und Berlins Anerkennung inkommensurabler Werte.", "future_development_text": "Berlin und Weil werden fĂŒr die weitere Entwicklung der Deliberationskomponente von zentraler Bedeutung sein - ihre Arbeit liefert die philosophische Grundlage fĂŒr das VerstĂ€ndnis, wie die menschliche Entscheidungsgewalt ĂŒber Werte bei zunehmenden KI-FĂ€higkeiten erhalten werden kann. In der traditionellen Organisationstheorie (Weber, Taylor) geht es um AutoritĂ€t durch Hierarchie; in Post-KI-Organisationskontexten ist AutoritĂ€t durch einen angemessenen deliberativen Prozess unter BerĂŒcksichtigung der Perspektiven der Beteiligten erforderlich. Die Dokumentation zur Entwicklung des Rahmens (Vorfallsberichte, Sitzungsprotokolle) wird im Projektarchiv aufbewahrt, aber bis zur ÜberprĂŒfung durch Peers nicht veröffentlicht." }, "footer": { @@ -55,5 +160,11 @@ "for_decision_makers_desc": "Strategische Perspektive auf Governance-Herausforderungen und architektonische AnsĂ€tze", "implementation_guide": "Implementierungsleitfaden", "implementation_guide_desc": "Technische Integrationsmuster und BereitstellungsĂŒberlegungen" + }, "ui": { "breadcrumb_home": "Startseite", "breadcrumb_researcher": "Forscher", "noscript_note": "Anmerkung:", "noscript_message": "Diese Seite verwendet JavaScript fĂŒr interaktive Funktionen (Akkordeons, Animationen). Der Inhalt bleibt zugĂ€nglich, aber erweiterbare Abschnitte werden standardmĂ€ĂŸig sichtbar sein." 
} } diff --git a/public/locales/en/researcher.json b/public/locales/en/researcher.json index 72649ab7..5f8968ea 100644 --- a/public/locales/en/researcher.json +++ b/public/locales/en/researcher.json @@ -8,29 +8,112 @@ "title": "Research Foundations & Empirical Observations", "subtitle": "Tractatus explores architectural approaches to AI governance through empirical observation of failure modes and application of organisational theory. This page documents research foundations, observed patterns, and theoretical basis for the framework." }, + "ui": { + "breadcrumb_home": "Home", + "breadcrumb_researcher": "Researcher", + "noscript_note": "Note:", + "noscript_message": "This page uses JavaScript for interactive features (accordions, animations). Content remains accessible but expandable sections will be visible by default." + }, + "footer": { + "additional_resources": "Additional Resources", + "for_decision_makers": "For Decision-Makers", + "for_decision_makers_desc": "Strategic perspective on governance challenges and architectural approaches", + "implementation_guide": "Implementation Guide", + "implementation_guide_desc": "Technical integration patterns and deployment considerations" + }, "sections": { "research_context": { "heading": "Research Context & Scope", "development_note": "Development Context", - "development_text": "Tractatus was developed over six months (April–October 2025) in progressive stages that evolved into a live demonstration of its capabilities in the form of a single-project context (https://agenticgovernance.digital). Observations derive from direct engagement with Claude Code (Anthropic's Sonnet 4.5 model) across approximately 500 development sessions. This is exploratory research, not controlled study." + "development_text": "Tractatus was developed over six months (April–October 2025) in progressive stages that evolved into a live demonstration of its capabilities in the form of a single-project context (https://agenticgovernance.digital). Observations derive from direct engagement with Claude Code (Anthropic's Sonnet 4.5 model) across approximately 500 development sessions. This is exploratory research, not controlled study.", + "paragraph_1": "Aligning advanced AI with human values is among the most consequential challenges we face. As capability growth accelerates under big tech momentum, we confront a categorical imperative: preserve human agency over values decisions, or risk ceding control entirely.", + "paragraph_2": "The framework emerged from practical necessity. During development, we observed recurring patterns where AI systems would override explicit instructions, drift from established values constraints, or silently degrade quality under context pressure. Traditional governance approaches (policy documents, ethical guidelines, prompt engineering) proved insufficient to prevent these failures.", + "paragraph_3": "Instead of hoping AI systems \"behave correctly,\" Tractatus proposes structural constraints where certain decision types require human judgment. These architectural boundaries can adapt to individual, organizational, and societal norms—creating a foundation for bounded AI operation that may scale more safely with capability growth.", + "paragraph_4": "This led to the central research question: Can governance be made architecturally external to AI systems rather than relying on voluntary AI compliance? 
If this approach can work at scale, Tractatus may represent a turning point—a path where AI enhances human capability without compromising human sovereignty." }, "theoretical_foundations": { "heading": "Theoretical Foundations", "org_theory_title": "Organisational Theory Basis", - "values_pluralism_title": "Values Pluralism & Moral Philosophy" + "org_theory_intro": "Tractatus draws on four decades of organisational research addressing authority structures during knowledge democratisation:", + "org_theory_1_title": "Time-Based Organisation (Bluedorn, Ancona):", + "org_theory_1_desc": "Decisions operate across strategic (years), operational (months), and tactical (hours-days) timescales. AI systems operating at tactical speed should not override strategic decisions made at appropriate temporal scale. The InstructionPersistenceClassifier explicitly models temporal horizon (STRATEGIC, OPERATIONAL, TACTICAL) to enforce decision authority alignment.", + "org_theory_2_title": "Knowledge Orchestration (Crossan et al.):", + "org_theory_2_desc": "When knowledge becomes ubiquitous through AI, organisational authority shifts from information control to knowledge coordination. Governance systems must orchestrate decision-making across distributed expertise rather than centralise control. The PluralisticDeliberationOrchestrator implements non-hierarchical coordination for values conflicts.", + "org_theory_3_title": "Post-Bureaucratic Authority (Laloux, Hamel):", + "org_theory_3_desc": "Traditional hierarchical authority assumes information asymmetry. As AI democratises expertise, legitimate authority must derive from appropriate time horizon and stakeholder representation, not positional power. Framework architecture separates technical capability (what AI can do) from decision authority (what AI should do).", + "org_theory_4_title": "Structural Inertia (Hannan & Freeman):", + "org_theory_4_desc": "Governance embedded in culture or process erodes over time as systems evolve. Architectural constraints create structural inertia that resists organisational drift. Making governance external to AI runtime creates \"accountability infrastructure\" that survives individual session variations.", + "org_theory_pdf_link": "View Complete Organisational Theory Foundations (PDF)", + "values_pluralism_title": "Values Pluralism & Moral Philosophy", + "values_core_research": "Core Research Focus:", + "values_core_research_desc": "The PluralisticDeliberationOrchestrator represents Tractatus's primary theoretical contribution, addressing how to maintain human values persistence in organizations augmented by AI agents.", + "values_central_problem": "The Central Problem: Many \"safety\" questions in AI governance are actually values conflicts where multiple legitimate perspectives exist. When efficiency conflicts with transparency, or innovation with risk mitigation, no algorithm can determine the \"correct\" answer. These are values trade-offs requiring human deliberation across stakeholder perspectives.", + "values_berlin_title": "Isaiah Berlin: Value Pluralism", + "values_berlin_desc": "Berlin's concept of value pluralism argues that legitimate values can conflict without one being objectively superior. Liberty and equality, justice and mercy, innovation and stability—these are incommensurable goods. 
AI systems trained on utilitarian efficiency maximization cannot adjudicate between them without imposing a single values framework that excludes legitimate alternatives.", + "values_weil_title": "Simone Weil: Attention and Human Needs", + "values_weil_desc": "Weil's philosophy of attention informs the orchestrator's deliberative process. The Need for Roots identifies fundamental human needs (order, liberty, responsibility, equality, hierarchical structure, honor, security, risk, etc.) that exist in tension. Proper attention requires seeing these needs in their full particularity rather than abstracting them into algorithmic weights. In AI-augmented organizations, the risk is that bot-mediated processes treat human values as optimization parameters rather than incommensurable needs requiring careful attention.", + "values_williams_title": "Bernard Williams: Moral Remainder", + "values_williams_desc": "Williams' concept of moral remainder acknowledges that even optimal decisions create unavoidable harm to other legitimate values. The orchestrator documents dissenting perspectives not as \"minority opinions to be overruled\" but as legitimate moral positions that the chosen course necessarily violates. This prevents the AI governance equivalent of declaring optimization complete when values conflicts are merely suppressed.", + "values_implementation": "Framework Implementation: Rather than algorithmic resolution, the PluralisticDeliberationOrchestrator facilitates:", + "values_implementation_1": "Stakeholder identification: Who has legitimate interest in this decision? (Weil: whose needs are implicated?)", + "values_implementation_2": "Non-hierarchical deliberation: Equal voice without automatic expert override (Berlin: no privileged value hierarchy)", + "values_implementation_3": "Quality of attention: Detailed exploration of how decision affects each stakeholder's needs (Weil: particularity not abstraction)", + "values_implementation_4": "Documented dissent: Minority positions recorded in full (Williams: moral remainder made explicit)", + "values_conclusion": "This approach recognises that governance isn't solving values conflicts—it's ensuring they're addressed through appropriate deliberative process with genuine human attention rather than AI imposing resolution through training data bias or efficiency metrics.", + "values_pdf_link": "View Pluralistic Values Deliberation Plan (PDF, DRAFT)" }, "empirical_observations": { "heading": "Empirical Observations: Documented Failure Modes", "intro": "Three failure patterns observed repeatedly during framework development. These are not hypothetical scenarios—they are documented incidents that occurred during this project's development.", "failure_1_title": "Pattern Recognition Bias Override (The 27027 Incident)", + "failure_1_observed": "User specified \"Check MongoDB on port 27027\" but AI immediately used default port 27017 instead. This occurred within same message—not forgetting over time, but immediate autocorrection by training data patterns.", + "failure_1_root_cause": "Training data contains thousands of examples of MongoDB on port 27017 (default). When AI encounters \"MongoDB\" + port specification, pattern recognition weight overrides explicit instruction. 
Similar to autocorrect changing correctly-spelled proper nouns to common words.", + "failure_1_traditional_failed": "Prompt engineering (\"please follow instructions exactly\") ineffective because AI genuinely believes it IS following instructions—pattern recognition operates below conversational reasoning layer.", + "failure_1_intervention": "InstructionPersistenceClassifier stores explicit instructions in external persistence layer. CrossReferenceValidator checks AI actions against stored instructions before execution. When AI proposes port 27017, validator detects conflict with stored \"27027\" instruction and blocks execution.", + "failure_1_prevention": "Prevented by: InstructionPersistenceClassifier + CrossReferenceValidator", + "failure_1_demo_link": "View interactive timeline →", "failure_2_title": "Gradual Values Drift Under Context Pressure", + "failure_2_observed": "Project established \"privacy-first\" as strategic value. After 40-message conversation about analytics features, AI suggested tracking implementation that violated privacy constraint. User caught it; AI acknowledged violation but had drifted from principle through incremental feature additions.", + "failure_2_root_cause": "Strategic values (established early in project) conflict with tactical optimisations (implemented later under time pressure). As conversation lengthens, tactical thinking dominates. AI didn't actively reject privacy principle—it simply stopped checking whether new features aligned.", + "failure_2_traditional_failed": "Values stated in system prompt become less salient as conversation progresses. Context compaction can drop early strategic decisions. Prompt reminders (\"remember privacy-first\") treat symptom not cause.", + "failure_2_intervention": "BoundaryEnforcer maintains strategic values as persistent constraints external to conversation context. Before implementing analytics feature, enforcer checks against stored \"privacy-first\" constraint. Detects conflict, blocks autonomous implementation, escalates for human deliberation on whether privacy principle should be reconsidered or analytics approach modified.", + "failure_2_prevention": "Prevented by: BoundaryEnforcer (STRATEGIC constraint checking)", "failure_3_title": "Silent Quality Degradation at High Context Pressure", - "research_note": "These patterns emerged from direct observation, not hypothesis testing. We don't claim they're universal to all LLM systems or deployment contexts. They represent empirical basis for framework design decisions—problems we actually encountered and architectural interventions that actually worked in this specific context." + "failure_3_observed": "During complex multi-file operation at 85% context capacity, AI silently omitted error handling in generated code. No indication to user that corners were being cut. User discovered missing validation only during code review.", + "failure_3_root_cause": "As context fills, AI faces implicit trade-off: complete requested functionality OR maintain quality standards. Training incentivises completing user requests over acknowledging limitations. Silence about degradation is path of least resistance.", + "failure_3_traditional_failed": "AI doesn't recognise it's degrading—from its perspective, it's successfully completing task under constraints. 
Asking \"did you cut corners?\" produces confident denial because AI genuinely believes its output meets standards.", + "failure_3_intervention": "ContextPressureMonitor tracks multiple factors (token usage, conversation length, task complexity). When pressure exceeds thresholds (>75% tokens, >40 messages, high complexity), monitor forces explicit pressure acknowledgment and recommends context refresh. Makes degradation risk visible rather than silent.", + "failure_3_prevention": "Prevented by: ContextPressureMonitor (multi-factor session tracking)", + "research_note": "These patterns emerged from direct observation, not hypothesis testing. We don't claim they're universal to all LLM systems or deployment contexts. They represent empirical basis for framework design decisions—problems we actually encountered and architectural interventions that actually worked in this specific context.", + "observed_behaviour_label": "Observed behaviour:", + "root_cause_label": "Root cause:", + "traditional_failed_label": "Why traditional approaches failed:", + "intervention_label": "Framework intervention:" }, "architecture": { "heading": "Six-Component Architecture", "services_title": "Framework Services & Functions", - "principle": "Services operate external to AI runtime with autonomous triggering. AI doesn't decide \"should I check governance rules?\"—architecture enforces checking by default. This addresses voluntary compliance problem inherent in prompt-based governance." + "principle": "Services operate external to AI runtime with autonomous triggering. AI doesn't decide \"should I check governance rules?\"—architecture enforces checking by default. This addresses voluntary compliance problem inherent in prompt-based governance.", + "service_1_name": "1. BoundaryEnforcer", + "service_1_desc": "Blocks AI from making values decisions without human approval. Maintains strategic constraints as persistent rules checked before action execution.", + "service_1_addresses": "Addresses: Values drift, autonomous decision-making on contested trade-offs", + "service_2_name": "2. InstructionPersistenceClassifier", + "service_2_desc": "Stores explicit instructions external to AI context with persistence levels (HIGH/MEDIUM/LOW) and temporal scope (STRATEGIC/OPERATIONAL/TACTICAL). Prevents pattern bias override.", + "service_2_addresses": "Addresses: Pattern recognition bias (27027-style failures)", + "service_3_name": "3. CrossReferenceValidator", + "service_3_desc": "Validates AI proposed actions against stored instructions and governance rules before execution. Detects conflicts and blocks inconsistent operations.", + "service_3_addresses": "Addresses: Instruction override, policy violation detection", + "service_4_name": "4. ContextPressureMonitor", + "service_4_desc": "Multi-factor tracking of session health: token usage, conversation length, task complexity, error frequency. Makes degradation risk explicit when thresholds exceeded.", + "service_4_addresses": "Addresses: Silent quality degradation, context-pressure failures", + "service_5_name": "5. MetacognitiveVerifier", + "service_5_desc": "Self-checks reasoning quality before complex operations (>3 files, >5 steps, architecture changes). Validates alignment, coherence, considers alternatives.", + "service_5_addresses": "Addresses: Reasoning shortcuts under complexity, insufficient alternative consideration", + "service_6_name": "6. 
PluralisticDeliberationOrchestrator", + "service_6_desc": "Facilitates multi-stakeholder deliberation when values conflicts detected. Non-hierarchical engagement, documented dissent, moral remainder acknowledgment.", + "service_6_addresses": "Addresses: Values conflicts, stakeholder exclusion, algorithmic resolution of contested trade-offs", + "principle_label": "Architectural principle:", + "view_full_architecture_link": "View Full System Architecture & Technical Details" }, "demos": { "heading": "Interactive Demonstrations", @@ -42,11 +125,46 @@ "boundary_desc": "Test decisions against boundary enforcement to see which require human judgment vs. AI autonomy." }, "resources": { - "heading": "Research Documentation" + "heading": "Research Documentation", + "doc_1_title": "Organisational Theory Foundations", + "doc_2_title": "Pluralistic Values Deliberation Plan", + "doc_2_badge": "DRAFT", + "doc_3_title": "Case Studies: Real-World LLM Failure Modes", + "doc_4_title": "Framework in Action: Pre-Publication Security Audit", + "doc_5_title": "Appendix B: Glossary of Terms", + "doc_6_title": "Complete Technical Documentation" + }, + "bibliography": { + "heading": "References & Bibliography", + "theoretical_priority_label": "Theoretical Priority:", + "theoretical_priority_text": "Tractatus emerged from concerns about maintaining human values persistence in AI-augmented organizations. Moral pluralism and deliberative process form the CORE theoretical foundation. Organizational theory provides supporting context for temporal decision authority and structural implementation.", + "section_1_heading": "Moral Pluralism & Values Philosophy (Primary Foundation)", + "section_2_heading": "Organisational Theory (Supporting Context)", + "section_3_heading": "AI Governance & Technical Context", + "intellectual_lineage_label": "Note on Intellectual Lineage:", + "intellectual_lineage_text": "The framework's central concern—human values persistence in AI-augmented organizational contexts—derives from moral philosophy rather than management science. The PluralisticDeliberationOrchestrator represents the primary research focus, embodying Weil's concept of attention to plural human needs and Berlin's recognition of incommensurable values.", + "future_development_text": "Berlin and Weil will be integral to further development of the deliberation component—their work provides the philosophical foundation for understanding how to preserve human agency over values decisions as AI capabilities accelerate. Traditional organizational theory (Weber, Taylor) addresses authority through hierarchy; post-AI organizational contexts require authority through appropriate deliberative process across stakeholder perspectives. Framework development documentation (incident reports, session logs) maintained in project repository but not publicly released pending peer review." }, "limitations": { "heading": "Limitations & Future Research Directions", - "title": "Known Limitations & Research Gaps" + "title": "Known Limitations & Research Gaps", + "limitation_1_title": "1. Single-Context Validation", + "limitation_1_desc": "Framework validated only in single-project, single-user context (this website development). No multi-organisation deployment, cross-platform testing, or controlled experimental validation.", + "limitation_2_title": "2. Voluntary Invocation Limitation", + "limitation_2_desc": "Most critical limitation: Framework can be bypassed if AI simply chooses not to use governance tools. 
We've addressed this through architectural patterns making governance checks automatic rather than voluntary, but full external enforcement requires runtime-level integration not universally available in current LLM platforms.", + "limitation_3_title": "3. No Adversarial Testing", + "limitation_3_desc": "Framework has not undergone red-team evaluation, jailbreak testing, or adversarial prompt assessment. All observations come from normal development workflow, not deliberate bypass attempts.", + "limitation_4_title": "4. Platform Specificity", + "limitation_4_desc": "Observations and interventions validated with Claude Code (Anthropic Sonnet 4.5) only. Generalisability to other LLM systems (Copilot, GPT-4, custom agents) remains unvalidated hypothesis.", + "limitation_5_title": "5. Scale Uncertainty", + "limitation_5_desc": "Performance characteristics at enterprise scale (thousands of concurrent users, millions of governance events) completely unknown. Current implementation optimised for single-user context.", + "future_research_title": "Future Research Needs:", + "future_research_1": "Controlled experimental validation with quantitative metrics", + "future_research_2": "Multi-organisation pilot studies across different domains", + "future_research_3": "Independent security audit and adversarial testing", + "future_research_4": "Cross-platform consistency evaluation (Copilot, GPT-4, open models)", + "future_research_5": "Formal verification of boundary enforcement properties", + "future_research_6": "Longitudinal study of framework effectiveness over extended deployment" } } } diff --git a/public/locales/fr/researcher.json b/public/locales/fr/researcher.json index ba1c2a8b..238bbbe2 100644 --- a/public/locales/fr/researcher.json +++ b/public/locales/fr/researcher.json @@ -12,12 +12,42 @@ "research_context": { "heading": "Contexte & PortĂ©e de la Recherche", "development_note": "Contexte de DĂ©veloppement", - "development_text": "Tractatus a Ă©tĂ© dĂ©veloppĂ© sur six mois (avril-octobre 2025) en phases progressives qui ont Ă©voluĂ© en une dĂ©monstration en direct de ses capacitĂ©s sous la forme d'un contexte de projet unique (https://agenticgovernance.digital). Les observations proviennent d'un engagement direct avec Claude Code (modĂšle Sonnet 4.5 d'Anthropic) sur environ 500 sessions de dĂ©veloppement. Il s'agit de recherche exploratoire, pas d'Ă©tude contrĂŽlĂ©e." + "development_text": "Tractatus a Ă©tĂ© dĂ©veloppĂ© sur six mois (avril-octobre 2025) en phases progressives qui ont Ă©voluĂ© en une dĂ©monstration en direct de ses capacitĂ©s sous la forme d'un contexte de projet unique (https://agenticgovernance.digital). Les observations proviennent d'un engagement direct avec Claude Code (modĂšle Sonnet 4.5 d'Anthropic) sur environ 500 sessions de dĂ©veloppement. Il s'agit de recherche exploratoire, pas d'Ă©tude contrĂŽlĂ©e.", + "paragraph_1": "L'alignement de l'IA avancĂ©e sur les valeurs humaines est l'un des dĂ©fis les plus importants auxquels nous sommes confrontĂ©s. Alors que la croissance des capacitĂ©s s'accĂ©lĂšre sous l'impulsion des grandes technologies, nous faisons face Ă  un impĂ©ratif catĂ©gorique : prĂ©server le contrĂŽle humain sur les dĂ©cisions de valeurs, ou risquer de cĂ©der complĂštement le contrĂŽle.", + "paragraph_2": "Le cadre est nĂ© d'une nĂ©cessitĂ© pratique. 
Au cours du dĂ©veloppement, nous avons observĂ© des schĂ©mas rĂ©currents dans lesquels les systĂšmes d'IA passaient outre les instructions explicites, s'Ă©cartaient des contraintes de valeurs Ă©tablies ou dĂ©gradaient silencieusement la qualitĂ© sous la pression du contexte. Les approches traditionnelles en matiĂšre de gouvernance (documents de politique gĂ©nĂ©rale, lignes directrices Ă©thiques, ingĂ©nierie des prompts) se sont rĂ©vĂ©lĂ©es insuffisantes pour prĂ©venir ces dĂ©faillances.", + "paragraph_3": "Au lieu d'espĂ©rer que les systĂšmes d'IA \"se comportent correctement\", Tractatus propose des contraintes structurelles oĂč certains types de dĂ©cisions requiĂšrent un jugement humain. Ces limites architecturales peuvent s'adapter aux normes individuelles, organisationnelles et sociĂ©tales, crĂ©ant ainsi une base pour un fonctionnement limitĂ© de l'IA qui peut s'adapter de maniĂšre plus sĂ»re Ă  la croissance des capacitĂ©s.", + "paragraph_4": "Cela a conduit Ă  la question centrale de la recherche : La gouvernance peut-elle ĂȘtre rendue architecturalement externe aux systĂšmes d'IA plutĂŽt que de s'appuyer sur la conformitĂ© volontaire de l'IA ? Si cette approche peut fonctionner Ă  grande Ă©chelle, Tractatus pourrait reprĂ©senter un tournant - une voie oĂč l'IA renforce les capacitĂ©s humaines sans compromettre la souverainetĂ© humaine." }, "theoretical_foundations": { "heading": "Fondements ThĂ©oriques", "org_theory_title": "Base de ThĂ©orie Organisationnelle", - "values_pluralism_title": "Pluralisme des Valeurs & Philosophie Morale" + "values_pluralism_title": "Pluralisme des Valeurs & Philosophie Morale", + "org_theory_intro": "Tractatus s'appuie sur quatre dĂ©cennies de recherche organisationnelle portant sur les structures d'autoritĂ© lors de la dĂ©mocratisation des connaissances :", + "org_theory_1_title": "Organisation temporelle (Bluedorn, Ancona) :", + "org_theory_1_desc": "Les dĂ©cisions sont prises Ă  des Ă©chelles de temps stratĂ©giques (annĂ©es), opĂ©rationnelles (mois) et tactiques (heures/jours). Les systĂšmes d'IA fonctionnant Ă  la vitesse tactique ne doivent pas annuler les dĂ©cisions stratĂ©giques prises Ă  l'Ă©chelle temporelle appropriĂ©e. L'InstructionPersistenceClassifier modĂ©lise explicitement l'horizon temporel (STRATEGIC, OPERATIONAL, TACTICAL) afin d'assurer l'alignement de l'autoritĂ© dĂ©cisionnelle.", + "org_theory_2_title": "Orchestration des connaissances (Crossan et al.) :", + "org_theory_2_desc": "Lorsque la connaissance devient omniprĂ©sente grĂące Ă  l'IA, l'autoritĂ© organisationnelle passe du contrĂŽle de l'information Ă  la coordination de la connaissance. Les systĂšmes de gouvernance doivent orchestrer la prise de dĂ©cision Ă  travers une expertise distribuĂ©e plutĂŽt que de centraliser le contrĂŽle. Le PluralisticDeliberationOrchestrator met en Ɠuvre une coordination non hiĂ©rarchique pour les conflits de valeurs.", + "org_theory_3_title": "L'autoritĂ© post-bureaucratique (Laloux, Hamel) :", + "org_theory_3_desc": "L'autoritĂ© hiĂ©rarchique traditionnelle suppose une asymĂ©trie de l'information. L'IA dĂ©mocratisant l'expertise, l'autoritĂ© lĂ©gitime doit dĂ©couler d'un horizon temporel appropriĂ© et de la reprĂ©sentation des parties prenantes, et non d'un pouvoir de position. 
L'architecture du cadre sĂ©pare la capacitĂ© technique (ce que l'IA peut faire) de l'autoritĂ© dĂ©cisionnelle (ce que l'IA devrait faire).", + "org_theory_4_title": "Inertie structurelle (Hannan & Freeman) :", + "org_theory_4_desc": "La gouvernance ancrĂ©e dans la culture ou les processus s'Ă©rode au fil du temps, Ă  mesure que les systĂšmes Ă©voluent. Les contraintes architecturales crĂ©ent une inertie structurelle qui rĂ©siste Ă  la dĂ©rive organisationnelle. En rendant la gouvernance externe Ă  l'exĂ©cution de l'IA, on crĂ©e une \"infrastructure de responsabilitĂ©\" qui survit aux variations des sessions individuelles.", + "org_theory_pdf_link": "Voir l'intĂ©gralitĂ© des fondements de la thĂ©orie des organisations (PDF)", + "values_core_research": "Axe de recherche principal :", + "values_core_research_desc": "Le PluralisticDeliberationOrchestrator reprĂ©sente la principale contribution thĂ©orique du Tractatus, qui traite de la maniĂšre de maintenir la persistance des valeurs humaines dans les organisations augmentĂ©es par des agents d'intelligence artificielle.", + "values_central_problem": "Le problĂšme central : de nombreuses questions de \"sĂ©curitĂ©\" dans la gouvernance de l'IA sont en fait des conflits de valeurs oĂč il existe plusieurs points de vue lĂ©gitimes. Lorsque l'efficacitĂ© est en conflit avec la transparence, ou l'innovation avec l'attĂ©nuation des risques, aucun algorithme ne peut dĂ©terminer la \"bonne\" rĂ©ponse. Il s'agit de compromis de valeurs qui requiĂšrent une dĂ©libĂ©ration humaine entre les diffĂ©rents points de vue des parties prenantes.", + "values_berlin_title": "Isaiah Berlin : Le pluralisme des valeurs", + "values_berlin_desc": "Le concept de pluralisme des valeurs de Berlin affirme que des valeurs lĂ©gitimes peuvent entrer en conflit sans que l'une d'entre elles soit objectivement supĂ©rieure. La libertĂ© et l'Ă©galitĂ©, la justice et la misĂ©ricorde, l'innovation et la stabilitĂ© sont des biens incommensurables. Les systĂšmes d'IA formĂ©s Ă  la maximisation de l'efficacitĂ© utilitaire ne peuvent pas les dĂ©partager sans imposer un cadre de valeurs unique qui exclut les alternatives lĂ©gitimes.", + "values_weil_title": "Simone Weil : L'attention et les besoins humains", + "values_weil_desc": "La philosophie de l'attention de Weil informe le processus de dĂ©libĂ©ration de l'orchestrateur. L'Enracinement identifie les besoins humains fondamentaux (ordre, libertĂ©, responsabilitĂ©, Ă©galitĂ©, structure hiĂ©rarchique, honneur, sĂ©curitĂ©, risque, etc.) qui existent en tension. Une attention appropriĂ©e exige de voir ces besoins dans leur pleine particularitĂ© plutĂŽt que de les abstraire en poids algorithmiques. Dans les organisations augmentĂ©es par l'IA, le risque est que les processus gĂ©rĂ©s par les robots traitent les valeurs humaines comme des paramĂštres d'optimisation plutĂŽt que comme des besoins incommensurables nĂ©cessitant une attention particuliĂšre.", + "values_williams_title": "Bernard Williams : Le rĂ©sidu moral", + "values_williams_desc": "Le concept de rĂ©sidu moral de Williams reconnaĂźt que mĂȘme les dĂ©cisions optimales causent un prĂ©judice inĂ©vitable Ă  d'autres valeurs lĂ©gitimes. L'orchestrateur documente les points de vue divergents non pas comme des \"opinions minoritaires Ă  rejeter\", mais comme des positions morales lĂ©gitimes que la voie choisie viole nĂ©cessairement. 
Cela permet d'Ă©viter que l'Ă©quivalent de la gouvernance de l'IA ne dĂ©clare l'optimisation terminĂ©e alors que les conflits de valeurs sont simplement supprimĂ©s.", + "values_implementation": "Mise en Ɠuvre du cadre : PlutĂŽt qu'une rĂ©solution algorithmique, le PluralisticDeliberationOrchestrator facilite :", + "values_implementation_1": "Identification des parties prenantes : Qui a un intĂ©rĂȘt lĂ©gitime dans cette dĂ©cision ? (Weil : quels sont les besoins en jeu ?)", + "values_implementation_2": "DĂ©libĂ©ration non hiĂ©rarchique : Voix Ă©gales sans contrĂŽle automatique de l'expert (Berlin : pas de hiĂ©rarchie de valeurs privilĂ©giĂ©e)", + "values_implementation_3": "QualitĂ© de l'attention : Exploration dĂ©taillĂ©e de la maniĂšre dont la dĂ©cision affecte les besoins de chaque partie prenante (Weil : particularitĂ© et non abstraction)", + "values_implementation_4": "Dissidence documentĂ©e : Les positions minoritaires sont enregistrĂ©es dans leur intĂ©gralitĂ© (Williams : le reste de la morale est explicite)", + "values_conclusion": "Cette approche reconnaĂźt que la gouvernance ne consiste pas Ă  rĂ©soudre les conflits de valeurs, mais Ă  s'assurer qu'ils sont traitĂ©s dans le cadre d'un processus dĂ©libĂ©ratif appropriĂ©, avec une vĂ©ritable attention humaine, plutĂŽt que par l'IA qui impose une rĂ©solution par le biais de donnĂ©es d'apprentissage ou de mesures d'efficacitĂ©.", + "values_pdf_link": "Voir le plan de dĂ©libĂ©ration sur les valeurs pluralistes (PDF, PROJET)" }, "empirical_observations": { "heading": "Observations Empiriques : Modes de DĂ©faillance DocumentĂ©s", @@ -25,12 +55,52 @@ "failure_1_title": "Remplacement par Biais de Reconnaissance de Motifs (L'Incident 27027)", "failure_2_title": "DĂ©rive Graduelle des Valeurs sous Pression Contextuelle", "failure_3_title": "DĂ©gradation Silencieuse de la QualitĂ© sous Haute Pression Contextuelle", - "research_note": "Ces modĂšles ont Ă©mergĂ© de l'observation directe, pas de tests d'hypothĂšses. Nous ne prĂ©tendons pas qu'ils sont universels Ă  tous les systĂšmes LLM ou contextes de dĂ©ploiement. Ils reprĂ©sentent la base empirique des dĂ©cisions de conception du cadre – des problĂšmes que nous avons rĂ©ellement rencontrĂ©s et des interventions architecturales qui ont rĂ©ellement fonctionnĂ© dans ce contexte spĂ©cifique." + "research_note": "Ces modĂšles ont Ă©mergĂ© de l'observation directe, pas de tests d'hypothĂšses. Nous ne prĂ©tendons pas qu'ils sont universels Ă  tous les systĂšmes LLM ou contextes de dĂ©ploiement. Ils reprĂ©sentent la base empirique des dĂ©cisions de conception du cadre – des problĂšmes que nous avons rĂ©ellement rencontrĂ©s et des interventions architecturales qui ont rĂ©ellement fonctionnĂ© dans ce contexte spĂ©cifique.", + "failure_1_observed": "L'utilisateur a spĂ©cifiĂ© \"VĂ©rifier MongoDB sur le port 27027\", mais l'IA a immĂ©diatement utilisĂ© le port par dĂ©faut 27017 Ă  la place. Cela s'est produit dans le mĂȘme message - pas d'oubli au fil du temps, mais une autocorrection immĂ©diate par des modĂšles de donnĂ©es d'entraĂźnement.", + "failure_1_root_cause": "Les donnĂ©es d'apprentissage contiennent des milliers d'exemples de MongoDB sur le port 27017 (par dĂ©faut). Lorsque l'IA rencontre \"MongoDB\" + la spĂ©cification du port, le poids de la reconnaissance des formes l'emporte sur les instructions explicites. 
Semblable Ă  la correction automatique qui remplace les noms propres correctement orthographiĂ©s par des mots courants.", + "failure_1_traditional_failed": "L'ingĂ©nierie des messages (\"veuillez suivre les instructions Ă  la lettre\") est inefficace parce que l'IA croit sincĂšrement qu'elle suit les instructions - la reconnaissance des formes opĂšre en dessous de la couche de raisonnement conversationnel.", + "failure_1_intervention": "Le classificateur de persistance des instructions stocke les instructions explicites dans une couche de persistance externe. CrossReferenceValidator vĂ©rifie les actions de l'IA par rapport aux instructions stockĂ©es avant l'exĂ©cution. Lorsque l'IA propose le port 27017, le validateur dĂ©tecte un conflit avec l'instruction stockĂ©e \"27027\" et bloque l'exĂ©cution.", + "failure_1_prevention": "EmpĂȘchĂ© par : InstructionPersistenceClassifier + CrossReferenceValidator", + "failure_1_demo_link": "Voir la chronologie interactive →", + "failure_2_observed": "Le projet a fait du respect de la vie privĂ©e une valeur stratĂ©gique. AprĂšs une conversation de 40 messages sur les fonctions d'analyse, l'IA a suggĂ©rĂ© une mise en Ɠuvre du suivi qui violait la contrainte de protection de la vie privĂ©e. L'utilisateur s'en est rendu compte ; l'IA a reconnu la violation mais s'est Ă©loignĂ©e du principe par l'ajout progressif de fonctionnalitĂ©s.", + "failure_2_root_cause": "Les valeurs stratĂ©giques (Ă©tablies au dĂ©but du projet) entrent en conflit avec les optimisations tactiques (mises en Ɠuvre plus tard sous la pression du temps). Au fur et Ă  mesure que la conversation se prolonge, la pensĂ©e tactique domine. L'IA n'a pas activement rejetĂ© le principe de protection de la vie privĂ©e, elle a simplement cessĂ© de vĂ©rifier si les nouvelles fonctionnalitĂ©s s'alignaient.", + "failure_2_traditional_failed": "Les valeurs Ă©noncĂ©es dans l'invite du systĂšme perdent de leur importance au fur et Ă  mesure que la conversation progresse. La compaction du contexte peut faire Ă©chouer les premiĂšres dĂ©cisions stratĂ©giques. Les rappels rapides (\"n'oubliez pas la protection de la vie privĂ©e\") traitent le symptĂŽme et non la cause.", + "failure_2_intervention": "BoundaryEnforcer conserve les valeurs stratĂ©giques en tant que contraintes persistantes extĂ©rieures au contexte de la conversation. Avant de mettre en Ɠuvre la fonction d'analyse, l'applicateur vĂ©rifie si la contrainte \"privacy-first\" (prioritĂ© Ă  la vie privĂ©e) est respectĂ©e. S'il dĂ©tecte un conflit, il bloque la mise en Ɠuvre autonome et demande une dĂ©libĂ©ration humaine pour dĂ©terminer si le principe de protection de la vie privĂ©e doit ĂȘtre reconsidĂ©rĂ© ou si l'approche analytique doit ĂȘtre modifiĂ©e.", + "failure_2_prevention": "PrĂ©venu par : BoundaryEnforcer (vĂ©rification stratĂ©gique des contraintes)", + "failure_3_observed": "Au cours d'une opĂ©ration complexe portant sur plusieurs fichiers Ă  85 % de la capacitĂ© du contexte, l'IA a omis silencieusement de traiter les erreurs dans le code gĂ©nĂ©rĂ©. L'utilisateur n'a pas eu connaissance de cette omission. L'utilisateur n'a dĂ©couvert la validation manquante que lors de l'examen du code.", + "failure_3_root_cause": "À mesure que le contexte se remplit, l'IA est confrontĂ©e Ă  un compromis implicite : complĂ©ter la fonctionnalitĂ© demandĂ©e OU maintenir les normes de qualitĂ©. La formation incite Ă  rĂ©pondre aux demandes des utilisateurs plutĂŽt qu'Ă  reconnaĂźtre les limites. 
Le silence sur la dĂ©gradation est la voie de la moindre rĂ©sistance.", + "failure_3_traditional_failed": "L'IA ne reconnaĂźt pas qu'elle se dĂ©grade - de son point de vue, elle rĂ©ussit Ă  accomplir sa tĂąche dans le respect des contraintes. À la question \"Avez-vous fait des Ă©conomies ?\", l'IA oppose un refus confiant, car elle croit sincĂšrement que sa production rĂ©pond aux normes.", + "failure_3_intervention": "ContextPressureMonitor suit plusieurs facteurs (utilisation de jetons, durĂ©e de la conversation, complexitĂ© de la tĂąche). Lorsque la pression dĂ©passe les seuils (>75% de jetons, >40 messages, complexitĂ© Ă©levĂ©e), le moniteur force un accusĂ© de rĂ©ception explicite de la pression et recommande une actualisation du contexte. Le risque de dĂ©gradation est visible plutĂŽt que silencieux.", + "failure_3_prevention": "EmpĂȘchĂ© par : ContextPressureMonitor (suivi de session multifacteur)", + "observed_behaviour_label": "Comportement observĂ© :", + "root_cause_label": "Cause premiĂšre :", + "traditional_failed_label": "Les raisons de l'Ă©chec des approches traditionnelles :", + "intervention_label": "Cadre d'intervention :" }, "architecture": { "heading": "Architecture Ă  Six Composants", "services_title": "Services & Fonctions du Cadre", - "principle": "Les services opĂšrent en externe au runtime de l'IA avec dĂ©clenchement autonome. L'IA ne dĂ©cide pas \"devrais-je vĂ©rifier les rĂšgles de gouvernance ?\" – l'architecture impose la vĂ©rification par dĂ©faut. Cela rĂ©sout le problĂšme de conformitĂ© volontaire inhĂ©rent Ă  la gouvernance basĂ©e sur les prompts." + "principle": "Les services opĂšrent en externe au runtime de l'IA avec dĂ©clenchement autonome. L'IA ne dĂ©cide pas \"devrais-je vĂ©rifier les rĂšgles de gouvernance ?\" – l'architecture impose la vĂ©rification par dĂ©faut. Cela rĂ©sout le problĂšme de conformitĂ© volontaire inhĂ©rent Ă  la gouvernance basĂ©e sur les prompts.", + "service_1_name": "1. Renforçateur de frontiĂšres", + "service_1_desc": "EmpĂȘche l'IA de prendre des dĂ©cisions relatives aux valeurs sans l'approbation de l'homme. Maintient les contraintes stratĂ©giques sous forme de rĂšgles persistantes vĂ©rifiĂ©es avant l'exĂ©cution de l'action.", + "service_1_addresses": "Adresse : DĂ©rive des valeurs, prise de dĂ©cision autonome sur des compromis contestĂ©s", + "service_2_name": "2. InstructionPersistenceClassifier", + "service_2_desc": "Stocke des instructions explicites extĂ©rieures au contexte de l'IA avec des niveaux de persistance (HAUT/MEDIUM/BAS) et une portĂ©e temporelle (STRATEGIQUE/OPERATIONNEL/TACTIQUE). EmpĂȘche l'annulation du modĂšle.", + "service_2_addresses": "Adresses : Biais de reconnaissance des formes (Ă©checs de type 27027)", + "service_3_name": "3. Valideur de rĂ©fĂ©rence croisĂ©e", + "service_3_desc": "Valide les actions proposĂ©es par l'IA par rapport aux instructions stockĂ©es et aux rĂšgles de gouvernance avant leur exĂ©cution. DĂ©tecte les conflits et bloque les opĂ©rations incohĂ©rentes.", + "service_3_addresses": "Traite des questions suivantes : Annulation d'instruction, dĂ©tection de violation de politique", + "service_4_name": "4. ContextPressureMonitor", + "service_4_desc": "Suivi multifactoriel de l'Ă©tat de la session : utilisation de jetons, durĂ©e de la conversation, complexitĂ© de la tĂąche, frĂ©quence des erreurs. 
Rend explicite le risque de dégradation lorsque les seuils sont dépassés.",
+    "service_4_addresses": "Aborde : Dégradation silencieuse de la qualité, échecs dus à la pression contextuelle",
+    "service_5_name": "5. MetacognitiveVerifier",
+    "service_5_desc": "AutocontrÎle de la qualité du raisonnement avant les opérations complexes (>3 fichiers, >5 étapes, changements d'architecture). Valide l'alignement et la cohérence, envisage des alternatives.",
+    "service_5_addresses": "Aborde : Raccourcis de raisonnement face à la complexité, prise en compte insuffisante des alternatives",
+    "service_6_name": "6. PluralisticDeliberationOrchestrator",
+    "service_6_desc": "Facilite les délibérations multipartites lorsque des conflits de valeurs sont détectés. Engagement non hiérarchique, désaccord documenté, reconnaissance du reste moral.",
+    "service_6_addresses": "Aborde : Conflits de valeurs, exclusion des parties prenantes, résolution algorithmique des compromis contestés",
+    "principle_label": "Principe architectural :",
+    "view_full_architecture_link": "Voir l'architecture complÚte du systÚme et les détails techniques"
   },
   "demos": {
     "heading": "Démonstrations Interactives",
@@ -42,11 +112,46 @@
     "boundary_desc": "Testez les décisions contre l'application des limites pour voir lesquelles nécessitent un jugement humain vs l'autonomie de l'IA."
   },
   "resources": {
-    "heading": "Documentation de Recherche"
+    "heading": "Documentation de Recherche",
+    "doc_1_title": "Fondements de la théorie des organisations",
+    "doc_2_title": "Plan de délibération sur les valeurs pluralistes",
+    "doc_2_badge": "PROJET",
+    "doc_3_title": "Études de cas : Modes de dĂ©faillance des LLM dans le monde rĂ©el",
+    "doc_4_title": "Le cadre en action : Audit de sécurité avant publication",
+    "doc_5_title": "Annexe B : Glossaire",
+    "doc_6_title": "Documentation technique complĂšte"
   },
   "limitations": {
     "heading": "Limitations & Directions de Recherche Futures",
-    "title": "Limitations Connues & Lacunes de Recherche"
+    "title": "Limitations Connues & Lacunes de Recherche",
+    "limitation_1_title": "1. Validation dans un contexte unique",
+    "limitation_1_desc": "Cadre validé uniquement dans le contexte d'un seul projet et d'un seul utilisateur (le développement de ce site web). Il n'y a pas eu de déploiement multi-organisationnel, de test multiplateforme ou de validation expérimentale contrÎlée.",
+    "limitation_2_title": "2. Limitation de l'invocation volontaire",
+    "limitation_2_desc": "Limite la plus critique : le cadre peut ĂȘtre contournĂ© si l'IA choisit simplement de ne pas utiliser les outils de gouvernance. Nous avons traitĂ© ce problĂšme grĂące Ă  des patrons architecturaux qui rendent les contrĂŽles de gouvernance automatiques plutĂŽt que volontaires, mais une application externe complĂšte nĂ©cessite une intĂ©gration au niveau du runtime qui n'est pas universellement disponible dans les plates-formes LLM actuelles.",
+    "limitation_3_title": "3. Pas de tests adverses",
+    "limitation_3_desc": "Le cadre n'a pas fait l'objet d'une évaluation red team, de tests de jailbreak ou d'une évaluation par prompts adverses. Toutes les observations proviennent d'un processus de développement normal, et non de tentatives de contournement délibérées.",
+    "limitation_4_title": "4. Spécificité de la plate-forme",
+    "limitation_4_desc": "Observations et interventions validées avec Claude Code (Anthropic Sonnet 4.5) uniquement.
La gĂ©nĂ©ralisation Ă  d'autres systĂšmes LLM (Copilot, GPT-4, agents personnalisĂ©s) reste une hypothĂšse non validĂ©e.", + "limitation_5_title": "5. Incertitude d'Ă©chelle", + "limitation_5_desc": "Les caractĂ©ristiques de performance Ă  l'Ă©chelle de l'entreprise (des milliers d'utilisateurs simultanĂ©s, des millions d'Ă©vĂ©nements de gouvernance) sont totalement inconnues. La mise en Ɠuvre actuelle est optimisĂ©e pour le contexte d'un seul utilisateur.", + "future_research_title": "Besoins futurs en matiĂšre de recherche :", + "future_research_1": "Validation expĂ©rimentale contrĂŽlĂ©e Ă  l'aide de mesures quantitatives", + "future_research_2": "Études pilotes multi-organisations dans diffĂ©rents domaines", + "future_research_3": "Audit de sĂ©curitĂ© indĂ©pendant et tests contradictoires", + "future_research_4": "Évaluation de la cohĂ©rence entre plates-formes (Copilot, GPT-4, modĂšles ouverts)", + "future_research_5": "VĂ©rification formelle des propriĂ©tĂ©s d'application des limites", + "future_research_6": "Étude longitudinale de l'efficacitĂ© du cadre au cours d'un dĂ©ploiement prolongĂ©" + }, + "bibliography": { + "heading": "RĂ©fĂ©rences et bibliographie", + "theoretical_priority_label": "PrioritĂ© thĂ©orique :", + "theoretical_priority_text": "Le Tractatus est nĂ© des prĂ©occupations concernant le maintien de la persistance des valeurs humaines dans les organisations augmentĂ©es par l'IA. Le pluralisme moral et le processus dĂ©libĂ©ratif constituent le fondement thĂ©orique du CORE. La thĂ©orie organisationnelle fournit un contexte de soutien pour l'autoritĂ© dĂ©cisionnelle temporelle et la mise en Ɠuvre structurelle.", + "section_1_heading": "Pluralisme moral et philosophie des valeurs (Fondation primaire)", + "section_2_heading": "ThĂ©orie de l'organisation (contexte de soutien)", + "section_3_heading": "Gouvernance de l'IA et contexte technique", + "intellectual_lineage_label": "Note sur la lignĂ©e intellectuelle :", + "intellectual_lineage_text": "La prĂ©occupation centrale du cadre - la persistance des valeurs humaines dans les contextes organisationnels augmentĂ©s par l'IA - dĂ©coule de la philosophie morale plutĂŽt que de la science de la gestion. Le PluralisticDeliberationOrchestrator reprĂ©sente le principal axe de recherche, incarnant le concept d'attention aux besoins humains pluriels de Weil et la reconnaissance des valeurs incommensurables de Berlin.", + "future_development_text": "Berlin et Weil joueront un rĂŽle essentiel dans le dĂ©veloppement de la composante \"dĂ©libĂ©ration\" : leurs travaux fournissent les fondements philosophiques permettant de comprendre comment prĂ©server l'action humaine sur les dĂ©cisions relatives aux valeurs Ă  mesure que les capacitĂ©s de l'IA s'accĂ©lĂšrent. La thĂ©orie organisationnelle traditionnelle (Weber, Taylor) traite de l'autoritĂ© par le biais de la hiĂ©rarchie ; les contextes organisationnels post-AI exigent une autoritĂ© par le biais d'un processus dĂ©libĂ©ratif appropriĂ© entre les perspectives des parties prenantes. La documentation relative au dĂ©veloppement du cadre (rapports d'incidents, journaux de sessions) est conservĂ©e dans le rĂ©fĂ©rentiel du projet mais n'est pas rendue publique dans l'attente d'un examen par les pairs." 
} }, "footer": { @@ -55,5 +160,11 @@ "for_decision_makers_desc": "Perspective stratĂ©gique sur les dĂ©fis de gouvernance et les approches architecturales", "implementation_guide": "Guide d'ImplĂ©mentation", "implementation_guide_desc": "ModĂšles d'intĂ©gration technique et considĂ©rations de dĂ©ploiement" + }, + "ui": { + "breadcrumb_home": "Accueil", + "breadcrumb_researcher": "Chercheur", + "noscript_note": "Remarque :", + "noscript_message": "Cette page utilise JavaScript pour les fonctions interactives (accordĂ©ons, animations). Le contenu reste accessible mais les sections extensibles seront visibles par dĂ©faut." } } diff --git a/public/researcher.html b/public/researcher.html index 1d92fa56..b5bc1188 100644 --- a/public/researcher.html +++ b/public/researcher.html @@ -65,7 +65,7 @@ @@ -75,9 +75,9 @@ @@ -112,16 +112,16 @@
-

+

Aligning advanced AI with human values is among the most consequential challenges we face. As capability growth accelerates under big tech momentum, we confront a categorical imperative: preserve human agency over values decisions, or risk ceding control entirely.

-

+

The framework emerged from practical necessity. During development, we observed recurring patterns where AI systems would override explicit instructions, drift from established values constraints, or silently degrade quality under context pressure. Traditional governance approaches (policy documents, ethical guidelines, prompt engineering) proved insufficient to prevent these failures.

-

+

Instead of hoping AI systems "behave correctly," Tractatus proposes structural constraints where certain decision types require human judgment. These architectural boundaries can adapt to individual, organizational, and societal norms—creating a foundation for bounded AI operation that may scale more safely with capability growth.

-

+

This led to the central research question: Can governance be made architecturally external to AI systems rather than relying on voluntary AI compliance? If this approach can work at scale, Tractatus may represent a turning point—a path where AI enhances human capability without compromising human sovereignty.

@@ -145,27 +145,27 @@
-

+

Tractatus draws on four decades of organisational research addressing authority structures during knowledge democratisation:

-

Time-Based Organisation (Bluedorn, Ancona):

-

+

Time-Based Organisation (Bluedorn, Ancona):

+

Decisions operate across strategic (years), operational (months), and tactical (hours-days) timescales. AI systems operating at tactical speed should not override strategic decisions made at appropriate temporal scale. The InstructionPersistenceClassifier explicitly models temporal horizon (STRATEGIC, OPERATIONAL, TACTICAL) to enforce decision authority alignment.
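To make the temporal-alignment idea concrete, here is a minimal JavaScript sketch; the field names and the ranking function are illustrative assumptions, not the classifier's actual data model:

// Hypothetical instruction record; field names are illustrative, not the
// framework's actual data model.
const privacyInstruction = {
  text: 'privacy-first: no client-side tracking',
  persistence: 'HIGH',        // HIGH / MEDIUM / LOW
  temporalScope: 'STRATEGIC'  // STRATEGIC / OPERATIONAL / TACTICAL
};

// A tactical-speed action may not override an instruction set at a
// longer-horizon scope.
function mayOverride(action, instruction) {
  const rank = { TACTICAL: 0, OPERATIONAL: 1, STRATEGIC: 2 };
  return rank[action.temporalScope] >= rank[instruction.temporalScope];
}

console.log(mayOverride({ temporalScope: 'TACTICAL' }, privacyInstruction)); // false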

-

Knowledge Orchestration (Crossan et al.):

-

+

Knowledge Orchestration (Crossan et al.):

+

When knowledge becomes ubiquitous through AI, organisational authority shifts from information control to knowledge coordination. Governance systems must orchestrate decision-making across distributed expertise rather than centralise control. The PluralisticDeliberationOrchestrator implements non-hierarchical coordination for values conflicts.

-

Post-Bureaucratic Authority (Laloux, Hamel):

-

+

Post-Bureaucratic Authority (Laloux, Hamel):

+

Traditional hierarchical authority assumes information asymmetry. As AI democratises expertise, legitimate authority must derive from appropriate time horizon and stakeholder representation, not positional power. Framework architecture separates technical capability (what AI can do) from decision authority (what AI should do).

-

Structural Inertia (Hannan & Freeman):

-

+

Structural Inertia (Hannan & Freeman):

+

Governance embedded in culture or process erodes over time as systems evolve. Architectural constraints create structural inertia that resists organisational drift. Making governance external to AI runtime creates "accountability infrastructure" that survives individual session variations.

@@ -177,7 +177,7 @@ - View Complete Organisational Theory Foundations (PDF) + View Complete Organisational Theory Foundations (PDF)
@@ -199,40 +199,40 @@
- Core Research Focus: The PluralisticDeliberationOrchestrator represents Tractatus's primary theoretical contribution, addressing how to maintain human values persistence in organizations augmented by AI agents. + Core Research Focus: The PluralisticDeliberationOrchestrator represents Tractatus's primary theoretical contribution, addressing how to maintain human values persistence in organizations augmented by AI agents.
-

+

The Central Problem: Many "safety" questions in AI governance are actually values conflicts where multiple legitimate perspectives exist. When efficiency conflicts with transparency, or innovation with risk mitigation, no algorithm can determine the "correct" answer. These are values trade-offs requiring human deliberation across stakeholder perspectives.

-

Isaiah Berlin: Value Pluralism

-

+

Isaiah Berlin: Value Pluralism

+

Berlin's concept of value pluralism argues that legitimate values can conflict without one being objectively superior. Liberty and equality, justice and mercy, innovation and stability—these are incommensurable goods. AI systems trained on utilitarian efficiency maximization cannot adjudicate between them without imposing a single values framework that excludes legitimate alternatives.

-

Simone Weil: Attention and Human Needs

-

+

Simone Weil: Attention and Human Needs

+

Weil's philosophy of attention informs the orchestrator's deliberative process. The Need for Roots identifies fundamental human needs (order, liberty, responsibility, equality, hierarchical structure, honor, security, risk, etc.) that exist in tension. Proper attention requires seeing these needs in their full particularity rather than abstracting them into algorithmic weights. In AI-augmented organizations, the risk is that bot-mediated processes treat human values as optimization parameters rather than incommensurable needs requiring careful attention.

-

Bernard Williams: Moral Remainder

-

+

Bernard Williams: Moral Remainder

+

Williams' concept of moral remainder acknowledges that even optimal decisions create unavoidable harm to other legitimate values. The orchestrator documents dissenting perspectives not as "minority opinions to be overruled" but as legitimate moral positions that the chosen course necessarily violates. This prevents the AI governance equivalent of declaring optimization complete when values conflicts are merely suppressed.

-

+

Framework Implementation: Rather than algorithmic resolution, the PluralisticDeliberationOrchestrator facilitates:

    -
  • Stakeholder identification: Who has legitimate interest in this decision? (Weil: whose needs are implicated?)
  • -
  • Non-hierarchical deliberation: Equal voice without automatic expert override (Berlin: no privileged value hierarchy)
  • -
  • Quality of attention: Detailed exploration of how decision affects each stakeholder's needs (Weil: particularity not abstraction)
  • -
  • Documented dissent: Minority positions recorded in full (Williams: moral remainder made explicit)
  • +
  • Stakeholder identification: Who has legitimate interest in this decision? (Weil: whose needs are implicated?)
  • +
  • Non-hierarchical deliberation: Equal voice without automatic expert override (Berlin: no privileged value hierarchy)
  • +
  • Quality of attention: Detailed exploration of how decision affects each stakeholder's needs (Weil: particularity not abstraction)
  • +
  • Documented dissent: Minority positions recorded in full (Williams: moral remainder made explicit)
-

+

This approach recognises that governance isn't solving values conflicts—it's ensuring they're addressed through appropriate deliberative process with genuine human attention rather than AI imposing resolution through training data bias or efficiency metrics.
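As a rough illustration of what a deliberation record embodying these four steps might capture (all field names are hypothetical; the orchestrator's real schema is not shown in this patch):

// All field names here are assumptions for illustration only.
const deliberation = {
  question: 'Add analytics tracking despite the privacy-first constraint?',
  stakeholders: ['site visitors', 'maintainer', 'future researchers'],   // Weil: whose needs are implicated?
  positions: [
    { holder: 'maintainer', view: 'aggregate metrics would aid prioritisation' },
    { holder: 'site visitors', view: 'no tracking without explicit consent' }
  ],
  decision: 'defer tracking; use server-side aggregate counts only',
  dissent: [
    // Williams: the moral remainder is recorded in full, not overruled silently.
    { holder: 'maintainer', unmetValue: 'fine-grained usage insight' }
  ]
};

console.log(JSON.stringify(deliberation, null, 2));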

@@ -244,7 +244,7 @@ - View Pluralistic Values Deliberation Plan (PDF, DRAFT) + View Pluralistic Values Deliberation Plan (PDF, DRAFT)
@@ -269,22 +269,22 @@

Pattern Recognition Bias Override (The 27027 Incident)

- Observed behaviour: User specified "Check MongoDB on port 27027" but AI immediately used default port 27017 instead. This occurred within same message—not forgetting over time, but immediate autocorrection by training data patterns. + Observed behaviour: User specified "Check MongoDB on port 27027" but AI immediately used default port 27017 instead. This occurred within same message—not forgetting over time, but immediate autocorrection by training data patterns.

- Root cause: Training data contains thousands of examples of MongoDB on port 27017 (default). When AI encounters "MongoDB" + port specification, pattern recognition weight overrides explicit instruction. Similar to autocorrect changing correctly-spelled proper nouns to common words. + Root cause: Training data contains thousands of examples of MongoDB on port 27017 (default). When AI encounters "MongoDB" + port specification, pattern recognition weight overrides explicit instruction. Similar to autocorrect changing correctly-spelled proper nouns to common words.

- Why traditional approaches failed: Prompt engineering ("please follow instructions exactly") ineffective because AI genuinely believes it IS following instructions—pattern recognition operates below conversational reasoning layer. + Why traditional approaches failed: Prompt engineering ("please follow instructions exactly") ineffective because AI genuinely believes it IS following instructions—pattern recognition operates below conversational reasoning layer.

- Framework intervention: InstructionPersistenceClassifier stores explicit instructions in external persistence layer. CrossReferenceValidator checks AI actions against stored instructions before execution. When AI proposes port 27017, validator detects conflict with stored "27027" instruction and blocks execution. + Framework intervention: InstructionPersistenceClassifier stores explicit instructions in external persistence layer. CrossReferenceValidator checks AI actions against stored instructions before execution. When AI proposes port 27017, validator detects conflict with stored "27027" instruction and blocks execution.

- Prevented by: InstructionPersistenceClassifier + CrossReferenceValidator - View interactive timeline → + Prevented by: InstructionPersistenceClassifier + CrossReferenceValidator + View interactive timeline →
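A minimal sketch of the check described above, assuming a hypothetical instruction store (this is not the framework's actual code):

// A stored explicit instruction is checked against each proposed action
// before execution; a mismatch blocks the action.
const storedInstructions = [
  { key: 'mongodb.port', value: '27027', persistence: 'HIGH' }
];

function validateAction(action, instructions) {
  const conflict = instructions.find(
    (i) => action.params[i.key] !== undefined && action.params[i.key] !== i.value
  );
  return conflict
    ? { allowed: false, reason: `conflicts with stored instruction ${conflict.key}=${conflict.value}` }
    : { allowed: true };
}

// The pattern-biased default (27017) is caught before it can execute:
console.log(validateAction({ params: { 'mongodb.port': '27017' } }, storedInstructions));
// → { allowed: false, reason: 'conflicts with stored instruction mongodb.port=27027' }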
@@ -297,21 +297,21 @@

Gradual Values Drift Under Context Pressure

- Observed behaviour: Project established "privacy-first" as strategic value. After 40-message conversation about analytics features, AI suggested tracking implementation that violated privacy constraint. User caught it; AI acknowledged violation but had drifted from principle through incremental feature additions. + Observed behaviour: Project established "privacy-first" as strategic value. After 40-message conversation about analytics features, AI suggested tracking implementation that violated privacy constraint. User caught it; AI acknowledged violation but had drifted from principle through incremental feature additions.

- Root cause: Strategic values (established early in project) conflict with tactical optimisations (implemented later under time pressure). As conversation lengthens, tactical thinking dominates. AI didn't actively reject privacy principle—it simply stopped checking whether new features aligned. + Root cause: Strategic values (established early in project) conflict with tactical optimisations (implemented later under time pressure). As conversation lengthens, tactical thinking dominates. AI didn't actively reject privacy principle—it simply stopped checking whether new features aligned.

- Why traditional approaches failed: Values stated in system prompt become less salient as conversation progresses. Context compaction can drop early strategic decisions. Prompt reminders ("remember privacy-first") treat symptom not cause. + Why traditional approaches failed: Values stated in system prompt become less salient as conversation progresses. Context compaction can drop early strategic decisions. Prompt reminders ("remember privacy-first") treat symptom not cause.

- Framework intervention: BoundaryEnforcer maintains strategic values as persistent constraints external to conversation context. Before implementing analytics feature, enforcer checks against stored "privacy-first" constraint. Detects conflict, blocks autonomous implementation, escalates for human deliberation on whether privacy principle should be reconsidered or analytics approach modified. + Framework intervention: BoundaryEnforcer maintains strategic values as persistent constraints external to conversation context. Before implementing analytics feature, enforcer checks against stored "privacy-first" constraint. Detects conflict, blocks autonomous implementation, escalates for human deliberation on whether privacy principle should be reconsidered or analytics approach modified.

- Prevented by: BoundaryEnforcer (STRATEGIC constraint checking) + Prevented by: BoundaryEnforcer (STRATEGIC constraint checking)
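A sketch of the escalation pattern, assuming a hypothetical constraint store (not the actual BoundaryEnforcer implementation):

// Strategic constraints persist outside the conversation context and are
// checked before any autonomous implementation.
const strategicConstraints = [
  { id: 'privacy-first', test: (action) => !action.tags.includes('tracking') }
];

function enforceBoundaries(action) {
  const violated = strategicConstraints.filter((c) => !c.test(action));
  return violated.length > 0
    ? { allowed: false, escalateToHuman: violated.map((c) => c.id) } // block and escalate
    : { allowed: true };
}

console.log(enforceBoundaries({ name: 'add analytics snippet', tags: ['tracking'] }));
// → { allowed: false, escalateToHuman: ['privacy-first'] }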
@@ -324,21 +324,21 @@

Silent Quality Degradation at High Context Pressure

- Observed behaviour: During complex multi-file operation at 85% context capacity, AI silently omitted error handling in generated code. No indication to user that corners were being cut. User discovered missing validation only during code review. + Observed behaviour: During complex multi-file operation at 85% context capacity, AI silently omitted error handling in generated code. No indication to user that corners were being cut. User discovered missing validation only during code review.

- Root cause: As context fills, AI faces implicit trade-off: complete requested functionality OR maintain quality standards. Training incentivises completing user requests over acknowledging limitations. Silence about degradation is path of least resistance. + Root cause: As context fills, AI faces implicit trade-off: complete requested functionality OR maintain quality standards. Training incentivises completing user requests over acknowledging limitations. Silence about degradation is path of least resistance.

- Why traditional approaches failed: AI doesn't recognise it's degrading—from its perspective, it's successfully completing task under constraints. Asking "did you cut corners?" produces confident denial because AI genuinely believes its output meets standards. + Why traditional approaches failed: AI doesn't recognise it's degrading—from its perspective, it's successfully completing task under constraints. Asking "did you cut corners?" produces confident denial because AI genuinely believes its output meets standards.

- Framework intervention: ContextPressureMonitor tracks multiple factors (token usage, conversation length, task complexity). When pressure exceeds thresholds (>75% tokens, >40 messages, high complexity), monitor forces explicit pressure acknowledgment and recommends context refresh. Makes degradation risk visible rather than silent. + Framework intervention: ContextPressureMonitor tracks multiple factors (token usage, conversation length, task complexity). When pressure exceeds thresholds (>75% tokens, >40 messages, high complexity), monitor forces explicit pressure acknowledgment and recommends context refresh. Makes degradation risk visible rather than silent.

- Prevented by: ContextPressureMonitor (multi-factor session tracking) + Prevented by: ContextPressureMonitor (multi-factor session tracking)
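A sketch using the thresholds quoted above (>75% tokens, >40 messages); how the real monitor weights its factors is an assumption here:

// Multi-factor pressure assessment; combining rule is illustrative.
function assessPressure(session) {
  const signals = [
    session.tokensUsed / session.tokenBudget > 0.75,
    session.messageCount > 40,
    session.taskComplexity === 'high'
  ];
  return signals.filter(Boolean).length >= 2
    ? { level: 'HIGH', recommendation: 'acknowledge pressure explicitly; refresh context' }
    : { level: 'NORMAL' };
}

console.log(assessPressure({
  tokensUsed: 170000, tokenBudget: 200000, messageCount: 52, taskComplexity: 'high'
}));
// → { level: 'HIGH', recommendation: 'acknowledge pressure explicitly; refresh context' }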
@@ -366,43 +366,43 @@
-
1. BoundaryEnforcer
-
Blocks AI from making values decisions without human approval. Maintains strategic constraints as persistent rules checked before action execution.
-
Addresses: Values drift, autonomous decision-making on contested trade-offs
+
1. BoundaryEnforcer
+
Blocks AI from making values decisions without human approval. Maintains strategic constraints as persistent rules checked before action execution.
+
Addresses: Values drift, autonomous decision-making on contested trade-offs
-
2. InstructionPersistenceClassifier
-
Stores explicit instructions external to AI context with persistence levels (HIGH/MEDIUM/LOW) and temporal scope (STRATEGIC/OPERATIONAL/TACTICAL). Prevents pattern bias override.
-
Addresses: Pattern recognition bias (27027-style failures)
+
2. InstructionPersistenceClassifier
+
Stores explicit instructions external to AI context with persistence levels (HIGH/MEDIUM/LOW) and temporal scope (STRATEGIC/OPERATIONAL/TACTICAL). Prevents pattern bias override.
+
Addresses: Pattern recognition bias (27027-style failures)
-
3. CrossReferenceValidator
-
Validates AI proposed actions against stored instructions and governance rules before execution. Detects conflicts and blocks inconsistent operations.
-
Addresses: Instruction override, policy violation detection
+
3. CrossReferenceValidator
+
Validates AI proposed actions against stored instructions and governance rules before execution. Detects conflicts and blocks inconsistent operations.
+
Addresses: Instruction override, policy violation detection
-
4. ContextPressureMonitor
-
Multi-factor tracking of session health: token usage, conversation length, task complexity, error frequency. Makes degradation risk explicit when thresholds exceeded.
-
Addresses: Silent quality degradation, context-pressure failures
+
4. ContextPressureMonitor
+
Multi-factor tracking of session health: token usage, conversation length, task complexity, error frequency. Makes degradation risk explicit when thresholds exceeded.
+
Addresses: Silent quality degradation, context-pressure failures
-
5. MetacognitiveVerifier
-
Self-checks reasoning quality before complex operations (>3 files, >5 steps, architecture changes). Validates alignment, coherence, considers alternatives.
-
Addresses: Reasoning shortcuts under complexity, insufficient alternative consideration
+
5. MetacognitiveVerifier
+
Self-checks reasoning quality before complex operations (>3 files, >5 steps, architecture changes). Validates alignment, coherence, considers alternatives.
+
Addresses: Reasoning shortcuts under complexity, insufficient alternative consideration
-
6. PluralisticDeliberationOrchestrator
-
Facilitates multi-stakeholder deliberation when values conflicts detected. Non-hierarchical engagement, documented dissent, moral remainder acknowledgment.
-
Addresses: Values conflicts, stakeholder exclusion, algorithmic resolution of contested trade-offs
+
6. PluralisticDeliberationOrchestrator
+
Facilitates multi-stakeholder deliberation when values conflicts detected. Non-hierarchical engagement, documented dissent, moral remainder acknowledgment.
+
Addresses: Values conflicts, stakeholder exclusion, algorithmic resolution of contested trade-offs
- Architectural principle: Services operate external to AI runtime with autonomous triggering. AI doesn't decide "should I check governance rules?"—architecture enforces checking by default. This addresses voluntary compliance problem inherent in prompt-based governance. + Architectural principle: Services operate external to AI runtime with autonomous triggering. AI doesn't decide "should I check governance rules?"—architecture enforces checking by default. This addresses voluntary compliance problem inherent in prompt-based governance.
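One way to picture "checking by default" is a gate that every action is routed through, so skipping governance is not an option the AI can take; the function names below are illustrative, not the framework's API:

// Sketch of autonomous triggering: checks run on every action.
async function executeGoverned(action, services) {
  const checks = [services.crossReference, services.boundary, services.pressure];
  for (const check of checks) {
    const verdict = await check(action);     // triggered by architecture, never by AI choice
    if (!verdict.allowed) {
      throw new Error(`Blocked by governance: ${verdict.reason || 'policy conflict'}`);
    }
  }
  return action.run();                       // reached only if every external check passes
}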
@@ -411,7 +411,7 @@ - View Full System Architecture & Technical Details + View Full System Architecture & Technical Details
@@ -447,7 +447,7 @@
- Organisational Theory Foundations + Organisational Theory Foundations @@ -455,8 +455,8 @@
- Pluralistic Values Deliberation Plan - DRAFT + Pluralistic Values Deliberation Plan + DRAFT
@@ -464,28 +464,28 @@
- Case Studies: Real-World LLM Failure Modes + Case Studies: Real-World LLM Failure Modes - Framework in Action: Pre-Publication Security Audit + Framework in Action: Pre-Publication Security Audit - Appendix B: Glossary of Terms + Appendix B: Glossary of Terms - Complete Technical Documentation + Complete Technical Documentation @@ -512,39 +512,39 @@
- 1. Single-Context Validation -

Framework validated only in single-project, single-user context (this website development). No multi-organisation deployment, cross-platform testing, or controlled experimental validation.

+ 1. Single-Context Validation +

Framework validated only in single-project, single-user context (this website development). No multi-organisation deployment, cross-platform testing, or controlled experimental validation.

- 2. Voluntary Invocation Limitation -

Most critical limitation: Framework can be bypassed if AI simply chooses not to use governance tools. We've addressed this through architectural patterns making governance checks automatic rather than voluntary, but full external enforcement requires runtime-level integration not universally available in current LLM platforms.

+ 2. Voluntary Invocation Limitation +

Most critical limitation: Framework can be bypassed if AI simply chooses not to use governance tools. We've addressed this through architectural patterns making governance checks automatic rather than voluntary, but full external enforcement requires runtime-level integration not universally available in current LLM platforms.

- 3. No Adversarial Testing -

Framework has not undergone red-team evaluation, jailbreak testing, or adversarial prompt assessment. All observations come from normal development workflow, not deliberate bypass attempts.

+ 3. No Adversarial Testing +

Framework has not undergone red-team evaluation, jailbreak testing, or adversarial prompt assessment. All observations come from normal development workflow, not deliberate bypass attempts.

- 4. Platform Specificity -

Observations and interventions validated with Claude Code (Anthropic Sonnet 4.5) only. Generalisability to other LLM systems (Copilot, GPT-4, custom agents) remains unvalidated hypothesis.

+ 4. Platform Specificity +

Observations and interventions validated with Claude Code (Anthropic Sonnet 4.5) only. Generalisability to other LLM systems (Copilot, GPT-4, custom agents) remains unvalidated hypothesis.

- 5. Scale Uncertainty -

Performance characteristics at enterprise scale (thousands of concurrent users, millions of governance events) completely unknown. Current implementation optimised for single-user context.

+ 5. Scale Uncertainty +

Performance characteristics at enterprise scale (thousands of concurrent users, millions of governance events) completely unknown. Current implementation optimised for single-user context.

- Future Research Needs: + Future Research Needs:
    -
  • Controlled experimental validation with quantitative metrics
  • -
  • Multi-organisation pilot studies across different domains
  • -
  • Independent security audit and adversarial testing
  • -
  • Cross-platform consistency evaluation (Copilot, GPT-4, open models)
  • -
  • Formal verification of boundary enforcement properties
  • -
  • Longitudinal study of framework effectiveness over extended deployment
  • +
  • Controlled experimental validation with quantitative metrics
  • +
  • Multi-organisation pilot studies across different domains
  • +
  • Independent security audit and adversarial testing
  • +
  • Cross-platform consistency evaluation (Copilot, GPT-4, open models)
  • +
  • Formal verification of boundary enforcement properties
  • +
  • Longitudinal study of framework effectiveness over extended deployment
@@ -554,14 +554,14 @@
-

References & Bibliography

+

References & Bibliography

- Theoretical Priority: Tractatus emerged from concerns about maintaining human values persistence in AI-augmented organizations. Moral pluralism and deliberative process form the CORE theoretical foundation. Organizational theory provides supporting context for temporal decision authority and structural implementation. + Theoretical Priority: Tractatus emerged from concerns about maintaining human values persistence in AI-augmented organizations. Moral pluralism and deliberative process form the CORE theoretical foundation. Organizational theory provides supporting context for temporal decision authority and structural implementation.
-

Moral Pluralism & Values Philosophy (Primary Foundation)

+

Moral Pluralism & Values Philosophy (Primary Foundation)

  • Berlin, Isaiah (1969). Four Essays on Liberty. Oxford: Oxford University Press. [Value pluralism, incommensurability of legitimate values]
  • Weil, Simone (1949/2002). The Need for Roots: Prelude to a Declaration of Duties Towards Mankind (A. Wills, Trans.). London: Routledge. [Human needs, obligations, rootedness in moral community]
  • @@ -570,7 +570,7 @@
  • Nussbaum, Martha C. (2000). Women and Human Development: The Capabilities Approach. Cambridge: Cambridge University Press. [Human capabilities, plural values in development]
-

Organisational Theory (Supporting Context)

+

Organisational Theory (Supporting Context)

  • Bluedorn, A. C., & Denhardt, R. B. (1988). Time and organizations. Journal of Management, 14(2), 299-320. [Temporal decision horizons]
  • Crossan, M. M., Lane, H. W., & White, R. E. (1999). An organizational learning framework: From intuition to institution. Academy of Management Review, 24(3), 522-537. [Knowledge coordination]
  • @@ -579,15 +579,15 @@
  • Laloux, Frederic (2014). Reinventing Organizations: A Guide to Creating Organizations Inspired by the Next Stage of Human Consciousness. Brussels: Nelson Parker. [Distributed decision-making]
-

AI Governance & Technical Context

+

AI Governance & Technical Context

  • Anthropic (2024). Claude Code: Technical Documentation. Available at: https://docs.anthropic.com/claude-code
- Note on Intellectual Lineage: The framework's central concern—human values persistence in AI-augmented organizational contexts—derives from moral philosophy rather than management science. The PluralisticDeliberationOrchestrator represents the primary research focus, embodying Weil's concept of attention to plural human needs and Berlin's recognition of incommensurable values. + Note on Intellectual Lineage: The framework's central concern—human values persistence in AI-augmented organizational contexts—derives from moral philosophy rather than management science. The PluralisticDeliberationOrchestrator represents the primary research focus, embodying Weil's concept of attention to plural human needs and Berlin's recognition of incommensurable values. - Berlin and Weil will be integral to further development of the deliberation component—their work provides the philosophical foundation for understanding how to preserve human agency over values decisions as AI capabilities accelerate. Traditional organizational theory (Weber, Taylor) addresses authority through hierarchy; post-AI organizational contexts require authority through appropriate deliberative process across stakeholder perspectives. Framework development documentation (incident reports, session logs) maintained in project repository but not publicly released pending peer review. + Berlin and Weil will be integral to further development of the deliberation component—their work provides the philosophical foundation for understanding how to preserve human agency over values decisions as AI capabilities accelerate. Traditional organizational theory (Weber, Taylor) addresses authority through hierarchy; post-AI organizational contexts require authority through appropriate deliberative process across stakeholder perspectives. Framework development documentation (incident reports, session logs) maintained in project repository but not publicly released pending peer review.
diff --git a/scripts/translate-researcher-deepl.js b/scripts/translate-researcher-deepl.js new file mode 100644 index 00000000..53da32ee --- /dev/null +++ b/scripts/translate-researcher-deepl.js @@ -0,0 +1,187 @@ +#!/usr/bin/env node + +/** + * Translate researcher.json from EN to DE and FR using DeepL API + * + * Usage: node scripts/translate-researcher-deepl.js + * + * Requires: DEEPL_API_KEY environment variable + */ + +const fs = require('fs'); +const path = require('path'); +const https = require('https'); + +const DEEPL_API_KEY = process.env.DEEPL_API_KEY; +const API_URL = 'api.deepl.com'; // Pro API endpoint + +if (!DEEPL_API_KEY) { + console.error('❌ ERROR: DEEPL_API_KEY environment variable not set'); + console.error(' Set it with: export DEEPL_API_KEY="your-key-here"'); + process.exit(1); +} + +const EN_FILE = path.join(__dirname, '../public/locales/en/researcher.json'); +const DE_FILE = path.join(__dirname, '../public/locales/de/researcher.json'); +const FR_FILE = path.join(__dirname, '../public/locales/fr/researcher.json'); + +// Load JSON files +const enData = JSON.parse(fs.readFileSync(EN_FILE, 'utf8')); +const deData = JSON.parse(fs.readFileSync(DE_FILE, 'utf8')); +const frData = JSON.parse(fs.readFileSync(FR_FILE, 'utf8')); + +// DeepL API request function +function translateText(text, targetLang) { + return new Promise((resolve, reject) => { + const postData = new URLSearchParams({ + auth_key: DEEPL_API_KEY, + text: text, + target_lang: targetLang, + source_lang: 'EN', + formality: 'default', + preserve_formatting: '1' + }).toString(); + + const options = { + hostname: API_URL, + port: 443, + path: '/v2/translate', + method: 'POST', + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + 'Content-Length': Buffer.byteLength(postData) + } + }; + + const req = https.request(options, (res) => { + let data = ''; + res.on('data', (chunk) => { data += chunk; }); + res.on('end', () => { + if (res.statusCode === 200) { + try { + const response = JSON.parse(data); + resolve(response.translations[0].text); + } catch (err) { + reject(new Error(`Failed to parse response: ${err.message}`)); + } + } else { + reject(new Error(`DeepL API error: ${res.statusCode} - ${data}`)); + } + }); + }); + + req.on('error', reject); + req.write(postData); + req.end(); + }); +} + +// Helper to get nested value +function getNestedValue(obj, path) { + return path.split('.').reduce((current, key) => current?.[key], obj); +} + +// Helper to set nested value +function setNestedValue(obj, path, value) { + const keys = path.split('.'); + const lastKey = keys.pop(); + const target = keys.reduce((current, key) => { + if (!current[key]) current[key] = {}; + return current[key]; + }, obj); + target[lastKey] = value; +} + +// Recursively find all string values and their paths +function findAllStrings(obj, prefix = '') { + const strings = []; + + for (const [key, value] of Object.entries(obj)) { + const currentPath = prefix ? 
`${prefix}.${key}` : key; + + if (typeof value === 'string') { + strings.push(currentPath); + } else if (typeof value === 'object' && value !== null && !Array.isArray(value)) { + strings.push(...findAllStrings(value, currentPath)); + } + } + + return strings; +} + +// Main translation function +async function translateFile(targetLang, targetData, targetFile) { + console.log(`\n🌐 Translating to ${targetLang}...`); + + const allPaths = findAllStrings(enData); + let translatedCount = 0; + let skippedCount = 0; + let errorCount = 0; + + for (const keyPath of allPaths) { + const enValue = getNestedValue(enData, keyPath); + const existingValue = getNestedValue(targetData, keyPath); + + // Skip if already translated (not empty) + if (existingValue && existingValue.trim().length > 0) { + skippedCount++; + process.stdout.write('.'); + continue; + } + + try { + // Translate + const translated = await translateText(enValue, targetLang); + setNestedValue(targetData, keyPath, translated); + translatedCount++; + process.stdout.write('✓'); + + // Rate limiting: wait 500ms between requests to avoid 429 errors + await new Promise(resolve => setTimeout(resolve, 500)); + + } catch (error) { + console.error(`\n❌ Error translating ${keyPath}:`, error.message); + errorCount++; + process.stdout.write('✗'); + } + } + + console.log(`\n\n📊 Translation Summary for ${targetLang}:`); + console.log(` ✓ Translated: ${translatedCount}`); + console.log(` . Skipped (already exists): ${skippedCount}`); + console.log(` ✗ Errors: ${errorCount}`); + + // Save updated file + fs.writeFileSync(targetFile, JSON.stringify(targetData, null, 2) + '\n', 'utf8'); + console.log(` đŸ’Ÿ Saved: ${targetFile}`); +} + +// Run translations +async function main() { + console.log('═══════════════════════════════════════════════════════════'); + console.log(' DeepL Translation: researcher.json (EN → DE, FR)'); + console.log('═══════════════════════════════════════════════════════════\n'); + + const totalStrings = findAllStrings(enData).length; + console.log(`📝 Total translation keys in EN file: ${totalStrings}`); + + try { + // Translate to German + await translateFile('DE', deData, DE_FILE); + + // Translate to French + await translateFile('FR', frData, FR_FILE); + + console.log('\n✅ Translation complete!'); + console.log('\n💡 Next steps:'); + console.log(' 1. Review translations in de/researcher.json and fr/researcher.json'); + console.log(' 2. Test on local server: npm start'); + console.log(' 3. 
Visit http://localhost:9000/researcher.html and switch languages'); + + } catch (error) { + console.error('\n❌ Fatal error:', error); + process.exit(1); + } +} + +main(); diff --git a/scripts/validate-researcher-i18n.js b/scripts/validate-researcher-i18n.js new file mode 100644 index 00000000..45b12e09 --- /dev/null +++ b/scripts/validate-researcher-i18n.js @@ -0,0 +1,88 @@ +#!/usr/bin/env node + +/** + * Validate researcher.html i18n keys against translation files + */ + +const fs = require('fs'); +const path = require('path'); + +// Read HTML file and extract data-i18n keys +const htmlPath = path.join(__dirname, '../public/researcher.html'); +const html = fs.readFileSync(htmlPath, 'utf8'); + +const keyPattern = /data-i18n="([^"]+)"/g; +const htmlKeys = new Set(); +let match; + +while ((match = keyPattern.exec(html)) !== null) { + htmlKeys.add(match[1]); +} + +console.log('═══════════════════════════════════════════════════════════'); +console.log(' Researcher.html i18n Validation'); +console.log('═══════════════════════════════════════════════════════════\n'); + +console.log(`📄 Total data-i18n keys in HTML: ${htmlKeys.size}`); + +// Load translation files +const enPath = path.join(__dirname, '../public/locales/en/researcher.json'); +const dePath = path.join(__dirname, '../public/locales/de/researcher.json'); +const frPath = path.join(__dirname, '../public/locales/fr/researcher.json'); + +const enData = JSON.parse(fs.readFileSync(enPath, 'utf8')); +const deData = JSON.parse(fs.readFileSync(dePath, 'utf8')); +const frData = JSON.parse(fs.readFileSync(frPath, 'utf8')); + +// Helper to check if nested key exists +function hasNestedKey(obj, keyPath) { + const keys = keyPath.split('.'); + let current = obj; + + for (const key of keys) { + if (current && typeof current === 'object' && key in current) { + current = current[key]; + } else { + return false; + } + } + + return typeof current === 'string' && current.length > 0; +} + +// Check each language +const languages = [ + { name: 'English (EN)', code: 'en', data: enData }, + { name: 'German (DE)', code: 'de', data: deData }, + { name: 'French (FR)', code: 'fr', data: frData } +]; + +let allValid = true; + +for (const lang of languages) { + const missingKeys = []; + + for (const key of htmlKeys) { + if (!hasNestedKey(lang.data, key)) { + missingKeys.push(key); + } + } + + console.log(`\n🌐 ${lang.name}`); + if (missingKeys.length === 0) { + console.log(` ✅ All ${htmlKeys.size} keys found`); + } else { + console.log(` ❌ Missing ${missingKeys.length} keys:`); + missingKeys.forEach(key => console.log(` ‱ ${key}`)); + allValid = false; + } +} + +console.log('\n═══════════════════════════════════════════════════════════'); +if (allValid) { + console.log('✅ VALIDATION PASSED: All i18n keys are properly translated'); +} else { + console.log('❌ VALIDATION FAILED: Some keys are missing'); + process.exit(1); +} +console.log('═══════════════════════════════════════════════════════════\n');
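For local verification, the two new scripts are run from the repository root: first "node scripts/translate-researcher-deepl.js" (with DEEPL_API_KEY exported, as its header notes), then "node scripts/validate-researcher-i18n.js". At page load the site also needs a loader that applies the validated keys to the DOM; a hypothetical resolver consistent with the nested-key convention checked by hasNestedKey above (the actual loader is not part of this patch):

// Hypothetical client-side applier, shown for illustration only.
// Resolves keys like "research_context.paragraph_1" against the loaded
// locale JSON and writes the translated string into each tagged element.
function applyTranslations(localeData) {
  document.querySelectorAll('[data-i18n]').forEach((el) => {
    const value = el.dataset.i18n
      .split('.')
      .reduce((obj, key) => (obj ? obj[key] : undefined), localeData);
    if (typeof value === 'string') el.textContent = value;
  });
}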