Add complete AI Director system that transforms the survival simulation into a user-driven interactive story with audience voting. Backend: - Add DirectorService for LLM-powered plot generation with fallback templates - Add VoteManager for dual-channel voting (Twitch + Unity) - Integrate 4-phase game loop: Simulation → Narrative → Voting → Resolution - Add vote command parsing (!1, !2, !A, !B) in Twitch service - Add type-safe LLM output handling with _coerce_int() helper - Normalize voter IDs for case-insensitive duplicate prevention Unity Client: - Add NarrativeUI for cinematic event cards and voting progress bars - Add 7 new event types and data models for director/voting events - Add delayed subscription coroutine for NetworkManager timing - Sync client timer with server's remaining_seconds to prevent drift Documentation: - Update README.md with AI Director features, voting commands, and event types 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
563 lines
19 KiB
Python
"""
|
|
AI Director Service - Narrative Control Module (Phase 9).
|
|
|
|
The Director acts as the Dungeon Master for the survival drama,
|
|
generating dramatic plot points and resolving audience votes.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import json
|
|
import logging
|
|
import os
|
|
import random
|
|
import time
|
|
import uuid
|
|
from dataclasses import dataclass, field
|
|
from enum import Enum
|
|
from typing import Any
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class GameMode(str, Enum):
    """Operating modes of the game engine's 4-phase loop."""

    # Normal agent behavior: the simulation runs untouched.
    SIMULATION = "simulation"
    # The Director presents a plot point to the audience.
    NARRATIVE = "narrative"
    # The audience voting window is open.
    VOTING = "voting"
    # Vote consequences are being applied to the world.
    RESOLUTION = "resolution"
|
|
|
|
|
|
@dataclass(frozen=True)
class PlotChoice:
    """One selectable option attached to a plot point.

    Instances are immutable. ``effects`` carries the gameplay payload
    (e.g. ``{"mood_delta": -5}``) associated with this choice.
    """

    choice_id: str  # Stable identifier used to match incoming votes
    text: str  # Audience-facing description of the option
    effects: dict[str, Any] = field(default_factory=dict)  # Gameplay consequences
|
|
|
|
|
|
@dataclass
class PlotPoint:
    """A narrative event generated by the Director, awaiting an audience vote."""

    plot_id: str  # Unique id for correlating votes with this plot
    title: str  # Short dramatic title
    description: str  # Cinematic description shown to the audience
    choices: list[PlotChoice]  # The options the audience votes between
    ttl_seconds: int = 60  # How long the voting window stays open
    created_at: float = field(default_factory=time.time)  # Wall-clock creation time

    def to_dict(self) -> dict[str, Any]:
        """Serialize for broadcasting.

        Note: each choice's ``effects`` payload is deliberately omitted so
        the audience only sees the id and text of every option.
        """
        return {
            "plot_id": self.plot_id,
            "title": self.title,
            "description": self.description,
            "choices": [
                {"choice_id": option.choice_id, "text": option.text}
                for option in self.choices
            ],
            "ttl_seconds": self.ttl_seconds,
        }
|
|
|
|
|
|
@dataclass
class ResolutionResult:
    """Outcome of resolving a plot point vote."""

    plot_id: str  # The plot this resolution belongs to
    choice_id: str  # The winning choice
    message: str  # Narration of the consequence, shown to the audience
    effects: dict[str, Any] = field(default_factory=dict)  # Gameplay deltas to apply

    def to_dict(self) -> dict[str, Any]:
        """Serialize for broadcasting; effects are JSON-encoded as a string."""
        return {
            "plot_id": self.plot_id,
            "choice_id": self.choice_id,
            "message": self.message,
            "effects_json": json.dumps(self.effects),
        }
|
|
|
|
|
|
# Fallback templates when LLM is unavailable
# Each entry mirrors the LLM output contract: a title, a short cinematic
# description, and exactly two PlotChoice options with effect payloads.
# Titles are also used by DirectorService for repetition avoidance and
# keyword-based selection (e.g. "storm", "crisis", "trust").
FALLBACK_PLOT_TEMPLATES = [
    {
        "title": "Mysterious Footprints",
        "description": "Strange footprints appear on the beach overnight. Someone - or something - has been watching the camp.",
        "choices": [
            PlotChoice("investigate", "Follow the tracks into the forest", {"risk": "medium", "reward": "discovery"}),
            PlotChoice("fortify", "Strengthen camp defenses and wait", {"safety": "high", "mood_delta": -5}),
        ],
    },
    {
        "title": "Supply Shortage",
        "description": "The food stores are running dangerously low. Tension builds among the survivors.",
        "choices": [
            PlotChoice("ration", "Implement strict rationing for everyone", {"mood_delta": -10, "food_save": 2}),
            PlotChoice("hunt", "Send a group on a risky hunting expedition", {"risk": "high", "food_gain": 3}),
        ],
    },
    {
        "title": "Storm Warning",
        "description": "Dark clouds gather on the horizon. A massive storm approaches the island.",
        "choices": [
            PlotChoice("shelter", "Everyone take shelter immediately", {"safety": "high", "mood_delta": 5}),
            PlotChoice("salvage", "Quickly gather supplies before the storm hits", {"risk": "medium", "resource_gain": 2}),
        ],
    },
    {
        "title": "Trust Crisis",
        "description": "Accusations fly as valuable supplies go missing from the camp.",
        "choices": [
            PlotChoice("accuse", "Hold a trial to find the culprit", {"drama": "high", "relationship_delta": -5}),
            PlotChoice("forgive", "Call for unity and move on together", {"mood_delta": 3, "trust": "restored"}),
        ],
    },
    {
        "title": "Rescue Signal",
        "description": "A faint light flickers on the distant horizon. Could it be a ship?",
        "choices": [
            PlotChoice("signal", "Build a massive signal fire on the beach", {"energy_delta": -15, "hope": "high"}),
            PlotChoice("wait", "Wait and observe - it could be dangerous", {"safety": "medium", "mood_delta": -3}),
        ],
    },
]
|
|
|
|
|
|
class DirectorService:
    """
    AI Director for generating and resolving narrative events.

    Generates dramatic plot points from the current world state via the
    LLM service, and resolves audience votes into consequences. When the
    LLM is unavailable (mock mode) or fails, template-based fallbacks are
    used so the game loop never stalls.
    """

    # How many recent plot titles to remember for repetition avoidance.
    _HISTORY_LIMIT = 5
    # Default voting-window lifetime for generated plot points, in seconds.
    _DEFAULT_TTL_SECONDS = 60

    def __init__(self, llm_service=None) -> None:
        """
        Initialize the Director service.

        Args:
            llm_service: Optional LLMService instance. If None, the global
                instance is imported lazily on first use (see ``llm``).
        """
        self._llm_service = llm_service
        self._rng = random.Random()
        self._current_plot: PlotPoint | None = None
        self._plot_history: list[str] = []  # Recent plot titles to avoid repetition

    @property
    def llm(self):
        """Lazy-load LLM service to avoid circular imports."""
        if self._llm_service is None:
            from .llm import llm_service
            self._llm_service = llm_service
        return self._llm_service

    @property
    def current_plot(self) -> PlotPoint | None:
        """The currently active plot point awaiting a vote, or None."""
        return self._current_plot

    def clear_current_plot(self) -> None:
        """Clear the current plot after resolution, recording its title."""
        if self._current_plot:
            self._plot_history.append(self._current_plot.title)
            # Keep only the last few titles so older plots become eligible again.
            self._plot_history = self._plot_history[-self._HISTORY_LIMIT:]
        self._current_plot = None

    # ------------------------------------------------------------------
    # Internal LLM helpers (shared by plot generation and vote resolution)
    # ------------------------------------------------------------------

    def _llm_request_kwargs(
        self,
        system_prompt: str,
        user_prompt: str,
        *,
        max_tokens: int,
        temperature: float,
    ) -> dict[str, Any]:
        """Build completion kwargs (model, messages, auth) for an LLM call."""
        kwargs: dict[str, Any] = {
            "model": self.llm._model,
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            "max_tokens": max_tokens,
            "temperature": temperature,
        }
        if self.llm._api_base:
            kwargs["api_base"] = self.llm._api_base
        # Only pass the key directly when it is not sent via a custom header.
        if self.llm._api_key and not self.llm._api_key_header:
            kwargs["api_key"] = self.llm._api_key
        if self.llm._extra_headers:
            kwargs["extra_headers"] = self.llm._extra_headers
        return kwargs

    @staticmethod
    def _strip_code_fences(content: str) -> str:
        """Strip a leading markdown code fence (``` or ```json) from LLM output."""
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
        return content

    # ------------------------------------------------------------------
    # Plot generation
    # ------------------------------------------------------------------

    async def generate_plot_point(self, world_state: dict[str, Any]) -> PlotPoint:
        """
        Generate a dramatic plot point based on current world state.

        Args:
            world_state: Dictionary containing:
                - day: Current game day
                - weather: Current weather condition
                - time_of_day: dawn/day/dusk/night
                - alive_agents: List of alive agent summaries
                - recent_events: List of recent event descriptions
                - tension_level: low/medium/high (derived from deaths, resources, etc.)

        Returns:
            PlotPoint with title, description, and 2 choices. Also stored
            as ``current_plot`` until cleared.
        """
        # Extract context
        day = world_state.get("day", 1)
        weather = world_state.get("weather", "Sunny")
        alive_count = len(world_state.get("alive_agents", []))
        recent_events = world_state.get("recent_events", [])
        tension_level = world_state.get("tension_level", "medium")
        mood_avg = world_state.get("mood_avg", 50)

        # Build context summary (first 5 agents only to bound prompt size)
        agents_summary = ", ".join([
            f"{a.get('name', 'Unknown')} (HP:{a.get('hp', 0)})"
            for a in world_state.get("alive_agents", [])[:5]
        ]) or "No agents alive"

        events_summary = "; ".join(recent_events[-3:]) if recent_events else "Nothing notable recently"

        # Try LLM generation first
        if not self.llm.is_mock_mode:
            try:
                plot = await self._generate_llm_plot(
                    day=day,
                    weather=weather,
                    alive_count=alive_count,
                    agents_summary=agents_summary,
                    events_summary=events_summary,
                    tension_level=tension_level,
                    mood_avg=mood_avg,
                )
                if plot:
                    self._current_plot = plot
                    return plot
            except Exception as e:
                logger.error(f"LLM plot generation failed: {e}")

        # Fallback to template-based generation
        plot = self._generate_fallback_plot(weather, tension_level, mood_avg)
        self._current_plot = plot
        return plot

    async def _generate_llm_plot(
        self,
        day: int,
        weather: str,
        alive_count: int,
        agents_summary: str,
        events_summary: str,
        tension_level: str,
        mood_avg: int,
    ) -> PlotPoint | None:
        """Generate a plot point using the LLM; None on any parse/validation failure."""

        # Build the prompt for the AI Director
        system_prompt = f"""You are the AI Director for a survival drama on a deserted island.
Your role is to create dramatic narrative moments that engage the audience.

CURRENT SITUATION:
- Day {day} on the island
- Weather: {weather}
- Survivors: {alive_count} ({agents_summary})
- Recent events: {events_summary}
- Tension level: {tension_level}
- Average mood: {mood_avg}/100

RECENTLY USED PLOTS (avoid these):
{', '.join(self._plot_history) if self._plot_history else 'None yet'}

GUIDELINES:
1. Create dramatic tension appropriate to the tension level
2. Choices should have meaningful trade-offs
3. Consider weather and mood in your narrative
4. Keep descriptions cinematic but brief (under 50 words)

OUTPUT FORMAT (strict JSON):
{{
  "title": "Brief dramatic title (3-5 words)",
  "description": "Cinematic description of the situation (under 50 words)",
  "choices": [
    {{"id": "choice_a", "text": "First option (under 15 words)", "effects": {{"mood_delta": 5}}}},
    {{"id": "choice_b", "text": "Second option (under 15 words)", "effects": {{"mood_delta": -5}}}}
  ]
}}"""

        user_prompt = f"""The current tension is {tension_level}.
{"Create an intense, high-stakes event!" if tension_level == "high" else "Create an interesting event to raise the drama." if tension_level == "low" else "Create a moderately dramatic event."}

Generate a plot point now. Output ONLY valid JSON, no explanation."""

        try:
            kwargs = self._llm_request_kwargs(
                system_prompt, user_prompt, max_tokens=300, temperature=0.9
            )
            response = await self.llm._acompletion(**kwargs)
            content = response.choices[0].message.content.strip()

            # Parse JSON response (tolerating markdown code blocks)
            data = json.loads(self._strip_code_fences(content))

            # Validate and construct PlotPoint
            choices = [
                PlotChoice(
                    choice_id=c.get("id", f"choice_{i}"),
                    text=c.get("text", "Unknown option"),
                    effects=c.get("effects", {}),
                )
                for i, c in enumerate(data.get("choices", []))
            ]

            if len(choices) < 2:
                logger.warning("LLM returned fewer than 2 choices, using fallback")
                return None

            return PlotPoint(
                plot_id=uuid.uuid4().hex,
                title=data.get("title", "Unexpected Event"),
                description=data.get("description", "Something happens..."),
                choices=choices[:2],  # Limit to 2 choices
                ttl_seconds=self._DEFAULT_TTL_SECONDS,
            )

        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse LLM JSON response: {e}")
            return None
        except Exception as e:
            logger.error(f"LLM plot generation error: {e}")
            return None

    def _generate_fallback_plot(
        self,
        weather: str,
        tension_level: str,
        mood_avg: int,
    ) -> PlotPoint:
        """Generate a plot point from templates when the LLM is unavailable."""

        # Filter templates based on context (avoid recently used titles)
        available = [t for t in FALLBACK_PLOT_TEMPLATES if t["title"] not in self._plot_history]

        if not available:
            available = FALLBACK_PLOT_TEMPLATES

        # Weight selection based on weather and tension
        if weather.lower() in ("stormy", "rainy", "thunder"):
            # Prefer storm-related plots
            storm_plots = [t for t in available if "storm" in t["title"].lower()]
            if storm_plots:
                available = storm_plots
        elif tension_level == "low" and mood_avg > 60:
            # Prefer dramatic plots to shake things up
            drama_plots = [t for t in available if "crisis" in t["title"].lower() or "trust" in t["title"].lower()]
            if drama_plots:
                available = drama_plots

        template = self._rng.choice(available)

        return PlotPoint(
            plot_id=uuid.uuid4().hex,
            title=template["title"],
            description=template["description"],
            choices=list(template["choices"]),
            ttl_seconds=self._DEFAULT_TTL_SECONDS,
        )

    # ------------------------------------------------------------------
    # Vote resolution
    # ------------------------------------------------------------------

    async def resolve_vote(
        self,
        plot_point: PlotPoint,
        winning_choice_id: str,
        world_state: dict[str, Any],
    ) -> ResolutionResult:
        """
        Resolve the vote and generate consequences.

        Args:
            plot_point: The PlotPoint that was voted on
            winning_choice_id: The ID of the winning choice
            world_state: Current world state for context

        Returns:
            ResolutionResult with message and effects to apply
        """
        # Find the winning choice; fall back to the first choice if the id
        # doesn't match anything (e.g. malformed vote data).
        winning_choice = next(
            (c for c in plot_point.choices if c.choice_id == winning_choice_id),
            plot_point.choices[0]
        )

        # Try LLM resolution first
        if not self.llm.is_mock_mode:
            try:
                result = await self._generate_llm_resolution(
                    plot_point=plot_point,
                    winning_choice=winning_choice,
                    world_state=world_state,
                )
                if result:
                    return result
            except Exception as e:
                logger.error(f"LLM resolution failed: {e}")

        # Fallback resolution
        return self._generate_fallback_resolution(plot_point, winning_choice)

    async def _generate_llm_resolution(
        self,
        plot_point: PlotPoint,
        winning_choice: PlotChoice,
        world_state: dict[str, Any],
    ) -> ResolutionResult | None:
        """Generate a resolution using the LLM; None on any failure."""

        agents_summary = ", ".join([
            a.get("name", "Unknown")
            for a in world_state.get("alive_agents", [])[:5]
        ]) or "the survivors"

        system_prompt = f"""You are the AI Director narrating the consequences of an audience vote.

THE SITUATION:
{plot_point.description}

THE AUDIENCE VOTED FOR:
"{winning_choice.text}"

SURVIVORS INVOLVED:
{agents_summary}

GUIDELINES:
1. Describe the immediate consequence dramatically
2. Mention how the survivors react
3. Keep it brief but impactful (under 40 words)
4. The effects should feel meaningful

OUTPUT FORMAT (strict JSON):
{{
  "message": "Dramatic description of what happens...",
  "effects": {{
    "mood_delta": -5,
    "hp_delta": 0,
    "energy_delta": -10,
    "item_gained": null,
    "item_lost": null,
    "relationship_change": null
  }}
}}"""

        try:
            kwargs = self._llm_request_kwargs(
                system_prompt,
                f"Narrate the consequence of choosing: {winning_choice.text}",
                max_tokens=200,
                temperature=0.8,
            )
            response = await self.llm._acompletion(**kwargs)
            content = response.choices[0].message.content.strip()

            data = json.loads(self._strip_code_fences(content))

            # Merge LLM effects with the choice's predefined effects
            # (LLM values win on key collisions).
            effects = {**winning_choice.effects, **data.get("effects", {})}

            return ResolutionResult(
                plot_id=plot_point.plot_id,
                choice_id=winning_choice.choice_id,
                message=data.get("message", f"The survivors chose: {winning_choice.text}"),
                effects=effects,
            )

        except Exception as e:
            logger.error(f"LLM resolution error: {e}")
            return None

    def _generate_fallback_resolution(
        self,
        plot_point: PlotPoint,
        winning_choice: PlotChoice,
    ) -> ResolutionResult:
        """Generate a template-based resolution message."""

        # Template-based resolution messages
        messages = [
            f"The decision is made! {winning_choice.text}",
            f"The survivors act: {winning_choice.text}",
            f"Following the audience's choice: {winning_choice.text}",
        ]

        return ResolutionResult(
            plot_id=plot_point.plot_id,
            choice_id=winning_choice.choice_id,
            message=self._rng.choice(messages),
            effects=dict(winning_choice.effects),  # copy: don't share the choice's dict
        )

    # ------------------------------------------------------------------
    # Tension scoring
    # ------------------------------------------------------------------

    def calculate_tension_level(self, world_state: dict[str, Any]) -> str:
        """
        Calculate the current tension level based on world state.

        Scores health, weather, mood, recent deaths and resource pressure,
        then buckets the total.

        Args:
            world_state: Dictionary with game state information

        Returns:
            "low", "medium", or "high"
        """
        score = 0

        # Factor: Agent health (lower average HP -> more tension)
        alive_agents = world_state.get("alive_agents", [])
        if alive_agents:
            avg_hp = sum(a.get("hp", 100) for a in alive_agents) / len(alive_agents)
            if avg_hp < 30:
                score += 3
            elif avg_hp < 50:
                score += 2
            elif avg_hp < 70:
                score += 1

        # Factor: Weather severity
        weather = world_state.get("weather", "").lower()
        if weather in ("stormy", "thunder"):
            score += 2
        elif weather in ("rainy",):
            score += 1

        # Factor: Mood
        mood_avg = world_state.get("mood_avg", 50)
        if mood_avg < 30:
            score += 2
        elif mood_avg < 50:
            score += 1

        # Factor: Recent deaths (capped so deaths alone can't dominate)
        recent_deaths = world_state.get("recent_deaths", 0)
        score += min(recent_deaths * 2, 4)

        # Factor: Low resources
        if world_state.get("resources_critical", False):
            score += 2

        # Determine level
        if score >= 6:
            return "high"
        elif score >= 3:
            return "medium"
        return "low"
|
|
|
|
|
|
# Global instance
# Shared singleton used by the game engine. Safe to construct at import
# time: the LLM service is only imported lazily on first use (see
# DirectorService.llm).
director_service = DirectorService()
|