feat: add LLM integration and enhance game engine

- Add OpenAI-based LLM integration for agent dialogue (falls back to mock responses when OPENAI_API_KEY is not set)
- Enhance survival mechanics with energy decay and the feeding system; add a reset command and random idle chat (usage sketched below)
- Update frontend debug client with speech bubbles for agent dialogue and a reset button
- Add .gitignore rules for Unity and Serena

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
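
For quick manual testing, the commands this commit wires up (`feed <name>`, `check`, `reset`) can be driven from a small script using the same payload shape the debug client sends over its WebSocket. A minimal sketch, assuming the backend listens at ws://localhost:8000/ws (the actual endpoint URL is not shown in this diff):

```python
# Hedged sketch: drive the new viewer commands from a script. The WebSocket
# URL is an assumption - substitute your server's real host/port and path.
import asyncio
import json

import websockets  # already listed in the backend requirements


async def send_command(message: str, user: str = "tester") -> None:
    async with websockets.connect("ws://localhost:8000/ws") as ws:
        # Same envelope the debug client builds: action + payload
        await ws.send(json.dumps({
            "action": "send_comment",
            "payload": {"user": user, "message": message},
        }))
        print(await ws.recv())  # first event broadcast back by the server


if __name__ == "__main__":
    asyncio.run(send_command("feed Jack"))  # also try "check" or "reset"
```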
2026-01-01 12:15:13 +08:00
parent cf1739b7f8
commit e96948e8a4
7 changed files with 504 additions and 12 deletions

.gitignore vendored

@@ -30,3 +30,22 @@ ENV/
# OS
.DS_Store
Thumbs.db
# Unity
unity-client/Library/
unity-client/Temp/
unity-client/Logs/
unity-client/UserSettings/
unity-client/obj/
unity-client/*.csproj
unity-client/*.sln
unity-client/*.pidb
unity-client/*.booproj
unity-client/*.svd
unity-client/*.pdb
unity-client/*.mdb
unity-client/*.opendb
unity-client/*.VC.db
# Serena AI
.serena/


@@ -5,6 +5,7 @@ Manages survival mechanics, agent states, and user interactions.
import asyncio
import logging
import random
import re
import time
from typing import TYPE_CHECKING
@@ -12,6 +13,7 @@ from typing import TYPE_CHECKING
from .schemas import GameEvent, EventType
from .database import init_db, get_db_session
from .models import User, Agent, WorldState
from .llm import llm_service
if TYPE_CHECKING:
from .server import ConnectionManager
@@ -21,6 +23,7 @@ logger = logging.getLogger(__name__)
# Command patterns
FEED_PATTERN = re.compile(r"feed\s+(\w+)", re.IGNORECASE)
CHECK_PATTERN = re.compile(r"(check|查询|状态)", re.IGNORECASE)
RESET_PATTERN = re.compile(r"(reset|重新开始|重置)", re.IGNORECASE)
# Game constants
TICK_INTERVAL = 5.0 # Seconds between ticks
@@ -29,12 +32,13 @@ HP_DECAY_WHEN_STARVING = 5 # HP lost when energy is 0
FEED_COST = 10 # Gold cost to feed an agent
FEED_ENERGY_RESTORE = 20 # Energy restored when fed
INITIAL_USER_GOLD = 100 # Starting gold for new users
IDLE_CHAT_PROBABILITY = 0.1 # 10% chance of idle chat per tick
# Initial NPC data
INITIAL_AGENTS = [
{"name": "Jack", "personality": "勇敢"},
{"name": "Luna", "personality": "狡猾"},
{"name": "Bob", "personality": "老实"},
{"name": "Jack", "personality": "Brave"},
{"name": "Luna", "personality": "Cunning"},
{"name": "Bob", "personality": "Honest"},
]
@@ -116,6 +120,103 @@ class GameEngine:
{"agents": agents_data}
)
async def _trigger_agent_speak(
self,
agent_id: int,
agent_name: str,
agent_personality: str,
agent_hp: int,
agent_energy: int,
event_description: str,
event_type: str = "feed"
) -> None:
"""
Fire-and-forget LLM call to generate agent speech.
This runs asynchronously without blocking the game loop.
"""
try:
# Create a lightweight agent-like object for LLM
class AgentSnapshot:
def __init__(self, name, personality, hp, energy):
self.name = name
self.personality = personality
self.hp = hp
self.energy = energy
agent_snapshot = AgentSnapshot(
agent_name, agent_personality, agent_hp, agent_energy
)
text = await llm_service.generate_reaction(
agent_snapshot, event_description, event_type
)
await self._broadcast_event(
EventType.AGENT_SPEAK,
{
"agent_id": agent_id,
"agent_name": agent_name,
"text": text
}
)
logger.debug(f"Agent {agent_name} says: {text}")
except Exception as e:
logger.error(f"Error in agent speak: {e}")
async def _trigger_idle_chat(self) -> None:
"""
Randomly select an alive agent to say something about their situation.
Called with IDLE_CHAT_PROBABILITY chance each tick.
"""
with get_db_session() as db:
alive_agents = db.query(Agent).filter(Agent.status == "Alive").all()
world = db.query(WorldState).first()
weather = world.weather if world else "Sunny"
if not alive_agents:
return
# Pick a random alive agent
agent = random.choice(alive_agents)
agent_data = {
"id": agent.id,
"name": agent.name,
"personality": agent.personality,
"hp": agent.hp,
"energy": agent.energy
}
try:
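# Snapshot the fields the prompt needs (name, personality, hp, energy) so the LLM call works on plain values rather than the ORM object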
class AgentSnapshot:
def __init__(self, name, personality, hp, energy):
self.name = name
self.personality = personality
self.hp = hp
self.energy = energy
agent_snapshot = AgentSnapshot(
agent_data["name"],
agent_data["personality"],
agent_data["hp"],
agent_data["energy"]
)
text = await llm_service.generate_idle_chat(agent_snapshot, weather)
await self._broadcast_event(
EventType.AGENT_SPEAK,
{
"agent_id": agent_data["id"],
"agent_name": agent_data["name"],
"text": text
}
)
logger.debug(f"Idle chat - {agent_data['name']}: {text}")
except Exception as e:
logger.error(f"Error in idle chat: {e}")
async def _process_survival_tick(self) -> None:
"""
Process survival mechanics for all alive agents.
@@ -151,7 +252,7 @@ class GameEngine:
EventType.AGENT_DIED,
{
"agent_name": death["name"],
"message": f"💀 {death['name']}{death['personality']})因饥饿而死亡..."
"message": f"{death['name']} ({death['personality']}) has died of starvation..."
}
)
@@ -177,14 +278,14 @@ class GameEngine:
if agent is None:
await self._broadcast_event(
EventType.ERROR,
{"message": f"找不到名为 {agent_name} 的角色"}
{"message": f"Agent '{agent_name}' not found"}
)
return
if agent.status != "Alive":
await self._broadcast_event(
EventType.ERROR,
{"message": f"{agent.name} 已经死亡,无法投喂"}
{"message": f"{agent.name} is already dead and cannot be fed"}
)
return
@@ -193,7 +294,7 @@ class GameEngine:
EventType.ERROR,
{
"user": username,
"message": f"金币不足!需要 {FEED_COST} 金币,当前只有 {user.gold} 金币"
"message": f"Not enough gold! Need {FEED_COST}, you have {user.gold}"
}
)
return
@@ -206,7 +307,10 @@ class GameEngine:
# Store data for broadcasting before session closes
feed_result = {
"agent_id": agent.id,
"agent_name": agent.name,
"agent_personality": agent.personality,
"agent_hp": agent.hp,
"actual_restore": actual_restore,
"agent_energy": agent.energy,
"user_gold": user.gold
@@ -222,8 +326,8 @@ class GameEngine:
"energy_restored": feed_result["actual_restore"],
"agent_energy": feed_result["agent_energy"],
"user_gold": feed_result["user_gold"],
"message": f"🍖 {username} 投喂了 {feed_result['agent_name']}"
f"恢复 {feed_result['actual_restore']} 点体力(当前: {feed_result['agent_energy']}/100"
"message": f"{username} fed {feed_result['agent_name']}! "
f"Restored {feed_result['actual_restore']} energy (now: {feed_result['agent_energy']}/100)"
}
)
@@ -235,6 +339,19 @@ class GameEngine:
}
)
# Fire-and-forget: Trigger LLM response asynchronously
asyncio.create_task(
self._trigger_agent_speak(
agent_id=feed_result["agent_id"],
agent_name=feed_result["agent_name"],
agent_personality=feed_result["agent_personality"],
agent_hp=feed_result["agent_hp"],
agent_energy=feed_result["agent_energy"],
event_description=f"User {username} gave you food. You feel more energetic!",
event_type="feed"
)
)
async def _handle_check(self, username: str) -> None:
"""Handle check/status command."""
with get_db_session() as db:
@@ -246,7 +363,7 @@ class GameEngine:
user_data = {"username": user.username, "gold": user.gold}
agents_data = [agent.to_dict() for agent in agents]
world_data = world.to_dict() if world else {}
message = f"📊 {username} 的状态 - 金币: {user_data['gold']}"
message = f"{username}'s status - Gold: {user_data['gold']}"
await self._broadcast_event(
EventType.CHECK,
@@ -258,6 +375,28 @@ class GameEngine:
}
)
async def _handle_reset(self, username: str) -> None:
"""Handle reset/restart command - reset all agents to full HP/energy."""
with get_db_session() as db:
agents = db.query(Agent).all()
for agent in agents:
agent.hp = 100
agent.energy = 100
agent.status = "Alive"
# Also reset world state
world = db.query(WorldState).first()
if world:
world.day_count = 1
await self._broadcast_event(
EventType.SYSTEM,
{"message": f"{username} triggered a restart! All survivors have been revived."}
)
# Broadcast updated agent states
await self._broadcast_agents_status()
async def process_comment(self, user: str, message: str) -> None:
"""
Process a comment through command matching.
@@ -283,6 +422,10 @@ class GameEngine:
await self._handle_check(user)
return
if RESET_PATTERN.search(message):
await self._handle_reset(user)
return
# No command matched - treat as regular chat
async def _game_loop(self) -> None:
@@ -292,6 +435,7 @@ class GameEngine:
Every tick:
1. Process survival mechanics (energy/HP decay)
2. Broadcast agent states
3. Random chance for idle chat
"""
logger.info("Game loop started - Island survival simulation")
@@ -322,6 +466,10 @@ class GameEngine:
}
)
# Random idle chat (10% chance per tick)
if alive_count > 0 and random.random() < IDLE_CHAT_PROBABILITY:
asyncio.create_task(self._trigger_idle_chat())
logger.debug(f"Tick {self._tick_count}: {alive_count} agents alive")
await asyncio.sleep(self._tick_interval)
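
The engine deliberately never awaits the LLM inside the tick: reactions and idle chat are scheduled with `asyncio.create_task` so a slow OpenAI round trip cannot stall the 5-second loop. A standalone sketch of that fire-and-forget pattern (the names below are illustrative, not part of the codebase):

```python
# Illustrative fire-and-forget sketch: the slow call is scheduled, not awaited,
# so the tick loop keeps running on schedule. Exceptions are handled inside
# the task itself, as the real _trigger_agent_speak does with try/except.
import asyncio


async def fake_llm_reaction(agent_name: str) -> None:
    await asyncio.sleep(0.5)  # stand-in for the OpenAI round trip
    print(f"{agent_name}: Thanks for the food!")


async def tick_loop() -> None:
    for tick in range(3):
        asyncio.create_task(fake_llm_reaction("Jack"))  # schedule and move on
        print(f"tick {tick} processed")
        await asyncio.sleep(1.0)  # stand-in for TICK_INTERVAL


if __name__ == "__main__":
    asyncio.run(tick_loop())
```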

backend/app/llm.py Normal file

@@ -0,0 +1,186 @@
"""
LLM Service - Agent Brain Module.
Provides AI-powered responses for agents using OpenAI's API.
"""
import logging
import os
import random
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .models import Agent
logger = logging.getLogger(__name__)
# Mock responses for development without API key
MOCK_REACTIONS = {
"feed": [
"Oh! Finally some food! Thank you stranger!",
"Mmm, that's delicious! I was starving!",
"You're too kind! My energy is back!",
"Food! Glorious food! I love you!",
],
"idle_sunny": [
"What a beautiful day on this island...",
"The sun feels nice, but I'm getting hungry.",
"I wonder if rescue will ever come...",
"At least the weather is good today.",
],
"idle_rainy": [
"This rain is so depressing...",
"I hope the storm passes soon.",
"Getting wet and cold out here...",
"Rain again? Just my luck.",
],
"idle_starving": [
"I'm so hungry... I can barely stand...",
"Someone please... I need food...",
"My stomach is eating itself...",
"Is this how it ends? Starving on a beach?",
],
}
class LLMService:
"""
Service for generating AI-powered agent reactions.
Falls back to mock responses if API key is not configured.
"""
def __init__(self) -> None:
"""Initialize the LLM service with OpenAI client or mock mode."""
self._api_key = os.environ.get("OPENAI_API_KEY")
self._client = None
self._mock_mode = False
if not self._api_key:
logger.warning(
"OPENAI_API_KEY not found in environment. "
"LLMService running in MOCK mode - using predefined responses."
)
self._mock_mode = True
else:
try:
from openai import AsyncOpenAI
self._client = AsyncOpenAI(api_key=self._api_key)
logger.info("LLMService initialized with OpenAI API")
except ImportError:
logger.error("openai package not installed. Running in MOCK mode.")
self._mock_mode = True
except Exception as e:
logger.error(f"Failed to initialize OpenAI client: {e}. Running in MOCK mode.")
self._mock_mode = True
@property
def is_mock_mode(self) -> bool:
"""Check if service is running in mock mode."""
return self._mock_mode
def _get_mock_response(self, event_type: str = "feed") -> str:
"""Get a random mock response for testing without API."""
responses = MOCK_REACTIONS.get(event_type, MOCK_REACTIONS["feed"])
return random.choice(responses)
async def generate_reaction(
self,
agent: "Agent",
event_description: str,
event_type: str = "feed"
) -> str:
"""
Generate an AI reaction for an agent based on an event.
Args:
agent: The Agent model instance
event_description: Description of what happened (e.g., "User X gave you food")
event_type: Type of event for mock mode categorization
Returns:
A first-person verbal response from the agent
"""
if self._mock_mode:
return self._get_mock_response(event_type)
try:
system_prompt = (
f"You are {agent.name}. "
f"Personality: {agent.personality}. "
f"Current Status: HP={agent.hp}, Energy={agent.energy}. "
f"You live on a survival island. "
f"React to the following event briefly (under 20 words). "
f"Respond in first person, as if speaking out loud."
)
response = await self._client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": event_description}
],
max_tokens=50,
temperature=0.8,
)
return response.choices[0].message.content.strip()
except Exception as e:
logger.error(f"LLM API error: {e}")
return self._get_mock_response(event_type)
async def generate_idle_chat(
self,
agent: "Agent",
weather: str = "Sunny"
) -> str:
"""
Generate idle chatter for an agent based on current conditions.
Args:
agent: The Agent model instance
weather: Current weather condition
Returns:
A spontaneous thought or comment from the agent
"""
# Determine event type for mock responses
if agent.energy <= 20:
event_type = "idle_starving"
elif weather.lower() in ["rainy", "stormy"]:
event_type = "idle_rainy"
else:
event_type = "idle_sunny"
if self._mock_mode:
return self._get_mock_response(event_type)
try:
system_prompt = (
f"You are {agent.name}. "
f"Personality: {agent.personality}. "
f"Current Status: HP={agent.hp}, Energy={agent.energy}. "
f"You are stranded on a survival island. "
f"The weather is {weather}. "
f"Say something brief (under 15 words) about your situation or thoughts. "
f"Speak naturally, as if talking to yourself or nearby survivors."
)
response = await self._client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": "What are you thinking right now?"}
],
max_tokens=40,
temperature=0.9,
)
return response.choices[0].message.content.strip()
except Exception as e:
logger.error(f"LLM API error for idle chat: {e}")
return self._get_mock_response(event_type)
# Global instance for easy import
llm_service = LLMService()
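
Because the service falls back to the MOCK_REACTIONS pools whenever OPENAI_API_KEY is unset (or the openai package is missing), it can be exercised locally without any network access. A minimal sketch; the SimpleNamespace stands in for an Agent row since the prompts only read name, personality, hp and energy, and the import path assumes the script is run from the backend/ directory:

```python
# Hedged local smoke test for the LLM service in mock mode (no API key needed).
import asyncio
from types import SimpleNamespace

from app.llm import llm_service  # assumes CWD is backend/


async def main() -> None:
    # Stand-in for an Agent ORM row - only the four prompt fields are needed.
    jack = SimpleNamespace(name="Jack", personality="Brave", hp=80, energy=15)
    print("mock mode:", llm_service.is_mock_mode)
    print(await llm_service.generate_reaction(jack, "User alice gave you food.", "feed"))
    print(await llm_service.generate_idle_chat(jack, weather="Rainy"))  # energy <= 20 picks the starving lines


if __name__ == "__main__":
    asyncio.run(main())
```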


@@ -18,6 +18,7 @@ class EventType(str, Enum):
# Island survival events
AGENTS_UPDATE = "agents_update" # All agents status broadcast
AGENT_DIED = "agent_died" # An agent has died
AGENT_SPEAK = "agent_speak" # Agent says something (LLM response)
FEED = "feed" # User fed an agent
USER_UPDATE = "user_update" # User gold/status update
WORLD_UPDATE = "world_update" # World state update
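
For reference, the new agent_speak broadcast that the debug client renders as a speech bubble carries the agent's id, name, and generated text. A hedged sketch of the shape (the payload keys match _trigger_agent_speak above; the envelope keys are assumptions, since GameEvent itself is not shown in this diff):

```python
# Assumed wire shape of an agent_speak event - envelope keys are placeholders.
example_agent_speak = {
    "event_type": "agent_speak",
    "data": {
        "agent_id": 1,
        "agent_name": "Jack",
        "text": "Oh! Finally some food! Thank you stranger!",
    },
}
```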


@@ -4,3 +4,4 @@ websockets>=12.0
pydantic>=2.5.0
sqlalchemy>=2.0.0
aiosqlite>=0.19.0
openai>=1.0.0


@@ -96,6 +96,9 @@ function handleGameEvent(event) {
userGoldDisplay.textContent = userGold;
}
break;
case 'agent_speak':
showSpeechBubble(data.agent_id, data.agent_name, data.text);
break;
}
logEvent(event);
@@ -201,6 +204,75 @@ function feedAgent(agentName) {
ws.send(JSON.stringify(payload));
}
/**
* Show speech bubble above an agent card
*/
function showSpeechBubble(agentId, agentName, text) {
const card = document.getElementById(`agent-${agentId}`);
const overlay = document.getElementById('speechBubblesOverlay');
if (!card || !overlay) {
console.warn(`Agent card or overlay not found: agent-${agentId}`);
return;
}
// Remove existing bubble for this agent if any
const existingBubble = document.getElementById(`bubble-${agentId}`);
if (existingBubble) {
existingBubble.remove();
}
// Get card position relative to overlay
const cardRect = card.getBoundingClientRect();
const overlayRect = overlay.parentElement.getBoundingClientRect();
// Create new speech bubble
const bubble = document.createElement('div');
bubble.className = 'speech-bubble';
bubble.id = `bubble-${agentId}`;
bubble.innerHTML = `
<div class="bubble-name">${agentName}</div>
<div>${text}</div>
`;
// Position bubble above the card
const left = (cardRect.left - overlayRect.left) + (cardRect.width / 2);
const top = (cardRect.top - overlayRect.top) - 10;
bubble.style.left = `${left}px`;
bubble.style.top = `${top}px`;
overlay.appendChild(bubble);
// Auto-hide after 5 seconds
setTimeout(() => {
bubble.classList.add('fade-out');
setTimeout(() => {
if (bubble.parentNode) {
bubble.remove();
}
}, 300); // Wait for fade animation
}, 5000);
}
/**
* Reset game - revive all agents
*/
function resetGame() {
if (!ws || ws.readyState !== WebSocket.OPEN) {
alert('未连接到服务器');
return;
}
const user = getCurrentUser();
const payload = {
action: 'send_comment',
payload: { user, message: 'reset' }
};
ws.send(JSON.stringify(payload));
}
/**
* Send a comment/command to the server
*/
@@ -250,6 +322,8 @@ function formatEventData(eventType, data) {
case 'agent_died':
case 'check':
return data.message;
case 'agent_speak':
return `💬 ${data.agent_name}: "${data.text}"`;
case 'agents_update':
return `角色状态已更新`;
case 'user_update':


@@ -92,6 +92,7 @@
display: grid;
grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
gap: 15px;
padding-top: 50px; /* Space for speech bubbles */
}
.agent-card {
background: rgba(255, 255, 255, 0.05);
@@ -180,6 +181,63 @@
cursor: not-allowed;
}
/* Speech Bubble */
.speech-bubbles-overlay {
position: absolute;
top: 0;
left: 0;
right: 0;
pointer-events: none;
z-index: 100;
}
.speech-bubble {
position: absolute;
background: rgba(255, 255, 255, 0.95);
color: #333;
padding: 10px 15px;
border-radius: 12px;
font-size: 0.9rem;
max-width: 250px;
text-align: center;
box-shadow: 0 4px 15px rgba(0, 0, 0, 0.3);
transform: translateX(-50%);
animation: bubbleIn 0.3s ease-out;
pointer-events: auto;
}
.speech-bubble::after {
content: '';
position: absolute;
bottom: -8px;
left: 50%;
transform: translateX(-50%);
border-left: 8px solid transparent;
border-right: 8px solid transparent;
border-top: 8px solid rgba(255, 255, 255, 0.95);
}
.speech-bubble .bubble-name {
font-weight: bold;
color: #88cc88;
margin-bottom: 5px;
font-size: 0.8rem;
}
.speech-bubble.fade-out {
animation: bubbleOut 0.3s ease-in forwards;
}
@keyframes bubbleIn {
from { opacity: 0; transform: translateX(-50%) translateY(10px); }
to { opacity: 1; transform: translateX(-50%) translateY(0); }
}
@keyframes bubbleOut {
from { opacity: 1; transform: translateX(-50%) translateY(0); }
to { opacity: 0; transform: translateX(-50%) translateY(-10px); }
}
.agents-section {
position: relative;
}
.agent-card {
position: relative;
}
/* Panels */
.panels {
display: flex;
@@ -267,6 +325,7 @@
.event.error { border-color: #ff4444; background: rgba(255, 68, 68, 0.1); }
.event.feed { border-color: #ffaa00; background: rgba(255, 170, 0, 0.1); color: #ffcc66; }
.event.agent_died { border-color: #ff4444; background: rgba(255, 68, 68, 0.15); color: #ff8888; }
.event.agent_speak { border-color: #88ccff; background: rgba(136, 204, 255, 0.1); color: #aaddff; }
.event.check { border-color: #88cc88; background: rgba(136, 204, 136, 0.1); }
.event-time { color: #888; font-size: 11px; }
.event-type { font-weight: bold; text-transform: uppercase; font-size: 11px; }
@@ -299,7 +358,11 @@
<!-- Agents Section -->
<div class="agents-section">
<h2 class="section-title">岛上幸存者</h2>
<div class="speech-bubbles-overlay" id="speechBubblesOverlay"></div>
<div style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 15px;">
<h2 class="section-title" style="margin-bottom: 0;">岛上幸存者</h2>
<button onclick="resetGame()" style="background: #ff6666; padding: 8px 16px; font-size: 13px;">🔄 重新开始</button>
</div>
<div class="agents-grid" id="agentsGrid">
<!-- Agent cards will be dynamically generated -->
<div class="agent-card" id="agent-loading">
@@ -316,7 +379,7 @@
<button onclick="sendComment()">发送</button>
</div>
<p style="margin-top: 10px; font-size: 0.85rem; color: #888;">
指令: <code>feed [名字]</code> - 投喂角色 (消耗10金币) | <code>check</code> - 查询状态
指令: <code>feed [名字]</code> - 投喂 | <code>check</code> - 查询 | <code>reset</code> - 重新开始
</p>
</div>