Support presentation logic for historical generation results

puke
2025-11-18 15:52:27 +08:00
parent 9c01c3a9a5
commit dfa2f76c5b
11 changed files with 1198 additions and 64 deletions

View File: pixelle_video/services/__init__.py

@@ -22,6 +22,7 @@ Services:
- VideoService: Video processing
- FrameProcessor: Frame processing orchestrator
- PersistenceService: Task metadata and storyboard persistence
- HistoryManager: History management business logic
- ComfyBaseService: Base class for ComfyUI-based services
"""
@@ -32,6 +33,7 @@ from pixelle_video.services.media import MediaService
from pixelle_video.services.video import VideoService
from pixelle_video.services.frame_processor import FrameProcessor
from pixelle_video.services.persistence import PersistenceService
from pixelle_video.services.history_manager import HistoryManager
# Backward compatibility alias
ImageService = MediaService
@@ -45,5 +47,6 @@ __all__ = [
"VideoService",
"FrameProcessor",
"PersistenceService",
"HistoryManager",
]
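With HistoryManager exported from the services package, the history facade can be wired straight onto the existing persistence layer. A minimal construction sketch, assuming the PersistenceService constructor accepts the output_dir parameter shown in the persistence changes below (the "output" path is illustrative):

from pixelle_video.services import HistoryManager, PersistenceService

persistence = PersistenceService(output_dir="output")  # illustrative output directory
history = HistoryManager(persistence)                  # UI-agnostic history facade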

View File: pixelle_video/services/history_manager.py

@@ -0,0 +1,224 @@
# Copyright (C) 2025 AIDC-AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
History Manager Service
Business logic for history management (UI-agnostic).
Provides high-level operations on top of PersistenceService.
"""
from typing import List, Dict, Optional, Any
from pathlib import Path
from loguru import logger
from pixelle_video.services.persistence import PersistenceService
class HistoryManager:
"""
History management service
Provides business logic for:
- Task listing and filtering
- Task detail retrieval
- Task duplication (for re-generation)
- Task deletion
- Future: Frame regeneration, export, etc.
"""
def __init__(self, persistence: PersistenceService):
"""
Initialize history manager
Args:
persistence: PersistenceService instance
"""
self.persistence = persistence
async def get_task_list(
self,
page: int = 1,
page_size: int = 20,
status: Optional[str] = None,
sort_by: str = "created_at",
sort_order: str = "desc"
) -> Dict[str, Any]:
"""
Get paginated task list
Args:
page: Page number (1-indexed)
page_size: Items per page
status: Filter by status (optional)
sort_by: Sort field (created_at, completed_at, title, duration)
sort_order: Sort order (asc, desc)
Returns:
{
"tasks": [...],
"total": 100,
"page": 1,
"page_size": 20,
"total_pages": 5
}
"""
return await self.persistence.list_tasks_paginated(
page=page,
page_size=page_size,
status=status,
sort_by=sort_by,
sort_order=sort_order
)
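# Illustrative usage sketch (assumes `manager` is an initialized HistoryManager;
# the "completed" status value matches the one counted by get_statistics below):
#
#     result = await manager.get_task_list(page=1, page_size=20, status="completed")
#     for task in result["tasks"]:
#         print(task["task_id"], task["title"], task["duration"])
#     print(f"page {result['page']} of {result['total_pages']}, {result['total']} tasks")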
async def get_task_detail(self, task_id: str) -> Optional[Dict[str, Any]]:
"""
Get full task detail including storyboard
Args:
task_id: Task ID
Returns:
{
"metadata": {...}, # Task metadata
"storyboard": {...} # Storyboard data (if available)
}
or None if task not found
"""
metadata = await self.persistence.load_task_metadata(task_id)
if not metadata:
return None
storyboard = await self.persistence.load_storyboard(task_id)
return {
"metadata": metadata,
"storyboard": storyboard,
}
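# Illustrative usage sketch (assumes `manager` is an initialized HistoryManager):
#
#     detail = await manager.get_task_detail(task_id)
#     if detail is None:
#         ...  # task not found
#     else:
#         meta = detail["metadata"]          # full task metadata dict
#         storyboard = detail["storyboard"]  # may be None if no storyboard was saved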
async def get_statistics(self) -> Dict[str, Any]:
"""
Get statistics about all tasks
Returns:
{
"total_tasks": 100,
"completed": 95,
"failed": 5,
"total_duration": 3600.5, # seconds
"total_size": 1024000000, # bytes
}
"""
return await self.persistence.get_statistics()
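# Illustrative rendering sketch for the statistics payload (keys follow the
# docstring above; the MB conversion is only for display):
#
#     stats = await manager.get_statistics()
#     size_mb = stats["total_size"] / (1024 * 1024)
#     print(f"{stats['completed']}/{stats['total_tasks']} completed, "
#           f"{stats['failed']} failed, {stats['total_duration']:.1f}s, {size_mb:.1f} MB")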
async def delete_task(self, task_id: str) -> bool:
"""
Delete a task and all its files
Args:
task_id: Task ID to delete
Returns:
True if successful, False otherwise
"""
return await self.persistence.delete_task(task_id)
async def duplicate_task(self, task_id: str) -> Optional[Dict[str, Any]]:
"""
Duplicate a task (get input parameters for new generation)
This allows users to:
1. Copy all generation parameters from a previous task
2. Pre-fill the generation form
3. Regenerate with same/modified parameters
Args:
task_id: Task ID to duplicate
Returns:
Input parameters dict or None if task not found
{
"text": "...",
"mode": "generate",
"title": "...",
"n_scenes": 5,
"tts_inference_mode": "local",
"tts_voice": "...",
...
}
"""
metadata = await self.persistence.load_task_metadata(task_id)
if not metadata:
logger.warning(f"Task {task_id} not found for duplication")
return None
# Extract input parameters
input_params = metadata.get("input", {})
logger.info(f"Duplicated task {task_id} parameters")
return input_params
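# Illustrative re-generation flow (assumes `manager` is an initialized
# HistoryManager; generate_video is a hypothetical entry point, not an API
# introduced by this commit):
#
#     params = await manager.duplicate_task(old_task_id)
#     if params:
#         params["n_scenes"] = 8    # optionally tweak fields before resubmitting
#         new_task = await generate_video(**params)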
async def rebuild_index(self):
"""Rebuild task index (useful for maintenance or after manual changes)"""
await self.persistence.rebuild_index()
# ========================================================================
# Future Extensions (Phase 3)
# ========================================================================
async def regenerate_frame(
self,
task_id: str,
frame_index: int,
**override_params
) -> Optional[str]:
"""
Regenerate a specific frame (FUTURE FEATURE)
Args:
task_id: Original task ID
frame_index: Frame index to regenerate (0-based)
**override_params: Parameters to override (image_prompt, style, etc.)
Returns:
New frame image path or None if failed
TODO: Implement in Phase 3
- Load original storyboard
- Get frame parameters
- Override with new parameters
- Call image generation service
- Update storyboard
- Re-composite video
"""
logger.warning("regenerate_frame is not implemented yet (Phase 3 feature)")
return None
async def export_task(self, task_id: str, export_path: str) -> Optional[str]:
"""
Export task as a package (metadata + video + frames) (FUTURE FEATURE)
Args:
task_id: Task ID to export
export_path: Export file path (e.g., "exports/task.zip")
Returns:
Export file path or None if failed
TODO: Implement in Phase 3
- Collect all task files
- Create ZIP archive
- Include metadata.json, storyboard.json, video, frames
"""
logger.warning("export_task is not implemented yet (Phase 3 feature)")
return None

View File: pixelle_video/services/persistence.py

@@ -66,6 +66,10 @@ class PersistenceService:
"""
self.output_dir = Path(output_dir)
self.output_dir.mkdir(exist_ok=True)
# Index file for fast listing
self.index_file = self.output_dir / ".index.json"
self._ensure_index()
def get_task_dir(self, task_id: str) -> Path:
"""Get task directory path"""
@@ -124,6 +128,9 @@ class PersistenceService:
logger.debug(f"Saved task metadata: {task_id}")
# Update index
await self._update_index_for_task(task_id, metadata)
except Exception as e:
logger.error(f"Failed to save task metadata {task_id}: {e}")
raise
@@ -457,4 +464,249 @@ class PersistenceService:
publication_year=data.get("publication_year"),
cover_url=data.get("cover_url"),
)
# ========================================================================
# Index Management (for fast listing)
# ========================================================================
def _ensure_index(self):
"""Ensure index file exists, create if not"""
if not self.index_file.exists():
self._save_index({"version": "1.0", "tasks": []})
def _load_index(self) -> Dict[str, Any]:
"""Load index from file"""
try:
with open(self.index_file, "r", encoding="utf-8") as f:
return json.load(f)
except Exception as e:
logger.error(f"Failed to load index: {e}")
return {"version": "1.0", "tasks": []}
def _save_index(self, index_data: Dict[str, Any]):
"""Save index to file"""
try:
index_data["last_updated"] = datetime.now().isoformat()
with open(self.index_file, "w", encoding="utf-8") as f:
json.dump(index_data, f, ensure_ascii=False, indent=2)
except Exception as e:
logger.error(f"Failed to save index: {e}")
async def _update_index_for_task(self, task_id: str, metadata: Dict[str, Any]):
"""Update index entry for a specific task"""
index = self._load_index()
# Try to get title from multiple sources
title = metadata.get("input", {}).get("title")
if not title or title == "":
# Try to get title from storyboard if input title is empty
storyboard = await self.load_storyboard(task_id)
if storyboard and storyboard.title:
title = storyboard.title
else:
# Fall back to using input text preview
input_text = metadata.get("input", {}).get("text", "")
if input_text:
# Use first 30 characters of input text as title
title = input_text[:30] + ("..." if len(input_text) > 30 else "")
else:
title = "Untitled"
# Extract key info for index
index_entry = {
"task_id": task_id,
"created_at": metadata.get("created_at"),
"completed_at": metadata.get("completed_at"),
"status": metadata.get("status", "unknown"),
"title": title,
"duration": metadata.get("result", {}).get("duration", 0),
"n_frames": metadata.get("result", {}).get("n_frames", 0),
"file_size": metadata.get("result", {}).get("file_size", 0),
"video_path": metadata.get("result", {}).get("video_path"),
}
# Update or append
tasks = index.get("tasks", [])
existing_idx = next((i for i, t in enumerate(tasks) if t["task_id"] == task_id), None)
if existing_idx is not None:
tasks[existing_idx] = index_entry
else:
tasks.append(index_entry)
index["tasks"] = tasks
self._save_index(index)
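# For reference, the .index.json produced by _save_index() has this shape
# (values are illustrative; the entry keys mirror index_entry built above):
#
#     {
#       "version": "1.0",
#       "last_updated": "...",
#       "tasks": [
#         {
#           "task_id": "...",
#           "created_at": "...",
#           "completed_at": "...",
#           "status": "completed",
#           "title": "...",
#           "duration": 42.5,
#           "n_frames": 5,
#           "file_size": 10485760,
#           "video_path": "..."
#         }
#       ]
#     }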
async def rebuild_index(self):
"""Rebuild index by scanning all task directories"""
logger.info("Rebuilding task index...")
index = {"version": "1.0", "tasks": []}
# Scan all directories
for task_dir in self.output_dir.iterdir():
if not task_dir.is_dir() or task_dir.name.startswith("."):
continue
task_id = task_dir.name
metadata = await self.load_task_metadata(task_id)
if metadata:
# Try to get title from multiple sources
title = metadata.get("input", {}).get("title")
if not title or title == "":
# Try to get title from storyboard if input title is empty
storyboard = await self.load_storyboard(task_id)
if storyboard and storyboard.title:
title = storyboard.title
else:
# Fall back to using input text preview
input_text = metadata.get("input", {}).get("text", "")
if input_text:
# Use first 30 characters of input text as title
title = input_text[:30] + ("..." if len(input_text) > 30 else "")
else:
title = "Untitled"
# Add to index
index["tasks"].append({
"task_id": task_id,
"created_at": metadata.get("created_at"),
"completed_at": metadata.get("completed_at"),
"status": metadata.get("status", "unknown"),
"title": title,
"duration": metadata.get("result", {}).get("duration", 0),
"n_frames": metadata.get("result", {}).get("n_frames", 0),
"file_size": metadata.get("result", {}).get("file_size", 0),
"video_path": metadata.get("result", {}).get("video_path"),
})
self._save_index(index)
logger.info(f"Index rebuilt: {len(index['tasks'])} tasks")
# ========================================================================
# Paginated Listing
# ========================================================================
async def list_tasks_paginated(
self,
page: int = 1,
page_size: int = 20,
status: Optional[str] = None,
sort_by: str = "created_at",
sort_order: str = "desc"
) -> Dict[str, Any]:
"""
List tasks with pagination
Args:
page: Page number (1-indexed)
page_size: Items per page
status: Filter by status (optional)
sort_by: Sort field (created_at, completed_at, title, duration)
sort_order: Sort order (asc, desc)
Returns:
{
"tasks": [...], # List of task summaries
"total": 100, # Total matching tasks
"page": 1, # Current page
"page_size": 20, # Items per page
"total_pages": 5 # Total pages
}
"""
index = self._load_index()
tasks = index.get("tasks", [])
# Filter by status
if status:
tasks = [t for t in tasks if t.get("status") == status]
# Sort (treat missing/None fields as epoch, empty string, or zero so sorting never raises)
reverse = (sort_order == "desc")
if sort_by in ["created_at", "completed_at"]:
tasks.sort(
key=lambda t: datetime.fromisoformat(t.get(sort_by) or "1970-01-01T00:00:00"),
reverse=reverse
)
elif sort_by == "title":
tasks.sort(key=lambda t: t.get(sort_by) or "", reverse=reverse)
elif sort_by in ["duration", "n_frames"]:
tasks.sort(key=lambda t: t.get(sort_by) or 0, reverse=reverse)
# Paginate
total = len(tasks)
total_pages = (total + page_size - 1) // page_size
start_idx = (page - 1) * page_size
end_idx = start_idx + page_size
page_tasks = tasks[start_idx:end_idx]
return {
"tasks": page_tasks,
"total": total,
"page": page,
"page_size": page_size,
"total_pages": total_pages,
}
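# Worked example of the ceiling-division pagination above: with total = 101
# matching tasks and page_size = 20, total_pages = (101 + 20 - 1) // 20 = 6;
# requesting page = 3 returns tasks[40:60].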
# ========================================================================
# Statistics
# ========================================================================
async def get_statistics(self) -> Dict[str, Any]:
"""
Get statistics about all tasks
Returns:
{
"total_tasks": 100,
"completed": 95,
"failed": 5,
"total_duration": 3600.5, # seconds
"total_size": 1024000000, # bytes
}
"""
index = self._load_index()
tasks = index.get("tasks", [])
stats = {
"total_tasks": len(tasks),
"completed": len([t for t in tasks if t.get("status") == "completed"]),
"failed": len([t for t in tasks if t.get("status") == "failed"]),
"total_duration": sum(t.get("duration", 0) for t in tasks),
"total_size": sum(t.get("file_size", 0) for t in tasks),
}
return stats
# ========================================================================
# Delete Task
# ========================================================================
async def delete_task(self, task_id: str) -> bool:
"""
Delete a task and all its files
Args:
task_id: Task ID to delete
Returns:
True if successful, False otherwise
"""
try:
import shutil
task_dir = self.get_task_dir(task_id)
if task_dir.exists():
shutil.rmtree(task_dir)
logger.info(f"Deleted task directory: {task_dir}")
# Update index
index = self._load_index()
tasks = index.get("tasks", [])
tasks = [t for t in tasks if t["task_id"] != task_id]
index["tasks"] = tasks
self._save_index(index)
return True
except Exception as e:
logger.error(f"Failed to delete task {task_id}: {e}")
return False
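Taken together, the index-backed PersistenceService additions and the new HistoryManager cover the full history flow: list, inspect, duplicate, and delete. A minimal end-to-end sketch, assuming the package layout in this commit (the output_dir value and the commented-out delete call are illustrative):

import asyncio

from pixelle_video.services import HistoryManager, PersistenceService


async def main():
    persistence = PersistenceService(output_dir="output")  # illustrative path
    manager = HistoryManager(persistence)

    await manager.rebuild_index()  # optional maintenance pass over existing task dirs
    page = await manager.get_task_list(page=1, page_size=10)
    for task in page["tasks"]:
        print(task["created_at"], task["status"], task["title"])

    # Deleting a task removes its directory and its .index.json entry:
    # await manager.delete_task(page["tasks"][0]["task_id"])


asyncio.run(main())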