puke
2025-10-25 19:58:51 +08:00
committed by puke
parent 60918f69b1
commit 198094fe5f
25 changed files with 110 additions and 1218 deletions

View File

@@ -8,13 +8,12 @@ from reelforge.services.base import BaseService
from reelforge.services.llm import LLMService
from reelforge.services.tts import TTSService
from reelforge.services.image import ImageService
from reelforge.services.book_fetcher import BookFetcherService
from reelforge.services.video import VideoService
from reelforge.services.narration_generator import NarrationGeneratorService
from reelforge.services.image_prompt_generator import ImagePromptGeneratorService
from reelforge.services.frame_composer import FrameComposerService
from reelforge.services.storyboard_processor import StoryboardProcessorService
from reelforge.services.book_video import BookVideoService
from reelforge.services.video_generator import VideoGeneratorService
from reelforge.services.final_image_prompt import (
FinalImagePromptService,
StylePreset,
@@ -26,13 +25,12 @@ __all__ = [
"LLMService",
"TTSService",
"ImageService",
"BookFetcherService",
"VideoService",
"NarrationGeneratorService",
"ImagePromptGeneratorService",
"FrameComposerService",
"StoryboardProcessorService",
"BookVideoService",
"VideoGeneratorService",
"FinalImagePromptService",
"StylePreset",
"PresetValue",

View File

@@ -1,221 +0,0 @@
"""
Book Fetcher Service
Fetch book information from various sources (API or LLM).
"""
import json
from typing import Optional, Literal
from loguru import logger
from reelforge.services.base import BaseService
class BookFetcherService(BaseService):
"""
Book information fetcher service
Provides unified access to various book data sources:
- API: Google Books, Douban, etc. (via configured capability)
- LLM: Generate book info using LLM (flexible, works for any book)
Usage:
# Use default source (from config, usually 'google')
book_info = await reelforge.book_fetcher("原则")
# Explicitly use Google Books API
book_info = await reelforge.book_fetcher("Atomic Habits", query_source="google")
# Explicitly use LLM (good for Chinese books)
book_info = await reelforge.book_fetcher("人性的弱点", query_source="llm")
# Use Douban (if you implemented it)
book_info = await reelforge.book_fetcher(
book_name="原则",
author="瑞·达利欧",
query_source="douban"
)
"""
def __init__(self, config_manager):
super().__init__(config_manager, "book_fetcher")
self._core = None # Will be set by ReelForgeCore (for LLM query)
def set_core(self, core):
"""Set reference to ReelForgeCore (for LLM query)"""
self._core = core
async def __call__(
self,
book_name: str,
author: Optional[str] = None,
query_source: Optional[Literal["google", "douban", "llm"]] = None,
**kwargs
) -> dict:
"""
Fetch book information
Args:
book_name: Book name (required)
author: Author name (optional, improves matching accuracy)
query_source: Data source to query:
- "google": Google Books API
- "douban": Douban Books (requires implementation)
- "llm": Generate book info using LLM
- None: Use default from config (usually "google")
**kwargs: Additional provider-specific parameters
Returns:
Book information dict with fields:
- title: Book title
- author: Author name
- summary: Book summary
- genre: Book category/genre
- publication_year: Publication year (string)
- key_points: List of key points (only from LLM)
- cover_url: Cover image URL (only from API)
- isbn: ISBN code (only from API)
- source: Data source ("google", "douban", or "llm")
Examples:
>>> # Use default source (from config)
>>> book = await reelforge.book_fetcher("Atomic Habits")
>>> # Explicitly use Google Books
>>> book = await reelforge.book_fetcher("Atomic Habits", query_source="google")
>>> # Explicitly use LLM (good for Chinese books)
>>> book = await reelforge.book_fetcher("人性的弱点", query_source="llm")
>>> # Use Douban (if implemented)
>>> book = await reelforge.book_fetcher(
... "原则",
... author="瑞·达利欧",
... query_source="douban"
... )
>>> print(f"Title: {book['title']}")
>>> print(f"Source: {book['source']}")
"""
# Route to appropriate method based on query_source
if query_source == "llm":
# Use LLM to generate book info
return await self._fetch_via_llm(book_name, author)
else:
# Use API (google, douban, or default from config)
return await self._fetch_via_api(book_name, author, query_source, **kwargs)
async def _fetch_via_api(
self,
book_name: str,
author: Optional[str] = None,
query_source: Optional[str] = None,
**kwargs
) -> dict:
"""
Fetch book information via API capability
Args:
book_name: Book name
author: Author name (optional)
query_source: Specific capability to use ("google", "douban", or None for default)
**kwargs: Additional parameters
Returns:
Book information dict
Raises:
Exception: If API call fails
"""
params = {"book_name": book_name}
if author is not None:
params["author"] = author
params.update(kwargs)
# Call book_fetcher capability
# If query_source is specified (e.g., "google"), use it
# Otherwise use default from config
result_json = await self._config_manager.call(
self._capability_type,
cap_id=query_source, # None = use default from config
**params
)
result = json.loads(result_json)
result["source"] = query_source or self.active or "api"
logger.info(f"✅ Fetched book info from {result['source']}: {result.get('title', book_name)}")
return result
async def _fetch_via_llm(self, book_name: str, author: Optional[str] = None) -> dict:
"""
Generate book information using LLM
This method uses LLM to generate book information based on its knowledge.
Good for books that are not available in API databases or for Chinese books.
Args:
book_name: Book name
author: Author name (optional)
Returns:
Book information dict
Raises:
ValueError: If LLM response cannot be parsed
Exception: If LLM call fails
"""
if not self._core:
raise RuntimeError("ReelForgeCore not set. Cannot use LLM query.")
# Build prompt
author_info = f",作者是{author}" if author else ""
prompt = f"""请为书籍《{book_name}{author_info}生成详细的书籍信息。
要求:
1. 如果你知道这本书,请提供真实准确的信息
2. 如果不确定,请基于书名和作者推测合理的信息
3. 严格按照JSON格式输出,不要添加任何其他内容
输出格式(JSON):
{{
"title": "书名",
"author": "作者",
"summary": "书籍简介100-200字概括核心内容和价值",
"genre": "书籍类型(如:自我成长、商业管理、心理学等)",
"publication_year": "2018",
"key_points": [
"核心观点120-30字",
"核心观点220-30字",
"核心观点320-30字"
]
}}
只输出JSON不要其他内容。"""
# Call LLM
response = await self._core.llm(
prompt=prompt,
temperature=0.3, # Lower temperature for more factual responses
max_tokens=1000
)
# Parse JSON
try:
book_info = json.loads(response)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse LLM response as JSON: {e}")
logger.error(f"Response: {response[:200]}...")
raise ValueError(f"LLM returned invalid JSON for book: {book_name}")
# Ensure required fields exist
book_info.setdefault("title", book_name)
book_info.setdefault("author", author or "Unknown")
book_info.setdefault("summary", "No summary available")
book_info.setdefault("genre", "Unknown")
book_info.setdefault("publication_year", "")
book_info["source"] = "llm"
logger.info(f"✅ Generated book info via LLM: {book_info['title']}")
return book_info
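With BookFetcherService removed, the book-specific entry point goes away as well (see the BookVideoService deletion below). A hypothetical migration sketch, not part of this commit: a caller that previously passed book_name can supply its own summary through the generic content path of the new generate_video API shown later in this diff.

# Hypothetical migration sketch (caller-written summary replaces the old book fetch);
# runs inside an async context, mirroring the docstring examples.
result = await reelforge.generate_video(
    content="《原则》由瑞·达利欧所著,核心观点是用原则化的方式做决策……",  # caller-supplied summary
    title="原则",
    n_frames=3,
)
print(result.video_path)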

View File

@@ -1,454 +0,0 @@
"""
Book Video Service
End-to-end service for generating book short videos.
"""
from datetime import datetime
from pathlib import Path
from typing import Optional, Callable
from loguru import logger
from reelforge.models.progress import ProgressEvent
from reelforge.models.storyboard import (
Storyboard,
StoryboardFrame,
StoryboardConfig,
BookInfo,
VideoGenerationResult
)
class BookVideoService:
"""
Book video generation service
Orchestrates the complete pipeline:
1. Generate narrations (LLM)
2. Generate image prompts (LLM)
3. Process each frame (TTS + Image + Compose + Video)
4. Concatenate all segments
5. Add BGM (optional)
"""
def __init__(self, reelforge_core):
"""
Initialize book video service
Args:
reelforge_core: ReelForgeCore instance
"""
self.core = reelforge_core
async def __call__(
self,
# === Content Source (Choose ONE, mutually exclusive) ===
book_name: Optional[str] = None,
author: Optional[str] = None,
topic: Optional[str] = None,
content: Optional[str] = None,
# === Optional Title (works with any source) ===
title: Optional[str] = None,
# === Basic Config ===
n_frames: int = 3,
voice_id: str = "zh-CN-YunjianNeural",
output_path: Optional[str] = None,
# === LLM Parameters ===
min_narration_words: int = 20,
max_narration_words: int = 40,
min_image_prompt_words: int = 50,
max_image_prompt_words: int = 100,
# === Image Parameters ===
image_width: int = 1024,
image_height: int = 1024,
image_style_preset: Optional[str] = None,
image_style_description: Optional[str] = None,
# === Video Parameters ===
video_width: int = 1080,
video_height: int = 1920,
video_fps: int = 30,
# === BGM Parameters ===
bgm_path: Optional[str] = None,
bgm_volume: float = 0.2,
bgm_mode: str = "loop",
# === Advanced Options ===
book_info: Optional[BookInfo] = None,
progress_callback: Optional[Callable[[ProgressEvent], None]] = None,
) -> VideoGenerationResult:
"""
Generate book short video from different content sources
Args:
book_name: Book name (e.g., "从零到一")
author: Book author (optional, pairs with book_name)
topic: Topic/theme (e.g., "如何提高学习效率")
content: User-provided content (any length)
Note: Must provide exactly ONE of: book_name, topic, or content
title: Video title (optional)
- If provided, use it as the video title
- If not provided, auto-generate based on source:
* book_name → use book title
* topic → use topic text
* content → LLM extracts title from content
n_frames: Number of storyboard frames (default 3)
voice_id: TTS voice ID (default "zh-CN-YunjianNeural")
output_path: Output video path (auto-generated if None)
min_narration_words: Min narration length
max_narration_words: Max narration length
min_image_prompt_words: Min image prompt length
max_image_prompt_words: Max image prompt length
image_width: Generated image width (default 1024)
image_height: Generated image height (default 1024)
image_style_preset: Preset style name (e.g., "book", "stick_figure", "minimal", "concept")
image_style_description: Custom style description (overrides preset)
video_width: Final video width (default 1080)
video_height: Final video height (default 1920)
video_fps: Video frame rate (default 30)
bgm_path: BGM path ("default", "happy", custom path, or None)
bgm_volume: BGM volume 0.0-1.0 (default 0.2)
bgm_mode: BGM mode "once" or "loop" (default "loop")
book_info: Book metadata (optional)
progress_callback: Progress callback receiving a ProgressEvent
Returns:
VideoGenerationResult with video path and metadata
Examples:
# Generate from book name
>>> result = await reelforge.generate_book_video(
... book_name="从零到一",
... author="彼得·蒂尔",
... n_frames=3,
... image_style_preset="book"
... )
# Generate from topic
>>> result = await reelforge.generate_book_video(
... topic="如何在信息爆炸时代保持深度思考",
... n_frames=3,
... bgm_path="default"
... )
# Generate from user content with auto-generated title
>>> result = await reelforge.generate_book_video(
... content="昨天我读了一本书,讲的是...",
... n_frames=3
... )
# Generate from user content with custom title
>>> result = await reelforge.generate_book_video(
... content="买房子,第一应该看的是楼盘的整体环境...",
... title="买房风水指南",
... n_frames=3
... )
>>> print(result.video_path)
"""
# ========== Step 0: Validate parameters (mutually exclusive) ==========
sources = [book_name, topic, content]
source_count = sum(x is not None for x in sources)
if source_count == 0:
raise ValueError(
"Must provide exactly ONE of: book_name, topic, or content"
)
elif source_count > 1:
raise ValueError(
"Cannot provide multiple sources. Choose ONE of: book_name, topic, or content"
)
# Determine source type
if book_name:
source_type = "book"
elif topic:
source_type = "topic"
else: # content
source_type = "content"
# Determine final title (priority: user-specified > auto-generated)
if title:
# User specified title, use it directly
final_title = title
logger.info(f"🚀 Starting book video generation from {source_type} with title: '{title}'")
else:
# Auto-generate title based on source
if source_type == "book":
final_title = f"{book_name}" + (f" - {author}" if author else "")
logger.info(f"🚀 Starting book video generation from book: '{final_title}'")
elif source_type == "topic":
final_title = topic
logger.info(f"🚀 Starting book video generation from topic: '{final_title}'")
else: # content
# Will generate title from content using LLM
logger.info(f"🚀 Starting book video generation from content ({len(content)} chars)")
final_title = None # Will be generated later
# Generate title from content if needed (before creating output path)
if source_type == "content" and final_title is None:
self._report_progress(progress_callback, "generating_title", 0.01)
final_title = await self._generate_title_from_content(content)
logger.info(f"✅ Generated title: {final_title}")
# Auto-generate output path if not provided
if output_path is None:
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
# Use first 10 chars of final_title for filename
safe_name = final_title[:10].replace('/', '_').replace(' ', '_')
output_path = f"output/{timestamp}_{safe_name}.mp4"
# Ensure output directory exists
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
# Create storyboard config
config = StoryboardConfig(
n_storyboard=n_frames,
min_narration_words=min_narration_words,
max_narration_words=max_narration_words,
min_image_prompt_words=min_image_prompt_words,
max_image_prompt_words=max_image_prompt_words,
video_width=video_width,
video_height=video_height,
video_fps=video_fps,
voice_id=voice_id,
image_width=image_width,
image_height=image_height
)
# Create storyboard
storyboard = Storyboard(
topic=final_title, # Use final_title as video title
config=config,
book_info=book_info,
created_at=datetime.now()
)
try:
# ========== Step 1: Route based on source type ==========
# Step 1a: Fetch book info if needed
if source_type == "book":
self._report_progress(progress_callback, "fetching_book_info", 0.03)
book_dict = await self.core.book_fetcher(
book_name=book_name,
author=author
)
# Convert dict to BookInfo object
fetched_book_info = BookInfo(
title=book_dict.get("title", book_name),
author=book_dict.get("author", author or "Unknown"),
summary=book_dict.get("summary", ""),
genre=book_dict.get("genre", ""),
publication_year=book_dict.get("publication_year", ""),
cover_url=book_dict.get("cover_url")
)
logger.info(f"✅ Fetched book info: {fetched_book_info.title}")
# Update storyboard with fetched book info
storyboard.book_info = fetched_book_info
else:
fetched_book_info = None
# Step 1b: Generate narrations
self._report_progress(progress_callback, "generating_narrations", 0.05)
narrations = await self.core.narration_generator.generate_narrations(
config=config,
source_type=source_type,
book_info=fetched_book_info if source_type == "book" else None,
topic=topic if source_type == "topic" else None,
content=content if source_type == "content" else None
)
logger.info(f"✅ Generated {len(narrations)} narrations")
# Step 2: Generate image prompts
self._report_progress(progress_callback, "generating_image_prompts", 0.15)
image_prompts = await self.core.image_prompt_generator.generate_image_prompts(
narrations=narrations,
config=config,
image_style_preset=image_style_preset,
image_style_description=image_style_description
)
logger.info(f"✅ Generated {len(image_prompts)} image prompts")
# Step 3: Create frames
for i, (narration, image_prompt) in enumerate(zip(narrations, image_prompts)):
frame = StoryboardFrame(
index=i,
narration=narration,
image_prompt=image_prompt,
created_at=datetime.now()
)
storyboard.frames.append(frame)
# Step 4: Process each frame
for i, frame in enumerate(storyboard.frames):
# Calculate fine-grained progress for this frame
base_progress = 0.2 # Frames processing starts at 20%
frame_range = 0.6 # Frames processing takes 60% (20%-80%)
per_frame_progress = frame_range / len(storyboard.frames)
# Create frame-specific progress callback
def frame_progress_callback(event: ProgressEvent):
"""Report sub-step progress within current frame"""
# Calculate overall progress: base + previous frames + current frame progress
overall_progress = base_progress + (per_frame_progress * i) + (per_frame_progress * event.progress)
# Forward the event with adjusted overall progress
if progress_callback:
adjusted_event = ProgressEvent(
event_type=event.event_type,
progress=overall_progress,
frame_current=event.frame_current,
frame_total=event.frame_total,
step=event.step,
action=event.action
)
progress_callback(adjusted_event)
# Report frame start
self._report_progress(
progress_callback,
"processing_frame",
base_progress + (per_frame_progress * i),
frame_current=i+1,
frame_total=len(storyboard.frames)
)
processed_frame = await self.core.storyboard_processor.process_frame(
frame=frame,
config=config,
total_frames=len(storyboard.frames),
progress_callback=frame_progress_callback
)
storyboard.total_duration += processed_frame.duration
logger.info(f"✅ Frame {i+1} completed ({processed_frame.duration:.2f}s)")
# Step 5: Concatenate videos
self._report_progress(progress_callback, "concatenating", 0.85)
segment_paths = [frame.video_segment_path for frame in storyboard.frames]
from reelforge.services.video import VideoService
video_service = VideoService()
final_video_path = video_service.concat_videos(
videos=segment_paths,
output=output_path,
bgm_path=bgm_path,
bgm_volume=bgm_volume,
bgm_mode=bgm_mode
)
storyboard.final_video_path = final_video_path
storyboard.completed_at = datetime.now()
logger.success(f"🎬 Video generation completed: {final_video_path}")
# Step 6: Create result
self._report_progress(progress_callback, "finalizing", 1.0)
video_path_obj = Path(final_video_path)
file_size = video_path_obj.stat().st_size
result = VideoGenerationResult(
video_path=final_video_path,
storyboard=storyboard,
duration=storyboard.total_duration,
file_size=file_size
)
logger.info(f"✅ Generated video: {final_video_path}")
logger.info(f" Duration: {storyboard.total_duration:.2f}s")
logger.info(f" Size: {file_size / (1024*1024):.2f} MB")
logger.info(f" Frames: {len(storyboard.frames)}")
return result
except Exception as e:
logger.error(f"❌ Video generation failed: {e}")
raise
def _report_progress(
self,
callback: Optional[Callable[[ProgressEvent], None]],
event_type: str,
progress: float,
**kwargs
):
"""
Report progress via callback
Args:
callback: Progress callback function
event_type: Type of progress event
progress: Progress value (0.0-1.0)
**kwargs: Additional event-specific parameters (frame_current, frame_total, etc.)
"""
if callback:
event = ProgressEvent(event_type=event_type, progress=progress, **kwargs)
callback(event)
logger.debug(f"Progress: {progress*100:.0f}% - {event_type}")
else:
logger.debug(f"Progress: {progress*100:.0f}% - {event_type}")
async def _generate_title_from_content(self, content: str) -> str:
"""
Generate a short, attractive title from user content using LLM
Args:
content: User-provided content
Returns:
Generated title (10 characters or less)
"""
# Take first 500 chars to avoid overly long prompts
content_preview = content[:500]
prompt = f"""请为以下内容生成一个简短、有吸引力的标题10字以内
内容:
{content_preview}
要求:
1. 简短精炼(10字以内)
2. 准确概括核心内容
3. 有吸引力,适合作为视频标题
4. 只输出标题文本,不要其他内容
标题:"""
# Call LLM to generate title
response = await self.core.llm(
prompt=prompt,
temperature=0.7,
max_tokens=50
)
# Clean up response
title = response.strip()
# Remove quotes if present
if title.startswith('"') and title.endswith('"'):
title = title[1:-1]
if title.startswith("'") and title.endswith("'"):
title = title[1:-1]
# Limit to 20 chars max (safety)
if len(title) > 20:
title = title[:20]
return title
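The pipeline above reports progress through ProgressEvent objects built in _report_progress. A minimal caller-side sketch, assuming ProgressEvent exposes the event_type, progress, frame_current and frame_total fields used above and that the frame counters default to None when unset:

from reelforge.models.progress import ProgressEvent

def print_progress(event: ProgressEvent) -> None:
    # Render overall progress plus the optional per-frame counters.
    frame = ""
    if event.frame_current is not None and event.frame_total is not None:
        frame = f" (frame {event.frame_current}/{event.frame_total})"
    print(f"[{event.progress * 100:.0f}%] {event.event_type}{frame}")

# Passed in as: progress_callback=print_progress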

View File

@@ -69,19 +69,19 @@ class FinalImagePromptService:
Usage:
# With preset style
final = await reelforge.generate_final_image_prompt(
prompt="A beautiful book on a desk",
prompt="A peaceful mountain landscape",
style_preset=StylePreset.FUTURISTIC
)
# With custom style (any language)
final = await reelforge.generate_final_image_prompt(
prompt="A book",
prompt="A coffee cup on table",
custom_style_description="温馨的咖啡馆,暖色调"
)
# Only prompt (no style)
final = await reelforge.generate_final_image_prompt(
prompt="A book on a wooden desk"
prompt="A sunset over the ocean"
)
"""
@@ -114,7 +114,7 @@ class FinalImagePromptService:
- Join with comma: "{style_part}, {prompt}"
Args:
prompt: Base prompt (optional, e.g., "A beautiful book on a desk")
prompt: Base prompt (optional, e.g., "A peaceful landscape")
style_preset: Preset style from StylePreset enum (optional)
custom_style_description: Custom description in any language (optional)
Overrides style_preset if provided
@@ -125,21 +125,21 @@ class FinalImagePromptService:
Examples:
# With preset style (IDE autocomplete!)
final = await service(
prompt="A book on a desk",
prompt="A mountain landscape",
style_preset=StylePreset.FUTURISTIC
)
# Returns: "Futuristic sci-fi style..., A book on a desk"
# Returns: "Futuristic sci-fi style..., A mountain landscape"
# With custom style (any language)
final = await service(
prompt="A book",
prompt="A coffee cup",
custom_style_description="温馨的咖啡馆,暖色调"
)
# Returns: "Cozy coffee shop interior..., A book"
# Returns: "Cozy coffee shop interior..., A coffee cup"
# Only prompt
final = await service(prompt="A book on desk")
# Returns: "A book on desk"
final = await service(prompt="A sunset scene")
# Returns: "A sunset scene"
# Only style
final = await service(style_preset=StylePreset.MINIMAL)
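The rule documented above is a plain comma join of the style part and the prompt. A standalone sketch of that rule in isolation; the helper name and preset text are illustrative, not the service's actual implementation:

from typing import Optional

def combine_style_and_prompt(prompt: Optional[str], style_part: Optional[str]) -> str:
    # "{style_part}, {prompt}" when both are present, otherwise whichever one exists.
    if style_part and prompt:
        return f"{style_part}, {prompt}"
    return style_part or prompt or ""

# combine_style_and_prompt("A mountain landscape", "Futuristic sci-fi style...")
# -> "Futuristic sci-fi style..., A mountain landscape"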

View File

@@ -24,7 +24,7 @@ class HTMLFrameGenerator:
... topic="Why reading matters",
... text="Reading builds new neural pathways...",
... image="/path/to/image.png",
... ext={"book_title": "Atomic Habits", "book_author": "James Clear"}
... ext={"content_title": "Sample Title", "content_author": "Author Name"}
... )
"""
@@ -74,7 +74,7 @@ class HTMLFrameGenerator:
topic: Video topic/theme
text: Narration text for this frame
image: Path to AI-generated image
ext: Additional data (book_title, book_author, etc.)
ext: Additional data (content_title, content_author, etc.)
width: Frame width in pixels
height: Frame height in pixels
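A minimal sketch of the renamed ext keys in a call mirroring the docstring above; the generate method name and template path are assumptions, and width/height fall back to their defaults:

generator = HTMLFrameGenerator("templates/classic.html")  # template path is an assumption
frame_path = generator.generate(  # method name assumed from the docstring context
    topic="Why reading matters",
    text="Reading builds new neural pathways...",
    image="/path/to/image.png",
    ext={"content_title": "Sample Title", "content_author": "Author Name"},
)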

View File

@@ -17,9 +17,8 @@ class ImageService(BaseService):
Usage:
# Direct call with workflow path
image_path = await reelforge.image(
workflow="workflows/book_cover.json",
title="Atomic Habits",
author="James Clear"
workflow="workflows/t2i_by_local_flux.json",
prompt="A peaceful mountain landscape"
)
# Returns: "http://comfyui.local/view?filename=..."
@@ -52,10 +51,10 @@ class ImageService(BaseService):
Image URL or path (str)
Example:
# Generate book cover
# Generate image
image_url = await reelforge.image(
workflow="workflows/book_cover.json",
title="Atomic Habits",
workflow="workflows/t2i_by_local_flux.json",
prompt="A beautiful landscape",
author="James Clear",
genre="Self-Help"
)
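A minimal sketch of the updated call shape using only the workflow and prompt shown in the new example; the author/genre kwargs left in the surrounding context lines are workflow-specific and omitted here:

# Inside an async context:
image_url = await reelforge.image(
    workflow="workflows/t2i_by_local_flux.json",
    prompt="A peaceful mountain landscape",
)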

View File

@@ -68,7 +68,7 @@ class LLMService(BaseService):
# Override with custom parameters
answer = await reelforge.llm(
"Summarize the book 'Atomic Habits' in 3 sentences",
"Explain the concept of atomic habits in 3 sentences",
api_key="sk-custom-key",
base_url="https://api.custom.com/v1",
model="custom-model",

View File

@@ -1,10 +1,9 @@
"""
Narration generation service
Supports three content sources:
1. Book: Generate book review narrations from book information
2. Topic: Generate narrations from a topic/theme
3. Content: Extract/refine narrations from user-provided content
Supports two content sources:
1. Topic: Generate narrations from a topic/theme
2. Content: Extract/refine narrations from user-provided content
"""
import json
@@ -13,9 +12,8 @@ from typing import List, Optional, Literal
from loguru import logger
from reelforge.models.storyboard import StoryboardConfig, BookInfo
from reelforge.models.storyboard import StoryboardConfig, ContentMetadata
from reelforge.prompts.narration_template import (
build_book_narration_prompt,
build_topic_narration_prompt,
build_content_narration_prompt,
build_narration_prompt # Keep for backward compatibility
@@ -37,8 +35,8 @@ class NarrationGeneratorService:
async def generate_narrations(
self,
config: StoryboardConfig,
source_type: Literal["book", "topic", "content"],
book_info: Optional[BookInfo] = None,
source_type: Literal["topic", "content"],
content_metadata: Optional[ContentMetadata] = None,
topic: Optional[str] = None,
content: Optional[str] = None,
) -> List[str]:
@@ -47,8 +45,8 @@ class NarrationGeneratorService:
Args:
config: Storyboard configuration
source_type: Type of content source ("book", "topic", or "content")
book_info: Book information (required if source_type="book")
source_type: Type of content source ("topic" or "content")
content_metadata: Content metadata (optional, not currently used)
topic: Topic/theme (required if source_type="topic")
content: User-provided content (required if source_type="content")
@@ -60,13 +58,6 @@ class NarrationGeneratorService:
json.JSONDecodeError: If unable to parse LLM response as JSON
Examples:
# Generate from book
>>> narrations = await service.generate_narrations(
... config=config,
... source_type="book",
... book_info=book_info
... )
# Generate from topic
>>> narrations = await service.generate_narrations(
... config=config,
@@ -82,18 +73,7 @@ class NarrationGeneratorService:
... )
"""
# 1. Build prompt based on source_type
if source_type == "book":
if book_info is None:
raise ValueError("book_info is required when source_type='book'")
logger.info(f"Generating book review narrations for: {book_info.title}")
prompt = build_book_narration_prompt(
book_info=book_info,
n_storyboard=config.n_storyboard,
min_words=config.min_narration_words,
max_words=config.max_narration_words
)
elif source_type == "topic":
if source_type == "topic":
if topic is None:
raise ValueError("topic is required when source_type='topic'")
logger.info(f"Generating topic narrations for: {topic}")

View File

@@ -217,17 +217,17 @@ class StoryboardProcessorService:
f"Available templates: classic, modern, minimal"
)
# Get storyboard for book info
# Get storyboard for content metadata
storyboard = getattr(self.core, '_current_storyboard', None)
book_info = storyboard.book_info if storyboard else None
content_metadata = storyboard.content_metadata if storyboard else None
# Build ext data
ext = {}
if book_info:
ext["book_title"] = book_info.title or ""
ext["book_author"] = book_info.author or ""
ext["book_subtitle"] = book_info.subtitle or ""
ext["book_genre"] = book_info.genre or ""
if content_metadata:
ext["content_title"] = content_metadata.title or ""
ext["content_author"] = content_metadata.author or ""
ext["content_subtitle"] = content_metadata.subtitle or ""
ext["content_genre"] = content_metadata.genre or ""
# Generate frame using HTML
generator = HTMLFrameGenerator(str(template_path))
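The ext keys above are read straight off the storyboard's content_metadata. A minimal sketch of supplying that metadata when starting generation, assuming ContentMetadata accepts the same title/author/subtitle/genre fields it exposes above:

from reelforge.models.storyboard import ContentMetadata

metadata = ContentMetadata(
    title="深度思考",
    author="Author Name",
    subtitle="",
    genre="自我成长",
)
# Passed through the new entry point: content_metadata=metadata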

View File

@@ -1,7 +1,7 @@
"""
Book Video Service
Video Generator Service
End-to-end service for generating book short videos.
End-to-end service for generating short videos from content.
"""
from datetime import datetime
@@ -15,14 +15,14 @@ from reelforge.models.storyboard import (
Storyboard,
StoryboardFrame,
StoryboardConfig,
BookInfo,
ContentMetadata,
VideoGenerationResult
)
class BookVideoService:
class VideoGeneratorService:
"""
Book video generation service
Video generation service
Orchestrates the complete pipeline:
1. Generate narrations (LLM)
@@ -34,7 +34,7 @@ class BookVideoService:
def __init__(self, reelforge_core):
"""
Initialize book video service
Initialize video generator service
Args:
reelforge_core: ReelForgeCore instance
@@ -44,8 +44,6 @@ class BookVideoService:
async def __call__(
self,
# === Content Source (Choose ONE, mutually exclusive) ===
book_name: Optional[str] = None,
author: Optional[str] = None,
topic: Optional[str] = None,
content: Optional[str] = None,
@@ -83,24 +81,21 @@ class BookVideoService:
bgm_mode: Literal["once", "loop"] = "loop",
# === Advanced Options ===
book_info: Optional[BookInfo] = None,
content_metadata: Optional[ContentMetadata] = None,
progress_callback: Optional[Callable[[ProgressEvent], None]] = None,
) -> VideoGenerationResult:
"""
Generate book short video from different content sources
Generate short video from different content sources
Args:
book_name: Book name (e.g., "从零到一")
author: Book author (optional, pairs with book_name)
topic: Topic/theme (e.g., "如何提高学习效率")
content: User-provided content (any length)
Note: Must provide exactly ONE of: book_name, topic, or content
Note: Must provide exactly ONE of: topic or content
title: Video title (optional)
- If provided, use it as the video title
- If not provided, auto-generate based on source:
* book_name → use book title
* topic → use topic text
* content → LLM extracts title from content
@@ -115,7 +110,7 @@ class BookVideoService:
image_width: Generated image width (default 1024)
image_height: Generated image height (default 1024)
image_style_preset: Preset style name (e.g., "book", "stick_figure", "minimal", "concept")
image_style_preset: Preset style name (e.g., "minimal", "concept", "cinematic")
image_style_description: Custom style description (overrides preset)
video_width: Final video width (default 1080)
@@ -129,36 +124,28 @@ class BookVideoService:
bgm_volume: BGM volume 0.0-1.0 (default 0.2)
bgm_mode: BGM mode "once" or "loop" (default "loop")
book_info: Book metadata (optional)
content_metadata: Content metadata (optional, for display)
progress_callback: Progress callback receiving a ProgressEvent
Returns:
VideoGenerationResult with video path and metadata
Examples:
# Generate from book name
>>> result = await reelforge.generate_book_video(
... book_name="从零到一",
... author="彼得·蒂尔",
... n_frames=5,
... image_style_preset="book"
... )
# Generate from topic
>>> result = await reelforge.generate_book_video(
>>> result = await reelforge.generate_video(
... topic="如何在信息爆炸时代保持深度思考",
... n_frames=5,
... bgm_path="default"
... )
# Generate from user content with auto-generated title
>>> result = await reelforge.generate_book_video(
>>> result = await reelforge.generate_video(
... content="昨天我读了一本书,讲的是...",
... n_frames=3
... )
# Generate from user content with custom title
>>> result = await reelforge.generate_book_video(
>>> result = await reelforge.generate_video(
... content="买房子,第一应该看的是楼盘的整体环境...",
... title="买房风水指南",
... n_frames=5
@@ -166,22 +153,20 @@ class BookVideoService:
>>> print(result.video_path)
"""
# ========== Step 0: Validate parameters (mutually exclusive) ==========
sources = [book_name, topic, content]
sources = [topic, content]
source_count = sum(x is not None for x in sources)
if source_count == 0:
raise ValueError(
"Must provide exactly ONE of: book_name, topic, or content"
"Must provide exactly ONE of: topic or content"
)
elif source_count > 1:
raise ValueError(
"Cannot provide multiple sources. Choose ONE of: book_name, topic, or content"
"Cannot provide multiple sources. Choose ONE of: topic or content"
)
# Determine source type
if book_name:
source_type = "book"
elif topic:
if topic:
source_type = "topic"
else: # content
source_type = "content"
@@ -190,18 +175,15 @@ class BookVideoService:
if title:
# User specified title, use it directly
final_title = title
logger.info(f"🚀 Starting book video generation from {source_type} with title: '{title}'")
logger.info(f"🚀 Starting video generation from {source_type} with title: '{title}'")
else:
# Auto-generate title based on source
if source_type == "book":
final_title = f"{book_name}" + (f" - {author}" if author else "")
logger.info(f"🚀 Starting book video generation from book: '{final_title}'")
elif source_type == "topic":
if source_type == "topic":
final_title = topic
logger.info(f"🚀 Starting book video generation from topic: '{final_title}'")
logger.info(f"🚀 Starting video generation from topic: '{final_title}'")
else: # content
# Will generate title from content using LLM
logger.info(f"🚀 Starting book video generation from content ({len(content)} chars)")
logger.info(f"🚀 Starting video generation from content ({len(content)} chars)")
final_title = None # Will be generated later
# Generate title from content if needed (before creating output path)
@@ -240,7 +222,7 @@ class BookVideoService:
storyboard = Storyboard(
topic=final_title, # Use final_title as video title
config=config,
book_info=book_info,
content_metadata=content_metadata,
created_at=datetime.now()
)
@@ -248,38 +230,12 @@ class BookVideoService:
self.core._current_storyboard = storyboard
try:
# ========== Step 1: Route based on source type ==========
# Step 1a: Fetch book info if needed
if source_type == "book":
self._report_progress(progress_callback, "fetching_book_info", 0.03)
book_dict = await self.core.book_fetcher(
book_name=book_name,
author=author
)
# Convert dict to BookInfo object
fetched_book_info = BookInfo(
title=book_dict.get("title", book_name),
author=book_dict.get("author", author or "Unknown"),
summary=book_dict.get("summary", ""),
genre=book_dict.get("genre", ""),
publication_year=book_dict.get("publication_year", ""),
cover_url=book_dict.get("cover_url")
)
logger.info(f"✅ Fetched book info: {fetched_book_info.title}")
# Update storyboard with fetched book info
storyboard.book_info = fetched_book_info
else:
fetched_book_info = None
# Step 1b: Generate narrations
# ========== Step 1: Generate narrations ==========
self._report_progress(progress_callback, "generating_narrations", 0.05)
narrations = await self.core.narration_generator.generate_narrations(
config=config,
source_type=source_type,
book_info=fetched_book_info if source_type == "book" else None,
content_metadata=None, # No metadata needed for topic/content
topic=topic if source_type == "topic" else None,
content=content if source_type == "content" else None
)
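Putting the new entry point together, a minimal end-to-end sketch drawn from the docstring examples above; reelforge stands for an already-constructed core instance, and the asyncio wrapper is an assumption about how a script would drive it:

import asyncio

async def main() -> None:
    # reelforge is assumed to be an existing ReelForgeCore instance.
    result = await reelforge.generate_video(
        topic="如何在信息爆炸时代保持深度思考",
        n_frames=3,
        image_style_preset="minimal",
        bgm_path="default",
    )
    print(result.video_path, f"{result.duration:.2f}s")

asyncio.run(main())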