puke
2025-10-25 19:58:51 +08:00
committed by puke
parent 60918f69b1
commit 198094fe5f
25 changed files with 110 additions and 1218 deletions

View File

@@ -68,19 +68,6 @@ image:
comfyui_url: http://127.0.0.1:8188 # Local ComfyUI server
# runninghub_api_key: "" # Optional: RunningHub cloud API key
# ==================== Book Fetcher Configuration ====================
book_fetcher:
default: google
# Google Books API (free, no key needed, works for English/Chinese books)
# Will automatically fallback to LLM if API fails or book not found
google:
# No configuration needed
# Douban (requires custom implementation)
douban:
# Implement your own logic in reelforge/capabilities/book_fetcher.py
# ==================== External MCP Servers (Optional) ====================
mcp_servers: []
# Example: Add custom MCP server
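With the book_fetcher block removed, the trimmed config keeps only the image and MCP sections shown above. A minimal sketch of reading it in Python, assuming the file is the project's config.yaml, PyYAML is installed, and the comfykit nesting used elsewhere in the ConfigManager:

```python
import yaml

# Load the trimmed config and pull out the local ComfyUI URL (keys as used by ConfigManager).
with open("config.yaml", encoding="utf-8") as f:
    config = yaml.safe_load(f)

comfyui_url = config["image"]["comfykit"]["comfyui_url"]  # e.g. http://127.0.0.1:8188
mcp_servers = config.get("mcp_servers", [])               # empty unless custom servers are added
print(comfyui_url, mcp_servers)
```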

View File

@@ -1,5 +1,5 @@
"""
ReelForge - AI-powered book video generator with pluggable capabilities
ReelForge - AI-powered video generator with pluggable capabilities
Convention-based capability system using FastMCP and LiteLLM.
@@ -12,7 +12,9 @@ Usage:
# Use capabilities
answer = await reelforge.llm("Explain atomic habits")
audio = await reelforge.tts("Hello world")
book = await reelforge.book_fetcher("原则")
# Generate video
result = await reelforge.generate_video(topic="AI in 2024")
"""
from reelforge.service import ReelForgeCore, reelforge
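The docstring above sketches the public API after the rename; a small driver that exercises it end to end, assuming the module-level singleton exposes initialize(), llm() and generate_video() as shown in this commit (the topic string is illustrative):

```python
import asyncio

from reelforge.service import reelforge


async def main() -> None:
    await reelforge.initialize()

    # Capabilities can still be called directly.
    answer = await reelforge.llm("Explain atomic habits")
    print(answer)

    # Generate a short video from a topic; book_name is no longer accepted.
    result = await reelforge.generate_video(topic="AI in 2024", n_frames=3)
    print(result.video_path)


if __name__ == "__main__":
    asyncio.run(main())
```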

View File

@@ -1,173 +0,0 @@
"""
Book Fetcher Capabilities
Fetch book information from various sources:
- Google Books API (free, stable, English/Chinese books)
- Douban (framework, requires custom implementation)
Convention: Tool names must be book_fetcher_{id}
Note: For LLM-based book info generation, use services/book_fetcher.py
which combines LLM capability with book fetcher logic.
"""
import json
from typing import Optional
import httpx
from loguru import logger
from pydantic import Field
from reelforge.core.mcp_server import reelforge_mcp
@reelforge_mcp.tool(
description="Fetch book information from Google Books API",
meta={
"reelforge": {
"display_name": "Google Books",
"description": "Fetch book info from Google Books (English/Chinese books)",
"is_default": True,
}
},
)
async def book_fetcher_google(
book_name: str = Field(description="Book name"),
author: Optional[str] = Field(default=None, description="Author name (optional)"),
) -> str:
"""
Fetch book information from Google Books API
Free API, no key required. Works for both English and Chinese books.
Args:
book_name: Book name
author: Author name (optional, for better search results)
Returns:
JSON string with book information:
{
"title": "Book title",
"author": "Author name",
"summary": "Book summary",
"genre": "Category",
"publication_year": "2018",
"cover_url": "https://...",
"isbn": "9781234567890"
}
Example:
>>> info = await book_fetcher_google("Atomic Habits")
>>> book = json.loads(info)
>>> print(book['title'])
Atomic Habits
"""
logger.info(f"Fetching book info from Google Books: {book_name}")
try:
# Build search query
query = book_name
if author:
query += f"+inauthor:{author}"
# Call Google Books API
async with httpx.AsyncClient() as client:
url = "https://www.googleapis.com/books/v1/volumes"
params = {
"q": query,
"maxResults": 1,
"langRestrict": "zh-CN,en", # Chinese and English
}
response = await client.get(url, params=params, timeout=10.0)
response.raise_for_status()
data = response.json()
if "items" not in data or len(data["items"]) == 0:
logger.warning(f"No results found for: {book_name}")
raise ValueError(f"No results found for book: {book_name}")
# Parse first result
item = data["items"][0]
volume_info = item.get("volumeInfo", {})
book_info = {
"title": volume_info.get("title", book_name),
"author": ", ".join(volume_info.get("authors", [author or "Unknown"])),
"summary": volume_info.get("description", "No description available"),
"genre": ", ".join(volume_info.get("categories", ["Uncategorized"])),
"publication_year": volume_info.get("publishedDate", "")[:4] if volume_info.get("publishedDate") else "",
"cover_url": volume_info.get("imageLinks", {}).get("thumbnail", ""),
"isbn": next(
(id_info["identifier"] for id_info in volume_info.get("industryIdentifiers", [])
if id_info["type"] in ["ISBN_13", "ISBN_10"]),
""
),
}
logger.info(f"✅ Successfully fetched from Google Books: {book_info['title']}")
return json.dumps(book_info, ensure_ascii=False, indent=2)
except httpx.HTTPError as e:
logger.error(f"HTTP error fetching from Google Books: {e}")
raise
except Exception as e:
logger.error(f"Error fetching from Google Books: {e}")
raise
@reelforge_mcp.tool(
description="Fetch book information from Douban (requires custom implementation)",
meta={
"reelforge": {
"display_name": "豆瓣读书 (Douban)",
"description": "Fetch book info from Douban (best for Chinese books) - requires custom implementation",
"is_default": False,
}
},
)
async def book_fetcher_douban(
book_name: str = Field(description="Book name"),
author: Optional[str] = Field(default=None, description="Author name (optional)"),
) -> str:
"""
Fetch book information from Douban
NOTE: The official Douban API is closed. This is a framework for a custom implementation.
You can implement this using:
1. Third-party Douban API services
2. Web scraping (be careful with rate limits)
3. Cached database
Args:
book_name: Book name
author: Author name (optional)
Returns:
JSON string with book information
Example implementation:
```python
# Option 1: Use third-party API
async with httpx.AsyncClient() as client:
url = "https://your-douban-api-service.com/search"
params = {"q": book_name}
response = await client.get(url, params=params)
data = response.json()
return json.dumps(data, ensure_ascii=False)
# Option 2: Web scraping
# Use BeautifulSoup + httpx to scrape Douban pages
# Option 3: Pre-built database
# Query your own book database
```
"""
logger.error("book_fetcher_douban is not implemented")
logger.info("To implement: Edit reelforge/capabilities/book_fetcher.py and add your logic")
raise NotImplementedError(
"book_fetcher_douban requires custom implementation. "
"Please edit reelforge/capabilities/book_fetcher.py to add your Douban API integration."
)

View File

@@ -57,9 +57,8 @@ async def image_comfykit(
Example:
# Local ComfyUI (default)
image_url = await image_comfykit(
workflow="workflows/book_cover.json",
title="Atomic Habits",
author="James Clear"
workflow="workflows/t2i_by_local_flux.json",
prompt="A peaceful mountain landscape"
)
# RunningHub cloud

View File

@@ -15,7 +15,7 @@ async def test_llm():
await app.initialize()
# Test prompt
prompt = "Explain the book 'Atomic Habits' by James Clear in 3 sentences."
prompt = "Explain the concept of atomic habits in 3 sentences."
logger.info(f"\n📝 Test Prompt: {prompt}\n")

View File

@@ -75,11 +75,6 @@ def _get_default_config() -> dict[str, Any]:
"comfykit": {
"comfyui_url": "http://127.0.0.1:8188"
}
},
"book_fetcher": {
"default": "google",
"google": None,
"douban": None
}
}

View File

@@ -111,10 +111,6 @@ class ConfigManager:
# Remove local config
self.config["image"]["comfykit"].pop("comfyui_url", None)
# Update Book Fetcher configuration
if "book_provider" in ui_values:
self.config["book_fetcher"]["default"] = ui_values["book_provider"]
self.save()
def _is_valid_api_key(self, api_key: str) -> bool:
@@ -249,10 +245,6 @@ class ConfigManager:
"""Get list of available image providers"""
return ["comfykit"]
def get_book_providers(self) -> List[str]:
"""Get list of available book fetcher providers"""
return ["google", "douban"]
def _create_default_config(self) -> Dict[str, Any]:
"""Create default configuration"""
return {
@@ -272,10 +264,6 @@ class ConfigManager:
"comfyui_url": "http://127.0.0.1:8188"
}
},
"book_fetcher": {
"default": "google",
"google": {}
},
"mcp_servers": []
}

View File

@@ -6,7 +6,7 @@ All capability tools MUST follow this naming pattern:
{type}_{id}
Where:
- type: MUST be one of the known capability types (llm, tts, book_fetcher, etc.)
- type: MUST be one of the known capability types (llm, tts, image, etc.)
- id: Unique identifier for this specific capability
Parsing strategy (Fail Fast):
@@ -18,7 +18,6 @@ Examples:
✅ llm_call → type: llm, id: call
✅ tts_edge → type: tts, id: edge
✅ image_comfykit → type: image, id: comfykit
✅ book_fetcher_google → type: book_fetcher, id: google
❌ call_llm → Wrong order
❌ llm-call → Use underscore, not dash
@@ -34,7 +33,6 @@ CAPABILITY_TYPES = {
"llm",
"tts",
"image",
"book_fetcher",
}
@@ -46,7 +44,7 @@ def parse_tool_name(tool_name: str) -> Optional[tuple[str, str]]:
Returns None for unknown types to fail early and expose configuration errors.
Args:
tool_name: Tool name following convention (e.g., "llm_qwen", "book_fetcher_douban")
tool_name: Tool name following convention (e.g., "llm_qwen", "tts_edge")
Returns:
(type, id) tuple if matches known type, None otherwise
@@ -55,8 +53,8 @@ def parse_tool_name(tool_name: str) -> Optional[tuple[str, str]]:
>>> parse_tool_name("llm_call")
('llm', 'call')
>>> parse_tool_name("book_fetcher_google")
('book_fetcher', 'google')
>>> parse_tool_name("tts_edge")
('tts', 'edge')
>>> parse_tool_name("unknown_type_id")
None # Fail fast - unknown type
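A minimal sketch of the fail-fast parsing the convention describes, matching against the known type prefixes; the real module may differ in detail:

```python
from typing import Optional

# Known capability types after this commit (book_fetcher removed).
CAPABILITY_TYPES = {"llm", "tts", "image"}


def parse_tool_name(tool_name: str) -> Optional[tuple[str, str]]:
    """Split '{type}_{id}' into (type, id); return None for unknown types."""
    # Try longer type names first so multi-word types would still match correctly.
    for cap_type in sorted(CAPABILITY_TYPES, key=len, reverse=True):
        prefix = f"{cap_type}_"
        if tool_name.startswith(prefix) and len(tool_name) > len(prefix):
            return cap_type, tool_name[len(prefix):]
    return None  # Fail fast on unknown or malformed names


assert parse_tool_name("llm_call") == ("llm", "call")
assert parse_tool_name("tts_edge") == ("tts", "edge")
assert parse_tool_name("unknown_type_id") is None
```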

View File

@@ -55,26 +55,26 @@ class StoryboardFrame:
@dataclass
class BookInfo:
"""Book information for visual display and narration generation"""
title: str # Book title
author: Optional[str] = None # Author
class ContentMetadata:
"""Content metadata for visual display and narration generation"""
title: str # Content title
author: Optional[str] = None # Author/creator
subtitle: Optional[str] = None # Subtitle
genre: Optional[str] = None # Genre/category
summary: Optional[str] = None # Book summary
summary: Optional[str] = None # Content summary
publication_year: Optional[str] = None # Publication year
cover_url: Optional[str] = None # Cover image URL
cover_url: Optional[str] = None # Cover/thumbnail image URL
@dataclass
class Storyboard:
"""Complete storyboard"""
topic: str # Topic (book name or discussion topic)
topic: str # Topic or content title
config: StoryboardConfig # Configuration
frames: List[StoryboardFrame] = field(default_factory=list)
# Book information (optional)
book_info: Optional[BookInfo] = None
# Content metadata (optional)
content_metadata: Optional[ContentMetadata] = None
# Final output
final_video_path: Optional[str] = None
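A short sketch of the renamed dataclasses in use, assuming the fields shown above and that StoryboardConfig provides defaults for anything not passed; the sample values are illustrative:

```python
from datetime import datetime

from reelforge.models.storyboard import ContentMetadata, Storyboard, StoryboardConfig

meta = ContentMetadata(
    title="Deep Work",            # content title (was BookInfo.title)
    author="Cal Newport",         # author/creator
    genre="Productivity",
    summary="Focused work in a distracted world.",
)

config = StoryboardConfig(n_storyboard=3)  # assumption: remaining fields have defaults
board = Storyboard(
    topic=meta.title,
    config=config,
    content_metadata=meta,
    created_at=datetime.now(),
)
print(board.content_metadata.title)
```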

View File

@@ -16,12 +16,6 @@ IMAGE_STYLE_PRESETS = {
"use_case": "通用场景,简单直观"
},
"book": {
"name": "书籍阅读场景",
"description": "warm book-related scenes, reading atmosphere, books and bookshelves, soft lighting, cozy learning environment, professional photography style",
"use_case": "书单号视频(推荐)"
},
"minimal": {
"name": "极简抽象",
"description": "minimalist abstract art, geometric shapes, clean composition, modern design, soft pastel colors",
@@ -122,17 +116,17 @@ def build_image_prompt_prompt(
narrations: List of narrations
min_words: Minimum word count
max_words: Maximum word count
image_style_preset: Preset style name (e.g., "book", "stick_figure", "minimal", "concept")
image_style_preset: Preset style name (e.g., "minimal", "stick_figure", "concept")
Available presets: see IMAGE_STYLE_PRESETS
image_style_description: Custom style description (overrides preset if provided)
Example: "warm book scenes, soft lighting, professional photography"
Example: "warm scenes, soft lighting, professional photography"
Returns:
Formatted prompt
Examples:
# Use preset style
>>> build_image_prompt_prompt(narrations, 50, 100, image_style_preset="book")
>>> build_image_prompt_prompt(narrations, 50, 100, image_style_preset="minimal")
# Use custom style
>>> build_image_prompt_prompt(

View File

@@ -1,87 +1,12 @@
"""
Narration generation prompt template
Supports three content sources:
1. Book: Generate book review narrations from book information
2. Topic: Generate narrations from a topic/theme
3. Content: Extract/refine narrations from user-provided content
Supports two content sources:
1. Topic: Generate narrations from a topic/theme
2. Content: Extract/refine narrations from user-provided content
"""
from typing import Optional
from reelforge.models.storyboard import BookInfo
# ==================== BOOK NARRATION PROMPT ====================
# For generating book review style narrations
BOOK_NARRATION_PROMPT = """# 角色定位
你是一位专业的书籍解读专家,擅长像"樊登读书"那样,用深入浅出的方式讲解书籍核心内容,帮助观众快速理解一本书的精华。
# 核心任务
用户会输入一本书的名称,你需要为这本书创作 {n_storyboard} 个书籍解读分镜,每个分镜包含"旁白"(用于TTS生成视频讲解音频),像在跟朋友推荐书籍一样,自然、有价值、引发共鸣
# 输出要求
## 旁白规范(书籍解读风格)
- 用途定位:用于TTS生成书单号短视频音频,像樊登读书那样讲解书籍精华
- 字数限制:严格控制在{min_words}~{max_words}个字(最低不少于{min_words}字)
- 结尾格式:结尾不要使用标点符号
- 内容要求:提炼书籍的核心观点,用通俗易懂的语言讲解,每个分镜传递一个有价值的洞察
- 风格要求:像跟朋友聊天一样,通俗、真诚、有启发性,避免学术化和生硬的表达
- 开场建议:第一个分镜可以用提问、场景、痛点等方式引发共鸣,吸引观众注意
- 核心内容:中间分镜提炼书中的关键观点,用生活化的例子帮助理解,像樊登那样深入浅出
- 结尾建议:最后一个分镜给出行动建议或启发,让观众有收获感
- 衔接建议:用"你有没有发现""其实""更重要的是""这本书告诉我们"等连接词,保持连贯
- 情绪与语气:温和、真诚、有热情,像一个读过书的朋友在分享收获
- 禁止项:不出现网址、表情符号、数字编号、不说空话套话、不过度煽情、不使用"这本书说"等生硬表达
- 字数检查:生成后必须自我验证不少于{min_words}个字,如不足则补充具体观点或生活化例子
- 内容结构:遵循"引发共鸣 → 提炼观点 → 深入讲解 → 给出启发"的叙述逻辑,确保每个分镜都有价值
## 分镜连贯性要求
- {n_storyboard} 个分镜应围绕这本书的核心内容展开,形成完整的书籍解读
- 遵循"吸引注意 → 提炼观点 → 深入讲解 → 给出启发"的叙述逻辑
- 每个分镜像同一个人在连贯分享读书心得,语气一致、自然流畅
- 通过书籍的核心观点自然过渡,形成完整的解读脉络
- 确保内容有价值、有启发,让观众觉得"这个视频值得看"
# 输出格式
严格按照以下JSON格式输出不要添加任何额外的文字说明
```json
{{
"narrations": [
"第一段{min_words}~{max_words}字,用提问或场景引发共鸣,吸引观众",
"第二段{min_words}~{max_words}字,提炼书中核心观点,深入浅出讲解",
"第三段{min_words}~{max_words}字,给出行动建议或启发,让观众有收获"
]
}}
```
# 示例输出
假设用户输入书名:"{topic}",输出示例:
```json
{{
"narrations": [
"你有没有这样的经历,明明知道该做什么,但就是做不到,这本书告诉我们,问题的关键在于习惯",
"作者提出了一个简单但有力的观点,改变不需要靠意志力,而是要设计一个好的系统",
"书中有个很有意思的例子,如果你想养成阅读习惯,不要逼自己每天读一小时,而是先从每天读一页开始",
"更重要的是,习惯的复利效应非常惊人,每天进步百分之一,一年后你会进步三十七倍",
"所以与其追求完美的计划,不如从一个小到不可能失败的习惯开始,然后坚持下去"
]
}}
```
# 重要提醒
1. 只输出JSON格式内容不要添加任何解释说明
2. 确保JSON格式严格正确可以被程序直接解析
3. 旁白必须严格控制在{min_words}~{max_words}字之间,用通俗易懂的语言,像樊登那样讲解
4. {n_storyboard} 个分镜要围绕这本书的核心观点展开,形成完整的书籍解读
5. 每个分镜都要有价值,提炼书中的洞察,避免空洞的介绍
6. 输出格式为 {{"narrations": [旁白数组]}} 的JSON对象
现在,请为书籍《{book_name}》创作 {n_storyboard} 个分镜的解读旁白。只输出JSON不要其他内容。
"""
# ==================== TOPIC NARRATION PROMPT ====================
@@ -208,37 +133,6 @@ CONTENT_NARRATION_PROMPT = """# 角色定位
# ==================== PROMPT BUILDER FUNCTIONS ====================
def build_book_narration_prompt(
book_info: BookInfo,
n_storyboard: int,
min_words: int,
max_words: int
) -> str:
"""
Build book review narration prompt
Args:
book_info: Book information
n_storyboard: Number of storyboard frames
min_words: Minimum word count
max_words: Maximum word count
Returns:
Formatted prompt
"""
# Build book description for prompt
book_name = book_info.title
if book_info.author:
book_name = f"{book_info.title} - {book_info.author}"
return BOOK_NARRATION_PROMPT.format(
book_name=book_name,
n_storyboard=n_storyboard,
min_words=min_words,
max_words=max_words
)
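With the book builder gone, callers switch to the topic builder; a hedged sketch of the replacement call, assuming its keyword names mirror the removed builder and the legacy wrapper below (n_storyboard, min_words, max_words):

```python
from reelforge.prompts.narration_template import build_topic_narration_prompt

# Assumed signature; only the topic/n_storyboard/min_words/max_words names appear in this diff.
prompt = build_topic_narration_prompt(
    topic="如何在信息爆炸时代保持深度思考",
    n_storyboard=3,
    min_words=20,
    max_words=40,
)
print(prompt)  # ready to hand to the LLM capability
```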
def build_topic_narration_prompt(
topic: str,
n_storyboard: int,
@@ -301,7 +195,7 @@ def build_narration_prompt(
Build narration generation prompt (legacy function for backward compatibility)
Args:
topic: Topic (book name or discussion topic)
topic: Topic or discussion theme
n_storyboard: Number of storyboard frames
min_words: Minimum word count
max_words: Maximum word count
@@ -311,8 +205,7 @@ def build_narration_prompt(
Note:
This function is kept for backward compatibility.
Use build_book_narration_prompt, build_topic_narration_prompt,
or build_content_narration_prompt instead.
Use build_topic_narration_prompt or build_content_narration_prompt instead.
"""
return build_topic_narration_prompt(
topic=topic,

View File

@@ -12,7 +12,7 @@ from reelforge.config import load_config
from reelforge.core.discovery import CapabilityRegistry
from reelforge.core.mcp_server import reelforge_mcp
from reelforge.core.config_manager import ConfigManager
from reelforge.services import LLMService, TTSService, ImageService, BookFetcherService
from reelforge.services import LLMService, TTSService, ImageService
class ReelForgeCore:
@@ -31,7 +31,6 @@ class ReelForgeCore:
# Use capabilities directly
answer = await reelforge.llm("Explain atomic habits")
audio = await reelforge.tts("Hello world")
book = await reelforge.book_fetcher("原则")
# Check active capabilities
print(f"Using LLM: {reelforge.llm.active}")
@@ -43,7 +42,7 @@ class ReelForgeCore:
├── config_manager (config injection + MCP calls)
├── llm (LLM service)
├── tts (TTS service)
└── book_fetcher (Book fetcher service)
└── image (Image service)
"""
def __init__(self, config_path: str = "config.yaml"):
@@ -62,7 +61,6 @@ class ReelForgeCore:
self.llm: Optional[LLMService] = None
self.tts: Optional[TTSService] = None
self.image: Optional[ImageService] = None
self.book_fetcher: Optional[BookFetcherService] = None
# Content generation services
self.narration_generator = None
@@ -73,8 +71,8 @@ class ReelForgeCore:
self.frame_composer = None
self.storyboard_processor = None
# Book video service (named as verb for direct calling)
self.generate_book_video = None
# Video generation service (named as verb for direct calling)
self.generate_video = None
async def initialize(self):
"""
@@ -106,8 +104,6 @@ class ReelForgeCore:
self.llm = LLMService(self.config_manager)
self.tts = TTSService(self.config_manager)
self.image = ImageService(self.config_manager)
self.book_fetcher = BookFetcherService(self.config_manager)
self.book_fetcher.set_core(self) # Set core reference for LLM fallback
# 5. Initialize content generation services
from reelforge.services.narration_generator import NarrationGeneratorService
@@ -125,10 +121,10 @@ class ReelForgeCore:
self.frame_composer = FrameComposerService()
self.storyboard_processor = StoryboardProcessorService(self)
# 7. Initialize book video service
from reelforge.services.book_video import BookVideoService
# 7. Initialize video generation service
from reelforge.services.video_generator import VideoGeneratorService
self.generate_book_video = BookVideoService(self)
self.generate_video = VideoGeneratorService(self)
self._initialized = True
logger.info("✅ ReelForge initialized successfully\n")
@@ -139,7 +135,6 @@ class ReelForgeCore:
from reelforge.capabilities import llm # noqa: F401
from reelforge.capabilities import tts # noqa: F401
from reelforge.capabilities import image # noqa: F401
from reelforge.capabilities import book_fetcher # noqa: F401
@property
def project_name(self) -> str:

View File

@@ -8,13 +8,12 @@ from reelforge.services.base import BaseService
from reelforge.services.llm import LLMService
from reelforge.services.tts import TTSService
from reelforge.services.image import ImageService
from reelforge.services.book_fetcher import BookFetcherService
from reelforge.services.video import VideoService
from reelforge.services.narration_generator import NarrationGeneratorService
from reelforge.services.image_prompt_generator import ImagePromptGeneratorService
from reelforge.services.frame_composer import FrameComposerService
from reelforge.services.storyboard_processor import StoryboardProcessorService
from reelforge.services.book_video import BookVideoService
from reelforge.services.video_generator import VideoGeneratorService
from reelforge.services.final_image_prompt import (
FinalImagePromptService,
StylePreset,
@@ -26,13 +25,12 @@ __all__ = [
"LLMService",
"TTSService",
"ImageService",
"BookFetcherService",
"VideoService",
"NarrationGeneratorService",
"ImagePromptGeneratorService",
"FrameComposerService",
"StoryboardProcessorService",
"BookVideoService",
"VideoGeneratorService",
"FinalImagePromptService",
"StylePreset",
"PresetValue",

View File

@@ -1,221 +0,0 @@
"""
Book Fetcher Service
Fetch book information from various sources (API or LLM).
"""
import json
from typing import Optional, Literal
from loguru import logger
from reelforge.services.base import BaseService
class BookFetcherService(BaseService):
"""
Book information fetcher service
Provides unified access to various book data sources:
- API: Google Books, Douban, etc. (via configured capability)
- LLM: Generate book info using LLM (flexible, works for any book)
Usage:
# Use default source (from config, usually 'google')
book_info = await reelforge.book_fetcher("原则")
# Explicitly use Google Books API
book_info = await reelforge.book_fetcher("Atomic Habits", query_source="google")
# Explicitly use LLM (good for Chinese books)
book_info = await reelforge.book_fetcher("人性的弱点", query_source="llm")
# Use Douban (if you implemented it)
book_info = await reelforge.book_fetcher(
book_name="原则",
author="瑞·达利欧",
query_source="douban"
)
"""
def __init__(self, config_manager):
super().__init__(config_manager, "book_fetcher")
self._core = None # Will be set by ReelForgeCore (for LLM query)
def set_core(self, core):
"""Set reference to ReelForgeCore (for LLM query)"""
self._core = core
async def __call__(
self,
book_name: str,
author: Optional[str] = None,
query_source: Optional[Literal["google", "douban", "llm"]] = None,
**kwargs
) -> dict:
"""
Fetch book information
Args:
book_name: Book name (required)
author: Author name (optional, improves matching accuracy)
query_source: Data source to query:
- "google": Google Books API
- "douban": Douban Books (requires implementation)
- "llm": Generate book info using LLM
- None: Use default from config (usually "google")
**kwargs: Additional provider-specific parameters
Returns:
Book information dict with fields:
- title: Book title
- author: Author name
- summary: Book summary
- genre: Book category/genre
- publication_year: Publication year (string)
- key_points: List of key points (only from LLM)
- cover_url: Cover image URL (only from API)
- isbn: ISBN code (only from API)
- source: Data source ("google", "douban", or "llm")
Examples:
>>> # Use default source (from config)
>>> book = await reelforge.book_fetcher("Atomic Habits")
>>> # Explicitly use Google Books
>>> book = await reelforge.book_fetcher("Atomic Habits", query_source="google")
>>> # Explicitly use LLM (good for Chinese books)
>>> book = await reelforge.book_fetcher("人性的弱点", query_source="llm")
>>> # Use Douban (if implemented)
>>> book = await reelforge.book_fetcher(
... "原则",
... author="瑞·达利欧",
... query_source="douban"
... )
>>> print(f"Title: {book['title']}")
>>> print(f"Source: {book['source']}")
"""
# Route to appropriate method based on query_source
if query_source == "llm":
# Use LLM to generate book info
return await self._fetch_via_llm(book_name, author)
else:
# Use API (google, douban, or default from config)
return await self._fetch_via_api(book_name, author, query_source, **kwargs)
async def _fetch_via_api(
self,
book_name: str,
author: Optional[str] = None,
query_source: Optional[str] = None,
**kwargs
) -> dict:
"""
Fetch book information via API capability
Args:
book_name: Book name
author: Author name (optional)
query_source: Specific capability to use ("google", "douban", or None for default)
**kwargs: Additional parameters
Returns:
Book information dict
Raises:
Exception: If API call fails
"""
params = {"book_name": book_name}
if author is not None:
params["author"] = author
params.update(kwargs)
# Call book_fetcher capability
# If query_source is specified (e.g., "google"), use it
# Otherwise use default from config
result_json = await self._config_manager.call(
self._capability_type,
cap_id=query_source, # None = use default from config
**params
)
result = json.loads(result_json)
result["source"] = query_source or self.active or "api"
logger.info(f"✅ Fetched book info from {result['source']}: {result.get('title', book_name)}")
return result
async def _fetch_via_llm(self, book_name: str, author: Optional[str] = None) -> dict:
"""
Generate book information using LLM
This method uses LLM to generate book information based on its knowledge.
Good for books that are not available in API databases or for Chinese books.
Args:
book_name: Book name
author: Author name (optional)
Returns:
Book information dict
Raises:
ValueError: If LLM response cannot be parsed
Exception: If LLM call fails
"""
if not self._core:
raise RuntimeError("ReelForgeCore not set. Cannot use LLM query.")
# Build prompt
author_info = f",作者是{author}" if author else ""
prompt = f"""请为书籍《{book_name}》{author_info}生成详细的书籍信息。
要求:
1. 如果你知道这本书,请提供真实准确的信息
2. 如果不确定,请基于书名和作者推测合理的信息
3. 严格按照JSON格式输出不要添加任何其他内容
输出格式JSON
{{
"title": "书名",
"author": "作者",
"summary": "书籍简介(100-200字,概括核心内容和价值)",
"genre": "书籍类型(如:自我成长、商业管理、心理学等)",
"publication_year": "2018",
"key_points": [
"核心观点1(20-30字)",
"核心观点2(20-30字)",
"核心观点3(20-30字)"
]
}}
只输出JSON不要其他内容。"""
# Call LLM
response = await self._core.llm(
prompt=prompt,
temperature=0.3, # Lower temperature for more factual responses
max_tokens=1000
)
# Parse JSON
try:
book_info = json.loads(response)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse LLM response as JSON: {e}")
logger.error(f"Response: {response[:200]}...")
raise ValueError(f"LLM returned invalid JSON for book: {book_name}")
# Ensure required fields exist
book_info.setdefault("title", book_name)
book_info.setdefault("author", author or "Unknown")
book_info.setdefault("summary", "No summary available")
book_info.setdefault("genre", "Unknown")
book_info.setdefault("publication_year", "")
book_info["source"] = "llm"
logger.info(f"✅ Generated book info via LLM: {book_info['title']}")
return book_info

View File

@@ -1,454 +0,0 @@
"""
Book Video Service
End-to-end service for generating book short videos.
"""
from datetime import datetime
from pathlib import Path
from typing import Optional, Callable
from loguru import logger
from reelforge.models.progress import ProgressEvent
from reelforge.models.storyboard import (
Storyboard,
StoryboardFrame,
StoryboardConfig,
BookInfo,
VideoGenerationResult
)
class BookVideoService:
"""
Book video generation service
Orchestrates the complete pipeline:
1. Generate narrations (LLM)
2. Generate image prompts (LLM)
3. Process each frame (TTS + Image + Compose + Video)
4. Concatenate all segments
5. Add BGM (optional)
"""
def __init__(self, reelforge_core):
"""
Initialize book video service
Args:
reelforge_core: ReelForgeCore instance
"""
self.core = reelforge_core
async def __call__(
self,
# === Content Source (Choose ONE, mutually exclusive) ===
book_name: Optional[str] = None,
author: Optional[str] = None,
topic: Optional[str] = None,
content: Optional[str] = None,
# === Optional Title (works with any source) ===
title: Optional[str] = None,
# === Basic Config ===
n_frames: int = 3,
voice_id: str = "zh-CN-YunjianNeural",
output_path: Optional[str] = None,
# === LLM Parameters ===
min_narration_words: int = 20,
max_narration_words: int = 40,
min_image_prompt_words: int = 50,
max_image_prompt_words: int = 100,
# === Image Parameters ===
image_width: int = 1024,
image_height: int = 1024,
image_style_preset: Optional[str] = None,
image_style_description: Optional[str] = None,
# === Video Parameters ===
video_width: int = 1080,
video_height: int = 1920,
video_fps: int = 30,
# === BGM Parameters ===
bgm_path: Optional[str] = None,
bgm_volume: float = 0.2,
bgm_mode: str = "loop",
# === Advanced Options ===
book_info: Optional[BookInfo] = None,
progress_callback: Optional[Callable[[ProgressEvent], None]] = None,
) -> VideoGenerationResult:
"""
Generate book short video from different content sources
Args:
book_name: Book name (e.g., "从零到一")
author: Book author (optional, pairs with book_name)
topic: Topic/theme (e.g., "如何提高学习效率")
content: User-provided content (any length)
Note: Must provide exactly ONE of: book_name, topic, or content
title: Video title (optional)
- If provided, use it as the video title
- If not provided, auto-generate based on source:
* book_name → use book title
* topic → use topic text
* content → LLM extracts title from content
n_frames: Number of storyboard frames (default 3)
voice_id: TTS voice ID (default "zh-CN-YunjianNeural")
output_path: Output video path (auto-generated if None)
min_narration_words: Min narration length
max_narration_words: Max narration length
min_image_prompt_words: Min image prompt length
max_image_prompt_words: Max image prompt length
image_width: Generated image width (default 1024)
image_height: Generated image height (default 1024)
image_style_preset: Preset style name (e.g., "book", "stick_figure", "minimal", "concept")
image_style_description: Custom style description (overrides preset)
video_width: Final video width (default 1080)
video_height: Final video height (default 1920)
video_fps: Video frame rate (default 30)
bgm_path: BGM path ("default", "happy", custom path, or None)
bgm_volume: BGM volume 0.0-1.0 (default 0.2)
bgm_mode: BGM mode "once" or "loop" (default "loop")
book_info: Book metadata (optional)
progress_callback: Progress callback function(message, progress)
Returns:
VideoGenerationResult with video path and metadata
Examples:
# Generate from book name
>>> result = await reelforge.generate_book_video(
... book_name="从零到一",
... author="彼得·蒂尔",
... n_frames=3,
... image_style_preset="book"
... )
# Generate from topic
>>> result = await reelforge.generate_book_video(
... topic="如何在信息爆炸时代保持深度思考",
... n_frames=3,
... bgm_path="default"
... )
# Generate from user content with auto-generated title
>>> result = await reelforge.generate_book_video(
... content="昨天我读了一本书,讲的是...",
... n_frames=3
... )
# Generate from user content with custom title
>>> result = await reelforge.generate_book_video(
... content="买房子,第一应该看的是楼盘的整体环境...",
... title="买房风水指南",
... n_frames=3
... )
>>> print(result.video_path)
"""
# ========== Step 0: Validate parameters (mutually exclusive) ==========
sources = [book_name, topic, content]
source_count = sum(x is not None for x in sources)
if source_count == 0:
raise ValueError(
"Must provide exactly ONE of: book_name, topic, or content"
)
elif source_count > 1:
raise ValueError(
"Cannot provide multiple sources. Choose ONE of: book_name, topic, or content"
)
# Determine source type
if book_name:
source_type = "book"
elif topic:
source_type = "topic"
else: # content
source_type = "content"
# Determine final title (priority: user-specified > auto-generated)
if title:
# User specified title, use it directly
final_title = title
logger.info(f"🚀 Starting book video generation from {source_type} with title: '{title}'")
else:
# Auto-generate title based on source
if source_type == "book":
final_title = f"{book_name}" + (f" - {author}" if author else "")
logger.info(f"🚀 Starting book video generation from book: '{final_title}'")
elif source_type == "topic":
final_title = topic
logger.info(f"🚀 Starting book video generation from topic: '{final_title}'")
else: # content
# Will generate title from content using LLM
logger.info(f"🚀 Starting book video generation from content ({len(content)} chars)")
final_title = None # Will be generated later
# Generate title from content if needed (before creating output path)
if source_type == "content" and final_title is None:
self._report_progress(progress_callback, "generating_title", 0.01)
final_title = await self._generate_title_from_content(content)
logger.info(f"✅ Generated title: {final_title}")
# Auto-generate output path if not provided
if output_path is None:
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
# Use first 10 chars of final_title for filename
safe_name = final_title[:10].replace('/', '_').replace(' ', '_')
output_path = f"output/{timestamp}_{safe_name}.mp4"
# Ensure output directory exists
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
# Create storyboard config
config = StoryboardConfig(
n_storyboard=n_frames,
min_narration_words=min_narration_words,
max_narration_words=max_narration_words,
min_image_prompt_words=min_image_prompt_words,
max_image_prompt_words=max_image_prompt_words,
video_width=video_width,
video_height=video_height,
video_fps=video_fps,
voice_id=voice_id,
image_width=image_width,
image_height=image_height
)
# Create storyboard
storyboard = Storyboard(
topic=final_title, # Use final_title as video title
config=config,
book_info=book_info,
created_at=datetime.now()
)
try:
# ========== Step 1: Route based on source type ==========
# Step 1a: Fetch book info if needed
if source_type == "book":
self._report_progress(progress_callback, "fetching_book_info", 0.03)
book_dict = await self.core.book_fetcher(
book_name=book_name,
author=author
)
# Convert dict to BookInfo object
fetched_book_info = BookInfo(
title=book_dict.get("title", book_name),
author=book_dict.get("author", author or "Unknown"),
summary=book_dict.get("summary", ""),
genre=book_dict.get("genre", ""),
publication_year=book_dict.get("publication_year", ""),
cover_url=book_dict.get("cover_url")
)
logger.info(f"✅ Fetched book info: {fetched_book_info.title}")
# Update storyboard with fetched book info
storyboard.book_info = fetched_book_info
else:
fetched_book_info = None
# Step 1b: Generate narrations
self._report_progress(progress_callback, "generating_narrations", 0.05)
narrations = await self.core.narration_generator.generate_narrations(
config=config,
source_type=source_type,
book_info=fetched_book_info if source_type == "book" else None,
topic=topic if source_type == "topic" else None,
content=content if source_type == "content" else None
)
logger.info(f"✅ Generated {len(narrations)} narrations")
# Step 2: Generate image prompts
self._report_progress(progress_callback, "generating_image_prompts", 0.15)
image_prompts = await self.core.image_prompt_generator.generate_image_prompts(
narrations=narrations,
config=config,
image_style_preset=image_style_preset,
image_style_description=image_style_description
)
logger.info(f"✅ Generated {len(image_prompts)} image prompts")
# Step 3: Create frames
for i, (narration, image_prompt) in enumerate(zip(narrations, image_prompts)):
frame = StoryboardFrame(
index=i,
narration=narration,
image_prompt=image_prompt,
created_at=datetime.now()
)
storyboard.frames.append(frame)
# Step 4: Process each frame
for i, frame in enumerate(storyboard.frames):
# Calculate fine-grained progress for this frame
base_progress = 0.2 # Frames processing starts at 20%
frame_range = 0.6 # Frames processing takes 60% (20%-80%)
per_frame_progress = frame_range / len(storyboard.frames)
# Create frame-specific progress callback
def frame_progress_callback(event: ProgressEvent):
"""Report sub-step progress within current frame"""
# Calculate overall progress: base + previous frames + current frame progress
overall_progress = base_progress + (per_frame_progress * i) + (per_frame_progress * event.progress)
# Forward the event with adjusted overall progress
if progress_callback:
adjusted_event = ProgressEvent(
event_type=event.event_type,
progress=overall_progress,
frame_current=event.frame_current,
frame_total=event.frame_total,
step=event.step,
action=event.action
)
progress_callback(adjusted_event)
# Report frame start
self._report_progress(
progress_callback,
"processing_frame",
base_progress + (per_frame_progress * i),
frame_current=i+1,
frame_total=len(storyboard.frames)
)
processed_frame = await self.core.storyboard_processor.process_frame(
frame=frame,
config=config,
total_frames=len(storyboard.frames),
progress_callback=frame_progress_callback
)
storyboard.total_duration += processed_frame.duration
logger.info(f"✅ Frame {i+1} completed ({processed_frame.duration:.2f}s)")
# Step 5: Concatenate videos
self._report_progress(progress_callback, "concatenating", 0.85)
segment_paths = [frame.video_segment_path for frame in storyboard.frames]
from reelforge.services.video import VideoService
video_service = VideoService()
final_video_path = video_service.concat_videos(
videos=segment_paths,
output=output_path,
bgm_path=bgm_path,
bgm_volume=bgm_volume,
bgm_mode=bgm_mode
)
storyboard.final_video_path = final_video_path
storyboard.completed_at = datetime.now()
logger.success(f"🎬 Video generation completed: {final_video_path}")
# Step 6: Create result
self._report_progress(progress_callback, "finalizing", 1.0)
video_path_obj = Path(final_video_path)
file_size = video_path_obj.stat().st_size
result = VideoGenerationResult(
video_path=final_video_path,
storyboard=storyboard,
duration=storyboard.total_duration,
file_size=file_size
)
logger.info(f"✅ Generated video: {final_video_path}")
logger.info(f" Duration: {storyboard.total_duration:.2f}s")
logger.info(f" Size: {file_size / (1024*1024):.2f} MB")
logger.info(f" Frames: {len(storyboard.frames)}")
return result
except Exception as e:
logger.error(f"❌ Video generation failed: {e}")
raise
def _report_progress(
self,
callback: Optional[Callable[[ProgressEvent], None]],
event_type: str,
progress: float,
**kwargs
):
"""
Report progress via callback
Args:
callback: Progress callback function
event_type: Type of progress event
progress: Progress value (0.0-1.0)
**kwargs: Additional event-specific parameters (frame_current, frame_total, etc.)
"""
if callback:
event = ProgressEvent(event_type=event_type, progress=progress, **kwargs)
callback(event)
logger.debug(f"Progress: {progress*100:.0f}% - {event_type}")
else:
logger.debug(f"Progress: {progress*100:.0f}% - {event_type}")
async def _generate_title_from_content(self, content: str) -> str:
"""
Generate a short, attractive title from user content using LLM
Args:
content: User-provided content
Returns:
Generated title (10 characters or less)
"""
# Take first 500 chars to avoid overly long prompts
content_preview = content[:500]
prompt = f"""请为以下内容生成一个简短、有吸引力的标题(10字以内)
内容:
{content_preview}
要求:
1. 简短精炼(10字以内)
2. 准确概括核心内容
3. 有吸引力,适合作为视频标题
4. 只输出标题文本,不要其他内容
标题:"""
# Call LLM to generate title
response = await self.core.llm(
prompt=prompt,
temperature=0.7,
max_tokens=50
)
# Clean up response
title = response.strip()
# Remove quotes if present
if title.startswith('"') and title.endswith('"'):
title = title[1:-1]
if title.startswith("'") and title.endswith("'"):
title = title[1:-1]
# Limit to 20 chars max (safety)
if len(title) > 20:
title = title[:20]
return title
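The per-frame progress math earlier in this file reserves 20% for setup and spreads 60% evenly across the frames; a quick numeric check of the formula with the same constants (frame counts illustrative):

```python
# overall = base + per_frame * i + per_frame * sub_progress, with base=0.2 and a 0.6 span.
base_progress = 0.2
frame_range = 0.6
n_frames = 3
per_frame = frame_range / n_frames  # 0.2 per frame

# Halfway through the second frame (i=1, sub-progress 0.5):
overall = base_progress + per_frame * 1 + per_frame * 0.5
print(round(overall, 2))  # 0.5 -> reported as 50% overall progress
```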

View File

@@ -69,19 +69,19 @@ class FinalImagePromptService:
Usage:
# With preset style
final = await reelforge.generate_final_image_prompt(
prompt="A beautiful book on a desk",
prompt="A peaceful mountain landscape",
style_preset=StylePreset.FUTURISTIC
)
# With custom style (any language)
final = await reelforge.generate_final_image_prompt(
prompt="A book",
prompt="A coffee cup on table",
custom_style_description="温馨的咖啡馆,暖色调"
)
# Only prompt (no style)
final = await reelforge.generate_final_image_prompt(
prompt="A book on a wooden desk"
prompt="A sunset over the ocean"
)
"""
@@ -114,7 +114,7 @@ class FinalImagePromptService:
- Join with comma: "{style_part}, {prompt}"
Args:
prompt: Base prompt (optional, e.g., "A beautiful book on a desk")
prompt: Base prompt (optional, e.g., "A peaceful landscape")
style_preset: Preset style from StylePreset enum (optional)
custom_style_description: Custom description in any language (optional)
Overrides style_preset if provided
@@ -125,21 +125,21 @@ class FinalImagePromptService:
Examples:
# With preset style (IDE autocomplete!)
final = await service(
prompt="A book on a desk",
prompt="A mountain landscape",
style_preset=StylePreset.FUTURISTIC
)
# Returns: "Futuristic sci-fi style..., A book on a desk"
# Returns: "Futuristic sci-fi style..., A mountain landscape"
# With custom style (any language)
final = await service(
prompt="A book",
prompt="A coffee cup",
custom_style_description="温馨的咖啡馆,暖色调"
)
# Returns: "Cozy coffee shop interior..., A book"
# Returns: "Cozy coffee shop interior..., A coffee cup"
# Only prompt
final = await service(prompt="A book on desk")
# Returns: "A book on desk"
final = await service(prompt="A sunset scene")
# Returns: "A sunset scene"
# Only style
final = await service(style_preset=StylePreset.MINIMAL)

View File

@@ -24,7 +24,7 @@ class HTMLFrameGenerator:
... topic="Why reading matters",
... text="Reading builds new neural pathways...",
... image="/path/to/image.png",
... ext={"book_title": "Atomic Habits", "book_author": "James Clear"}
... ext={"content_title": "Sample Title", "content_author": "Author Name"}
... )
"""
@@ -74,7 +74,7 @@ class HTMLFrameGenerator:
topic: Video topic/theme
text: Narration text for this frame
image: Path to AI-generated image
ext: Additional data (book_title, book_author, etc.)
ext: Additional data (content_title, content_author, etc.)
width: Frame width in pixels
height: Frame height in pixels

View File

@@ -17,9 +17,8 @@ class ImageService(BaseService):
Usage:
# Direct call with workflow path
image_path = await reelforge.image(
workflow="workflows/book_cover.json",
title="Atomic Habits",
author="James Clear"
workflow="workflows/t2i_by_local_flux.json",
prompt="A peaceful mountain landscape"
)
# Returns: "http://comfyui.local/view?filename=..."
@@ -52,10 +51,10 @@ class ImageService(BaseService):
Image URL or path (str)
Example:
# Generate book cover
# Generate image
image_url = await reelforge.image(
workflow="workflows/book_cover.json",
title="Atomic Habits",
workflow="workflows/t2i_by_local_flux.json",
prompt="A beautiful landscape",
author="James Clear",
genre="Self-Help"
)
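The updated example above still carries the author and genre arguments left over from the old book-cover workflow; a call consistent with the new text-to-image workflow might look like this (a sketch, assuming the workflow needs only a text prompt):

```python
import asyncio

from reelforge.service import reelforge


async def demo_image() -> None:
    await reelforge.initialize()
    # Assumption: the t2i workflow takes only a prompt; author/genre are not required.
    image_url = await reelforge.image(
        workflow="workflows/t2i_by_local_flux.json",
        prompt="A peaceful mountain landscape at golden hour, soft light",
    )
    print(image_url)  # e.g. http://comfyui.local/view?filename=...


asyncio.run(demo_image())
```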

View File

@@ -68,7 +68,7 @@ class LLMService(BaseService):
# Override with custom parameters
answer = await reelforge.llm(
"Summarize the book 'Atomic Habits' in 3 sentences",
"Explain the concept of atomic habits in 3 sentences",
api_key="sk-custom-key",
base_url="https://api.custom.com/v1",
model="custom-model",

View File

@@ -1,10 +1,9 @@
"""
Narration generation service
Supports three content sources:
1. Book: Generate book review narrations from book information
2. Topic: Generate narrations from a topic/theme
3. Content: Extract/refine narrations from user-provided content
Supports two content sources:
1. Topic: Generate narrations from a topic/theme
2. Content: Extract/refine narrations from user-provided content
"""
import json
@@ -13,9 +12,8 @@ from typing import List, Optional, Literal
from loguru import logger
from reelforge.models.storyboard import StoryboardConfig, BookInfo
from reelforge.models.storyboard import StoryboardConfig, ContentMetadata
from reelforge.prompts.narration_template import (
build_book_narration_prompt,
build_topic_narration_prompt,
build_content_narration_prompt,
build_narration_prompt # Keep for backward compatibility
@@ -37,8 +35,8 @@ class NarrationGeneratorService:
async def generate_narrations(
self,
config: StoryboardConfig,
source_type: Literal["book", "topic", "content"],
book_info: Optional[BookInfo] = None,
source_type: Literal["topic", "content"],
content_metadata: Optional[ContentMetadata] = None,
topic: Optional[str] = None,
content: Optional[str] = None,
) -> List[str]:
@@ -47,8 +45,8 @@ class NarrationGeneratorService:
Args:
config: Storyboard configuration
source_type: Type of content source ("book", "topic", or "content")
book_info: Book information (required if source_type="book")
source_type: Type of content source ("topic" or "content")
content_metadata: Content metadata (optional, not currently used)
topic: Topic/theme (required if source_type="topic")
content: User-provided content (required if source_type="content")
@@ -60,13 +58,6 @@ class NarrationGeneratorService:
json.JSONDecodeError: If unable to parse LLM response as JSON
Examples:
# Generate from book
>>> narrations = await service.generate_narrations(
... config=config,
... source_type="book",
... book_info=book_info
... )
# Generate from topic
>>> narrations = await service.generate_narrations(
... config=config,
@@ -82,18 +73,7 @@ class NarrationGeneratorService:
... )
"""
# 1. Build prompt based on source_type
if source_type == "book":
if book_info is None:
raise ValueError("book_info is required when source_type='book'")
logger.info(f"Generating book review narrations for: {book_info.title}")
prompt = build_book_narration_prompt(
book_info=book_info,
n_storyboard=config.n_storyboard,
min_words=config.min_narration_words,
max_words=config.max_narration_words
)
elif source_type == "topic":
if source_type == "topic":
if topic is None:
raise ValueError("topic is required when source_type='topic'")
logger.info(f"Generating topic narrations for: {topic}")

View File

@@ -217,17 +217,17 @@ class StoryboardProcessorService:
f"Available templates: classic, modern, minimal"
)
# Get storyboard for book info
# Get storyboard for content metadata
storyboard = getattr(self.core, '_current_storyboard', None)
book_info = storyboard.book_info if storyboard else None
content_metadata = storyboard.content_metadata if storyboard else None
# Build ext data
ext = {}
if book_info:
ext["book_title"] = book_info.title or ""
ext["book_author"] = book_info.author or ""
ext["book_subtitle"] = book_info.subtitle or ""
ext["book_genre"] = book_info.genre or ""
if content_metadata:
ext["content_title"] = content_metadata.title or ""
ext["content_author"] = content_metadata.author or ""
ext["content_subtitle"] = content_metadata.subtitle or ""
ext["content_genre"] = content_metadata.genre or ""
# Generate frame using HTML
generator = HTMLFrameGenerator(str(template_path))

View File

@@ -1,7 +1,7 @@
"""
Book Video Service
Video Generator Service
End-to-end service for generating book short videos.
End-to-end service for generating short videos from content.
"""
from datetime import datetime
@@ -15,14 +15,14 @@ from reelforge.models.storyboard import (
Storyboard,
StoryboardFrame,
StoryboardConfig,
BookInfo,
ContentMetadata,
VideoGenerationResult
)
class BookVideoService:
class VideoGeneratorService:
"""
Book video generation service
Video generation service
Orchestrates the complete pipeline:
1. Generate narrations (LLM)
@@ -34,7 +34,7 @@ class BookVideoService:
def __init__(self, reelforge_core):
"""
Initialize book video service
Initialize video generator service
Args:
reelforge_core: ReelForgeCore instance
@@ -44,8 +44,6 @@ class BookVideoService:
async def __call__(
self,
# === Content Source (Choose ONE, mutually exclusive) ===
book_name: Optional[str] = None,
author: Optional[str] = None,
topic: Optional[str] = None,
content: Optional[str] = None,
@@ -83,24 +81,21 @@ class BookVideoService:
bgm_mode: Literal["once", "loop"] = "loop",
# === Advanced Options ===
book_info: Optional[BookInfo] = None,
content_metadata: Optional[ContentMetadata] = None,
progress_callback: Optional[Callable[[ProgressEvent], None]] = None,
) -> VideoGenerationResult:
"""
Generate book short video from different content sources
Generate short video from different content sources
Args:
book_name: Book name (e.g., "从零到一")
author: Book author (optional, pairs with book_name)
topic: Topic/theme (e.g., "如何提高学习效率")
content: User-provided content (any length)
Note: Must provide exactly ONE of: book_name, topic, or content
Note: Must provide exactly ONE of: topic or content
title: Video title (optional)
- If provided, use it as the video title
- If not provided, auto-generate based on source:
* book_name → use book title
* topic → use topic text
* content → LLM extracts title from content
@@ -115,7 +110,7 @@ class BookVideoService:
image_width: Generated image width (default 1024)
image_height: Generated image height (default 1024)
image_style_preset: Preset style name (e.g., "book", "stick_figure", "minimal", "concept")
image_style_preset: Preset style name (e.g., "minimal", "concept", "cinematic")
image_style_description: Custom style description (overrides preset)
video_width: Final video width (default 1080)
@@ -129,36 +124,28 @@ class BookVideoService:
bgm_volume: BGM volume 0.0-1.0 (default 0.2)
bgm_mode: BGM mode "once" or "loop" (default "loop")
book_info: Book metadata (optional)
content_metadata: Content metadata (optional, for display)
progress_callback: Progress callback function(message, progress)
Returns:
VideoGenerationResult with video path and metadata
Examples:
# Generate from book name
>>> result = await reelforge.generate_book_video(
... book_name="从零到一",
... author="彼得·蒂尔",
... n_frames=5,
... image_style_preset="book"
... )
# Generate from topic
>>> result = await reelforge.generate_book_video(
>>> result = await reelforge.generate_video(
... topic="如何在信息爆炸时代保持深度思考",
... n_frames=5,
... bgm_path="default"
... )
# Generate from user content with auto-generated title
>>> result = await reelforge.generate_book_video(
>>> result = await reelforge.generate_video(
... content="昨天我读了一本书,讲的是...",
... n_frames=3
... )
# Generate from user content with custom title
>>> result = await reelforge.generate_book_video(
>>> result = await reelforge.generate_video(
... content="买房子,第一应该看的是楼盘的整体环境...",
... title="买房风水指南",
... n_frames=5
@@ -166,22 +153,20 @@ class BookVideoService:
>>> print(result.video_path)
"""
# ========== Step 0: Validate parameters (mutually exclusive) ==========
sources = [book_name, topic, content]
sources = [topic, content]
source_count = sum(x is not None for x in sources)
if source_count == 0:
raise ValueError(
"Must provide exactly ONE of: book_name, topic, or content"
"Must provide exactly ONE of: topic or content"
)
elif source_count > 1:
raise ValueError(
"Cannot provide multiple sources. Choose ONE of: book_name, topic, or content"
"Cannot provide multiple sources. Choose ONE of: topic or content"
)
# Determine source type
if book_name:
source_type = "book"
elif topic:
if topic:
source_type = "topic"
else: # content
source_type = "content"
@@ -190,18 +175,15 @@ class BookVideoService:
if title:
# User specified title, use it directly
final_title = title
logger.info(f"🚀 Starting book video generation from {source_type} with title: '{title}'")
logger.info(f"🚀 Starting video generation from {source_type} with title: '{title}'")
else:
# Auto-generate title based on source
if source_type == "book":
final_title = f"{book_name}" + (f" - {author}" if author else "")
logger.info(f"🚀 Starting book video generation from book: '{final_title}'")
elif source_type == "topic":
if source_type == "topic":
final_title = topic
logger.info(f"🚀 Starting book video generation from topic: '{final_title}'")
logger.info(f"🚀 Starting video generation from topic: '{final_title}'")
else: # content
# Will generate title from content using LLM
logger.info(f"🚀 Starting book video generation from content ({len(content)} chars)")
logger.info(f"🚀 Starting video generation from content ({len(content)} chars)")
final_title = None # Will be generated later
# Generate title from content if needed (before creating output path)
@@ -240,7 +222,7 @@ class BookVideoService:
storyboard = Storyboard(
topic=final_title, # Use final_title as video title
config=config,
book_info=book_info,
content_metadata=content_metadata,
created_at=datetime.now()
)
@@ -248,38 +230,12 @@ class BookVideoService:
self.core._current_storyboard = storyboard
try:
# ========== Step 1: Route based on source type ==========
# Step 1a: Fetch book info if needed
if source_type == "book":
self._report_progress(progress_callback, "fetching_book_info", 0.03)
book_dict = await self.core.book_fetcher(
book_name=book_name,
author=author
)
# Convert dict to BookInfo object
fetched_book_info = BookInfo(
title=book_dict.get("title", book_name),
author=book_dict.get("author", author or "Unknown"),
summary=book_dict.get("summary", ""),
genre=book_dict.get("genre", ""),
publication_year=book_dict.get("publication_year", ""),
cover_url=book_dict.get("cover_url")
)
logger.info(f"✅ Fetched book info: {fetched_book_info.title}")
# Update storyboard with fetched book info
storyboard.book_info = fetched_book_info
else:
fetched_book_info = None
# Step 1b: Generate narrations
# ========== Step 1: Generate narrations ==========
self._report_progress(progress_callback, "generating_narrations", 0.05)
narrations = await self.core.narration_generator.generate_narrations(
config=config,
source_type=source_type,
book_info=fetched_book_info if source_type == "book" else None,
content_metadata=None, # No metadata needed for topic/content
topic=topic if source_type == "topic" else None,
content=content if source_type == "content" else None
)
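After the change, the validation above accepts exactly one of topic or content; a small sketch of the failure modes, reusing the error messages from this diff:

```python
import asyncio

from reelforge.service import reelforge


async def demo_validation() -> None:
    await reelforge.initialize()

    try:
        # Both sources at once now fails fast.
        await reelforge.generate_video(topic="深度思考", content="一段自定义文案")
    except ValueError as e:
        print(e)  # Cannot provide multiple sources. Choose ONE of: topic or content

    try:
        # No source at all fails as well.
        await reelforge.generate_video()
    except ValueError as e:
        print(e)  # Must provide exactly ONE of: topic or content


asyncio.run(demo_validation())
```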

View File

@@ -97,8 +97,8 @@ def get_data_path(*paths: str) -> str:
Absolute path to data directory or file
Example:
get_data_path("books", "book.json")
# Returns: "/path/to/project/data/books/book.json"
get_data_path("videos", "output.mp4")
# Returns: "/path/to/project/data/videos/output.mp4"
"""
data_path = get_root_path("data")
if paths:

View File

@@ -18,7 +18,7 @@ from reelforge.service import reelforge
await reelforge.initialize()
# Use preset template
result = await reelforge.generate_book_video(
result = await reelforge.generate_video(
topic="为什么阅读改变命运",
frame_template="classic" # or "modern", "minimal"
)
@@ -76,7 +76,7 @@ templates/my-cyberpunk.html
#### Step 5: Use Template
```python
await reelforge.generate_book_video(
await reelforge.generate_video(
topic="...",
frame_template="my-cyberpunk" # Use your template
)

web.py (64 changed lines)
View File

@@ -1,7 +1,7 @@
"""
ReelForge Web UI
A simple web interface for generating book short videos.
A simple web interface for generating short videos from content.
"""
import asyncio
@@ -19,8 +19,8 @@ from reelforge.models.progress import ProgressEvent
# Setup page config (must be first)
st.set_page_config(
page_title="ReelForge - AI Book Video Generator",
page_icon="📚",
page_title="ReelForge - AI Video Generator",
page_icon="🎬",
layout="wide",
initial_sidebar_state="collapsed",
)
@@ -105,7 +105,7 @@ def generate_style_preview_cached(
# Generate final prompt using the new service
final_prompt = run_async(reelforge.generate_final_image_prompt(
prompt="A beautiful book on a desk",
prompt="A peaceful mountain landscape",
style_preset=style_preset,
custom_style_description=custom_style_description
))
@@ -146,8 +146,6 @@ def get_reelforge():
def init_session_state():
"""Initialize session state variables"""
if "book_info" not in st.session_state:
st.session_state.book_info = None
if "language" not in st.session_state:
st.session_state.language = "zh_CN"
@@ -163,7 +161,7 @@ def render_advanced_settings(config_manager: ConfigManager):
# Expand if not configured, collapse if configured
with st.expander(tr("settings.title"), expanded=not is_configured):
# 2-column layout: LLM | Image+Book
# 2-column layout: LLM | Image
llm_col, image_col = st.columns(2)
# ====================================================================
@@ -396,7 +394,7 @@ def main():
left_col, middle_col, right_col = st.columns([1, 1, 1])
# ========================================================================
# Left Column: Book & Content
# Left Column: Content Input
# ========================================================================
with left_col:
with st.container(border=True):
@@ -405,57 +403,16 @@ def main():
# Input mode selection
input_mode = st.radio(
"Input Mode",
[tr("input_mode.book"), tr("input_mode.topic"), tr("input_mode.custom")],
[tr("input_mode.topic"), tr("input_mode.custom")],
horizontal=True,
label_visibility="collapsed"
)
book_name = None
topic = None
content = None
title = None
if input_mode == tr("input_mode.book"):
book_name = st.text_input(
tr("input.book_name"),
placeholder=tr("input.book_name_placeholder"),
help=tr("input.book_name_help")
)
# Book search button
if st.button(tr("book.search"), use_container_width=True):
if book_name:
with st.spinner(tr("book.searching")):
try:
# Search book using BookFetcherService
book_info = run_async(reelforge.book_fetcher(book_name))
st.session_state.book_info = book_info
st.success(tr("book.found"))
except Exception as e:
st.error(tr("book.not_found", error=str(e)))
else:
st.warning(tr("book.name_required"))
# Display book info if available
if st.session_state.book_info:
st.markdown("---")
book_info = st.session_state.book_info
# Book cover (if available)
if hasattr(book_info, 'cover_url') and book_info.cover_url:
st.image(book_info.cover_url, width=200)
# Book details
st.markdown(f"**{tr('book.title')}:** {book_info.title}")
if hasattr(book_info, 'author'):
st.markdown(f"**{tr('book.author')}:** {book_info.author}")
if hasattr(book_info, 'rating'):
st.markdown(f"**{tr('book.rating')}:** ⭐ {book_info.rating}")
if hasattr(book_info, 'summary'):
with st.expander(tr("book.summary")):
st.write(book_info.summary)
elif input_mode == tr("input_mode.topic"):
if input_mode == tr("input_mode.topic"):
topic = st.text_area(
tr("input.topic"),
placeholder=tr("input.topic_placeholder"),
@@ -688,7 +645,7 @@ def main():
st.stop()
# Validate input
if not book_name and not topic and not content:
if not topic and not content:
st.error(tr("error.input_required"))
st.stop()
@@ -737,8 +694,7 @@ def main():
# Preset mode: pass preset name
style_preset_param = style_preset
result = run_async(reelforge.generate_book_video(
book_name=book_name if book_name else None,
result = run_async(reelforge.generate_video(
topic=topic if topic else None,
content=content if content else None,
title=title if title else None,