支持批量生成功能

This commit is contained in:
puke
2025-11-18 17:51:52 +08:00
parent dfa2f76c5b
commit 2ac03b4b48
5 changed files with 577 additions and 45 deletions

View File

@@ -21,60 +21,152 @@ from web.utils.async_helpers import get_project_version
def render_content_input():
"""Render content input section (left column)"""
"""Render content input section (left column) with batch support"""
with st.container(border=True):
st.markdown(f"**{tr('section.content_input')}**")
# Processing mode selection
mode = st.radio(
"Processing Mode",
["generate", "fixed"],
horizontal=True,
format_func=lambda x: tr(f"mode.{x}"),
label_visibility="collapsed"
# ====================================================================
# Step 1: Batch mode toggle (highest priority)
# ====================================================================
batch_mode = st.checkbox(
tr("batch.mode_label"),
value=False,
help=tr("batch.mode_help")
)
# Text input (unified for both modes)
text_placeholder = tr("input.topic_placeholder") if mode == "generate" else tr("input.content_placeholder")
text_height = 120 if mode == "generate" else 200
text_help = tr("input.text_help_generate") if mode == "generate" else tr("input.text_help_fixed")
if not batch_mode:
# ================================================================
# Single task mode (original logic, unchanged)
# ================================================================
# Processing mode selection
mode = st.radio(
"Processing Mode",
["generate", "fixed"],
horizontal=True,
format_func=lambda x: tr(f"mode.{x}"),
label_visibility="collapsed"
)
# Text input (unified for both modes)
text_placeholder = tr("input.topic_placeholder") if mode == "generate" else tr("input.content_placeholder")
text_height = 120 if mode == "generate" else 200
text_help = tr("input.text_help_generate") if mode == "generate" else tr("input.text_help_fixed")
text = st.text_area(
tr("input.text"),
placeholder=text_placeholder,
height=text_height,
help=text_help
)
# Title input (optional for both modes)
title = st.text_input(
tr("input.title"),
placeholder=tr("input.title_placeholder"),
help=tr("input.title_help")
)
# Number of scenes (only show in generate mode)
if mode == "generate":
n_scenes = st.slider(
tr("video.frames"),
min_value=3,
max_value=30,
value=5,
help=tr("video.frames_help"),
label_visibility="collapsed"
)
st.caption(tr("video.frames_label", n=n_scenes))
else:
# Fixed mode: n_scenes is ignored, set default value
n_scenes = 5
st.info(tr("video.frames_fixed_mode_hint"))
return {
"batch_mode": False,
"mode": mode,
"text": text,
"title": title,
"n_scenes": n_scenes
}
text = st.text_area(
tr("input.text"),
placeholder=text_placeholder,
height=text_height,
help=text_help
)
# Title input (optional for both modes)
title = st.text_input(
tr("input.title"),
placeholder=tr("input.title_placeholder"),
help=tr("input.title_help")
)
# Number of scenes (only show in generate mode)
if mode == "generate":
else:
# ================================================================
# Batch mode (simplified YAGNI version)
# ================================================================
st.markdown(f"**{tr('batch.section_title')}**")
# Batch rules info
st.info(f"""
**{tr('batch.rules_title')}**
- ✅ {tr('batch.rule_1')}
- ✅ {tr('batch.rule_2')}
- ✅ {tr('batch.rule_3')}
""")
# Batch topics input
text_input = st.text_area(
tr("batch.topics_label"),
height=300,
placeholder=tr("batch.topics_placeholder"),
help=tr("batch.topics_help")
)
# Split topics by newline
if text_input:
# Simple split by newline, filter empty lines
topics = [
line.strip()
for line in text_input.strip().split('\n')
if line.strip()
]
if topics:
# Check count limit
if len(topics) > 100:
st.error(tr("batch.count_error", count=len(topics)))
topics = []
else:
st.success(tr("batch.count_success", count=len(topics)))
# Preview topics list
with st.expander(tr("batch.preview_title"), expanded=False):
for i, topic in enumerate(topics, 1):
st.markdown(f"`{i}.` {topic}")
else:
topics = []
else:
topics = []
st.markdown("---")
# Title prefix (optional)
title_prefix = st.text_input(
tr("batch.title_prefix_label"),
placeholder=tr("batch.title_prefix_placeholder"),
help=tr("batch.title_prefix_help")
)
# Number of scenes (unified for all videos)
n_scenes = st.slider(
tr("video.frames"),
tr("batch.n_scenes_label"),
min_value=3,
max_value=30,
value=5,
help=tr("video.frames_help"),
label_visibility="collapsed"
help=tr("batch.n_scenes_help")
)
st.caption(tr("video.frames_label", n=n_scenes))
else:
# Fixed mode: n_scenes is ignored, set default value
n_scenes = 5
st.info(tr("video.frames_fixed_mode_hint"))
return {
"mode": mode,
"text": text,
"title": title,
"n_scenes": n_scenes
}
st.caption(tr("batch.n_scenes_caption", n=n_scenes))
# Config info
st.info(f"📌 {tr('batch.config_info')}")
return {
"batch_mode": True,
"topics": topics,
"mode": "generate", # Fixed to AI generate content
"title_prefix": title_prefix,
"n_scenes": n_scenes,
}
def render_bgm_section():

View File

@@ -29,6 +29,19 @@ from pixelle_video.config import config_manager
def render_output_preview(pixelle_video, video_params):
"""Render output preview section (right column)"""
# Check if batch mode
is_batch = video_params.get("batch_mode", False)
if is_batch:
# Batch generation mode
render_batch_output(pixelle_video, video_params)
else:
# Single video generation mode (original logic)
render_single_output(pixelle_video, video_params)
def render_single_output(pixelle_video, video_params):
"""Render single video generation output (original logic, unchanged)"""
# Extract parameters from video_params dict
text = video_params.get("text", "")
mode = video_params.get("mode", "generate")
@@ -193,4 +206,179 @@ def render_output_preview(pixelle_video, video_params):
st.error(tr("status.error", error=str(e)))
logger.exception(e)
st.stop()
def render_batch_output(pixelle_video, video_params):
"""Render batch generation output (minimal, redirect to History)"""
topics = video_params.get("topics", [])
with st.container(border=True):
st.markdown(f"**{tr('batch.section_generation')}**")
# Check if topics are provided
if not topics:
st.warning(tr("batch.no_topics"))
return
# Check system configuration
if not config_manager.validate():
st.warning(tr("settings.not_configured"))
return
batch_count = len(topics)
# Display batch info
st.info(tr("batch.prepare_info", count=batch_count))
# Estimated time (optional)
estimated_minutes = batch_count * 3 # Assume 3 minutes per video
st.caption(tr("batch.estimated_time", minutes=estimated_minutes))
# Generate button with batch semantics
if st.button(
tr("batch.generate_button", count=batch_count),
type="primary",
use_container_width=True,
help=tr("batch.generate_help")
):
# Prepare shared config
shared_config = {
"title_prefix": video_params.get("title_prefix"),
"n_scenes": video_params.get("n_scenes", 5),
"image_workflow": video_params.get("image_workflow"),
"frame_template": video_params.get("frame_template"),
"prompt_prefix": video_params.get("prompt_prefix", ""),
"bgm_path": video_params.get("bgm_path"),
"bgm_volume": video_params.get("bgm_volume", 0.2),
"tts_inference_mode": video_params.get("tts_inference_mode", "local"),
}
# Add TTS parameters
if shared_config["tts_inference_mode"] == "local":
shared_config["tts_voice"] = video_params.get("tts_voice")
shared_config["tts_speed"] = video_params.get("tts_speed")
else:
shared_config["tts_workflow"] = video_params.get("tts_workflow")
if video_params.get("ref_audio"):
shared_config["ref_audio"] = str(video_params["ref_audio"])
# Add template parameters
if video_params.get("template_params"):
shared_config["template_params"] = video_params["template_params"]
# UI containers
overall_progress_container = st.container()
current_task_container = st.container()
# Overall progress UI
overall_progress_bar = overall_progress_container.progress(0)
overall_status = overall_progress_container.empty()
# Current task progress UI
current_task_title = current_task_container.empty()
current_task_progress = current_task_container.progress(0)
current_task_status = current_task_container.empty()
# Overall progress callback
def update_overall_progress(current, total, topic):
progress = (current - 1) / total
overall_progress_bar.progress(progress)
overall_status.markdown(
f"📊 **{tr('batch.overall_progress')}**: {current}/{total} ({int(progress * 100)}%)"
)
# Single task progress callback factory
def make_task_progress_callback(task_idx, topic):
def callback(event: ProgressEvent):
# Display current task title
current_task_title.markdown(f"🎬 **{tr('batch.current_task')} {task_idx}**: {topic}")
# Update task detailed progress
if event.event_type == "frame_step":
action_key = f"progress.step_{event.action}"
action_text = tr(action_key)
message = tr(
"progress.frame_step",
current=event.frame_current,
total=event.frame_total,
step=event.step,
action=action_text
)
elif event.event_type == "processing_frame":
message = tr(
"progress.frame",
current=event.frame_current,
total=event.frame_total
)
else:
message = tr(f"progress.{event.event_type}")
current_task_progress.progress(event.progress)
current_task_status.text(message)
return callback
# Execute batch generation
from web.utils.batch_manager import SimpleBatchManager
import time
batch_manager = SimpleBatchManager()
start_time = time.time()
batch_result = batch_manager.execute_batch(
pixelle_video=pixelle_video,
topics=topics,
shared_config=shared_config,
overall_progress_callback=update_overall_progress,
task_progress_callback_factory=make_task_progress_callback
)
total_time = time.time() - start_time
# Clear progress displays
overall_progress_bar.progress(1.0)
overall_status.markdown(f"✅ **{tr('batch.completed')}**")
current_task_title.empty()
current_task_progress.empty()
current_task_status.empty()
# Display results summary
st.markdown("---")
st.markdown(f"**{tr('batch.results_title')}**")
col1, col2, col3 = st.columns(3)
col1.metric(tr("batch.total"), batch_result["total_count"])
col2.metric(f"{tr('batch.success')}", batch_result["success_count"])
col3.metric(f"{tr('batch.failed')}", batch_result["failed_count"])
# Display total time
minutes = int(total_time / 60)
seconds = int(total_time % 60)
st.caption(f"⏱️ {tr('batch.total_time')}: {minutes}{tr('batch.minutes')}{seconds}{tr('batch.seconds')}")
# Redirect to History page
st.markdown("---")
st.success(tr("batch.success_message"))
st.info(tr("batch.view_in_history"))
# Button to go to History page
if st.button(
f"📚 {tr('batch.goto_history')}",
type="secondary",
use_container_width=True
):
st.switch_page("pages/2_📚_History.py")
# Show failed tasks if any
if batch_result["errors"]:
st.markdown("---")
st.markdown(f"#### {tr('batch.failed_list')}")
for item in batch_result["errors"]:
with st.expander(f"🔴 {tr('batch.task')} {item['index']}: {item['topic']}", expanded=False):
st.error(f"**{tr('batch.error')}**: {item['error']}")
# Detailed error (collapsed)
with st.expander(tr("batch.error_detail")):
st.code(item['traceback'], language="python")

View File

@@ -320,7 +320,51 @@
"history.action.delete_confirm": "Confirm deletion? This action cannot be undone!",
"history.action.delete_success": "✅ Task deleted",
"history.action.delete_failed": "❌ Deletion failed: {error}",
"history.page_info": "Page {page} / {total_pages}"
"history.page_info": "Page {page} / {total_pages}",
"batch.mode_label": "🔢 Batch Generation Mode",
"batch.mode_help": "Generate multiple videos, one topic per line",
"batch.section_title": "Batch Topics Input",
"batch.section_generation": "📦 Batch Video Generation",
"batch.rules_title": "Batch Generation Rules",
"batch.rule_1": "Automatically use 'AI Generate Content' mode",
"batch.rule_2": "Enter one topic per line",
"batch.rule_3": "All videos use the same configuration (TTS, template, workflow, etc.)",
"batch.topics_label": "Batch Topics (one per line)",
"batch.topics_placeholder": "Why develop a reading habit\nHow to manage time efficiently\n5 secrets to healthy living\nBenefits of waking up early\nHow to overcome procrastination\nTechniques to stay focused\nEmotional management methods\nTips to improve memory\nBuilding good relationships\nWealth management basics",
"batch.topics_help": "One video topic per line, AI will generate content based on the topic",
"batch.count_success": "✅ Detected {count} topics",
"batch.count_error": "❌ Batch size exceeds limit (max 100), current: {count}",
"batch.preview_title": "📋 Preview Topic List",
"batch.title_prefix_label": "Title Prefix (optional)",
"batch.title_prefix_placeholder": "e.g., Knowledge Sharing",
"batch.title_prefix_help": "Final title format: {prefix} - {topic}, e.g., Knowledge Sharing - Why develop a reading habit",
"batch.n_scenes_label": "Scenes (unified for all videos)",
"batch.n_scenes_help": "Number of scenes per video, same setting for all videos",
"batch.n_scenes_caption": "Scenes: {n}",
"batch.config_info": "Other configurations: TTS voice, video template, image workflow, etc. will use the settings from the right panel, unified for all videos",
"batch.no_topics": "⚠️ Please enter batch topics on the left (one per line)",
"batch.prepare_info": "📊 Ready to generate {count} videos (using same configuration)",
"batch.estimated_time": "⏱️ Estimated time: about {minutes} minutes",
"batch.generate_button": "🚀 Batch Generate {count} Videos",
"batch.generate_help": "⚠️ Please keep the page open during batch generation, do not close the browser",
"batch.overall_progress": "Overall Progress",
"batch.current_task": "Current Task",
"batch.completed": "Batch generation completed!",
"batch.results_title": "📊 Batch Generation Results",
"batch.total": "Total",
"batch.success": "Success",
"batch.failed": "Failed",
"batch.total_time": "Total Time",
"batch.minutes": "m",
"batch.seconds": "s",
"batch.success_message": "✅ Batch generation completed! All videos have been saved to history.",
"batch.view_in_history": "💡 Tip: You can view all generated videos in the '📚 History' page.",
"batch.goto_history": "Go to History Page",
"batch.failed_list": "❌ Failed Tasks",
"batch.task": "Task",
"batch.error": "Error",
"batch.error_detail": "View detailed error stack"
}
}

View File

@@ -320,7 +320,51 @@
"history.action.delete_confirm": "确认删除该任务?此操作无法撤销!",
"history.action.delete_success": "✅ 任务已删除",
"history.action.delete_failed": "❌ 删除失败:{error}",
"history.page_info": "第 {page} 页 / 共 {total_pages} 页"
"history.page_info": "第 {page} 页 / 共 {total_pages} 页",
"batch.mode_label": "🔢 批量生成模式",
"batch.mode_help": "批量生成多个视频,每行一个主题",
"batch.section_title": "批量主题输入",
"batch.section_generation": "📦 批量视频生成",
"batch.rules_title": "批量生成规则",
"batch.rule_1": "自动使用「AI 生成内容」模式",
"batch.rule_2": "每行输入一个主题",
"batch.rule_3": "所有视频使用相同的配置TTS、模板、工作流等",
"batch.topics_label": "批量主题(每行一个)",
"batch.topics_placeholder": "为什么要养成阅读习惯\n如何高效管理时间\n健康生活的5个秘诀\n早起的好处\n如何克服拖延症\n保持专注的技巧\n情绪管理的方法\n提升记忆力的窍门\n建立良好人际关系\n财富管理基础知识",
"batch.topics_help": "每行一个视频主题AI会根据主题自动生成文案",
"batch.count_success": "✅ 识别到 {count} 个主题",
"batch.count_error": "❌ 批量数量超过限制最多100个当前: {count}",
"batch.preview_title": "📋 预览主题列表",
"batch.title_prefix_label": "标题前缀(可选)",
"batch.title_prefix_placeholder": "例如:知识分享",
"batch.title_prefix_help": "最终标题格式:{标题前缀} - {主题},如:知识分享 - 为什么要养成阅读习惯",
"batch.n_scenes_label": "分镜数(所有视频统一)",
"batch.n_scenes_help": "每个视频的分镜数量,所有视频使用相同设置",
"batch.n_scenes_caption": "分镜数:{n}",
"batch.config_info": "其他配置TTS语音、视频模板、图像工作流等配置将使用右侧栏的设置所有视频统一",
"batch.no_topics": "⚠️ 请先在左侧输入批量主题(每行一个)",
"batch.prepare_info": "📊 准备生成 {count} 个视频(使用相同配置)",
"batch.estimated_time": "⏱️ 预估总耗时: 约 {minutes} 分钟",
"batch.generate_button": "🚀 批量生成 {count} 个视频",
"batch.generate_help": "⚠️ 批量生成期间请保持页面打开,不要关闭浏览器",
"batch.overall_progress": "整体进度",
"batch.current_task": "当前任务",
"batch.completed": "批量生成完成!",
"batch.results_title": "📊 批量生成结果",
"batch.total": "总数",
"batch.success": "成功",
"batch.failed": "失败",
"batch.total_time": "总耗时",
"batch.minutes": "分",
"batch.seconds": "秒",
"batch.success_message": "✅ 批量生成完成!所有视频已保存到历史记录。",
"batch.view_in_history": "💡 提示:可以在「📚 历史记录」页面查看所有生成的视频。",
"batch.goto_history": "前往历史记录页面",
"batch.failed_list": "❌ 失败的任务",
"batch.task": "任务",
"batch.error": "错误信息",
"batch.error_detail": "查看详细错误堆栈"
}
}

164
web/utils/batch_manager.py Normal file
View File

@@ -0,0 +1,164 @@
# Copyright (C) 2025 AIDC-AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lightweight batch manager for Streamlit (Simplified YAGNI version)
"""
import time
import traceback
from typing import List, Dict, Any, Optional, Callable
from loguru import logger
class SimpleBatchManager:
"""
Ultra-simple batch manager following YAGNI principle
Design principles:
1. Only supports "AI generate content" mode
2. Same config for all videos, only topics differ
3. No CSV, no complex validation, just loop and execute
"""
def __init__(self):
self.results = []
self.errors = []
self.current_index = 0
self.total_count = 0
def execute_batch(
self,
pixelle_video,
topics: List[str],
shared_config: Dict[str, Any],
overall_progress_callback: Optional[Callable] = None,
task_progress_callback_factory: Optional[Callable] = None
) -> Dict[str, Any]:
"""
Execute batch generation with shared config
Args:
pixelle_video: PixelleVideoCore instance
topics: List of topics (one per video)
shared_config: Shared configuration for all videos
overall_progress_callback: Callback for overall progress
task_progress_callback_factory: Factory function to create per-task callback
Returns:
{
"results": [...],
"errors": [...],
"total_count": N,
"success_count": M,
"failed_count": K
}
"""
self.results = []
self.errors = []
self.total_count = len(topics)
logger.info(f"Starting batch generation: {self.total_count} topics")
for idx, topic in enumerate(topics, 1):
self.current_index = idx
# Report overall progress
if overall_progress_callback:
overall_progress_callback(
current=idx,
total=self.total_count,
topic=topic
)
try:
logger.info(f"Task {idx}/{self.total_count} started: {topic}")
# Extract title_prefix from shared_config (not a valid parameter for generate_video)
title_prefix = shared_config.get("title_prefix")
# Build task params (merge topic with shared config, excluding title_prefix)
task_params = {
"text": topic, # Topic as input
"mode": "generate", # Fixed mode
}
# Merge shared config, excluding title_prefix
for key, value in shared_config.items():
if key != "title_prefix":
task_params[key] = value
# Generate title using title_prefix
if title_prefix:
task_params["title"] = f"{title_prefix} - {topic}"
else:
# Use topic as title
task_params["title"] = topic
# Add per-task progress callback
if task_progress_callback_factory:
task_params["progress_callback"] = task_progress_callback_factory(idx, topic)
# Execute generation
from web.utils.async_helpers import run_async
result = run_async(pixelle_video.generate_video(**task_params))
# Extract task_id from video_path (e.g., output/20251118_173821_f96a/final.mp4)
from pathlib import Path
task_id = Path(result.video_path).parent.name
# Record success
self.results.append({
"index": idx,
"topic": topic,
"task_id": task_id,
"video_path": result.video_path,
"status": "success"
})
logger.info(f"Task {idx}/{self.total_count} completed: {result.video_path}")
except Exception as e:
# Record error but continue
error_msg = str(e)
error_trace = traceback.format_exc()
logger.error(f"Task {idx}/{self.total_count} failed: {error_msg}")
logger.debug(f"Error traceback:\n{error_trace}")
self.errors.append({
"index": idx,
"topic": topic,
"error": error_msg,
"traceback": error_trace,
"status": "failed"
})
# Continue to next task
continue
success_count = len(self.results)
failed_count = len(self.errors)
logger.info(
f"Batch generation completed: "
f"{success_count}/{self.total_count} succeeded, "
f"{failed_count} failed"
)
return {
"results": self.results,
"errors": self.errors,
"total_count": self.total_count,
"success_count": success_count,
"failed_count": failed_count
}