支持批量生成功能

This commit is contained in:
puke
2025-11-18 17:51:52 +08:00
parent dfa2f76c5b
commit 2ac03b4b48
5 changed files with 577 additions and 45 deletions

View File

@@ -21,60 +21,152 @@ from web.utils.async_helpers import get_project_version
def render_content_input():
    """Render content input section (left column) with batch support.

    Returns:
        dict: Collected input parameters.
            Single mode: ``batch_mode`` (False), ``mode``, ``text``,
            ``title``, ``n_scenes``.
            Batch mode: ``batch_mode`` (True), ``topics`` (list[str]),
            ``mode`` (fixed to ``"generate"``), ``title_prefix``,
            ``n_scenes``.
    """
    with st.container(border=True):
        st.markdown(f"**{tr('section.content_input')}**")

        # ====================================================================
        # Step 1: Batch mode toggle (highest priority)
        # ====================================================================
        batch_mode = st.checkbox(
            tr("batch.mode_label"),
            value=False,
            help=tr("batch.mode_help")
        )

        if not batch_mode:
            # ================================================================
            # Single task mode (original logic, unchanged)
            # ================================================================
            # Processing mode selection
            mode = st.radio(
                "Processing Mode",
                ["generate", "fixed"],
                horizontal=True,
                format_func=lambda x: tr(f"mode.{x}"),
                label_visibility="collapsed"
            )

            # Text input (unified for both modes)
            text_placeholder = tr("input.topic_placeholder") if mode == "generate" else tr("input.content_placeholder")
            text_height = 120 if mode == "generate" else 200
            text_help = tr("input.text_help_generate") if mode == "generate" else tr("input.text_help_fixed")
            text = st.text_area(
                tr("input.text"),
                placeholder=text_placeholder,
                height=text_height,
                help=text_help
            )

            # Title input (optional for both modes)
            title = st.text_input(
                tr("input.title"),
                placeholder=tr("input.title_placeholder"),
                help=tr("input.title_help")
            )

            # Number of scenes (only shown in generate mode)
            if mode == "generate":
                n_scenes = st.slider(
                    tr("video.frames"),
                    min_value=3,
                    max_value=30,
                    value=5,
                    help=tr("video.frames_help"),
                    label_visibility="collapsed"
                )
                st.caption(tr("video.frames_label", n=n_scenes))
            else:
                # Fixed mode: n_scenes is ignored, set default value
                n_scenes = 5
                st.info(tr("video.frames_fixed_mode_hint"))

            return {
                "batch_mode": False,
                "mode": mode,
                "text": text,
                "title": title,
                "n_scenes": n_scenes
            }
        else:
            # ================================================================
            # Batch mode (simplified YAGNI version)
            # ================================================================
            st.markdown(f"**{tr('batch.section_title')}**")

            # Batch rules info (kept flush-left so Markdown does not treat
            # indented lines as a code block)
            st.info(f"""
**{tr('batch.rules_title')}**
- ✅ {tr('batch.rule_1')}
- ✅ {tr('batch.rule_2')}
- ✅ {tr('batch.rule_3')}
""")

            # Batch topics input (one topic per line)
            text_input = st.text_area(
                tr("batch.topics_label"),
                height=300,
                placeholder=tr("batch.topics_placeholder"),
                help=tr("batch.topics_help")
            )

            # Split topics by newline, dropping blank lines
            if text_input:
                topics = [
                    line.strip()
                    for line in text_input.strip().split('\n')
                    if line.strip()
                ]
                if topics:
                    # Enforce the 100-topic batch limit
                    if len(topics) > 100:
                        st.error(tr("batch.count_error", count=len(topics)))
                        topics = []
                    else:
                        st.success(tr("batch.count_success", count=len(topics)))
                        # Preview topics list
                        with st.expander(tr("batch.preview_title"), expanded=False):
                            for i, topic in enumerate(topics, 1):
                                st.markdown(f"`{i}.` {topic}")
                else:
                    topics = []
            else:
                topics = []

            st.markdown("---")

            # Title prefix (optional, prepended to every generated title)
            title_prefix = st.text_input(
                tr("batch.title_prefix_label"),
                placeholder=tr("batch.title_prefix_placeholder"),
                help=tr("batch.title_prefix_help")
            )

            # Number of scenes (unified for all videos in the batch)
            n_scenes = st.slider(
                tr("batch.n_scenes_label"),
                min_value=3,
                max_value=30,
                value=5,
                help=tr("batch.n_scenes_help")
            )
            st.caption(tr("batch.n_scenes_caption", n=n_scenes))

            # Config info
            st.info(f"📌 {tr('batch.config_info')}")

            return {
                "batch_mode": True,
                "topics": topics,
                "mode": "generate",  # Fixed to AI generate content
                "title_prefix": title_prefix,
                "n_scenes": n_scenes,
            }
def render_bgm_section():

View File

@@ -29,6 +29,19 @@ from pixelle_video.config import config_manager
def render_output_preview(pixelle_video, video_params):
    """Render output preview section (right column).

    Dispatches to the batch renderer when the input panel set
    ``batch_mode``; otherwise falls through to the original
    single-video renderer.
    """
    if video_params.get("batch_mode", False):
        # Batch generation mode
        render_batch_output(pixelle_video, video_params)
    else:
        # Single video generation mode (original logic)
        render_single_output(pixelle_video, video_params)
def render_single_output(pixelle_video, video_params):
"""Render single video generation output (original logic, unchanged)"""
# Extract parameters from video_params dict
text = video_params.get("text", "")
mode = video_params.get("mode", "generate")
@@ -193,4 +206,179 @@ def render_output_preview(pixelle_video, video_params):
st.error(tr("status.error", error=str(e)))
logger.exception(e)
st.stop()
def render_batch_output(pixelle_video, video_params):
    """Render batch generation output (minimal, redirect to History).

    Args:
        pixelle_video: Video generation backend handed to the batch manager.
        video_params: Dict produced by ``render_content_input`` in batch
            mode (``topics``, ``title_prefix``, ``n_scenes``, plus shared
            media/TTS settings).
    """
    topics = video_params.get("topics", [])
    with st.container(border=True):
        st.markdown(f"**{tr('batch.section_generation')}**")

        # Guard: nothing to do without topics
        if not topics:
            st.warning(tr("batch.no_topics"))
            return

        # Guard: system must be configured before generating
        if not config_manager.validate():
            st.warning(tr("settings.not_configured"))
            return

        batch_count = len(topics)

        # Display batch info
        st.info(tr("batch.prepare_info", count=batch_count))

        # Rough estimate only: assume 3 minutes per video
        estimated_minutes = batch_count * 3
        st.caption(tr("batch.estimated_time", minutes=estimated_minutes))

        # Generate button with batch semantics
        if st.button(
            tr("batch.generate_button", count=batch_count),
            type="primary",
            use_container_width=True,
            help=tr("batch.generate_help")
        ):
            # Shared config applied to every task in the batch
            shared_config = {
                "title_prefix": video_params.get("title_prefix"),
                "n_scenes": video_params.get("n_scenes", 5),
                "image_workflow": video_params.get("image_workflow"),
                "frame_template": video_params.get("frame_template"),
                "prompt_prefix": video_params.get("prompt_prefix", ""),
                "bgm_path": video_params.get("bgm_path"),
                "bgm_volume": video_params.get("bgm_volume", 0.2),
                "tts_inference_mode": video_params.get("tts_inference_mode", "local"),
            }

            # TTS parameters differ between local and workflow inference
            if shared_config["tts_inference_mode"] == "local":
                shared_config["tts_voice"] = video_params.get("tts_voice")
                shared_config["tts_speed"] = video_params.get("tts_speed")
            else:
                shared_config["tts_workflow"] = video_params.get("tts_workflow")
                if video_params.get("ref_audio"):
                    shared_config["ref_audio"] = str(video_params["ref_audio"])

            # Optional template parameters
            if video_params.get("template_params"):
                shared_config["template_params"] = video_params["template_params"]

            # UI containers: overall batch progress + current task detail
            overall_progress_container = st.container()
            current_task_container = st.container()

            overall_progress_bar = overall_progress_container.progress(0)
            overall_status = overall_progress_container.empty()

            current_task_title = current_task_container.empty()
            current_task_progress = current_task_container.progress(0)
            current_task_status = current_task_container.empty()

            def update_overall_progress(current, total, topic):
                """Update the batch-level progress bar (current is 1-based)."""
                progress = (current - 1) / total
                overall_progress_bar.progress(progress)
                overall_status.markdown(
                    f"📊 **{tr('batch.overall_progress')}**: {current}/{total} ({int(progress * 100)}%)"
                )

            def make_task_progress_callback(task_idx, topic):
                """Build a per-task ProgressEvent callback bound to task_idx/topic."""
                def callback(event: ProgressEvent):
                    # Display current task title
                    current_task_title.markdown(f"🎬 **{tr('batch.current_task')} {task_idx}**: {topic}")
                    # Update task detailed progress
                    if event.event_type == "frame_step":
                        action_key = f"progress.step_{event.action}"
                        action_text = tr(action_key)
                        message = tr(
                            "progress.frame_step",
                            current=event.frame_current,
                            total=event.frame_total,
                            step=event.step,
                            action=action_text
                        )
                    elif event.event_type == "processing_frame":
                        message = tr(
                            "progress.frame",
                            current=event.frame_current,
                            total=event.frame_total
                        )
                    else:
                        message = tr(f"progress.{event.event_type}")
                    current_task_progress.progress(event.progress)
                    current_task_status.text(message)
                return callback

            # Execute batch generation (local import keeps page load light)
            from web.utils.batch_manager import SimpleBatchManager
            import time

            batch_manager = SimpleBatchManager()
            start_time = time.time()

            batch_result = batch_manager.execute_batch(
                pixelle_video=pixelle_video,
                topics=topics,
                shared_config=shared_config,
                overall_progress_callback=update_overall_progress,
                task_progress_callback_factory=make_task_progress_callback
            )

            total_time = time.time() - start_time

            # Finalize progress displays
            overall_progress_bar.progress(1.0)
            overall_status.markdown(f"✅ **{tr('batch.completed')}**")
            current_task_title.empty()
            current_task_progress.empty()
            current_task_status.empty()

            # Results summary
            st.markdown("---")
            st.markdown(f"**{tr('batch.results_title')}**")
            col1, col2, col3 = st.columns(3)
            col1.metric(tr("batch.total"), batch_result["total_count"])
            col2.metric(f"{tr('batch.success')}", batch_result["success_count"])
            col3.metric(f"{tr('batch.failed')}", batch_result["failed_count"])

            # Display total elapsed time
            minutes = int(total_time / 60)
            seconds = int(total_time % 60)
            st.caption(f"⏱️ {tr('batch.total_time')}: {minutes}{tr('batch.minutes')}{seconds}{tr('batch.seconds')}")

            # Redirect to History page
            st.markdown("---")
            st.success(tr("batch.success_message"))
            st.info(tr("batch.view_in_history"))

            # NOTE(review): this button lives inside the generate button's
            # branch; clicking it reruns the script, which resets the outer
            # button to False, so switch_page likely never executes — TODO
            # confirm and consider st.page_link or session_state instead.
            if st.button(
                f"📚 {tr('batch.goto_history')}",
                type="secondary",
                use_container_width=True
            ):
                st.switch_page("pages/2_📚_History.py")

            # Show failed tasks if any
            if batch_result["errors"]:
                st.markdown("---")
                st.markdown(f"#### {tr('batch.failed_list')}")
                for item in batch_result["errors"]:
                    with st.expander(f"🔴 {tr('batch.task')} {item['index']}: {item['topic']}", expanded=False):
                        st.error(f"**{tr('batch.error')}**: {item['error']}")
                        # Bug fix: the original nested a second st.expander
                        # here, which Streamlit forbids (raises
                        # StreamlitAPIException at runtime). Show the
                        # traceback directly inside the task expander.
                        st.markdown(f"**{tr('batch.error_detail')}**")
                        st.code(item['traceback'], language="python")