Files
AI-Video/web/components/output_preview.py
2025-11-17 20:44:17 +08:00

197 lines
8.3 KiB
Python

# Copyright (C) 2025 AIDC-AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Output preview components for web UI (right column)
"""
import base64
import os
import time
from pathlib import Path

import streamlit as st
from loguru import logger

from pixelle_video.config import config_manager
from pixelle_video.models.progress import ProgressEvent
from web.i18n import tr, get_language
from web.utils.async_helpers import run_async
def render_output_preview(pixelle_video, video_params):
    """Render the output preview section (right column of the web UI).

    Draws the "Generate" button; when clicked, validates system
    configuration and user input, runs the async video-generation
    pipeline with live progress updates, then shows the resulting video
    with summary stats and a download button.

    Args:
        pixelle_video: Service object exposing an async ``generate_video``
            coroutine (awaited via ``run_async``).
        video_params: Dict of UI-collected settings (text, mode, title,
            n_scenes, BGM, TTS and template options, ...).
    """
    # --- Unpack UI parameters up front so the rest reads clearly --------
    text = video_params.get("text", "")
    mode = video_params.get("mode", "generate")
    title = video_params.get("title")
    n_scenes = video_params.get("n_scenes", 5)
    bgm_path = video_params.get("bgm_path")
    bgm_volume = video_params.get("bgm_volume", 0.2)
    tts_mode = video_params.get("tts_inference_mode", "local")
    selected_voice = video_params.get("tts_voice")
    tts_speed = video_params.get("tts_speed")
    tts_workflow_key = video_params.get("tts_workflow")
    ref_audio_path = video_params.get("ref_audio")
    frame_template = video_params.get("frame_template")
    custom_values_for_video = video_params.get("template_params", {})
    workflow_key = video_params.get("image_workflow")
    prompt_prefix = video_params.get("prompt_prefix", "")

    with st.container(border=True):
        st.markdown(f"**{tr('section.video_generation')}**")

        # Warn early if the system is not configured (button still rendered).
        if not config_manager.validate():
            st.warning(tr("settings.not_configured"))

        if st.button(tr("btn.generate"), type="primary", use_container_width=True):
            # Re-validate at click time: settings may have changed since render.
            if not config_manager.validate():
                st.error(tr("settings.not_configured"))
                st.stop()
            # Input text is mandatory.
            if not text:
                st.error(tr("error.input_required"))
                st.stop()

            # Progress UI placeholders, updated by the callback below.
            progress_bar = st.progress(0)
            status_text = st.empty()
            start_time = time.time()  # for the total-generation-time stat

            try:
                def update_progress(event: ProgressEvent):
                    """Translate a ProgressEvent into status text + bar position."""
                    if event.event_type == "frame_step":
                        # Frame step, e.g. "Scene 3/5 - Step 2/4: generate image"
                        action_text = tr(f"progress.step_{event.action}")
                        message = tr(
                            "progress.frame_step",
                            current=event.frame_current,
                            total=event.frame_total,
                            step=event.step,
                            action=action_text,
                        )
                    elif event.event_type == "processing_frame":
                        # Per-frame progress, e.g. "Scene 3/5"
                        message = tr(
                            "progress.frame",
                            current=event.frame_current,
                            total=event.frame_total,
                        )
                    else:
                        # Simple events map directly to an i18n key.
                        message = tr(f"progress.{event.event_type}")
                    # Append extra detail (e.g. batch progress) when present.
                    if event.extra_info:
                        message = f"{message} - {event.extra_info}"
                    status_text.text(message)
                    # Cap at 99% until generation actually completes.
                    progress_bar.progress(min(int(event.progress * 100), 99))

                # Keyword arguments for generate_video().
                # NOTE: named `generation_params` instead of rebinding the
                # `video_params` argument, which the previous version shadowed.
                # image_width/image_height are auto-determined from the template.
                generation_params = {
                    "text": text,
                    "mode": mode,
                    "title": title if title else None,
                    "n_scenes": n_scenes,
                    "image_workflow": workflow_key,
                    "frame_template": frame_template,
                    "prompt_prefix": prompt_prefix,
                    "bgm_path": bgm_path,
                    # Volume only matters when a BGM track is actually set.
                    "bgm_volume": bgm_volume if bgm_path else 0.2,
                    "progress_callback": update_progress,
                    "tts_inference_mode": tts_mode,
                }
                # TTS parameters differ by inference mode.
                if tts_mode == "local":
                    generation_params["tts_voice"] = selected_voice
                    generation_params["tts_speed"] = tts_speed
                else:  # comfyui
                    generation_params["tts_workflow"] = tts_workflow_key
                    if ref_audio_path:
                        generation_params["ref_audio"] = str(ref_audio_path)
                # Custom template parameters, if the template declares any.
                if custom_values_for_video:
                    generation_params["template_params"] = custom_values_for_video

                result = run_async(pixelle_video.generate_video(**generation_params))
                total_generation_time = time.time() - start_time

                progress_bar.progress(100)
                status_text.text(tr("status.success"))
                st.success(tr("status.video_generated", path=result.video_path))
                st.markdown("---")

                # Compact info line: time, size, scene count, resolution.
                file_size_mb = result.file_size / (1024 * 1024)
                # Lazy import — presumably avoids import cost/cycles at module
                # load; TODO confirm before hoisting to top of file.
                from pixelle_video.utils.template_util import (
                    parse_template_size,
                    resolve_template_path,
                )
                template_path = resolve_template_path(result.storyboard.config.frame_template)
                video_width, video_height = parse_template_size(template_path)
                info_text = (
                    f"⏱️ {tr('info.generation_time')} {total_generation_time:.1f}s "
                    f"📦 {file_size_mb:.2f}MB "
                    f"🎬 {len(result.storyboard.frames)}{tr('info.scenes_unit')} "
                    f"📐 {video_width}x{video_height}"
                )
                st.caption(info_text)
                st.markdown("---")

                # Inline preview plus a download button for the generated file.
                if os.path.exists(result.video_path):
                    st.video(result.video_path)
                    with open(result.video_path, "rb") as video_file:
                        video_bytes = video_file.read()
                    video_filename = os.path.basename(result.video_path)
                    st.download_button(
                        label="⬇️ 下载视频" if get_language() == "zh_CN" else "⬇️ Download Video",
                        data=video_bytes,
                        file_name=video_filename,
                        mime="video/mp4",
                        use_container_width=True
                    )
                else:
                    st.error(tr("status.video_not_found", path=result.video_path))
            except Exception as e:
                # Clear the progress UI, surface the error, and halt this run.
                status_text.text("")
                progress_bar.empty()
                st.error(tr("status.error", error=str(e)))
                logger.exception(e)
                st.stop()