支持视频生成时插图非必填,大幅提升视频生成速度

This commit is contained in:
puke
2025-11-07 14:09:32 +08:00
parent 514dbfaa1b
commit 8d5c578958
11 changed files with 674 additions and 330 deletions

View File

@@ -22,7 +22,16 @@ Yes, you can use traditional pip + venv approach.
### Q: Do I need to configure ComfyUI?
Not necessarily. You can use RunningHub cloud service without local deployment.
**Not necessarily** - it depends on your template choice:
| Template Type | ComfyUI | Best For | Speed |
|--------------|---------|----------|-------|
| Text-only<br/>(e.g., `simple.html`) | ❌ Not needed | Quotes, announcements, reading prompts | ⚡⚡⚡ Very fast |
| AI Images<br/>(e.g., `default.html`) | ✅ Required | Rich visual content | ⚡ Standard |
**Tip**: Beginners can start with text-only templates for instant zero-barrier experience!
**Alternative**: If you need AI images but don't want local ComfyUI, use RunningHub cloud service.
### Q: Which LLMs are supported?

View File

@@ -22,7 +22,16 @@ curl -LsSf https://astral.sh/uv/install.sh | sh
### Q: 必须要配置 ComfyUI 吗?
不一定。你可以使用 RunningHub 云端服务,无需本地部署。
**不一定**,取决于您选择的模板类型:
| 模板类型 | ComfyUI | 适用场景 | 生成速度 |
|---------|---------|---------|----------|
| 纯文本模板<br/>(如 `simple.html`) | ❌ 不需要 | 文字金句、公告、阅读提示 | ⚡⚡⚡ 极快 |
| AI 配图模板<br/>(如 `default.html`) | ✅ 需要 | 图文并茂的丰富内容 | ⚡ 标准 |
**推荐**:新手可以从纯文本模板开始,零门槛快速体验!
**其他选项**:如果需要 AI 配图但不想本地部署 ComfyUI,可以使用 RunningHub 云端服务。
### Q: 支持哪些 LLM?

View File

@@ -32,10 +32,24 @@ class CustomPipeline(BasePipeline):
You can customize:
- Content processing logic
- Narration generation strategy
- Image prompt generation
- Image prompt generation (conditional based on template)
- Frame composition
- Video assembly
KEY OPTIMIZATION: Conditional Image Generation
-----------------------------------------------
This pipeline supports automatic detection of template image requirements.
If your template doesn't use {{image}}, the entire image generation pipeline
can be skipped, providing:
⚡ Faster generation (no image API calls)
💰 Lower cost (no LLM calls for image prompts)
🚀 Reduced dependencies (no ComfyUI needed for text-only videos)
Usage patterns:
1. Text-only videos: Use templates/1080x1920/simple.html
2. AI-generated images: Use templates with {{image}} placeholder
3. Custom logic: Modify template or override the detection logic in your subclass
Example usage:
# 1. Create your own pipeline by copying this file
# 2. Modify the __call__ method with your custom logic
@@ -90,6 +104,11 @@ class CustomPipeline(BasePipeline):
Returns:
VideoGenerationResult
Image Generation Logic:
- If template has {{image}} → automatically generates images
- If template has no {{image}} → skips image generation (faster, cheaper)
- To customize: Override the template_requires_image logic in your subclass
"""
logger.info("Starting CustomPipeline")
logger.info(f"Input text length: {len(text)} chars")
@@ -114,6 +133,22 @@ class CustomPipeline(BasePipeline):
user_specified_output = output_path
output_path = get_task_final_video_path(task_id)
# ========== Step 0.5: Check template requirements ==========
# Detect if template requires {{image}} parameter
# This allows skipping the entire image generation pipeline for text-only templates
from pixelle_video.services.frame_html import HTMLFrameGenerator
from pixelle_video.utils.template_util import resolve_template_path
template_path = resolve_template_path(frame_template)
generator = HTMLFrameGenerator(template_path)
template_requires_image = generator.requires_image()
if template_requires_image:
logger.info(f"📸 Template requires image generation")
else:
logger.info(f"⚡ Template does not require images - skipping image generation pipeline")
logger.info(f" 💡 Benefits: Faster generation + Lower cost + No ComfyUI dependency")
# ========== Step 1: Process content (CUSTOMIZE THIS) ==========
self._report_progress(progress_callback, "processing_content", 0.10)
@@ -138,29 +173,37 @@ class CustomPipeline(BasePipeline):
logger.info(f"Generated {len(narrations)} narrations")
# ========== Step 2: Generate image prompts (CUSTOMIZE THIS) ==========
# ========== Step 2: Generate image prompts (CONDITIONAL - CUSTOMIZE THIS) ==========
self._report_progress(progress_callback, "generating_image_prompts", 0.25)
# Example: Generate image prompts using LLM
from pixelle_video.utils.content_generators import generate_image_prompts
# IMPORTANT: Check if template actually needs images
# If your template doesn't use {{image}}, you can skip this entire step!
if template_requires_image:
# Template requires images - generate image prompts using LLM
from pixelle_video.utils.content_generators import generate_image_prompts
image_prompts = await generate_image_prompts(
self.llm,
narrations=narrations,
min_words=30,
max_words=60
)
image_prompts = await generate_image_prompts(
self.llm,
narrations=narrations,
min_words=30,
max_words=60
)
# Example: Apply custom prompt prefix
from pixelle_video.utils.prompt_helper import build_image_prompt
custom_prefix = "cinematic style, professional lighting" # Customize this
# Example: Apply custom prompt prefix
from pixelle_video.utils.prompt_helper import build_image_prompt
custom_prefix = "cinematic style, professional lighting" # Customize this
final_image_prompts = []
for base_prompt in image_prompts:
final_prompt = build_image_prompt(base_prompt, custom_prefix)
final_image_prompts.append(final_prompt)
final_image_prompts = []
for base_prompt in image_prompts:
final_prompt = build_image_prompt(base_prompt, custom_prefix)
final_image_prompts.append(final_prompt)
logger.info(f"Generated {len(final_image_prompts)} image prompts")
logger.info(f"Generated {len(final_image_prompts)} image prompts")
else:
# Template doesn't need images - skip image generation entirely
final_image_prompts = [None] * len(narrations)
logger.info(f"⚡ Skipped image prompt generation (template doesn't need images)")
logger.info(f" 💡 Savings: {len(narrations)} LLM calls + {len(narrations)} image generations")
# ========== Step 3: Create storyboard ==========
config = StoryboardConfig(
@@ -317,8 +360,8 @@ class CustomPipeline(BasePipeline):
# ==================== Usage Examples ====================
"""
Example 1: Register and use custom pipeline
----------------------------------------
Example 1: Text-only video (no AI image generation)
---------------------------------------------------
from pixelle_video import pixelle_video
from pixelle_video.pipelines.custom import CustomPipeline
@@ -328,15 +371,27 @@ await pixelle_video.initialize()
# Register custom pipeline
pixelle_video.pipelines["my_custom"] = CustomPipeline(pixelle_video)
# Use it
# Use text-only template - no image generation!
result = await pixelle_video.generate_video(
text="Your input content here",
text="Your content here",
pipeline="my_custom",
custom_param_example="custom_value"
frame_template="1080x1920/simple.html" # Template without {{image}}
)
# Benefits: ⚡ Fast, 💰 Cheap, 🚀 No ComfyUI needed
Example 2: Create your own pipeline class
Example 2: AI-generated image video
---------------------------------------------------
# Use template with {{image}} - automatic image generation
result = await pixelle_video.generate_video(
text="Your content here",
pipeline="my_custom",
frame_template="1080x1920/default.html" # Template with {{image}}
)
# Will automatically generate images via LLM + ComfyUI
Example 3: Create your own pipeline class
----------------------------------------
from pixelle_video.pipelines.custom import CustomPipeline
@@ -351,7 +406,7 @@ class MySpecialPipeline(CustomPipeline):
return result
Example 3: Inline custom pipeline
Example 4: Inline custom pipeline
----------------------------------------
from pixelle_video.pipelines.base import BasePipeline

View File

@@ -250,6 +250,14 @@ class StandardPipeline(BasePipeline):
created_at=datetime.now()
)
# ========== Step 0.8: Check template requirements ==========
template_requires_image = self._check_template_requires_image(config.frame_template)
if template_requires_image:
logger.info(f"📸 Template requires image generation")
else:
logger.info(f"⚡ Template does not require images - skipping image generation pipeline")
logger.info(f" 💡 Benefits: Faster generation + Lower cost + No ComfyUI dependency")
try:
# ========== Step 1: Generate/Split narrations ==========
if mode == "generate":
@@ -268,54 +276,61 @@ class StandardPipeline(BasePipeline):
logger.info(f"✅ Split script into {len(narrations)} segments (by lines)")
logger.info(f" Note: n_scenes={n_scenes} is ignored in fixed mode")
# ========== Step 2: Generate image prompts ==========
self._report_progress(progress_callback, "generating_image_prompts", 0.15)
# ========== Step 2: Generate image prompts (conditional) ==========
if template_requires_image:
self._report_progress(progress_callback, "generating_image_prompts", 0.15)
# Override prompt_prefix if provided
original_prefix = None
if prompt_prefix is not None:
image_config = self.core.config.get("comfyui", {}).get("image", {})
original_prefix = image_config.get("prompt_prefix")
image_config["prompt_prefix"] = prompt_prefix
logger.info(f"Using custom prompt_prefix: '{prompt_prefix}'")
# Override prompt_prefix if provided
original_prefix = None
if prompt_prefix is not None:
image_config = self.core.config.get("comfyui", {}).get("image", {})
original_prefix = image_config.get("prompt_prefix")
image_config["prompt_prefix"] = prompt_prefix
logger.info(f"Using custom prompt_prefix: '{prompt_prefix}'")
try:
# Create progress callback wrapper for image prompt generation
def image_prompt_progress(completed: int, total: int, message: str):
batch_progress = completed / total if total > 0 else 0
overall_progress = 0.15 + (batch_progress * 0.15)
self._report_progress(
progress_callback,
"generating_image_prompts",
overall_progress,
extra_info=message
try:
# Create progress callback wrapper for image prompt generation
def image_prompt_progress(completed: int, total: int, message: str):
batch_progress = completed / total if total > 0 else 0
overall_progress = 0.15 + (batch_progress * 0.15)
self._report_progress(
progress_callback,
"generating_image_prompts",
overall_progress,
extra_info=message
)
# Generate base image prompts
base_image_prompts = await generate_image_prompts(
self.llm,
narrations=narrations,
min_words=min_image_prompt_words,
max_words=max_image_prompt_words,
progress_callback=image_prompt_progress
)
# Generate base image prompts
base_image_prompts = await generate_image_prompts(
self.llm,
narrations=narrations,
min_words=min_image_prompt_words,
max_words=max_image_prompt_words,
progress_callback=image_prompt_progress
)
# Apply prompt prefix
from pixelle_video.utils.prompt_helper import build_image_prompt
image_config = self.core.config.get("comfyui", {}).get("image", {})
prompt_prefix_to_use = prompt_prefix if prompt_prefix is not None else image_config.get("prompt_prefix", "")
# Apply prompt prefix
from pixelle_video.utils.prompt_helper import build_image_prompt
image_config = self.core.config.get("comfyui", {}).get("image", {})
prompt_prefix_to_use = prompt_prefix if prompt_prefix is not None else image_config.get("prompt_prefix", "")
image_prompts = []
for base_prompt in base_image_prompts:
final_prompt = build_image_prompt(base_prompt, prompt_prefix_to_use)
image_prompts.append(final_prompt)
image_prompts = []
for base_prompt in base_image_prompts:
final_prompt = build_image_prompt(base_prompt, prompt_prefix_to_use)
image_prompts.append(final_prompt)
finally:
# Restore original prompt_prefix
if original_prefix is not None:
image_config["prompt_prefix"] = original_prefix
finally:
# Restore original prompt_prefix
if original_prefix is not None:
image_config["prompt_prefix"] = original_prefix
logger.info(f"✅ Generated {len(image_prompts)} image prompts")
logger.info(f"✅ Generated {len(image_prompts)} image prompts")
else:
# Skip image prompt generation
image_prompts = [None] * len(narrations)
self._report_progress(progress_callback, "preparing_frames", 0.15)
logger.info(f"⚡ Skipped image prompt generation (template doesn't need images)")
logger.info(f" 💡 Savings: {len(narrations)} LLM calls + {len(narrations)} image generations")
# ========== Step 3: Create frames ==========
for i, (narration, image_prompt) in enumerate(zip(narrations, image_prompts)):
@@ -419,3 +434,29 @@ class StandardPipeline(BasePipeline):
logger.error(f"❌ Video generation failed: {e}")
raise
def _check_template_requires_image(self, frame_template: str) -> bool:
"""
Check if template requires image generation
This is checked at pipeline level to avoid unnecessary:
- LLM calls (generating image_prompts)
- Image generation API calls
- ComfyUI dependency
Args:
frame_template: Template path (e.g., "1080x1920/default.html")
Returns:
True if template contains {{image}}, False otherwise
"""
from pixelle_video.services.frame_html import HTMLFrameGenerator
from pixelle_video.utils.template_util import resolve_template_path
template_path = resolve_template_path(frame_template)
generator = HTMLFrameGenerator(template_path)
requires = generator.requires_image()
logger.debug(f"Template '{frame_template}' requires_image={requires}")
return requires

View File

@@ -1,99 +0,0 @@
# Prompts Directory
Centralized prompt management for all LLM interactions in Pixelle-Video.
## Structure
Each prompt is in its own file for easy maintenance and modification:
```
prompts/
├── __init__.py # Exports all builder functions
├── topic_narration.py # Generate narrations from topic
├── content_narration.py # Extract/refine narrations from content
├── script_split.py # Split fixed script into segments
├── title_generation.py # Generate video title from content
├── image_generation.py # Generate image prompts from narrations
└── style_conversion.py # Convert style description to image prompt
```
## Usage
All builder functions are exported from the package root:
```python
from pixelle_video.prompts import (
build_topic_narration_prompt,
build_content_narration_prompt,
build_script_split_prompt,
build_title_generation_prompt,
build_image_prompt_prompt,
build_style_conversion_prompt,
)
```
## Prompt Files
### Narration Prompts
1. **topic_narration.py**
- Purpose: Generate engaging narrations from a topic/theme
- Input: topic, n_storyboard, min_words, max_words
- Output: JSON with narrations array
2. **content_narration.py**
- Purpose: Extract and refine narrations from user content
- Input: content, n_storyboard, min_words, max_words
- Output: JSON with narrations array
3. **script_split.py**
- Purpose: Split fixed script into natural segments (no modification)
- Input: script, min_words (reference), max_words (reference)
- Output: JSON with narrations array
4. **title_generation.py**
- Purpose: Generate short, attractive video title
- Input: content, max_length
- Output: Plain text title
### Image Prompts
5. **image_generation.py**
- Purpose: Generate English image prompts from narrations
- Input: narrations, min_words, max_words, style_preset/style_description
- Output: JSON with image_prompts array
- Contains: IMAGE_STYLE_PRESETS dictionary
6. **style_conversion.py**
- Purpose: Convert custom style description to English image prompt
- Input: description (any language)
- Output: Plain text English image prompt
## Modifying Prompts
To modify a prompt:
1. Locate the relevant file (e.g., `topic_narration.py`)
2. Edit the prompt constant (e.g., `TOPIC_NARRATION_PROMPT`)
3. Changes take effect immediately (no need to modify service code)
## Adding New Prompts
To add a new prompt:
1. Create a new file (e.g., `my_new_prompt.py`)
2. Define the prompt constant and builder function
3. Export the builder function in `__init__.py`
4. Use it in service code:
```python
from pixelle_video.prompts import build_my_new_prompt
```
## Design Principles
- **One File, One Prompt**: Each prompt has its own file for clarity
- **Builder Functions**: Each file exports a `build_*_prompt()` function
- **Centralized Exports**: All builders are exported from `__init__.py`
- **Consistent Format**: All prompts follow similar structure and style
- **Easy Maintenance**: Modify prompts without touching service code

View File

@@ -57,6 +57,22 @@ class HTMLFrameGenerator:
self._check_linux_dependencies()
logger.debug(f"Loaded HTML template: {template_path} (size: {self.width}x{self.height})")
def requires_image(self) -> bool:
"""
Detect if template requires {{image}} parameter
This method checks if the template uses the {{image}} variable.
If the template doesn't use images, the entire image generation
pipeline can be skipped, significantly improving:
- Generation speed (no image generation API calls)
- Cost efficiency (no LLM calls for image prompts)
- Dependency requirements (no ComfyUI needed)
Returns:
True if template contains {{image}}, False otherwise
"""
return '{{image}}' in self.template
def _check_linux_dependencies(self):
"""Check Linux system dependencies and warn if missing"""
if os.name != 'posix':
@@ -403,7 +419,7 @@ class HTMLFrameGenerator:
# Replace variables in HTML (supports DSL syntax: {{param:type=default}})
html = self._replace_parameters(self.template, context)
logger.info(f"html--->{html}")
logger.debug(f"html--->{html}")
# Use provided output path or auto-generate
if output_path is None:
# Fallback: auto-generate (for backward compatibility)

View File

@@ -56,6 +56,9 @@ class FrameProcessor:
frame_num = frame.index + 1
# Determine if this frame needs image generation
needs_image = frame.image_prompt is not None
try:
# Step 1: Generate audio (TTS)
if progress_callback:
@@ -69,23 +72,27 @@ class FrameProcessor:
))
await self._step_generate_audio(frame, config)
# Step 2: Generate image (ComfyKit)
if progress_callback:
progress_callback(ProgressEvent(
event_type="frame_step",
progress=0.25,
frame_current=frame_num,
frame_total=total_frames,
step=2,
action="image"
))
await self._step_generate_image(frame, config)
# Step 2: Generate image (conditional)
if needs_image:
if progress_callback:
progress_callback(ProgressEvent(
event_type="frame_step",
progress=0.25,
frame_current=frame_num,
frame_total=total_frames,
step=2,
action="image"
))
await self._step_generate_image(frame, config)
else:
frame.image_path = None
logger.debug(f" 2/4: Skipped image generation (not required by template)")
# Step 3: Compose frame (add subtitle)
if progress_callback:
progress_callback(ProgressEvent(
event_type="frame_step",
progress=0.50,
progress=0.50 if needs_image else 0.33,
frame_current=frame_num,
frame_total=total_frames,
step=3,
@@ -97,7 +104,7 @@ class FrameProcessor:
if progress_callback:
progress_callback(ProgressEvent(
event_type="frame_step",
progress=0.75,
progress=0.75 if needs_image else 0.67,
frame_current=frame_num,
frame_total=total_frames,
step=4,

View File

@@ -0,0 +1,278 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <!--
      Text-only 1080x1920 (vertical) frame template.
      Contains no {{image}} placeholder, so the AI image generation pipeline
      (image-prompt LLM calls + ComfyUI) is skipped entirely when this
      template is selected.
      Parameters: {{title}}, {{text}}, plus optional overrides
      {{background=...}}, {{author=...}}, {{describe=...}}, {{brand=...}}.
    -->
    <style>
        /* Stacking order: background image (0) < gradient overlay (1)
           < decorative circles (2) < page content (3). */
        html {
            margin: 0;
            padding: 0;
        }
        body {
            margin: 0;
            padding: 0;
            width: 1080px;
            font-family: 'PingFang SC', 'Source Han Sans', 'Microsoft YaHei', sans-serif;
            position: relative;
            overflow: hidden;
        }
        /* Background image layer (customizable using <img> tag) */
        .background-image {
            position: absolute;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            z-index: 0;
        }
        .background-image img {
            width: 100%;
            height: 100%;
            object-fit: cover;
            object-position: center;
        }
        /* Gradient overlay on top of background */
        .gradient-overlay {
            position: absolute;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            background: linear-gradient(135deg, rgba(102, 126, 234, 0.5) 0%, rgba(118, 75, 162, 0.6) 100%);
            z-index: 1;
        }
        .page-container {
            width: 1080px;
            height: 1920px;
            padding: 120px 80px;
            box-sizing: border-box;
            display: flex;
            flex-direction: column;
            justify-content: center;
            align-items: center;
            gap: 80px;
            position: relative;
            z-index: 3;
        }
        /* Decorative background elements */
        .bg-decoration {
            position: absolute;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            z-index: 2;
            overflow: hidden;
            pointer-events: none;
        }
        .circle {
            position: absolute;
            border-radius: 50%;
            background: rgba(255, 255, 255, 0.1);
        }
        .circle-1 {
            width: 400px;
            height: 400px;
            top: -150px;
            right: -100px;
        }
        .circle-2 {
            width: 300px;
            height: 300px;
            bottom: -100px;
            left: -80px;
        }
        .circle-3 {
            width: 200px;
            height: 200px;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            opacity: 0.5;
        }
        /* Title section */
        .video-title-wrapper {
            position: relative;
            max-width: 900px;
            text-align: center;
        }
        .video-title {
            font-size: 72px;
            font-weight: 700;
            color: #ffffff;
            line-height: 1.3;
            letter-spacing: 3px;
            text-shadow: 0 4px 20px rgba(0, 0, 0, 0.3);
            margin-bottom: 40px;
        }
        .title-underline {
            width: 150px;
            height: 4px;
            background: rgba(255, 255, 255, 0.8);
            margin: 0 auto;
            border-radius: 2px;
        }
        /* Content section */
        .content {
            display: flex;
            flex-direction: column;
            gap: 60px;
            max-width: 900px;
            width: 100%;
            position: relative;
            background: rgba(255, 255, 255, 0.15);
            backdrop-filter: blur(10px);
            padding: 80px 60px;
            border-radius: 20px;
            box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
        }
        .text-wrapper {
            position: relative;
        }
        .text {
            font-size: 48px;
            color: #ffffff;
            text-align: center;
            line-height: 2.0;
            font-weight: 500;
            text-shadow: 0 2px 10px rgba(0, 0, 0, 0.2);
            position: relative;
            /* Reserve 3 lines (48px * 2.0 * 3) so short/long narrations
               keep a consistent card height across frames. */
            min-height: 288px;
            display: flex;
            align-items: center;
            justify-content: center;
        }
        /* Quote marks */
        .quote-mark {
            position: absolute;
            font-size: 120px;
            font-family: Georgia, serif;
            color: rgba(255, 255, 255, 0.3);
            font-weight: bold;
            line-height: 1;
        }
        .quote-mark.left {
            top: -30px;
            left: -20px;
        }
        .quote-mark.right {
            bottom: -50px;
            right: -20px;
        }
        /* Footer */
        .footer {
            display: flex;
            align-items: center;
            justify-content: space-between;
            width: 100%;
            padding-top: 40px;
            border-top: 2px solid rgba(255, 255, 255, 0.3);
        }
        .author-section {
            display: flex;
            flex-direction: column;
            gap: 8px;
        }
        .author {
            font-size: 32px;
            font-weight: 600;
            color: #ffffff;
            text-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
        }
        .author-desc {
            font-size: 24px;
            color: rgba(255, 255, 255, 0.9);
            font-weight: 400;
        }
        .logo-section {
            display: flex;
            flex-direction: column;
            align-items: flex-end;
            gap: 10px;
        }
        .logo {
            font-size: 28px;
            font-weight: 600;
            color: #ffffff;
            letter-spacing: 2px;
            text-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
        }
        .logo-subtitle {
            font-size: 20px;
            color: rgba(255, 255, 255, 0.8);
            font-weight: 400;
        }
    </style>
</head>
<body>
    <!-- Background image layer (customizable via background parameter) -->
    <div class="background-image">
        <img src="{{background=https://img.alicdn.com/imgextra/i2/O1CN01TngrfY1NTZK1xwuWd_!!6000000001571-0-tps-690-1494.jpg}}" alt="Background">
    </div>
    <!-- Gradient overlay -->
    <div class="gradient-overlay"></div>
    <!-- Background decorations -->
    <div class="bg-decoration">
        <div class="circle circle-1"></div>
        <div class="circle circle-2"></div>
        <div class="circle circle-3"></div>
    </div>
    <div class="page-container">
        <!-- Video title -->
        <div class="video-title-wrapper">
            <div class="video-title">{{title}}</div>
            <div class="title-underline"></div>
        </div>
        <!-- Content card -->
        <div class="content">
            <div class="text-wrapper">
                <div class="quote-mark left">"</div>
                <div class="text">{{text}}</div>
                <div class="quote-mark right">"</div>
            </div>
        </div>
        <!-- Footer -->
        <div class="footer">
            <div class="author-section">
                <div class="author">{{author=@Pixelle.AI}}</div>
                <div class="author-desc">{{describe=Open Source Omnimodal AI Creative Agent}}</div>
            </div>
            <div class="logo-section">
                <div class="logo">{{brand=Pixelle-Video}}</div>
                <div class="logo-subtitle">Text-Only Template</div>
            </div>
        </div>
    </div>
</body>
</html>

View File

@@ -636,145 +636,6 @@ def main():
st.error(tr("tts.preview_failed", error=str(e)))
logger.exception(e)
# ====================================================================
# Image Generation Section
# ====================================================================
with st.container(border=True):
st.markdown(f"**{tr('section.image')}**")
# 1. ComfyUI Workflow selection
with st.expander(tr("help.feature_description"), expanded=False):
st.markdown(f"**{tr('help.what')}**")
st.markdown(tr("style.workflow_what"))
st.markdown(f"**{tr('help.how')}**")
st.markdown(tr("style.workflow_how"))
st.markdown(f"**{tr('help.note')}**")
st.markdown(tr("style.image_size_note"))
# Get available workflows from pixelle_video (with source info)
workflows = pixelle_video.image.list_workflows()
# Build options for selectbox
# Display: "image_flux.json - Runninghub"
# Value: "runninghub/image_flux.json"
workflow_options = [wf["display_name"] for wf in workflows]
workflow_keys = [wf["key"] for wf in workflows]
# Default to first option (should be runninghub by sorting)
default_workflow_index = 0
# If user has a saved preference in config, try to match it
comfyui_config = config_manager.get_comfyui_config()
saved_workflow = comfyui_config["image"]["default_workflow"]
if saved_workflow and saved_workflow in workflow_keys:
default_workflow_index = workflow_keys.index(saved_workflow)
workflow_display = st.selectbox(
"Workflow",
workflow_options if workflow_options else ["No workflows found"],
index=default_workflow_index,
label_visibility="collapsed",
key="image_workflow_select"
)
# Get the actual workflow key (e.g., "runninghub/image_flux.json")
if workflow_options:
workflow_selected_index = workflow_options.index(workflow_display)
workflow_key = workflow_keys[workflow_selected_index]
else:
workflow_key = "runninghub/image_flux.json" # fallback
# 2. Image size input
col1, col2 = st.columns(2)
with col1:
image_width = st.number_input(
tr('style.image_width'),
min_value=128,
value=1024,
step=1,
label_visibility="visible",
help=tr('style.image_width_help')
)
with col2:
image_height = st.number_input(
tr('style.image_height'),
min_value=128,
value=1024,
step=1,
label_visibility="visible",
help=tr('style.image_height_help')
)
# 3. Prompt prefix input
# Get current prompt_prefix from config
current_prefix = comfyui_config["image"]["prompt_prefix"]
# Prompt prefix input (temporary, not saved to config)
prompt_prefix = st.text_area(
tr('style.prompt_prefix'),
value=current_prefix,
placeholder=tr("style.prompt_prefix_placeholder"),
height=80,
label_visibility="visible",
help=tr("style.prompt_prefix_help")
)
# Style preview expander (similar to template preview)
with st.expander(tr("style.preview_title"), expanded=False):
# Test prompt input
test_prompt = st.text_input(
tr("style.test_prompt"),
value="a dog",
help=tr("style.test_prompt_help"),
key="style_test_prompt"
)
# Preview button
if st.button(tr("style.preview"), key="preview_style", use_container_width=True):
with st.spinner(tr("style.previewing")):
try:
from pixelle_video.utils.prompt_helper import build_image_prompt
# Build final prompt with prefix
final_prompt = build_image_prompt(test_prompt, prompt_prefix)
# Generate preview image (use user-specified size)
preview_image_path = run_async(pixelle_video.image(
prompt=final_prompt,
workflow=workflow_key,
width=int(image_width),
height=int(image_height)
))
# Display preview (support both URL and local path)
if preview_image_path:
st.success(tr("style.preview_success"))
# Read and encode image
if preview_image_path.startswith('http'):
# URL - use directly
img_html = f'<div class="preview-image"><img src="{preview_image_path}" alt="Style Preview"/></div>'
else:
# Local file - encode as base64
with open(preview_image_path, 'rb') as f:
img_data = base64.b64encode(f.read()).decode()
img_html = f'<div class="preview-image"><img src="data:image/png;base64,{img_data}" alt="Style Preview"/></div>'
st.markdown(img_html, unsafe_allow_html=True)
# Show the final prompt used
st.info(f"**{tr('style.final_prompt_label')}**\n{final_prompt}")
# Show file path
st.caption(f"📁 {preview_image_path}")
else:
st.error(tr("style.preview_failed_general"))
except Exception as e:
st.error(tr("style.preview_failed", error=str(e)))
logger.exception(e)
# ====================================================================
# Storyboard Template Section
# ====================================================================
@@ -866,6 +727,11 @@ def main():
generator_for_params = HTMLFrameGenerator(template_path_for_params)
custom_params_for_video = generator_for_params.parse_template_parameters()
# Detect if template requires image generation
template_requires_image = generator_for_params.requires_image()
# Store in session state for Image Section to read
st.session_state['template_requires_image'] = template_requires_image
custom_values_for_video = {}
if custom_params_for_video:
st.markdown("📝 " + tr("template.custom_parameters"))
@@ -1005,6 +871,162 @@ def main():
st.error(tr("template.preview_failed", error=str(e)))
logger.exception(e)
# ====================================================================
# Image Generation Section (conditional based on template)
# ====================================================================
# Check if current template requires image generation
if st.session_state.get('template_requires_image', True):
# Template requires images - show full Image Section
with st.container(border=True):
st.markdown(f"**{tr('section.image')}**")
# 1. ComfyUI Workflow selection
with st.expander(tr("help.feature_description"), expanded=False):
st.markdown(f"**{tr('help.what')}**")
st.markdown(tr("style.workflow_what"))
st.markdown(f"**{tr('help.how')}**")
st.markdown(tr("style.workflow_how"))
st.markdown(f"**{tr('help.note')}**")
st.markdown(tr("style.image_size_note"))
# Get available workflows from pixelle_video (with source info)
workflows = pixelle_video.image.list_workflows()
# Build options for selectbox
# Display: "image_flux.json - Runninghub"
# Value: "runninghub/image_flux.json"
workflow_options = [wf["display_name"] for wf in workflows]
workflow_keys = [wf["key"] for wf in workflows]
# Default to first option (should be runninghub by sorting)
default_workflow_index = 0
# If user has a saved preference in config, try to match it
comfyui_config = config_manager.get_comfyui_config()
saved_workflow = comfyui_config["image"]["default_workflow"]
if saved_workflow and saved_workflow in workflow_keys:
default_workflow_index = workflow_keys.index(saved_workflow)
workflow_display = st.selectbox(
"Workflow",
workflow_options if workflow_options else ["No workflows found"],
index=default_workflow_index,
label_visibility="collapsed",
key="image_workflow_select"
)
# Get the actual workflow key (e.g., "runninghub/image_flux.json")
if workflow_options:
workflow_selected_index = workflow_options.index(workflow_display)
workflow_key = workflow_keys[workflow_selected_index]
else:
workflow_key = "runninghub/image_flux.json" # fallback
# 2. Image size input
col1, col2 = st.columns(2)
with col1:
image_width = st.number_input(
tr('style.image_width'),
min_value=128,
value=1024,
step=1,
label_visibility="visible",
help=tr('style.image_width_help')
)
with col2:
image_height = st.number_input(
tr('style.image_height'),
min_value=128,
value=1024,
step=1,
label_visibility="visible",
help=tr('style.image_height_help')
)
# 3. Prompt prefix input
# Get current prompt_prefix from config
current_prefix = comfyui_config["image"]["prompt_prefix"]
# Prompt prefix input (temporary, not saved to config)
prompt_prefix = st.text_area(
tr('style.prompt_prefix'),
value=current_prefix,
placeholder=tr("style.prompt_prefix_placeholder"),
height=80,
label_visibility="visible",
help=tr("style.prompt_prefix_help")
)
# Style preview expander (similar to template preview)
with st.expander(tr("style.preview_title"), expanded=False):
# Test prompt input
test_prompt = st.text_input(
tr("style.test_prompt"),
value="a dog",
help=tr("style.test_prompt_help"),
key="style_test_prompt"
)
# Preview button
if st.button(tr("style.preview"), key="preview_style", use_container_width=True):
with st.spinner(tr("style.previewing")):
try:
from pixelle_video.utils.prompt_helper import build_image_prompt
# Build final prompt with prefix
final_prompt = build_image_prompt(test_prompt, prompt_prefix)
# Generate preview image (use user-specified size)
preview_image_path = run_async(pixelle_video.image(
prompt=final_prompt,
workflow=workflow_key,
width=int(image_width),
height=int(image_height)
))
# Display preview (support both URL and local path)
if preview_image_path:
st.success(tr("style.preview_success"))
# Read and encode image
if preview_image_path.startswith('http'):
# URL - use directly
img_html = f'<div class="preview-image"><img src="{preview_image_path}" alt="Style Preview"/></div>'
else:
# Local file - encode as base64
with open(preview_image_path, 'rb') as f:
img_data = base64.b64encode(f.read()).decode()
img_html = f'<div class="preview-image"><img src="data:image/png;base64,{img_data}" alt="Style Preview"/></div>'
st.markdown(img_html, unsafe_allow_html=True)
# Show the final prompt used
st.info(f"**{tr('style.final_prompt_label')}**\n{final_prompt}")
# Show file path
st.caption(f"📁 {preview_image_path}")
else:
st.error(tr("style.preview_failed_general"))
except Exception as e:
st.error(tr("style.preview_failed", error=str(e)))
logger.exception(e)
else:
# Template doesn't need images - show simplified message
with st.container(border=True):
st.markdown(f"**{tr('section.image')}**")
st.info(" " + tr("image.not_required"))
st.caption(tr("image.not_required_hint"))
# Set default values for later use
workflow_key = None
image_width = 1024
image_height = 1024
prompt_prefix = ""
# ========================================================================
# Right Column: Generate Button + Progress + Video Preview
# ========================================================================

View File

@@ -102,6 +102,9 @@
"template.preview_caption": "Template Preview: {template}",
"template.custom_parameters": "Custom Parameters",
"image.not_required": "Current template does not require image generation",
"image.not_required_hint": "The selected template is text-only and does not need images. Benefits: ⚡ Faster generation 💰 Lower cost",
"video.title": "🎬 Video Settings",
"video.frames": "Scenes",
"video.frames_help": "More scenes = longer video",

View File

@@ -102,6 +102,9 @@
"template.preview_caption": "模板预览:{template}",
"template.custom_parameters": "自定义参数",
"image.not_required": "当前模板不需要插图生成",
"image.not_required_hint": "您选择的模板是纯文本模板,无需生成图片。这将:⚡ 加快生成速度 💰 降低生成成本",
"video.title": "🎬 视频设置",
"video.frames": "分镜数",
"video.frames_help": "更多分镜 = 更长视频",