feat: Add comprehensive timeline editor with frame editing and regeneration capabilities
This commit is contained in:
@@ -54,6 +54,8 @@ from api.routers import (
|
||||
files_router,
|
||||
resources_router,
|
||||
frame_router,
|
||||
editor_router,
|
||||
publish_router,
|
||||
)
|
||||
|
||||
|
||||
@@ -133,6 +135,8 @@ app.include_router(tasks_router, prefix=api_config.api_prefix)
|
||||
app.include_router(files_router, prefix=api_config.api_prefix)
|
||||
app.include_router(resources_router, prefix=api_config.api_prefix)
|
||||
app.include_router(frame_router, prefix=api_config.api_prefix)
|
||||
app.include_router(editor_router, prefix=api_config.api_prefix)
|
||||
app.include_router(publish_router, prefix=api_config.api_prefix)
|
||||
|
||||
|
||||
@app.get("/")
|
||||
@@ -153,6 +157,8 @@ async def root():
|
||||
"files": f"{api_config.api_prefix}/files",
|
||||
"resources": f"{api_config.api_prefix}/resources",
|
||||
"frame": f"{api_config.api_prefix}/frame",
|
||||
"editor": f"{api_config.api_prefix}/editor",
|
||||
"publish": f"{api_config.api_prefix}/publish",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,8 @@ from api.routers.tasks import router as tasks_router
|
||||
from api.routers.files import router as files_router
|
||||
from api.routers.resources import router as resources_router
|
||||
from api.routers.frame import router as frame_router
|
||||
from api.routers.editor import router as editor_router
|
||||
from api.routers.publish import router as publish_router
|
||||
|
||||
__all__ = [
|
||||
"health_router",
|
||||
@@ -36,5 +38,8 @@ __all__ = [
|
||||
"files_router",
|
||||
"resources_router",
|
||||
"frame_router",
|
||||
"editor_router",
|
||||
"publish_router",
|
||||
]
|
||||
|
||||
|
||||
|
||||
579
api/routers/editor.py
Normal file
579
api/routers/editor.py
Normal file
@@ -0,0 +1,579 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Editor API router for timeline editor operations
|
||||
|
||||
Provides endpoints for:
|
||||
- Fetching storyboard data
|
||||
- Reordering frames
|
||||
- Updating frame duration
|
||||
- Generating preview
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Path
|
||||
from loguru import logger
|
||||
|
||||
from api.schemas.editor import (
|
||||
StoryboardSchema,
|
||||
StoryboardFrameSchema,
|
||||
ReorderFramesRequest,
|
||||
UpdateDurationRequest,
|
||||
PreviewRequest,
|
||||
PreviewResponse,
|
||||
UpdateFrameRequest,
|
||||
UpdateFrameResponse,
|
||||
RegenerateImageRequest,
|
||||
RegenerateImageResponse,
|
||||
RegenerateAudioRequest,
|
||||
RegenerateAudioResponse,
|
||||
)
|
||||
|
||||
router = APIRouter(prefix="/editor", tags=["Editor"])
|
||||
|
||||
|
||||
def _path_to_url(file_path: str, base_url: str = "http://localhost:8000") -> str:
|
||||
"""Convert local file path to URL accessible through API"""
|
||||
if not file_path:
|
||||
return None
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# Normalize path separators
|
||||
file_path = file_path.replace("\\", "/")
|
||||
|
||||
# Extract relative path from output directory
|
||||
parts = file_path.split("/")
|
||||
try:
|
||||
output_idx = parts.index("output")
|
||||
relative_parts = parts[output_idx + 1:]
|
||||
relative_path = "/".join(relative_parts)
|
||||
except ValueError:
|
||||
relative_path = Path(file_path).name
|
||||
|
||||
return f"{base_url}/api/files/{relative_path}"
|
||||
|
||||
|
||||
# In-memory cache for demo (in production, use database).
# Maps storyboard/task ID -> storyboard dict in the API schema shape; all
# editor endpoints read and mutate entries here.
_storyboard_cache: dict = {}


# Demo data for testing: a five-frame storyboard (Chinese narration) served
# whenever a client requests the well-known ID "demo-1".
_demo_storyboard = {
    "id": "demo-1",
    "title": "演示视频",
    "total_duration": 15.5,
    "final_video_path": None,
    "created_at": None,
    "frames": [
        {"id": "frame-0", "index": 0, "order": 0, "narration": "在一个宁静的早晨,阳光洒满了整个城市", "image_prompt": "A peaceful morning", "duration": 3.2},
        {"id": "frame-1", "index": 1, "order": 1, "narration": "小明决定出门去探索这个美丽的世界", "image_prompt": "A young man stepping out", "duration": 2.8},
        {"id": "frame-2", "index": 2, "order": 2, "narration": "他走过熟悉的街道,感受着微风的吹拂", "image_prompt": "Walking through streets", "duration": 3.5},
        {"id": "frame-3", "index": 3, "order": 3, "narration": "公园里的花朵正在盛开,散发着迷人的芬芳", "image_prompt": "Blooming flowers", "duration": 3.0},
        {"id": "frame-4", "index": 4, "order": 4, "narration": "这是新的一天的开始,充满了无限可能", "image_prompt": "New day begins", "duration": 3.0},
    ],
}
# Import task manager (used by get_storyboard to resolve in-memory task results)
from api.tasks.manager import task_manager
||||
@router.get("/storyboard/{storyboard_id}", response_model=StoryboardSchema)
async def get_storyboard(storyboard_id: str = Path(..., description="Storyboard/task ID")):
    """
    Get storyboard by ID

    Supports:
    - 'demo-1': Returns demo data for testing
    - Any task_id: Loads real storyboard from completed video generation tasks
    - History tasks: Loads from persistence service

    Lookup order: demo data -> in-memory cache -> task-manager result ->
    persistence on disk. Raises HTTP 404 when nothing matches. Loaded
    storyboards are cached so the other editor endpoints can mutate them.
    """
    # Return demo data for demo-1 (copied into the cache so later edits stick)
    if storyboard_id == "demo-1":
        if "demo-1" not in _storyboard_cache:
            _storyboard_cache["demo-1"] = _demo_storyboard.copy()
        return _storyboard_cache["demo-1"]

    # Try to get from cache first
    if storyboard_id in _storyboard_cache:
        return _storyboard_cache[storyboard_id]

    # Try to load from task manager (in-memory task)
    task = task_manager.get_task(storyboard_id)
    if task and task.result:
        # Extract storyboard from task result
        result = task.result

        # Handle different result formats (attribute access vs dict key)
        storyboard_data = None

        if hasattr(result, 'storyboard'):
            storyboard_data = result.storyboard
        elif isinstance(result, dict) and 'storyboard' in result:
            storyboard_data = result['storyboard']

        if storyboard_data:
            # Convert to editor schema format and cache for subsequent edits
            schema = _convert_storyboard_to_schema(storyboard_id, storyboard_data)
            _storyboard_cache[storyboard_id] = schema
            logger.info(f"Loaded storyboard from task {storyboard_id}")
            return schema

    # Try to load from persistence service (history)
    try:
        from pixelle_video.services.persistence import PersistenceService
        persistence = PersistenceService(output_dir="output")

        # Load storyboard from disk (await since we're in an async function)
        storyboard = await persistence.load_storyboard(storyboard_id)

        if storyboard:
            schema = _convert_storyboard_to_schema(storyboard_id, storyboard)
            _storyboard_cache[storyboard_id] = schema
            logger.info(f"Loaded storyboard from persistence {storyboard_id}")
            return schema
    except Exception as e:
        # Best-effort: persistence failure degrades to the 404 below, not a 500
        logger.warning(f"Failed to load from persistence: {e}")

    raise HTTPException(status_code=404, detail=f"Storyboard {storyboard_id} not found")
|
||||
|
||||
|
||||
def _convert_storyboard_to_schema(storyboard_id: str, storyboard) -> dict:
    """Convert internal Storyboard model to API schema format.

    Accepts either an object with ``frames``/``title``/... attributes or a
    plain dict (as loaded from persistence); anything else yields an empty
    storyboard shell. Media paths are mapped to ``/api/files`` URLs via
    ``_path_to_url``.

    Args:
        storyboard_id: ID to stamp on the returned schema.
        storyboard: Internal Storyboard object or dict.

    Returns:
        A dict matching ``StoryboardSchema``.
    """
    frames = []

    # Handle both object and dict formats
    if hasattr(storyboard, 'frames'):
        frame_list = storyboard.frames
        title = getattr(storyboard, 'title', storyboard_id)
        total_duration = getattr(storyboard, 'total_duration', 0)
        final_video_path = getattr(storyboard, 'final_video_path', None)
        created_at = getattr(storyboard, 'created_at', None)
    elif isinstance(storyboard, dict):
        frame_list = storyboard.get('frames', [])
        title = storyboard.get('title', storyboard_id)
        total_duration = storyboard.get('total_duration', 0)
        final_video_path = storyboard.get('final_video_path')
        created_at = storyboard.get('created_at')
    else:
        frame_list = []
        title = storyboard_id
        total_duration = 0
        final_video_path = None
        created_at = None

    for i, frame in enumerate(frame_list):
        if hasattr(frame, 'narration'):
            # Object format
            frames.append({
                "id": f"frame-{i}",
                "index": getattr(frame, 'index', i),
                "order": i,
                "narration": frame.narration or "",
                "image_prompt": getattr(frame, 'image_prompt', ""),
                "image_path": _path_to_url(getattr(frame, 'image_path', None)),
                "audio_path": _path_to_url(getattr(frame, 'audio_path', None)),
                "video_segment_path": _path_to_url(getattr(frame, 'video_segment_path', None)),
                "duration": getattr(frame, 'duration', 3.0),
            })
        elif isinstance(frame, dict):
            # Dict format
            frames.append({
                "id": f"frame-{i}",
                "index": frame.get('index', i),
                "order": i,
                "narration": frame.get('narration', ""),
                "image_prompt": frame.get('image_prompt', ""),
                "image_path": _path_to_url(frame.get('image_path')),
                "audio_path": _path_to_url(frame.get('audio_path')),
                "video_segment_path": _path_to_url(frame.get('video_segment_path')),
                "duration": frame.get('duration', 3.0),
            })

    # BUGFIX: dict-format storyboards may carry created_at as an already
    # serialized string; calling .isoformat() on it raised AttributeError.
    if created_at:
        created_at_str = (
            created_at.isoformat() if hasattr(created_at, "isoformat") else created_at
        )
    else:
        created_at_str = None

    return {
        "id": storyboard_id,
        "title": title,
        "frames": frames,
        # Fall back to summing per-frame durations when the model has no total
        "total_duration": total_duration or sum(f.get('duration', 3.0) for f in frames),
        "final_video_path": final_video_path,
        "created_at": created_at_str,
    }
|
||||
|
||||
|
||||
@router.patch("/storyboard/{storyboard_id}/reorder", response_model=StoryboardSchema)
async def reorder_frames(
    storyboard_id: str = Path(..., description="Storyboard/task ID"),
    request: ReorderFramesRequest = None
):
    """
    Reorder frames in storyboard

    Updates the order of frames based on the provided frame ID list.

    NOTE(review): assumes a request body is always present — a missing body
    would raise AttributeError on request.order; consider making it required.
    NOTE(review): frames whose IDs are absent from request.order are silently
    dropped from the storyboard — confirm this is intended.
    """
    # Only cached storyboards are editable (cache is filled by get_storyboard)
    if storyboard_id not in _storyboard_cache:
        raise HTTPException(status_code=404, detail=f"Storyboard {storyboard_id} not found in cache")

    storyboard = _storyboard_cache[storyboard_id]
    frames = storyboard["frames"]

    # Create ID to frame mapping
    frame_map = {f["id"]: f for f in frames}

    # Validate all IDs exist
    for frame_id in request.order:
        if frame_id not in frame_map:
            raise HTTPException(status_code=400, detail=f"Frame {frame_id} not found")

    # Reorder frames; copies keep cached entries untouched until commit below
    reordered = []
    for idx, frame_id in enumerate(request.order):
        frame = frame_map[frame_id].copy()
        frame["order"] = idx
        reordered.append(frame)

    storyboard["frames"] = reordered
    _storyboard_cache[storyboard_id] = storyboard

    logger.info(f"Reordered {len(reordered)} frames in storyboard {storyboard_id}")

    return storyboard
|
||||
|
||||
|
||||
@router.patch(
    "/storyboard/{storyboard_id}/frames/{frame_id}/duration",
    response_model=StoryboardFrameSchema
)
async def update_frame_duration(
    storyboard_id: str = Path(..., description="Storyboard/task ID"),
    frame_id: str = Path(..., description="Frame ID"),
    request: UpdateDurationRequest = None
):
    """
    Update frame duration

    Changes the duration of a specific frame and recalculates total duration.
    Raises 404 when the storyboard is not cached or the frame does not exist.

    NOTE(review): assumes a request body is always present — a missing body
    would raise AttributeError on request.duration.
    """
    # Only cached storyboards are editable (cache is filled by get_storyboard)
    if storyboard_id not in _storyboard_cache:
        raise HTTPException(status_code=404, detail=f"Storyboard {storyboard_id} not found in cache")

    storyboard = _storyboard_cache[storyboard_id]
    frames = storyboard["frames"]

    # Find and update frame (mutates the cached frame dict in place)
    updated_frame = None
    for frame in frames:
        if frame["id"] == frame_id:
            frame["duration"] = request.duration
            updated_frame = frame
            break

    if not updated_frame:
        raise HTTPException(status_code=404, detail=f"Frame {frame_id} not found")

    # Recalculate total duration
    storyboard["total_duration"] = sum(f["duration"] for f in frames)
    _storyboard_cache[storyboard_id] = storyboard

    logger.info(f"Updated frame {frame_id} duration to {request.duration}s")

    return updated_frame
|
||||
|
||||
|
||||
@router.post("/storyboard/{storyboard_id}/preview", response_model=PreviewResponse)
async def generate_preview(
    storyboard_id: str = Path(..., description="Storyboard/task ID"),
    request: PreviewRequest = None
):
    """
    Generate preview video for selected frames

    Creates a preview video from the specified frame range.

    NOTE(review): actual rendering is not implemented yet — the returned
    preview_path is a mock path that is never written to disk.
    """
    # Only cached storyboards can be previewed
    if storyboard_id not in _storyboard_cache:
        raise HTTPException(status_code=404, detail=f"Storyboard {storyboard_id} not found in cache")

    storyboard = _storyboard_cache[storyboard_id]
    frames = storyboard["frames"]

    # Determine frame range; missing body means "all frames"
    start = request.start_frame if request else 0
    end = request.end_frame if request and request.end_frame else len(frames)

    if start >= len(frames):
        raise HTTPException(status_code=400, detail="Start frame out of range")

    # Python slicing clamps end, so an oversized end_frame is harmless
    preview_frames = frames[start:end]
    total_duration = sum(f["duration"] for f in preview_frames)

    # TODO: Implement actual preview generation logic
    # For now, return mock response
    preview_path = f"/output/{storyboard_id}/preview_{start}_{end}.mp4"

    logger.info(f"Generated preview for frames {start}-{end} ({len(preview_frames)} frames)")

    return PreviewResponse(
        preview_path=preview_path,
        duration=total_duration,
        frames_count=len(preview_frames)
    )
|
||||
|
||||
|
||||
def _storyboard_to_schema(storyboard_id: str, storyboard) -> dict:
|
||||
"""Convert internal Storyboard to API schema format"""
|
||||
frames = []
|
||||
for i, frame in enumerate(storyboard.frames):
|
||||
frames.append({
|
||||
"id": f"frame-{i}",
|
||||
"index": frame.index,
|
||||
"order": i,
|
||||
"narration": frame.narration,
|
||||
"image_prompt": frame.image_prompt,
|
||||
"image_path": frame.image_path,
|
||||
"audio_path": frame.audio_path,
|
||||
"video_segment_path": frame.video_segment_path,
|
||||
"duration": frame.duration,
|
||||
})
|
||||
|
||||
return {
|
||||
"id": storyboard_id,
|
||||
"title": storyboard.title,
|
||||
"frames": frames,
|
||||
"total_duration": storyboard.total_duration,
|
||||
"final_video_path": storyboard.final_video_path,
|
||||
"created_at": storyboard.created_at,
|
||||
}
|
||||
|
||||
|
||||
@router.put(
    "/storyboard/{storyboard_id}/frames/{frame_id}",
    response_model=UpdateFrameResponse
)
async def update_frame(
    storyboard_id: str = Path(..., description="Storyboard/task ID"),
    frame_id: str = Path(..., description="Frame ID"),
    request: UpdateFrameRequest = None
):
    """
    Update frame content (narration and/or image prompt)

    Updates the text content of a frame without regenerating media.
    Fields set to None in the request are left unchanged.

    NOTE(review): assumes a request body is always present — a missing body
    would raise AttributeError on request.narration.
    """
    # Only cached storyboards are editable
    if storyboard_id not in _storyboard_cache:
        raise HTTPException(status_code=404, detail=f"Storyboard {storyboard_id} not found in cache")

    storyboard = _storyboard_cache[storyboard_id]
    frames = storyboard["frames"]

    # Find and update frame (partial update: only non-None fields applied)
    updated_frame = None
    for frame in frames:
        if frame["id"] == frame_id:
            if request.narration is not None:
                frame["narration"] = request.narration
            if request.image_prompt is not None:
                frame["image_prompt"] = request.image_prompt
            updated_frame = frame
            break

    if not updated_frame:
        raise HTTPException(status_code=404, detail=f"Frame {frame_id} not found")

    _storyboard_cache[storyboard_id] = storyboard

    logger.info(f"Updated frame {frame_id} content")

    return UpdateFrameResponse(
        id=frame_id,
        narration=updated_frame["narration"],
        image_prompt=updated_frame.get("image_prompt"),
        updated=True
    )
|
||||
|
||||
|
||||
@router.post(
    "/storyboard/{storyboard_id}/frames/{frame_id}/regenerate-image",
    response_model=RegenerateImageResponse
)
async def regenerate_frame_image(
    storyboard_id: str = Path(..., description="Storyboard/task ID"),
    frame_id: str = Path(..., description="Frame ID"),
    request: RegenerateImageRequest = None
):
    """
    Regenerate image for a frame

    Uses the frame's image_prompt (or override) to generate a new image.
    Requires ComfyUI service to be running.

    Raises:
        HTTPException 404: unknown storyboard or frame.
        HTTPException 400: no prompt available.
        HTTPException 500: generation/download failure or missing service.
    """
    if storyboard_id not in _storyboard_cache:
        raise HTTPException(status_code=404, detail=f"Storyboard {storyboard_id} not found")

    storyboard = _storyboard_cache[storyboard_id]
    frames = storyboard["frames"]

    # Find frame and remember its position (used for the output file name)
    target_frame = None
    frame_index = 0
    for i, frame in enumerate(frames):
        if frame["id"] == frame_id:
            target_frame = frame
            frame_index = i
            break

    if not target_frame:
        raise HTTPException(status_code=404, detail=f"Frame {frame_id} not found")

    # Get prompt to use: request override wins over the stored prompt
    prompt = request.image_prompt if request and request.image_prompt else target_frame.get("image_prompt", "")

    if not prompt:
        raise HTTPException(status_code=400, detail="No image prompt available")

    try:
        # Import and use PixelleVideo core for image generation
        from api.dependencies import get_pixelle_video
        from pixelle_video.models.storyboard import StoryboardFrame, StoryboardConfig

        pixelle_video = get_pixelle_video()

        # Generate image using ComfyKit
        result = await pixelle_video.comfy(
            workflow="image_gen",
            prompt=prompt,
            task_id=storyboard_id,
        )

        if result and result.get("images"):
            # Download and save image
            image_url = result["images"][0]
            import aiohttp
            import os

            output_dir = f"output/{storyboard_id}"
            os.makedirs(output_dir, exist_ok=True)
            image_path = f"{output_dir}/frame_{frame_index}_regenerated.png"

            async with aiohttp.ClientSession() as session:
                async with session.get(image_url) as resp:
                    # BUGFIX: previously a non-200 download was silently
                    # ignored and the frame still got pointed at a file
                    # that was never written. Fail loudly instead.
                    if resp.status != 200:
                        raise RuntimeError(
                            f"Failed to download generated image (HTTP {resp.status})"
                        )
                    with open(image_path, 'wb') as f:
                        f.write(await resp.read())

            # Update frame only after the file is safely on disk
            target_frame["image_path"] = _path_to_url(image_path)
            _storyboard_cache[storyboard_id] = storyboard

            logger.info(f"Regenerated image for frame {frame_id}")

            return RegenerateImageResponse(
                image_path=target_frame["image_path"],
                success=True
            )
        else:
            raise HTTPException(status_code=500, detail="Image generation failed")

    except ImportError as e:
        logger.error(f"Failed to import dependencies: {e}")
        raise HTTPException(status_code=500, detail="Image generation service not available")
    except HTTPException:
        # Re-raise API errors unchanged instead of re-wrapping them below
        raise
    except Exception as e:
        logger.error(f"Image regeneration failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.post(
    "/storyboard/{storyboard_id}/frames/{frame_id}/regenerate-audio",
    response_model=RegenerateAudioResponse
)
async def regenerate_frame_audio(
    storyboard_id: str = Path(..., description="Storyboard/task ID"),
    frame_id: str = Path(..., description="Frame ID"),
    request: RegenerateAudioRequest = None
):
    """
    Regenerate audio for a frame

    Uses the frame's narration (or override) to generate new audio via TTS.
    On success the frame's audio_path and duration are updated and the
    storyboard total duration is recalculated.

    Raises:
        HTTPException 404: unknown storyboard or frame.
        HTTPException 400: no narration text available.
        HTTPException 500: TTS failure or missing service.
    """
    if storyboard_id not in _storyboard_cache:
        raise HTTPException(status_code=404, detail=f"Storyboard {storyboard_id} not found")

    storyboard = _storyboard_cache[storyboard_id]
    frames = storyboard["frames"]

    # Find frame and remember its position (used for the output file name)
    target_frame = None
    frame_index = 0
    for i, frame in enumerate(frames):
        if frame["id"] == frame_id:
            target_frame = frame
            frame_index = i
            break

    if not target_frame:
        raise HTTPException(status_code=404, detail=f"Frame {frame_id} not found")

    # Get narration to use: request override wins over the stored narration
    narration = request.narration if request and request.narration else target_frame.get("narration", "")

    if not narration:
        raise HTTPException(status_code=400, detail="No narration text available")

    try:
        from api.dependencies import get_pixelle_video
        import os

        pixelle_video = get_pixelle_video()

        # Create output path
        output_dir = f"output/{storyboard_id}"
        os.makedirs(output_dir, exist_ok=True)
        audio_path = f"{output_dir}/frame_{frame_index}_audio_regenerated.mp3"

        # Generate audio using TTS service (optional voice override)
        voice = request.voice if request and request.voice else None

        result_path = await pixelle_video.tts(
            text=narration,
            voice=voice,
            output_path=audio_path
        )

        # Get audio duration from the generated MP3; fall back to a default
        # when the file cannot be parsed.
        from mutagen.mp3 import MP3
        try:
            audio = MP3(result_path)
            duration = audio.info.length
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            duration = 3.0  # Default duration

        # Update frame
        target_frame["audio_path"] = _path_to_url(result_path)
        target_frame["duration"] = duration

        # Recalculate total duration
        storyboard["total_duration"] = sum(f.get("duration", 3.0) for f in frames)
        _storyboard_cache[storyboard_id] = storyboard

        logger.info(f"Regenerated audio for frame {frame_id}, duration: {duration}s")

        return RegenerateAudioResponse(
            audio_path=target_frame["audio_path"],
            duration=duration,
            success=True
        )

    except ImportError as e:
        logger.error(f"Failed to import dependencies: {e}")
        raise HTTPException(status_code=500, detail="TTS service not available")
    except Exception as e:
        logger.error(f"Audio regeneration failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
||||
427
api/routers/publish.py
Normal file
427
api/routers/publish.py
Normal file
@@ -0,0 +1,427 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Publish API router for multi-platform video distribution.
|
||||
|
||||
Endpoints:
|
||||
- POST /publish/export - Format conversion and export
|
||||
- POST /publish/bilibili - Publish to Bilibili (TODO)
|
||||
- POST /publish/youtube - Publish to YouTube (TODO)
|
||||
- GET /publish/tasks/{id} - Get task status
|
||||
"""
|
||||
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from fastapi import APIRouter, HTTPException, Path, BackgroundTasks
|
||||
from loguru import logger
|
||||
|
||||
from api.schemas.publish import (
|
||||
PublishRequest,
|
||||
PublishResultSchema,
|
||||
PublishTaskSchema,
|
||||
PublishStatusEnum,
|
||||
PlatformRequirementsSchema,
|
||||
VideoMetadataSchema,
|
||||
)
|
||||
from pixelle_video.services.publishing import (
|
||||
VideoMetadata,
|
||||
PublishStatus,
|
||||
PublishTask,
|
||||
Platform,
|
||||
)
|
||||
from pixelle_video.services.publishing.export_publisher import ExportPublisher
|
||||
|
||||
|
||||
router = APIRouter(prefix="/publish", tags=["Publish"])


# In-memory task storage (use Redis in production).
# Maps short task ID -> PublishTask; polled via GET /publish/tasks/{id}.
_publish_tasks: dict = {}

# Publisher instances (module-level singleton shared across requests)
_export_publisher = ExportPublisher()
|
||||
|
||||
|
||||
@router.post("/export", response_model=PublishResultSchema)
async def export_video(
    request: PublishRequest,
    background_tasks: BackgroundTasks
):
    """
    Convert video to platform-optimized format and export.

    Optimizes for:
    - Portrait 9:16 aspect ratio (1080x1920)
    - H.264 codec
    - ≤128MB file size

    For manual upload to Douyin/Kuaishou.

    NOTE(review): background_tasks is currently unused — the export runs
    synchronously in the request (see comment below).
    """
    # Create task with a short random ID for status polling
    task_id = str(uuid.uuid4())[:8]

    # Copy API-schema metadata into the internal service model
    metadata = VideoMetadata(
        title=request.metadata.title,
        description=request.metadata.description,
        tags=request.metadata.tags,
        category=request.metadata.category,
        cover_path=request.metadata.cover_path,
        privacy=request.metadata.privacy,
        platform_options=request.metadata.platform_options,
    )

    task = PublishTask(
        id=task_id,
        video_path=request.video_path,
        platform=Platform.EXPORT,
        metadata=metadata,
        status=PublishStatus.PENDING,
    )
    _publish_tasks[task_id] = task

    logger.info(f"📤 Starting export task {task_id} for: {metadata.title}")

    # Execute synchronously for now (can be moved to background)
    result = await _export_publisher.publish(
        request.video_path,
        metadata,
        progress_callback=lambda p, m: logger.info(f"Export {task_id}: {p:.0%} - {m}")
    )

    # Update task so GET /publish/tasks/{id} reflects the outcome
    task.status = PublishStatus(result.status.value)
    task.result = result
    task.updated_at = datetime.now()

    return PublishResultSchema(
        success=result.success,
        platform="export",
        status=PublishStatusEnum(result.status.value),
        export_path=result.export_path,
        error_message=result.error_message,
    )
|
||||
|
||||
|
||||
@router.get("/tasks/{task_id}", response_model=PublishTaskSchema)
async def get_publish_task(task_id: str = Path(..., description="Task ID")):
    """Get publishing task status

    Looks up the in-memory task store; raises 404 for unknown IDs.
    """
    if task_id not in _publish_tasks:
        raise HTTPException(status_code=404, detail=f"Task {task_id} not found")

    task = _publish_tasks[task_id]

    # Map the internal PublishTask (and optional result) onto API schemas
    return PublishTaskSchema(
        id=task.id,
        platform=task.platform.value,
        status=PublishStatusEnum(task.status.value),
        result=PublishResultSchema(
            success=task.result.success,
            platform=task.result.platform.value,
            status=PublishStatusEnum(task.result.status.value),
            export_path=task.result.export_path,
            error_message=task.result.error_message,
        ) if task.result else None,
        created_at=task.created_at.isoformat(),
        updated_at=task.updated_at.isoformat() if task.updated_at else None,
    )
|
||||
|
||||
|
||||
@router.get("/requirements/{platform}", response_model=PlatformRequirementsSchema)
async def get_platform_requirements(platform: str = Path(..., description="Platform name")):
    """Get platform-specific requirements"""

    # Static per-platform upload constraints; looked up by path parameter.
    requirements = {
        "export": {
            "max_file_size_mb": 128,
            "max_duration_seconds": 900,
            "supported_formats": ["mp4"],
            "recommended_resolution": (1080, 1920),
            "recommended_codec": "h264",
        },
        "bilibili": {
            "max_file_size_mb": 4096,
            "max_duration_seconds": 14400,  # 4 hours
            "supported_formats": ["mp4", "flv", "webm"],
            "recommended_resolution": (1920, 1080),  # Landscape
            "recommended_codec": "h264",
        },
        "youtube": {
            "max_file_size_mb": 256000,  # 256GB
            "max_duration_seconds": 43200,  # 12 hours
            "supported_formats": ["mp4", "mov", "avi", "webm"],
            "recommended_resolution": (1920, 1080),
            "recommended_codec": "h264",
        },
    }

    spec = requirements.get(platform)
    if spec is None:
        raise HTTPException(status_code=404, detail=f"Platform {platform} not supported")

    return spec
|
||||
|
||||
|
||||
# NOTE(review): mid-file import — consider moving to the top-of-file group.
from pixelle_video.services.publishing.bilibili_publisher import BilibiliPublisher

# Bilibili publisher instance (module-level singleton shared across requests)
_bilibili_publisher = BilibiliPublisher()
|
||||
|
||||
|
||||
@router.post("/bilibili", response_model=PublishResultSchema)
async def publish_to_bilibili(request: PublishRequest):
    """
    Publish video to Bilibili.

    Requires environment variables:
    - BILIBILI_ACCESS_TOKEN or
    - BILIBILI_SESSDATA + BILIBILI_BILI_JCT

    Raises 400 when credentials are not configured; otherwise performs the
    upload synchronously and returns the publisher result.
    """
    # Fail fast with a clear message if credentials are missing
    if not await _bilibili_publisher.validate_credentials():
        raise HTTPException(
            status_code=400,
            detail="B站凭证未配置。请设置 BILIBILI_ACCESS_TOKEN 或 BILIBILI_SESSDATA 环境变量"
        )

    # Copy API-schema metadata into the internal service model
    metadata = VideoMetadata(
        title=request.metadata.title,
        description=request.metadata.description,
        tags=request.metadata.tags,
        category=request.metadata.category,
        cover_path=request.metadata.cover_path,
        privacy=request.metadata.privacy,
        platform_options=request.metadata.platform_options,
    )

    logger.info(f"📤 Starting Bilibili upload: {metadata.title}")

    result = await _bilibili_publisher.publish(
        request.video_path,
        metadata,
        progress_callback=lambda p, m: logger.info(f"Bilibili: {p:.0%} - {m}")
    )

    return PublishResultSchema(
        success=result.success,
        platform="bilibili",
        status=PublishStatusEnum(result.status.value),
        video_url=result.video_url,
        platform_video_id=result.platform_video_id,
        error_message=result.error_message,
    )
|
||||
|
||||
# NOTE(review): mid-file import — consider moving to the top-of-file group.
from pixelle_video.services.publishing.youtube_publisher import YouTubePublisher

# YouTube publisher instance (module-level singleton shared across requests)
_youtube_publisher = YouTubePublisher()
|
||||
|
||||
|
||||
@router.post("/youtube", response_model=PublishResultSchema)
async def publish_to_youtube(request: PublishRequest):
    """
    Publish video to YouTube.

    Requires:
    - config/youtube_client_secrets.json (OAuth 2.0 credentials)
    - First-time auth will open browser for authorization

    Raises 400 when credentials are not configured; otherwise performs the
    upload synchronously and returns the publisher result.
    """
    # Fail fast with a clear message if credentials are missing
    if not await _youtube_publisher.validate_credentials():
        raise HTTPException(
            status_code=400,
            detail="YouTube 凭证未配置。请添加 config/youtube_client_secrets.json"
        )

    # Copy API-schema metadata into the internal service model
    metadata = VideoMetadata(
        title=request.metadata.title,
        description=request.metadata.description,
        tags=request.metadata.tags,
        category=request.metadata.category,
        cover_path=request.metadata.cover_path,
        privacy=request.metadata.privacy,
        platform_options=request.metadata.platform_options,
    )

    logger.info(f"📤 Starting YouTube upload: {metadata.title}")

    result = await _youtube_publisher.publish(
        request.video_path,
        metadata,
        progress_callback=lambda p, m: logger.info(f"YouTube: {p:.0%} - {m}")
    )

    return PublishResultSchema(
        success=result.success,
        platform="youtube",
        status=PublishStatusEnum(result.status.value),
        video_url=result.video_url,
        platform_video_id=result.platform_video_id,
        error_message=result.error_message,
    )
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Async Task Queue Endpoints
|
||||
# ============================================================
|
||||
|
||||
from pixelle_video.services.publishing.task_manager import get_publish_manager, TaskPriority
from pydantic import BaseModel
from typing import List, Optional
|
||||
|
||||
|
||||
class AsyncPublishRequest(BaseModel):
    """Request body for submitting a publish task to the background queue."""
    # Filesystem path of the video file to publish.
    video_path: str
    platform: str  # one of: "export", "bilibili", "youtube" (case-insensitive)
    metadata: VideoMetadataSchema
    priority: str = "normal"  # queue priority: "low", "normal", "high"
|
||||
|
||||
|
||||
class QueuedTaskSchema(BaseModel):
    """Serialized view of a task in the publish queue."""
    id: str
    platform: str
    status: str
    # Fraction complete in [0, 1] (queue manager reports float progress).
    progress: float
    progress_message: str
    # Number of retry attempts performed so far.
    retries: int
    # ISO-8601 timestamps (see .isoformat() calls in the queue endpoints).
    created_at: str
    # Optional[str] (not bare `str = None`): the endpoints pass None until the
    # task has started/finished, which Pydantic v2 rejects for a plain str field.
    started_at: Optional[str] = None
    completed_at: Optional[str] = None
|
||||
|
||||
|
||||
class QueueStatusSchema(BaseModel):
    """Queue status overview (aggregate counts over all known tasks)."""
    # Tasks waiting to be picked up by a worker.
    pending: int
    # Tasks currently being processed.
    active: int
    # Tasks that finished successfully (status == published).
    completed: int
    # Tasks that ended in failure.
    failed: int
    # Configured worker pool size (manager.max_workers).
    workers: int
|
||||
|
||||
|
||||
@router.post("/async", response_model=dict)
async def publish_async(request: AsyncPublishRequest):
    """
    Submit a publish task to the background queue.

    Returns immediately with task ID for tracking.
    """
    manager = get_publish_manager()

    # Lazily start the worker pool on first use.
    # NOTE(review): peeks at the manager's private _running flag; a public
    # accessor would be preferable — confirm with the task_manager API.
    if not manager._running:
        await manager.start()

    # Resolve the platform name (case-insensitive) to its enum member.
    platform_by_name = {
        "export": Platform.EXPORT,
        "bilibili": Platform.BILIBILI,
        "youtube": Platform.YOUTUBE,
    }
    target_platform = platform_by_name.get(request.platform.lower())
    if not target_platform:
        raise HTTPException(status_code=400, detail=f"Invalid platform: {request.platform}")

    # Unknown priority strings silently fall back to NORMAL.
    priority_by_name = {
        "low": TaskPriority.LOW,
        "normal": TaskPriority.NORMAL,
        "high": TaskPriority.HIGH,
    }
    task_priority = priority_by_name.get(request.priority.lower(), TaskPriority.NORMAL)

    # Translate the request schema into the publisher's domain metadata object.
    meta = request.metadata
    video_metadata = VideoMetadata(
        title=meta.title,
        description=meta.description,
        tags=meta.tags,
        category=meta.category,
        cover_path=meta.cover_path,
        privacy=meta.privacy,
        platform_options=meta.platform_options,
    )

    task_id = await manager.enqueue(
        video_path=request.video_path,
        platform=target_platform,
        metadata=video_metadata,
        priority=task_priority,
    )

    return {
        "task_id": task_id,
        "status": "queued",
        "message": f"Task queued for {request.platform}",
    }
|
||||
|
||||
|
||||
@router.get("/queue/status", response_model=QueueStatusSchema)
async def get_queue_status():
    """Get queue status overview."""
    manager = get_publish_manager()
    tasks = manager.get_all_tasks()

    # Count tasks in a given terminal/waiting state.
    def _count(status):
        return sum(1 for t in tasks if t.task.status == status)

    return QueueStatusSchema(
        pending=_count(PublishStatus.PENDING),
        active=len(manager.get_active_tasks()),
        completed=_count(PublishStatus.PUBLISHED),
        failed=_count(PublishStatus.FAILED),
        workers=manager.max_workers,
    )
|
||||
|
||||
|
||||
@router.get("/queue/tasks", response_model=List[QueuedTaskSchema])
async def list_queued_tasks():
    """List all tasks in queue."""
    manager = get_publish_manager()

    # Serialize each queued task; timestamps become ISO-8601 strings,
    # with None for stages the task has not reached yet.
    return [
        QueuedTaskSchema(
            id=qt.task.id,
            platform=qt.task.platform.value,
            status=qt.task.status.value,
            progress=qt.progress,
            progress_message=qt.progress_message,
            retries=qt.retries,
            created_at=qt.created_at.isoformat(),
            started_at=qt.started_at.isoformat() if qt.started_at else None,
            completed_at=qt.completed_at.isoformat() if qt.completed_at else None,
        )
        for qt in manager.get_all_tasks()
    ]
|
||||
|
||||
|
||||
@router.get("/queue/tasks/{task_id}", response_model=QueuedTaskSchema)
async def get_queued_task(task_id: str = Path(..., description="Task ID")):
    """Get specific queued task status."""
    queued = get_publish_manager().get_task(task_id)
    if not queued:
        raise HTTPException(status_code=404, detail=f"Task {task_id} not found")

    task = queued.task
    # Timestamps are ISO-8601 strings; None for stages not yet reached.
    return QueuedTaskSchema(
        id=task.id,
        platform=task.platform.value,
        status=task.status.value,
        progress=queued.progress,
        progress_message=queued.progress_message,
        retries=queued.retries,
        created_at=queued.created_at.isoformat(),
        started_at=queued.started_at.isoformat() if queued.started_at else None,
        completed_at=queued.completed_at.isoformat() if queued.completed_at else None,
    )
|
||||
@@ -239,8 +239,13 @@ async def generate_video_async(
|
||||
"prompt_prefix": request_body.prompt_prefix,
|
||||
"bgm_path": request_body.bgm_path,
|
||||
"bgm_volume": request_body.bgm_volume,
|
||||
# Progress callback can be added here if needed
|
||||
# "progress_callback": lambda event: task_manager.update_progress(...)
|
||||
# Progress callback support
|
||||
"progress_callback": lambda event: task_manager.update_progress(
|
||||
task_id=task.task_id,
|
||||
current=int(event.progress * 100),
|
||||
total=100,
|
||||
message=f"{event.event_type}" + (f" - {event.action}" if event.action else "")
|
||||
)
|
||||
}
|
||||
|
||||
# Add TTS workflow if specified
|
||||
@@ -268,10 +273,31 @@ async def generate_video_async(
|
||||
# Convert path to URL
|
||||
video_url = path_to_url(request, result.video_path)
|
||||
|
||||
# Convert storyboard to dict for serialization
|
||||
storyboard_data = {
|
||||
"title": result.storyboard.title,
|
||||
"total_duration": result.storyboard.total_duration,
|
||||
"final_video_path": result.storyboard.final_video_path,
|
||||
"created_at": result.storyboard.created_at.isoformat() if result.storyboard.created_at else None,
|
||||
"frames": [
|
||||
{
|
||||
"index": f.index,
|
||||
"narration": f.narration,
|
||||
"image_prompt": f.image_prompt,
|
||||
"audio_path": f.audio_path,
|
||||
"image_path": f.image_path,
|
||||
"video_segment_path": f.video_segment_path,
|
||||
"duration": f.duration,
|
||||
}
|
||||
for f in result.storyboard.frames
|
||||
]
|
||||
}
|
||||
|
||||
return {
|
||||
"video_url": video_url,
|
||||
"duration": result.duration,
|
||||
"file_size": file_size
|
||||
"file_size": file_size,
|
||||
"storyboard": storyboard_data
|
||||
}
|
||||
|
||||
# Start execution
|
||||
|
||||
110
api/schemas/editor.py
Normal file
110
api/schemas/editor.py
Normal file
@@ -0,0 +1,110 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Editor API schemas for timeline editor
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Optional
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class StoryboardFrameSchema(BaseModel):
    """Schema for a single storyboard frame (one narrated shot in the timeline)."""
    id: str
    # Original generation index of the frame.
    index: int
    # Position in the editable timeline — presumably may diverge from `index`
    # after reordering; confirm against the editor service.
    order: int
    narration: str
    # Prompt used to generate this frame's image, if one exists.
    image_prompt: Optional[str] = None
    image_path: Optional[str] = None
    audio_path: Optional[str] = None
    video_segment_path: Optional[str] = None
    # Duration in seconds.
    duration: float = 0.0

    class Config:
        # Allow construction directly from attribute-bearing objects
        # (Pydantic's from_attributes / ORM mode).
        from_attributes = True
|
||||
|
||||
|
||||
class StoryboardSchema(BaseModel):
    """Schema for a complete storyboard (ordered frames plus final render info)."""
    id: str
    title: str
    frames: List[StoryboardFrameSchema]
    # Sum of frame durations, in seconds.
    total_duration: float
    # Path of the rendered final video, once composed.
    final_video_path: Optional[str] = None
    created_at: Optional[datetime] = None

    class Config:
        # Allow construction directly from attribute-bearing objects
        # (Pydantic's from_attributes / ORM mode).
        from_attributes = True
|
||||
|
||||
|
||||
class ReorderFramesRequest(BaseModel):
    """Request to reorder frames in the timeline."""
    order: List[str] = Field(..., description="List of frame IDs in new order")


class UpdateDurationRequest(BaseModel):
    """Request to update a frame's duration."""
    # Bounded to 0.1–60 s per frame by validation.
    duration: float = Field(..., ge=0.1, le=60.0, description="New duration in seconds")
|
||||
|
||||
|
||||
class PreviewRequest(BaseModel):
    """Request to generate a preview over a frame range."""
    start_frame: int = Field(0, ge=0, description="Start frame index")
    # None means "to the end of the storyboard".
    end_frame: Optional[int] = Field(None, description="End frame index (None = to end)")


class PreviewResponse(BaseModel):
    """Response with the generated preview video."""
    # Path of the rendered preview file.
    preview_path: str
    # Preview duration in seconds.
    duration: float
    # Number of frames included in the preview.
    frames_count: int
|
||||
|
||||
|
||||
class UpdateFrameRequest(BaseModel):
    """Request to update frame content; omitted fields are left unchanged."""
    narration: Optional[str] = Field(None, description="Updated narration text")
    image_prompt: Optional[str] = Field(None, description="Updated image generation prompt")


class UpdateFrameResponse(BaseModel):
    """Response after updating a frame — echoes the frame's current content."""
    id: str
    narration: str
    # Required field, but may still be None if the frame has no prompt.
    image_prompt: Optional[str]
    updated: bool = True
|
||||
|
||||
|
||||
class RegenerateImageRequest(BaseModel):
    """Request to regenerate a frame's image."""
    # None presumably reuses the frame's stored prompt; confirm in the editor router.
    image_prompt: Optional[str] = Field(None, description="Override prompt for regeneration")


class RegenerateImageResponse(BaseModel):
    """Response after regenerating an image."""
    # Path of the newly generated image.
    image_path: str
    success: bool = True
|
||||
|
||||
|
||||
class RegenerateAudioRequest(BaseModel):
    """Request to regenerate a frame's audio."""
    # None presumably reuses the frame's stored narration; confirm in the editor router.
    narration: Optional[str] = Field(None, description="Override narration for regeneration")
    voice: Optional[str] = Field(None, description="Voice to use for TTS")


class RegenerateAudioResponse(BaseModel):
    """Response after regenerating audio."""
    # Path of the newly synthesized audio file.
    audio_path: str
    # Duration of the new audio in seconds.
    duration: float
    success: bool = True
|
||||
|
||||
81
api/schemas/publish.py
Normal file
81
api/schemas/publish.py
Normal file
@@ -0,0 +1,81 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Publish API schemas
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Optional, Dict, Any
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class PlatformEnum(str, Enum):
    """Supported publishing targets (string-valued for JSON serialization)."""
    export = "export"        # presumably local file export (no upload); confirm
    bilibili = "bilibili"
    youtube = "youtube"
|
||||
|
||||
|
||||
class PublishStatusEnum(str, Enum):
    """Lifecycle states of a publish operation."""
    pending = "pending"          # queued, not started
    converting = "converting"    # pre-upload conversion step
    uploading = "uploading"
    processing = "processing"    # platform-side processing after upload
    published = "published"      # terminal: success
    failed = "failed"            # terminal: failure
|
||||
|
||||
|
||||
class VideoMetadataSchema(BaseModel):
    """Video metadata for publishing."""
    # Title is required and capped at 100 characters.
    title: str = Field(..., min_length=1, max_length=100)
    description: str = Field("", max_length=5000)
    tags: List[str] = Field(default_factory=list)
    category: Optional[str] = None
    # Path to a cover/thumbnail image, if any.
    cover_path: Optional[str] = None
    # Visibility, validated against the three common levels.
    privacy: str = Field("public", pattern="^(public|private|unlisted)$")
    # Free-form per-platform extras forwarded to the publisher.
    platform_options: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class PublishRequest(BaseModel):
    """Request to publish a video to a platform."""
    video_path: str = Field(..., description="Path to the video file")
    metadata: VideoMetadataSchema
|
||||
|
||||
|
||||
class PublishResultSchema(BaseModel):
    """Result of a publishing operation."""
    success: bool
    platform: str
    status: PublishStatusEnum
    # Public URL of the published video, when available.
    video_url: Optional[str] = None
    # Platform-assigned identifier (e.g. video ID), when available.
    platform_video_id: Optional[str] = None
    error_message: Optional[str] = None
    # Local output path — presumably set only for the "export" platform; confirm.
    export_path: Optional[str] = None
|
||||
|
||||
|
||||
class PublishTaskSchema(BaseModel):
    """A publishing task and its (eventual) result."""
    id: str
    platform: str
    status: PublishStatusEnum
    # Populated once the task reaches a terminal state.
    result: Optional[PublishResultSchema] = None
    # Timestamps as strings (format set by the producer).
    created_at: str
    updated_at: Optional[str] = None
|
||||
|
||||
|
||||
class PlatformRequirementsSchema(BaseModel):
    """Upload constraints and recommendations for a publishing platform."""
    # Hard limit on upload size, in megabytes.
    max_file_size_mb: int
    # None means the platform imposes no duration limit.
    max_duration_seconds: Optional[int] = None
    # default_factory for the mutable default, consistent with
    # VideoMetadataSchema's list/dict fields elsewhere in this module.
    supported_formats: List[str] = Field(default_factory=list)
    # (width, height) — portrait 1080x1920 by default.
    recommended_resolution: tuple = (1080, 1920)
    recommended_codec: str = "h264"
|
||||
@@ -186,7 +186,7 @@ class TaskManager:
|
||||
message: str = ""
|
||||
):
|
||||
"""
|
||||
Update task progress
|
||||
Update task progress and add to logs
|
||||
|
||||
Args:
|
||||
task_id: Task ID
|
||||
@@ -205,6 +205,17 @@ class TaskManager:
|
||||
percentage=percentage,
|
||||
message=message
|
||||
)
|
||||
|
||||
# Add to logs if message is new
|
||||
if message:
|
||||
# Check last log to avoid duplicates
|
||||
if not task.logs or task.logs[-1].get("message") != message:
|
||||
task.logs.append({
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"message": message,
|
||||
"percentage": round(percentage, 1)
|
||||
})
|
||||
logger.debug(f"Task {task_id} log: {message} ({percentage:.1f}%)")
|
||||
|
||||
def cancel_task(self, task_id: str) -> bool:
|
||||
"""
|
||||
|
||||
@@ -16,7 +16,7 @@ Task data models
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Any, Optional
|
||||
from typing import Any, Optional, List
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
@@ -51,10 +51,13 @@ class Task(BaseModel):
|
||||
# Progress tracking
|
||||
progress: Optional[TaskProgress] = None
|
||||
|
||||
# Result
|
||||
# Results and Errors
|
||||
result: Optional[Any] = None
|
||||
error: Optional[str] = None
|
||||
|
||||
# Event logs/History
|
||||
logs: List[dict] = Field(default_factory=list)
|
||||
|
||||
# Metadata
|
||||
created_at: datetime = Field(default_factory=datetime.now)
|
||||
started_at: Optional[datetime] = None
|
||||
|
||||
Reference in New Issue
Block a user