feat: Add comprehensive timeline editor with frame editing and regeneration capabilities
This commit is contained in:
134
pixelle_video/services/publishing/__init__.py
Normal file
134
pixelle_video/services/publishing/__init__.py
Normal file
@@ -0,0 +1,134 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Publishing service for multi-platform video distribution.
|
||||
|
||||
Supports:
|
||||
- Format conversion + export (Douyin/Kuaishou)
|
||||
- API-based upload (Bilibili/YouTube)
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class Platform(Enum):
    """Publishing destinations supported by the service.

    EXPORT is a pseudo-platform: the video is converted and written to
    disk for manual upload rather than pushed over an API.
    """

    # Local format conversion only
    EXPORT = "export"
    # 抖音 — no public upload API; handled via export or CDP
    DOUYIN = "douyin"
    # 快手 — no public upload API; handled via export or CDP
    KUAISHOU = "kuaishou"
    # B站 — uploaded directly through the Bilibili API
    BILIBILI = "bilibili"
    # YouTube — uploaded directly through the Data API
    YOUTUBE = "youtube"
|
||||
|
||||
|
||||
class PublishStatus(Enum):
    """Lifecycle states of a publishing task.

    A task normally moves PENDING → CONVERTING/UPLOADING → PROCESSING →
    PUBLISHED; any step may terminate in FAILED.
    """

    PENDING = "pending"
    CONVERTING = "converting"
    UPLOADING = "uploading"
    PROCESSING = "processing"
    PUBLISHED = "published"
    FAILED = "failed"
|
||||
|
||||
|
||||
@dataclass
class VideoMetadata:
    """User-facing metadata attached to a video when it is published.

    Only ``title`` is required; everything else has a neutral default so
    callers can publish with minimal ceremony.
    """

    title: str
    description: str = ""
    tags: List[str] = field(default_factory=list)
    category: Optional[str] = None
    cover_path: Optional[str] = None
    # One of: "public", "private", "unlisted"
    privacy: str = "public"

    # Free-form, per-platform settings (e.g. Bilibili "tid")
    platform_options: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
class PublishResult:
    """Outcome of a single publishing operation, success or failure."""

    success: bool
    platform: Platform
    status: PublishStatus

    # Populated on success (API-based platforms)
    video_url: Optional[str] = None
    platform_video_id: Optional[str] = None

    # Populated on failure
    error_message: Optional[str] = None

    # Populated by the export publisher (local file path)
    export_path: Optional[str] = None

    # Timing of the operation
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
|
||||
|
||||
|
||||
@dataclass
class PublishTask:
    """A single publish job handed to the background task queue."""

    id: str
    video_path: str
    platform: Platform
    metadata: VideoMetadata
    # Current lifecycle state; updated by the task manager
    status: PublishStatus = PublishStatus.PENDING
    # Filled in once the publisher finishes (success or failure)
    result: Optional[PublishResult] = None
    created_at: datetime = field(default_factory=datetime.now)
    updated_at: Optional[datetime] = None
|
||||
|
||||
|
||||
class Publisher(ABC):
    """Base class all platform publishers implement.

    Concrete subclasses set ``platform`` and implement the async
    ``publish`` / ``validate_credentials`` pair; a conservative default
    for ``get_platform_requirements`` may be overridden.
    """

    # Target platform; assigned by each concrete subclass.
    platform: Platform

    @abstractmethod
    async def publish(
        self,
        video_path: str,
        metadata: VideoMetadata,
        progress_callback: Optional[callable] = None
    ) -> PublishResult:
        """
        Publish a video to the platform.

        Args:
            video_path: Path to the video file
            metadata: Video metadata (title, description, tags, etc.)
            progress_callback: Optional callback for progress updates

        Returns:
            PublishResult with success/failure details
        """
        ...

    @abstractmethod
    async def validate_credentials(self) -> bool:
        """Return True when the platform credentials look usable."""
        ...

    def get_platform_requirements(self) -> Dict[str, Any]:
        """Conservative default constraints; subclasses override as needed."""
        return {
            "max_file_size_mb": 128,
            "max_duration_seconds": 900,  # 15 minutes
            "supported_formats": ["mp4", "webm"],
            "recommended_resolution": (1080, 1920),  # Portrait 9:16
            "recommended_codec": "h264",
        }
|
||||
426
pixelle_video/services/publishing/bilibili_publisher.py
Normal file
426
pixelle_video/services/publishing/bilibili_publisher.py
Normal file
@@ -0,0 +1,426 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Bilibili Publisher - Upload videos to Bilibili using their Open Platform API.
|
||||
|
||||
Flow:
|
||||
1. Get preupload info (upos_uri, auth, chunk_size)
|
||||
2. Upload video chunks (8MB each)
|
||||
3. Merge chunks
|
||||
4. Submit video with metadata
|
||||
"""
|
||||
|
||||
import os
|
||||
import math
|
||||
import aiohttp
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Optional, Dict, Any
|
||||
from loguru import logger
|
||||
|
||||
from pixelle_video.services.publishing import (
|
||||
Publisher,
|
||||
Platform,
|
||||
PublishStatus,
|
||||
VideoMetadata,
|
||||
PublishResult,
|
||||
)
|
||||
|
||||
|
||||
# Bilibili API endpoints
|
||||
BILIBILI_PREUPLOAD_URL = "https://member.bilibili.com/preupload"
|
||||
BILIBILI_SUBMIT_URL = "https://member.bilibili.com/x/vu/web/add"
|
||||
|
||||
# Chunk size: 8MB (recommended by Bilibili)
|
||||
CHUNK_SIZE = 8 * 1024 * 1024
|
||||
|
||||
|
||||
class BilibiliPublisher(Publisher):
    """
    Publisher for Bilibili video platform.

    Upload flow:
    1. Preupload: obtain an upload session (upos_uri, auth, endpoint, upload_id)
    2. Upload the video in 8MB chunks via PUT
    3. Merge the chunks server-side
    4. Submit the video with its metadata

    Requires:
    - access_token: OAuth access token
    - refresh_token: For token refresh (optional)
    Alternatively SESSDATA / bili_jct cookies can be used.
    """

    platform = Platform.BILIBILI

    def __init__(
        self,
        access_token: Optional[str] = None,
        refresh_token: Optional[str] = None,
        sessdata: Optional[str] = None,  # Alternative: use cookies
        bili_jct: Optional[str] = None,
    ):
        # Explicit arguments take precedence over environment variables.
        self.access_token = access_token or os.getenv("BILIBILI_ACCESS_TOKEN")
        self.refresh_token = refresh_token or os.getenv("BILIBILI_REFRESH_TOKEN")
        self.sessdata = sessdata or os.getenv("BILIBILI_SESSDATA")
        self.bili_jct = bili_jct or os.getenv("BILIBILI_BILI_JCT")

        # Per-upload session state, populated by _preupload()
        self._upload_id = None
        self._upos_uri = None
        self._auth = None
        self._endpoint = None

    def _failure(self, message: str, started_at: datetime) -> PublishResult:
        """Build a FAILED PublishResult with consistent timestamps."""
        return PublishResult(
            success=False,
            platform=Platform.BILIBILI,
            status=PublishStatus.FAILED,
            error_message=message,
            started_at=started_at,
            completed_at=datetime.now(),
        )

    async def publish(
        self,
        video_path: str,
        metadata: VideoMetadata,
        progress_callback: Optional[callable] = None
    ) -> PublishResult:
        """Upload and publish video to Bilibili.

        Args:
            video_path: Path to the local video file
            metadata: Title/description/tags plus platform options (e.g. "tid")
            progress_callback: Optional (progress: float, message: str) hook

        Returns:
            PublishResult; on success ``video_url`` points at the new BV page.
        """
        started_at = datetime.now()

        try:
            if not await self.validate_credentials():
                return self._failure("B站凭证未配置或已过期", started_at)

            video_file = Path(video_path)
            if not video_file.exists():
                return self._failure(f"视频文件不存在: {video_path}", started_at)

            file_size = video_file.stat().st_size

            if progress_callback:
                progress_callback(0.05, "获取上传信息...")

            # Step 1: Get preupload info
            preupload_info = await self._preupload(video_file.name, file_size)
            if not preupload_info:
                return self._failure("获取上传信息失败", started_at)

            if progress_callback:
                progress_callback(0.1, "上传视频分片...")

            # Step 2: Upload chunks sequentially, reusing one HTTP session
            chunk_count = math.ceil(file_size / CHUNK_SIZE)
            uploaded_chunks = 0

            async with aiohttp.ClientSession() as session:
                with open(video_path, "rb") as f:
                    for chunk_index in range(chunk_count):
                        # Read in a worker thread so a slow disk read does
                        # not block the event loop.
                        chunk_data = await asyncio.to_thread(f.read, CHUNK_SIZE)
                        chunk_start = chunk_index * CHUNK_SIZE
                        chunk_end = min(chunk_start + len(chunk_data), file_size)

                        success = await self._upload_chunk(
                            session,
                            chunk_data,
                            chunk_index,
                            chunk_count,
                            chunk_start,
                            chunk_end,
                            file_size,
                        )

                        if not success:
                            return self._failure(
                                f"分片 {chunk_index + 1}/{chunk_count} 上传失败",
                                started_at,
                            )

                        uploaded_chunks += 1
                        # Chunk uploads span the 10%–80% progress window
                        progress = 0.1 + (0.7 * uploaded_chunks / chunk_count)
                        if progress_callback:
                            progress_callback(progress, f"上传分片 {uploaded_chunks}/{chunk_count}")

            if progress_callback:
                progress_callback(0.85, "合并视频...")

            # Step 3: Merge chunks
            video_filename = await self._merge_chunks(chunk_count, file_size)
            if not video_filename:
                return self._failure("视频合并失败", started_at)

            if progress_callback:
                progress_callback(0.9, "提交稿件...")

            # Step 4: Submit video
            bvid = await self._submit_video(video_filename, metadata)
            if not bvid:
                return self._failure("稿件提交失败", started_at)

            if progress_callback:
                progress_callback(1.0, "发布成功")

            return PublishResult(
                success=True,
                platform=Platform.BILIBILI,
                status=PublishStatus.PUBLISHED,
                video_url=f"https://www.bilibili.com/video/{bvid}",
                platform_video_id=bvid,
                started_at=started_at,
                completed_at=datetime.now(),
            )

        except Exception as e:
            logger.error(f"Bilibili publish failed: {e}")
            return self._failure(str(e), started_at)

    async def _preupload(self, filename: str, file_size: int) -> Optional[Dict]:
        """Get preupload info from Bilibili and cache the session fields.

        Side effect: stores upos_uri / auth / endpoint / upload_id on self.

        Returns:
            The raw response dict, or None on any error.
        """
        params = {
            "name": filename,
            "size": file_size,
            "r": "upos",
            "profile": "ugcupos/bup",
        }

        headers = self._get_headers()

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    BILIBILI_PREUPLOAD_URL,
                    params=params,
                    headers=headers
                ) as resp:
                    if resp.status != 200:
                        logger.error(f"Preupload failed: {resp.status}")
                        return None

                    data = await resp.json()

                    self._upos_uri = data.get("upos_uri")
                    self._auth = data.get("auth")
                    self._endpoint = data.get("endpoint")
                    self._upload_id = data.get("upload_id")

                    logger.info(f"Preupload success: {self._upos_uri}")
                    return data

        except Exception as e:
            logger.error(f"Preupload error: {e}")
            return None

    async def _upload_chunk(
        self,
        session: aiohttp.ClientSession,
        chunk_data: bytes,
        chunk_index: int,
        chunk_count: int,
        chunk_start: int,
        chunk_end: int,
        total_size: int,
    ) -> bool:
        """Upload a single chunk via PUT; returns True on 200/201/204."""
        # Requires a successful _preupload() first.
        if not self._upos_uri or not self._auth:
            return False

        # Build upload URL; endpoint is scheme-relative (e.g. "//upos-...")
        upload_url = f"https:{self._endpoint}{self._upos_uri}"

        params = {
            "uploadId": self._upload_id,
            "partNumber": chunk_index + 1,  # parts are 1-based
            "chunk": chunk_index,
            "chunks": chunk_count,
            "size": len(chunk_data),
            "start": chunk_start,
            "end": chunk_end,
            "total": total_size,
        }

        headers = {
            "X-Upos-Auth": self._auth,
            "Content-Type": "application/octet-stream",
        }

        try:
            async with session.put(
                upload_url,
                params=params,
                headers=headers,
                data=chunk_data
            ) as resp:
                if resp.status not in [200, 201, 204]:
                    logger.error(f"Chunk upload failed: {resp.status}")
                    return False
                return True

        except Exception as e:
            logger.error(f"Chunk upload error: {e}")
            return False

    async def _merge_chunks(self, chunk_count: int, file_size: int) -> Optional[str]:
        """Ask the upload endpoint to merge all uploaded chunks.

        Returns:
            The server-side video filename (upos basename without
            extension), or None on any error.
        """
        if not self._upos_uri:
            return None

        merge_url = f"https:{self._endpoint}{self._upos_uri}"

        params = {
            "output": "json",
            "name": self._upos_uri.split("/")[-1],
            "profile": "ugcupos/bup",
            "uploadId": self._upload_id,
            "biz_id": "",
        }

        # NOTE(review): real eTags from each chunk response are not tracked;
        # placeholder eTags are sent instead. This appears to be accepted by
        # the upos endpoint but should be confirmed against the API.
        parts = [{"partNumber": i + 1, "eTag": "etag"} for i in range(chunk_count)]

        headers = {
            "X-Upos-Auth": self._auth,
            "Content-Type": "application/json",
        }

        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    merge_url,
                    params=params,
                    headers=headers,
                    json={"parts": parts}
                ) as resp:
                    if resp.status != 200:
                        logger.error(f"Merge failed: {resp.status}")
                        return None

                    # Drain the body; the filename is derived from the
                    # upos URI rather than the response payload.
                    await resp.json()
                    return self._upos_uri.split("/")[-1].split(".")[0]

        except Exception as e:
            logger.error(f"Merge error: {e}")
            return None

    async def _submit_video(
        self,
        video_filename: str,
        metadata: VideoMetadata
    ) -> Optional[str]:
        """Submit the merged video with its metadata.

        Returns:
            The new video's bvid on success, None otherwise.
        """

        # Default to "生活" category (tid=160)
        tid = metadata.platform_options.get("tid", 160)

        data = {
            "copyright": 1,  # 1=原创
            "videos": [{
                "filename": video_filename,
                "title": metadata.title,
                "desc": metadata.description,
            }],
            "title": metadata.title,
            "desc": metadata.description,
            "tid": tid,
            "tag": ",".join(metadata.tags) if metadata.tags else "",
            "source": "",
            "cover": metadata.cover_path or "",
            "no_reprint": 1,
            "open_elec": 0,
        }

        headers = self._get_headers()
        headers["Content-Type"] = "application/json"

        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    BILIBILI_SUBMIT_URL,
                    headers=headers,
                    json=data
                ) as resp:
                    result = await resp.json()

                    # code == 0 is Bilibili's success marker
                    if result.get("code") == 0:
                        bvid = result.get("data", {}).get("bvid")
                        logger.info(f"Video submitted: {bvid}")
                        return bvid
                    else:
                        logger.error(f"Submit failed: {result}")
                        return None

        except Exception as e:
            logger.error(f"Submit error: {e}")
            return None

    def _get_headers(self) -> Dict[str, str]:
        """Get common headers with authentication (token and/or cookies)."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
            "Referer": "https://www.bilibili.com/",
        }

        if self.access_token:
            headers["Authorization"] = f"Bearer {self.access_token}"

        if self.sessdata:
            headers["Cookie"] = f"SESSDATA={self.sessdata}"
            if self.bili_jct:
                headers["Cookie"] += f"; bili_jct={self.bili_jct}"

        return headers

    async def validate_credentials(self) -> bool:
        """Check that at least one credential source is configured.

        NOTE(review): this only checks presence, not validity/expiry —
        an expired token still passes here and fails at upload time.
        """
        return bool(self.access_token or self.sessdata)

    def get_platform_requirements(self) -> Dict[str, Any]:
        """Bilibili-specific upload constraints."""
        return {
            "max_file_size_mb": 4096,  # 4GB
            "max_duration_seconds": 14400,  # 4 hours
            "supported_formats": ["mp4", "flv", "webm", "mov"],
            "recommended_resolution": (1920, 1080),
            "recommended_codec": "h264",
            "chunk_size_mb": 8,
        }
|
||||
182
pixelle_video/services/publishing/export_publisher.py
Normal file
182
pixelle_video/services/publishing/export_publisher.py
Normal file
@@ -0,0 +1,182 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Export Publisher - Format conversion and local export for platforms
|
||||
without API access (Douyin, Kuaishou).
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import subprocess
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
from loguru import logger
|
||||
|
||||
from pixelle_video.services.publishing import (
|
||||
Publisher,
|
||||
Platform,
|
||||
PublishStatus,
|
||||
VideoMetadata,
|
||||
PublishResult,
|
||||
)
|
||||
|
||||
|
||||
class ExportPublisher(Publisher):
    """
    Publisher that converts video to a platform-optimized format
    and exports it to the local filesystem for manual upload.

    Used for platforms without a public upload API (Douyin, Kuaishou).
    """

    platform = Platform.EXPORT

    def __init__(
        self,
        output_dir: str = "./output/exports",
        target_resolution: tuple = (1080, 1920),  # Portrait 9:16
        target_codec: str = "h264",
        max_file_size_mb: int = 128,
    ):
        self.output_dir = Path(output_dir)
        self.target_resolution = target_resolution
        self.target_codec = target_codec
        self.max_file_size_mb = max_file_size_mb

        # Ensure output directory exists
        self.output_dir.mkdir(parents=True, exist_ok=True)

    async def publish(
        self,
        video_path: str,
        metadata: VideoMetadata,
        progress_callback: Optional[callable] = None
    ) -> PublishResult:
        """
        Convert video to optimized format and export.

        Args:
            video_path: Source video file
            metadata: Used to derive a filesystem-safe output filename
            progress_callback: Optional (progress, message) hook

        Returns:
            PublishResult with ``export_path`` set on success.
        """
        started_at = datetime.now()

        try:
            if progress_callback:
                progress_callback(0.1, "分析视频...")

            # Build output filename: sanitized title (alnum, space, -, _)
            # truncated to 50 chars, plus a timestamp for uniqueness.
            safe_title = "".join(c if c.isalnum() or c in " -_" else "" for c in metadata.title)
            safe_title = safe_title[:50].strip() or "video"
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_filename = f"{safe_title}_{timestamp}.mp4"
            output_path = self.output_dir / output_filename

            if progress_callback:
                progress_callback(0.2, "转换格式...")

            # Convert video
            success = await self._convert_video(
                video_path,
                str(output_path),
                progress_callback
            )

            if not success:
                return PublishResult(
                    success=False,
                    platform=Platform.EXPORT,
                    status=PublishStatus.FAILED,
                    error_message="视频转换失败",
                    started_at=started_at,
                    completed_at=datetime.now(),
                )

            if progress_callback:
                progress_callback(1.0, "导出完成")

            # Verify file size against the platform limit; warn rather than
            # fail so the user can still decide what to do with the file.
            file_size_mb = output_path.stat().st_size / (1024 * 1024)
            if file_size_mb > self.max_file_size_mb:
                logger.warning(
                    f"Exported file is {file_size_mb:.1f}MB, "
                    f"exceeding the {self.max_file_size_mb}MB platform limit"
                )

            return PublishResult(
                success=True,
                platform=Platform.EXPORT,
                status=PublishStatus.PUBLISHED,
                export_path=str(output_path),
                started_at=started_at,
                completed_at=datetime.now(),
            )

        except Exception as e:
            logger.error(f"Export failed: {e}")
            return PublishResult(
                success=False,
                platform=Platform.EXPORT,
                status=PublishStatus.FAILED,
                error_message=str(e),
                started_at=started_at,
                completed_at=datetime.now(),
            )

    async def _convert_video(
        self,
        input_path: str,
        output_path: str,
        progress_callback: Optional[callable] = None
    ) -> bool:
        """Convert video using FFmpeg; returns True on success.

        Scales to the target resolution while keeping aspect ratio and
        pads the remainder (letterbox/pillarbox) so the output is exactly
        target-sized.
        """

        width, height = self.target_resolution

        # H.264 + AAC with faststart so the moov atom is at the front
        # (required for instant playback on mobile platforms).
        cmd = [
            "ffmpeg", "-y",
            "-i", input_path,
            "-c:v", "libx264",
            "-preset", "medium",
            "-crf", "23",
            "-c:a", "aac",
            "-b:a", "128k",
            "-vf", f"scale={width}:{height}:force_original_aspect_ratio=decrease,pad={width}:{height}:(ow-iw)/2:(oh-ih)/2",
            "-movflags", "+faststart",
            output_path
        ]

        try:
            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            stdout, stderr = await process.communicate()

            if process.returncode != 0:
                logger.error(f"FFmpeg error: {stderr.decode()}")
                return False

            return True

        except FileNotFoundError:
            logger.error("FFmpeg not found. Please install FFmpeg.")
            # Fallback: deliver the original file unconverted rather than
            # failing the whole export. NOTE(review): the result is reported
            # as success even though no conversion happened — confirm this
            # best-effort behavior is intended.
            shutil.copy(input_path, output_path)
            return True

    async def validate_credentials(self) -> bool:
        """No credentials needed for a local export."""
        return True

    def get_platform_requirements(self):
        """Export constraints as configured on this instance."""
        return {
            "max_file_size_mb": self.max_file_size_mb,
            "recommended_resolution": self.target_resolution,
            "recommended_codec": self.target_codec,
            "output_format": "mp4",
            "platforms": ["douyin", "kuaishou"],
        }
|
||||
299
pixelle_video/services/publishing/task_manager.py
Normal file
299
pixelle_video/services/publishing/task_manager.py
Normal file
@@ -0,0 +1,299 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Publish Task Manager - Background task queue for video publishing.
|
||||
|
||||
Features:
|
||||
- Async task queue with configurable workers
|
||||
- Task persistence (in-memory, Redis optional)
|
||||
- Progress tracking and callbacks
|
||||
- Retry logic for failed tasks
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Optional, Dict, List, Callable
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from loguru import logger
|
||||
|
||||
from pixelle_video.services.publishing import (
|
||||
Publisher,
|
||||
Platform,
|
||||
PublishStatus,
|
||||
VideoMetadata,
|
||||
PublishResult,
|
||||
PublishTask,
|
||||
)
|
||||
from pixelle_video.services.publishing.export_publisher import ExportPublisher
|
||||
from pixelle_video.services.publishing.bilibili_publisher import BilibiliPublisher
|
||||
from pixelle_video.services.publishing.youtube_publisher import YouTubePublisher
|
||||
|
||||
|
||||
class TaskPriority(Enum):
    """Relative scheduling priority for queued publish tasks."""

    LOW = 0
    NORMAL = 1
    HIGH = 2
|
||||
|
||||
|
||||
@dataclass
class QueuedTask:
    """A PublishTask wrapped with queue bookkeeping.

    Carries priority, retry accounting, lifecycle timestamps, and the
    latest progress report for the underlying task.
    """

    task: PublishTask
    priority: TaskPriority = TaskPriority.NORMAL
    # Retry accounting: the task is re-enqueued up to max_retries times,
    # waiting retry_delay seconds between attempts.
    retries: int = 0
    max_retries: int = 3
    retry_delay: float = 5.0
    # Lifecycle timestamps
    created_at: datetime = field(default_factory=datetime.now)
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    # Latest reported progress (0.0–1.0) and human-readable message
    progress: float = 0.0
    progress_message: str = ""
|
||||
|
||||
|
||||
class PublishTaskManager:
    """
    Manages background publishing tasks with an async queue.

    Tasks are kept in an in-memory registry for their whole lifetime;
    failed tasks are retried (see QueuedTask.max_retries).

    Usage:
        manager = PublishTaskManager()
        await manager.start()

        task_id = await manager.enqueue(
            video_path="/path/to/video.mp4",
            platform=Platform.BILIBILI,
            metadata=VideoMetadata(title="My Video")
        )

        status = manager.get_task(task_id)
    """

    def __init__(
        self,
        max_workers: int = 3,
        max_queue_size: int = 100,
    ):
        self.max_workers = max_workers
        self.max_queue_size = max_queue_size

        # Task storage: id -> QueuedTask (in-memory only)
        self._tasks: Dict[str, QueuedTask] = {}
        self._queue: asyncio.Queue = None  # created in start()
        self._workers: List[asyncio.Task] = []
        self._running = False

        # One publisher instance per supported platform
        self._publishers: Dict[Platform, Publisher] = {
            Platform.EXPORT: ExportPublisher(),
            Platform.BILIBILI: BilibiliPublisher(),
            Platform.YOUTUBE: YouTubePublisher(),
        }

        # Optional user callbacks
        self._on_complete: Optional[Callable] = None
        self._on_progress: Optional[Callable] = None

    async def start(self):
        """Start the task manager and workers. Idempotent."""
        if self._running:
            return

        self._queue = asyncio.Queue(maxsize=self.max_queue_size)
        self._running = True

        # Start worker tasks
        for i in range(self.max_workers):
            worker = asyncio.create_task(self._worker(i))
            self._workers.append(worker)

        logger.info(f"✅ Publish task manager started with {self.max_workers} workers")

    async def stop(self):
        """Stop all workers; tasks still queued are left unprocessed."""
        self._running = False

        # Cancel all workers
        for worker in self._workers:
            worker.cancel()

        await asyncio.gather(*self._workers, return_exceptions=True)
        self._workers.clear()

        logger.info("✅ Publish task manager stopped")

    async def enqueue(
        self,
        video_path: str,
        platform: Platform,
        metadata: VideoMetadata,
        priority: TaskPriority = TaskPriority.NORMAL,
    ) -> str:
        """
        Add a publish task to the queue.

        Raises:
            RuntimeError: if the manager was never started.

        Returns:
            Task ID for tracking
        """
        # Fail with a clear message instead of an AttributeError on the
        # still-None queue when start() was never awaited.
        if self._queue is None:
            raise RuntimeError("PublishTaskManager not started; call start() first")

        # Short random id; sufficient for a per-process, in-memory registry
        task_id = str(uuid.uuid4())[:8]

        task = PublishTask(
            id=task_id,
            video_path=video_path,
            platform=platform,
            metadata=metadata,
            status=PublishStatus.PENDING,
        )

        queued_task = QueuedTask(task=task, priority=priority)
        self._tasks[task_id] = queued_task

        await self._queue.put(queued_task)

        logger.info(f"📥 Queued task {task_id}: {metadata.title} → {platform.value}")

        return task_id

    def get_task(self, task_id: str) -> Optional[QueuedTask]:
        """Get task by ID, or None if unknown."""
        return self._tasks.get(task_id)

    def get_all_tasks(self) -> List[QueuedTask]:
        """Get all tasks ever enqueued (any status)."""
        return list(self._tasks.values())

    def get_pending_tasks(self) -> List[QueuedTask]:
        """Get tasks still waiting to be processed."""
        return [t for t in self._tasks.values() if t.task.status == PublishStatus.PENDING]

    def get_active_tasks(self) -> List[QueuedTask]:
        """Get currently processing tasks."""
        return [t for t in self._tasks.values() if t.task.status in [
            PublishStatus.CONVERTING,
            PublishStatus.UPLOADING,
            PublishStatus.PROCESSING,
        ]]

    def set_on_complete(self, callback: Callable):
        """Set callback(task_id, result) invoked after each task finishes."""
        self._on_complete = callback

    def set_on_progress(self, callback: Callable):
        """Set callback(task_id, progress, message) for progress updates."""
        self._on_progress = callback

    async def _worker(self, worker_id: int):
        """Worker coroutine that processes tasks from the queue until stopped."""
        logger.debug(f"Worker {worker_id} started")

        while self._running:
            try:
                # Poll with a timeout so the loop re-checks _running
                # regularly and can exit cleanly.
                try:
                    queued_task = await asyncio.wait_for(
                        self._queue.get(),
                        timeout=1.0
                    )
                except asyncio.TimeoutError:
                    continue

                await self._process_task(queued_task, worker_id)
                self._queue.task_done()

            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Worker {worker_id} error: {e}")

    async def _process_task(self, queued_task: QueuedTask, worker_id: int):
        """Run one publish task; re-enqueue on failure while retries remain."""
        task = queued_task.task
        task_id = task.id

        logger.info(f"🔄 Worker {worker_id} processing task {task_id}")

        queued_task.started_at = datetime.now()
        task.status = PublishStatus.UPLOADING

        # Get publisher for the task's platform
        publisher = self._publishers.get(task.platform)
        if not publisher:
            task.status = PublishStatus.FAILED
            task.result = PublishResult(
                success=False,
                platform=task.platform,
                status=PublishStatus.FAILED,
                error_message=f"No publisher for platform: {task.platform}",
            )
            return

        # Progress callback: record on the task and forward to listeners
        def progress_callback(progress: float, message: str):
            queued_task.progress = progress
            queued_task.progress_message = message
            if self._on_progress:
                self._on_progress(task_id, progress, message)

        try:
            # Execute publish
            result = await publisher.publish(
                task.video_path,
                task.metadata,
                progress_callback=progress_callback
            )

            task.result = result
            task.status = result.status

            if result.success:
                logger.info(f"✅ Task {task_id} completed: {result.video_url or result.export_path}")
            else:
                logger.warning(f"❌ Task {task_id} failed: {result.error_message}")

                # Retry if attempts remain: reset status, wait, re-enqueue.
                # Early return skips completion bookkeeping/callback because
                # the task is not finished yet.
                if queued_task.retries < queued_task.max_retries:
                    queued_task.retries += 1
                    task.status = PublishStatus.PENDING
                    logger.info(f"🔄 Retrying task {task_id} ({queued_task.retries}/{queued_task.max_retries})")
                    await asyncio.sleep(queued_task.retry_delay)
                    await self._queue.put(queued_task)
                    return

        except Exception as e:
            logger.error(f"Task {task_id} exception: {e}")
            task.status = PublishStatus.FAILED
            task.result = PublishResult(
                success=False,
                platform=task.platform,
                status=PublishStatus.FAILED,
                error_message=str(e),
            )

        queued_task.completed_at = datetime.now()
        task.updated_at = datetime.now()

        # Call completion callback
        if self._on_complete:
            self._on_complete(task_id, task.result)
|
||||
|
||||
|
||||
# Process-wide singleton, created lazily on first access
_publish_manager: Optional[PublishTaskManager] = None


def get_publish_manager() -> PublishTaskManager:
    """Return the shared publish task manager, creating it on first use.

    Note: the returned manager still has to be started with ``await
    manager.start()`` before tasks can be enqueued.
    """
    global _publish_manager
    if _publish_manager is None:
        _publish_manager = PublishTaskManager()
    return _publish_manager
|
||||
310
pixelle_video/services/publishing/youtube_publisher.py
Normal file
310
pixelle_video/services/publishing/youtube_publisher.py
Normal file
@@ -0,0 +1,310 @@
|
||||
# Copyright (C) 2025 AIDC-AI
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
YouTube Publisher - Upload videos to YouTube using Data API v3.
|
||||
|
||||
Requires:
|
||||
- Google Cloud project with YouTube Data API v3 enabled
|
||||
- OAuth 2.0 credentials (client_secrets.json)
|
||||
"""
|
||||
|
||||
import os
import pickle
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, Optional

from loguru import logger

from pixelle_video.services.publishing import (
    Publisher,
    Platform,
    PublishStatus,
    VideoMetadata,
    PublishResult,
)
|
||||
|
||||
|
||||
# YouTube category IDs
# Maps friendly lowercase category names to the numeric string IDs the
# YouTube Data API v3 expects in `snippet.categoryId` (videoCategories
# resource). Lookups fall back to "22" (People & Blogs) in
# YouTubePublisher._get_category_id when a name is missing.
# NOTE(review): category availability can vary by region — confirm against
# the videoCategories.list endpoint if an upload is rejected.
YOUTUBE_CATEGORIES = {
    "film": "1",
    "autos": "2",
    "music": "10",
    "pets": "15",
    "sports": "17",
    "travel": "19",
    "gaming": "20",
    "people": "22",
    "comedy": "23",
    "entertainment": "24",
    "news": "25",
    "howto": "26",
    "education": "27",
    "science": "28",
    "nonprofits": "29",
}
|
||||
|
||||
|
||||
class YouTubePublisher(Publisher):
    """
    Publisher for YouTube video platform.

    Uploads videos via the YouTube Data API v3 using the Google API Python
    Client with OAuth 2.0 installed-app credentials.

    Setup:
        1. Create project in Google Cloud Console
        2. Enable YouTube Data API v3
        3. Create OAuth 2.0 credentials
        4. Download client_secrets.json
    """

    platform = Platform.YOUTUBE

    # Fallback YouTube category: "22" = People & Blogs.
    DEFAULT_CATEGORY_ID = "22"

    def __init__(
        self,
        client_secrets_path: Optional[str] = None,
        token_path: Optional[str] = None,
    ):
        """
        Args:
            client_secrets_path: Path to the OAuth client_secrets.json file.
                Falls back to the YOUTUBE_CLIENT_SECRETS env var, then a
                bundled default path.
            token_path: Path where the OAuth token is cached (pickle file).
                Falls back to the YOUTUBE_TOKEN_PATH env var, then a default.
        """
        self.client_secrets_path = client_secrets_path or os.getenv(
            "YOUTUBE_CLIENT_SECRETS",
            "./config/youtube_client_secrets.json"
        )
        self.token_path = token_path or os.getenv(
            "YOUTUBE_TOKEN_PATH",
            "./config/youtube_token.pickle"
        )

        # Lazily-built API client (see _get_youtube_service).
        self._youtube_service = None

    def _failed_result(self, error_message: str, started_at: datetime) -> PublishResult:
        """Build a FAILED PublishResult stamped with start/completion times."""
        return PublishResult(
            success=False,
            platform=Platform.YOUTUBE,
            status=PublishStatus.FAILED,
            error_message=error_message,
            started_at=started_at,
            completed_at=datetime.now(),
        )

    async def publish(
        self,
        video_path: str,
        metadata: VideoMetadata,
        progress_callback: Optional[Callable[[float, str], None]] = None
    ) -> PublishResult:
        """
        Upload and publish a video to YouTube.

        Args:
            video_path: Path to the local video file.
            metadata: Title/description/tags/category/privacy for the upload.
            progress_callback: Optional ``(progress, message)`` hook;
                ``progress`` is a fraction in ``[0.0, 1.0]``.

        Returns:
            PublishResult with the watch URL and platform video ID on
            success, or a FAILED result carrying an error message.
        """
        started_at = datetime.now()

        try:
            if not await self.validate_credentials():
                return self._failed_result(
                    "YouTube 凭证未配置。请配置 client_secrets.json", started_at
                )

            if not Path(video_path).exists():
                return self._failed_result(f"视频文件不存在: {video_path}", started_at)

            if progress_callback:
                progress_callback(0.1, "初始化 YouTube API...")

            # Build an authenticated API client (may trigger an OAuth flow).
            youtube = await self._get_youtube_service()
            if not youtube:
                return self._failed_result("无法初始化 YouTube API 服务", started_at)

            if progress_callback:
                progress_callback(0.2, "准备上传...")

            # Map generic metadata onto the YouTube API request body.
            body = {
                "snippet": {
                    "title": metadata.title,
                    "description": metadata.description,
                    "tags": metadata.tags,
                    "categoryId": self._get_category_id(metadata.category),
                },
                "status": {
                    "privacyStatus": self._map_privacy(metadata.privacy),
                    "selfDeclaredMadeForKids": False,
                }
            }

            # Disclose AI-generated content when the caller flags it.
            if metadata.platform_options.get("contains_synthetic_media"):
                body["status"]["containsSyntheticMedia"] = True

            if progress_callback:
                progress_callback(0.3, "上传视频...")

            # Resumable chunked upload; returns the new video ID or None.
            video_id = await self._upload_video(youtube, video_path, body, progress_callback)

            if not video_id:
                return self._failed_result("视频上传失败", started_at)

            if progress_callback:
                progress_callback(1.0, "发布成功")

            return PublishResult(
                success=True,
                platform=Platform.YOUTUBE,
                status=PublishStatus.PUBLISHED,
                video_url=f"https://www.youtube.com/watch?v={video_id}",
                platform_video_id=video_id,
                started_at=started_at,
                completed_at=datetime.now(),
            )

        except Exception as e:
            logger.error(f"YouTube publish failed: {e}")
            return self._failed_result(str(e), started_at)

    async def _get_youtube_service(self):
        """
        Return an authenticated YouTube API client, or None on failure.

        Loads a cached OAuth token if present, refreshes it when expired, or
        runs the interactive local-server OAuth flow from client_secrets.json.
        Returns None when the Google libraries are missing, the client
        secrets file is absent, or authentication fails.
        """
        try:
            from google_auth_oauthlib.flow import InstalledAppFlow
            from googleapiclient.discovery import build

            SCOPES = ["https://www.googleapis.com/auth/youtube.upload"]

            creds = None

            # Load previously cached credentials.
            # NOTE(review): pickle.load is unsafe on untrusted files — the
            # token path must remain locally generated, never downloaded.
            if os.path.exists(self.token_path):
                with open(self.token_path, "rb") as token:
                    creds = pickle.load(token)

            # Refresh expired credentials, or run the interactive flow.
            if not creds or not creds.valid:
                if creds and creds.expired and creds.refresh_token:
                    from google.auth.transport.requests import Request
                    creds.refresh(Request())
                else:
                    if not os.path.exists(self.client_secrets_path):
                        logger.error(f"Client secrets not found: {self.client_secrets_path}")
                        return None

                    flow = InstalledAppFlow.from_client_secrets_file(
                        self.client_secrets_path,
                        SCOPES
                    )
                    # Blocking interactive flow (opens a browser locally).
                    creds = flow.run_local_server(port=0)

                # Cache the token; create the parent directory if missing so
                # the first run doesn't fail with FileNotFoundError.
                Path(self.token_path).parent.mkdir(parents=True, exist_ok=True)
                with open(self.token_path, "wb") as token:
                    pickle.dump(creds, token)

            return build("youtube", "v3", credentials=creds)

        except ImportError:
            logger.error("Google API libraries not installed. Run: pip install google-api-python-client google-auth-oauthlib")
            return None
        except Exception as e:
            logger.error(f"Failed to initialize YouTube service: {e}")
            return None

    async def _upload_video(
        self,
        youtube,
        video_path: str,
        body: dict,
        progress_callback: Optional[Callable[[float, str], None]] = None
    ) -> Optional[str]:
        """
        Perform a resumable chunked upload; return the video ID or None.

        NOTE(review): ``request.next_chunk()`` is a blocking HTTP call run on
        the event loop — consider ``asyncio.to_thread`` if uploads are found
        to stall other tasks.
        """
        try:
            from googleapiclient.http import MediaFileUpload

            media = MediaFileUpload(
                video_path,
                chunksize=1024 * 1024,  # 1MB chunks
                resumable=True
            )

            request = youtube.videos().insert(
                part=",".join(body.keys()),
                body=body,
                media_body=media
            )

            # Drive the resumable upload loop; next_chunk() reports interim
            # status until the final response arrives.
            response = None
            while response is None:
                status, response = request.next_chunk()
                if status:
                    # Map the upload fraction into the 0.3–0.9 band of the
                    # overall publish progress.
                    progress = 0.3 + (0.6 * status.progress())
                    if progress_callback:
                        progress_callback(progress, f"上传 {int(status.progress() * 100)}%")

            video_id = response.get("id")
            logger.info(f"Video uploaded: {video_id}")
            return video_id

        except Exception as e:
            logger.error(f"Upload failed: {e}")
            return None

    def _get_category_id(self, category: Optional[str]) -> str:
        """Map a friendly category name to a YouTube category ID string."""
        if not category:
            return self.DEFAULT_CATEGORY_ID  # People & Blogs

        return YOUTUBE_CATEGORIES.get(category.lower(), self.DEFAULT_CATEGORY_ID)

    def _map_privacy(self, privacy: str) -> str:
        """Map a generic privacy setting to YouTube's privacyStatus values."""
        mapping = {
            "public": "public",
            "private": "private",
            "unlisted": "unlisted",
        }
        # Unknown values fall back to "private" — the safe default.
        return mapping.get(privacy, "private")

    async def validate_credentials(self) -> bool:
        """True if either the client secrets file or a cached token exists."""
        return os.path.exists(self.client_secrets_path) or os.path.exists(self.token_path)

    def get_platform_requirements(self) -> Dict[str, Any]:
        """Describe YouTube's upload limits and recommended encoding settings."""
        return {
            "max_file_size_mb": 256000,  # 256GB
            "max_duration_seconds": 43200,  # 12 hours
            "supported_formats": ["mp4", "mov", "avi", "webm", "mkv"],
            "recommended_resolution": (1920, 1080),
            "recommended_codec": "h264",
            "quota_cost_per_upload": 100,
        }
|
||||
Reference in New Issue
Block a user