feat: Add hybrid quality evaluation system with CLIP and VLM support

- Add FeatureExtractor for CLIP-based image/text feature extraction
- Add ObjectiveMetricsCalculator for technical quality metrics
- Add VLMEvaluator for vision-language model (VLM) evaluation
- Add HybridQualityGate combining objective + VLM evaluation
- Enhance CharacterMemory with visual feature support
- Add optional quality dependency group (torch, ftfy, regex)
- Add unit tests for new modules

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-05 15:56:44 +08:00
parent ca018a9b1f
commit 56db9bf9d2
12 changed files with 1230 additions and 4 deletions
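
Taken together, the modules listed in the commit message funnel into a single entry point, HybridQualityGate. Below is a minimal usage sketch assembled only from the API surface exercised by the tests in this commit (evaluate_image plus the score.passed and score.issues fields); the file path and prompt are placeholders, and anything beyond what the tests show is an assumption.

# Hypothetical end-to-end sketch; only evaluate_image(path, prompt) and the
# score.passed / score.issues fields are confirmed by the tests below.
import asyncio

from pixelle_video.services.quality.quality_gate import HybridQualityGate

async def main():
    gate = HybridQualityGate()  # defaults: CLIP score + smart skip enabled
    score = await gate.evaluate_image("scene_001.png", "a knight riding a horse")
    if not score.passed:
        print("regenerate:", score.issues)

asyncio.run(main())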


@@ -0,0 +1,47 @@
# Copyright (C) 2025 AIDC-AI
# Tests for FeatureExtractor
import pytest

from pixelle_video.services.quality.feature_extractor import (
    FeatureExtractor,
    FeatureExtractorConfig,
)


class TestFeatureExtractorConfig:
    """Tests for FeatureExtractorConfig"""

    def test_default_values(self):
        config = FeatureExtractorConfig()
        assert config.model_name == "ViT-B/32"
        assert config.device == "auto"
        assert config.cache_features is True


class TestFeatureExtractor:
    """Tests for FeatureExtractor"""

    def test_init_default(self):
        extractor = FeatureExtractor()
        assert extractor.config.model_name == "ViT-B/32"

    def test_is_available_check(self):
        """Test availability check (may be True or False)"""
        extractor = FeatureExtractor()
        # Just check it returns a boolean
        assert isinstance(extractor.is_available, bool)

    def test_extract_without_clip(self):
        """Test graceful degradation when CLIP unavailable"""
        extractor = FeatureExtractor()
        if not extractor.is_available:
            result = extractor.extract_image_features("/fake/path.png")
            assert result is None

    def test_clear_cache(self):
        """Test cache clearing"""
        extractor = FeatureExtractor()
        extractor._feature_cache["test"] = "value"
        extractor.clear_cache()
        assert len(extractor._feature_cache) == 0
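
For reference, a minimal usage sketch of FeatureExtractor pieced together from the calls exercised above; the return type of extract_image_features is not visible in these tests and is assumed to be an embedding (or None when CLIP is unavailable).

from pixelle_video.services.quality.feature_extractor import FeatureExtractor

extractor = FeatureExtractor()  # defaults: ViT-B/32, device="auto", caching on
if extractor.is_available:      # False when the optional CLIP/torch extras are missing
    features = extractor.extract_image_features("scene_001.png")  # assumed: embedding or None
else:
    features = None             # graceful degradation, as asserted above

extractor.clear_cache()         # drop cached features, e.g. between scenes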


@@ -0,0 +1,53 @@
# Copyright (C) 2025 AIDC-AI
# Tests for HybridQualityGate
import pytest

from pixelle_video.services.quality.quality_gate import (
    QualityGate,
    HybridQualityGate,
    HybridQualityConfig,
)
from pixelle_video.services.quality.models import QualityScore


class TestHybridQualityConfig:
    """Tests for HybridQualityConfig"""

    def test_default_values(self):
        config = HybridQualityConfig()
        assert config.enable_clip_score is True
        assert config.enable_smart_skip is True
        assert config.smart_skip_threshold == 0.75

    def test_inherits_quality_config(self):
        config = HybridQualityConfig()
        assert hasattr(config, "overall_threshold")
        assert config.overall_threshold == 0.6


class TestHybridQualityGate:
    """Tests for HybridQualityGate"""

    def test_init_default(self):
        gate = HybridQualityGate()
        assert gate.hybrid_config is not None

    def test_inherits_quality_gate(self):
        gate = HybridQualityGate()
        assert isinstance(gate, QualityGate)

    def test_lazy_load_metrics_calculator(self):
        gate = HybridQualityGate()
        calc = gate.metrics_calculator
        assert calc is not None

    @pytest.mark.asyncio
    async def test_evaluate_nonexistent_image(self):
        gate = HybridQualityGate()
        score = await gate.evaluate_image(
            "/nonexistent/path.png",
            "test prompt",
        )
        assert score.passed is False
        assert "not found" in score.issues[0].lower()


@@ -0,0 +1,64 @@
# Copyright (C) 2025 AIDC-AI
# Tests for ObjectiveMetricsCalculator
import pytest
from pathlib import Path

from pixelle_video.services.quality.objective_metrics import (
    ObjectiveMetricsCalculator,
    TechnicalMetrics,
)


class TestTechnicalMetrics:
    """Tests for TechnicalMetrics dataclass"""

    def test_default_values(self):
        metrics = TechnicalMetrics()
        assert metrics.sharpness_score == 0.0
        assert metrics.overall_technical == 0.0
        assert metrics.issues == []

    def test_to_dict(self):
        metrics = TechnicalMetrics(
            sharpness_score=0.8,
            brightness_score=0.5,
            issues=["test issue"],
        )
        d = metrics.to_dict()
        assert d["sharpness_score"] == 0.8
        assert "test issue" in d["issues"]


class TestObjectiveMetricsCalculator:
    """Tests for ObjectiveMetricsCalculator"""

    def test_init_default(self):
        calc = ObjectiveMetricsCalculator()
        assert calc.sharpness_threshold == 0.3

    def test_init_custom(self):
        calc = ObjectiveMetricsCalculator(sharpness_threshold=0.5)
        assert calc.sharpness_threshold == 0.5

    def test_analyze_nonexistent_image(self):
        calc = ObjectiveMetricsCalculator()
        metrics = calc.analyze_image("/nonexistent/path.png")
        assert len(metrics.issues) > 0
        assert "failed" in metrics.issues[0].lower()

    def test_analyze_real_image(self, tmp_path):
        """Test with a real image file"""
        from PIL import Image

        # Create test image
        img = Image.new("RGB", (256, 256), color=(128, 128, 128))
        img_path = tmp_path / "test.png"
        img.save(img_path)

        calc = ObjectiveMetricsCalculator()
        metrics = calc.analyze_image(str(img_path))
        assert 0.0 <= metrics.sharpness_score <= 1.0
        assert 0.0 <= metrics.brightness_score <= 1.0
        assert 0.0 <= metrics.overall_technical <= 1.0
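
The bounds asserted here imply sharpness_score is a normalized focus metric. A common technique for this is the variance of the Laplacian; the sketch below illustrates that approach and is not necessarily what ObjectiveMetricsCalculator implements, nor is its normalization into [0, 1] visible in these tests.

import numpy as np
from PIL import Image

def laplacian_variance(path: str) -> float:
    """Variance of the Laplacian, a standard blur/sharpness proxy (illustrative only)."""
    gray = np.asarray(Image.open(path).convert("L"), dtype=np.float64)
    # Apply the 3x3 Laplacian kernel [[0,1,0],[1,-4,1],[0,1,0]] via array shifts
    lap = (
        gray[:-2, 1:-1] + gray[2:, 1:-1]    # up + down neighbours
        + gray[1:-1, :-2] + gray[1:-1, 2:]  # left + right neighbours
        - 4.0 * gray[1:-1, 1:-1]            # centre pixel
    )
    return float(lap.var())  # higher variance means sharper edges

# The flat grey image created in test_analyze_real_image would score ~0 here,
# so any sane normalization maps it near the bottom of the [0, 1] range.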