fix: 安全审查 P0-P2 问题修复(26项)

P0 关键修复:
- 移除 exit(0) 强制退出,改为应用语言设置后下次启动生效
- 修复 LivePhotoValidator hasResumed data race,引入线程安全 ResumeOnce
- 修复 addAssetID(toVideo:) continuation 泄漏,添加 writer/reader 启动状态检查
- 修复 OnboardingView "跳过" 按钮未国际化
- 修复 LanguageManager "跟随系统" 硬编码中文
- .gitignore 补全 AI 工具目录

P1 架构与 UI 修复:
- 修复 RealESRGANProcessor actor 隔离违规
- 修复 ODRManager continuation 生命周期保护
- TiledImageProcessor 改为流式拼接,降低内存峰值
- EditorView 硬编码颜色统一为设计系统
- ProcessingView 取消导航竞态修复
- 反馈诊断包添加知情同意提示

P2 代码质量与合规:
- EditorView/WallpaperGuideView 硬编码间距圆角统一为设计令牌
- PrivacyPolicyView 设计系统颜色统一
- HomeView 重复 onChange 合并
- PHAuthorizationStatus 改为英文技术术语
- Analytics 日志 assetId 脱敏
- 隐私政策补充 localIdentifier 存储说明
- 清理孤立的 subscription 翻译 key
- 脚本硬编码绝对路径改为相对路径
- DesignSystem SoftSlider 类型不匹配编译错误修复

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
empty
2026-02-07 20:04:41 +08:00
parent e08cfc981e
commit 4bcad4d4b8
19 changed files with 640 additions and 1396 deletions

View File

@@ -155,11 +155,13 @@ public actor ODRManager {
private func checkODRAvailability() async -> Bool {
// Use conditionallyBeginAccessingResources to check without triggering download
let request = NSBundleResourceRequest(tags: [Self.modelTag])
return await withCheckedContinuation { continuation in
request.conditionallyBeginAccessingResources { available in
request.conditionallyBeginAccessingResources { [request] available in
// Capture request explicitly to prevent ARC from releasing it
// before the callback fires
_ = request
if available {
// Model is already downloaded via ODR
self.logger.debug("ODR model is available locally")
}
continuation.resume(returning: available)

View File

@@ -112,7 +112,10 @@ actor RealESRGANProcessor {
logger.info("Running inference on \(width)x\(height) image...")
// Run inference synchronously (MLModel prediction is thread-safe)
// Capture actor-isolated state before entering non-isolated closure
let localModel = model
// Run inference on background queue (MLModel prediction is thread-safe)
let output: [UInt8] = try await withCheckedThrowingContinuation { continuation in
DispatchQueue.global(qos: .userInitiated).async {
do {
@@ -123,22 +126,15 @@ actor RealESRGANProcessor {
)
// Run inference synchronously
let prediction = try model.prediction(from: inputProvider)
let prediction = try localModel.prediction(from: inputProvider)
// Extract output from model
// The model outputs to "activation_out" as either MultiArray or Image
let rgbaData: [UInt8]
if let outputValue = prediction.featureValue(for: "activation_out") {
if let multiArray = outputValue.multiArrayValue {
// Output is MLMultiArray with shape [C, H, W]
self.logger.info("Output is MultiArray: \(multiArray.shape)")
rgbaData = try self.multiArrayToRGBA(multiArray)
rgbaData = try Self.multiArrayToRGBA(multiArray)
} else if let outputBuffer = outputValue.imageBufferValue {
// Output is CVPixelBuffer (image)
let outWidth = CVPixelBufferGetWidth(outputBuffer)
let outHeight = CVPixelBufferGetHeight(outputBuffer)
self.logger.info("Output is Image: \(outWidth)x\(outHeight)")
rgbaData = try ImageFormatConverter.pixelBufferToRGBAData(outputBuffer)
} else {
continuation.resume(throwing: AIEnhanceError.inferenceError(
@@ -162,13 +158,14 @@ actor RealESRGANProcessor {
}
}
logger.info("Inference completed, output size: \(output.count) bytes")
return output
}
/// Convert MLMultiArray [C, H, W] to RGBA byte array
/// - Parameter multiArray: Output from model with shape [3, H, W] (RGB channels)
/// - Returns: RGBA byte array with shape [H * W * 4]
private func multiArrayToRGBA(_ multiArray: MLMultiArray) throws -> [UInt8] {
private static func multiArrayToRGBA(_ multiArray: MLMultiArray) throws -> [UInt8] {
let shape = multiArray.shape.map { $0.intValue }
// Expect shape [3, H, W] for RGB
@@ -178,12 +175,9 @@ actor RealESRGANProcessor {
)
}
let channels = shape[0]
let height = shape[1]
let width = shape[2]
logger.info("Converting MultiArray \(channels)x\(height)x\(width) to RGBA")
// Output array: RGBA format
var rgbaData = [UInt8](repeating: 255, count: width * height * 4)

View File

@@ -63,17 +63,38 @@ struct TiledImageProcessor {
logger.info("Extracted \(tiles.count) tiles")
progress?(0.1)
// Step 2: Process each tile
var processedTiles: [(tile: ImageTile, output: [UInt8])] = []
// Step 2: Pre-allocate output buffers for streaming stitching
let outputWidth = originalWidth * config.modelScale
let outputHeight = originalHeight * config.modelScale
var outputBuffer = [Float](repeating: 0, count: outputWidth * outputHeight * 3)
var weightBuffer = [Float](repeating: 0, count: outputWidth * outputHeight)
// Step 3: Process each tile and blend immediately (streaming)
let tileProgressBase = 0.1
let tileProgressRange = 0.7
let tileProgressRange = 0.75
for (index, tile) in tiles.enumerated() {
try Task.checkCancellation()
let pixelBuffer = try ImageFormatConverter.cgImageToPixelBuffer(tile.image)
let outputData = try await processor.processImage(pixelBuffer)
processedTiles.append((tile, outputData))
// Blend tile into output immediately — no accumulation of processed tiles
let weights = createBlendingWeights(
tileWidth: min(config.outputTileSize, outputWidth - tile.outputOriginX),
tileHeight: min(config.outputTileSize, outputHeight - tile.outputOriginY)
)
blendTileIntoOutput(
data: outputData,
weights: weights,
atX: tile.outputOriginX,
atY: tile.outputOriginY,
outputWidth: outputWidth,
outputHeight: outputHeight,
outputBuffer: &outputBuffer,
weightBuffer: &weightBuffer
)
// outputData and weights are released here
let tileProgress = tileProgressBase + tileProgressRange * Double(index + 1) / Double(tiles.count)
progress?(tileProgress)
@@ -82,19 +103,14 @@ struct TiledImageProcessor {
await Task.yield()
}
progress?(0.85)
progress?(0.9)
// Step 3: Stitch tiles with blending
let outputWidth = originalWidth * config.modelScale
let outputHeight = originalHeight * config.modelScale
let stitchedImage = try stitchTiles(
processedTiles,
outputWidth: outputWidth,
outputHeight: outputHeight
)
// Step 4: Normalize and create final image
normalizeByWeights(&outputBuffer, weights: weightBuffer, width: outputWidth, height: outputHeight)
let stitchedImage = try createCGImage(from: outputBuffer, width: outputWidth, height: outputHeight)
progress?(0.95)
// Step 4: Cap at max dimension if needed
// Step 5: Cap at max dimension if needed
let finalImage = try capToMaxDimension(stitchedImage, maxDimension: 4320)
progress?(1.0)
@@ -196,45 +212,6 @@ struct TiledImageProcessor {
// MARK: - Tile Stitching
/// Stitch processed tiles with weighted blending
private func stitchTiles(
_ tiles: [(tile: ImageTile, output: [UInt8])],
outputWidth: Int,
outputHeight: Int
) throws -> CGImage {
// Create output buffers
var outputBuffer = [Float](repeating: 0, count: outputWidth * outputHeight * 3)
var weightBuffer = [Float](repeating: 0, count: outputWidth * outputHeight)
let outputTileSize = config.outputTileSize // 2048
for (tile, data) in tiles {
// Create blending weights for this tile
let weights = createBlendingWeights(
tileWidth: min(outputTileSize, outputWidth - tile.outputOriginX),
tileHeight: min(outputTileSize, outputHeight - tile.outputOriginY)
)
// Blend tile into output
blendTileIntoOutput(
data: data,
weights: weights,
atX: tile.outputOriginX,
atY: tile.outputOriginY,
outputWidth: outputWidth,
outputHeight: outputHeight,
outputBuffer: &outputBuffer,
weightBuffer: &weightBuffer
)
}
// Normalize by accumulated weights
normalizeByWeights(&outputBuffer, weights: weightBuffer, width: outputWidth, height: outputHeight)
// Convert to CGImage
return try createCGImage(from: outputBuffer, width: outputWidth, height: outputHeight)
}
/// Create blending weights with linear falloff at edges
private func createBlendingWeights(tileWidth: Int, tileHeight: Int) -> [Float] {
let overlap = config.outputOverlap // 256

View File

@@ -347,6 +347,21 @@ public actor AlbumWriter {
}
}
/// Thread-safe, single-use gate that guarantees a checked continuation is
/// resumed at most once, even when a completion callback races a timeout
/// handler on another thread.
///
/// `@unchecked Sendable` is justified: the only mutable state (`hasFired`)
/// is accessed exclusively under `lock`.
private final class ResumeOnce: @unchecked Sendable {
    /// Serializes all access to `hasFired` across threads.
    private let lock = NSLock()
    /// Whether the single resume permit has already been claimed.
    private var hasFired = false

    /// Atomically claims the one-time permit.
    /// - Returns: `true` on the first call only; `false` on every subsequent call.
    func tryConsume() -> Bool {
        lock.lock()
        defer { lock.unlock() }
        guard !hasFired else { return false }
        hasFired = true
        return true
    }
}
public actor LivePhotoValidator {
public init() {}
@@ -378,16 +393,13 @@ public actor LivePhotoValidator {
public func requestLivePhoto(photoURL: URL, pairedVideoURL: URL) async -> PHLivePhoto? {
await withCheckedContinuation { continuation in
var hasResumed = false
let resumeOnce = ResumeOnce()
let requestID = PHLivePhoto.request(
withResourceFileURLs: [pairedVideoURL, photoURL],
placeholderImage: nil,
targetSize: .zero,
contentMode: .aspectFit
) { livePhoto, info in
// resume
guard !hasResumed else { return }
// Skip degraded (preview-quality) results; wait for the final callback
if let isDegraded = info[PHLivePhotoInfoIsDegradedKey] as? Bool, isDegraded {
return
@@ -398,8 +410,9 @@ public actor LivePhotoValidator {
#if DEBUG
print("[LivePhotoValidator] requestLivePhoto error: \(error.localizedDescription)")
#endif
hasResumed = true
continuation.resume(returning: nil)
if resumeOnce.tryConsume() {
continuation.resume(returning: nil)
}
return
}
@@ -407,23 +420,24 @@ public actor LivePhotoValidator {
#if DEBUG
print("[LivePhotoValidator] requestLivePhoto cancelled")
#endif
hasResumed = true
continuation.resume(returning: nil)
if resumeOnce.tryConsume() {
continuation.resume(returning: nil)
}
return
}
hasResumed = true
continuation.resume(returning: livePhoto)
if resumeOnce.tryConsume() {
continuation.resume(returning: livePhoto)
}
}
//
DispatchQueue.main.asyncAfter(deadline: .now() + 10) {
guard !hasResumed else { return }
guard resumeOnce.tryConsume() else { return }
#if DEBUG
print("[LivePhotoValidator] requestLivePhoto timeout, requestID: \(requestID)")
#endif
PHLivePhoto.cancelRequest(withRequestID: requestID)
hasResumed = true
continuation.resume(returning: nil)
}
}
@@ -966,6 +980,23 @@ public actor LivePhotoBuilder {
assetWriter.startWriting()
videoReader.startReading()
metadataReader.startReading()
// Verify writer/reader actually started so the continuation is always resumed
guard assetWriter.status == .writing else {
continuation.resume(throwing: AppError(code: "LPB-301", stage: .writeVideoMetadata, message: "视频处理失败", underlyingErrorDescription: assetWriter.error?.localizedDescription ?? "Writer 启动失败", suggestedActions: ["重试"]))
return
}
guard videoReader.status == .reading else {
assetWriter.cancelWriting()
continuation.resume(throwing: AppError(code: "LPB-301", stage: .writeVideoMetadata, message: "视频处理失败", underlyingErrorDescription: videoReader.error?.localizedDescription ?? "VideoReader 启动失败", suggestedActions: ["重试"]))
return
}
guard metadataReader.status == .reading else {
assetWriter.cancelWriting()
continuation.resume(throwing: AppError(code: "LPB-301", stage: .writeVideoMetadata, message: "视频处理失败", underlyingErrorDescription: metadataReader.error?.localizedDescription ?? "MetadataReader 启动失败", suggestedActions: ["重试"]))
return
}
assetWriter.startSession(atSourceTime: .zero)
var currentFrameCount = 0