to-live-photo/scripts/add_localizations_v2.py
33fbc5f4b2 docs: Add i18n implementation summary and tool scripts
Includes:
- i18n implementation summary document
- Translation tool script (quick_i18n.py)
- Manual translation library (manual_translations.json)
- Testing guide and follow-up optimization suggestions
2026-01-10 14:30:09 +08:00

134 lines
4.4 KiB
Python

#!/usr/bin/env python3
"""
为 Localizable.xcstrings 添加多语言支持
步骤:
1. 加载手动翻译 (manual_translations.json)
2. 为所有字符串添加 5 种语言 (es, ar, fr, ja, ko)
3. 生成 CSV 供人工审核
"""
import json
import csv
from pathlib import Path
# Language configuration: target locale codes and display names
LANGUAGES = {
    "es": "Spanish",
    "ar": "Arabic",
    "fr": "French",
    "ja": "Japanese",
    "ko": "Korean"
}
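
# NOTE (assumption inferred from how this script reads the file, not from the real data):
# manual_translations.json is expected to look roughly like the snippet below, i.e. a
# top-level "translations" object mapping each xcstrings key to per-language strings.
# The key and values shown here are hypothetical examples.
#
# {
#   "translations": {
#     "Save": {
#       "es": "Guardar",
#       "fr": "Enregistrer"
#     }
#   }
# }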
def load_json(file_path):
    """Load a JSON file."""
    with open(file_path, 'r', encoding='utf-8') as f:
        return json.load(f)


def save_json(file_path, data):
    """Save a JSON file."""
    with open(file_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
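
# NOTE (assumption based on how main() walks the catalog): each entry in Localizable.xcstrings
# is expected to follow the shape below; the script adds one "localizations" entry per target
# language and marks placeholder values as "needs_review". The key and values are hypothetical.
#
# "strings": {
#   "Save": {
#     "localizations": {
#       "en": {"stringUnit": {"state": "translated", "value": "Save"}},
#       "es": {"stringUnit": {"state": "needs_review", "value": "[ES] Save"}}
#     }
#   }
# }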


def main():
    # File paths
    base_dir = Path("/Users/yuanjiantsui/projects/to-live-photo")
    xcstrings_path = base_dir / "to-live-photo/to-live-photo/Localizable.xcstrings"
    manual_translations_path = base_dir / "scripts/manual_translations.json"
    output_csv_path = base_dir / "scripts/translations_review.csv"

    print("🔄 Loading files...")
    xcstrings_data = load_json(xcstrings_path)
    manual_translations = load_json(manual_translations_path)["translations"]
    print(f"📊 Found {len(xcstrings_data['strings'])} strings")
    print(f"✏️ Manual translations: {len(manual_translations)}")

    # Per-language counters for manual vs. placeholder translations
    stats = {lang: {"manual": 0, "auto": 0} for lang in LANGUAGES}
    csv_rows = []
    # Process every string in the catalog
    for key, string_data in xcstrings_data["strings"].items():
        if "localizations" not in string_data:
            string_data["localizations"] = {}
        locs = string_data["localizations"]

        # Pick the source text (prefer English, fall back to Simplified Chinese)
        source_text = ""
        if "en" in locs:
            source_text = locs["en"]["stringUnit"]["value"]
        elif "zh-Hans" in locs:
            source_text = locs["zh-Hans"]["stringUnit"]["value"]
        if not source_text:
            continue

        # CSV row data
        row = {
            "key": key,
            "en": source_text,
            "zh-Hans": locs.get("zh-Hans", {}).get("stringUnit", {}).get("value", ""),
            "zh-Hant": locs.get("zh-Hant", {}).get("stringUnit", {}).get("value", "")
        }
        # Add a translation for each target language
        for lang_code in LANGUAGES:
            # Keep existing translations untouched
            if lang_code in locs:
                row[lang_code] = locs[lang_code]["stringUnit"]["value"]
                continue

            # Prefer a manual translation for this key and language
            has_manual = key in manual_translations and lang_code in manual_translations[key]
            if has_manual:
                translated_text = manual_translations[key][lang_code]
                stats[lang_code]["manual"] += 1
            else:
                # Otherwise use the source text as a placeholder, prefixed so it is easy to find
                translated_text = f"[{lang_code.upper()}] {source_text}"
                stats[lang_code]["auto"] += 1

            # Write the entry back into the xcstrings data structure
            locs[lang_code] = {
                "stringUnit": {
                    "state": "translated" if has_manual else "needs_review",
                    "value": translated_text
                }
            }
            row[lang_code] = translated_text

        csv_rows.append(row)
    # Save the updated xcstrings file
    print("\n💾 Saving Localizable.xcstrings...")
    save_json(xcstrings_path, xcstrings_data)

    # Generate the review CSV
    print(f"📝 Writing review CSV: {output_csv_path}")
    fieldnames = ["key", "en", "zh-Hans", "zh-Hant"] + list(LANGUAGES.keys())
    with open(output_csv_path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(csv_rows)
    # Print per-language statistics
    print("\n📈 Translation statistics:")
    for lang_code, lang_name in LANGUAGES.items():
        manual_count = stats[lang_code]["manual"]
        auto_count = stats[lang_code]["auto"]
        total = manual_count + auto_count
        print(f"  {lang_name} ({lang_code}): {manual_count} manual + {auto_count} placeholder = {total} total")

    print("\n✅ Done!")
    print("\n📋 Next steps:")
    print(f"  1. Review the CSV file: {output_csv_path}")
    print("  2. Use Google Translate or another service to translate the strings still marked with a language-code prefix such as [ES]")
    print("  3. Import the translated results back into Localizable.xcstrings")


if __name__ == "__main__":
    main()
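
# Example run (a sketch; base_dir above is an absolute path, so the working directory does
# not matter, but adjust base_dir if the repository lives elsewhere):
#
#   python3 scripts/add_localizations_v2.py
#
# The script rewrites Localizable.xcstrings in place and writes scripts/translations_review.csv
# for human review.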