Main improvements:
- Added a universal tester (universal_tester.py) supporting multiple test modes
- Improved the test report generator to support summary reports and per-action screenshots
- Enhanced the explorer's DFS algorithm and state-fingerprint recognition (a rough sketch of the idea follows below)
- Added a smart test configuration (smart_test.yaml)
- Improved AI model integration (GLM/Gemini support)
- Added development/debugging tools and documentation
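As an illustration of the state-fingerprint idea mentioned above (a sketch only, not this repository's implementation; the function and variable names here are hypothetical), an explorer can hash the current URL together with a normalized list of interactive-element signatures and skip any state whose fingerprint it has already visited:

import hashlib

def page_fingerprint(url: str, element_signatures: list[str]) -> str:
    """Hash the URL plus a sorted list of interactive-element signatures."""
    normalized = "|".join(sorted(element_signatures))
    return hashlib.sha256(f"{url}::{normalized}".encode("utf-8")).hexdigest()

# A DFS explorer keeps a set of visited fingerprints and prunes any branch
# whose resulting page state is already in the set.
visited: set[str] = set()
fp = page_fingerprint("https://example.com/login", ["button#submit", "input[name=user]"])
if fp not in visited:
    visited.add(fp)  # explore this state, then recurse into its clickable elements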
113 lines · 4.1 KiB · Python
#!/usr/bin/env python3
"""
Run the full-feature test for the enterprise system.
"""

import sys
import os
import json
from datetime import datetime

sys.path.insert(0, ".")

from tests.universal_tester import UniversalWebTester, TestConfig
def run_full_test():
    """Run the full-feature test."""

    print("=" * 60)
    print("🚀 Enterprise system full-feature test")
    print("=" * 60)

    # Load the configuration
    config_path = "tests/configs/enterprise_system.yaml"

    # Build the test report file name
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    report_file = f"reports/enterprise_full_test_{timestamp}.json"

    # Make sure the report directory exists
    os.makedirs("reports", exist_ok=True)

    try:
        # Run through the universal tester
        from tests.universal_tester import load_config_from_file

        config = load_config_from_file(config_path)
        config.name = f"{config.name}_{timestamp}"

        print(f"📋 Test name: {config.name}")
        print(f"🌐 Test URL: {config.url}")
        print(f"🤖 AI model: {config.model}")
        print(f"📊 Test mode: {config.mode}")
        print(f"🖱️ Max clicks: {config.explore_config.get('max_clicks', 100)}")
        print(f"📏 Max depth: {config.explore_config.get('max_depth', 5)}")
        print("-" * 60)

        # Create the tester and run it
        tester = UniversalWebTester(config)
        result = tester.run()
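        # `result` is consumed below as a dict with 'status' ('passed' or not),
        # 'errors' (a list of error messages), and optionally 'steps' (a list of
        # step dicts carrying 'action', 'goal'/'target', and a 'result' sub-dict).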

        # Save the report
        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2)

        # Print the test results
        print("\n" + "=" * 60)
        print("📊 Test result summary")
        print("=" * 60)
        print(f"✅ Test status: {'passed' if result['status'] == 'passed' else 'failed'}")

        if result['errors']:
            print("\n❌ Errors:")
            for i, error in enumerate(result['errors'], 1):
                print(f"  {i}. {error}")

        # Summarize step execution
        if result.get('steps'):
            print("\n📈 Execution statistics:")
            total_steps = len(result['steps'])
            successful_steps = sum(1 for step in result['steps']
                                   if step.get('result', {}).get('success', True))
            print(f"  - Total steps: {total_steps}")
            print(f"  - Successful steps: {successful_steps}")
            print(f"  - Success rate: {successful_steps/total_steps*100:.1f}%")

            # Per-step details
            print("\n📝 Step details:")
            for i, step in enumerate(result['steps'], 1):
                action = step.get('action', 'unknown')
                if action == 'goal':
                    goal = step.get('goal', '')
                    status = "✅" if step.get('result', {}).get('success', True) else "❌"
                    print(f"  {i}. [{status}] Goal: {goal}")
                elif action == 'explore':
                    explore_result = step.get('result', {})
                    clicks = explore_result.get('click_count', 0)
                    elements = explore_result.get('discovered_elements', 0)
                    print(f"  {i}. [🔍] Explore: {clicks} clicks, {elements} elements discovered")
                elif action == 'verify':
                    target = step.get('target', '')
                    passed = step.get('result', {}).get('passed', False)
                    status = "✅" if passed else "❌"
                    print(f"  {i}. [{status}] Verify: {target}")

        print(f"\n📄 Detailed report saved to: {report_file}")

        # If an HTML report exists, point to it
        html_report = report_file.replace('.json', '.html')
        if os.path.exists(html_report):
            print(f"🌐 Visual report: {html_report}")

        return result['status'] == 'passed'

    except Exception as e:
        print(f"\n❌ Test run failed: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = run_full_test()
    sys.exit(0 if success else 1)
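For reference, the script expects tests/configs/enterprise_system.yaml to provide at least the fields it prints (name, url, model, mode, and an explore_config mapping with max_clicks/max_depth). Below is a minimal sketch, under stated assumptions, of how such a loader could map the YAML onto a config object; the real load_config_from_file in tests/universal_tester.py may differ, and the defaults shown are guesses.

# Hypothetical sketch only -- not the actual tests/universal_tester.py loader.
from dataclasses import dataclass, field

import yaml  # PyYAML, assumed to be available


@dataclass
class SimpleTestConfig:
    name: str
    url: str
    model: str = "glm-4"           # assumed default; the commit notes GLM/Gemini support
    mode: str = "explore"          # assumed default test mode
    explore_config: dict = field(default_factory=dict)


def load_config_sketch(path: str) -> SimpleTestConfig:
    """Read a YAML file and build a config object from its top-level keys."""
    with open(path, "r", encoding="utf-8") as f:
        data = yaml.safe_load(f) or {}
    return SimpleTestConfig(
        name=data.get("name", "unnamed_test"),
        url=data.get("url", ""),
        model=data.get("model", "glm-4"),
        mode=data.get("mode", "explore"),
        explore_config=data.get("explore_config", {}),
    )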