feat: integrate Alibaba Cloud Log Service (SLS) and enrich request logging

- Add SLS log upload module (sls-logger.js)
  - Batched uploads (every 10 entries or every 5 seconds)
  - Silent degradation when environment variables are missing
  - Automatic retry of failed uploads

- Add log field extractor (log-extractor.js)
  - Extract token usage stats (input_tokens, output_tokens)
  - Extract user identification (user_id, session_id, ip)
  - Extract request parameters (temperature, max_tokens, stream)
  - Extract message summaries (message_count, role_distribution, tool_names)

- Enrich logging on all API endpoints
  - /v1/chat/completions
  - /v1/responses
  - /v1/messages
  - /v1/messages/count_tokens

- Fix log field serialization
  - Flatten nested object fields to avoid [object Object]
  - Convert array fields to comma-separated strings

- Add Alibaba Cloud environment variables to docker-compose.yml
  - ALIYUN_ACCESS_KEY_ID
  - ALIYUN_ACCESS_KEY_SECRET
  - ALIYUN_SLS_ENDPOINT
  - ALIYUN_SLS_PROJECT
  - ALIYUN_SLS_LOGSTORE

- Switch the auth configuration to an auto-refreshing token flow (see the sketch after this list)
  - Use DROID_REFRESH_KEY instead of a fixed FACTORY_API_KEY
  - Refresh every 6 hours (tokens are valid for 8 hours)
  - Persist tokens to auth.json
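The refresh mechanism itself is not part of the hunks shown on this page. As a rough illustration of the schedule described above, a minimal sketch, assuming a hypothetical token endpoint, response shape, and auth.json location (initializeAuth and DROID_REFRESH_KEY are real names from this commit; everything else is invented):

// Sketch only: not the actual auth.js from this commit.
import fs from 'node:fs/promises';

const REFRESH_INTERVAL_MS = 6 * 60 * 60 * 1000; // refresh every 6 h; tokens stay valid for 8 h
const AUTH_FILE = './data/auth.json';            // assumed location of the persisted tokens

async function refreshToken() {
  // Hypothetical token endpoint; the real URL is not shown in this commit.
  const res = await fetch('https://auth.example.com/oauth/token', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ refresh_token: process.env.DROID_REFRESH_KEY })
  });
  if (!res.ok) throw new Error(`refresh failed: ${res.status}`);
  const tokens = await res.json();
  // Persist so a restart can reuse the most recently refreshed token.
  await fs.writeFile(AUTH_FILE, JSON.stringify(tokens, null, 2));
  return tokens;
}

export async function initializeAuth() {
  await refreshToken(); // refresh once on startup
  setInterval(() => {
    refreshToken().catch(err => console.error('[auth] refresh failed:', err.message));
  }, REFRESH_INTERVAL_MS);
}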
Claude Code committed 2025-12-27 04:42:43 +00:00
commit 82a5a2cdfb (parent eb1096ce54)
5 changed files with 336 additions and 37 deletions

docker-compose.yml

@@ -13,6 +13,12 @@ services:
# Second priority: automatic refresh via the refresh token
- DROID_REFRESH_KEY=${DROID_REFRESH_KEY}
# Optional: to change the port, configure it in config.json
+# Alibaba Cloud Log Service (SLS) configuration
+- ALIYUN_ACCESS_KEY_ID=${ALIYUN_ACCESS_KEY_ID}
+- ALIYUN_ACCESS_KEY_SECRET=${ALIYUN_ACCESS_KEY_SECRET}
+- ALIYUN_SLS_ENDPOINT=${ALIYUN_SLS_ENDPOINT}
+- ALIYUN_SLS_PROJECT=${ALIYUN_SLS_PROJECT}
+- ALIYUN_SLS_LOGSTORE=${ALIYUN_SLS_LOGSTORE}
volumes:
# Optional: persist auth.json to keep refreshed tokens
- ./data:/app/data
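docker-compose.yml only passes the credentials through; on the Node side they feed the SLS_CONFIG object in sls-logger.js. The hunk further down cuts off before the config fields, so this is a minimal sketch of the env-var wiring and the silent-degradation guard (isConfigured is an assumed helper name; the field names match those used elsewhere in the diff):

// Sketch: read SLS settings from the environment.
const SLS_CONFIG = {
  accessKeyId: process.env.ALIYUN_ACCESS_KEY_ID,
  accessKeySecret: process.env.ALIYUN_ACCESS_KEY_SECRET,
  endpoint: process.env.ALIYUN_SLS_ENDPOINT,
  project: process.env.ALIYUN_SLS_PROJECT,
  logstore: process.env.ALIYUN_SLS_LOGSTORE
};

// Silent degradation: if any value is missing, SLS logging becomes a no-op
// instead of failing requests.
function isConfigured() {
  return Object.values(SLS_CONFIG).every(Boolean);
}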

log-extractor.js (new file, 214 lines)

@@ -0,0 +1,214 @@
/**
 * Log extraction helpers
 *
 * Extract detailed log fields from requests and responses.
 */
/**
 * Extract user identification info
 * @param {Object} req - Express request object
 * @returns {Object} User identification fields
 */
export function extractUserInfo(req) {
const userInfo = {};
// User ID from the request headers
if (req.headers['x-user-id']) {
userInfo.user_id = req.headers['x-user-id'];
}
// User info from request metadata (Anthropic API)
if (req.body?.metadata?.user_id) {
userInfo.user_id = req.body.metadata.user_id;
}
// Client info
if (req.headers['user-agent']) {
userInfo.user_agent = req.headers['user-agent'];
}
// Session ID
if (req.headers['x-session-id']) {
userInfo.session_id = req.headers['x-session-id'];
}
// IP address
userInfo.ip = req.ip || req.connection?.remoteAddress;
return userInfo;
}
/**
 * Extract request parameters (flattened)
 * @param {Object} reqBody - Request body
 * @returns {Object} Flattened request parameters
 */
export function extractRequestParams(reqBody) {
const params = {};
if (reqBody?.temperature !== undefined) {
params.param_temperature = reqBody.temperature;
}
if (reqBody?.max_tokens !== undefined) {
params.param_max_tokens = reqBody.max_tokens;
}
if (reqBody?.top_p !== undefined) {
params.param_top_p = reqBody.top_p;
}
if (reqBody?.stream !== undefined) {
params.param_stream = reqBody.stream;
}
return params;
}
/**
 * Extract a message summary
 * @param {Object} reqBody - Request body
 * @returns {Object} Message summary fields
 */
export function extractMessageSummary(reqBody) {
const summary = {};
// Anthropic API format (/v1/messages)
if (reqBody?.messages && Array.isArray(reqBody.messages)) {
summary.message_count = reqBody.messages.length;
// Use the first 100 characters of the first message as a summary
if (reqBody.messages.length > 0) {
const firstMsg = reqBody.messages[0];
if (firstMsg.content) {
if (typeof firstMsg.content === 'string') {
summary.first_message = firstMsg.content.substring(0, 100);
} else if (Array.isArray(firstMsg.content)) {
// Handle multimodal content
const textContent = firstMsg.content.find(c => c.type === 'text');
if (textContent?.text) {
summary.first_message = textContent.text.substring(0, 100);
}
}
}
summary.first_message_role = firstMsg.role;
}
// Role distribution (flattened)
const roles = reqBody.messages.map(m => m.role);
summary.role_user_count = roles.filter(r => r === 'user').length;
summary.role_assistant_count = roles.filter(r => r === 'assistant').length;
summary.role_system_count = roles.filter(r => r === 'system').length;
}
// System prompt
if (reqBody?.system) {
summary.has_system_prompt = true;
summary.system_prompt_length = typeof reqBody.system === 'string'
? reqBody.system.length
: JSON.stringify(reqBody.system).length;
}
// Tools
if (reqBody?.tools && Array.isArray(reqBody.tools)) {
summary.tool_count = reqBody.tools.length;
// Convert the tool-name array to a comma-separated string (at most 10 names)
summary.tool_names = reqBody.tools.map(t => t.name).slice(0, 10).join(',');
}
return summary;
}
/**
 * Extract token usage statistics
 * @param {Object} responseData - API response data
 * @returns {Object} Token usage fields
 */
export function extractTokenUsage(responseData) {
const usage = {};
if (responseData?.usage) {
if (responseData.usage.input_tokens !== undefined) {
usage.input_tokens = responseData.usage.input_tokens;
}
if (responseData.usage.output_tokens !== undefined) {
usage.output_tokens = responseData.usage.output_tokens;
}
if (responseData.usage.total_tokens !== undefined) {
usage.total_tokens = responseData.usage.total_tokens;
}
// Cache-related tokens
if (responseData.usage.cache_creation_input_tokens !== undefined) {
usage.cache_creation_input_tokens = responseData.usage.cache_creation_input_tokens;
}
if (responseData.usage.cache_read_input_tokens !== undefined) {
usage.cache_read_input_tokens = responseData.usage.cache_read_input_tokens;
}
}
return usage;
}
/**
 * Build the complete log object
 * @param {Object} options - Log options
 * @param {string} options.method - HTTP method
 * @param {string} options.endpoint - Request endpoint
 * @param {string} options.model - Model ID
 * @param {number} options.status - Response status code
 * @param {number} options.duration_ms - Request duration in milliseconds
 * @param {Object} options.req - Express request object
 * @param {Object} [options.responseData] - Response data (optional; provided for non-streaming responses)
 * @param {string} [options.error] - Error message (optional)
 * @returns {Object} Complete log object
 */
export function buildDetailedLog(options) {
const { method, endpoint, model, status, duration_ms, req, responseData, error } = options;
const log = {
method,
endpoint,
model,
status,
duration_ms
};
// Error info
if (error) {
log.error = error;
}
// User info
if (req) {
const userInfo = extractUserInfo(req);
Object.assign(log, userInfo);
// Request parameters (already flattened)
const params = extractRequestParams(req.body);
Object.assign(log, params);
// Message summary
const summary = extractMessageSummary(req.body);
if (Object.keys(summary).length > 0) {
Object.assign(log, summary);
}
}
// Token usage
if (responseData) {
const tokenUsage = extractTokenUsage(responseData);
if (Object.keys(tokenUsage).length > 0) {
Object.assign(log, tokenUsage);
}
// Response info
if (responseData.id) {
log.response_id = responseData.id;
}
if (responseData.stop_reason) {
log.stop_reason = responseData.stop_reason;
}
}
return log;
}
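For illustration, a hypothetical call with invented request and response values (none of these values come from the repository), showing the flat record it produces; the values are stringified later, in sls-logger.js, before upload:

// Hypothetical usage of buildDetailedLog.
const req = {
  ip: '203.0.113.7',
  headers: { 'x-user-id': 'u-1', 'user-agent': 'curl/8.5' },
  body: {
    model: 'claude-sonnet-4',
    temperature: 0.7,
    max_tokens: 1024,
    messages: [{ role: 'user', content: 'Hello' }],
    tools: [{ name: 'search' }, { name: 'calculator' }]
  }
};
const responseData = {
  id: 'msg_123',
  stop_reason: 'end_turn',
  usage: { input_tokens: 512, output_tokens: 128 }
};

const log = buildDetailedLog({
  method: 'POST',
  endpoint: '/v1/messages',
  model: req.body.model,
  status: 200,
  duration_ms: 1234,
  req,
  responseData
});
// log is a flat object, e.g.:
// { method: 'POST', endpoint: '/v1/messages', model: 'claude-sonnet-4', status: 200,
//   duration_ms: 1234, user_id: 'u-1', user_agent: 'curl/8.5', ip: '203.0.113.7',
//   param_temperature: 0.7, param_max_tokens: 1024, message_count: 1,
//   first_message: 'Hello', first_message_role: 'user', role_user_count: 1,
//   role_assistant_count: 0, role_system_count: 0, tool_count: 2,
//   tool_names: 'search,calculator', input_tokens: 512, output_tokens: 128,
//   response_id: 'msg_123', stop_reason: 'end_turn' }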

routes.js

@@ -10,6 +10,7 @@ import { OpenAIResponseTransformer } from './transformers/response-openai.js';
import { getApiKey } from './auth.js';
import { getNextProxyAgent } from './proxy-manager.js';
import { logRequest as slsLogRequest } from './sls-logger.js';
+import { buildDetailedLog } from './log-extractor.js';
const router = express.Router();
@@ -189,7 +190,14 @@ async function handleChatCompletions(req, res) {
}
res.end();
logInfo('Stream forwarded (common type)');
-slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/chat/completions',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req
+}));
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -209,7 +217,14 @@ async function handleChatCompletions(req, res) {
}
res.end();
logInfo('Stream completed');
-slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/chat/completions',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req
+}));
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -232,12 +247,28 @@ async function handleChatCompletions(req, res) {
logResponse(200, null, data);
res.json(data);
}
-slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/chat/completions',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req,
+responseData: data
+}));
}
} catch (error) {
logError('Error in /v1/chat/completions', error);
-slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/chat/completions',
+model: req.body?.model,
+status: 500,
+duration_ms: Date.now() - startTime,
+req,
+error: error.message
+}));
res.status(500).json({
error: 'Internal server error',
message: error.message
@@ -394,7 +425,14 @@ async function handleDirectResponses(req, res) {
}
res.end();
logInfo('Stream forwarded successfully');
-slsLogRequest({ method: 'POST', endpoint: '/v1/responses', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/responses',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req
+}));
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -404,12 +442,28 @@ async function handleDirectResponses(req, res) {
const data = await response.json();
logResponse(200, null, data);
res.json(data);
-slsLogRequest({ method: 'POST', endpoint: '/v1/responses', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/responses',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req,
+responseData: data
+}));
}
} catch (error) {
logError('Error in /v1/responses', error);
-slsLogRequest({ method: 'POST', endpoint: '/v1/responses', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/responses',
+model: req.body?.model,
+status: 500,
+duration_ms: Date.now() - startTime,
+req,
+error: error.message
+}));
res.status(500).json({
error: 'Internal server error',
message: error.message
@@ -631,7 +685,14 @@ async function handleDirectMessages(req, res) {
}
res.end();
logInfo('Stream forwarded successfully');
-slsLogRequest({ method: 'POST', endpoint: '/v1/messages', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/messages',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req
+}));
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -641,12 +702,28 @@ async function handleDirectMessages(req, res) {
const data = await response.json();
logResponse(200, null, data);
res.json(data);
-slsLogRequest({ method: 'POST', endpoint: '/v1/messages', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/messages',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req,
+responseData: data
+}));
}
} catch (error) {
logError('Error in /v1/messages', error);
-slsLogRequest({ method: 'POST', endpoint: '/v1/messages', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/messages',
+model: req.body?.model,
+status: 500,
+duration_ms: Date.now() - startTime,
+req,
+error: error.message
+}));
res.status(500).json({
error: 'Internal server error',
message: error.message
@@ -743,11 +820,27 @@ async function handleCountTokens(req, res) {
const data = await response.json();
logResponse(200, null, data);
res.json(data);
-slsLogRequest({ method: 'POST', endpoint: '/v1/messages/count_tokens', model: modelId, status: 200, duration_ms: Date.now() - startTime });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/messages/count_tokens',
+model: modelId,
+status: 200,
+duration_ms: Date.now() - startTime,
+req,
+responseData: data
+}));
} catch (error) {
logError('Error in /v1/messages/count_tokens', error);
-slsLogRequest({ method: 'POST', endpoint: '/v1/messages/count_tokens', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
+slsLogRequest(buildDetailedLog({
+method: 'POST',
+endpoint: '/v1/messages/count_tokens',
+model: req.body?.model,
+status: 500,
+duration_ms: Date.now() - startTime,
+req,
+error: error.message
+}));
res.status(500).json({
error: 'Internal server error',
message: error.message
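All four endpoints repeat the same wiring: record a start time, forward the request, then report either a 200 entry (with responseData for non-streaming responses) or a 500 entry carrying the error message. Condensed into one sketch for readability (forwardRequest is a hypothetical stand-in for the fetch/transform logic in routes.js; the streaming branches simply omit responseData):

// Illustration only; routes.js inlines this pattern per endpoint.
async function handleWithLogging(req, res, endpoint) {
  const startTime = Date.now();
  try {
    const data = await forwardRequest(req); // upstream call, non-streaming case
    res.json(data);
    slsLogRequest(buildDetailedLog({
      method: 'POST', endpoint, model: req.body?.model,
      status: 200, duration_ms: Date.now() - startTime, req, responseData: data
    }));
  } catch (error) {
    slsLogRequest(buildDetailedLog({
      method: 'POST', endpoint, model: req.body?.model,
      status: 500, duration_ms: Date.now() - startTime, req, error: error.message
    }));
    res.status(500).json({ error: 'Internal server error', message: error.message });
  }
}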


@@ -4,6 +4,7 @@ import { logInfo, logError } from './logger.js';
import router from './routes.js';
import { initializeAuth } from './auth.js';
import { initializeUserAgentUpdater } from './user-agent-updater.js';
+import './sls-logger.js'; // Initialize the Aliyun SLS logging module
const app = express();

sls-logger.js

@@ -1,14 +1,13 @@
/**
 * Alibaba Cloud Log Service (SLS) logging module
 *
 *
 * Features:
 * - Upload API request/response logs to Aliyun SLS
 * - Batch uploads to reduce the number of API calls
 * - Silent degradation when environment variables are missing
 */
-import aliyunLog from 'aliyun-log';
-const { Client, PutLogsRequest, LogItem, LogContent } = aliyunLog;
+import ALSClient from 'aliyun-log';
// SLS configuration
const SLS_CONFIG = {
@@ -35,7 +34,7 @@ function initClient() {
}
try {
-client = new Client({
+client = new ALSClient({
accessKeyId: SLS_CONFIG.accessKeyId,
accessKeySecret: SLS_CONFIG.accessKeySecret,
endpoint: SLS_CONFIG.endpoint
@@ -55,28 +54,14 @@ async function flushLogs() {
const logsToSend = logQueue.splice(0, BATCH_SIZE);
try {
-const logItems = logsToSend.map(log => {
-const contents = Object.entries(log).map(([key, value]) => {
-return new LogContent({
-key,
-value: String(value ?? '')
-});
-});
-return new LogItem({
-time: Math.floor(Date.now() / 1000),
-contents
-});
-});
+const logs = logsToSend.map(log => ({
+timestamp: Math.floor(Date.now() / 1000),
+content: Object.fromEntries(
+Object.entries(log).map(([key, value]) => [key, String(value ?? '')])
+)
+}));
-const request = new PutLogsRequest({
-projectName: SLS_CONFIG.project,
-logStoreName: SLS_CONFIG.logstore,
-logGroup: {
-logs: logItems
-}
-});
-await client.putLogs(request);
+await client.postLogStoreLogs(SLS_CONFIG.project, SLS_CONFIG.logstore, { logs });
console.log(`[SLS] Uploaded ${logsToSend.length} log entries`);
} catch (error) {
console.error('[SLS] Failed to upload logs:', error.message);
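The hunk above only shows the upload path. The queueing side that implements "every 10 entries or every 5 seconds" is not visible here; a minimal sketch of how it can look, assuming the FLUSH_INTERVAL_MS constant, the return value of initClient, and the re-queue-on-failure retry (logQueue, BATCH_SIZE, logRequest and flushLogs are the real names used in the diff):

// Sketch of the enqueue/flush side, meant to sit alongside the flushLogs shown above.
const BATCH_SIZE = 10;
const FLUSH_INTERVAL_MS = 5000; // assumed constant name
const logQueue = [];

export function logRequest(log) {
  if (!initClient()) return;    // assumes initClient() returns null when env vars are missing
  logQueue.push(log);
  if (logQueue.length >= BATCH_SIZE) {
    flushLogs();                // flush immediately once 10 entries are queued
  }
}

// Also flush whatever is queued every 5 seconds.
setInterval(flushLogs, FLUSH_INTERVAL_MS);

// One possible "automatic retry": inside flushLogs' catch block, push the failed
// batch back onto logQueue so the next flush attempts it again.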