feat: integrate Alibaba Cloud Log Service (SLS)

- Add the aliyun-log SDK dependency
- Add the sls-logger.js module, supporting batched log shipping and silent degradation
- Integrate request logging into the four API handler functions
- Update .env.example with a sample SLS configuration
Author: empty
Date: 2025-12-27 03:08:01 +08:00
Commit: eb1096ce54 (parent dec2f26b5c)
5 changed files with 379 additions and 32 deletions
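The sls-logger.js module itself is not part of the file shown below (this page covers only the router). As a reading aid, here is a minimal sketch of the batching-with-silent-degradation pattern the commit message describes; the SLS_* variable names, the thresholds, and the sendToSLS() helper are illustrative assumptions, not the module's actual API. The real module ships batches through the aliyun-log SDK.

// sls-logger.js (sketch): queue log entries in memory, flush them in batches
// on a timer or when the queue fills, and swallow shipping errors so that
// logging can never break a request. All names here are assumptions.
const queue = [];
const BATCH_SIZE = 50;          // assumed flush threshold
const FLUSH_INTERVAL_MS = 5000; // assumed flush period

// Placeholder for the actual aliyun-log SDK upload; project, logstore and
// endpoint would come from the SLS_* environment variables.
async function sendToSLS(batch) { /* SDK call omitted */ }

async function flush() {
  if (queue.length === 0) return;
  const batch = queue.splice(0, queue.length);
  try {
    await sendToSLS(batch);
  } catch {
    // Silent degradation: drop the batch rather than surface the error.
  }
}

setInterval(flush, FLUSH_INTERVAL_MS).unref(); // don't keep the process alive

// Fire-and-forget: callers never await this, so a slow or unreachable SLS
// endpoint adds no latency to API responses.
export function logRequest(entry) {
  if (!process.env.SLS_PROJECT) return; // SLS not configured: no-op
  queue.push({ ...entry, timestamp: Date.now() });
  if (queue.length >= BATCH_SIZE) flush();
}

This matches the call sites in the diff below: each handler records a startTime on entry, then calls slsLogRequest(...) without awaiting it, on both success and error paths.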


@@ -9,6 +9,7 @@ import { AnthropicResponseTransformer } from './transformers/response-anthropic.js';
import { OpenAIResponseTransformer } from './transformers/response-openai.js';
import { getApiKey } from './auth.js';
import { getNextProxyAgent } from './proxy-manager.js';
import { logRequest as slsLogRequest } from './sls-logger.js';
const router = express.Router();
@@ -52,7 +53,7 @@ function convertResponseToChatCompletion(resp) {
router.get('/v1/models', (req, res) => {
logInfo('GET /v1/models');
try {
const config = getConfig();
const models = config.models.map(model => ({
@@ -81,6 +82,7 @@ router.get('/v1/models', (req, res) => {
// Standard OpenAI chat-completions handler (with format conversion)
async function handleChatCompletions(req, res) {
logInfo('POST /v1/chat/completions');
const startTime = Date.now();
try {
const openaiRequest = req.body;
@@ -108,7 +110,7 @@ async function handleChatCompletions(req, res) {
authHeader = await getApiKey(req.headers.authorization);
} catch (error) {
logError('Failed to get API key', error);
return res.status(500).json({
error: 'API key not available',
message: 'Failed to get or refresh API key. Please check server logs.'
});
@@ -166,9 +168,9 @@ async function handleChatCompletions(req, res) {
if (!response.ok) {
const errorText = await response.text();
logError(`Endpoint error: ${response.status}`, new Error(errorText));
return res.status(response.status).json({
error: `Endpoint returned ${response.status}`,
details: errorText
});
}
@@ -187,6 +189,7 @@ async function handleChatCompletions(req, res) {
}
res.end();
logInfo('Stream forwarded (common type)');
slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: modelId, status: 200, duration_ms: Date.now() - startTime });
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -206,6 +209,7 @@ async function handleChatCompletions(req, res) {
}
res.end();
logInfo('Stream completed');
slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: modelId, status: 200, duration_ms: Date.now() - startTime });
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -228,13 +232,15 @@ async function handleChatCompletions(req, res) {
logResponse(200, null, data);
res.json(data);
}
slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: modelId, status: 200, duration_ms: Date.now() - startTime });
}
} catch (error) {
logError('Error in /v1/chat/completions', error);
slsLogRequest({ method: 'POST', endpoint: '/v1/chat/completions', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
res.status(500).json({
error: 'Internal server error',
message: error.message
});
}
}
@@ -242,6 +248,7 @@ async function handleChatCompletions(req, res) {
// Forward OpenAI requests directly (no format conversion)
async function handleDirectResponses(req, res) {
logInfo('POST /v1/responses');
const startTime = Date.now();
try {
const openaiRequest = req.body;
@@ -258,7 +265,7 @@ async function handleDirectResponses(req, res) {
// Only allow openai-type endpoints
if (model.type !== 'openai') {
return res.status(400).json({
error: 'Invalid endpoint type',
message: `/v1/responses only supports openai-type endpoints; model ${modelId} has type ${model.type}`
});
@@ -280,17 +287,17 @@ async function handleDirectResponses(req, res) {
authHeader = await getApiKey(req.headers.authorization || clientAuthFromXApiKey);
} catch (error) {
logError('Failed to get API key', error);
return res.status(500).json({
error: 'API key not available',
message: 'Failed to get or refresh API key. Please check server logs.'
});
}
const clientHeaders = req.headers;
// Get provider from model config
const provider = getModelProvider(modelId);
// Build the request headers
const headers = getOpenAIHeaders(authHeader, clientHeaders, provider);
@@ -340,8 +347,8 @@ async function handleDirectResponses(req, res) {
if (claudeCodeTools.includes(tool.name)) return false;
// Filter out all mcp__-prefixed tools and other MCP-related tools
return !tool.name.startsWith('mcp__') &&
!tool.name.includes('Mcp') &&
!tool.name.includes('MCP');
});
}
@@ -366,9 +373,9 @@ async function handleDirectResponses(req, res) {
if (!response.ok) {
const errorText = await response.text();
logError(`Endpoint error: ${response.status}`, new Error(errorText));
return res.status(response.status).json({
error: `Endpoint returned ${response.status}`,
details: errorText
});
}
@@ -387,6 +394,7 @@ async function handleDirectResponses(req, res) {
}
res.end();
logInfo('Stream forwarded successfully');
slsLogRequest({ method: 'POST', endpoint: '/v1/responses', model: modelId, status: 200, duration_ms: Date.now() - startTime });
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -396,13 +404,15 @@ async function handleDirectResponses(req, res) {
const data = await response.json();
logResponse(200, null, data);
res.json(data);
slsLogRequest({ method: 'POST', endpoint: '/v1/responses', model: modelId, status: 200, duration_ms: Date.now() - startTime });
}
} catch (error) {
logError('Error in /v1/responses', error);
slsLogRequest({ method: 'POST', endpoint: '/v1/responses', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
res.status(500).json({
error: 'Internal server error',
message: error.message
});
}
}
@@ -410,6 +420,7 @@ async function handleDirectResponses(req, res) {
// Forward Anthropic requests directly (no format conversion)
async function handleDirectMessages(req, res) {
logInfo('POST /v1/messages');
const startTime = Date.now();
try {
const anthropicRequest = req.body;
@@ -426,7 +437,7 @@ async function handleDirectMessages(req, res) {
// Only allow anthropic-type endpoints
if (model.type !== 'anthropic') {
return res.status(400).json({
error: 'Invalid endpoint type',
message: `/v1/messages only supports anthropic-type endpoints; model ${modelId} has type ${model.type}`
});
@@ -448,17 +459,17 @@ async function handleDirectMessages(req, res) {
authHeader = await getApiKey(req.headers.authorization || clientAuthFromXApiKey);
} catch (error) {
logError('Failed to get API key', error);
return res.status(500).json({
error: 'API key not available',
message: 'Failed to get or refresh API key. Please check server logs.'
});
}
const clientHeaders = req.headers;
// Get provider from model config
const provider = getModelProvider(modelId);
// Build the request headers
const isStreaming = anthropicRequest.stream === true;
const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId, provider);
@@ -519,7 +530,7 @@ async function handleDirectMessages(req, res) {
'high': 24576,
'xhigh': 40960
};
modifiedRequest.thinking = {
type: 'enabled',
budget_tokens: budgetTokens[reasoningLevel]
@@ -575,8 +586,8 @@ async function handleDirectMessages(req, res) {
if (claudeCodeTools.includes(tool.name)) return false;
// Filter out all mcp__-prefixed tools and other MCP-related tools
return !tool.name.startsWith('mcp__') &&
!tool.name.includes('Mcp') &&
!tool.name.includes('MCP');
});
}
@@ -601,9 +612,9 @@ async function handleDirectMessages(req, res) {
if (!response.ok) {
const errorText = await response.text();
logError(`Endpoint error: ${response.status}`, new Error(errorText));
return res.status(response.status).json({
error: `Endpoint returned ${response.status}`,
details: errorText
});
}
@@ -620,6 +631,7 @@ async function handleDirectMessages(req, res) {
}
res.end();
logInfo('Stream forwarded successfully');
slsLogRequest({ method: 'POST', endpoint: '/v1/messages', model: modelId, status: 200, duration_ms: Date.now() - startTime });
} catch (streamError) {
logError('Stream error', streamError);
res.end();
@@ -629,10 +641,12 @@ async function handleDirectMessages(req, res) {
const data = await response.json();
logResponse(200, null, data);
res.json(data);
slsLogRequest({ method: 'POST', endpoint: '/v1/messages', model: modelId, status: 200, duration_ms: Date.now() - startTime });
}
} catch (error) {
logError('Error in /v1/messages', error);
slsLogRequest({ method: 'POST', endpoint: '/v1/messages', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
res.status(500).json({
error: 'Internal server error',
message: error.message
@@ -643,6 +657,7 @@ async function handleDirectMessages(req, res) {
// Handle Anthropic count_tokens requests
async function handleCountTokens(req, res) {
logInfo('POST /v1/messages/count_tokens');
const startTime = Date.now();
try {
const anthropicRequest = req.body;
@@ -686,10 +701,10 @@ async function handleCountTokens(req, res) {
}
const clientHeaders = req.headers;
// Get provider from model config
const provider = getModelProvider(modelId);
const headers = getAnthropicHeaders(authHeader, clientHeaders, false, modelId, provider);
// Build the count_tokens endpoint URL
@@ -728,9 +743,11 @@ async function handleCountTokens(req, res) {
const data = await response.json();
logResponse(200, null, data);
res.json(data);
slsLogRequest({ method: 'POST', endpoint: '/v1/messages/count_tokens', model: modelId, status: 200, duration_ms: Date.now() - startTime });
} catch (error) {
logError('Error in /v1/messages/count_tokens', error);
slsLogRequest({ method: 'POST', endpoint: '/v1/messages/count_tokens', model: req.body?.model, status: 500, duration_ms: Date.now() - startTime, error: error.message });
res.status(500).json({
error: 'Internal server error',
message: error.message
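
The .env.example update mentioned in the commit message is in one of the other changed files, not in this diff. A plausible shape for it, with every variable name an assumption rather than the file's confirmed contents:

# Alibaba Cloud Log Service (SLS). Optional: logging degrades to a no-op when unset.
SLS_ENDPOINT=cn-hangzhou.log.aliyuncs.com
SLS_PROJECT=your-project
SLS_LOGSTORE=api-requests
SLS_ACCESS_KEY_ID=your-access-key-id
SLS_ACCESS_KEY_SECRET=your-access-key-secret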