import express from 'express';
import fetch from 'node-fetch';
import { getConfig, getModelById, getEndpointByType } from './config.js';
import { logInfo, logDebug, logError, logRequest, logResponse } from './logger.js';
import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js';
import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js';
import { AnthropicResponseTransformer } from './transformers/response-anthropic.js';
import { OpenAIResponseTransformer } from './transformers/response-openai.js';
import { getApiKey } from './auth.js';

const router = express.Router();

/**
 * GET /v1/models — list every configured model in OpenAI "list" format.
 */
router.get('/v1/models', (req, res) => {
  logInfo('GET /v1/models');
  try {
    const config = getConfig();
    const models = config.models.map((model) => ({
      id: model.id,
      object: 'model',
      // OpenAI reports `created` as a Unix timestamp in SECONDS, not ms.
      created: Math.floor(Date.now() / 1000),
      owned_by: model.type,
      permission: [],
      root: model.id,
      parent: null,
    }));
    const response = { object: 'list', data: models };
    logResponse(200, null, response);
    res.json(response);
  } catch (error) {
    logError('Error in GET /v1/models', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});

/**
 * Fetch the upstream API key (auto-refreshing if needed).
 * On failure, writes a 500 response and returns null; callers must bail out.
 *
 * @param {import('express').Response} res
 * @returns {Promise<string|null>} the Authorization value, or null if already handled
 */
async function acquireApiKey(res) {
  try {
    return await getApiKey();
  } catch (error) {
    logError('Failed to get API key', error);
    res.status(500).json({
      error: 'API key not available',
      message: 'Failed to get or refresh API key. Please check server logs.',
    });
    return null;
  }
}

/**
 * Set SSE headers and pipe an async-iterable of chunks to the client.
 * Errors mid-stream are logged and the response is ended (headers are already
 * sent at that point, so no error status can be returned).
 *
 * @param {import('express').Response} res
 * @param {AsyncIterable<any>} iterable - chunks to forward verbatim
 * @param {string} doneMessage - log line emitted on clean completion
 */
async function pipeSse(res, iterable, doneMessage) {
  res.setHeader('Content-Type', 'text/event-stream');
  res.setHeader('Cache-Control', 'no-cache');
  res.setHeader('Connection', 'keep-alive');
  try {
    for await (const chunk of iterable) {
      res.write(chunk);
    }
    res.end();
    logInfo(doneMessage);
  } catch (streamError) {
    logError('Stream error', streamError);
    res.end();
  }
}

/**
 * POST /v1/chat/completions — OpenAI-style chat completions with format
 * conversion: the request is transformed to the target endpoint's native
 * format, and streaming responses are transformed back to OpenAI chunks.
 */
async function handleChatCompletions(req, res) {
  logInfo('POST /v1/chat/completions');
  try {
    const openaiRequest = req.body;
    const modelId = openaiRequest.model;
    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }
    logInfo(`Routing to ${model.type} endpoint: ${endpoint.base_url}`);

    const authHeader = await acquireApiKey(res);
    if (authHeader === null) return;

    const clientHeaders = req.headers;
    // Log received client headers for debugging
    logDebug('Client headers received', {
      'x-factory-client': clientHeaders['x-factory-client'],
      'x-session-id': clientHeaders['x-session-id'],
      'x-assistant-message-id': clientHeaders['x-assistant-message-id'],
      'user-agent': clientHeaders['user-agent'],
    });

    let transformedRequest;
    let headers;
    if (model.type === 'anthropic') {
      transformedRequest = transformToAnthropic(openaiRequest);
      // Streaming is the default: only an explicit `stream: false` disables it.
      const isStreaming = openaiRequest.stream !== false;
      headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming);
    } else if (model.type === 'openai') {
      transformedRequest = transformToOpenAI(openaiRequest);
      headers = getOpenAIHeaders(authHeader, clientHeaders);
    } else {
      return res.status(500).json({ error: `Unknown endpoint type: ${model.type}` });
    }

    logRequest('POST', endpoint.base_url, headers, transformedRequest);
    const response = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(transformedRequest),
    });
    logInfo(`Response status: ${response.status}`);

    if (!response.ok) {
      const errorText = await response.text();
      logError(`Endpoint error: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `Endpoint returned ${response.status}`,
        details: errorText,
      });
    }

    const isStreaming = transformedRequest.stream !== false;
    if (isStreaming) {
      const transformer =
        model.type === 'anthropic'
          ? new AnthropicResponseTransformer(modelId, `chatcmpl-${Date.now()}`)
          : new OpenAIResponseTransformer(modelId, `chatcmpl-${Date.now()}`);
      await pipeSse(res, transformer.transformStream(response.body), 'Stream completed');
    } else {
      // NOTE(review): the upstream body is returned as-is here; for anthropic
      // models a non-streaming response is NOT converted to OpenAI format.
      // Confirm whether non-streaming responses need transformation too.
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }
  } catch (error) {
    logError('Error in /v1/chat/completions', error);
    res.status(500).json({ error: 'Internal server error', message: error.message });
  }
}

/**
 * Shared implementation for the pass-through routes: the request body is
 * forwarded to the upstream endpoint WITHOUT any format transformation, and
 * the response (streaming or not) is forwarded back verbatim.
 *
 * @param {import('express').Request} req
 * @param {import('express').Response} res
 * @param {Object} opts
 * @param {string} opts.route - route path, used only for log/error labels
 * @param {string} opts.expectedType - the only endpoint type this route accepts
 * @param {(modelId: string, type: string) => string} opts.typeError - builds
 *        the error message for a model of the wrong endpoint type
 * @param {(auth: string, clientHeaders: Object, isStreaming: boolean) => Object}
 *        opts.buildHeaders - builds the upstream request headers
 */
async function forwardWithoutTransform(req, res, { route, expectedType, typeError, buildHeaders }) {
  logInfo(`POST ${route}`);
  try {
    const originalRequest = req.body;
    const modelId = originalRequest.model;
    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    // This route only accepts one endpoint type.
    if (model.type !== expectedType) {
      return res.status(400).json({
        error: 'Invalid endpoint type',
        message: typeError(modelId, model.type),
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }
    logInfo(`Direct forwarding to ${model.type} endpoint: ${endpoint.base_url}`);

    const authHeader = await acquireApiKey(res);
    if (authHeader === null) return;

    // Streaming is the default: only an explicit `stream: false` disables it.
    const isStreaming = originalRequest.stream !== false;
    const headers = buildHeaders(authHeader, req.headers, isStreaming);

    logRequest('POST', endpoint.base_url, headers, originalRequest);
    // Forward the original body untouched.
    const response = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(originalRequest),
    });
    logInfo(`Response status: ${response.status}`);

    if (!response.ok) {
      const errorText = await response.text();
      logError(`Endpoint error: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `Endpoint returned ${response.status}`,
        details: errorText,
      });
    }

    if (isStreaming) {
      // Pipe the raw upstream stream straight through, no transformation.
      await pipeSse(res, response.body, 'Stream forwarded successfully');
    } else {
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }
  } catch (error) {
    logError(`Error in ${route}`, error);
    res.status(500).json({ error: 'Internal server error', message: error.message });
  }
}

/**
 * POST /v1/responses — direct pass-through to the OpenAI-type endpoint.
 */
async function handleDirectResponses(req, res) {
  return forwardWithoutTransform(req, res, {
    route: '/v1/responses',
    expectedType: 'openai',
    typeError: (modelId, type) =>
      `/v1/responses 接口只支持 openai 类型端点,当前模型 ${modelId} 是 ${type} 类型`,
    // OpenAI headers do not depend on the streaming flag.
    buildHeaders: (auth, clientHeaders) => getOpenAIHeaders(auth, clientHeaders),
  });
}

/**
 * POST /v1/messages — direct pass-through to the Anthropic-type endpoint.
 */
async function handleDirectMessages(req, res) {
  return forwardWithoutTransform(req, res, {
    route: '/v1/messages',
    expectedType: 'anthropic',
    typeError: (modelId, type) =>
      `/v1/messages 接口只支持 anthropic 类型端点,当前模型 ${modelId} 是 ${type} 类型`,
    buildHeaders: (auth, clientHeaders, isStreaming) =>
      getAnthropicHeaders(auth, clientHeaders, isStreaming),
  });
}

// Register routes
router.post('/v1/chat/completions', handleChatCompletions);
router.post('/v1/responses', handleDirectResponses);
router.post('/v1/messages', handleDirectMessages);

export default router;