Add /v1/responses endpoint for direct OpenAI forwarding

Features:
- Add new /v1/responses endpoint for transparent request/response forwarding
- Supports only openai-type endpoints (rejects anthropic with a 400 error)
- No request transformation - the original request body is forwarded as-is
- No response transformation - streaming and non-streaming responses are forwarded directly
- /v1/chat/completions keeps its original behavior with format conversion

Differences between endpoints:
- /v1/chat/completions: Converts formats for all endpoint types (anthropic, openai)
- /v1/responses: Direct proxy for openai endpoints only, zero transformation (see the client sketch below)
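
A minimal client-side sketch of the difference, assuming the proxy listens on http://localhost:3000 and using a hypothetical model id 'gpt-4o' (both are assumptions, not part of this commit):

// Sketch: calling both endpoints from a Node 18+ ESM client.
const BASE = 'http://localhost:3000'; // assumed proxy address

// /v1/chat/completions: the proxy converts formats as needed for the
// endpoint type (anthropic or openai) behind the chosen model.
await fetch(`${BASE}/v1/chat/completions`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'gpt-4o', // hypothetical model id
    messages: [{ role: 'user', content: 'Hello' }]
  })
});

// /v1/responses: the body is forwarded to the openai endpoint untouched,
// so its shape is whatever the upstream expects; a model backed by an
// anthropic-type endpoint is rejected with HTTP 400.
await fetch(`${BASE}/v1/responses`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'gpt-4o', // hypothetical model id
    messages: [{ role: 'user', content: 'Hello' }],
    stream: false
  })
});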
Author: 1e0n
Date: 2025-10-07 05:14:58 +08:00
Parent: 3aebe7e723
Commit: 79616ba3b9
2 changed files with 117 additions and 5 deletions

routes.js (115 lines changed)

@@ -38,7 +38,8 @@ router.get('/v1/models', (req, res) => {
   }
 });
-router.post('/v1/chat/completions', async (req, res) => {
+// Standard OpenAI chat completions handler (with format conversion)
+async function handleChatCompletions(req, res) {
   logInfo('POST /v1/chat/completions');
   try {
@@ -146,12 +147,120 @@ router.post('/v1/chat/completions', async (req, res) => {
     }
   } catch (error) {
-    logError('Error in POST /v1/chat/completions', error);
+    logError('Error in /v1/chat/completions', error);
     res.status(500).json({
       error: 'Internal server error',
       message: error.message
     });
   }
-});
+}
+
+// Forward OpenAI requests directly (no format conversion)
+async function handleDirectResponses(req, res) {
+  logInfo('POST /v1/responses');
+  try {
+    const openaiRequest = req.body;
+    const modelId = openaiRequest.model;
+    if (!modelId) {
+      return res.status(400).json({ error: 'model is required' });
+    }
+    const model = getModelById(modelId);
+    if (!model) {
+      return res.status(404).json({ error: `Model ${modelId} not found` });
+    }
+    // Only openai-type endpoints are allowed
+    if (model.type !== 'openai') {
+      return res.status(400).json({
+        error: 'Invalid endpoint type',
+        message: `/v1/responses only supports openai-type endpoints; model ${modelId} is of type ${model.type}`
+      });
+    }
+    const endpoint = getEndpointByType(model.type);
+    if (!endpoint) {
+      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
+    }
+    logInfo(`Direct forwarding to ${model.type} endpoint: ${endpoint.base_url}`);
+    // Get API key
+    let authHeader;
+    try {
+      authHeader = await getApiKey();
+    } catch (error) {
+      logError('Failed to get API key', error);
+      return res.status(500).json({
+        error: 'API key not available',
+        message: 'Failed to get or refresh API key. Please check server logs.'
+      });
+    }
+    const clientHeaders = req.headers;
+    // Build headers, but leave the request body untransformed
+    const headers = getOpenAIHeaders(authHeader, clientHeaders);
+    logRequest('POST', endpoint.base_url, headers, openaiRequest);
+    // Forward the original request directly
+    const response = await fetch(endpoint.base_url, {
+      method: 'POST',
+      headers,
+      body: JSON.stringify(openaiRequest) // forwarded as-is, no transformation
+    });
+    logInfo(`Response status: ${response.status}`);
+    if (!response.ok) {
+      const errorText = await response.text();
+      logError(`Endpoint error: ${response.status}`, new Error(errorText));
+      return res.status(response.status).json({
+        error: `Endpoint returned ${response.status}`,
+        details: errorText
+      });
+    }
+    const isStreaming = openaiRequest.stream !== false;
+    if (isStreaming) {
+      // Forward the streaming response directly, without transformation
+      res.setHeader('Content-Type', 'text/event-stream');
+      res.setHeader('Cache-Control', 'no-cache');
+      res.setHeader('Connection', 'keep-alive');
+      try {
+        // Pipe the raw response stream straight to the client
+        for await (const chunk of response.body) {
+          res.write(chunk);
+        }
+        res.end();
+        logInfo('Stream forwarded successfully');
+      } catch (streamError) {
+        logError('Stream error', streamError);
+        res.end();
+      }
+    } else {
+      // Forward the non-streaming response directly, without transformation
+      const data = await response.json();
+      logResponse(200, null, data);
+      res.json(data);
+    }
+  } catch (error) {
+    logError('Error in /v1/responses', error);
+    res.status(500).json({
+      error: 'Internal server error',
+      message: error.message
+    });
+  }
+}
+
+// Register routes
+router.post('/v1/chat/completions', handleChatCompletions);
+router.post('/v1/responses', handleDirectResponses);
 export default router;
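
Because the streaming branch writes the upstream SSE chunks through unchanged, a client can consume /v1/responses exactly as it would the upstream API. A minimal consumer sketch (Node 18+ ESM), assuming a local proxy address and a hypothetical model id:

// Sketch: consuming the pass-through SSE stream.
const res = await fetch('http://localhost:3000/v1/responses', { // assumed address
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'gpt-4o', // hypothetical model id
    messages: [{ role: 'user', content: 'Stream a short poem' }],
    stream: true
  })
});

// Each chunk is an untouched slice of the upstream event stream.
for await (const chunk of res.body) {
  process.stdout.write(chunk);
}

Note the default in the handler: isStreaming is `openaiRequest.stream !== false`, so a request that omits the stream flag gets SSE; a client that wants a single JSON body must send stream: false explicitly.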


@@ -29,7 +29,8 @@ app.get('/', (req, res) => {
     description: 'OpenAI Compatible API Proxy',
     endpoints: [
       'GET /v1/models',
-      'POST /v1/chat/completions'
+      'POST /v1/chat/completions',
+      'POST /v1/responses'
     ]
   });
 });
@@ -86,7 +87,8 @@ app.use((req, res, next) => {
     timestamp: errorInfo.timestamp,
     availableEndpoints: [
       'GET /v1/models',
-      'POST /v1/chat/completions'
+      'POST /v1/chat/completions',
+      'POST /v1/responses'
     ]
   });
 });
@@ -118,6 +120,7 @@ app.use((err, req, res, next) => {
     logInfo('Available endpoints:');
     logInfo('  GET /v1/models');
     logInfo('  POST /v1/chat/completions');
+    logInfo('  POST /v1/responses');
   })
   .on('error', (err) => {
     if (err.code === 'EADDRINUSE') {