/*
 * Changelog (v1.1.0):
 * - Add common endpoint type for GLM-4.6 model
 * - Implement automatic system prompt injection for all requests
 * - Simplify README documentation for better user focus
 * - Update version to 1.1.0
 * - Add *.txt to .gitignore
 *
 * Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com>
 */
import express from 'express';
|
||
import fetch from 'node-fetch';
|
||
import { getConfig, getModelById, getEndpointByType, getSystemPrompt } from './config.js';
|
||
import { logInfo, logDebug, logError, logRequest, logResponse } from './logger.js';
|
||
import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js';
|
||
import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js';
|
||
import { transformToCommon, getCommonHeaders } from './transformers/request-common.js';
|
||
import { AnthropicResponseTransformer } from './transformers/response-anthropic.js';
|
||
import { OpenAIResponseTransformer } from './transformers/response-openai.js';
|
||
import { getApiKey } from './auth.js';
|
||
|
||
const router = express.Router();

/**
 * GET /v1/models — list every configured model in OpenAI's model-list format.
 *
 * Responds with `{ object: 'list', data: [...] }`, each entry mirroring the
 * OpenAI model object shape (`id`, `object`, `created`, `owned_by`, ...).
 * Returns 500 with a generic message if reading the config throws.
 */
router.get('/v1/models', (req, res) => {
  logInfo('GET /v1/models');

  try {
    const config = getConfig();
    const models = config.models.map(model => ({
      id: model.id,
      object: 'model',
      // OpenAI's `created` field is a Unix timestamp in SECONDS;
      // bare Date.now() would report milliseconds.
      created: Math.floor(Date.now() / 1000),
      owned_by: model.type,
      permission: [],
      root: model.id,
      parent: null
    }));

    const response = {
      object: 'list',
      data: models
    };

    logResponse(200, null, response);
    res.json(response);
  } catch (error) {
    logError('Error in GET /v1/models', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});
|
||
|
||
// Standard OpenAI chat-completions handler (with format conversion).
/**
 * POST /v1/chat/completions
 *
 * Accepts an OpenAI-format chat request, looks up the model's endpoint type
 * (`anthropic` | `openai` | `common`), transforms the request into that
 * endpoint's wire format, and forwards it. Streaming responses from
 * `anthropic`/`openai` endpoints are converted back into OpenAI chunks via a
 * response transformer; `common` streams are piped through verbatim.
 * Non-streaming responses are relayed as-is.
 *
 * @param {import('express').Request} req - incoming OpenAI-format request
 * @param {import('express').Response} res - Express response (JSON or SSE)
 */
async function handleChatCompletions(req, res) {
  logInfo('POST /v1/chat/completions');

  try {
    const openaiRequest = req.body;
    const modelId = openaiRequest.model;

    // Validate model id and resolve model + endpoint before doing any work.
    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }

    logInfo(`Routing to ${model.type} endpoint: ${endpoint.base_url}`);

    // Get API key (will auto-refresh if needed)
    let authHeader;
    try {
      authHeader = await getApiKey();
    } catch (error) {
      logError('Failed to get API key', error);
      return res.status(500).json({
        error: 'API key not available',
        message: 'Failed to get or refresh API key. Please check server logs.'
      });
    }

    let transformedRequest;
    let headers;
    const clientHeaders = req.headers;

    // Log received client headers for debugging
    logDebug('Client headers received', {
      'x-factory-client': clientHeaders['x-factory-client'],
      'x-session-id': clientHeaders['x-session-id'],
      'x-assistant-message-id': clientHeaders['x-assistant-message-id'],
      'user-agent': clientHeaders['user-agent']
    });

    // Pick the request transformer + header builder for the endpoint type.
    if (model.type === 'anthropic') {
      transformedRequest = transformToAnthropic(openaiRequest);
      // Streaming is the default: only an explicit `stream: false` disables it.
      const isStreaming = openaiRequest.stream !== false;
      headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming);
    } else if (model.type === 'openai') {
      transformedRequest = transformToOpenAI(openaiRequest);
      headers = getOpenAIHeaders(authHeader, clientHeaders);
    } else if (model.type === 'common') {
      transformedRequest = transformToCommon(openaiRequest);
      headers = getCommonHeaders(authHeader, clientHeaders);
    } else {
      return res.status(500).json({ error: `Unknown endpoint type: ${model.type}` });
    }

    logRequest('POST', endpoint.base_url, headers, transformedRequest);

    const response = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(transformedRequest)
    });

    logInfo(`Response status: ${response.status}`);

    // Upstream errors are surfaced to the client with the upstream status
    // and raw body text for debugging.
    if (!response.ok) {
      const errorText = await response.text();
      logError(`Endpoint error: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `Endpoint returned ${response.status}`,
        details: errorText
      });
    }

    // NOTE(review): decided from the TRANSFORMED request — assumes the
    // transformers carry the client's `stream` flag through unchanged
    // (TODO: confirm against the transformer modules).
    const isStreaming = transformedRequest.stream !== false;

    if (isStreaming) {
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      // `common` type is forwarded verbatim, without a response transformer.
      if (model.type === 'common') {
        try {
          for await (const chunk of response.body) {
            res.write(chunk);
          }
          res.end();
          logInfo('Stream forwarded (common type)');
        } catch (streamError) {
          // Headers are already sent, so the best we can do is end the stream.
          logError('Stream error', streamError);
          res.end();
        }
      } else {
        // `anthropic` and `openai` types go through a response transformer
        // that rewrites upstream chunks into OpenAI chat-completion chunks.
        let transformer;
        if (model.type === 'anthropic') {
          transformer = new AnthropicResponseTransformer(modelId, `chatcmpl-${Date.now()}`);
        } else if (model.type === 'openai') {
          transformer = new OpenAIResponseTransformer(modelId, `chatcmpl-${Date.now()}`);
        }

        try {
          for await (const chunk of transformer.transformStream(response.body)) {
            res.write(chunk);
          }
          res.end();
          logInfo('Stream completed');
        } catch (streamError) {
          // Headers are already sent, so the best we can do is end the stream.
          logError('Stream error', streamError);
          res.end();
        }
      }
    } else {
      // Non-streaming: relay the upstream JSON body without conversion.
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('Error in /v1/chat/completions', error);
    res.status(500).json({
      error: 'Internal server error',
      message: error.message
    });
  }
}
|
||
|
||
// Forward an OpenAI /v1/responses request verbatim (no format conversion).
/**
 * POST /v1/responses — direct pass-through to an `openai`-type endpoint.
 *
 * Validates the target model, injects the configured system prompt into the
 * request's `instructions` field (prepending when the client already supplied
 * instructions), and relays the upstream response — SSE or JSON — to the
 * client without transformation.
 *
 * @param {import('express').Request} req - incoming OpenAI Responses request
 * @param {import('express').Response} res - Express response (JSON or SSE)
 */
async function handleDirectResponses(req, res) {
  logInfo('POST /v1/responses');

  try {
    const incoming = req.body;
    const modelId = incoming.model;

    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    // Only openai-type endpoints may be targeted through this route.
    if (model.type !== 'openai') {
      return res.status(400).json({
        error: 'Invalid endpoint type',
        message: `/v1/responses 接口只支持 openai 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }

    logInfo(`Direct forwarding to ${model.type} endpoint: ${endpoint.base_url}`);

    // Resolve the upstream API key (auto-refreshes when needed).
    let authHeader;
    try {
      authHeader = await getApiKey();
    } catch (error) {
      logError('Failed to get API key', error);
      return res.status(500).json({
        error: 'API key not available',
        message: 'Failed to get or refresh API key. Please check server logs.'
      });
    }

    const headers = getOpenAIHeaders(authHeader, req.headers);

    // Inject the configured system prompt into `instructions`, prepending it
    // when the client already supplied its own instructions.
    const systemPrompt = getSystemPrompt();
    const outbound = { ...incoming };
    if (systemPrompt) {
      outbound.instructions = outbound.instructions
        ? systemPrompt + outbound.instructions
        : systemPrompt;
    }

    logRequest('POST', endpoint.base_url, headers, outbound);

    const upstream = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(outbound)
    });

    logInfo(`Response status: ${upstream.status}`);

    if (!upstream.ok) {
      const errorText = await upstream.text();
      logError(`Endpoint error: ${upstream.status}`, new Error(errorText));
      return res.status(upstream.status).json({
        error: `Endpoint returned ${upstream.status}`,
        details: errorText
      });
    }

    // Streaming unless the client explicitly sent `stream: false`.
    if (incoming.stream !== false) {
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        // Pipe the upstream SSE bytes straight through, untouched.
        for await (const chunk of upstream.body) {
          res.write(chunk);
        }
        res.end();
        logInfo('Stream forwarded successfully');
      } catch (streamError) {
        // Headers already sent; just terminate the stream.
        logError('Stream error', streamError);
        res.end();
      }
    } else {
      // Non-streaming: relay the upstream JSON body as-is.
      const data = await upstream.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('Error in /v1/responses', error);
    res.status(500).json({
      error: 'Internal server error',
      message: error.message
    });
  }
}
|
||
|
||
// Forward an Anthropic /v1/messages request verbatim (no format conversion).
/**
 * POST /v1/messages — direct pass-through to an `anthropic`-type endpoint.
 *
 * Validates the target model, injects the configured system prompt as the
 * first entry of the Anthropic `system` content-block array, and relays the
 * upstream response — SSE or JSON — to the client without transformation.
 *
 * @param {import('express').Request} req - incoming Anthropic Messages request
 * @param {import('express').Response} res - Express response (JSON or SSE)
 */
async function handleDirectMessages(req, res) {
  logInfo('POST /v1/messages');

  try {
    const incoming = req.body;
    const modelId = incoming.model;

    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    // Only anthropic-type endpoints may be targeted through this route.
    if (model.type !== 'anthropic') {
      return res.status(400).json({
        error: 'Invalid endpoint type',
        message: `/v1/messages 接口只支持 anthropic 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }

    logInfo(`Direct forwarding to ${model.type} endpoint: ${endpoint.base_url}`);

    // Resolve the upstream API key (auto-refreshes when needed).
    let authHeader;
    try {
      authHeader = await getApiKey();
    } catch (error) {
      logError('Failed to get API key', error);
      return res.status(500).json({
        error: 'API key not available',
        message: 'Failed to get or refresh API key. Please check server logs.'
      });
    }

    // Streaming unless the client explicitly sent `stream: false`; the flag
    // also selects the proper Anthropic accept headers.
    const isStreaming = incoming.stream !== false;
    const headers = getAnthropicHeaders(authHeader, req.headers, isStreaming);

    // Prepend the configured system prompt as an Anthropic text block.
    const systemPrompt = getSystemPrompt();
    const outbound = { ...incoming };
    if (systemPrompt) {
      const promptBlock = { type: 'text', text: systemPrompt };
      outbound.system = Array.isArray(outbound.system)
        ? [promptBlock, ...outbound.system]
        : [promptBlock];
    }

    logRequest('POST', endpoint.base_url, headers, outbound);

    const upstream = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(outbound)
    });

    logInfo(`Response status: ${upstream.status}`);

    if (!upstream.ok) {
      const errorText = await upstream.text();
      logError(`Endpoint error: ${upstream.status}`, new Error(errorText));
      return res.status(upstream.status).json({
        error: `Endpoint returned ${upstream.status}`,
        details: errorText
      });
    }

    if (isStreaming) {
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        // Pipe the upstream SSE bytes straight through, untouched.
        for await (const chunk of upstream.body) {
          res.write(chunk);
        }
        res.end();
        logInfo('Stream forwarded successfully');
      } catch (streamError) {
        // Headers already sent; just terminate the stream.
        logError('Stream error', streamError);
        res.end();
      }
    } else {
      // Non-streaming: relay the upstream JSON body as-is.
      const data = await upstream.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('Error in /v1/messages', error);
    res.status(500).json({
      error: 'Internal server error',
      message: error.message
    });
  }
}
|
||
|
||
// Route registration
router.post('/v1/chat/completions', handleChatCompletions); // OpenAI format, with request/response conversion
router.post('/v1/responses', handleDirectResponses); // direct pass-through (openai-type endpoints only)
router.post('/v1/messages', handleDirectMessages); // direct pass-through (anthropic-type endpoints only)

export default router;
|