- Implemented an OpenAI-compatible API proxy server
- Support for Anthropic and custom OpenAI format conversion
- Automatic API key refresh via WorkOS OAuth
- SSE streaming response transformation
- Smart header management for Factory endpoints
- Documentation in Chinese
158 lines · 4.8 KiB · JavaScript

import express from 'express';
import fetch from 'node-fetch';
import { getConfig, getModelById, getEndpointByType } from './config.js';
import { logInfo, logDebug, logError, logRequest, logResponse } from './logger.js';
import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js';
import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js';
import { AnthropicResponseTransformer } from './transformers/response-anthropic.js';
import { OpenAIResponseTransformer } from './transformers/response-openai.js';
import { getApiKey } from './auth.js';
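
// Hypothetical client usage sketch (assumptions: the proxy listens on
// localhost:3000 and 'some-configured-model-id' exists in config.models;
// neither is defined in this file):
//
//   import OpenAI from 'openai';
//   const client = new OpenAI({ baseURL: 'http://localhost:3000/v1', apiKey: 'unused' });
//   const reply = await client.chat.completions.create({
//     model: 'some-configured-model-id',
//     messages: [{ role: 'user', content: 'Hello' }]
//   });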

const router = express.Router();
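
// GET /v1/models -- list every configured model in OpenAI's "list" shape so
// standard OpenAI clients can discover what this proxy can route.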
router.get('/v1/models', (req, res) => {
  logInfo('GET /v1/models');

  try {
    const config = getConfig();
    const models = config.models.map(model => ({
      id: model.id,
      object: 'model',
      // OpenAI reports `created` as a Unix timestamp in seconds, not milliseconds
      created: Math.floor(Date.now() / 1000),
      owned_by: model.type,
      permission: [],
      root: model.id,
      parent: null
    }));

    const response = {
      object: 'list',
      data: models
    };

    logResponse(200, null, response);
    res.json(response);
  } catch (error) {
    logError('Error in GET /v1/models', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});
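
// POST /v1/chat/completions -- the main proxy path: validate the model,
// resolve its upstream endpoint by type, convert the OpenAI-format request to
// that endpoint's native format, and relay the (possibly streaming) response.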
router.post('/v1/chat/completions', async (req, res) => {
  logInfo('POST /v1/chat/completions');

  try {
    const openaiRequest = req.body;
    const modelId = openaiRequest.model;

    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }
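
    // Resolve the configured model and the upstream endpoint it maps to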
    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }

    logInfo(`Routing to ${model.type} endpoint: ${endpoint.base_url}`);

    // Get API key (will auto-refresh if needed)
    let authHeader;
    try {
      authHeader = await getApiKey();
    } catch (error) {
      logError('Failed to get API key', error);
      return res.status(500).json({
        error: 'API key not available',
        message: 'Failed to get or refresh API key. Please check server logs.'
      });
    }

    let transformedRequest;
    let headers;
    const clientHeaders = req.headers;

    // Log received client headers for debugging
    logDebug('Client headers received', {
      'x-factory-client': clientHeaders['x-factory-client'],
      'x-session-id': clientHeaders['x-session-id'],
      'x-assistant-message-id': clientHeaders['x-assistant-message-id'],
      'user-agent': clientHeaders['user-agent']
    });
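
    // Convert the incoming OpenAI-format request into the endpoint's native
    // format and build matching headers (Anthropic vs. OpenAI style).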
    if (model.type === 'anthropic') {
      transformedRequest = transformToAnthropic(openaiRequest);
      const isStreaming = openaiRequest.stream !== false;
      headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming);
    } else if (model.type === 'openai') {
      transformedRequest = transformToOpenAI(openaiRequest);
      headers = getOpenAIHeaders(authHeader, clientHeaders);
    } else {
      return res.status(500).json({ error: `Unknown endpoint type: ${model.type}` });
    }
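
    // Relay the converted request to the upstream Factory endpoint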
    logRequest('POST', endpoint.base_url, headers, transformedRequest);

    const response = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(transformedRequest)
    });

    logInfo(`Response status: ${response.status}`);

    if (!response.ok) {
      const errorText = await response.text();
      logError(`Endpoint error: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `Endpoint returned ${response.status}`,
        details: errorText
      });
    }

    // Default to streaming unless the client explicitly sent stream: false
    const isStreaming = transformedRequest.stream !== false;
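
    // Streaming: set SSE headers, then pipe the upstream body through the
    // per-endpoint transformer, which rewrites each event into OpenAI
    // chat.completion.chunk format.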
    if (isStreaming) {
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      let transformer;
      if (model.type === 'anthropic') {
        transformer = new AnthropicResponseTransformer(modelId, `chatcmpl-${Date.now()}`);
      } else if (model.type === 'openai') {
        transformer = new OpenAIResponseTransformer(modelId, `chatcmpl-${Date.now()}`);
      }

      try {
        for await (const chunk of transformer.transformStream(response.body)) {
          res.write(chunk);
        }
        res.end();
        logInfo('Stream completed');
      } catch (streamError) {
        logError('Stream error', streamError);
        // SSE headers are already sent, so just terminate the stream
        res.end();
      }
    } else {
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('Error in POST /v1/chat/completions', error);
    res.status(500).json({
      error: 'Internal server error',
      message: error.message
    });
  }
});
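
// Hypothetical mounting sketch (assumed entry point, not part of this file):
//
//   import express from 'express';
//   import router from './router.js';
//
//   const app = express();
//   app.use(express.json());  // needed so req.body is parsed for the routes above
//   app.use(router);
//   app.listen(3000);
//
// The file name './router.js' and port 3000 are assumptions for illustration.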
export default router;