feat: add dynamic x-api-provider and reasoning_effort support

- Add per-model provider configuration in config.json
- Implement getModelProvider() to fetch provider from model config
- Update all header generators to accept dynamic provider parameter
- Add reasoning_effort field handling for common endpoint type
- Support auto/low/medium/high/off reasoning levels for common models

This enables flexible multi-provider support and reasoning control
across different endpoint types (anthropic, openai, common).
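
A minimal sketch of the resulting flow, using the functions this commit touches (import paths follow the diffs below; the token value is a placeholder):

import { getModelProvider } from './config.js';
import { getCommonHeaders } from './transformers/request-common.js';

const authHeader = 'Bearer <token>';

// For a "common" model, the provider from config.json now replaces the
// previously hardcoded 'baseten' value in the x-api-provider header.
const provider = getModelProvider('glm-4.6');        // 'fireworks' per config.json
const headers = getCommonHeaders(authHeader, {}, provider);
console.log(headers['x-api-provider']);              // 'fireworks'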
1eon
2025-11-19 01:25:01 +08:00
parent c31b680d95
commit 62a384f34b
6 changed files with 70 additions and 22 deletions

View File

@@ -64,6 +64,11 @@ export function getModelReasoning(modelId) {
   return null;
 }
 
+export function getModelProvider(modelId) {
+  const model = getModelById(modelId);
+  return model?.provider || null;
+}
+
 export function getUserAgent() {
   return getCurrentUserAgent();
 }
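
Quick illustration of the lookup (model ids from the config.json diff below):

import { getModelProvider } from './config.js';

getModelProvider('claude-opus-4-1-20250805');  // 'anthropic'
getModelProvider('glm-4.6');                   // 'fireworks'
getModelProvider('no-such-model');             // null — unknown model or no provider field

Note that null is still an explicit argument: the default provider values on the header generators below only apply when the parameter is omitted (undefined), not when null is passed through.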

View File

@@ -25,48 +25,64 @@
       "name": "Opus 4.1",
       "id": "claude-opus-4-1-20250805",
       "type": "anthropic",
-      "reasoning": "auto"
+      "reasoning": "auto",
+      "provider": "anthropic"
     },
     {
       "name": "Haiku 4.5",
       "id": "claude-haiku-4-5-20251001",
       "type": "anthropic",
-      "reasoning": "auto"
+      "reasoning": "auto",
+      "provider": "anthropic"
     },
     {
       "name": "Sonnet 4.5",
       "id": "claude-sonnet-4-5-20250929",
       "type": "anthropic",
-      "reasoning": "auto"
+      "reasoning": "auto",
+      "provider": "anthropic"
     },
     {
       "name": "GPT-5",
       "id": "gpt-5-2025-08-07",
       "type": "openai",
-      "reasoning": "auto"
+      "reasoning": "auto",
+      "provider": "openai"
     },
     {
       "name": "GPT-5-Codex",
       "id": "gpt-5-codex",
       "type": "openai",
-      "reasoning": "off"
+      "reasoning": "off",
+      "provider": "openai"
     },
     {
       "name": "GPT-5.1",
       "id": "gpt-5.1",
       "type": "openai",
-      "reasoning": "auto"
+      "reasoning": "auto",
+      "provider": "openai"
     },
     {
       "name": "GPT-5.1-Codex",
       "id": "gpt-5.1-codex",
       "type": "openai",
-      "reasoning": "off"
+      "reasoning": "off",
+      "provider": "openai"
     },
     {
       "name": "GLM-4.6",
       "id": "glm-4.6",
-      "type": "common"
+      "type": "common",
+      "reasoning": "off",
+      "provider": "fireworks"
+    },
+    {
+      "name": "Gemini-3-Pro",
+      "id": "gemini-3-pro-preview",
+      "type": "common",
+      "reasoning": "auto",
+      "provider": "google"
     }
   ],
   "dev_mode": false,

View File

@@ -1,6 +1,6 @@
 import express from 'express';
 import fetch from 'node-fetch';
-import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning, getRedirectedModelId } from './config.js';
+import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning, getRedirectedModelId, getModelProvider } from './config.js';
 import { logInfo, logDebug, logError, logRequest, logResponse } from './logger.js';
 import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js';
 import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js';
@@ -129,16 +129,19 @@ async function handleChatCompletions(req, res) {
   // Update request body with redirected model ID before transformation
   const requestWithRedirectedModel = { ...openaiRequest, model: modelId };
 
+  // Get provider from model config
+  const provider = getModelProvider(modelId);
+
   if (model.type === 'anthropic') {
     transformedRequest = transformToAnthropic(requestWithRedirectedModel);
     const isStreaming = openaiRequest.stream === true;
-    headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);
+    headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId, provider);
   } else if (model.type === 'openai') {
     transformedRequest = transformToOpenAI(requestWithRedirectedModel);
-    headers = getOpenAIHeaders(authHeader, clientHeaders);
+    headers = getOpenAIHeaders(authHeader, clientHeaders, provider);
   } else if (model.type === 'common') {
     transformedRequest = transformToCommon(requestWithRedirectedModel);
-    headers = getCommonHeaders(authHeader, clientHeaders);
+    headers = getCommonHeaders(authHeader, clientHeaders, provider);
   } else {
     return res.status(500).json({ error: `Unknown endpoint type: ${model.type}` });
   }
@@ -285,8 +288,11 @@ async function handleDirectResponses(req, res) {
   const clientHeaders = req.headers;
 
+  // Get provider from model config
+  const provider = getModelProvider(modelId);
+
   // Get headers
-  const headers = getOpenAIHeaders(authHeader, clientHeaders);
+  const headers = getOpenAIHeaders(authHeader, clientHeaders, provider);
 
   // Inject the system prompt into the instructions field and update the redirected model ID
   const systemPrompt = getSystemPrompt();

@@ -427,9 +433,12 @@ async function handleDirectMessages(req, res) {
   const clientHeaders = req.headers;
 
+  // Get provider from model config
+  const provider = getModelProvider(modelId);
+
   // Get headers
   const isStreaming = anthropicRequest.stream === true;
-  const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);
+  const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId, provider);
 
   // Inject the system prompt into the system field and update the redirected model ID
   const systemPrompt = getSystemPrompt();

@@ -576,7 +585,11 @@ async function handleCountTokens(req, res) {
   }
 
   const clientHeaders = req.headers;
-  const headers = getAnthropicHeaders(authHeader, clientHeaders, false, modelId);
+
+  // Get provider from model config
+  const provider = getModelProvider(modelId);
+
+  const headers = getAnthropicHeaders(authHeader, clientHeaders, false, modelId, provider);
 
   // Build the count_tokens endpoint URL
   const countTokensUrl = endpoint.base_url.replace('/v1/messages', '/v1/messages/count_tokens');
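
All four call sites follow the same pattern; condensed for illustration (variables are the handler locals above, signatures as in the transformer diffs below):

// Resolve the provider once per request, then hand it to whichever
// header generator matches the model's endpoint type.
const provider = getModelProvider(modelId);
const headers =
  model.type === 'anthropic' ? getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId, provider)
  : model.type === 'openai'  ? getOpenAIHeaders(authHeader, clientHeaders, provider)
  :                            getCommonHeaders(authHeader, clientHeaders, provider);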

View File

@@ -154,7 +154,7 @@ export function transformToAnthropic(openaiRequest) {
   return anthropicRequest;
 }
 
-export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming = true, modelId = null) {
+export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming = true, modelId = null, provider = 'anthropic') {
   // Generate unique IDs if not provided
   const sessionId = clientHeaders['x-session-id'] || generateUUID();
   const messageId = clientHeaders['x-assistant-message-id'] || generateUUID();

@@ -165,7 +165,7 @@ export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming
     'anthropic-version': clientHeaders['anthropic-version'] || '2023-06-01',
     'authorization': authHeader || '',
     'x-api-key': 'placeholder',
-    'x-api-provider': 'anthropic',
+    'x-api-provider': provider,
     'x-factory-client': 'cli',
     'x-session-id': sessionId,
     'x-assistant-message-id': messageId,
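
Illustrative call (token value is a placeholder; only the provider-dependent output is noted):

import { getAnthropicHeaders } from './transformers/request-anthropic.js';

const headers = getAnthropicHeaders('Bearer <token>', {}, true, 'claude-opus-4-1-20250805', 'anthropic');
// headers['x-api-provider'] -> 'anthropic'; omitting the last argument gives
// the same result via the default, so existing callers are unaffected.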

View File

@@ -1,5 +1,5 @@
 import { logDebug } from '../logger.js';
-import { getSystemPrompt, getUserAgent } from '../config.js';
+import { getSystemPrompt, getUserAgent, getModelReasoning } from '../config.js';
 
 export function transformToCommon(openaiRequest) {
   logDebug('Transforming OpenAI request to Common format');

@@ -39,11 +39,25 @@ export function transformToCommon(openaiRequest) {
     }
   }
 
+  // Handle the reasoning_effort field based on model configuration
+  const reasoningLevel = getModelReasoning(openaiRequest.model);
+  if (reasoningLevel === 'auto') {
+    // Auto mode: preserve the original request's reasoning_effort field exactly as-is.
+    // If the original request has a reasoning_effort field, keep it; otherwise don't add one.
+  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+    // Specific level: override with the model configuration
+    commonRequest.reasoning_effort = reasoningLevel;
+  } else {
+    // Off or invalid: explicitly remove the reasoning_effort field. This ensures
+    // any reasoning_effort value from the original request is deleted.
+    delete commonRequest.reasoning_effort;
+  }
+
   logDebug('Transformed Common request', commonRequest);
   return commonRequest;
 }
 
-export function getCommonHeaders(authHeader, clientHeaders = {}) {
+export function getCommonHeaders(authHeader, clientHeaders = {}, provider = 'baseten') {
   // Generate unique IDs if not provided
   const sessionId = clientHeaders['x-session-id'] || generateUUID();
   const messageId = clientHeaders['x-assistant-message-id'] || generateUUID();

@@ -52,7 +66,7 @@ export function getCommonHeaders(authHeader, clientHeaders = {}) {
     'accept': 'application/json',
     'content-type': 'application/json',
     'authorization': authHeader || '',
-    'x-api-provider': 'baseten',
+    'x-api-provider': provider,
     'x-factory-client': 'cli',
     'x-session-id': sessionId,
     'x-assistant-message-id': messageId,
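
Resulting behavior of the three branches, assuming transformToCommon copies reasoning_effort over from the incoming request as the comments above indicate (ids and levels from config.json):

import { transformToCommon } from './transformers/request-common.js';

// glm-4.6 is configured with reasoning 'off': a client-supplied value is dropped.
const off = transformToCommon({ model: 'glm-4.6', messages: [], reasoning_effort: 'high' });
console.log(off.reasoning_effort);   // undefined

// gemini-3-pro-preview is 'auto': the client's value (or its absence) survives as-is.
const auto = transformToCommon({ model: 'gemini-3-pro-preview', messages: [], reasoning_effort: 'low' });
console.log(auto.reasoning_effort);  // 'low'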

View File

@@ -133,7 +133,7 @@ export function transformToOpenAI(openaiRequest) {
   return targetRequest;
 }
 
-export function getOpenAIHeaders(authHeader, clientHeaders = {}) {
+export function getOpenAIHeaders(authHeader, clientHeaders = {}, provider = 'openai') {
   // Generate unique IDs if not provided
   const sessionId = clientHeaders['x-session-id'] || generateUUID();
   const messageId = clientHeaders['x-assistant-message-id'] || generateUUID();

@@ -141,7 +141,7 @@ export function getOpenAIHeaders(authHeader, clientHeaders = {}) {
   const headers = {
     'content-type': 'application/json',
     'authorization': authHeader || '',
-    'x-api-provider': 'openai',
+    'x-api-provider': provider,
     'x-factory-client': 'cli',
     'x-session-id': sessionId,
     'x-assistant-message-id': messageId,
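
With defaults in place, all three generators keep their pre-change behavior when no provider is passed (authHeader and modelId as in the handler code above):

// Defaults mirror the previously hardcoded header values:
getAnthropicHeaders(authHeader, {}, true, modelId);  // x-api-provider: 'anthropic'
getOpenAIHeaders(authHeader, {});                    // x-api-provider: 'openai'
getCommonHeaders(authHeader, {});                    // x-api-provider: 'baseten'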