feat: add dynamic x-api-provider and reasoning_effort support

- Add per-model provider configuration in config.json
- Implement getModelProvider() to fetch provider from model config
- Update all header generators to accept dynamic provider parameter
- Add reasoning_effort field handling for the common endpoint type
- Support auto/low/medium/high/off reasoning levels for common models

This enables flexible multi-provider support and reasoning control
across different endpoint types (anthropic, openai, common).
This commit is contained in:
1eon
2025-11-19 01:25:01 +08:00
parent c31b680d95
commit 62a384f34b
6 changed files with 70 additions and 22 deletions

View File

@@ -1,6 +1,6 @@
import express from 'express';
import fetch from 'node-fetch';
import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning, getRedirectedModelId } from './config.js';
import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning, getRedirectedModelId, getModelProvider } from './config.js';
import { logInfo, logDebug, logError, logRequest, logResponse } from './logger.js';
import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js';
import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js';
@@ -129,16 +129,19 @@ async function handleChatCompletions(req, res) {
// Update request body with redirected model ID before transformation
const requestWithRedirectedModel = { ...openaiRequest, model: modelId };
// Get provider from model config
const provider = getModelProvider(modelId);
if (model.type === 'anthropic') {
transformedRequest = transformToAnthropic(requestWithRedirectedModel);
const isStreaming = openaiRequest.stream === true;
headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);
headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId, provider);
} else if (model.type === 'openai') {
transformedRequest = transformToOpenAI(requestWithRedirectedModel);
headers = getOpenAIHeaders(authHeader, clientHeaders);
headers = getOpenAIHeaders(authHeader, clientHeaders, provider);
} else if (model.type === 'common') {
transformedRequest = transformToCommon(requestWithRedirectedModel);
headers = getCommonHeaders(authHeader, clientHeaders);
headers = getCommonHeaders(authHeader, clientHeaders, provider);
} else {
return res.status(500).json({ error: `Unknown endpoint type: ${model.type}` });
}
@@ -285,8 +288,11 @@ async function handleDirectResponses(req, res) {
const clientHeaders = req.headers;
// Get provider from model config
const provider = getModelProvider(modelId);
// 获取 headers
const headers = getOpenAIHeaders(authHeader, clientHeaders);
const headers = getOpenAIHeaders(authHeader, clientHeaders, provider);
// 注入系统提示到 instructions 字段并更新重定向后的模型ID
const systemPrompt = getSystemPrompt();
@@ -427,9 +433,12 @@ async function handleDirectMessages(req, res) {
const clientHeaders = req.headers;
// Get provider from model config
const provider = getModelProvider(modelId);
// 获取 headers
const isStreaming = anthropicRequest.stream === true;
const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);
const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId, provider);
// 注入系统提示到 system 字段并更新重定向后的模型ID
const systemPrompt = getSystemPrompt();
@@ -576,7 +585,11 @@ async function handleCountTokens(req, res) {
}
const clientHeaders = req.headers;
const headers = getAnthropicHeaders(authHeader, clientHeaders, false, modelId);
// Get provider from model config
const provider = getModelProvider(modelId);
const headers = getAnthropicHeaders(authHeader, clientHeaders, false, modelId, provider);
// 构建 count_tokens 端点 URL
const countTokensUrl = endpoint.base_url.replace('/v1/messages', '/v1/messages/count_tokens');