From 1c29062ba7e9321c34f7ed6de02cbd73ad60eadb Mon Sep 17 00:00:00 2001
From: 1e0n
Date: Wed, 8 Oct 2025 05:26:31 +0800
Subject: [PATCH] Add reasoning level configuration for large models
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config.js                         | 12 ++++++
 config.json                       | 15 +++++---
 routes.js                         | 36 +++++++++++++++--
 transformers/request-anthropic.js | 64 +++++++++++++++++++++++++++++--
 transformers/request-openai.js    | 22 ++++++++++-
 5 files changed, 137 insertions(+), 12 deletions(-)

diff --git a/config.js b/config.js
index 9ecbc2b..5e8ba1d 100644
--- a/config.js
+++ b/config.js
@@ -49,3 +49,15 @@ export function getSystemPrompt() {
   const cfg = getConfig();
   return cfg.system_prompt || '';
 }
+
+export function getModelReasoning(modelId) {
+  const model = getModelById(modelId);
+  if (!model || !model.reasoning) {
+    return null;
+  }
+  const reasoningLevel = model.reasoning.toLowerCase();
+  if (['low', 'medium', 'high'].includes(reasoningLevel)) {
+    return reasoningLevel;
+  }
+  return null;
+}
diff --git a/config.json b/config.json
index 698ff62..5e8a251 100644
--- a/config.json
+++ b/config.json
@@ -18,27 +18,32 @@
     {
       "name": "Opus 4.1",
       "id": "claude-opus-4-1-20250805",
-      "type": "anthropic"
+      "type": "anthropic",
+      "reasoning": "off"
     },
     {
       "name": "Sonnet 4",
       "id": "claude-sonnet-4-20250514",
-      "type": "anthropic"
+      "type": "anthropic",
+      "reasoning": "medium"
     },
     {
       "name": "Sonnet 4.5",
       "id": "claude-sonnet-4-5-20250929",
-      "type": "anthropic"
+      "type": "anthropic",
+      "reasoning": "high"
     },
     {
       "name": "GPT-5",
       "id": "gpt-5-2025-08-07",
-      "type": "openai"
+      "type": "openai",
+      "reasoning": "off"
     },
     {
       "name": "GPT-5-Codex",
       "id": "gpt-5-codex",
-      "type": "openai"
+      "type": "openai",
+      "reasoning": "high"
     },
     {
       "name": "GLM-4.6",
diff --git a/routes.js b/routes.js
index 55e4ebe..7171e0d 100644
--- a/routes.js
+++ b/routes.js
@@ -1,6 +1,6 @@
 import express from 'express';
 import fetch from 'node-fetch';
-import { getConfig, getModelById, getEndpointByType, getSystemPrompt } from './config.js';
+import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning } from './config.js';
 import { logInfo, logDebug, logError, logRequest, logResponse } from './logger.js';
 import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js';
 import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js';
@@ -90,7 +90,7 @@ async function handleChatCompletions(req, res) {
   if (model.type === 'anthropic') {
     transformedRequest = transformToAnthropic(openaiRequest);
     const isStreaming = openaiRequest.stream !== false;
-    headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming);
+    headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);
   } else if (model.type === 'openai') {
     transformedRequest = transformToOpenAI(openaiRequest);
     headers = getOpenAIHeaders(authHeader, clientHeaders);
@@ -236,6 +236,18 @@ async function handleDirectResponses(req, res) {
     }
   }
 
+  // Handle the reasoning field
+  const reasoningLevel = getModelReasoning(modelId);
+  if (reasoningLevel) {
+    modifiedRequest.reasoning = {
+      effort: reasoningLevel,
+      summary: 'auto'
+    };
+  } else {
+    // If the configured level is off or invalid, remove the reasoning field
+    delete modifiedRequest.reasoning;
+  }
+
   logRequest('POST', endpoint.base_url, headers, modifiedRequest);
 
   // Forward the modified request
@@ -339,7 +351,7 @@ async function handleDirectMessages(req, res) {
 
   // Get headers
   const isStreaming = anthropicRequest.stream !== false;
-  const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming);
+  const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);
 
   // Inject the system prompt into the system field
   const systemPrompt = getSystemPrompt();
@@ -359,6 +371,24 @@ async function handleDirectMessages(req, res) {
     }
   }
 
+  // Handle the thinking field
+  const reasoningLevel = getModelReasoning(modelId);
+  if (reasoningLevel) {
+    const budgetTokens = {
+      'low': 4096,
+      'medium': 12288,
+      'high': 24576
+    };
+
+    modifiedRequest.thinking = {
+      type: 'enabled',
+      budget_tokens: budgetTokens[reasoningLevel]
+    };
+  } else {
+    // If the configured level is off or invalid, remove the thinking field
+    delete modifiedRequest.thinking;
+  }
+
   logRequest('POST', endpoint.base_url, headers, modifiedRequest);
 
   // Forward the modified request
diff --git a/transformers/request-anthropic.js b/transformers/request-anthropic.js
index ef30393..3b7230d 100644
--- a/transformers/request-anthropic.js
+++ b/transformers/request-anthropic.js
@@ -1,5 +1,5 @@
 import { logDebug } from '../logger.js';
-import { getSystemPrompt } from '../config.js';
+import { getSystemPrompt, getModelReasoning } from '../config.js';
 
 export function transformToAnthropic(openaiRequest) {
   logDebug('Transforming OpenAI request to Anthropic format');
@@ -107,6 +107,38 @@ export function transformToAnthropic(openaiRequest) {
     });
   }
 
+  // Handle thinking field based on model configuration
+  const reasoningLevel = getModelReasoning(openaiRequest.model);
+  if (reasoningLevel) {
+    const budgetTokens = {
+      'low': 4096,
+      'medium': 12288,
+      'high': 24576
+    };
+
+    anthropicRequest.thinking = {
+      type: 'enabled',
+      budget_tokens: budgetTokens[reasoningLevel]
+    };
+  }
+  // If request already has thinking field, respect the configuration rule:
+  // remove it if model config is off/invalid, otherwise override with config
+  if (openaiRequest.thinking) {
+    if (reasoningLevel) {
+      const budgetTokens = {
+        'low': 4096,
+        'medium': 12288,
+        'high': 24576
+      };
+
+      anthropicRequest.thinking = {
+        type: 'enabled',
+        budget_tokens: budgetTokens[reasoningLevel]
+      };
+    }
+    // If reasoningLevel is null (off/invalid), don't add the thinking field
+  }
+
   // Pass through other compatible parameters
   if (openaiRequest.temperature !== undefined) {
     anthropicRequest.temperature = openaiRequest.temperature;
@@ -124,7 +156,7 @@ export function transformToAnthropic(openaiRequest) {
   return anthropicRequest;
 }
 
-export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming = true) {
+export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming = true, modelId = null) {
   // Generate unique IDs if not provided
   const sessionId = clientHeaders['x-session-id'] || generateUUID();
   const messageId = clientHeaders['x-assistant-message-id'] || generateUUID();
@@ -133,7 +165,6 @@ export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming
     'accept': 'application/json',
     'content-type': 'application/json',
     'anthropic-version': '2023-06-01',
-    'anthropic-beta': 'interleaved-thinking-2025-05-14',
     'x-api-key': 'placeholder',
     'authorization': authHeader || '',
     'x-api-provider': 'anthropic',
@@ -145,6 +176,33 @@ export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming
     'connection': 'keep-alive'
   };
 
+  // Handle anthropic-beta header based on reasoning configuration
+  const reasoningLevel = modelId ? getModelReasoning(modelId) : null;
+  let betaValues = [];
+
+  // Add existing beta values from client headers
+  if (clientHeaders['anthropic-beta']) {
+    const existingBeta = clientHeaders['anthropic-beta'];
+    betaValues = existingBeta.split(',').map(v => v.trim());
+  }
+
+  // Handle thinking beta based on reasoning configuration
+  const thinkingBeta = 'interleaved-thinking-2025-05-14';
+  if (reasoningLevel) {
+    // Add thinking beta if not already present
+    if (!betaValues.includes(thinkingBeta)) {
+      betaValues.push(thinkingBeta);
+    }
+  } else {
+    // Remove thinking beta if reasoning is off/invalid
+    betaValues = betaValues.filter(v => v !== thinkingBeta);
+  }
+
+  // Set anthropic-beta header if there are any values
+  if (betaValues.length > 0) {
+    headers['anthropic-beta'] = betaValues.join(', ');
+  }
+
   // Pass through Stainless SDK headers with defaults
   const stainlessDefaults = {
     'x-stainless-arch': 'x64',
diff --git a/transformers/request-openai.js b/transformers/request-openai.js
index d9c8bd2..b272555 100644
--- a/transformers/request-openai.js
+++ b/transformers/request-openai.js
@@ -1,5 +1,5 @@
 import { logDebug } from '../logger.js';
-import { getSystemPrompt } from '../config.js';
+import { getSystemPrompt, getModelReasoning } from '../config.js';
 
 export function transformToOpenAI(openaiRequest) {
   logDebug('Transforming OpenAI request to target OpenAI format');
@@ -88,6 +88,26 @@ export function transformToOpenAI(openaiRequest) {
     targetRequest.instructions = systemPrompt;
   }
 
+  // Handle reasoning field based on model configuration
+  const reasoningLevel = getModelReasoning(openaiRequest.model);
+  if (reasoningLevel) {
+    targetRequest.reasoning = {
+      effort: reasoningLevel,
+      summary: 'auto'
+    };
+  }
+  // If request already has reasoning field, respect the configuration rule:
+  // remove it if model config is off/invalid, otherwise override with config
+  if (openaiRequest.reasoning) {
+    if (reasoningLevel) {
+      targetRequest.reasoning = {
+        effort: reasoningLevel,
+        summary: 'auto'
+      };
+    }
+    // If reasoningLevel is null (off/invalid), don't add the reasoning field
+  }
+
   // Pass through other parameters
   if (openaiRequest.temperature !== undefined) {
     targetRequest.temperature = openaiRequest.temperature;
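
Note: the low/medium/high -> budget_tokens mapping (4096/12288/24576) now
appears three times: twice in routes.js and once in
transformers/request-anthropic.js. Below is a minimal sketch of a shared
helper the call sites could use instead. It assumes placement in config.js
next to getModelReasoning(); the helper name getThinkingConfig and the
constant THINKING_BUDGET_TOKENS are hypothetical, not part of this patch.

  // Hypothetical helper for config.js -- not part of this patch.
  // Maps the configured reasoning level to the Anthropic `thinking`
  // payload, reusing getModelReasoning() defined above.
  const THINKING_BUDGET_TOKENS = {
    low: 4096,
    medium: 12288,
    high: 24576
  };

  export function getThinkingConfig(modelId) {
    const reasoningLevel = getModelReasoning(modelId);
    if (!reasoningLevel) {
      return null; // reasoning is "off" or invalid
    }
    return {
      type: 'enabled',
      budget_tokens: THINKING_BUDGET_TOKENS[reasoningLevel]
    };
  }

Each call site would then reduce to something like:

  const thinking = getThinkingConfig(modelId);
  if (thinking) {
    modifiedRequest.thinking = thinking;
  } else {
    delete modifiedRequest.thinking;
  }

keeping the token budgets in one place.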