diff --git a/config.js b/config.js
index 017200f..ad792a2 100644
--- a/config.js
+++ b/config.js
@@ -58,7 +58,7 @@ export function getModelReasoning(modelId) {
     return null;
   }
   const reasoningLevel = model.reasoning.toLowerCase();
-  if (['low', 'medium', 'high', 'auto'].includes(reasoningLevel)) {
+  if (['low', 'medium', 'high', 'xhigh', 'auto'].includes(reasoningLevel)) {
     return reasoningLevel;
   }
   return null;
diff --git a/config.json b/config.json
index 52f52d0..177ab03 100644
--- a/config.json
+++ b/config.json
@@ -57,11 +57,18 @@
       "provider": "openai"
     },
     {
-      "name": "GPT-5.1-Codex",
+      "name": "GPT-5.1 Codex",
       "id": "gpt-5.1-codex",
       "type": "openai",
       "reasoning": "off",
       "provider": "openai"
+    },
+    {
+      "name": "GPT-5.1 Codex Max",
+      "id": "gpt-5.1-codex-max",
+      "type": "openai",
+      "reasoning": "auto",
+      "provider": "openai"
     },
     {
       "name": "GLM-4.6",
diff --git a/routes.js b/routes.js
index c5c7a07..5b1a33e 100644
--- a/routes.js
+++ b/routes.js
@@ -312,7 +312,7 @@ async function handleDirectResponses(req, res) {
     if (reasoningLevel === 'auto') {
       // Auto模式:保持原始请求的reasoning字段不变
       // 如果原始请求有reasoning字段就保留,没有就不添加
-    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+    } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
       modifiedRequest.reasoning = {
         effort: reasoningLevel,
         summary: 'auto'
@@ -463,11 +463,12 @@ async function handleDirectMessages(req, res) {
     if (reasoningLevel === 'auto') {
       // Auto模式:保持原始请求的thinking字段不变
       // 如果原始请求有thinking字段就保留,没有就不添加
-    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+    } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
       const budgetTokens = {
         'low': 4096,
         'medium': 12288,
-        'high': 24576
+        'high': 24576,
+        'xhigh': 40960
       };
 
       modifiedRequest.thinking = {
diff --git a/start.sh b/start.sh
index 13c444e..c892f5c 100755
--- a/start.sh
+++ b/start.sh
@@ -1,4 +1,8 @@
 #!/bin/bash
+echo "FACTORY_API_KEY 当前值是" $FACTORY_API_KEY
+echo $FACTORY_API_KEY
+echo "Reset FACTORY_API_KEY..."
+export FACTORY_API_KEY=""
 echo "Starting droid2api server..."
 node server.js
 
diff --git a/transformers/request-anthropic.js b/transformers/request-anthropic.js
index d3aee14..108b1f4 100644
--- a/transformers/request-anthropic.js
+++ b/transformers/request-anthropic.js
@@ -119,12 +119,13 @@ export function transformToAnthropic(openaiRequest) {
       anthropicRequest.thinking = openaiRequest.thinking;
     }
     // If original request has no thinking field, don't add one
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Specific level: override with model configuration
     const budgetTokens = {
       'low': 4096,
       'medium': 12288,
-      'high': 24576
+      'high': 24576,
+      'xhigh': 40960
     };
 
     anthropicRequest.thinking = {
@@ -189,7 +190,7 @@
   if (reasoningLevel === 'auto') {
     // Auto mode: don't modify anthropic-beta header, preserve original
     // betaValues remain unchanged from client headers
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Add thinking beta if not already present
     if (!betaValues.includes(thinkingBeta)) {
       betaValues.push(thinkingBeta);
diff --git a/transformers/request-common.js b/transformers/request-common.js
index 42fb991..594a4d2 100644
--- a/transformers/request-common.js
+++ b/transformers/request-common.js
@@ -44,7 +44,7 @@ export function transformToCommon(openaiRequest) {
   if (reasoningLevel === 'auto') {
     // Auto mode: preserve original request's reasoning_effort field exactly as-is
     // If original request has reasoning_effort field, keep it; otherwise don't add one
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Specific level: override with model configuration
     commonRequest.reasoning_effort = reasoningLevel;
   } else {
diff --git a/transformers/request-openai.js b/transformers/request-openai.js
index 091daae..cb602cb 100644
--- a/transformers/request-openai.js
+++ b/transformers/request-openai.js
@@ -100,7 +100,7 @@ export function transformToOpenAI(openaiRequest) {
       targetRequest.reasoning = openaiRequest.reasoning;
     }
     // If original request has no reasoning field, don't add one
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Specific level: override with model configuration
     targetRequest.reasoning = {
       effort: reasoningLevel,