add gpt-5.1-codex-max support
@@ -58,7 +58,7 @@ export function getModelReasoning(modelId) {
     return null;
   }
   const reasoningLevel = model.reasoning.toLowerCase();
-  if (['low', 'medium', 'high', 'auto'].includes(reasoningLevel)) {
+  if (['low', 'medium', 'high', 'xhigh', 'auto'].includes(reasoningLevel)) {
     return reasoningLevel;
   }
   return null;
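For context, the check above can be read in isolation as a small validator. A hedged sketch follows; the helper name and the standalone wiring are illustrative, not the repo's code:

// Illustrative helper mirroring the branch above; `model` is any entry from the model config.
function resolveReasoningLevel(model) {
  if (!model || !model.reasoning) {
    return null;
  }
  const reasoningLevel = model.reasoning.toLowerCase();
  // 'xhigh' is now accepted alongside the existing levels.
  return ['low', 'medium', 'high', 'xhigh', 'auto'].includes(reasoningLevel)
    ? reasoningLevel
    : null;
}

// Example: the entry added in the config hunk below resolves to 'auto'.
console.log(resolveReasoningLevel({ id: 'gpt-5.1-codex-max', reasoning: 'auto' }));  // 'auto'
console.log(resolveReasoningLevel({ id: 'gpt-5.1-codex-max', reasoning: 'xhigh' })); // 'xhigh'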
@@ -57,11 +57,18 @@
       "provider": "openai"
     },
     {
-      "name": "GPT-5.1-Codex",
+      "name": "GPT-5.1 Codex",
       "id": "gpt-5.1-codex",
       "type": "openai",
       "reasoning": "off",
       "provider": "openai"
+    },
+    {
+      "name": "GPT-5.1 Codex Max",
+      "id": "gpt-5.1-codex-max",
+      "type": "openai",
+      "reasoning": "auto",
+      "provider": "openai"
     },
     {
       "name": "GLM-4.6",
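A rough usage sketch for the new entry, assuming clients pick a model by its `id` and the proxy then looks up its `reasoning` default; the payload shape is an assumption:

// Hypothetical client payload targeting the new model through the proxy.
const body = {
  model: 'gpt-5.1-codex-max',                        // id from the config entry above
  input: [{ role: 'user', content: 'Hello there' }]  // shape assumed for a Responses-style call
};
// Because the entry ships with "reasoning": "auto", the handlers below leave any
// reasoning/thinking fields in the client request untouched instead of overriding them.
console.log(JSON.stringify(body));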
@@ -312,7 +312,7 @@ async function handleDirectResponses(req, res) {
     if (reasoningLevel === 'auto') {
       // Auto mode: leave the original request's reasoning field unchanged
       // If the original request has a reasoning field, keep it; otherwise don't add one
-    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+    } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
       modifiedRequest.reasoning = {
         effort: reasoningLevel,
         summary: 'auto'
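For a model configured with a fixed level such as 'xhigh', the branch above now produces a reasoning object on the Responses path. A minimal sketch of the outcome; the surrounding request fields are illustrative:

// Assumed effect of the 'xhigh' branch in handleDirectResponses.
const originalRequest = { model: 'gpt-5.1-codex-max', input: 'hello' }; // illustrative
const modifiedRequest = {
  ...originalRequest,
  reasoning: {
    effort: 'xhigh', // forwarded level
    summary: 'auto'  // as set in the hunk above
  }
};
console.log(modifiedRequest.reasoning); // { effort: 'xhigh', summary: 'auto' }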
@@ -463,11 +463,12 @@ async function handleDirectMessages(req, res) {
     if (reasoningLevel === 'auto') {
       // Auto mode: leave the original request's thinking field unchanged
       // If the original request has a thinking field, keep it; otherwise don't add one
-    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+    } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
       const budgetTokens = {
         'low': 4096,
         'medium': 12288,
-        'high': 24576
+        'high': 24576,
+        'xhigh': 40960
       };
 
       modifiedRequest.thinking = {
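On the Messages path, 'xhigh' maps to a 40960-token thinking budget. A runnable sketch of the lookup; the `{ type: 'enabled', budget_tokens }` shape follows Anthropic's extended-thinking parameter and is an assumption here, since the assignment is truncated in the hunk:

// Budget lookup as introduced above, plus an assumed thinking block built from it.
const budgetTokens = { low: 4096, medium: 12288, high: 24576, xhigh: 40960 };
const reasoningLevel = 'xhigh';
const thinking = {
  type: 'enabled',                            // assumed, not shown in the diff
  budget_tokens: budgetTokens[reasoningLevel] // 40960 for 'xhigh'
};
console.log(thinking);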
start.sh
@@ -1,4 +1,8 @@
 #!/bin/bash
 
+echo "Current value of FACTORY_API_KEY is" $FACTORY_API_KEY
+echo $FACTORY_API_KEY
+echo "Reset FACTORY_API_KEY..."
+export FACTORY_API_KEY=""
 echo "Starting droid2api server..."
 node server.js
@@ -119,12 +119,13 @@ export function transformToAnthropic(openaiRequest) {
       anthropicRequest.thinking = openaiRequest.thinking;
     }
     // If original request has no thinking field, don't add one
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Specific level: override with model configuration
     const budgetTokens = {
       'low': 4096,
       'medium': 12288,
-      'high': 24576
+      'high': 24576,
+      'xhigh': 24576
     };
 
     anthropicRequest.thinking = {
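In transformToAnthropic the same lookup gives 'xhigh' the 24576-token budget used for 'high'. A sketch of the resulting thinking block, with the same caveat about its final shape:

// Assumed result for a model configured with 'xhigh' on this transformer path.
const budgetTokens = { low: 4096, medium: 12288, high: 24576, xhigh: 24576 };
const anthropicThinking = {
  type: 'enabled',                  // shape assumed from Anthropic's thinking parameter
  budget_tokens: budgetTokens.xhigh // 24576 on this path (handleDirectMessages uses 40960)
};
console.log(anthropicThinking);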
@@ -189,7 +190,7 @@ export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming
   if (reasoningLevel === 'auto') {
     // Auto mode: don't modify anthropic-beta header, preserve original
     // betaValues remain unchanged from client headers
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Add thinking beta if not already present
     if (!betaValues.includes(thinkingBeta)) {
       betaValues.push(thinkingBeta);
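A hedged sketch of how the accumulated beta flags plausibly end up on the outgoing request; `thinkingBeta`'s real value is defined elsewhere in the file and is replaced by a placeholder below:

// Illustrative only: push the thinking beta (placeholder value) and join into the header.
const thinkingBeta = 'example-thinking-beta'; // placeholder, not from the diff
const betaValues = ['example-existing-beta']; // e.g. values carried over from the client
if (!betaValues.includes(thinkingBeta)) {
  betaValues.push(thinkingBeta);
}
const headers = { 'anthropic-beta': betaValues.join(',') }; // header assembly assumed
console.log(headers['anthropic-beta']);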
@@ -44,7 +44,7 @@ export function transformToCommon(openaiRequest) {
   if (reasoningLevel === 'auto') {
     // Auto mode: preserve original request's reasoning_effort field exactly as-is
     // If original request has reasoning_effort field, keep it; otherwise don't add one
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Specific level: override with model configuration
     commonRequest.reasoning_effort = reasoningLevel;
   } else {
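On the common chat-completions-style path the level is forwarded as-is. A small sketch with the surrounding fields assumed:

// Assumed effect of transformToCommon for an 'xhigh' model: the level passes through unchanged.
const commonRequest = {
  model: 'gpt-5.1-codex-max',                  // illustrative
  messages: [{ role: 'user', content: 'hi' }], // illustrative
  reasoning_effort: 'xhigh'                    // set by the branch above
};
console.log(commonRequest.reasoning_effort); // 'xhigh'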
@@ -100,7 +100,7 @@ export function transformToOpenAI(openaiRequest) {
       targetRequest.reasoning = openaiRequest.reasoning;
     }
     // If original request has no reasoning field, don't add one
-  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
+  } else if (reasoningLevel && ['low', 'medium', 'high', 'xhigh'].includes(reasoningLevel)) {
     // Specific level: override with model configuration
     targetRequest.reasoning = {
       effort: reasoningLevel,
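And on the OpenAI target the level becomes a reasoning object again. A usage-style sketch; the `summary` field mirrors the Responses handler earlier and is an assumption, since this hunk is truncated before the object closes:

// Illustrative expectation for transformToOpenAI with an 'xhigh' model configuration.
const targetRequest = {
  reasoning: {
    effort: 'xhigh', // from the branch above
    summary: 'auto'  // assumed, not shown in this hunk
  }
};
console.assert(targetRequest.reasoning.effort === 'xhigh');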