fix: Increase VLM max_tokens to 2000 to avoid response truncation

2026-01-07 03:37:55 +08:00
parent 8d82cf91d5
commit be216eacad


@@ -167,7 +167,7 @@ Output ONLY the JSON object, no additional text."""
             model=vision_model,
             messages=messages,
             temperature=0.3,
-            max_tokens=800
+            max_tokens=2000  # Increased to avoid truncation
         )
         vlm_response = response.choices[0].message.content if response.choices else None
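For context, a minimal sketch of the surrounding call under the new limit, assuming an OpenAI-compatible chat-completions client; `client`, `vision_model`, and `messages` stand in for objects built earlier in this file, and the `finish_reason` check is one way to confirm the larger cap actually prevents truncation:

# Minimal sketch, assuming an OpenAI-compatible client; `client`,
# `vision_model`, and `messages` are placeholders for objects built
# earlier in this file, not names confirmed by the diff.
response = client.chat.completions.create(
    model=vision_model,
    messages=messages,
    temperature=0.3,
    max_tokens=2000,  # larger cap so the JSON answer is not cut off
)

choice = response.choices[0] if response.choices else None
vlm_response = choice.message.content if choice else None

# If the model still hits the token cap, finish_reason is "length";
# the JSON output is then likely incomplete and should not be parsed as-is.
if choice is not None and choice.finish_reason == "length":
    raise ValueError("VLM response truncated; consider raising max_tokens further")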