fix: Increase VLM max_tokens to 2000 to avoid response truncation
@@ -167,7 +167,7 @@ Output ONLY the JSON object, no additional text."""
     model=vision_model,
     messages=messages,
     temperature=0.3,
-    max_tokens=800
+    max_tokens=2000  # Increased to avoid truncation
 )

 vlm_response = response.choices[0].message.content if response.choices else None
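For context, a minimal sketch of the surrounding call, assuming an OpenAI-compatible chat-completions client (the actual client object and how `messages` is built are not shown in this hunk and are assumptions here). The point of the change is that `max_tokens` caps the completion length, and a cap of 800 was truncating the model's JSON output mid-object, making it unparseable.

# Sketch only: assumes the OpenAI Python SDK (or a compatible client);
# the real code may construct the client and messages differently.
from openai import OpenAI

client = OpenAI()  # assumption: API key/base URL come from the environment


def query_vlm(vision_model: str, messages: list[dict]) -> str | None:
    response = client.chat.completions.create(
        model=vision_model,
        messages=messages,
        temperature=0.3,
        max_tokens=2000,  # raised from 800 so long JSON responses are not cut off
    )
    # Guard against an empty choices list before reading the message content.
    return response.choices[0].message.content if response.choices else None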