feat: Add VLM config to config.example.yaml with config.yaml support
@@ -17,6 +17,20 @@ llm:
 # DeepSeek: base_url: "https://api.deepseek.com" model: "deepseek-chat"
 # Ollama (Local): base_url: "http://localhost:11434/v1" model: "llama3.2"
 
+# ==================== VLM Configuration (Vision Language Model) ====================
+# Used for character analysis and image understanding
+# If not configured, will try to use LLM config with vision model auto-detection
+vlm:
+  provider: "qwen"  # Options: qwen, glm, openai
+  api_key: ""       # Leave empty to use DASHSCOPE_API_KEY or VLM_API_KEY env var
+  base_url: ""      # Leave empty for auto-detection based on provider
+  model: ""         # Leave empty for default model based on provider
+
+# VLM Provider presets:
+# Qwen (通义千问): provider: "qwen"   model: "qwen-vl-plus" or "qwen-vl-max" or "qwen3-vl-plus"
+# GLM (智谱):      provider: "glm"    model: "glm-4v-flash" or "glm-4v"
+# OpenAI:          provider: "openai" model: "gpt-4-vision-preview" or "gpt-4o"
+
 # ==================== ComfyUI Configuration ====================
 comfyui:
   # Global ComfyUI settings
||||