chore: Merge upstream/main with RunningHub 48G support and FAQ improvements

This commit is contained in:
empty
2026-01-06 17:48:02 +08:00
12 changed files with 1828 additions and 2138 deletions

View File

@@ -14,6 +14,7 @@
FAQ component for displaying frequently asked questions
"""
import re
from pathlib import Path
from typing import Optional
@@ -57,6 +58,50 @@ def load_faq_content(language: str) -> Optional[str]:
return None
def parse_faq_sections(content: str) -> list[tuple[str, str]]:
    """
    Split FAQ markdown into (question, answer) pairs.

    A question is any line of the form ``### <text>``; its answer is every
    line up to the next ``###`` heading. Text appearing before the first
    question — including a leading top-level ``# Title`` line — is discarded.

    Args:
        content: Raw markdown content.

    Returns:
        List of (question, answer) tuples, each stripped of surrounding
        whitespace.
    """
    body = content.split('\n')
    # Drop a leading top-level "# Title" heading only ("##"/"###" are kept).
    if body and body[0].startswith('#') and not body[0].startswith('##'):
        body = body[1:]

    # "###" followed by whitespace and non-empty question text.
    heading = re.compile(r'^###\s+(.+?)$')

    sections: list[tuple[str, str]] = []
    question: Optional[str] = None
    buffer: list[str] = []

    def flush() -> None:
        # Commit the section being accumulated, if any.
        if question is not None:
            sections.append((question, '\n'.join(buffer).strip()))

    for line in body:
        hit = heading.match(line)
        if hit is None:
            buffer.append(line)
            continue
        flush()
        question = hit.group(1).strip()
        buffer = []
    flush()
    return sections
def render_faq_sidebar():
"""
Render FAQ in the sidebar
@@ -77,14 +122,13 @@ def render_faq_sidebar():
if faq_content:
# Display FAQ in an expander, expanded by default
with st.expander(tr('faq.expand_to_view', fallback='FAQ'), expanded=True):
# Remove the first heading from FAQ content since we already show it above
lines = faq_content.split('\n')
# Skip the first line if it's a heading
if lines and lines[0].startswith('#'):
faq_content = '\n'.join(lines[1:])
# Parse FAQ into sections
sections = parse_faq_sections(faq_content)
# Display FAQ content
st.markdown(faq_content, unsafe_allow_html=True)
# Display each question in its own collapsible expander
for question, answer in sections:
with st.expander(question, expanded=False):
st.markdown(answer, unsafe_allow_html=True)
# Add a link to GitHub issues for more help
st.markdown(

View File

@@ -180,15 +180,35 @@ def render_advanced_settings():
f"(https://www.runninghub{'.cn' if get_language() == 'zh_CN' else '.ai'}/?inviteCode=bozpdlbj)"
)
# RunningHub concurrent limit
runninghub_concurrent_limit = st.number_input(
tr("settings.comfyui.runninghub_concurrent_limit"),
min_value=1,
max_value=10,
value=comfyui_config.get("runninghub_concurrent_limit", 1),
help=tr("settings.comfyui.runninghub_concurrent_limit_help"),
key="runninghub_concurrent_limit_input"
)
# RunningHub concurrent limit and instance type (in one row)
limit_col, instance_col = st.columns(2)
with limit_col:
runninghub_concurrent_limit = st.number_input(
tr("settings.comfyui.runninghub_concurrent_limit"),
min_value=1,
max_value=10,
value=comfyui_config.get("runninghub_concurrent_limit", 1),
help=tr("settings.comfyui.runninghub_concurrent_limit_help"),
key="runninghub_concurrent_limit_input"
)
with instance_col:
# Check if instance type is "plus" (48G VRAM enabled)
current_instance_type = comfyui_config.get("runninghub_instance_type") or ""
is_plus_enabled = current_instance_type == "plus"
# Instance type options with i18n
instance_options = [
tr("settings.comfyui.runninghub_instance_24g"),
tr("settings.comfyui.runninghub_instance_48g"),
]
runninghub_instance_type_display = st.selectbox(
tr("settings.comfyui.runninghub_instance_type"),
options=instance_options,
index=1 if is_plus_enabled else 0,
help=tr("settings.comfyui.runninghub_instance_type_help"),
key="runninghub_instance_type_input"
)
# Convert display value back to actual value
runninghub_48g_enabled = runninghub_instance_type_display == tr("settings.comfyui.runninghub_instance_48g")
# ====================================================================
# Action Buttons (full width at bottom)
@@ -206,11 +226,14 @@ def render_advanced_settings():
config_manager.set_llm_config(llm_api_key, llm_base_url, llm_model)
# Save ComfyUI configuration (optional fields, always save what's provided)
# Convert checkbox to instance type: True -> "plus", False -> ""
instance_type = "plus" if runninghub_48g_enabled else ""
config_manager.set_comfyui_config(
comfyui_url=comfyui_url if comfyui_url else None,
comfyui_api_key=comfyui_api_key if comfyui_api_key else None,
runninghub_api_key=runninghub_api_key if runninghub_api_key else None,
runninghub_concurrent_limit=int(runninghub_concurrent_limit)
runninghub_concurrent_limit=int(runninghub_concurrent_limit),
runninghub_instance_type=instance_type
)
# Only save to file if LLM config is valid

View File

@@ -197,6 +197,10 @@
"settings.comfyui.runninghub_get_api_key": "Get RunningHub API Key",
"settings.comfyui.runninghub_concurrent_limit": "Concurrent Limit",
"settings.comfyui.runninghub_concurrent_limit_help": "RunningHub concurrent execution limit (1-10), default is 1 for regular members, adjust based on your membership level",
"settings.comfyui.runninghub_instance_type": "Machine Spec",
"settings.comfyui.runninghub_instance_type_help": "Select RunningHub machine spec, 48G VRAM is suitable for large models or high-resolution generation (requires membership support)",
"settings.comfyui.runninghub_instance_24g": "24G VRAM",
"settings.comfyui.runninghub_instance_48g": "48G VRAM",
"tts.inference_mode": "Synthesis Mode",
"tts.mode.local": "Local Synthesis",
"tts.mode.comfyui": "ComfyUI Synthesis",

View File

@@ -197,6 +197,10 @@
"settings.comfyui.runninghub_get_api_key": "点此获取 RunningHub API Key",
"settings.comfyui.runninghub_concurrent_limit": "并发限制",
"settings.comfyui.runninghub_concurrent_limit_help": "RunningHub 并发执行数量1-10普通会员默认为1请根据您的会员等级调整",
"settings.comfyui.runninghub_instance_type": "机器规格",
"settings.comfyui.runninghub_instance_type_help": "选择 RunningHub 机器规格48G 显存适用于大模型或高分辨率生成(需要会员支持)",
"settings.comfyui.runninghub_instance_24g": "24G 显存",
"settings.comfyui.runninghub_instance_48g": "48G 显存",
"tts.inference_mode": "合成方式",
"tts.mode.local": "本地合成",
"tts.mode.comfyui": "ComfyUI 合成",