Commit a96e8d9

fix bug
1 parent 8577c71 commit a96e8d9

2 files changed: +7 −4 lines changed


chatchat-server/chatchat/server/utils.py

Lines changed: 6 additions & 3 deletions
@@ -185,20 +185,23 @@ def get_default_llm():
     available_llms = list(get_config_models(model_type="llm").keys())
     if Settings.model_settings.DEFAULT_LLM_MODEL in available_llms:
         return Settings.model_settings.DEFAULT_LLM_MODEL
-    else:
+    elif available_llms:
         logger.warning(f"default llm model {Settings.model_settings.DEFAULT_LLM_MODEL} is not found in available llms, "
                        f"using {available_llms[0]} instead")
         return available_llms[0]
-
+    else:
+        logger.error("can not find an available llm model")
 
 def get_default_embedding():
     available_embeddings = list(get_config_models(model_type="embed").keys())
     if Settings.model_settings.DEFAULT_EMBEDDING_MODEL in available_embeddings:
         return Settings.model_settings.DEFAULT_EMBEDDING_MODEL
-    else:
+    elif available_embeddings:
         logger.warning(f"default embedding model {Settings.model_settings.DEFAULT_EMBEDDING_MODEL} is not found in "
                        f"available embeddings, using {available_embeddings[0]} instead")
         return available_embeddings[0]
+    else:
+        logger.error("can not find an available embedding model")
 
 
 def get_history_len() -> int:
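The fix guards against an empty model list: the old else branch indexed available_llms[0] (or available_embeddings[0]) unconditionally, which raises IndexError when no models are configured. The new elif only falls back when the list is non-empty; an empty list now logs an error instead, and the function implicitly returns None. Below is a minimal self-contained sketch of the same pattern; pick_default_model and its stub logger are hypothetical, standing in for the real get_config_models/Settings machinery:

import logging
from typing import List, Optional

logger = logging.getLogger(__name__)

def pick_default_model(preferred: str, available: List[str]) -> Optional[str]:
    # Hypothetical standalone version of the fixed selection logic.
    if preferred in available:
        return preferred
    elif available:
        # Non-empty list: fall back to the first available model.
        logger.warning(f"default model {preferred} is not found in available models, "
                       f"using {available[0]} instead")
        return available[0]
    else:
        # Empty list: the pre-fix code would have raised IndexError on available[0].
        logger.error("can not find an available model")
        return None

print(pick_default_model("glm-4", ["qwen2.5-instruct"]))  # qwen2.5-instruct
print(pick_default_model("glm-4", []))                    # None (error logged)

Note that callers still need to handle the None return, since the fixed functions log the error but do not raise.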

chatchat-server/chatchat/webui_pages/utils.py

Lines changed: 1 addition & 1 deletion
@@ -990,7 +990,7 @@ def check_model_supports_streaming(llm_model: str):
     Returns True or False
     """
     # todo: implement finer-grained logic for deciding whether a model supports streaming output
-    if llm_model == "qwen2.5-instruct":
+    if llm_model.startswith("qwen"):
         return False
     else:
         return True
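The streaming check now matches by prefix rather than exact model id, so every qwen-family model is treated as non-streaming, not just qwen2.5-instruct. A small illustration with the function body from the diff (the return type hint and example calls are additions):

def check_model_supports_streaming(llm_model: str) -> bool:
    # Returns True or False; the prefix match covers the whole qwen family.
    if llm_model.startswith("qwen"):
        return False
    else:
        return True

assert check_model_supports_streaming("qwen2.5-instruct") is False  # old exact match
assert check_model_supports_streaming("qwen2-72b") is False         # newly covered
assert check_model_supports_streaming("glm-4") is True

As the todo in the diff itself notes, this remains a coarse heuristic rather than a per-model capability lookup.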
