@@ -869,7 +869,10 @@ def decode(self, *args, **kwargs):
869
869
logger .error (trimmed_format_exc ())
870
870
871
871
# -=-=-=-=-=-=- Alibaba Cloud Bailian (Tongyi / Qwen) - online models -=-=-=-=-=-=-
# Gate list: the Qwen bridge below is only imported/registered when at least one
# of these names appears in AVAIL_LLM_MODELS, so every name here MUST exactly
# match a key registered in the model dict (e.g. "dashscope-qwen3-32b").
qwen_models = [
    "qwen-max-latest", "qwen-max-2025-01-25", "qwen-max", "qwen-turbo", "qwen-plus",
    "dashscope-deepseek-r1", "dashscope-deepseek-v3",
    # NOTE(review): fixed typo "dashscope-qwen3-qwen3-32b" -> "dashscope-qwen3-32b";
    # the registered dict key is "dashscope-qwen3-32b", so the typo'd name could
    # never match a real model and the correct name never passed this gate.
    "dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-32b",
]
873
876
if any (item in qwen_models for item in AVAIL_LLM_MODELS ):
874
877
try :
875
878
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
@@ -938,6 +941,34 @@ def decode(self, *args, **kwargs):
938
941
"max_token" : 57344 ,
939
942
"tokenizer" : tokenizer_gpt35 ,
940
943
"token_cnt" : get_token_num_gpt35 ,
944
+ },
945
+ "dashscope-qwen3-14b" : {
946
+ "fn_with_ui" : qwen_ui ,
947
+ "fn_without_ui" : qwen_noui ,
948
+ "enable_reasoning" : True ,
949
+ "can_multi_thread" : True ,
950
+ "endpoint" : None ,
951
+ "max_token" : 129024 ,
952
+ "tokenizer" : tokenizer_gpt35 ,
953
+ "token_cnt" : get_token_num_gpt35 ,
954
+ },
955
+ "dashscope-qwen3-235b-a22b" : {
956
+ "fn_with_ui" : qwen_ui ,
957
+ "fn_without_ui" : qwen_noui ,
958
+ "can_multi_thread" : True ,
959
+ "endpoint" : None ,
960
+ "max_token" : 129024 ,
961
+ "tokenizer" : tokenizer_gpt35 ,
962
+ "token_cnt" : get_token_num_gpt35 ,
963
+ },
964
+ "dashscope-qwen3-32b" : {
965
+ "fn_with_ui" : qwen_ui ,
966
+ "fn_without_ui" : qwen_noui ,
967
+ "can_multi_thread" : True ,
968
+ "endpoint" : None ,
969
+ "max_token" : 129024 ,
970
+ "tokenizer" : tokenizer_gpt35 ,
971
+ "token_cnt" : get_token_num_gpt35 ,
941
972
}
942
973
})
943
974
except :
0 commit comments