From 87b894bbdcf5daf6e8602edbfcd2d563dfa376a4 Mon Sep 17 00:00:00 2001
From: MasuRii
Date: Sun, 15 Feb 2026 15:34:27 +0800
Subject: [PATCH 1/3] feat(iflow): add support for glm-5, minimax-m2.5,
 qwen3-32b, tstars2.0, iflow-rome-30ba3b

Add five new models to the iFlow provider to maintain feature parity
with the competitor repository (router-for-me/CLIProxyAPI):

- glm-5: Added to HARDCODED_MODELS, ENABLE_THINKING_MODELS, and
  GLM_MODELS (thinking support with GLM-style clear_thinking handling)
- minimax-m2.5: Added to HARDCODED_MODELS and REASONING_SPLIT_MODELS
  (thinking support via reasoning_split boolean)
- qwen3-32b: Added to HARDCODED_MODELS and ENABLE_THINKING_MODELS
  (thinking support)
- tstars2.0: Added to HARDCODED_MODELS (multimodal assistant)
- iflow-rome-30ba3b: Added to HARDCODED_MODELS (iFlow Rome model)

Also update REASONING_PRESERVATION_MODELS_PREFIXES to include "glm-5"
and "tstars" prefixes, and alphabetically sort the HARDCODED_MODELS
list for maintainability.

Closes: #129
---
 .../providers/iflow_provider.py | 31 +++++++++++--------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/src/rotator_library/providers/iflow_provider.py b/src/rotator_library/providers/iflow_provider.py
index ae292719..eb69c87b 100644
--- a/src/rotator_library/providers/iflow_provider.py
+++ b/src/rotator_library/providers/iflow_provider.py
@@ -31,25 +31,30 @@
 HARDCODED_MODELS = [
     "glm-4.6",
     "glm-4.7",
-    "minimax-m2",
-    "minimax-m2.1",
-    "qwen3-coder-plus",
+    "glm-5",
+    "iflow-rome-30ba3b",
     "kimi-k2",
     "kimi-k2-0905",
     "kimi-k2-thinking",  # Seems to not work, but should
     "kimi-k2.5",  # Seems to not work, but should
+    "minimax-m2",
+    "minimax-m2.1",
+    "minimax-m2.5",
+    "qwen3-32b",
+    "qwen3-235b",
+    "qwen3-235b-a22b-instruct",
+    "qwen3-235b-a22b-thinking-2507",
+    "qwen3-coder-plus",
     "qwen3-max",
     "qwen3-max-preview",
-    "qwen3-235b-a22b-thinking-2507",
+    "qwen3-vl-plus",
     "deepseek-v3.2-reasoner",
     "deepseek-v3.2-chat",
     "deepseek-v3.2",  # seems to not work, but should. Use above variants instead
     "deepseek-v3.1",
     "deepseek-v3",
     "deepseek-r1",
-    "qwen3-vl-plus",
-    "qwen3-235b-a22b-instruct",
-    "qwen3-235b",
+    "tstars2.0",
 ]
 
 # OpenAI-compatible parameters supported by iFlow API
@@ -83,20 +88,22 @@
 ENABLE_THINKING_MODELS = {
     "glm-4.6",
     "glm-4.7",
+    "glm-5",
     "qwen3-max-preview",
+    "qwen3-32b",
     "deepseek-v3.2",
     "deepseek-v3.1",
 }
 
 # GLM models need additional clear_thinking=false when thinking is enabled
-GLM_MODELS = {"glm-4.6", "glm-4.7"}
+GLM_MODELS = {"glm-4.6", "glm-4.7", "glm-5"}
 
 # Models using reasoning_split (boolean) instead of enable_thinking
-REASONING_SPLIT_MODELS = {"minimax-m2", "minimax-m2.1"}
+REASONING_SPLIT_MODELS = {"minimax-m2", "minimax-m2.1", "minimax-m2.5"}
 
 # Models that benefit from reasoning_content preservation in message history
 # (for multi-turn conversations)
-REASONING_PRESERVATION_MODELS_PREFIXES = ("glm-4", "minimax-m2")
+REASONING_PRESERVATION_MODELS_PREFIXES = ("glm-4", "glm-5", "minimax-m2", "tstars")
 
 # Cache file path for reasoning content preservation
 _REASONING_CACHE_FILE = (
@@ -1022,9 +1029,7 @@ async def stream_handler(response_stream, attempt=1):
             else:
                 if not error_text:
                     content_type = response.headers.get("content-type", "")
-                    error_text = (
-                        f"(empty response body, content-type={content_type})"
-                    )
+                    error_text = f"(empty response body, content-type={content_type})"
                 error_msg = (
                     f"iFlow HTTP {response.status_code} error: {error_text}"
                 )
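Note on mechanics: per the comments in the diff above, thinking support
is driven by three module-level sets. A minimal sketch of how they map
onto request parameters follows; it is illustrative only, not the
provider's exact code. `apply_thinking_params` is a hypothetical helper
name, and `payload` is assumed to be an OpenAI-style request body that
already carries "model"; the sets are the module constants above.

    def apply_thinking_params(payload: dict) -> dict:
        # Hypothetical helper mirroring the semantics documented above.
        model = payload.get("model", "")
        if model in REASONING_SPLIT_MODELS:
            # minimax-m2* models take a reasoning_split boolean
            # instead of enable_thinking.
            payload["reasoning_split"] = True
        elif model in ENABLE_THINKING_MODELS:
            payload["enable_thinking"] = True
            if model in GLM_MODELS:
                # GLM models additionally need clear_thinking=false
                # when thinking is enabled.
                payload["clear_thinking"] = False
        return payload
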
From d55b566b2d164ed8274885bcce9f90393adb1cab Mon Sep 17 00:00:00 2001
From: MasuRii
Date: Thu, 19 Feb 2026 18:13:11 +0800
Subject: [PATCH 2/3] fix(iflow): address PR review feedback

Sort HARDCODED_MODELS alphabetically (deepseek before glm). Expand
REASONING_PRESERVATION_MODELS_PREFIXES to include deepseek and qwen,
and use broader prefix matching. Simplify the _inject_reasoning_content
docstring.

Refs: #130
---
 .../providers/iflow_provider.py | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/src/rotator_library/providers/iflow_provider.py b/src/rotator_library/providers/iflow_provider.py
index eb69c87b..4d60476f 100644
--- a/src/rotator_library/providers/iflow_provider.py
+++ b/src/rotator_library/providers/iflow_provider.py
@@ -29,6 +29,12 @@
 
 # Model list can be expanded as iFlow supports more models
 HARDCODED_MODELS = [
+    "deepseek-r1",
+    "deepseek-v3",
+    "deepseek-v3.1",
+    "deepseek-v3.2",  # seems to not work, but should. Use -chat/-reasoner variants instead
+    "deepseek-v3.2-chat",
+    "deepseek-v3.2-reasoner",
     "glm-4.6",
     "glm-4.7",
     "glm-5",
@@ -48,12 +54,6 @@
     "qwen3-max",
     "qwen3-max-preview",
     "qwen3-vl-plus",
-    "deepseek-v3.2-reasoner",
-    "deepseek-v3.2-chat",
-    "deepseek-v3.2",  # seems to not work, but should. Use above variants instead
-    "deepseek-v3.1",
-    "deepseek-v3",
-    "deepseek-r1",
     "tstars2.0",
 ]
 
@@ -103,7 +103,13 @@
 
 # Models that benefit from reasoning_content preservation in message history
 # (for multi-turn conversations)
-REASONING_PRESERVATION_MODELS_PREFIXES = ("glm-4", "glm-5", "minimax-m2", "tstars")
+REASONING_PRESERVATION_MODELS_PREFIXES = (
+    "deepseek",
+    "glm-",
+    "minimax-",
+    "qwen",
+    "tstars",
+)
 
 # Cache file path for reasoning content preservation
 _REASONING_CACHE_FILE = (
@@ -427,7 +433,7 @@ def _inject_reasoning_content(
     """
     Inject cached reasoning_content into assistant messages.
 
-    Only for models that benefit from reasoning preservation (GLM-4.x, MiniMax-M2.x).
+    Only for models that benefit from reasoning preservation.
 
     This is helpful for multi-turn conversations where the model may benefit
    from seeing its previous reasoning to maintain coherent thought chains.
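Note on the prefixes: the constant's name, and the rationale in patch 3
below, imply simple prefix matching, which is also why the constant is
a tuple rather than a set: str.startswith accepts a tuple of prefixes.
A sketch of the check under that assumption (`preserves_reasoning` is a
hypothetical name, not the provider's actual function):

    def preserves_reasoning(model: str) -> bool:
        # "glm-" covers glm-4.6/glm-4.7/glm-5; "qwen" covers every
        # qwen* model. Patch 3 below narrows these to avoid over-matching.
        return model.startswith(REASONING_PRESERVATION_MODELS_PREFIXES)
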
From 42a5cf2aa68a415d800456ca78747c25f8dded6d Mon Sep 17 00:00:00 2001
From: MasuRii
Date: Thu, 19 Feb 2026 18:18:21 +0800
Subject: [PATCH 3/3] fix(iflow): use specific prefixes for reasoning
 preservation models

Replace broad prefixes ("deepseek", "glm-", "minimax-", "qwen") with
specific model identifiers ("glm-4", "glm-5", "minimax-m2",
"qwen3-32b", "deepseek-v3.1", "deepseek-v3.2") in
REASONING_PRESERVATION_MODELS_PREFIXES to prevent unintended matches
on future models that may not support reasoning content preservation.
---
 src/rotator_library/providers/iflow_provider.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/rotator_library/providers/iflow_provider.py b/src/rotator_library/providers/iflow_provider.py
index 4d60476f..0ef670db 100644
--- a/src/rotator_library/providers/iflow_provider.py
+++ b/src/rotator_library/providers/iflow_provider.py
@@ -104,11 +104,13 @@
 # Models that benefit from reasoning_content preservation in message history
 # (for multi-turn conversations)
 REASONING_PRESERVATION_MODELS_PREFIXES = (
-    "deepseek",
-    "glm-",
-    "minimax-",
-    "qwen",
+    "glm-4",
+    "glm-5",
+    "minimax-m2",
     "tstars",
+    "qwen3-32b",
+    "deepseek-v3.1",
+    "deepseek-v3.2",
 )
 
 # Cache file path for reasoning content preservation
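The practical effect of the narrowing: under patch 2's broad tuple, any
future model id beginning with "qwen" or "deepseek" would silently opt
into reasoning preservation, while the specific prefixes make that
opt-in deliberate. A self-contained illustration (the model name is
hypothetical):

    # Broad prefixes (patch 2) vs. specific prefixes (patch 3).
    broad = ("deepseek", "glm-", "minimax-", "qwen", "tstars")
    specific = ("glm-4", "glm-5", "minimax-m2", "tstars",
                "qwen3-32b", "deepseek-v3.1", "deepseek-v3.2")

    model = "qwen4-nano"  # hypothetical future model
    print(model.startswith(broad))     # True  -> unintended match
    print(model.startswith(specific))  # False -> must be added deliberately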