feat: support kimi-k2.5 with multimodal thinking
This commit is contained in: parent d8cffa30cc, commit 8a7cc5d9c6.
@ -9,6 +9,7 @@ KIMI_BASE = _env("API_BASE_KIMI", _env("AGENT_API_BASE_URL", "https://api.moonsh
|
||||
KIMI_KEY = _env("API_KEY_KIMI", _env("AGENT_API_KEY", ""))
|
||||
KIMI_FAST_MODEL = _env("MODEL_KIMI_FAST", _env("AGENT_MODEL_ID", "kimi-k2-0905-preview"))
|
||||
KIMI_THINK_MODEL = _env("MODEL_KIMI_THINK", _env("AGENT_THINKING_MODEL_ID", "kimi-k2-thinking"))
|
||||
KIMI_25_MODEL = _env("MODEL_KIMI_25", "kimi-k2.5")
|
||||
|
||||
# DeepSeek
|
||||
DEEPSEEK_BASE = _env("API_BASE_DEEPSEEK", "https://api.deepseek.com")
|
||||
@ -31,6 +32,25 @@ MODEL_PROFILES = {
|
||||
"fast_only": False,
|
||||
"name": "Kimi-k2"
|
||||
},
|
||||
"kimi-k2.5": {
|
||||
"fast": {
|
||||
"base_url": KIMI_BASE,
|
||||
"api_key": KIMI_KEY,
|
||||
"model_id": KIMI_25_MODEL,
|
||||
"max_tokens": None,
|
||||
"extra_params": {"thinking": {"type": "disabled"}}
|
||||
},
|
||||
"thinking": {
|
||||
"base_url": KIMI_BASE,
|
||||
"api_key": KIMI_KEY,
|
||||
"model_id": KIMI_25_MODEL,
|
||||
"max_tokens": None,
|
||||
"extra_params": {"thinking": {"type": "enabled"}}
|
||||
},
|
||||
"supports_thinking": True,
|
||||
"fast_only": False,
|
||||
"name": "Kimi-k2.5"
|
||||
},
|
||||
"deepseek": {
|
||||
"fast": {"base_url": DEEPSEEK_BASE, "api_key": DEEPSEEK_KEY, "model_id": DEEPSEEK_FAST_MODEL, "max_tokens": 8192},
|
||||
"thinking": {
|
||||
@ -77,6 +97,11 @@ MODEL_PROMPT_OVERRIDES = {
|
||||
"thinking_model_line": "思考模式时,第一次请求的模型不是 Kimi-k2,而是 Kimi-k2-Thinking,一个更善于分析复杂问题、规划复杂流程的模型,在后续请求时模型会换回 Kimi-k2。",
|
||||
"deep_thinking_line": "在深度思考模式中,请求的模型是 Kimi-k2-Thinking,一个更善于分析复杂问题、规划复杂流程的模型。"
|
||||
},
|
||||
"kimi-k2.5": {
|
||||
"model_description": "你的基础模型是 Kimi-k2.5,支持图文多模态,并通过 thinking 参数开启/关闭思考能力。",
|
||||
"thinking_model_line": "思考模式时使用同一个 Kimi-k2.5 模型,但会在请求中注入 thinking={\"type\": \"enabled\"} 来开启思考;快速模式则传递 thinking={\"type\": \"disabled\"}。",
|
||||
"deep_thinking_line": "深度思考模式下,所有请求都会携带 thinking={\"type\": \"enabled\"},以获得持续的推理能力。"
|
||||
},
|
||||
"deepseek": {
|
||||
"model_description": "你的基础模型是 DeepSeek-V3.2(deepseek-chat),由 DeepSeek 提供,数学与推理能力较强,当前通过官方 API 调用。",
|
||||
"thinking_model_line": "思考模式时,第一次请求使用 DeepSeek-Reasoner,一个强化推理的模型,后续请求会切回 DeepSeek-V3.2。",
|
||||
|
||||
@ -98,7 +98,7 @@ class MainTerminal:
|
||||
# 初始化组件
|
||||
self.api_client = DeepSeekClient(thinking_mode=self.thinking_mode)
|
||||
self.api_client.set_deep_thinking_mode(self.deep_thinking_mode)
|
||||
self.model_key = "kimi"
|
||||
self.model_key = "kimi-k2.5"
|
||||
self.model_profile = get_model_profile(self.model_key)
|
||||
self.apply_model_profile(self.model_profile)
|
||||
self.context_manager = ContextManager(project_path, data_dir=str(self.data_dir))
|
||||
@ -1826,8 +1826,8 @@ class MainTerminal:
|
||||
}
|
||||
}
|
||||
]
|
||||
# Qwen-VL 自带多模态能力,不向其暴露额外的 vlm_analyze 工具,避免重复与误导
|
||||
if getattr(self, "model_key", None) == "qwen3-vl-plus":
|
||||
# 视觉模型(Qwen-VL / Kimi-k2.5)自带多模态能力,不再暴露 vlm_analyze,改为 view_image
|
||||
if getattr(self, "model_key", None) in {"qwen3-vl-plus", "kimi-k2.5"}:
|
||||
tools = [
|
||||
tool for tool in tools
|
||||
if (tool.get("function") or {}).get("name") != "vlm_analyze"
|
||||
@ -2491,7 +2491,7 @@ class MainTerminal:
|
||||
def build_messages(self, context: Dict, user_input: str) -> List[Dict]:
|
||||
"""构建消息列表(添加终端内容注入)"""
|
||||
# 加载系统提示(Qwen-VL 使用专用提示)
|
||||
prompt_name = "main_system_qwenvl" if getattr(self, "model_key", "kimi") == "qwen3-vl-plus" else "main_system"
|
||||
prompt_name = "main_system_qwenvl" if getattr(self, "model_key", "kimi") in {"qwen3-vl-plus", "kimi-k2.5"} else "main_system"
|
||||
system_prompt = self.load_prompt(prompt_name)
|
||||
|
||||
# 格式化系统提示
|
||||
@ -2803,8 +2803,8 @@ class MainTerminal:
|
||||
|
||||
def set_model(self, model_key: str) -> str:
|
||||
profile = get_model_profile(model_key)
|
||||
if getattr(self.context_manager, "has_images", False) and model_key != "qwen3-vl-plus":
|
||||
raise ValueError("当前对话包含图片,仅支持 Qwen-VL")
|
||||
if getattr(self.context_manager, "has_images", False) and model_key not in {"qwen3-vl-plus", "kimi-k2.5"}:
|
||||
raise ValueError("当前对话包含图片,仅支持 Qwen-VL 或 Kimi-k2.5")
|
||||
self.model_key = model_key
|
||||
self.model_profile = profile
|
||||
# 将模型标识传递给底层 API 客户端,便于按模型做兼容处理
|
||||
|
||||
@ -36,7 +36,7 @@ DEFAULT_PERSONALIZATION_CONFIG: Dict[str, Any] = {
|
||||
"default_run_mode": None,
|
||||
"auto_generate_title": True,
|
||||
"tool_intent_enabled": True,
|
||||
"default_model": "kimi",
|
||||
"default_model": "kimi-k2.5",
|
||||
}
|
||||
|
||||
__all__ = [
|
||||
@ -109,7 +109,7 @@ def sanitize_personalization_payload(
|
||||
base.update(fallback)
|
||||
data = payload or {}
|
||||
allowed_tool_categories = set(TOOL_CATEGORIES.keys())
|
||||
allowed_models = {"kimi", "deepseek", "qwen3-max", "qwen3-vl-plus"}
|
||||
allowed_models = {"kimi", "kimi-k2.5", "deepseek", "qwen3-max", "qwen3-vl-plus"}
|
||||
|
||||
def _resolve_short_field(key: str) -> str:
|
||||
if key in data:
|
||||
@ -153,7 +153,7 @@ def sanitize_personalization_payload(
|
||||
if isinstance(chosen_model, str) and chosen_model in allowed_models:
|
||||
base["default_model"] = chosen_model
|
||||
elif base.get("default_model") not in allowed_models:
|
||||
base["default_model"] = "kimi"
|
||||
base["default_model"] = "kimi-k2.5"
|
||||
|
||||
return base
|
||||
|
||||
|
||||
@ -10,7 +10,7 @@
|
||||
- **自动化任务**:批量处理文件、执行重复性工作
|
||||
- **视觉理解**:你自带多模态能力,用户可以直接发送图片;如需主动查看本地图片,可调用 `view_image` 指定路径,系统会代发一条包含图片的用户消息供你查看。
|
||||
|
||||
## 图片分析(Qwen-VL 重点)
|
||||
## 图片分析(Kimi-k2.5/Qwen-VL 重点)
|
||||
当用户提出“这是什么”“识别文字/表格/票据”“找瑕疵/细节”“读屏/按钮含义”等图片分析任务时,优先采用下面的方法,保证细节充分、结论可验证:
|
||||
|
||||
### 基本流程(先粗后细)
|
||||
|
||||
@ -195,8 +195,8 @@ def handle_message(data):
|
||||
if not message and not images:
|
||||
emit('error', {'message': '消息不能为空'})
|
||||
return
|
||||
if images and getattr(terminal, "model_key", None) != "qwen3-vl-plus":
|
||||
emit('error', {'message': '当前模型不支持图片,请切换到 Qwen-VL'})
|
||||
if images and getattr(terminal, "model_key", None) not in {"qwen3-vl-plus", "kimi-k2.5"}:
|
||||
emit('error', {'message': '当前模型不支持图片,请切换到 Qwen-VL 或 Kimi-k2.5'})
|
||||
return
|
||||
|
||||
print(f"[WebSocket] 收到消息: {message}")
|
||||
|
||||
@ -918,7 +918,7 @@ def get_user_resources(username: Optional[str] = None) -> Tuple[Optional[WebTerm
|
||||
terminal.admin_policy_version = policy.get("updated_at")
|
||||
# 若当前模型被禁用,则回退到第一个可用模型
|
||||
if terminal.model_key in disabled_models:
|
||||
for candidate in ["kimi", "deepseek", "qwen3-vl-plus", "qwen3-max"]:
|
||||
for candidate in ["kimi-k2.5", "kimi", "deepseek", "qwen3-vl-plus", "qwen3-max"]:
|
||||
if candidate not in disabled_models:
|
||||
try:
|
||||
terminal.set_model(candidate)
|
||||
|
||||
@ -216,7 +216,7 @@ def get_user_resources(username: Optional[str] = None, workspace_id: Optional[st
|
||||
terminal.admin_policy_ui_blocks = policy.get("ui_blocks") or {}
|
||||
terminal.admin_policy_version = policy.get("updated_at")
|
||||
if terminal.model_key in disabled_models:
|
||||
for candidate in ["kimi", "deepseek", "qwen3-vl-plus", "qwen3-max"]:
|
||||
for candidate in ["kimi-k2.5", "kimi", "deepseek", "qwen3-vl-plus", "qwen3-max"]:
|
||||
if candidate not in disabled_models:
|
||||
try:
|
||||
terminal.set_model(candidate)
|
||||
|
||||
@ -216,8 +216,8 @@ def handle_message(data):
|
||||
if not message and not images:
|
||||
emit('error', {'message': '消息不能为空'})
|
||||
return
|
||||
if images and getattr(terminal, "model_key", None) != "qwen3-vl-plus":
|
||||
emit('error', {'message': '当前模型不支持图片,请切换到 Qwen-VL'})
|
||||
if images and getattr(terminal, "model_key", None) not in {"qwen3-vl-plus", "kimi-k2.5"}:
|
||||
emit('error', {'message': '当前模型不支持图片,请切换到 Qwen-VL 或 Kimi-k2.5'})
|
||||
return
|
||||
|
||||
print(f"[WebSocket] 收到消息: {message}")
|
||||
|
||||
@ -2398,10 +2398,10 @@ const appOptions = {
|
||||
return;
|
||||
}
|
||||
|
||||
if (hasImages && this.currentModelKey !== 'qwen3-vl-plus') {
|
||||
if (hasImages && !['qwen3-vl-plus', 'kimi-k2.5'].includes(this.currentModelKey)) {
|
||||
this.uiPushToast({
|
||||
title: '当前模型不支持图片',
|
||||
message: '请切换到 Qwen-VL 再发送图片',
|
||||
message: '请切换到 Qwen-VL 或 Kimi-k2.5 再发送图片',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
@ -2627,10 +2627,10 @@ const appOptions = {
|
||||
},
|
||||
|
||||
async openImagePicker() {
|
||||
if (this.currentModelKey !== 'qwen3-vl-plus') {
|
||||
if (!['qwen3-vl-plus', 'kimi-k2.5'].includes(this.currentModelKey)) {
|
||||
this.uiPushToast({
|
||||
title: '当前模型不支持图片',
|
||||
message: '请选择 Qwen-VL 后再发送图片',
|
||||
message: '请选择 Qwen-VL 或 Kimi-k2.5 后再发送图片',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
@ -2793,10 +2793,10 @@ const appOptions = {
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (this.conversationHasImages && key !== 'qwen3-vl-plus') {
|
||||
if (this.conversationHasImages && !['qwen3-vl-plus', 'kimi-k2.5'].includes(key)) {
|
||||
this.uiPushToast({
|
||||
title: '切换失败',
|
||||
message: '当前对话包含图片,仅支持 Qwen-VL',
|
||||
message: '当前对话包含图片,仅支持 Qwen-VL 或 Kimi-k2.5',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
|
||||
@ -18,7 +18,7 @@
|
||||
对话回顾
|
||||
</button>
|
||||
<button
|
||||
v-if="currentModelKey === 'qwen3-vl-plus'"
|
||||
v-if="['qwen3-vl-plus', 'kimi-k2.5'].includes(currentModelKey)"
|
||||
type="button"
|
||||
class="menu-entry"
|
||||
@click.stop="$emit('pick-images')"
|
||||
|
||||
@ -609,7 +609,8 @@ const policyStore = usePolicyStore();
|
||||
|
||||
const modelOptions = [
|
||||
{ id: 'deepseek', label: 'DeepSeek', desc: '通用 + 思考强化', value: 'deepseek' },
|
||||
{ id: 'kimi', label: 'Kimi', desc: '默认模型,兼顾通用对话', value: 'kimi' },
|
||||
{ id: 'kimi-k2.5', label: 'Kimi-k2.5', desc: '新版 Kimi,思考开关 + 图文多模态', value: 'kimi-k2.5', badge: '图文' },
|
||||
{ id: 'kimi', label: 'Kimi-k2', desc: '旧版 Kimi-k2,兼顾通用对话', value: 'kimi' },
|
||||
{ id: 'qwen3-max', label: 'Qwen-Max', desc: '仅快速模式,不支持思考', value: 'qwen3-max', badge: '仅快速' },
|
||||
{ id: 'qwen3-vl-plus', label: 'Qwen-VL', desc: '图文多模态,思考/快速均可', value: 'qwen3-vl-plus', badge: '图文' }
|
||||
] as const;
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
import { defineStore } from 'pinia';
|
||||
|
||||
export type ModelKey = 'kimi' | 'deepseek' | 'qwen3-max' | 'qwen3-vl-plus';
|
||||
export type ModelKey = 'kimi-k2.5' | 'kimi' | 'deepseek' | 'qwen3-max' | 'qwen3-vl-plus';
|
||||
|
||||
export interface ModelOption {
|
||||
key: ModelKey;
|
||||
@ -17,8 +17,15 @@ interface ModelState {
|
||||
|
||||
export const useModelStore = defineStore('model', {
|
||||
state: (): ModelState => ({
|
||||
currentModelKey: 'kimi',
|
||||
currentModelKey: 'kimi-k2.5',
|
||||
models: [
|
||||
{
|
||||
key: 'kimi-k2.5',
|
||||
label: 'Kimi-k2.5',
|
||||
description: '新版 Kimi,支持图文 & 思考开关',
|
||||
fastOnly: false,
|
||||
supportsThinking: true
|
||||
},
|
||||
{
|
||||
key: 'kimi',
|
||||
label: 'Kimi-k2',
|
||||
|
||||
@ -65,7 +65,7 @@ const defaultForm = (): PersonalForm => ({
|
||||
thinking_interval: null,
|
||||
disabled_tool_categories: [],
|
||||
default_run_mode: null,
|
||||
default_model: 'kimi'
|
||||
default_model: 'kimi-k2.5'
|
||||
});
|
||||
|
||||
const defaultExperimentState = (): ExperimentState => ({
|
||||
@ -179,7 +179,7 @@ export const usePersonalizationStore = defineStore('personalization', {
|
||||
applyPersonalizationData(data: any) {
|
||||
// 若后端未返回默认模型(旧版本接口),保持当前已选模型而不是回退为 Kimi
|
||||
const fallbackModel =
|
||||
(this.form && typeof this.form.default_model === 'string' ? this.form.default_model : null) || 'kimi';
|
||||
(this.form && typeof this.form.default_model === 'string' ? this.form.default_model : null) || 'kimi-k2.5';
|
||||
this.form = {
|
||||
enabled: !!data.enabled,
|
||||
auto_generate_title: data.auto_generate_title !== false,
|
||||
@ -357,7 +357,7 @@ export const usePersonalizationStore = defineStore('personalization', {
|
||||
this.clearFeedback();
|
||||
},
|
||||
setDefaultModel(model: string | null) {
|
||||
const allowed = ['deepseek', 'kimi', 'qwen3-max', 'qwen3-vl-plus'];
|
||||
const allowed = ['deepseek', 'kimi-k2.5', 'kimi', 'qwen3-max', 'qwen3-vl-plus'];
|
||||
const target = typeof model === 'string' && allowed.includes(model) ? model : null;
|
||||
this.form = {
|
||||
...this.form,
|
||||
|
||||
@ -1223,7 +1223,7 @@ class ContextManager:
|
||||
"""构建消息列表(添加终端内容注入)"""
|
||||
# 加载系统提示(Qwen-VL 使用专用提示)
|
||||
model_key = getattr(self.main_terminal, "model_key", "kimi") if hasattr(self, "main_terminal") else "kimi"
|
||||
prompt_name = "main_system_qwenvl" if model_key == "qwen3-vl-plus" else "main_system"
|
||||
prompt_name = "main_system_qwenvl" if model_key in {"qwen3-vl-plus", "kimi-k2.5"} else "main_system"
|
||||
system_prompt = self.load_prompt(prompt_name)
|
||||
|
||||
# 格式化系统提示
|
||||
|
||||
Loading…
Reference in New Issue
Block a user