fix: reset defaults for new conversations
This commit is contained in:
parent f5cf2fb7e7
commit f9b5aa2af9
@@ -367,6 +367,14 @@ class MainTerminal:
        self.tool_category_states[key] = False if key in disabled_categories else category.default_enabled
        self._refresh_disabled_tools()

        # Default model preference (apply it first, then handle the run mode)
        preferred_model = effective_config.get("default_model")
        if isinstance(preferred_model, str) and preferred_model != self.model_key:
            try:
                self.set_model(preferred_model)
            except Exception as exc:
                logger.warning("Ignoring invalid default model: %s (%s)", preferred_model, exc)

        preferred_mode = effective_config.get("default_run_mode")
        if isinstance(preferred_mode, str):
            normalized_mode = preferred_mode.strip().lower()
@@ -2,8 +2,10 @@

import json
from typing import Dict, List, Optional, Callable, TYPE_CHECKING
import os
from core.main_terminal import MainTerminal
from utils.logger import setup_logger
from modules.personalization_manager import load_personalization_config
try:
    from config import MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE
except ImportError:
@@ -68,8 +70,8 @@ class WebTerminal(MainTerminal):
        self.message_callback = message_callback
        self.web_mode = True

        # Set the API client to web mode (disables print)
        self.api_client.web_mode = True
        # Allow output by default (api_client.web_mode=False means _print is allowed); set WEB_API_SILENT=1 to silence it
        self.api_client.web_mode = bool(os.environ.get("WEB_API_SILENT"))

        # Re-initialize the terminal manager
        self.terminal_manager = TerminalManager(
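Note that `bool(os.environ.get("WEB_API_SILENT"))` treats any non-empty value as truthy, including "0"; only an unset or empty variable keeps printing enabled. A minimal sketch of the new gate:

import os

def web_api_silent() -> bool:
    # Truthy for any non-empty value (even "0"); False when unset or empty.
    return bool(os.environ.get("WEB_API_SILENT"))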
@@ -107,7 +109,34 @@ class WebTerminal(MainTerminal):
        Returns:
            Dict: information about the new conversation
        """
        if thinking_mode is None:
        prefer_defaults = thinking_mode is None and run_mode is None
        thinking_mode_explicit = thinking_mode is not None
        if prefer_defaults:
            # Prefer the personalization/default config; fall back to fast mode + the default model when nothing is configured
            try:
                prefs = load_personalization_config(self.data_dir)
            except Exception as exc:
                prefs = {}
                logger.warning("Failed to load personalization preferences, using built-in defaults: %s", exc)

            # Treat a new conversation as a "clean" session: clear the image flag so the model can be switched freely
            self.context_manager.has_images = False

            preferred_model = prefs.get("default_model") or "kimi"
            try:
                self.set_model(preferred_model)
            except Exception as exc:
                logger.warning("Ignoring invalid default model %s: %s", preferred_model, exc)

            preferred_mode = prefs.get("default_run_mode")
            if isinstance(preferred_mode, str) and preferred_mode.lower() in {"fast", "thinking", "deep"}:
                try:
                    self.set_run_mode(preferred_mode.lower())
                except ValueError as exc:
                    logger.warning("Ignoring invalid default run mode %s: %s", preferred_mode, exc)
            else:
                # Fall back to fast mode when no default mode is configured
                self.set_run_mode("fast")
            thinking_mode = self.thinking_mode

        if isinstance(run_mode, str):
@@ -116,6 +145,15 @@ class WebTerminal(MainTerminal):
                thinking_mode = self.thinking_mode
            except ValueError:
                logger.warning("Invalid run_mode argument: %s", run_mode)
        elif thinking_mode_explicit:
            # run_mode was not given explicitly but the thinking switch was; derive the mode from the boolean
            try:
                self.set_run_mode("thinking" if bool(thinking_mode) else "fast")
            except ValueError:
                pass

        if thinking_mode is None:
            thinking_mode = self.thinking_mode

        try:
            conversation_id = self.context_manager.start_new_conversation(
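Taken together, the two hunks above give new conversations a clear precedence: explicit arguments win, then the saved personalization defaults, then fast mode with the built-in default model. A standalone sketch of the fallback branch (illustrative only, not the project's API):

from typing import Tuple

def resolve_conversation_defaults(prefs: dict) -> Tuple[str, str]:
    """Illustrative: pick (model, run_mode) for a brand-new conversation when the
    caller passed neither run_mode nor thinking_mode."""
    model = prefs.get("default_model") or "kimi"
    mode = prefs.get("default_run_mode")
    if isinstance(mode, str) and mode.lower() in {"fast", "thinking", "deep"}:
        return model, mode.lower()
    return model, "fast"  # no configured default -> fast mode

# resolve_conversation_defaults({}) -> ("kimi", "fast")
# resolve_conversation_defaults({"default_model": "deepseek", "default_run_mode": "Deep"}) -> ("deepseek", "deep")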
@@ -36,6 +36,7 @@ DEFAULT_PERSONALIZATION_CONFIG: Dict[str, Any] = {
    "default_run_mode": None,
    "auto_generate_title": True,
    "tool_intent_enabled": True,
    "default_model": "kimi",
}

__all__ = [
@@ -84,8 +85,13 @@ def load_personalization_config(base_dir: PathLike) -> Dict[str, Any]:
        return ensure_personalization_config(base_dir)
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        return sanitize_personalization_payload(data)
            raw = json.load(f)
        sanitized = sanitize_personalization_payload(raw)
        # If fields were missing (e.g. the default model) or the data was normalized, write the file back so the next read no longer sees the old format
        if sanitized != raw:
            with open(path, "w", encoding="utf-8") as wf:
                json.dump(sanitized, wf, ensure_ascii=False, indent=2)
        return sanitized
    except (json.JSONDecodeError, OSError):
        # Reset to the default config so errors do not block loading
        with open(path, "w", encoding="utf-8") as f:
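The write-back above keeps older config files in step with the current schema. The same pattern in a generic, self-contained form (the file layout and defaults here are illustrative, not the project's):

import json
from pathlib import Path

DEFAULTS = {"default_model": "kimi", "default_run_mode": None}  # illustrative defaults

def load_with_backfill(path: Path) -> dict:
    raw = json.loads(path.read_text(encoding="utf-8"))
    merged = {**DEFAULTS, **raw}   # add any newly introduced keys
    if merged != raw:              # schema drifted: persist the upgraded shape
        path.write_text(json.dumps(merged, ensure_ascii=False, indent=2), encoding="utf-8")
    return merged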
@@ -103,6 +109,7 @@ def sanitize_personalization_payload(
    base.update(fallback)
    data = payload or {}
    allowed_tool_categories = set(TOOL_CATEGORIES.keys())
    allowed_models = {"kimi", "deepseek", "qwen3-max", "qwen3-vl-plus"}

    def _resolve_short_field(key: str) -> str:
        if key in data:
@@ -140,6 +147,14 @@ def sanitize_personalization_payload(
        base["default_run_mode"] = _sanitize_run_mode(data.get("default_run_mode"))
    else:
        base["default_run_mode"] = _sanitize_run_mode(base.get("default_run_mode"))

    # Default model
    chosen_model = data.get("default_model", base.get("default_model"))
    if isinstance(chosen_model, str) and chosen_model in allowed_models:
        base["default_model"] = chosen_model
    elif base.get("default_model") not in allowed_models:
        base["default_model"] = "kimi"

    return base
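The allow-list guard means an unknown model string can never end up in the stored config; a tiny standalone illustration of the fallback order (incoming value, then current value, then "kimi"):

ALLOWED_MODELS = {"kimi", "deepseek", "qwen3-max", "qwen3-vl-plus"}

def sanitize_default_model(value, current="kimi"):
    # Keep a valid incoming value, otherwise keep the current value, otherwise fall back to "kimi".
    if isinstance(value, str) and value in ALLOWED_MODELS:
        return value
    return current if current in ALLOWED_MODELS else "kimi"

# sanitize_default_model("deepseek")                    -> "deepseek"
# sanitize_default_model("gpt-4", current="qwen3-max")  -> "qwen3-max"
# sanitize_default_model(None, current="bogus")         -> "kimi"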
@@ -182,6 +182,51 @@
            </div>
          </div>
        </section>
        <section v-else-if="activeTab === 'model'" key="model" class="personal-page behavior-page">
          <div class="behavior-section">
            <div class="behavior-field">
              <div class="behavior-field-header">
                <span class="field-title">Default model</span>
                <p class="field-desc">Choose the preferred model for new conversations and for the first request after login; it takes effect together with the default thinking mode.</p>
              </div>
              <div class="run-mode-options model-options">
                <button
                  v-for="option in modelOptions"
                  :key="option.id"
                  type="button"
                  class="run-mode-card"
                  :class="{ active: form.default_model === option.value }"
                  :aria-pressed="form.default_model === option.value"
                  @click.prevent="setDefaultModel(option.value)"
                >
                  <div class="run-mode-card-header">
                    <span class="run-mode-title">{{ option.label }}</span>
                    <span v-if="option.badge" class="run-mode-badge">{{ option.badge }}</span>
                  </div>
                  <p class="run-mode-desc">{{ option.desc }}</p>
                </button>
              </div>
              <p class="behavior-hint">
                Qwen-Max only supports fast mode; Qwen-VL does not support deep thinking mode. A hint is shown when an incompatible combination is selected.
              </p>
            </div>
          </div>
          <div class="personal-actions-row">
            <div class="personal-form-actions card-aligned">
              <div class="personal-status-group">
                <transition name="personal-status-fade">
                  <span class="status success" v-if="status">{{ status }}</span>
                </transition>
                <transition name="personal-status-fade">
                  <span class="status error" v-if="error">{{ error }}</span>
                </transition>
              </div>
              <button type="submit" class="primary" :disabled="saving">
                {{ saving ? 'Saving...' : 'Save settings' }}
              </button>
            </div>
          </div>
        </section>
        <section v-else-if="activeTab === 'behavior'" key="behavior" class="personal-page behavior-page">
          <div class="behavior-section">
            <div class="behavior-field">
@@ -475,6 +520,7 @@ import { ref, computed } from 'vue';
import { storeToRefs } from 'pinia';
import { usePersonalizationStore } from '@/stores/personalization';
import { useResourceStore } from '@/stores/resource';
import { useUiStore } from '@/stores/ui';
import { useTheme } from '@/utils/theme';
import type { ThemeKey } from '@/utils/theme';
@@ -482,6 +528,7 @@ defineOptions({ name: 'PersonalizationDrawer' });

const personalization = usePersonalizationStore();
const resourceStore = useResourceStore();
const uiStore = useUiStore();
const {
  visible,
  loading,
@@ -501,12 +548,13 @@ const {

const baseTabs = [
  { id: 'preferences', label: 'Personalization' },
  { id: 'model', label: 'Model preference', description: 'Default model selection' },
  { id: 'behavior', label: 'Model behavior' },
  { id: 'theme', label: 'Theme', description: 'Light / Dark / Claude' },
  { id: 'experiments', label: 'Experimental features', description: 'Liquid Glass' }
] as const;

type PersonalTab = 'preferences' | 'behavior' | 'theme' | 'experiments' | 'admin-monitor';
type PersonalTab = 'preferences' | 'model' | 'behavior' | 'theme' | 'experiments' | 'admin-monitor';

const isAdmin = computed(() => (resourceStore.usageQuota.role || '').toLowerCase() === 'admin');
@@ -529,6 +577,13 @@ const runModeOptions: Array<{ id: string; label: string; desc: string; value: Ru
  { id: 'deep', label: 'Deep thinking', desc: 'Use the thinking model for the whole conversation', value: 'deep' }
];

const modelOptions = [
  { id: 'deepseek', label: 'DeepSeek', desc: 'General purpose + enhanced thinking', value: 'deepseek' },
  { id: 'kimi', label: 'Kimi', desc: 'Default model, balanced for general conversation', value: 'kimi' },
  { id: 'qwen3-max', label: 'Qwen-Max', desc: 'Fast mode only, no thinking support', value: 'qwen3-max', badge: 'Fast only' },
  { id: 'qwen3-vl-plus', label: 'Qwen-VL', desc: 'Multimodal (image + text), thinking or fast', value: 'qwen3-vl-plus', badge: 'Vision' }
] as const;

const thinkingPresets = [
  { id: 'low', label: 'Low', value: 10 },
  { id: 'medium', label: 'Medium', value: 5 },
@@ -583,9 +638,39 @@ const isRunModeActive = (value: RunModeValue) => {
};

const setDefaultRunMode = (value: RunModeValue) => {
  if (checkModeModelConflict(value, form.value.default_model)) {
    return;
  }
  personalization.setDefaultRunMode(value);
};

const setDefaultModel = (value: string) => {
  if (checkModeModelConflict(form.value.default_run_mode, value)) {
    return;
  }
  personalization.setDefaultModel(value);
};

const checkModeModelConflict = (mode: RunModeValue, model: string | null): boolean => {
  const warnings: string[] = [];
  if (model === 'qwen3-max' && mode && mode !== 'fast') {
    warnings.push('Qwen-Max only supports fast mode; the previous setting was kept.');
  }
  if (model === 'qwen3-vl-plus' && mode === 'deep') {
    warnings.push('Qwen-VL does not support deep thinking mode; the previous setting was kept.');
  }
  if (warnings.length) {
    uiStore.pushToast({
      title: 'Model / thinking mode incompatible',
      message: warnings.join(' '),
      type: 'warning',
      duration: 6000
    });
    return true;
  }
  return false;
};

const handleThinkingInput = (event: Event) => {
  const target = event.target as HTMLInputElement;
  if (!target.value) {
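The same compatibility rules can be expressed as one table-driven check; a hedged Python sketch of the two rules the drawer enforces (not part of the commit, which implements this in the Vue component above):

from typing import Optional

# Illustrative only: Qwen-Max is fast-only; Qwen-VL cannot use deep mode.
INCOMPATIBLE = {
    "qwen3-max": {"thinking", "deep"},
    "qwen3-vl-plus": {"deep"},
}

def mode_model_conflict(mode: Optional[str], model: Optional[str]) -> bool:
    if not mode or not model:
        return False
    return mode in INCOMPATIBLE.get(model, set())

# mode_model_conflict("deep", "qwen3-max")     -> True
# mode_model_conflict("fast", "qwen3-max")     -> False
# mode_model_conflict("deep", "qwen3-vl-plus") -> True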
@@ -1,8 +1,8 @@
<template>
  <div class="token-drawer" v-if="visible" :class="{ collapsed }">
    <div class="token-display-panel">
      <button class="token-close-btn" type="button" @click="emit('toggle')">
        ✕
      <button class="token-close-btn" type="button" @click="emit('toggle')" aria-label="Collapse usage stats">
        <span class="sr-only">Close</span>
      </button>
      <div class="token-panel-content">
        <div class="usage-dashboard">
@@ -14,6 +14,7 @@ interface PersonalForm {
  thinking_interval: number | null;
  disabled_tool_categories: string[];
  default_run_mode: RunMode | null;
  default_model: string | null;
}

interface LiquidGlassPosition {
@@ -63,7 +64,8 @@ const defaultForm = (): PersonalForm => ({
  considerations: [],
  thinking_interval: null,
  disabled_tool_categories: [],
  default_run_mode: null
  default_run_mode: null,
  default_model: 'kimi'
});

const defaultExperimentState = (): ExperimentState => ({
@@ -175,6 +177,9 @@ export const usePersonalizationStore = defineStore('personalization', {
      }
    },
    applyPersonalizationData(data: any) {
      // If the backend did not return a default model (older API versions), keep the currently selected model instead of falling back to Kimi
      const fallbackModel =
        (this.form && typeof this.form.default_model === 'string' ? this.form.default_model : null) || 'kimi';
      this.form = {
        enabled: !!data.enabled,
        auto_generate_title: data.auto_generate_title !== false,
@@ -189,7 +194,8 @@ export const usePersonalizationStore = defineStore('personalization', {
        default_run_mode:
          typeof data.default_run_mode === 'string' && RUN_MODE_OPTIONS.includes(data.default_run_mode as RunMode)
            ? data.default_run_mode as RunMode
            : null
            : null,
        default_model: typeof data.default_model === 'string' ? data.default_model : fallbackModel
      };
      this.clearFeedback();
    },
@@ -350,6 +356,15 @@ export const usePersonalizationStore = defineStore('personalization', {
      };
      this.clearFeedback();
    },
    setDefaultModel(model: string | null) {
      const allowed = ['deepseek', 'kimi', 'qwen3-max', 'qwen3-vl-plus'];
      const target = typeof model === 'string' && allowed.includes(model) ? model : null;
      this.form = {
        ...this.form,
        default_model: target
      };
      this.clearFeedback();
    },
    applyTonePreset(preset: string) {
      if (!preset) {
        return;
@@ -168,22 +168,20 @@
  position: absolute;
  top: 12px;
  left: 12px;
  width: 28px;
  height: 28px;
  border: 1px solid var(--claude-border);
  width: 14px;
  height: 14px;
  border: 1px solid #e0443e;
  border-radius: 50%;
  background: rgba(255, 255, 255, 0.8);
  color: var(--claude-text);
  font-size: 14px;
  display: grid;
  place-items: center;
  background: #ff5f56;
  display: block;
  padding: 0;
  cursor: pointer;
  transition: background 0.2s ease, transform 0.2s ease;
  box-shadow: 0 8px 18px rgba(0, 0, 0, 0.08);
  box-shadow: 0 2px 4px rgba(0, 0, 0, 0.16);
}

.token-close-btn:hover {
  background: var(--claude-surface);
  background: #ff3b30;
  transform: translateY(-1px);
}
@@ -191,6 +189,17 @@
  transform: translateY(0);
}

.sr-only {
  position: absolute;
  width: 1px;
  height: 1px;
  padding: 0;
  margin: -1px;
  overflow: hidden;
  clip: rect(0, 0, 0, 0);
  border: 0;
}

.panel-row {
  display: grid;
  grid-template-columns: repeat(auto-fit, minmax(360px, 1fr));
@@ -2,6 +2,7 @@

import json
from typing import Dict, List, Optional, Callable
import os
from core.main_terminal import MainTerminal
from utils.logger import setup_logger
try:
@@ -55,8 +56,8 @@ class WebTerminal(MainTerminal):
        self.message_callback = message_callback
        self.web_mode = True

        # Set the API client to web mode (disables print)
        self.api_client.web_mode = True
        # Allow output by default to ease debugging (set the WEB_API_SILENT=1 environment variable to silence it)
        self.api_client.web_mode = bool(os.environ.get("WEB_API_SILENT"))

        # Re-initialize the terminal manager
        self.terminal_manager = TerminalManager(
@@ -335,6 +335,18 @@ class DeepSeekClient:
            "stream": stream,
            "max_tokens": max_tokens
        }
        # Some platforms (e.g. Qwen, DeepSeek) only return usage in the final streamed chunk when it is requested explicitly
        if stream:
            should_include_usage = False
            if self.model_key in {"qwen3-max", "qwen3-vl-plus", "deepseek"}:
                should_include_usage = True
            # Fallback: recognize OpenAI-compatible providers from the base_url
            if api_config["base_url"]:
                lower_url = api_config["base_url"].lower()
                if any(keyword in lower_url for keyword in ["dashscope", "aliyuncs", "deepseek.com"]):
                    should_include_usage = True
            if should_include_usage:
                payload.setdefault("stream_options", {})["include_usage"] = True
        # Inject extra model parameters (e.g. Qwen enable_thinking)
        extra_params = self.thinking_extra_params if current_thinking_mode else self.fast_extra_params
        if extra_params:
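For OpenAI-compatible chat completion endpoints, `stream_options.include_usage` asks the server to append a final streamed chunk whose `choices` list is empty and whose `usage` field carries the token totals. A minimal request payload sketch (the model name is a placeholder):

payload = {
    "model": "deepseek-chat",  # placeholder model name
    "messages": [{"role": "user", "content": "hi"}],
    "stream": True,
    # Ask for a trailing usage-only chunk; its "choices" will be an empty list.
    "stream_options": {"include_usage": True},
}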
@@ -448,13 +448,16 @@ class ContextManager:
            return
        try:
            run_mode = getattr(self.main_terminal, "run_mode", None) if hasattr(self, "main_terminal") else None
            model_key = getattr(self.main_terminal, "model_key", None) if hasattr(self, "main_terminal") else None
            self.conversation_manager.save_conversation(
                conversation_id=self.current_conversation_id,
                messages=self.conversation_history,
                project_path=str(self.project_path),
                todo_list=self.todo_list,
                thinking_mode=getattr(self.main_terminal, "thinking_mode", None) if hasattr(self, "main_terminal") else None,
                run_mode=run_mode
                run_mode=run_mode,
                model_key=model_key,
                has_images=self.has_images
            )
            # Save silently, without logging
        except Exception as e:
@@ -749,6 +752,14 @@ class ContextManager:
            message["images"] = images
            self.has_images = True

        # Record the model used for the current assistant reply so it can be inspected on replay
        if role == "assistant":
            message.setdefault("metadata", {})
            if "model_key" not in message["metadata"]:
                model_key = getattr(self.main_terminal, "model_key", None) if self.main_terminal else None
                if model_key:
                    message["metadata"]["model_key"] = model_key

        # If this is an assistant message with tool calls, save the full format
        if role == "assistant" and tool_calls:
            # Make sure the tool-call format is complete
@@ -469,8 +469,20 @@ class ConversationManager:
                existing_data["metadata"]["run_mode"] = normalized_mode
            elif "run_mode" not in existing_data["metadata"]:
                existing_data["metadata"]["run_mode"] = "thinking" if existing_data["metadata"].get("thinking_mode") else "fast"
            if model_key is not None:
                existing_data["metadata"]["model_key"] = model_key
            # Infer the most recently used model (prefer the argument, then scan assistant messages in reverse)
            inferred_model = None
            if model_key is None:
                for msg in reversed(messages):
                    if msg.get("role") != "assistant":
                        continue
                    msg_meta = msg.get("metadata") or {}
                    mk = msg_meta.get("model_key")
                    if mk:
                        inferred_model = mk
                        break
            target_model = model_key if model_key is not None else inferred_model
            if target_model is not None:
                existing_data["metadata"]["model_key"] = target_model
            elif "model_key" not in existing_data["metadata"]:
                existing_data["metadata"]["model_key"] = None
            if has_images is not None:
@@ -544,6 +556,21 @@ class ConversationManager:
                self._save_conversation_file(conversation_id, data)
                print(f"🔧 Added run mode field for conversation {conversation_id}")

            # Backfill a missing model field: infer it from the most recent assistant message metadata
            if metadata.get("model_key") is None:
                inferred_model = None
                for msg in reversed(data.get("messages") or []):
                    if msg.get("role") != "assistant":
                        continue
                    mk = (msg.get("metadata") or {}).get("model_key")
                    if mk:
                        inferred_model = mk
                        break
                if inferred_model is not None:
                    metadata["model_key"] = inferred_model
                    self._save_conversation_file(conversation_id, data)
                    print(f"🔧 Backfilled model field for conversation {conversation_id}: {inferred_model}")

            return data
        except (json.JSONDecodeError, Exception) as e:
            print(f"⌘ Failed to load conversation {conversation_id}: {e}")
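Both the save path and the load path above rely on the same backfill idiom: scan the message list from newest to oldest and take the first assistant message that already carries a model_key. As a standalone sketch:

from typing import Optional

def infer_model_key(messages: list) -> Optional[str]:
    """Return the model_key of the most recent assistant message, if any."""
    for msg in reversed(messages):
        if msg.get("role") != "assistant":
            continue
        mk = (msg.get("metadata") or {}).get("model_key")
        if mk:
            return mk
    return None

# Example: the newest assistant entry wins.
history = [
    {"role": "assistant", "metadata": {"model_key": "kimi"}},
    {"role": "user"},
    {"role": "assistant", "metadata": {"model_key": "deepseek"}},
]
assert infer_model_key(history) == "deepseek"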
@@ -810,11 +810,12 @@ def build_upload_error_response(exc: UploadSecurityError):
    }), status


def ensure_conversation_loaded(terminal: WebTerminal, conversation_id: Optional[str], run_mode: Optional[str]) -> Tuple[str, bool]:
def ensure_conversation_loaded(terminal: WebTerminal, conversation_id: Optional[str]) -> Tuple[str, bool]:
    """Make sure the terminal has the given conversation loaded; create a new one if none is given"""
    created_new = False
    if not conversation_id:
        result = terminal.create_new_conversation(run_mode=run_mode)
        # Do not pass a run mode explicitly; prefer falling back to the personalization/default config
        result = terminal.create_new_conversation()
        if not result.get("success"):
            raise RuntimeError(result.get("message", "Failed to create conversation"))
        conversation_id = result["conversation_id"]
@@ -2474,7 +2475,7 @@ def handle_message(data):

    requested_conversation_id = data.get('conversation_id')
    try:
        conversation_id, created_new = ensure_conversation_loaded(terminal, requested_conversation_id, terminal.run_mode)
        conversation_id, created_new = ensure_conversation_loaded(terminal, requested_conversation_id)
    except RuntimeError as exc:
        emit('error', {'message': str(exc)})
        return
@@ -2602,8 +2603,11 @@ def create_conversation(terminal: WebTerminal, workspace: UserWorkspace, usernam
    """Create a new conversation"""
    try:
        data = request.get_json() or {}
        thinking_mode = data.get('thinking_mode', terminal.thinking_mode)
        run_mode = data.get('mode')
        # The frontend now expects "new conversation" to return to the user's configured default model/mode,
        # and only uses the passed-in values when the client explicitly asks to preserve the current mode.
        preserve_mode = bool(data.get('preserve_mode'))
        thinking_mode = data.get('thinking_mode') if preserve_mode and 'thinking_mode' in data else None
        run_mode = data.get('mode') if preserve_mode and 'mode' in data else None

        result = terminal.create_new_conversation(thinking_mode=thinking_mode, run_mode=run_mode)
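From the client's point of view the new contract is simple: send an empty body to get the user's configured defaults, or set preserve_mode to keep the current mode. A sketch of the two request bodies (field names as in the hunk above; values are illustrative):

# Default: the server applies the user's configured default model/mode.
payload_defaults = {}

# Opt-out: explicitly keep the terminal's current mode for the new conversation.
payload_preserve = {"preserve_mode": True, "mode": "thinking", "thinking_mode": True}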
@@ -3891,20 +3895,23 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
                modify_result = await finalize_pending_modify(full_response, False, finish_reason="user_stop")
                break

            # Try to record usage first (some platforms carry usage in the last chunk while choices is empty)
            usage_info = chunk.get("usage")
            if usage_info:
                last_usage_payload = usage_info

            if "choices" not in chunk:
                debug_log(f"Chunk {chunk_count}: no choices field")
                continue

            if not chunk.get("choices"):
                debug_log(f"Chunk {chunk_count}: choices is an empty list")
                continue
            choice = chunk["choices"][0]
            delta = choice.get("delta", {})
            finish_reason = choice.get("finish_reason")
            if finish_reason:
                last_finish_reason = finish_reason

            usage_info = choice.get("usage")
            if usage_info:
                last_usage_payload = usage_info

            # Handle thinking content
            if "reasoning_content" in delta:
                reasoning_content = delta["reasoning_content"]
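On the consumer side this pairs with the stream_options change earlier: chunks whose choices list is empty must be tolerated, and the last usage object seen wins. A minimal sketch over already-parsed chunk dicts (illustrative, not the project's handler):

def consume_stream(chunks):
    """Collect text deltas and the final usage payload from OpenAI-style stream chunks."""
    text_parts, last_usage = [], None
    for chunk in chunks:
        if chunk.get("usage"):  # usage may arrive in a trailer chunk with empty choices
            last_usage = chunk["usage"]
        choices = chunk.get("choices") or []
        if not choices:
            continue
        delta = choices[0].get("delta", {})
        if delta.get("content"):
            text_parts.append(delta["content"])
    return "".join(text_parts), last_usage

# Example with a usage-only trailer chunk:
demo = [
    {"choices": [{"delta": {"content": "Hello"}}]},
    {"choices": [], "usage": {"prompt_tokens": 3, "completion_tokens": 1, "total_tokens": 4}},
]
assert consume_stream(demo) == ("Hello", {"prompt_tokens": 3, "completion_tokens": 1, "total_tokens": 4})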