chore: refresh deep mode styling

JOJO 2025-11-30 17:57:48 +08:00
parent 2fea4a7244
commit 97da631e01
12 changed files with 261 additions and 71 deletions

View File

@ -16,7 +16,7 @@ OUTPUT_FORMATS = {
"session": "📺 [会话]",
}
AGENT_VERSION = "v3.2"
AGENT_VERSION = "v4.1"
LOG_LEVEL = "INFO"
LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

package-lock.json generated
View File

@ -1,12 +1,12 @@
{
"name": "ai-agent-frontend",
"version": "0.1.0",
"version": "4.1.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "ai-agent-frontend",
"version": "0.1.0",
"version": "4.1.0",
"dependencies": {
"katex": "^0.16.9",
"marked": "^11.1.0",

View File

@ -1,6 +1,6 @@
{
"name": "ai-agent-frontend",
"version": "0.1.0",
"version": "4.1.0",
"private": true,
"type": "module",
"scripts": {

View File

@ -0,0 +1,4 @@
You are now in "Deep Thinking Mode".
In Deep Thinking Mode the requested model is kimi-k2-thinking, a model that is better at analyzing complex problems and planning complex workflows.
In every round of executing the user's request, your previous thinking stays visible, preserving continuity of the reasoning and the operation flow.
When thinking, do not look back at "what I did in the previous step"; only decide "what should be done next".
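The new prompt file defines what the model is told in deep mode, but this diff does not show where the file is read. A minimal sketch of one plausible injection point, assuming a hypothetical file location and helper name; the actual wiring may differ:

from pathlib import Path

# Hypothetical location of the prompt added above; the real path is not shown in this diff.
DEEP_MODE_PROMPT_FILE = Path(__file__).parent / "prompts" / "deep_mode.txt"

def build_system_messages(base_prompt: str, run_mode: str) -> list:
    """Prepend the deep-mode prompt as an extra system message when run_mode is 'deep' (sketch)."""
    messages = [{"role": "system", "content": base_prompt}]
    if run_mode == "deep" and DEEP_MODE_PROMPT_FILE.exists():
        deep_prompt = DEEP_MODE_PROMPT_FILE.read_text(encoding="utf-8").strip()
        messages.append({"role": "system", "content": deep_prompt})
    return messages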

View File

@ -132,6 +132,9 @@ const appOptions = {
_scrollListenerReady: false,
historyLoading: false,
mobileViewportQuery: null,
modeMenuOpen: false,
conversationListRequestSeq: 0,
conversationListRefreshToken: 0,
// Tool control menu
icons: ICONS,
@ -199,7 +202,8 @@ const appOptions = {
'stopRequested',
'projectPath',
'agentVersion',
'thinkingMode'
'thinkingMode',
'runMode'
]),
...mapState(useFileStore, ['contextMenu', 'fileTree', 'expandedFolders', 'todoList']),
...mapWritableState(useUiStore, [
@ -1003,6 +1007,7 @@ const appOptions = {
this.inputSetSettingsOpen(false);
this.inputSetToolMenuOpen(false);
this.inputSetQuickMenuOpen(false);
this.modeMenuOpen = false;
this.inputSetLineCount(1);
this.inputSetMultiline(false);
this.inputClearMessage();
@ -1097,6 +1102,14 @@ const appOptions = {
applyStatusSnapshot(status) {
this.resourceApplyStatusSnapshot(status);
if (status && typeof status.thinking_mode !== 'undefined') {
this.thinkingMode = !!status.thinking_mode;
}
if (status && typeof status.run_mode === 'string') {
this.runMode = status.run_mode;
} else if (status && typeof status.thinking_mode !== 'undefined') {
this.runMode = status.thinking_mode ? 'thinking' : 'fast';
}
},
updateContainerStatus(status) {
@ -1144,13 +1157,25 @@ const appOptions = {
// ==========================================
async loadConversationsList() {
const queryOffset = this.conversationsOffset;
const queryLimit = this.conversationsLimit;
const refreshToken = queryOffset === 0 ? ++this.conversationListRefreshToken : this.conversationListRefreshToken;
const requestSeq = ++this.conversationListRequestSeq;
this.conversationsLoading = true;
try {
const response = await fetch(`/api/conversations?limit=${this.conversationsLimit}&offset=${this.conversationsOffset}`);
const response = await fetch(`/api/conversations?limit=${queryLimit}&offset=${queryOffset}`);
const data = await response.json();
if (data.success) {
if (this.conversationsOffset === 0) {
if (refreshToken !== this.conversationListRefreshToken) {
debugLog('忽略已过期的对话列表响应', {
requestSeq,
responseOffset: queryOffset
});
return;
}
if (queryOffset === 0) {
this.conversations = data.data.conversations;
} else {
this.conversations.push(...data.data.conversations);
@ -1173,7 +1198,9 @@ const appOptions = {
} catch (error) {
console.error('加载对话列表异常:', error);
} finally {
this.conversationsLoading = false;
if (refreshToken === this.conversationListRefreshToken) {
this.conversationsLoading = false;
}
}
},
@ -1592,33 +1619,36 @@ const appOptions = {
'Content-Type': 'application/json'
},
body: JSON.stringify({
thinking_mode: this.thinkingMode
thinking_mode: this.thinkingMode,
mode: this.runMode
})
});
const result = await response.json();
if (result.success) {
debugLog('新对话创建成功:', result.conversation_id);
const newConversationId = result.conversation_id;
debugLog('新对话创建成功:', newConversationId);
// Clear current messages
this.logMessageState('createNewConversation:before-clear');
this.messages = [];
this.logMessageState('createNewConversation:after-clear');
this.currentConversationId = result.conversation_id;
this.currentConversationTitle = '新对话';
history.pushState({ conversationId: this.currentConversationId }, '', `/${this.stripConversationPrefix(this.currentConversationId)}`);
// Insert a placeholder into the local list instead of waiting for a refresh
const placeholder = {
id: newConversationId,
title: '新对话',
updated_at: new Date().toISOString(),
total_messages: 0,
total_tools: 0
};
this.conversations = [
placeholder,
...this.conversations.filter(conv => conv && conv.id !== newConversationId)
];
// Reset token statistics
this.resetTokenStatistics();
// Reset state
this.resetAllStates('createNewConversation');
// Refresh the conversation list
// Load the new conversation directly to keep state consistent
await this.loadConversation(newConversationId);
// Refresh the conversation list to pick up the latest stats
this.conversationsOffset = 0;
await this.loadConversationsList();
} else {
console.error('创建对话失败:', result.message);
this.uiPushToast({
@ -1794,30 +1824,8 @@ const appOptions = {
},
async toggleThinkingMode() {
const nextMode = !this.thinkingMode;
try {
const response = await fetch('/api/thinking-mode', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ thinking_mode: nextMode })
});
const data = await response.json();
if (response.ok && data.success) {
const actual = typeof data.data === 'boolean' ? data.data : nextMode;
this.thinkingMode = actual;
return;
}
throw new Error(data.message || data.error || '切换失败');
} catch (error) {
console.error('切换思考模式失败:', error);
this.uiPushToast({
title: '切换思考模式失败',
message: error.message || '请稍后重试',
type: 'error'
});
}
const target = this.thinkingMode ? 'fast' : 'thinking';
await this.setRunMode(target);
},
triggerFileUpload() {
@ -1967,6 +1975,7 @@ const appOptions = {
if (!this.isConnected) {
return;
}
this.modeMenuOpen = false;
const nextState = this.inputToggleToolMenu();
if (nextState) {
this.inputSetSettingsOpen(false);
@ -1983,11 +1992,15 @@ const appOptions = {
if (!this.isConnected) {
return;
}
this.inputToggleQuickMenu();
const opened = this.inputToggleQuickMenu();
if (!opened) {
this.modeMenuOpen = false;
}
},
closeQuickMenu() {
this.inputCloseMenus();
this.modeMenuOpen = false;
},
handleQuickUpload() {
@ -1997,11 +2010,72 @@ const appOptions = {
this.triggerFileUpload();
},
handleQuickModeToggle() {
toggleModeMenu() {
if (!this.isConnected || this.streamingMessage) {
return;
}
this.toggleThinkingMode();
const next = !this.modeMenuOpen;
this.modeMenuOpen = next;
if (next) {
this.inputSetToolMenuOpen(false);
this.inputSetSettingsOpen(false);
if (!this.quickMenuOpen) {
this.inputOpenQuickMenu();
}
}
},
async handleModeSelect(mode) {
if (!this.isConnected || this.streamingMessage) {
return;
}
await this.setRunMode(mode);
},
async handleCycleRunMode() {
const modes: Array<'fast' | 'thinking' | 'deep'> = ['fast', 'thinking', 'deep'];
const currentIndex = modes.indexOf(this.runMode);
const nextMode = modes[(currentIndex + 1) % modes.length];
await this.setRunMode(nextMode);
},
async setRunMode(mode) {
if (!this.isConnected || this.streamingMessage) {
this.modeMenuOpen = false;
return;
}
if (mode === this.runMode) {
this.modeMenuOpen = false;
this.closeQuickMenu();
return;
}
try {
const response = await fetch('/api/thinking-mode', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ mode })
});
const payload = await response.json();
if (!response.ok || !payload.success) {
throw new Error(payload.message || payload.error || '切换失败');
}
const data = payload.data || {};
this.thinkingMode = typeof data.thinking_mode === 'boolean' ? data.thinking_mode : mode !== 'fast';
this.runMode = data.mode || mode;
} catch (error) {
console.error('切换运行模式失败:', error);
const message = error instanceof Error ? error.message : String(error || '未知错误');
this.uiPushToast({
title: '切换思考模式失败',
message: message || '请稍后重试',
type: 'error'
});
} finally {
this.modeMenuOpen = false;
this.inputCloseMenus();
}
},
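setRunMode now POSTs a mode string ('fast' | 'thinking' | 'deep') to /api/thinking-mode and reads data.mode and data.thinking_mode from the response. The server side of that contract is not part of this diff; a minimal Flask-style sketch of what the handler presumably does, where the framework, route function, and in-module state are assumptions:

from flask import Flask, jsonify, request

app = Flask(__name__)
VALID_MODES = {"fast", "thinking", "deep"}
state = {"mode": "fast", "thinking_mode": False}  # stand-in for the shared agent state

@app.route("/api/thinking-mode", methods=["POST"])
def set_thinking_mode():
    payload = request.get_json(silent=True) or {}
    mode = payload.get("mode")
    if mode not in VALID_MODES:
        # Older clients may still send only the boolean field
        mode = "thinking" if payload.get("thinking_mode") else "fast"
    state["mode"] = mode
    state["thinking_mode"] = mode != "fast"
    # The real backend would presumably also call DeepSeekClient.set_deep_thinking_mode(mode == "deep")
    # and persist run_mode on the current conversation.
    return jsonify({"success": True, "data": dict(state)})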
handleInputChange() {
@ -2190,6 +2264,7 @@ const appOptions = {
if (!this.isConnected) {
return;
}
this.modeMenuOpen = false;
const nextState = this.inputToggleSettingsMenu();
if (nextState) {
this.inputSetToolMenuOpen(false);

View File

@ -530,6 +530,11 @@ export async function initializeLegacySocket(ctx: any) {
ctx.projectPath = data.project_path || '';
ctx.agentVersion = data.version || ctx.agentVersion;
ctx.thinkingMode = !!data.thinking_mode;
if (data.run_mode) {
ctx.runMode = data.run_mode;
} else {
ctx.runMode = ctx.thinkingMode ? 'thinking' : 'fast';
}
socketLog('系统就绪:', data);
// Load the conversation list as soon as the system is ready
@ -621,6 +626,9 @@ export async function initializeLegacySocket(ctx: any) {
ctx.socket.on('conversation_list_update', (data) => {
socketLog('对话列表已更新:', data);
// Refresh the conversation list
ctx.conversationsOffset = 0;
ctx.hasMoreConversations = false;
ctx.loadingMoreConversations = false;
ctx.loadConversationsList();
});
@ -630,8 +638,10 @@ export async function initializeLegacySocket(ctx: any) {
if (status.conversation && status.conversation.current_id) {
ctx.currentConversationId = status.conversation.current_id;
}
if (typeof status.thinking_mode !== 'undefined') {
ctx.thinkingMode = !!status.thinking_mode;
if (typeof status.run_mode === 'string') {
ctx.runMode = status.run_mode;
} else if (typeof status.thinking_mode !== 'undefined') {
ctx.runMode = status.thinking_mode ? 'thinking' : 'fast';
}
});
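Both the "system ready" handler and this status handler now prefer run_mode and only fall back to mapping the legacy thinking_mode boolean. The emitting side is not shown in this diff; a small sketch of the payload shape the Python backend would need to produce for these branches to fire (the builder function and the event name are assumptions):

def build_status_payload(run_mode: str, conversation_id: str) -> dict:
    """Sketch of a status snapshot carrying both the new and the legacy mode fields."""
    return {
        "run_mode": run_mode,                 # "fast" | "thinking" | "deep"
        "thinking_mode": run_mode != "fast",  # kept so older clients keep working
        "conversation": {"current_id": conversation_id},
    }

# e.g. socketio.emit("status_update", build_status_payload("deep", "conv_123"))  (event name illustrative)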

View File

@ -13,6 +13,8 @@
--claude-muted: rgba(121, 109, 94, 0.4);
--claude-accent: #da7756;
--claude-accent-strong: #bd5d3a;
--claude-deep: #f2a93b;
--claude-deep-strong: #d07a14;
--claude-highlight: rgba(218, 119, 86, 0.14);
--claude-button-hover: #c76541;
--claude-button-active: #a95331;

View File

@ -108,6 +108,9 @@
}
.mode-indicator {
--mode-indicator-color-1: var(--claude-accent);
--mode-indicator-color-2: var(--claude-accent-strong);
--mode-indicator-shadow: rgba(189, 93, 58, 0.25);
border: none;
cursor: pointer;
outline: none;
@ -118,10 +121,10 @@
display: inline-flex;
align-items: center;
justify-content: center;
background: var(--claude-accent);
background: linear-gradient(135deg, var(--mode-indicator-color-1), var(--mode-indicator-color-2));
color: #fffef8;
box-shadow: 0 8px 20px rgba(189, 93, 58, 0.25);
transition: background 0.25s ease, box-shadow 0.25s ease, transform 0.25s ease;
box-shadow: 0 8px 20px var(--mode-indicator-shadow);
transition: background 0.35s ease, box-shadow 0.35s ease, transform 0.25s ease;
}
.mode-indicator:hover {
@ -129,12 +132,25 @@
}
.mode-indicator:focus-visible {
box-shadow: 0 0 0 2px rgba(255, 255, 255, 0.8), 0 8px 20px rgba(189, 93, 58, 0.35);
box-shadow: 0 0 0 2px rgba(255, 255, 255, 0.8), 0 8px 20px var(--mode-indicator-shadow);
}
.mode-indicator.fast {
background: #ffcc4d;
box-shadow: 0 8px 20px rgba(255, 204, 77, 0.35);
--mode-indicator-color-1: #ffe08c;
--mode-indicator-color-2: #f7b23c;
--mode-indicator-shadow: rgba(247, 178, 60, 0.35);
}
.mode-indicator.thinking {
--mode-indicator-color-1: var(--claude-accent);
--mode-indicator-color-2: var(--claude-accent-strong);
--mode-indicator-shadow: rgba(189, 93, 58, 0.3);
}
.mode-indicator.deep {
--mode-indicator-color-1: var(--claude-deep);
--mode-indicator-color-2: var(--claude-deep-strong);
--mode-indicator-shadow: rgba(208, 122, 20, 0.35);
}
.mode-indicator .icon {

View File

@ -16,7 +16,7 @@ OUTPUT_FORMATS = {
"session": "📺 [会话]",
}
AGENT_VERSION = "v3.2"
AGENT_VERSION = "v4.1"
LOG_LEVEL = "INFO"
LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

View File

@ -30,6 +30,7 @@ class ConversationMetadata:
thinking_mode: bool
total_messages: int
total_tools: int
run_mode: str = "fast"
status: str = "active" # active, archived, error
class ConversationManager:
@ -42,7 +43,8 @@ class ConversationManager:
self.current_conversation_id: Optional[str] = None
self.workspace_root = Path(__file__).resolve().parents[1]
self._ensure_directories()
self._load_index()
self._index_verified = False
self._load_index(ensure_integrity=True)
# Initialize the tiktoken encoder
try:
@ -99,28 +101,50 @@ class ConversationManager:
print(f"🔄 已从对话文件重建索引,共 {len(rebuilt_index)} 条记录")
return rebuilt_index
def _load_index(self) -> Dict:
"""加载对话索引"""
def _index_missing_conversations(self, index: Dict) -> bool:
"""检测本地对话文件是否未出现在索引里"""
index_ids = set(index.keys())
for file_path in self._iter_conversation_files():
conv_id = file_path.stem
if conv_id and conv_id not in index_ids:
print(f"🔍 对话 {conv_id} 未包含在索引中,将执行重建。")
return True
return False
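The integrity check above iterates _iter_conversation_files(), which is referenced but not defined in this diff. A minimal sketch of what it presumably yields, assuming one JSON file per conversation id stored alongside the index file (written as a free function here; the class method would read the paths from self):

from pathlib import Path
from typing import Iterator

def iter_conversation_files(conversations_dir: Path, index_file: Path) -> Iterator[Path]:
    """Yield per-conversation JSON files, skipping the index itself (sketch)."""
    for file_path in sorted(conversations_dir.glob("*.json")):
        if file_path != index_file:
            yield file_path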
def _load_index(self, ensure_integrity: bool = False) -> Dict:
"""加载对话索引,可选地校验并重建"""
try:
index: Dict = {}
if self.index_file.exists():
with open(self.index_file, 'r', encoding='utf-8') as f:
content = f.read().strip()
if content:
index = json.loads(content)
if index:
if ensure_integrity and not self._index_verified:
if self._index_missing_conversations(index):
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
index = rebuilt
self._index_verified = True
return index
# The index is empty but conversation files still exist; try to rebuild
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
if ensure_integrity:
self._index_verified = True
return rebuilt
return {}
# The index file is missing but conversation files exist; rebuild
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
if ensure_integrity:
self._index_verified = True
return rebuilt
return {}
return index
except (json.JSONDecodeError, Exception) as e:
print(f"⚠️ 加载对话索引失败,将尝试重建: {e}")
backup_path = self.index_file.with_name(
@ -135,6 +159,8 @@ class ConversationManager:
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
if ensure_integrity:
self._index_verified = True
return rebuilt
return {}

View File

@ -46,6 +46,8 @@ class DeepSeekClient:
"model_id": THINKING_MODEL_ID or MODEL_ID
}
self.thinking_mode = thinking_mode # True = smart thinking mode, False = fast mode
self.deep_thinking_mode = False # Deep thinking mode: use the thinking model for the entire round
self.deep_thinking_session = False # Whether the current task is in a deep-thinking session
self.web_mode = web_mode # Web mode flag, used to suppress print output
# Compatibility with legacy code paths
self.api_base_url = self.fast_api_config["base_url"]
@ -126,13 +128,20 @@ class DeepSeekClient:
return json.dumps(data, ensure_ascii=False)
def start_new_task(self):
def set_deep_thinking_mode(self, enabled: bool):
"""配置深度思考模式(持续使用思考模型)。"""
self.deep_thinking_mode = bool(enabled)
if not enabled:
self.deep_thinking_session = False
def start_new_task(self, force_deep: bool = False):
"""开始新任务(重置任务级别的状态)"""
self.current_task_first_call = True
self.current_task_thinking = ""
self.force_thinking_next_call = False
self.skip_thinking_next_call = False
self.last_call_used_thinking = False
self.deep_thinking_session = bool(force_deep) or bool(self.deep_thinking_mode)
def _build_headers(self, api_key: str) -> Dict[str, str]:
return {
@ -154,6 +163,8 @@ class DeepSeekClient:
def get_current_thinking_mode(self) -> bool:
"""获取当前应该使用的思考模式"""
if self.deep_thinking_session:
return True
if not self.thinking_mode:
return False
if self.force_thinking_next_call:
@ -469,7 +480,8 @@ class DeepSeekClient:
"content": assistant_content,
"tool_calls": tool_calls
}
if current_thinking:
assistant_message["reasoning_content"] = current_thinking
messages.append(assistant_message)
# Execute all tool calls - using robust argument parsing
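With these flags in place, a deep run is pinned to the thinking model for the whole task: set_deep_thinking_mode() (or start_new_task(force_deep=True)) turns on deep_thinking_session, and get_current_thinking_mode() returns True before any of the fast-mode heuristics are consulted. A small usage sketch under that reading; the constructor arguments and surrounding setup are assumptions:

# Illustrative wiring only; the real constructor signature and module path are not shown here.
run_mode = "deep"  # would come from the conversation's metadata
client = DeepSeekClient(thinking_mode=False, web_mode=True)
client.set_deep_thinking_mode(run_mode == "deep")
client.start_new_task()  # deep_thinking_session becomes True for the whole task
assert client.get_current_thinking_mode() is True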

View File

@ -29,6 +29,7 @@ class ConversationMetadata:
thinking_mode: bool
total_messages: int
total_tools: int
run_mode: str = "fast"
status: str = "active" # active, archived, error
class ConversationManager:
@ -41,7 +42,8 @@ class ConversationManager:
self.current_conversation_id: Optional[str] = None
self.workspace_root = Path(__file__).resolve().parents[1]
self._ensure_directories()
self._load_index()
self._index_verified = False
self._load_index(ensure_integrity=True)
def _ensure_directories(self):
"""确保必要的目录存在"""
@ -83,6 +85,7 @@ class ConversationManager:
"project_path": metadata.get("project_path"),
"project_relative_path": metadata.get("project_relative_path"),
"thinking_mode": metadata.get("thinking_mode", False),
"run_mode": metadata.get("run_mode") or ("thinking" if metadata.get("thinking_mode") else "fast"),
"total_messages": metadata.get("total_messages", 0),
"total_tools": metadata.get("total_tools", 0),
"status": metadata.get("status", "active"),
@ -91,28 +94,50 @@ class ConversationManager:
print(f"🔄 已从对话文件重建索引,共 {len(rebuilt_index)} 条记录")
return rebuilt_index
def _load_index(self) -> Dict:
"""加载对话索引"""
def _index_missing_conversations(self, index: Dict) -> bool:
"""检查索引是否缺失本地对话文件"""
index_ids = set(index.keys())
for file_path in self._iter_conversation_files():
conv_id = file_path.stem
if conv_id and conv_id not in index_ids:
print(f"🔍 对话 {conv_id} 未出现在索引中,将重建索引。")
return True
return False
def _load_index(self, ensure_integrity: bool = False) -> Dict:
"""加载对话索引,可选地在缺失时自动重建"""
try:
index: Dict = {}
if self.index_file.exists():
with open(self.index_file, 'r', encoding='utf-8') as f:
content = f.read().strip()
if content:
index = json.loads(content)
if index:
if ensure_integrity and not self._index_verified:
if self._index_missing_conversations(index):
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
index = rebuilt
self._index_verified = True
return index
# The index is empty but conversation files still exist; try to rebuild
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
if ensure_integrity:
self._index_verified = True
return rebuilt
return {}
# The index file is missing but conversation files exist; rebuild
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
if ensure_integrity:
self._index_verified = True
return rebuilt
return {}
return index
except (json.JSONDecodeError, Exception) as e:
print(f"⚠️ 加载对话索引失败,将尝试重建: {e}")
backup_path = self.index_file.with_name(
@ -127,6 +152,8 @@ class ConversationManager:
rebuilt = self._rebuild_index_from_files()
if rebuilt:
self._save_index(rebuilt)
if ensure_integrity:
self._index_verified = True
return rebuilt
return {}
@ -247,6 +274,7 @@ class ConversationManager:
self,
project_path: str,
thinking_mode: bool = False,
run_mode: str = "fast",
initial_messages: List[Dict] = None
) -> str:
"""
@ -265,6 +293,7 @@ class ConversationManager:
# Build the conversation data
path_metadata = self._prepare_project_path_metadata(project_path)
normalized_mode = run_mode if run_mode in {"fast", "thinking", "deep"} else ("thinking" if thinking_mode else "fast")
conversation_data = {
"id": conversation_id,
"title": self._extract_title_from_messages(messages),
@ -276,6 +305,7 @@ class ConversationManager:
"project_path": path_metadata["project_path"],
"project_relative_path": path_metadata["project_relative_path"],
"thinking_mode": thinking_mode,
"run_mode": normalized_mode,
"total_messages": len(messages),
"total_tools": self._count_tools_in_messages(messages),
"status": "active"
@ -326,6 +356,7 @@ class ConversationManager:
project_path=conversation_data["metadata"]["project_path"],
project_relative_path=conversation_data["metadata"].get("project_relative_path"),
thinking_mode=conversation_data["metadata"]["thinking_mode"],
run_mode=conversation_data["metadata"].get("run_mode", "thinking" if conversation_data["metadata"]["thinking_mode"] else "fast"),
total_messages=conversation_data["metadata"]["total_messages"],
total_tools=conversation_data["metadata"]["total_tools"],
status=conversation_data["metadata"].get("status", "active")
@ -339,6 +370,7 @@ class ConversationManager:
"project_path": metadata.project_path,
"project_relative_path": metadata.project_relative_path,
"thinking_mode": metadata.thinking_mode,
"run_mode": metadata.run_mode,
"total_messages": metadata.total_messages,
"total_tools": metadata.total_tools,
"status": metadata.status
@ -354,6 +386,7 @@ class ConversationManager:
messages: List[Dict],
project_path: str = None,
thinking_mode: bool = None,
run_mode: Optional[str] = None,
todo_list: Optional[Dict] = None
) -> bool:
"""
@ -393,6 +426,13 @@ class ConversationManager:
existing_data["metadata"].setdefault("project_relative_path", None)
if thinking_mode is not None:
existing_data["metadata"]["thinking_mode"] = thinking_mode
if run_mode:
normalized_mode = run_mode if run_mode in {"fast", "thinking", "deep"} else (
"thinking" if existing_data["metadata"].get("thinking_mode") else "fast"
)
existing_data["metadata"]["run_mode"] = normalized_mode
elif "run_mode" not in existing_data["metadata"]:
existing_data["metadata"]["run_mode"] = "thinking" if existing_data["metadata"].get("thinking_mode") else "fast"
existing_data["metadata"]["total_messages"] = len(messages)
existing_data["metadata"]["total_tools"] = self._count_tools_in_messages(messages)
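The same normalization rule now appears in create, update, and load: an explicit run_mode wins when it is one of the three known values, otherwise the legacy thinking_mode boolean decides. A minimal sketch of that rule as a standalone helper (the helper itself is not part of this commit):

VALID_RUN_MODES = {"fast", "thinking", "deep"}

def normalize_run_mode(run_mode=None, thinking_mode=False):
    """Map an optional run_mode plus the legacy boolean onto 'fast', 'thinking' or 'deep' (sketch)."""
    if run_mode in VALID_RUN_MODES:
        return run_mode
    return "thinking" if thinking_mode else "fast"

# normalize_run_mode("deep") -> "deep"
# normalize_run_mode(None, thinking_mode=True) -> "thinking"
# normalize_run_mode("unknown") -> "fast"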
@ -455,6 +495,11 @@ class ConversationManager:
# Validate existing token statistics
data = self._validate_token_statistics(data)
if "run_mode" not in metadata:
metadata["run_mode"] = "thinking" if metadata.get("thinking_mode") else "fast"
self._save_conversation_file(conversation_id, data)
print(f"🔧 为对话 {conversation_id} 添加运行模式字段")
return data
except (json.JSONDecodeError, Exception) as e:
print(f"⌘ 加载对话失败 {conversation_id}: {e}")