From 2fea4a7244b96bd928110f24456a180dc4f69dfe Mon Sep 17 00:00:00 2001
From: JOJO <1498581755@qq.com>
Date: Sun, 30 Nov 2025 15:15:35 +0800
Subject: [PATCH] refactor: remove legacy think tags

---
 core/main_terminal.py           | 18 ++++--------------
 doc/frontend/legacy_ui_spec.md  |  2 +-
 static/src/app.ts               | 28 ++--------------------------
 sub_agent/core/main_terminal.py | 18 ++++--------------
 sub_agent/static/app.js         | 18 ++++--------------
 sub_agent/utils/api_client.py   |  4 ----
 sub_agent/web_server.py         | 30 +++++++++---------------------
 7 files changed, 24 insertions(+), 94 deletions(-)

diff --git a/core/main_terminal.py b/core/main_terminal.py
index 1898618..1f12c8f 100644
--- a/core/main_terminal.py
+++ b/core/main_terminal.py
@@ -696,25 +696,15 @@ class MainTerminal:
 
         # ===== 统一保存到对话历史(关键修复) =====
 
-        # 1. 构建完整的assistant消息内容
-        assistant_content_parts = []
-
-        # 添加思考内容
-        if final_thinking:
-            assistant_content_parts.append(f"<think>\n{final_thinking}\n</think>")
-
-        # 添加回复内容
-        if final_response:
-            assistant_content_parts.append(final_response)
-
-        # 合并内容
-        assistant_content = "\n".join(assistant_content_parts) if assistant_content_parts else "已完成操作。"
+        # 1. 构建助手回复内容(思考内容通过 reasoning_content 存储)
+        assistant_content = final_response or "已完成操作。"
 
         # 2. 保存assistant消息(包含tool_calls但不包含结果)
         self.context_manager.add_conversation(
             "assistant",
             assistant_content,
-            collected_tool_calls if collected_tool_calls else None
+            collected_tool_calls if collected_tool_calls else None,
+            reasoning_content=final_thinking or None
         )
 
         # 3. 保存独立的tool消息
diff --git a/doc/frontend/legacy_ui_spec.md b/doc/frontend/legacy_ui_spec.md
index ea74dcc..91bebfe 100644
--- a/doc/frontend/legacy_ui_spec.md
+++ b/doc/frontend/legacy_ui_spec.md
@@ -77,7 +77,7 @@
   - `tool`:卡片包含工具名、图标(`TOOL_ICON_MAP`),参数快照/状态消息/结果(JSON、plain text、命令输出 `pre`),流式时加 loading 条;`action.tool.awaiting_content` 控制“等待正文”占位。工具 `status` 颜色区分 `preparing/running/completed/error`。
   - `append_payload`/`modify_payload`:专门显示文件路径、强制标志、成功/失败行数。
   - `system`:用于 `role=system` 文本,折叠块 `system-message`。
-- **特殊处理**:`renderHistoryMessages()` 会解析 `<think>` 块、自定义 metadata(`append_payload`, `modify_payload`),并避免 LLm 输出的 `<<>>` 内容直接进入 UI。
+- **特殊处理**:`renderHistoryMessages()` 会读取消息的 `reasoning_content`、自定义 metadata(`append_payload`, `modify_payload`),并避免 LLM 输出的 `<<>>` 内容直接进入 UI。
 - **滚动策略**:`messagesArea` 的 scroll 事件更新 `userScrolling/autoScrollEnabled`,`scroll-lock-toggle` 按钮切换锁定;思考块单独监听 `scroll` 以保持 pinned 行为。
 
 #### 3.3 输入区(`.compact-input-area`)
diff --git a/static/src/app.ts b/static/src/app.ts
index 05ef727..e155902 100644
--- a/static/src/app.ts
+++ b/static/src/app.ts
@@ -1387,23 +1387,7 @@ const appOptions = {
         }
 
         const content = message.content || '';
-        let reasoningText = (message.reasoning_content || '').trim();
-
-        if (!reasoningText) {
-            const thinkPatterns = [
-                /<think>([\s\S]*?)<\/think>/g,
-                /<thinking>([\s\S]*?)<\/thinking>/g
-            ];
-
-            let extracted = '';
-            for (const pattern of thinkPatterns) {
-                let match;
-                while ((match = pattern.exec(content)) !== null) {
-                    extracted += (match[1] || '').trim() + '\n';
-                }
-            }
-            reasoningText = extracted.trim();
-        }
+        const reasoningText = (message.reasoning_content || '').trim();
 
         if (reasoningText) {
             const blockId = `history-thinking-${Date.now()}-${Math.random().toString(36).slice(2)}`;
@@ -1426,15 +1410,7 @@
         const isModifyMessage = message.name === 'modify_file';
         const containsAppendMarkers = /<<<\s*(APPEND|MODIFY)/i.test(content || '') || /<<>>/i.test(content || '');
 
-        let textContent = content;
-        if (!message.reasoning_content) {
-            textContent = textContent
-                .replace(/<think>[\s\S]*?<\/think>/g, '')
-                .replace(/<thinking>[\s\S]*?<\/thinking>/g, '')
-                .trim();
-        } else {
-            textContent = textContent.trim();
-        }
+        const textContent = content.trim();
 
         if (appendPayloadMeta) {
             currentAssistantMessage.actions.push({
diff --git a/sub_agent/core/main_terminal.py b/sub_agent/core/main_terminal.py
index 97ea713..1a02f2e 100644
--- a/sub_agent/core/main_terminal.py
+++ b/sub_agent/core/main_terminal.py
@@ -592,25 +592,15 @@ class MainTerminal:
 
         # ===== 统一保存到对话历史(关键修复) =====
 
-        # 1. 构建完整的assistant消息内容
-        assistant_content_parts = []
-
-        # 添加思考内容
-        if final_thinking:
-            assistant_content_parts.append(f"<think>\n{final_thinking}\n</think>")
-
-        # 添加回复内容
-        if final_response:
-            assistant_content_parts.append(final_response)
-
-        # 合并内容
-        assistant_content = "\n".join(assistant_content_parts) if assistant_content_parts else "已完成操作。"
+        # 1. 构建助手回复内容(思考内容通过 reasoning_content 保存)
+        assistant_content = final_response or "已完成操作。"
 
         # 2. 保存assistant消息(包含tool_calls但不包含结果)
         self.context_manager.add_conversation(
             "assistant",
             assistant_content,
-            collected_tool_calls if collected_tool_calls else None
+            collected_tool_calls if collected_tool_calls else None,
+            reasoning_content=final_thinking or None
         )
 
         # 3. 保存独立的tool消息
diff --git a/sub_agent/static/app.js b/sub_agent/static/app.js
index 4f8790c..f066858 100644
--- a/sub_agent/static/app.js
+++ b/sub_agent/static/app.js
@@ -1984,28 +1984,18 @@ async function bootstrapApp() {
         const appendPayloadMeta = metadata.append_payload;
         const modifyPayloadMeta = metadata.modify_payload;
 
-        const thinkPatterns = [/<think>([\s\S]*?)<\/think>/g, /<thinking>([\s\S]*?)<\/thinking>/g];
-        let allThinkingContent = '';
-        for (const pattern of thinkPatterns) {
-            let match;
-            while ((match = pattern.exec(content)) !== null) {
-                allThinkingContent += match[1].trim() + '\n';
-            }
-        }
-        if (allThinkingContent.trim()) {
+        const reasoningText = (message.reasoning_content || '').trim();
+        if (reasoningText) {
             assistant.actions.push({
                 id: `history-think-${Date.now()}-${Math.random()}`,
                 type: 'thinking',
-                content: allThinkingContent.trim(),
+                content: reasoningText,
                 streaming: false,
                 timestamp: Date.now()
             });
         }
 
-        let textContent = content
-            .replace(/<think>[\s\S]*?<\/think>/g, '')
-            .replace(/<thinking>[\s\S]*?<\/thinking>/g, '')
-            .trim();
+        const textContent = content.trim();
 
         if (appendPayloadMeta) {
             assistant.actions.push({
diff --git a/sub_agent/utils/api_client.py b/sub_agent/utils/api_client.py
index 4133d7e..a2bce71 100644
--- a/sub_agent/utils/api_client.py
+++ b/sub_agent/utils/api_client.py
@@ -409,10 +409,6 @@ class DeepSeekClient:
         # 构建助手消息 - 始终包含所有收集到的内容
         assistant_content_parts = []
 
-        # 添加思考内容(如果有)
-        if current_thinking:
-            assistant_content_parts.append(f"<think>\n{current_thinking}\n</think>")
-
         # 添加正式回复内容(如果有)
         if full_response:
             assistant_content_parts.append(full_response)
diff --git a/sub_agent/web_server.py b/sub_agent/web_server.py
index 8ad8921..32bd840 100644
--- a/sub_agent/web_server.py
+++ b/sub_agent/web_server.py
@@ -2543,7 +2543,6 @@ async def handle_task_with_sender(terminal: WebTerminal, message, sender, client
     sender('ai_message_start', {})
 
     # 增量保存相关变量
-    has_saved_thinking = False  # 是否已保存思考内容
     accumulated_response = ""  # 累积的响应内容
     is_first_iteration = True  # 是否是第一次迭代
 
@@ -3347,13 +3346,6 @@ async def handle_task_with_sender(terminal: WebTerminal, message, sender, client
                         thinking_ended = True
                         sender('thinking_end', {'full_content': current_thinking})
                         await asyncio.sleep(0.1)
-
-                        # ===== 增量保存:保存思考内容 =====
-                        if current_thinking and not has_saved_thinking and is_first_iteration:
-                            thinking_content = f"<think>\n{current_thinking}\n</think>"
-                            web_terminal.context_manager.add_conversation("assistant", thinking_content)
-                            has_saved_thinking = True
-                            debug_log(f"💾 增量保存:思考内容 ({len(current_thinking)} 字符)")
 
                         expecting_modify = bool(pending_modify) or bool(getattr(web_terminal, "pending_modify_request", None))
                         expecting_append = bool(pending_append) or bool(getattr(web_terminal, "pending_append_request", None))
@@ -3599,13 +3591,6 @@ async def handle_task_with_sender(terminal: WebTerminal, message, sender, client
         if in_thinking and not thinking_ended:
             sender('thinking_end', {'full_content': current_thinking})
             await asyncio.sleep(0.1)
-
-            # 保存思考内容
-            if current_thinking and not has_saved_thinking and is_first_iteration:
-                thinking_content = f"<think>\n{current_thinking}\n</think>"
-                web_terminal.context_manager.add_conversation("assistant", thinking_content)
-                has_saved_thinking = True
-                debug_log(f"💾 增量保存:延迟思考内容 ({len(current_thinking)} 字符)")
 
         # 确保text_end事件被发送
         if text_started and text_has_content and not append_result["handled"] and not modify_result["handled"]:
@@ -3617,7 +3602,11 @@ async def handle_task_with_sender(terminal: WebTerminal, message, sender, client
 
         # ===== 增量保存:保存当前轮次的文本内容 =====
         if full_response.strip():
-            web_terminal.context_manager.add_conversation("assistant", full_response)
+            web_terminal.context_manager.add_conversation(
+                "assistant",
+                full_response,
+                reasoning_content=current_thinking or None
+            )
             debug_log(f"💾 增量保存:文本内容 ({len(full_response)} 字符)")
 
         if append_result["handled"]:
@@ -3627,7 +3616,8 @@ async def handle_task_with_sender(terminal: WebTerminal, message, sender, client
             web_terminal.context_manager.add_conversation(
                 "assistant",
                 append_content_text,
-                metadata=append_metadata
+                metadata=append_metadata,
+                reasoning_content=current_thinking or None
             )
             debug_log("💾 增量保存:追加正文快照")
 
@@ -3703,7 +3693,8 @@ async def handle_task_with_sender(terminal: WebTerminal, message, sender, client
             web_terminal.context_manager.add_conversation(
                 "assistant",
                 modify_content_text,
-                metadata=modify_metadata
+                metadata=modify_metadata,
+                reasoning_content=current_thinking or None
             )
             debug_log("💾 增量保存:修改正文快照")
 
@@ -3809,9 +3800,6 @@ async def handle_task_with_sender(terminal: WebTerminal, message, sender, client
 
         # 构建助手消息(用于API继续对话)
         assistant_content_parts = []
-        if current_thinking:
-            assistant_content_parts.append(f"<think>\n{current_thinking}\n</think>")
-
         if full_response:
             assistant_content_parts.append(full_response)
         elif append_result["handled"] and append_result["assistant_content"]: