fix: remove legacy file edit tags

This commit is contained in:
JOJO 2026-03-17 22:43:51 +08:00
parent 5ab3acef9c
commit 12c7a4bdd9
25 changed files with 107 additions and 1294 deletions

View File

@ -96,24 +96,9 @@ tail -f logs/container_stats.log
## Important Implementation Details
### File Operations with Streaming
### File Operations
**append_to_file** 和 **modify_file** 使用特殊的流式输出格式:
```
<<<APPEND:path/to/file>>>
文件内容...
<<<END_APPEND>>>
<<<MODIFY:path/to/file>>>
[replace:1]
<<OLD>>原文内容<<END>>
<<NEW>>新内容<<END>>
[/replace]
<<<END_MODIFY>>>
```
处理逻辑在 `web_server.py` 的 `handle_task_with_sender` 中,通过检测标记并在流式输出中即时执行。
文件写入统一使用 `write_file`/`edit_file` 工具完成:`write_file` 负责覆盖或追加内容,`edit_file` 负责精确字符串替换,不再依赖历史版本的流式标签标记。
### Container Architecture

View File

@ -113,7 +113,7 @@ npm run dev
| --- | --- |
| `/help` | CLI 指令列表 |
| `/status` | 查看系统/容器资源状态 |
| `read_file` / `modify_file` | 读写项目文件(自动通过容器代理) |
| `read_file` / `write_file` / `edit_file` | 读写项目文件(自动通过容器代理) |
| `terminal_session` / `terminal_input` | 管理多终端会话 |
| `run_command` / `run_python` | 工具容器快速执行命令/Python 代码 |
| `todo_*`, `update_memory` | 维护待办与长期记忆 |

View File

@ -258,6 +258,9 @@ class MainTerminalContextMixin:
self.context_manager._build_content_with_images(conv["content"], images, videos)
if (images or videos) else conv["content"]
)
# 调试:记录所有 system 消息
if conv["role"] == "system":
logger.info(f"[DEBUG build_messages] 添加 system 消息: content前50字={conv['content'][:50]}")
messages.append({
"role": conv["role"],
"content": content_payload

View File

@ -33,8 +33,6 @@ TOOL_CATEGORIES: Dict[str, ToolCategory] = {
"create_file",
"write_file",
"edit_file",
"append_to_file",
"modify_file",
"delete_file",
"rename_file",
"create_folder",

View File

@ -395,13 +395,6 @@ class WebTerminal(MainTerminal):
'status': 'reading',
'detail': f'读取文件({read_type}): {arguments.get("path", "未知路径")}'
})
elif tool_name == "modify_file":
path = arguments.get("path", "未知路径")
self.broadcast('tool_status', {
'tool': tool_name,
'status': 'modifying',
'detail': f'准备修改文件: {path}'
})
elif tool_name == "delete_file":
self.broadcast('tool_status', {
'tool': tool_name,

View File

@ -1076,7 +1076,7 @@ class FileManager:
if old_text is None or new_text is None:
block_result["status"] = "error"
block_result["reason"] = "缺少 OLD 或 NEW 内容"
block_result["hint"] = "请确保粘贴的补丁包含成对的 <<<OLD>>> / <<<NEW>>> 标记"
block_result["hint"] = "请确保补丁包含成对的 OLD/NEW 段落"
failed_details.append({"index": index, "reason": "缺少 OLD/NEW 标记"})
results.append(block_result)
continue

View File

@ -39,6 +39,7 @@ DEFAULT_PERSONALIZATION_CONFIG: Dict[str, Any] = {
"default_run_mode": None,
"auto_generate_title": True,
"tool_intent_enabled": True,
"skill_hints_enabled": False, # Skill 提示系统开关(默认关闭)
"default_model": "kimi-k2.5",
"image_compression": "original", # original / 1080p / 720p / 540p
"silent_tool_disable": False, # 禁用工具时不向模型插入提示
@ -145,6 +146,12 @@ def sanitize_personalization_payload(
else:
base["tool_intent_enabled"] = bool(base.get("tool_intent_enabled"))
# Skill 提示系统开关
if "skill_hints_enabled" in data:
base["skill_hints_enabled"] = bool(data.get("skill_hints_enabled"))
else:
base["skill_hints_enabled"] = bool(base.get("skill_hints_enabled"))
if "disabled_tool_categories" in data:
base["disabled_tool_categories"] = _sanitize_tool_categories(data.get("disabled_tool_categories"), allowed_tool_categories)
else:

View File

@ -48,40 +48,41 @@ def load_conversation_messages(path: Path) -> List[Dict[str, Any]]:
def minimal_tool_definitions() -> List[Dict[str, Any]]:
"""返回涵盖 append/modify 的最小工具定义集合。"""
"""返回涵盖 write/edit 的最小工具定义集合。"""
return [
{
"type": "function",
"function": {
"name": "append_to_file",
"name": "write_file",
"description": (
"准备向文件追加大段内容。调用后系统会发放 <<<APPEND:path>>>…<<<END_APPEND>>> "
"格式的写入窗口AI 必须在窗口内一次性输出需要追加的全部内容。"
"将内容写入本地文件系统append 为 True 时追加到末尾False 时覆盖原文件。"
),
"parameters": {
"type": "object",
"properties": {
"path": {"type": "string", "description": "目标文件的相对路径"},
"reason": {"type": "string", "description": "为什么需要追加(可选)"}
"file_path": {"type": "string", "description": "目标文件的相对路径"},
"content": {"type": "string", "description": "要写入的内容"},
"append": {"type": "boolean", "description": "是否追加到末尾", "default": False}
},
"required": ["path"]
"required": ["file_path", "content"]
}
}
},
{
"type": "function",
"function": {
"name": "modify_file",
"name": "edit_file",
"description": (
"准备替换文件中的指定内容。模型必须在 <<<MODIFY:path>>>…<<<END_MODIFY>>> "
"结构内输出若干 [replace:n] 补丁块,每块包含 <<OLD>> 原文 和 <<NEW>> 新内容。"
"在文件中执行精确字符串替换,要求 old_string 与文件内容精确匹配。"
),
"parameters": {
"type": "object",
"properties": {
"path": {"type": "string", "description": "目标文件的相对路径"}
"file_path": {"type": "string", "description": "目标文件的相对路径"},
"old_string": {"type": "string", "description": "需要替换的原文"},
"new_string": {"type": "string", "description": "替换后的新内容"}
},
"required": ["path"]
"required": ["file_path", "old_string", "new_string"]
}
}
}

View File

@ -1173,16 +1173,10 @@ def apply_thinking_schedule(terminal: WebTerminal):
client.skip_thinking_next_call = False
return
state = get_thinking_state(terminal)
awaiting_writes = getattr(terminal, "pending_append_request", None) or getattr(terminal, "pending_modify_request", None)
if awaiting_writes:
client.skip_thinking_next_call = True
state["suppress_next"] = False
debug_log("[Thinking] 检测到写入窗口请求,跳过思考。")
return
if state.get("suppress_next"):
client.skip_thinking_next_call = True
state["suppress_next"] = False
debug_log("[Thinking] 由于写入窗口,下一次跳过思考")
debug_log("[Thinking] 已标记跳过思考模式。")
return
if state.get("force_next"):
client.force_thinking_next_call = True

View File

@ -137,16 +137,10 @@ def apply_thinking_schedule(terminal: WebTerminal, default_interval: int, debug_
return
state = get_thinking_state(terminal)
awaiting_writes = getattr(terminal, "pending_append_request", None) or getattr(terminal, "pending_modify_request", None)
if awaiting_writes:
client.skip_thinking_next_call = True
state["suppress_next"] = False
debug_logger("[Thinking] 检测到写入窗口请求,跳过思考。")
return
if state.get("suppress_next"):
client.skip_thinking_next_call = True
state["suppress_next"] = False
debug_logger("[Thinking] 由于写入窗口,下一次跳过思考")
debug_logger("[Thinking] 已标记跳过思考模式。")
return
if state.get("force_next"):
client.force_thinking_next_call = True

View File

@ -1,546 +0,0 @@
from __future__ import annotations
import json
import re
from typing import Dict, List, Optional
async def finalize_pending_append(*, pending_append, append_probe_buffer: str, response_text: str, stream_completed: bool, finish_reason: Optional[str] = None, web_terminal, sender, debug_log):
    """Finalize a streamed append-to-file window after model output ends.

    Inspects the buffered stream captured between the ``<<<APPEND:path>>>``
    and ``<<<END_APPEND>>>`` markers, writes the captured content via
    ``web_terminal.file_manager.append_file`` and reports the outcome to the
    UI through ``sender('update_action', ...)``.

    Returns a ``(result, pending_append, append_probe_buffer)`` tuple:
    ``result`` summarizes the outcome (success flag, summary text, serialized
    tool content, assistant-visible content/metadata); the latter two are the
    reset streaming-state values the caller should store back.

    NOTE(review): ``stream_completed`` is accepted but not read anywhere in
    this function body — presumably kept for signature parity with the
    modify counterpart; confirm before removing.
    """
    # Default outcome; "handled" stays False when no append was pending.
    result = {
        "handled": False,
        "success": False,
        "summary": None,
        "summary_message": None,
        "tool_content": None,
        "tool_call_id": None,
        "path": None,
        "forced": False,
        "error": None,
        "assistant_content": response_text,
        "lines": 0,
        "bytes": 0,
        "finish_reason": finish_reason,
        "appended_content": "",
        "assistant_metadata": None
    }
    if not pending_append:
        return result, pending_append, append_probe_buffer
    state = pending_append
    path = state.get("path")
    tool_call_id = state.get("tool_call_id")
    buffer = state.get("buffer", "")
    start_marker = state.get("start_marker")
    end_marker = state.get("end_marker")
    start_idx = state.get("content_start")
    end_idx = state.get("end_index")
    display_id = state.get("display_id")
    result.update({
        "handled": True,
        "path": path,
        "tool_call_id": tool_call_id,
        "display_id": display_id
    })
    # Guard: the pending state must carry both a target path and a tool-call id.
    if path is None or tool_call_id is None:
        error_msg = "append_to_file 状态不完整缺少路径或ID。"
        debug_log(error_msg)
        result["error"] = error_msg
        result["summary_message"] = error_msg
        result["tool_content"] = json.dumps({
            "success": False,
            "error": error_msg
        }, ensure_ascii=False)
        if display_id:
            sender('update_action', {
                'id': display_id,
                'status': 'failed',
                'preparing_id': tool_call_id,
                'message': error_msg
            })
        pending_append = None
        return result, pending_append, append_probe_buffer
    # Guard: the start marker was never detected in the stream.
    if start_idx is None:
        error_msg = f"未检测到格式正确的开始标识 {start_marker}"
        debug_log(error_msg)
        result["error"] = error_msg
        result["summary_message"] = error_msg
        result["tool_content"] = json.dumps({
            "success": False,
            "path": path,
            "error": error_msg
        }, ensure_ascii=False)
        if display_id:
            sender('update_action', {
                'id': display_id,
                'status': 'failed',
                'preparing_id': tool_call_id,
                'message': error_msg
            })
        pending_append = None
        return result, pending_append, append_probe_buffer
    forced = False
    if end_idx is None:
        # No end marker seen: force-close the window at the next "<<<"
        # occurrence, or at the end of the whole buffer.
        forced = True
        remaining = buffer[start_idx:]
        next_marker = remaining.find("<<<", len(end_marker))
        if next_marker != -1:
            end_idx = start_idx + next_marker
        else:
            end_idx = len(buffer)
    content = buffer[start_idx:end_idx]
    # Strip the single newline that directly follows the start marker.
    if content.startswith('\n'):
        content = content[1:]
    if not content:
        error_msg = "未检测到需要追加的内容,请严格按照<<<APPEND:path>>>...<<<END_APPEND>>>格式输出。"
        debug_log(error_msg)
        result["error"] = error_msg
        result["forced"] = forced
        result["tool_content"] = json.dumps({
            "success": False,
            "path": path,
            "error": error_msg
        }, ensure_ascii=False)
        if display_id:
            sender('update_action', {
                'id': display_id,
                'status': 'failed',
                'preparing_id': tool_call_id,
                'message': error_msg
            })
        pending_append = None
        return result, pending_append, append_probe_buffer
    # Rebuild the assistant-visible message: markers plus captured content.
    # The end marker is only echoed when it was genuinely seen (not forced).
    assistant_message_lines = []
    if start_marker:
        assistant_message_lines.append(start_marker)
    assistant_message_lines.append(content)
    if not forced and end_marker:
        assistant_message_lines.append(end_marker)
    assistant_message_text = "\n".join(assistant_message_lines)
    result["assistant_content"] = assistant_message_text
    assistant_metadata = {
        "append_payload": {
            "path": path,
            "tool_call_id": tool_call_id,
            "forced": forced,
            "has_end_marker": not forced
        }
    }
    result["assistant_metadata"] = assistant_metadata
    write_result = web_terminal.file_manager.append_file(path, content)
    if write_result.get("success"):
        bytes_written = len(content.encode('utf-8'))
        line_count = content.count('\n')
        # A trailing partial line (no final newline) still counts as a line.
        if content and not content.endswith('\n'):
            line_count += 1
        summary = f"已向 {path} 追加 {line_count} 行({bytes_written} 字节)"
        if forced:
            summary += "。未检测到 <<<END_APPEND>>> 标记,系统已在流结束处完成写入。如内容未完成,请重新调用 append_to_file 并按标准格式补充;如已完成,可继续后续步骤。"
        result.update({
            "success": True,
            "summary": summary,
            "summary_message": summary,
            "forced": forced,
            "lines": line_count,
            "bytes": bytes_written,
            "appended_content": content,
            "tool_content": json.dumps({
                "success": True,
                "path": path,
                "lines": line_count,
                "bytes": bytes_written,
                "forced": forced,
                "message": summary,
                "finish_reason": finish_reason
            }, ensure_ascii=False)
        })
        assistant_meta_payload = result["assistant_metadata"]["append_payload"]
        assistant_meta_payload["lines"] = line_count
        assistant_meta_payload["bytes"] = bytes_written
        assistant_meta_payload["success"] = True
        summary_payload = {
            "success": True,
            "path": path,
            "lines": line_count,
            "bytes": bytes_written,
            "forced": forced,
            "message": summary
        }
        if display_id:
            sender('update_action', {
                'id': display_id,
                'status': 'completed',
                'result': summary_payload,
                'preparing_id': tool_call_id,
                'message': summary
            })
        debug_log(f"追加写入完成: {summary}")
    else:
        error_msg = write_result.get("error", "追加写入失败")
        result.update({
            "error": error_msg,
            "summary_message": error_msg,
            "forced": forced,
            "appended_content": content,
            "tool_content": json.dumps({
                "success": False,
                "path": path,
                "error": error_msg,
                "finish_reason": finish_reason
            }, ensure_ascii=False)
        })
        debug_log(f"追加写入失败: {error_msg}")
        if result["assistant_metadata"]:
            assistant_meta_payload = result["assistant_metadata"]["append_payload"]
            assistant_meta_payload["lines"] = content.count('\n') + (0 if content.endswith('\n') or not content else 1)
            assistant_meta_payload["bytes"] = len(content.encode('utf-8'))
            assistant_meta_payload["success"] = False
        failure_payload = {
            "success": False,
            "path": path,
            "error": error_msg,
            "forced": forced
        }
        if display_id:
            sender('update_action', {
                'id': display_id,
                'status': 'completed',
                'result': failure_payload,
                'preparing_id': tool_call_id,
                'message': error_msg
            })
    # Reset the streaming state both for the caller and on the terminal object.
    pending_append = None
    append_probe_buffer = ""
    if hasattr(web_terminal, "pending_append_request"):
        web_terminal.pending_append_request = None
    return result, pending_append, append_probe_buffer
async def finalize_pending_modify(*, pending_modify, modify_probe_buffer: str, response_text: str, stream_completed: bool, finish_reason: Optional[str] = None, web_terminal, sender, debug_log):
    """Finalize a streamed modify-file window after model output ends.

    Parses the buffered stream captured between the ``<<<MODIFY:path>>>`` and
    ``<<<END_MODIFY>>>`` markers into ``[replace:n] ... [/replace]`` patch
    blocks (each carrying ``<<OLD>>``/``<<NEW>>`` segments terminated by
    ``<<END>>``), applies the well-formed blocks via
    ``web_terminal.file_manager.apply_modify_blocks`` and reports per-block
    outcomes plus a human-readable summary to the UI through ``sender``.

    Returns a ``(result, pending_modify, modify_probe_buffer)`` tuple:
    ``result`` carries the outcome (success flag, per-block details,
    serialized tool content, assistant metadata); the latter two are the
    reset streaming-state values the caller should store back.

    NOTE(review): ``stream_completed`` is accepted but not read anywhere in
    this function body — confirm whether it is kept only for signature
    parity with ``finalize_pending_append``.
    """
    # Default outcome; "handled" stays False when no modify was pending.
    result = {
        "handled": False,
        "success": False,
        "path": None,
        "tool_call_id": None,
        "display_id": None,
        "total_blocks": 0,
        "completed_blocks": [],
        "failed_blocks": [],
        "forced": False,
        "details": [],
        "error": None,
        "assistant_content": response_text,
        "assistant_metadata": None,
        "tool_content": None,
        "summary_message": None,
        "finish_reason": finish_reason
    }
    if not pending_modify:
        return result, pending_modify, modify_probe_buffer
    state = pending_modify
    path = state.get("path")
    tool_call_id = state.get("tool_call_id")
    display_id = state.get("display_id")
    start_marker = state.get("start_marker")
    end_marker = state.get("end_marker")
    buffer = state.get("buffer", "")
    raw_buffer = state.get("raw_buffer", "")
    end_index = state.get("end_index")
    result.update({
        "handled": True,
        "path": path,
        "tool_call_id": tool_call_id,
        "display_id": display_id
    })
    # Guard: the opening <<<MODIFY:path>>> marker was never seen in the stream.
    if not state.get("start_seen"):
        error_msg = "未检测到格式正确的 <<<MODIFY:path>>> 标记。"
        debug_log(error_msg)
        result["error"] = error_msg
        result["summary_message"] = error_msg
        result["tool_content"] = json.dumps({
            "success": False,
            "path": path,
            "error": error_msg,
            "finish_reason": finish_reason
        }, ensure_ascii=False)
        if display_id:
            sender('update_action', {
                'id': display_id,
                'status': 'failed',
                'preparing_id': tool_call_id,
                'message': error_msg
            })
        if hasattr(web_terminal, "pending_modify_request"):
            web_terminal.pending_modify_request = None
        pending_modify = None
        modify_probe_buffer = ""
        return result, pending_modify, modify_probe_buffer
    # forced == True: end marker never arrived, apply everything buffered so far.
    forced = end_index is None
    apply_text = buffer if forced else buffer[:end_index]
    raw_content = raw_buffer if forced else raw_buffer[:len(start_marker) + end_index + len(end_marker)]
    if raw_content:
        result["assistant_content"] = raw_content
    blocks_info = []
    block_reports = {}
    detected_indices = set()
    # One patch block: [replace:n] <body> [/replace]; DOTALL so bodies may span lines.
    block_pattern = re.compile(r"\[replace:(\d+)\](.*?)\[/replace\]", re.DOTALL)
    structure_warnings: List[str] = []
    structure_detail_entries: List[Dict] = []

    def record_structure_warning(message: str, hint: Optional[str] = None):
        """Record a structural defect (once per message) for more specific feedback."""
        if message in structure_warnings:
            return
        structure_warnings.append(message)
        structure_detail_entries.append({
            "index": 0,
            "status": "failed",
            "reason": message,
            "removed_lines": 0,
            "added_lines": 0,
            "hint": hint or "请严格按照模板输出:[replace:n] + <<OLD>>/<<NEW>> + [/replace],并使用 <<<END_MODIFY>>> 收尾。"
        })

    def extract_segment(body: str, tag: str):
        """Extract the <<tag>>...<<END>> segment from a patch body.

        Returns ``(segment, None)`` on success or ``(None, reason)`` when a
        marker is missing. A single newline (LF or CRLF) right after the
        opening marker is consumed so it is not part of the segment.
        """
        marker = f"<<{tag}>>"
        end_tag = "<<END>>"
        start_pos = body.find(marker)
        if start_pos == -1:
            return None, f"缺少 {marker}"
        start_pos += len(marker)
        if body[start_pos:start_pos+2] == "\r\n":
            start_pos += 2
        elif body[start_pos:start_pos+1] == "\n":
            start_pos += 1
        end_pos = body.find(end_tag, start_pos)
        if end_pos == -1:
            return None, f"缺少 {end_tag}"
        segment = body[start_pos:end_pos]
        return segment, None

    # Parse every [replace:n] block; duplicates of the same index are ignored.
    for match in block_pattern.finditer(apply_text):
        try:
            index = int(match.group(1))
        except ValueError:
            continue
        body = match.group(2)
        if index in detected_indices:
            continue
        detected_indices.add(index)
        block_reports[index] = {
            "index": index,
            "status": "pending",
            "reason": None,
            "removed_lines": 0,
            "added_lines": 0,
            "hint": None
        }
        old_content, old_error = extract_segment(body, "OLD")
        new_content, new_error = extract_segment(body, "NEW")
        if old_error or new_error:
            reason = old_error or new_error
            block_reports[index]["status"] = "failed"
            block_reports[index]["reason"] = reason
        blocks_info.append({
            "index": index,
            "old": old_content,
            "new": new_content,
            "error": old_error or new_error
        })
    # No complete block parsed: probe for partial markers to give targeted hints.
    if not blocks_info:
        has_replace_start = bool(re.search(r"\[replace:\s*\d+\]", apply_text))
        has_replace_end = "[/replace]" in apply_text
        has_old_tag = "<<OLD>>" in apply_text
        has_new_tag = "<<NEW>>" in apply_text
        if has_replace_start and not has_replace_end:
            record_structure_warning("检测到 [replace:n] 标记但缺少对应的 [/replace] 结束标记。")
        if has_replace_end and not has_replace_start:
            record_structure_warning("检测到 [/replace] 结束标记但缺少对应的 [replace:n] 起始标记。")
        old_tags = len(re.findall(r"<<OLD>>", apply_text))
        completed_old_tags = len(re.findall(r"<<OLD>>[\s\S]*?<<END>>", apply_text))
        if old_tags and completed_old_tags < old_tags:
            record_structure_warning("检测到 <<OLD>> 段落但未看到对应的 <<END>> 结束标记。")
        new_tags = len(re.findall(r"<<NEW>>", apply_text))
        completed_new_tags = len(re.findall(r"<<NEW>>[\s\S]*?<<END>>", apply_text))
        if new_tags and completed_new_tags < new_tags:
            record_structure_warning("检测到 <<NEW>> 段落但未看到对应的 <<END>> 结束标记。")
        if (has_replace_start or has_replace_end or has_old_tag or has_new_tag) and not structure_warnings:
            record_structure_warning("检测到部分补丁标记,但整体结构不完整,请严格按照模板填写所有标记。")
    total_blocks = len(blocks_info)
    result["total_blocks"] = total_blocks
    if forced:
        debug_log("未检测到 <<<END_MODIFY>>>,将在流结束处执行已识别的修改块。")
        result["forced"] = True
    # Only structurally complete blocks are forwarded to the file manager.
    blocks_to_apply = [
        {"index": block["index"], "old": block["old"], "new": block["new"]}
        for block in blocks_info
        if block["error"] is None and block["old"] is not None and block["new"] is not None
    ]
    # Record blocks whose patch format was malformed.
    for block in blocks_info:
        if block["error"]:
            idx = block["index"]
            block_reports[idx]["status"] = "failed"
            block_reports[idx]["reason"] = block["error"]
            block_reports[idx]["hint"] = "请检查补丁块的 OLD/NEW 标记是否完整,必要时复用 terminal_snapshot 或终端命令重新调整。"
    apply_result = {}
    if blocks_to_apply:
        apply_result = web_terminal.file_manager.apply_modify_blocks(path, blocks_to_apply)
    else:
        apply_result = {"success": False, "completed": [], "failed": [], "results": [], "write_performed": False, "error": None}
    # Merge per-block apply results back into the reports.
    block_result_map = {item["index"]: item for item in apply_result.get("results", [])}
    for block in blocks_info:
        idx = block["index"]
        report = block_reports.get(idx)
        if report is None:
            continue
        if report["status"] == "failed":
            # Already marked failed during parsing; keep that verdict.
            continue
        block_apply = block_result_map.get(idx)
        if not block_apply:
            report["status"] = "failed"
            report["reason"] = "未执行,可能未找到匹配原文"
            report["hint"] = report.get("hint") or "请确认 OLD 文本与文件内容完全一致;若多次失败,可改用终端命令/Python 进行精准替换。"
            continue
        status = block_apply.get("status")
        report["removed_lines"] = block_apply.get("removed_lines", 0)
        report["added_lines"] = block_apply.get("added_lines", 0)
        if block_apply.get("hint"):
            report["hint"] = block_apply.get("hint")
        if status == "success":
            report["status"] = "completed"
        elif status == "not_found":
            report["status"] = "failed"
            report["reason"] = block_apply.get("reason") or "未找到匹配的原文"
            if not report.get("hint"):
                report["hint"] = "请使用 terminal_snapshot/grep -n 校验原文,或在说明后改用 run_command/python 精确替换。"
        else:
            report["status"] = "failed"
            report["reason"] = block_apply.get("reason") or "替换失败"
            if not report.get("hint"):
                report["hint"] = block_apply.get("hint") or "若多次尝试仍失败,可考虑利用终端命令或 Python 小脚本完成此次修改。"
    completed_blocks = sorted([idx for idx, rep in block_reports.items() if rep["status"] == "completed"])
    failed_blocks = sorted([idx for idx, rep in block_reports.items() if rep["status"] != "completed"])
    result["completed_blocks"] = completed_blocks
    result["failed_blocks"] = failed_blocks
    details = sorted(block_reports.values(), key=lambda x: x["index"])
    if structure_detail_entries:
        # Structure-level warnings (index 0) are listed before per-block details.
        details = structure_detail_entries + details
    result["details"] = details
    # Build the human-readable summary.
    summary_parts = []
    if total_blocks == 0:
        summary_parts.append("未检测到有效的修改块,未执行任何修改。")
        summary_parts.extend(structure_warnings)
    else:
        if not completed_blocks and failed_blocks:
            summary_parts.append(f"共检测到 {total_blocks} 个修改块,全部未执行。")
        elif completed_blocks and not failed_blocks:
            summary_parts.append(f"{total_blocks} 个修改块全部完成。")
        else:
            summary_parts.append(
                f"共检测到 {total_blocks} 个修改块,其中成功 {len(completed_blocks)} 个,失败 {len(failed_blocks)} 个。"
            )
    if forced:
        summary_parts.append("未检测到 <<<END_MODIFY>>> 标记,系统已在流结束处执行补丁。")
    if apply_result.get("error"):
        summary_parts.append(apply_result["error"])
    matching_note = "提示:补丁匹配基于完整文本,包含注释和空白符,请确保 <<<OLD>>> 段落与文件内容逐字一致。如果修改成功,请忽略,如果失败,请明确原文后再次尝试。"
    summary_parts.append(matching_note)
    summary_message = " ".join(summary_parts).strip()
    result["summary_message"] = summary_message
    # Overall success: at least one block applied, none failed, no manager error.
    result["success"] = bool(completed_blocks) and not failed_blocks and apply_result.get("error") is None
    tool_payload = {
        "success": result["success"],
        "path": path,
        "total_blocks": total_blocks,
        "completed": completed_blocks,
        "failed": [
            {
                "index": rep["index"],
                "reason": rep.get("reason"),
                "hint": rep.get("hint")
            }
            for rep in result["details"] if rep["status"] != "completed"
        ],
        "forced": forced,
        "message": summary_message,
        "finish_reason": finish_reason,
        "details": result["details"]
    }
    if apply_result.get("error"):
        tool_payload["error"] = apply_result["error"]
    result["tool_content"] = json.dumps(tool_payload, ensure_ascii=False)
    result["assistant_metadata"] = {
        "modify_payload": {
            "path": path,
            "total_blocks": total_blocks,
            "completed": completed_blocks,
            "failed": failed_blocks,
            "forced": forced,
            "details": result["details"]
        }
    }
    if display_id:
        sender('update_action', {
            'id': display_id,
            'status': 'completed' if result["success"] else 'failed',
            'result': tool_payload,
            'preparing_id': tool_call_id,
            'message': summary_message
        })
    # Reset the streaming state both for the caller and on the terminal object.
    pending_modify = None
    modify_probe_buffer = ""
    if hasattr(web_terminal, "pending_modify_request"):
        web_terminal.pending_modify_request = None
    return result, pending_modify, modify_probe_buffer

View File

@ -3,19 +3,17 @@ from __future__ import annotations
import asyncio
import json
import time
import re
from typing import Any, Dict, Optional
from config.model_profiles import get_model_profile
from .utils_common import debug_log, brief_log, log_backend_chunk
from .chat_flow_runner_helpers import extract_intent_from_partial
from .chat_flow_pending_writes import finalize_pending_append, finalize_pending_modify
from .chat_flow_task_support import wait_retry_delay, cancel_pending_tools
from .state import get_stop_flag, clear_stop_flag
async def run_streaming_attempts(*, web_terminal, messages, tools, sender, client_sid: str, username: str, conversation_id: Optional[str], current_iteration: int, max_api_retries: int, retry_delay_seconds: int, pending_append, append_probe_buffer: str, pending_modify, modify_probe_buffer: str, detected_tool_intent: Dict[str, str], full_response: str, tool_calls: list, current_thinking: str, detected_tools: Dict[str, str], last_usage_payload, in_thinking: bool, thinking_started: bool, thinking_ended: bool, text_started: bool, text_has_content: bool, text_streaming: bool, text_chunk_index: int, last_text_chunk_time, chunk_count: int, reasoning_chunks: int, content_chunks: int, tool_chunks: int, append_result: Dict[str, Any], modify_result: Dict[str, Any], last_finish_reason: Optional[str], accumulated_response: str) -> Dict[str, Any]:
async def run_streaming_attempts(*, web_terminal, messages, tools, sender, client_sid: str, username: str, conversation_id: Optional[str], current_iteration: int, max_api_retries: int, retry_delay_seconds: int, detected_tool_intent: Dict[str, str], full_response: str, tool_calls: list, current_thinking: str, detected_tools: Dict[str, str], last_usage_payload, in_thinking: bool, thinking_started: bool, thinking_ended: bool, text_started: bool, text_has_content: bool, text_streaming: bool, text_chunk_index: int, last_text_chunk_time, chunk_count: int, reasoning_chunks: int, content_chunks: int, tool_chunks: int, last_finish_reason: Optional[str], accumulated_response: str) -> Dict[str, Any]:
api_error = None
for api_attempt in range(max_api_retries + 1):
api_error = None
@ -37,13 +35,8 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
reasoning_chunks = 0
content_chunks = 0
tool_chunks = 0
append_result = {"handled": False}
modify_result = {"handled": False}
last_finish_reason = None
append_break_triggered = False
modify_break_triggered = False
# 收集流式响应
async for chunk in web_terminal.api_client.chat(messages, tools, stream=True):
chunk_count += 1
@ -54,10 +47,6 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info
if stop_requested:
debug_log(f"检测到停止请求,中断流处理")
if pending_append:
append_result, pending_append, append_probe_buffer = await finalize_pending_append(pending_append=pending_append, append_probe_buffer=append_probe_buffer, response_text=full_response, stream_completed=False, finish_reason="user_stop", web_terminal=web_terminal, sender=sender, debug_log=debug_log)
if pending_modify:
modify_result, pending_modify, modify_probe_buffer = await finalize_pending_modify(pending_modify=pending_modify, modify_probe_buffer=modify_probe_buffer, response_text=full_response, stream_completed=False, finish_reason="user_stop", web_terminal=web_terminal, sender=sender, debug_log=debug_log)
cancel_pending_tools(tool_calls_list=tool_calls, sender=sender, messages=messages)
sender('task_stopped', {
'message': '命令执行被用户取消',
@ -83,13 +72,7 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
"reasoning_chunks": reasoning_chunks,
"content_chunks": content_chunks,
"tool_chunks": tool_chunks,
"append_result": append_result,
"modify_result": modify_result,
"last_finish_reason": last_finish_reason,
"pending_append": pending_append,
"append_probe_buffer": append_probe_buffer,
"pending_modify": pending_modify,
"modify_probe_buffer": modify_probe_buffer,
"accumulated_response": accumulated_response,
}
@ -158,168 +141,6 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
sender('thinking_end', {'full_content': current_thinking})
await asyncio.sleep(0.1)
expecting_modify = bool(pending_modify) or bool(getattr(web_terminal, "pending_modify_request", None))
expecting_append = bool(pending_append) or bool(getattr(web_terminal, "pending_append_request", None))
if pending_modify:
if not pending_modify.get("start_seen"):
probe_buffer = pending_modify.get("probe_buffer", "") + content
if len(probe_buffer) > 10000:
probe_buffer = probe_buffer[-10000:]
marker = pending_modify.get("start_marker")
marker_index = probe_buffer.find(marker)
if marker_index == -1:
pending_modify["probe_buffer"] = probe_buffer
continue
after_marker = marker_index + len(marker)
remainder = probe_buffer[after_marker:]
pending_modify["buffer"] = remainder
pending_modify["raw_buffer"] = marker + remainder
pending_modify["start_seen"] = True
pending_modify["detected_blocks"] = set()
pending_modify["probe_buffer"] = ""
if pending_modify.get("display_id"):
sender('update_action', {
'id': pending_modify["display_id"],
'status': 'running',
'preparing_id': pending_modify.get("tool_call_id"),
'message': f"正在修改 {pending_modify['path']}..."
})
else:
pending_modify["buffer"] += content
pending_modify["raw_buffer"] += content
if pending_modify.get("start_seen"):
block_text = pending_modify["buffer"]
for match in re.finditer(r"\[replace:(\d+)\]", block_text):
try:
block_index = int(match.group(1))
except ValueError:
continue
detected_blocks = pending_modify.setdefault("detected_blocks", set())
if block_index not in detected_blocks:
detected_blocks.add(block_index)
if pending_modify.get("display_id"):
sender('update_action', {
'id': pending_modify["display_id"],
'status': 'running',
'preparing_id': pending_modify.get("tool_call_id"),
'message': f"正在对 {pending_modify['path']} 进行第 {block_index} 处修改..."
})
if pending_modify.get("start_seen"):
end_pos = pending_modify["buffer"].find(pending_modify["end_marker"])
if end_pos != -1:
pending_modify["end_index"] = end_pos
modify_break_triggered = True
debug_log("检测到<<<END_MODIFY>>>,即将终止流式输出并应用修改")
break
continue
elif expecting_modify:
modify_probe_buffer += content
if len(modify_probe_buffer) > 10000:
modify_probe_buffer = modify_probe_buffer[-10000:]
marker_match = re.search(r"<<<MODIFY:\s*([\s\S]*?)>>>", modify_probe_buffer)
if marker_match:
detected_raw_path = marker_match.group(1)
detected_path = detected_raw_path.strip()
marker_full = marker_match.group(0)
after_marker_index = modify_probe_buffer.find(marker_full) + len(marker_full)
remainder = modify_probe_buffer[after_marker_index:]
modify_probe_buffer = ""
if not detected_path:
debug_log("检测到 MODIFY 起始标记但路径为空,忽略。")
continue
pending_modify = {
"path": detected_path,
"tool_call_id": None,
"buffer": remainder,
"raw_buffer": marker_full + remainder,
"start_marker": marker_full,
"end_marker": "<<<END_MODIFY>>>",
"start_seen": True,
"end_index": None,
"display_id": None,
"detected_blocks": set()
}
if hasattr(web_terminal, "pending_modify_request"):
web_terminal.pending_modify_request = {"path": detected_path}
debug_log(f"直接检测到modify起始标记构建修改缓冲: {detected_path}")
end_pos = pending_modify["buffer"].find(pending_modify["end_marker"])
if end_pos != -1:
pending_modify["end_index"] = end_pos
modify_break_triggered = True
debug_log("检测到<<<END_MODIFY>>>,即将终止流式输出并应用修改")
break
continue
if pending_append:
pending_append["buffer"] += content
if pending_append.get("content_start") is None:
marker_index = pending_append["buffer"].find(pending_append["start_marker"])
if marker_index != -1:
pending_append["content_start"] = marker_index + len(pending_append["start_marker"])
debug_log(f"检测到追加起始标识: {pending_append['start_marker']}")
if pending_append.get("content_start") is not None:
end_index = pending_append["buffer"].find(
pending_append["end_marker"],
pending_append["content_start"]
)
if end_index != -1:
pending_append["end_index"] = end_index
append_break_triggered = True
debug_log("检测到<<<END_APPEND>>>,即将终止流式输出并写入文件")
break
# 继续累积追加内容
continue
elif expecting_append:
append_probe_buffer += content
# 限制缓冲区大小防止过长
if len(append_probe_buffer) > 10000:
append_probe_buffer = append_probe_buffer[-10000:]
marker_match = re.search(r"<<<APPEND:\s*([\s\S]*?)>>>", append_probe_buffer)
if marker_match:
detected_raw_path = marker_match.group(1)
detected_path = detected_raw_path.strip()
if not detected_path:
append_probe_buffer = append_probe_buffer[marker_match.end():]
continue
marker_full = marker_match.group(0)
after_marker_index = append_probe_buffer.find(marker_full) + len(marker_full)
remainder = append_probe_buffer[after_marker_index:]
append_probe_buffer = ""
pending_append = {
"path": detected_path,
"tool_call_id": None,
"buffer": remainder,
"start_marker": marker_full,
"end_marker": "<<<END_APPEND>>>",
"content_start": 0,
"end_index": None,
"display_id": None
}
if hasattr(web_terminal, "pending_append_request"):
web_terminal.pending_append_request = {"path": detected_path}
debug_log(f"直接检测到append起始标记构建追加缓冲: {detected_path}")
# 检查是否立即包含结束标记
if pending_append["buffer"]:
end_index = pending_append["buffer"].find(pending_append["end_marker"], pending_append["content_start"])
if end_index != -1:
pending_append["end_index"] = end_index
append_break_triggered = True
debug_log("检测到<<<END_APPEND>>>,即将终止流式输出并写入文件")
break
continue
if not text_started:
text_started = True
text_streaming = True
@ -327,27 +148,26 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
brief_log("模型输出了内容")
await asyncio.sleep(0.05)
if not pending_append:
full_response += content
accumulated_response += content
text_has_content = True
emit_time = time.time()
elapsed = 0.0 if last_text_chunk_time is None else emit_time - last_text_chunk_time
last_text_chunk_time = emit_time
text_chunk_index += 1
log_backend_chunk(
conversation_id,
current_iteration,
text_chunk_index,
elapsed,
len(content),
content[:32]
)
sender('text_chunk', {
'content': content,
'index': text_chunk_index,
'elapsed': elapsed
})
full_response += content
accumulated_response += content
text_has_content = True
emit_time = time.time()
elapsed = 0.0 if last_text_chunk_time is None else emit_time - last_text_chunk_time
last_text_chunk_time = emit_time
text_chunk_index += 1
log_backend_chunk(
conversation_id,
current_iteration,
text_chunk_index,
elapsed,
len(content),
content[:32]
)
sender('text_chunk', {
'content': content,
'index': text_chunk_index,
'elapsed': elapsed
})
# 收集工具调用 - 实时发送准备状态
if "tool_calls" in delta:
@ -472,13 +292,7 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
"reasoning_chunks": reasoning_chunks,
"content_chunks": content_chunks,
"tool_chunks": tool_chunks,
"append_result": append_result,
"modify_result": modify_result,
"last_finish_reason": last_finish_reason,
"pending_append": pending_append,
"append_probe_buffer": append_probe_buffer,
"pending_modify": pending_modify,
"modify_probe_buffer": modify_probe_buffer,
"accumulated_response": accumulated_response,
}
@ -546,8 +360,6 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
and not full_response
and not tool_calls
and not current_thinking
and not pending_append
and not pending_modify
)
sender('error', {
'message': error_message,
@ -590,13 +402,7 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
"reasoning_chunks": reasoning_chunks,
"content_chunks": content_chunks,
"tool_chunks": tool_chunks,
"append_result": append_result,
"modify_result": modify_result,
"last_finish_reason": last_finish_reason,
"pending_append": pending_append,
"append_probe_buffer": append_probe_buffer,
"pending_modify": pending_modify,
"modify_probe_buffer": modify_probe_buffer,
"accumulated_response": accumulated_response,
}
continue
@ -620,13 +426,7 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
"reasoning_chunks": reasoning_chunks,
"content_chunks": content_chunks,
"tool_chunks": tool_chunks,
"append_result": append_result,
"modify_result": modify_result,
"last_finish_reason": last_finish_reason,
"pending_append": pending_append,
"append_probe_buffer": append_probe_buffer,
"pending_modify": pending_modify,
"modify_probe_buffer": modify_probe_buffer,
"accumulated_response": accumulated_response,
}
break
@ -650,12 +450,6 @@ async def run_streaming_attempts(*, web_terminal, messages, tools, sender, clien
"reasoning_chunks": reasoning_chunks,
"content_chunks": content_chunks,
"tool_chunks": tool_chunks,
"append_result": append_result,
"modify_result": modify_result,
"last_finish_reason": last_finish_reason,
"pending_append": pending_append,
"append_probe_buffer": append_probe_buffer,
"pending_modify": pending_modify,
"modify_probe_buffer": modify_probe_buffer,
"accumulated_response": accumulated_response,
}

View File

@ -39,6 +39,7 @@ from modules.personalization_manager import (
THINKING_INTERVAL_MIN,
THINKING_INTERVAL_MAX,
)
from modules.skill_hint_manager import SkillHintManager
from modules.upload_security import UploadSecurityError
from modules.user_manager import UserWorkspace
from modules.usage_tracker import QUOTA_DEFAULTS
@ -62,7 +63,7 @@ from .utils_common import (
CHUNK_FRONTEND_LOG_FILE,
STREAMING_DEBUG_LOG_FILE,
)
from .security import rate_limited, format_tool_result_notice, compact_web_search_result, consume_socket_token, prune_socket_tokens, validate_csrf_request, requires_csrf_protection, get_csrf_token
from .security import rate_limited, compact_web_search_result, consume_socket_token, prune_socket_tokens, validate_csrf_request, requires_csrf_protection, get_csrf_token
from .monitor import cache_monitor_snapshot, get_cached_monitor_snapshot
from .extensions import socketio
from .state import (
@ -125,7 +126,6 @@ from .chat_flow_runtime import (
detect_malformed_tool_call,
)
from .chat_flow_pending_writes import finalize_pending_append, finalize_pending_modify
from .chat_flow_task_support import process_sub_agent_updates
from .chat_flow_tool_loop import execute_tool_calls
from .chat_flow_stream_loop import run_streaming_attempts
@ -465,7 +465,30 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
history_len_before = len(getattr(web_terminal.context_manager, "conversation_history", []) or [])
is_first_user_message = history_len_before == 0
web_terminal.context_manager.add_conversation("user", message, images=images, videos=videos)
# Skill 提示系统:检测关键词并在用户消息之后插入 system 消息
try:
personal_config = load_personalization_config(workspace.data_dir)
skill_hints_enabled = personal_config.get("skill_hints_enabled", False)
if skill_hints_enabled and message:
hint_manager = SkillHintManager()
hint_manager.set_enabled(True)
hint_messages = hint_manager.build_hint_messages(message)
# 将提示消息插入到对话历史中(在用户消息之后)
for hint_msg in hint_messages:
debug_log(f"[Skill Hints] 插入提示消息: {hint_msg['content'][:100]}")
web_terminal.context_manager.add_conversation(
"system",
hint_msg["content"]
)
# 验证插入后的消息
last_msg = web_terminal.context_manager.conversation_history[-1]
debug_log(f"[Skill Hints] 插入后验证 - role: {last_msg.get('role')}, content: {last_msg.get('content')[:100]}")
except Exception as exc:
debug_log(f"Skill hints 处理失败: {exc}")
if is_first_user_message and getattr(web_terminal, "context_manager", None):
try:
personal_config = load_personalization_config(workspace.data_dir)
@ -551,10 +574,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
max_api_retries = 4
retry_delay_seconds = 10
pending_append = None # {"path": str, "tool_call_id": str, "buffer": str, ...}
append_probe_buffer = ""
pending_modify = None # {"path": str, "tool_call_id": str, "buffer": str, ...}
modify_probe_buffer = ""
iteration = 0
while max_iterations is None or iteration < max_iterations:
@ -596,8 +615,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
reasoning_chunks = 0
content_chunks = 0
tool_chunks = 0
append_result = {"handled": False}
modify_result = {"handled": False}
last_finish_reason = None
thinking_expected = web_terminal.api_client.get_current_thinking_mode()
@ -638,10 +655,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
current_iteration=current_iteration,
max_api_retries=max_api_retries,
retry_delay_seconds=retry_delay_seconds,
pending_append=pending_append,
append_probe_buffer=append_probe_buffer,
pending_modify=pending_modify,
modify_probe_buffer=modify_probe_buffer,
detected_tool_intent=detected_tool_intent,
full_response=full_response,
tool_calls=tool_calls,
@ -660,8 +673,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
reasoning_chunks=reasoning_chunks,
content_chunks=content_chunks,
tool_chunks=tool_chunks,
append_result=append_result,
modify_result=modify_result,
last_finish_reason=last_finish_reason,
accumulated_response=accumulated_response,
)
@ -685,13 +696,7 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
reasoning_chunks = stream_result["reasoning_chunks"]
content_chunks = stream_result["content_chunks"]
tool_chunks = stream_result["tool_chunks"]
append_result = stream_result["append_result"]
modify_result = stream_result["modify_result"]
last_finish_reason = stream_result["last_finish_reason"]
pending_append = stream_result["pending_append"]
append_probe_buffer = stream_result["append_probe_buffer"]
pending_modify = stream_result["pending_modify"]
modify_probe_buffer = stream_result["modify_probe_buffer"]
accumulated_response = stream_result["accumulated_response"]
# 流结束后的处理
@ -704,11 +709,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
debug_log(f" 收集到的正文: {len(full_response)} 字符")
debug_log(f" 收集到的工具: {len(tool_calls)}")
if not append_result["handled"] and pending_append:
append_result, pending_append, append_probe_buffer = await finalize_pending_append(pending_append=pending_append, append_probe_buffer=append_probe_buffer, response_text=full_response, stream_completed=True, finish_reason=last_finish_reason, web_terminal=web_terminal, sender=sender, debug_log=debug_log)
if not modify_result["handled"] and pending_modify:
modify_result, pending_modify, modify_probe_buffer = await finalize_pending_modify(pending_modify=pending_modify, modify_probe_buffer=modify_probe_buffer, response_text=full_response, stream_completed=True, finish_reason=last_finish_reason, web_terminal=web_terminal, sender=sender, debug_log=debug_log)
# 结束未完成的流
if in_thinking and not thinking_ended:
sender('thinking_end', {'full_content': current_thinking})
@ -716,7 +716,7 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
# 确保text_end事件被发送
if text_started and text_has_content and not append_result["handled"] and not modify_result["handled"]:
if text_started and text_has_content:
debug_log(f"发送text_end事件完整内容长度: {len(full_response)}")
sender('text_end', {'full_content': full_response})
await asyncio.sleep(0.1)
@ -725,159 +725,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
if full_response.strip():
debug_log(f"流式文本内容长度: {len(full_response)} 字符")
if append_result["handled"]:
append_metadata = append_result.get("assistant_metadata")
append_content_text = append_result.get("assistant_content")
if append_content_text:
web_terminal.context_manager.add_conversation(
"assistant",
append_content_text,
metadata=append_metadata
)
debug_log("💾 增量保存:追加正文快照")
payload_info = append_metadata.get("append_payload") if append_metadata else {}
sender('append_payload', {
'path': payload_info.get("path") or append_result.get("path"),
'forced': payload_info.get("forced", False),
'lines': payload_info.get("lines"),
'bytes': payload_info.get("bytes"),
'tool_call_id': payload_info.get("tool_call_id") or append_result.get("tool_call_id"),
'success': payload_info.get("success", append_result.get("success", False)),
'conversation_id': conversation_id
})
if append_result["tool_content"]:
tool_call_id = append_result.get("tool_call_id") or f"append_{int(time.time() * 1000)}"
system_notice = format_tool_result_notice("append_to_file", tool_call_id, append_result["tool_content"])
web_terminal.context_manager.add_conversation("system", system_notice)
append_result["tool_call_id"] = tool_call_id
debug_log("💾 增量保存append_to_file 工具结果system 通知)")
finish_reason = append_result.get("finish_reason")
path_for_prompt = append_result.get("path")
need_follow_prompt = (
finish_reason == "length" or
append_result.get("forced") or
not append_result.get("success")
)
if need_follow_prompt and path_for_prompt:
prompt_lines = [
f"append_to_file 在处理 {path_for_prompt} 时未完成,需要重新发起写入。"
]
if finish_reason == "length":
prompt_lines.append(
"上一次输出达到系统单次输出上限,已写入的内容已保存。"
)
if append_result.get("forced"):
prompt_lines.append(
"收到的内容缺少 <<<END_APPEND>>> 标记,系统依据流式结束位置落盘。"
)
if not append_result.get("success"):
prompt_lines.append("系统未能识别有效的追加标记。")
prompt_lines.append(
"请再次调用 append_to_file 工具获取新的写入窗口,并在工具调用的输出中遵循以下格式:"
)
prompt_lines.append(f"<<<APPEND:{path_for_prompt}>>>")
prompt_lines.append("...填写剩余正文,如内容已完成可留空...")
prompt_lines.append("<<<END_APPEND>>>")
prompt_lines.append("不要在普通回复中粘贴上述标记,必须通过 append_to_file 工具发送。")
follow_prompt = "\n".join(prompt_lines)
messages.append({
"role": "system",
"content": follow_prompt
})
web_terminal.context_manager.add_conversation("system", follow_prompt)
debug_log("已注入追加任务提示")
if append_result["handled"] and append_result.get("forced") and append_result.get("success"):
mark_force_thinking(web_terminal, reason="append_forced_finish")
if append_result["handled"] and not append_result.get("success"):
sender('system_message', {
'content': f'⚠️ 追加写入失败:{append_result.get("error")}'
})
maybe_mark_failure_from_message(web_terminal, f'⚠️ 追加写入失败:{append_result.get("error")}')
mark_force_thinking(web_terminal, reason="append_failed")
if modify_result["handled"]:
modify_metadata = modify_result.get("assistant_metadata")
modify_content_text = modify_result.get("assistant_content")
if modify_content_text:
web_terminal.context_manager.add_conversation(
"assistant",
modify_content_text,
metadata=modify_metadata
)
debug_log("💾 增量保存:修改正文快照")
payload_info = modify_metadata.get("modify_payload") if modify_metadata else {}
sender('modify_payload', {
'path': payload_info.get("path") or modify_result.get("path"),
'total': payload_info.get("total_blocks") or modify_result.get("total_blocks"),
'completed': payload_info.get("completed") or modify_result.get("completed_blocks"),
'failed': payload_info.get("failed") or modify_result.get("failed_blocks"),
'forced': payload_info.get("forced", modify_result.get("forced", False)),
'success': modify_result.get("success", False),
'conversation_id': conversation_id
})
if modify_result["tool_content"]:
tool_call_id = modify_result.get("tool_call_id") or f"modify_{int(time.time() * 1000)}"
system_notice = format_tool_result_notice("modify_file", tool_call_id, modify_result["tool_content"])
web_terminal.context_manager.add_conversation("system", system_notice)
modify_result["tool_call_id"] = tool_call_id
debug_log("💾 增量保存modify_file 工具结果system 通知)")
path_for_prompt = modify_result.get("path")
failed_blocks = modify_result.get("failed_blocks") or []
need_follow_prompt = modify_result.get("forced") or bool(failed_blocks)
if need_follow_prompt and path_for_prompt:
prompt_lines = [
f"modify_file 在处理 {path_for_prompt} 时未完成,需要重新发起补丁。"
]
if modify_result.get("forced"):
prompt_lines.append(
"刚才的内容缺少 <<<END_MODIFY>>> 标记,系统仅应用了已识别的部分。"
)
if failed_blocks:
failed_text = "".join(str(idx) for idx in failed_blocks)
prompt_lines.append(f"以下补丁未成功:第 {failed_text} 处。")
prompt_lines.append(
"请再次调用 modify_file 工具,并在新的工具调用中按以下模板提供完整补丁:"
)
prompt_lines.append(f"<<<MODIFY:{path_for_prompt}>>>")
prompt_lines.append("[replace:序号]")
prompt_lines.append("<<OLD>>")
prompt_lines.append("...原文(必须逐字匹配,包含全部缩进、空格和换行)...")
prompt_lines.append("<<END>>")
prompt_lines.append("<<NEW>>")
prompt_lines.append("...新内容,可留空表示清空,注意保持结构完整...")
prompt_lines.append("<<END>>")
prompt_lines.append("[/replace]")
prompt_lines.append("<<<END_MODIFY>>>")
prompt_lines.append("请勿在普通回复中直接粘贴补丁,必须通过 modify_file 工具发送。")
follow_prompt = "\n".join(prompt_lines)
messages.append({
"role": "system",
"content": follow_prompt
})
web_terminal.context_manager.add_conversation("system", follow_prompt)
debug_log("已注入修改任务提示")
if modify_result["handled"] and modify_result.get("failed_blocks"):
mark_force_thinking(web_terminal, reason="modify_partial_failure")
if modify_result["handled"] and modify_result.get("forced") and modify_result.get("success"):
mark_force_thinking(web_terminal, reason="modify_forced_finish")
if modify_result["handled"] and not modify_result.get("success"):
error_message = modify_result.get("summary_message") or modify_result.get("error") or "修改操作未成功,请根据提示重新执行。"
sender('system_message', {
'content': f'⚠️ 修改操作存在未完成的内容:{error_message}'
})
maybe_mark_failure_from_message(web_terminal, f'⚠️ 修改操作存在未完成的内容:{error_message}')
mark_force_thinking(web_terminal, reason="modify_failed")
if web_terminal.api_client.last_call_used_thinking and current_thinking:
web_terminal.api_client.current_task_thinking = current_thinking or ""
if web_terminal.api_client.current_task_first_call:
@ -885,7 +732,7 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
update_thinking_after_call(web_terminal)
# 检测是否有格式错误的工具调用
if not tool_calls and full_response and AUTO_FIX_TOOL_CALL and not append_result["handled"] and not modify_result["handled"]:
if not tool_calls and full_response and AUTO_FIX_TOOL_CALL:
if detect_malformed_tool_call(full_response):
auto_fix_attempts += 1
@ -919,10 +766,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
if full_response:
assistant_content_parts.append(full_response)
elif append_result["handled"] and append_result["assistant_content"]:
assistant_content_parts.append(append_result["assistant_content"])
elif modify_result["handled"] and modify_result.get("assistant_content"):
assistant_content_parts.append(modify_result["assistant_content"])
assistant_content = "\n".join(assistant_content_parts) if assistant_content_parts else ""
@ -949,35 +792,6 @@ async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspac
text_started = False
text_has_content = False
full_response = ""
if append_result["handled"] and append_result.get("tool_content"):
tool_call_id = append_result.get("tool_call_id") or f"append_{int(time.time() * 1000)}"
system_notice = format_tool_result_notice("append_to_file", tool_call_id, append_result["tool_content"])
messages.append({
"role": "system",
"content": system_notice
})
append_result["tool_call_id"] = tool_call_id
debug_log("已将 append_to_file 工具结果以 system 形式追加到对话上下文")
if modify_result["handled"] and modify_result.get("tool_content"):
tool_call_id = modify_result.get("tool_call_id") or f"modify_{int(time.time() * 1000)}"
system_notice = format_tool_result_notice("modify_file", tool_call_id, modify_result["tool_content"])
messages.append({
"role": "system",
"content": system_notice
})
modify_result["tool_call_id"] = tool_call_id
debug_log("已将 modify_file 工具结果以 system 形式追加到对话上下文")
force_continue = append_result["handled"] or modify_result["handled"]
if force_continue:
if append_result["handled"]:
debug_log("append_to_file 已处理,继续下一轮以让模型返回确认回复")
elif modify_result["handled"]:
debug_log("modify_file 已处理,继续下一轮以让模型返回确认回复")
else:
debug_log("补丁处理完成,继续下一轮以获取模型回复")
continue
if not tool_calls:
debug_log("没有工具调用,结束迭代")

View File

@ -28,7 +28,7 @@ stop_flags: Dict[str, Dict[str, Any]] = {}
active_polling_tasks: Dict[str, bool] = {} # conversation_id -> is_polling
# 监控/限流/用量
MONITOR_FILE_TOOLS = {'append_to_file', 'modify_file', 'write_file', 'edit_file'}
MONITOR_FILE_TOOLS = {'write_file', 'edit_file'}
MONITOR_MEMORY_TOOLS = {'update_memory'}
MONITOR_SNAPSHOT_CHAR_LIMIT = 60000
MONITOR_MEMORY_ENTRY_LIMIT = 256

View File

@ -94,8 +94,6 @@ const appOptions = {
chatAppendTextChunk: 'appendTextChunk',
chatCompleteTextAction: 'completeText',
chatAddSystemMessage: 'addSystemMessage',
chatAddAppendPayloadAction: 'addAppendPayloadAction',
chatAddModifyPayloadAction: 'addModifyPayloadAction',
chatEnsureAssistantMessage: 'ensureAssistantMessage'
}),
...mapActions(useInputStore, {

View File

@ -202,47 +202,8 @@ export const historyMethods = {
}
// 处理普通文本内容(移除思考标签后的内容)
const metadata = message.metadata || {};
const appendPayloadMeta = metadata.append_payload;
const modifyPayloadMeta = metadata.modify_payload;
const isAppendMessage = message.name === 'append_to_file';
const isModifyMessage = message.name === 'modify_file';
const containsAppendMarkers = /<<<\s*(APPEND|MODIFY)/i.test(content || '') || /<<<END_\s*(APPEND|MODIFY)>>>/i.test(content || '');
const textContent = content.trim();
if (appendPayloadMeta) {
currentAssistantMessage.actions.push({
id: `history-append-payload-${Date.now()}-${Math.random()}`,
type: 'append_payload',
append: {
path: appendPayloadMeta.path || '未知文件',
forced: !!appendPayloadMeta.forced,
success: appendPayloadMeta.success === undefined ? true : !!appendPayloadMeta.success,
lines: appendPayloadMeta.lines ?? null,
bytes: appendPayloadMeta.bytes ?? null
},
timestamp: Date.now()
});
debugLog('添加append占位信息:', appendPayloadMeta.path);
} else if (modifyPayloadMeta) {
currentAssistantMessage.actions.push({
id: `history-modify-payload-${Date.now()}-${Math.random()}`,
type: 'modify_payload',
modify: {
path: modifyPayloadMeta.path || '未知文件',
total: modifyPayloadMeta.total_blocks ?? null,
completed: modifyPayloadMeta.completed || [],
failed: modifyPayloadMeta.failed || [],
forced: !!modifyPayloadMeta.forced,
details: modifyPayloadMeta.details || []
},
timestamp: Date.now()
});
debugLog('添加modify占位信息:', modifyPayloadMeta.path);
}
if (textContent && !appendPayloadMeta && !modifyPayloadMeta && !isAppendMessage && !isModifyMessage && !containsAppendMarkers) {
if (textContent) {
currentAssistantMessage.actions.push({
id: `history-text-${Date.now()}-${Math.random()}`,
type: 'text',
@ -341,12 +302,7 @@ export const historyMethods = {
toolAction.tool.message = result.message;
}
}
if (message.name === 'append_to_file' && result && result.message) {
toolAction.tool.message = result.message;
}
debugLog(`更新工具结果: ${message.name} -> ${message.content?.substring(0, 50)}...`);
// append_to_file 的摘要在 append_payload 占位中呈现,此处无需重复
} else {
console.warn('找不到对应的工具调用:', message.name, message.tool_call_id);
}

View File

@ -105,10 +105,6 @@ function renderEnhancedToolResult(): string {
return renderWriteFile(result, args);
} else if (name === 'edit_file') {
return renderEditFile(result, args);
} else if (name === 'append_to_file') {
return renderAppendToFile(result, args);
} else if (name === 'modify_file') {
return renderModifyFile(result, args);
} else if (name === 'delete_file') {
return renderDeleteFile(result, args);
} else if (name === 'rename_file') {
@ -297,67 +293,6 @@ function renderEditFile(result: any, args: any): string {
return html;
}
function renderAppendToFile(result: any, args: any): string {
const path = args.path || result.path || '';
const status = result.success ? '✓ 已追加' : '✗ 失败';
const content = args.content || '';
let html = '<div class="tool-result-meta">';
html += `<div><strong>路径:</strong>${escapeHtml(path)}</div>`;
html += `<div><strong>状态:</strong>${status}</div>`;
html += '</div>';
if (content) {
html += '<div class="tool-result-content scrollable">';
html += '<div class="content-label">追加内容:</div>';
html += `<pre>${escapeHtml(content)}</pre>`;
html += '</div>';
}
return html;
}
function renderModifyFile(result: any, args: any): string {
const path = args.path || result.path || '';
const status = result.success ? '✓ 已修改' : '✗ 失败';
const operations = args.operations || [];
let html = '<div class="tool-result-meta">';
html += `<div><strong>路径:</strong>${escapeHtml(path)}</div>`;
html += `<div><strong>状态:</strong>${status}</div>`;
html += '</div>';
if (operations.length > 0) {
html += '<div class="tool-result-diff scrollable">';
operations.forEach((op: any, idx: number) => {
if (idx > 0) html += '<div class="diff-separator">⋮</div>';
const type = op.type || op.operation;
const oldText = op.old_text || op.old || '';
const newText = op.new_text || op.new || '';
html += `<div class="diff-operation">[${idx + 1}] ${escapeHtml(type)}</div>`;
if (oldText) {
const oldLines = oldText.split('\n');
oldLines.forEach((line: string) => {
html += `<div class="diff-line diff-remove">- ${escapeHtml(line)}</div>`;
});
}
if (newText) {
const newLines = newText.split('\n');
newLines.forEach((line: string) => {
html += `<div class="diff-line diff-add">+ ${escapeHtml(line)}</div>`;
});
}
});
html += '</div>';
}
return html;
}
function renderDeleteFile(result: any, args: any): string {
const path = args.path || result.path || '';
const status = result.success ? '✓ 已删除' : '✗ 失败';

View File

@ -45,10 +45,6 @@ export function renderEnhancedToolResult(
return renderWriteFile(result, args);
} else if (name === 'edit_file') {
return renderEditFile(result, args);
} else if (name === 'append_to_file') {
return renderAppendToFile(result, args);
} else if (name === 'modify_file') {
return renderModifyFile(result, args);
} else if (name === 'delete_file') {
return renderDeleteFile(result, args);
} else if (name === 'rename_file') {
@ -242,79 +238,6 @@ function renderEditFile(result: any, args: any): string {
return html;
}
function renderAppendToFile(result: any, args: any): string {
const path = args.file_path || args.path || result.path || '';
const status = result.success ? '✓ 已追加' : '✗ 失败';
let content = args.content || '';
// 处理转义的 \n
content = content.replace(/\\n/g, '\n');
let html = '<div class="tool-result-meta">';
html += `<div><strong>路径:</strong>${escapeHtml(path)}</div>`;
html += `<div><strong>状态:</strong>${status}</div>`;
html += '</div>';
if (content) {
html += '<div class="tool-result-content scrollable">';
html += '<div class="content-label">追加内容:</div>';
const lines = content.split('\n');
html += '<div class="tool-result-diff">';
lines.forEach((line: string) => {
html += `<div class="diff-line diff-add">+ ${escapeHtml(line)}</div>`;
});
html += '</div>';
html += '</div>';
}
return html;
}
function renderModifyFile(result: any, args: any): string {
const path = args.file_path || args.path || result.path || '';
const status = result.success ? '✓ 已修改' : '✗ 失败';
const operations = args.operations || [];
let html = '<div class="tool-result-meta">';
html += `<div><strong>路径:</strong>${escapeHtml(path)}</div>`;
html += `<div><strong>状态:</strong>${status}</div>`;
html += '</div>';
if (operations.length > 0) {
html += '<div class="tool-result-diff scrollable">';
operations.forEach((op: any, idx: number) => {
if (idx > 0) html += '<div class="diff-separator">⋮</div>';
const type = op.type || op.operation;
let oldText = op.old_text || op.old || '';
let newText = op.new_text || op.new || '';
// 处理转义的 \n
oldText = oldText.replace(/\\n/g, '\n');
newText = newText.replace(/\\n/g, '\n');
html += `<div class="diff-operation">[${idx + 1}] ${escapeHtml(type)}</div>`;
if (oldText) {
const oldLines = oldText.split('\n');
oldLines.forEach((line: string) => {
html += `<div class="diff-line diff-remove">- ${escapeHtml(line)}</div>`;
});
}
if (newText) {
const newLines = newText.split('\n');
newLines.forEach((line: string) => {
html += `<div class="diff-line diff-add">+ ${escapeHtml(line)}</div>`;
});
}
});
html += '</div>';
}
return html;
}
function renderDeleteFile(result: any, args: any): string {
const path = args.path || result.path || '';
const status = result.success ? '✓ 已删除' : '✗ 失败';

View File

@ -517,7 +517,30 @@
></path>
</svg>
</span>
<span>在工具块显示我要做什么的简短提示</span>
<span>在工具块显示"我要做什么"的简短提示</span>
</label>
</div>
<div class="behavior-field">
<div class="behavior-field-header">
<span class="field-title">Skill 提示系统</span>
<p class="field-desc">开启后检测到特定关键词时会自动提示模型阅读相关 skill terminal-guidesub-agent-guide </p>
</div>
<label class="toggle-row">
<input
type="checkbox"
:checked="form.skill_hints_enabled"
@change="personalization.updateField({ key: 'skill_hints_enabled', value: $event.target.checked })"
/>
<span class="fancy-check" aria-hidden="true">
<svg viewBox="0 0 64 64">
<path
d="M 0 16 V 56 A 8 8 90 0 0 8 64 H 56 A 8 8 90 0 0 64 56 V 8 A 8 8 90 0 0 56 0 H 8 A 8 8 90 0 0 0 8 V 16 L 32 48 L 64 16 V 8 A 8 8 90 0 0 56 0 H 8 A 8 8 90 0 0 0 8 V 56 A 8 8 90 0 0 8 64 H 56 A 8 8 90 0 0 64 56 V 16"
pathLength="575.0541381835938"
class="fancy-path"
></path>
</svg>
</span>
<span>根据关键词自动提示阅读相关 skill</span>
</label>
</div>
<div class="behavior-field">

View File

@ -286,26 +286,6 @@ export const useChatStore = defineStore('chat', {
timestamp: Date.now()
});
},
addAppendPayloadAction(data: any) {
const msg = this.ensureAssistantMessage();
clearAwaitingFirstContent(msg);
msg.actions.push({
id: `append-payload-${Date.now()}-${Math.random()}`,
type: 'append_payload',
append: data,
timestamp: Date.now()
});
},
addModifyPayloadAction(data: any) {
const msg = this.ensureAssistantMessage();
clearAwaitingFirstContent(msg);
msg.actions.push({
id: `modify-payload-${Date.now()}-${Math.random()}`,
type: 'modify_payload',
modify: data,
timestamp: Date.now()
});
},
getActiveThinkingAction(msg: any) {
if (!msg || !Array.isArray(msg.actions)) {
return null;

View File

@ -101,8 +101,6 @@ const TOOL_SCENE_MAP: Record<string, string> = {
create_file: 'createFile',
rename_file: 'renameFile',
delete_file: 'deleteFile',
append_to_file: 'appendFile',
modify_file: 'modifyFile',
write_file: 'modifyFile',
edit_file: 'modifyFile',
run_command: 'runCommand',

View File

@ -6,6 +6,7 @@ interface PersonalForm {
enabled: boolean;
auto_generate_title: boolean;
tool_intent_enabled: boolean;
skill_hints_enabled: boolean;
silent_tool_disable: boolean;
enhanced_tool_display: boolean;
enabled_skills: string[];
@ -63,6 +64,7 @@ const defaultForm = (): PersonalForm => ({
enabled: false,
auto_generate_title: true,
tool_intent_enabled: true,
skill_hints_enabled: false,
silent_tool_disable: false,
enhanced_tool_display: true,
enabled_skills: [],
@ -196,6 +198,7 @@ export const usePersonalizationStore = defineStore('personalization', {
enabled: !!data.enabled,
auto_generate_title: data.auto_generate_title !== false,
tool_intent_enabled: !!data.tool_intent_enabled,
skill_hints_enabled: !!data.skill_hints_enabled,
silent_tool_disable: !!data.silent_tool_disable,
enhanced_tool_display: data.enhanced_tool_display !== false,
enabled_skills: Array.isArray(data.enabled_skills)

View File

@ -9,8 +9,6 @@ const RUNNING_ANIMATIONS: Record<string, string> = {
rename_file: 'file-animation',
write_file: 'file-animation',
edit_file: 'file-animation',
modify_file: 'file-animation',
append_to_file: 'file-animation',
create_folder: 'file-animation',
web_search: 'search-animation',
extract_webpage: 'search-animation',
@ -36,8 +34,6 @@ const RUNNING_STATUS_TEXTS: Record<string, string> = {
rename_file: '正在重命名文件...',
write_file: '正在写入文件...',
edit_file: '正在编辑文件...',
modify_file: '正在修改文件...',
append_to_file: '正在追加文件...',
create_folder: '正在创建文件夹...',
web_search: '正在搜索网络...',
extract_webpage: '正在提取网页...',
@ -57,8 +53,6 @@ const COMPLETED_STATUS_TEXTS: Record<string, string> = {
rename_file: '文件重命名成功',
write_file: '文件写入完成',
edit_file: '文件编辑完成',
modify_file: '文件修改成功',
append_to_file: '文件追加完成',
create_folder: '文件夹创建成功',
web_search: '搜索完成',
extract_webpage: '网页提取完成',

View File

@ -41,14 +41,12 @@ export const ICONS = Object.freeze({
});
export const TOOL_ICON_MAP = Object.freeze({
append_to_file: 'pencil',
close_sub_agent: 'bot',
create_file: 'file',
create_folder: 'folder',
create_sub_agent: 'bot',
delete_file: 'trash',
extract_webpage: 'globe',
modify_file: 'pencil',
write_file: 'pencil',
edit_file: 'pencil',
vlm_analyze: 'camera',

View File

@ -831,9 +831,6 @@ class DeepSeekClient:
full_response = ""
tool_calls = []
current_thinking = ""
# 针对 append_to_file / modify_file 的占位结构,防止未定义变量导致异常
append_result = {"handled": False}
modify_result = {"handled": False}
# 状态标志
in_thinking = False
@ -935,10 +932,6 @@ class DeepSeekClient:
# 添加正式回复内容(如果有)
if full_response:
assistant_content_parts.append(full_response)
elif append_result["handled"] and append_result["assistant_content"]:
assistant_content_parts.append(append_result["assistant_content"])
elif modify_result["handled"] and modify_result.get("assistant_content"):
assistant_content_parts.append(modify_result["assistant_content"])
# 添加工具调用说明
if tool_calls:
@ -1000,31 +993,6 @@ class DeepSeekClient:
self._print(f"\n{OUTPUT_FORMATS['action']} 调用工具: {function_name}")
# 额外的参数长度检查(针对特定工具)
if function_name == "modify_file" and "content" in arguments:
content_length = len(arguments.get("content", ""))
if content_length > 9999999999: # 降低到50KB限制
error_msg = f"内容过长({content_length}字符)超过50KB限制"
self._print(f"{OUTPUT_FORMATS['warning']} {error_msg}")
messages.append({
"role": "tool",
"tool_call_id": tool_call["id"],
"name": function_name,
"content": json.dumps({
"success": False,
"error": error_msg,
"suggestion": "请将内容分成多个小块分别修改或使用replace操作只修改必要部分"
}, ensure_ascii=False)
})
all_tool_results.append({
"tool": function_name,
"args": arguments,
"result": error_msg
})
continue
tool_result = await tool_handler(function_name, arguments)
# 解析工具结果,提取关键信息