diff --git a/core/main_terminal_parts/__init__.py b/core/main_terminal_parts/__init__.py index 65e3ece..35167cd 100644 --- a/core/main_terminal_parts/__init__.py +++ b/core/main_terminal_parts/__init__.py @@ -6,4 +6,13 @@ __all__ = [ "MainTerminalCommandMixin", "MainTerminalContextMixin", "MainTerminalToolsMixin", + "MainTerminalToolsPolicyMixin", + "MainTerminalToolsReadMixin", + "MainTerminalToolsDefinitionMixin", + "MainTerminalToolsExecutionMixin", ] + +from .tools_policy import MainTerminalToolsPolicyMixin +from .tools_read import MainTerminalToolsReadMixin +from .tools_definition import MainTerminalToolsDefinitionMixin +from .tools_execution import MainTerminalToolsExecutionMixin diff --git a/core/main_terminal_parts/tools.py b/core/main_terminal_parts/tools.py index b24bc2b..ddd405e 100644 --- a/core/main_terminal_parts/tools.py +++ b/core/main_terminal_parts/tools.py @@ -4,1741 +4,17 @@ from datetime import datetime from pathlib import Path from typing import Any, Dict, List, Optional, Set -try: - from config import ( - OUTPUT_FORMATS, DATA_DIR, PROMPTS_DIR, NEED_CONFIRMATION, - MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE, - MAX_READ_FILE_CHARS, READ_TOOL_DEFAULT_MAX_CHARS, - READ_TOOL_DEFAULT_CONTEXT_BEFORE, READ_TOOL_DEFAULT_CONTEXT_AFTER, - READ_TOOL_MAX_CONTEXT_BEFORE, READ_TOOL_MAX_CONTEXT_AFTER, - READ_TOOL_DEFAULT_MAX_MATCHES, READ_TOOL_MAX_MATCHES, - READ_TOOL_MAX_FILE_SIZE, - TERMINAL_SANDBOX_MOUNT_PATH, - TERMINAL_SANDBOX_MODE, - TERMINAL_SANDBOX_CPUS, - TERMINAL_SANDBOX_MEMORY, - PROJECT_MAX_STORAGE_MB, - CUSTOM_TOOLS_ENABLED, - ) -except ImportError: - import sys - project_root = Path(__file__).resolve().parents[2] - if str(project_root) not in sys.path: - sys.path.insert(0, str(project_root)) - from config import ( - OUTPUT_FORMATS, DATA_DIR, PROMPTS_DIR, NEED_CONFIRMATION, - MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE, - MAX_READ_FILE_CHARS, READ_TOOL_DEFAULT_MAX_CHARS, - READ_TOOL_DEFAULT_CONTEXT_BEFORE, 
READ_TOOL_DEFAULT_CONTEXT_AFTER, - READ_TOOL_MAX_CONTEXT_BEFORE, READ_TOOL_MAX_CONTEXT_AFTER, - READ_TOOL_DEFAULT_MAX_MATCHES, READ_TOOL_MAX_MATCHES, - READ_TOOL_MAX_FILE_SIZE, - TERMINAL_SANDBOX_MOUNT_PATH, - TERMINAL_SANDBOX_MODE, - TERMINAL_SANDBOX_CPUS, - TERMINAL_SANDBOX_MEMORY, - PROJECT_MAX_STORAGE_MB, - CUSTOM_TOOLS_ENABLED, - ) +from .tools_policy import MainTerminalToolsPolicyMixin +from .tools_read import MainTerminalToolsReadMixin +from .tools_definition import MainTerminalToolsDefinitionMixin +from .tools_execution import MainTerminalToolsExecutionMixin -from modules.file_manager import FileManager -from modules.search_engine import SearchEngine -from modules.terminal_ops import TerminalOperator -from modules.memory_manager import MemoryManager -from modules.terminal_manager import TerminalManager -from modules.todo_manager import TodoManager -from modules.sub_agent_manager import SubAgentManager -from modules.webpage_extractor import extract_webpage_content, tavily_extract -from modules.ocr_client import OCRClient -from modules.easter_egg_manager import EasterEggManager -from modules.personalization_manager import ( - load_personalization_config, - build_personalization_prompt, -) -from modules.skills_manager import ( - get_skills_catalog, - build_skills_list, - merge_enabled_skills, - build_skills_prompt, -) -from modules.custom_tool_registry import CustomToolRegistry, build_default_tool_category -from modules.custom_tool_executor import CustomToolExecutor -try: - from config.limits import THINKING_FAST_INTERVAL -except ImportError: - THINKING_FAST_INTERVAL = 10 +class MainTerminalToolsMixin( + MainTerminalToolsPolicyMixin, + MainTerminalToolsReadMixin, + MainTerminalToolsDefinitionMixin, + MainTerminalToolsExecutionMixin, +): + """组合主终端工具相关能力。""" -from modules.container_monitor import collect_stats, inspect_state -from core.tool_config import TOOL_CATEGORIES -from utils.api_client import DeepSeekClient -from utils.context_manager import 
ContextManager -from utils.tool_result_formatter import format_tool_result_for_context -from utils.logger import setup_logger -from config.model_profiles import ( - get_model_profile, - get_model_prompt_replacements, - get_model_context_window, -) - -logger = setup_logger(__name__) -DISABLE_LENGTH_CHECK = True - -class MainTerminalToolsMixin: - def _clamp_int(value, default, min_value=None, max_value=None): - """将输入转换为整数并限制范围。""" - if value is None: - return default - try: - num = int(value) - except (TypeError, ValueError): - return default - if min_value is not None: - num = max(min_value, num) - if max_value is not None: - num = min(max_value, num) - return num - - def _parse_optional_line(value, field_name: str): - """解析可选的行号参数。""" - if value is None: - return None, None - try: - number = int(value) - except (TypeError, ValueError): - return None, f"{field_name} 必须是整数" - if number < 1: - return None, f"{field_name} 必须大于等于1" - return number, None - - def _truncate_text_block(text: str, max_chars: int): - """对单段文本应用字符限制。""" - if max_chars and len(text) > max_chars: - return text[:max_chars], True, max_chars - return text, False, len(text) - - def _limit_text_chunks(chunks: List[Dict], text_key: str, max_chars: int): - """对多个文本片段应用全局字符限制。""" - if max_chars is None or max_chars <= 0: - return chunks, False, sum(len(chunk.get(text_key, "") or "") for chunk in chunks) - - remaining = max_chars - limited_chunks: List[Dict] = [] - truncated = False - consumed = 0 - - for chunk in chunks: - snippet = chunk.get(text_key, "") or "" - snippet_len = len(snippet) - chunk_copy = dict(chunk) - - if remaining <= 0: - truncated = True - break - - if snippet_len > remaining: - chunk_copy[text_key] = snippet[:remaining] - chunk_copy["truncated"] = True - consumed += remaining - limited_chunks.append(chunk_copy) - truncated = True - remaining = 0 - break - - limited_chunks.append(chunk_copy) - consumed += snippet_len - remaining -= snippet_len - - return limited_chunks, truncated, 
consumed - - def _record_sub_agent_message(self, message: Optional[str], task_id: Optional[str] = None, inline: bool = False): - """以 system 消息记录子智能体状态。""" - if not message: - return - if task_id and task_id in self._announced_sub_agent_tasks: - return - if task_id: - self._announced_sub_agent_tasks.add(task_id) - logger.info( - "[SubAgent] record message | task=%s | inline=%s | content=%s", - task_id, - inline, - message.replace("\n", "\\n")[:200], - ) - metadata = {"sub_agent_notice": True, "inline": inline} - if task_id: - metadata["task_id"] = task_id - self.context_manager.add_conversation("system", message, metadata=metadata) - print(f"{OUTPUT_FORMATS['info']} {message}") - - def apply_personalization_preferences(self, config: Optional[Dict[str, Any]] = None): - """Apply persisted personalization settings that affect runtime behavior.""" - try: - effective_config = config or load_personalization_config(self.data_dir) - except Exception: - effective_config = {} - - # 工具意图开关 - self.tool_intent_enabled = bool(effective_config.get("tool_intent_enabled")) - - interval = effective_config.get("thinking_interval") - if isinstance(interval, int) and interval > 0: - self.thinking_fast_interval = interval - else: - self.thinking_fast_interval = THINKING_FAST_INTERVAL - - disabled_categories = [] - raw_disabled = effective_config.get("disabled_tool_categories") - if isinstance(raw_disabled, list): - disabled_categories = [ - key for key in raw_disabled - if isinstance(key, str) and key in self.tool_categories_map - ] - self.default_disabled_tool_categories = disabled_categories - - # 图片压缩模式传递给上下文 - img_mode = effective_config.get("image_compression") - if isinstance(img_mode, str): - self.context_manager.image_compression_mode = img_mode - - # Reset category states to defaults before applying overrides - for key, category in self.tool_categories_map.items(): - self.tool_category_states[key] = False if key in disabled_categories else category.default_enabled - 
self._refresh_disabled_tools() - - # 默认模型偏好(优先应用,再处理运行模式) - preferred_model = effective_config.get("default_model") - if isinstance(preferred_model, str) and preferred_model != self.model_key: - try: - self.set_model(preferred_model) - except Exception as exc: - logger.warning("忽略无效默认模型: %s (%s)", preferred_model, exc) - - preferred_mode = effective_config.get("default_run_mode") - if isinstance(preferred_mode, str): - normalized_mode = preferred_mode.strip().lower() - if normalized_mode in {"fast", "thinking", "deep"} and normalized_mode != self.run_mode: - try: - self.set_run_mode(normalized_mode) - except ValueError: - logger.warning("忽略无效默认运行模式: %s", preferred_mode) - - # 静默禁用工具提示 - self.silent_tool_disable = bool(effective_config.get("silent_tool_disable")) - - def _handle_read_tool(self, arguments: Dict) -> Dict: - """集中处理 read_file 工具的三种模式。""" - file_path = arguments.get("path") - if not file_path: - return {"success": False, "error": "缺少文件路径参数"} - - read_type = (arguments.get("type") or "read").lower() - if read_type not in {"read", "search", "extract"}: - return {"success": False, "error": f"未知的读取类型: {read_type}"} - - max_chars = self._clamp_int( - arguments.get("max_chars"), - READ_TOOL_DEFAULT_MAX_CHARS, - 1, - MAX_READ_FILE_CHARS - ) - - base_result = { - "success": True, - "type": read_type, - "path": None, - "encoding": "utf-8", - "max_chars": max_chars, - "truncated": False - } - - if read_type == "read": - start_line, error = self._parse_optional_line(arguments.get("start_line"), "start_line") - if error: - return {"success": False, "error": error} - end_line_val = arguments.get("end_line") - end_line = None - if end_line_val is not None: - end_line, error = self._parse_optional_line(end_line_val, "end_line") - if error: - return {"success": False, "error": error} - if start_line and end_line < start_line: - return {"success": False, "error": "end_line 必须大于等于 start_line"} - - read_result = self.file_manager.read_text_segment( - file_path, - 
start_line=start_line, - end_line=end_line, - size_limit=READ_TOOL_MAX_FILE_SIZE - ) - if not read_result.get("success"): - return read_result - - content, truncated, char_count = self._truncate_text_block(read_result["content"], max_chars) - base_result.update({ - "path": read_result["path"], - "content": content, - "line_start": read_result["line_start"], - "line_end": read_result["line_end"], - "total_lines": read_result["total_lines"], - "file_size": read_result["size"], - "char_count": char_count, - "message": f"已读取 {read_result['path']} 的内容(行 {read_result['line_start']}~{read_result['line_end']})" - }) - base_result["truncated"] = truncated - self.context_manager.load_file(read_result["path"]) - return base_result - - if read_type == "search": - query = arguments.get("query") - if not query: - return {"success": False, "error": "搜索模式需要提供 query 参数"} - - max_matches = self._clamp_int( - arguments.get("max_matches"), - READ_TOOL_DEFAULT_MAX_MATCHES, - 1, - READ_TOOL_MAX_MATCHES - ) - context_before = self._clamp_int( - arguments.get("context_before"), - READ_TOOL_DEFAULT_CONTEXT_BEFORE, - 0, - READ_TOOL_MAX_CONTEXT_BEFORE - ) - context_after = self._clamp_int( - arguments.get("context_after"), - READ_TOOL_DEFAULT_CONTEXT_AFTER, - 0, - READ_TOOL_MAX_CONTEXT_AFTER - ) - case_sensitive = bool(arguments.get("case_sensitive")) - - search_result = self.file_manager.search_text( - file_path, - query=query, - max_matches=max_matches, - context_before=context_before, - context_after=context_after, - case_sensitive=case_sensitive, - size_limit=READ_TOOL_MAX_FILE_SIZE - ) - if not search_result.get("success"): - return search_result - - matches = search_result["matches"] - limited_matches, truncated, char_count = self._limit_text_chunks(matches, "snippet", max_chars) - - base_result.update({ - "path": search_result["path"], - "file_size": search_result["size"], - "query": query, - "max_matches": max_matches, - "actual_matches": len(matches), - "returned_matches": 
len(limited_matches), - "context_before": context_before, - "context_after": context_after, - "case_sensitive": case_sensitive, - "matches": limited_matches, - "char_count": char_count, - "message": f"在 {search_result['path']} 中搜索 \"{query}\",返回 {len(limited_matches)} 条结果" - }) - base_result["truncated"] = truncated - return base_result - - # extract - segments = arguments.get("segments") - if not isinstance(segments, list) or not segments: - return {"success": False, "error": "extract 模式需要提供 segments 数组"} - - extract_result = self.file_manager.extract_segments( - file_path, - segments=segments, - size_limit=READ_TOOL_MAX_FILE_SIZE - ) - if not extract_result.get("success"): - return extract_result - - limited_segments, truncated, char_count = self._limit_text_chunks( - extract_result["segments"], - "content", - max_chars - ) - - base_result.update({ - "path": extract_result["path"], - "segments": limited_segments, - "file_size": extract_result["size"], - "total_lines": extract_result["total_lines"], - "segment_count": len(limited_segments), - "char_count": char_count, - "message": f"已从 {extract_result['path']} 抽取 {len(limited_segments)} 个片段" - }) - base_result["truncated"] = truncated - self.context_manager.load_file(extract_result["path"]) - return base_result - - def set_tool_category_enabled(self, category: str, enabled: bool) -> None: - """设置工具类别的启用状态 / Toggle tool category enablement.""" - categories = self.tool_categories_map - if category not in categories: - raise ValueError(f"未知的工具类别: {category}") - forced = self.admin_forced_category_states.get(category) - if isinstance(forced, bool) and forced != enabled: - raise ValueError("该类别被管理员强制为启用/禁用,无法修改") - self.tool_category_states[category] = bool(enabled) - self._refresh_disabled_tools() - - def set_admin_policy( - self, - categories: Optional[Dict[str, "ToolCategory"]] = None, - forced_category_states: Optional[Dict[str, Optional[bool]]] = None, - disabled_models: Optional[List[str]] = None, - ) -> None: - 
"""应用管理员策略(工具分类、强制开关、模型禁用)。""" - if categories: - self.tool_categories_map = dict(categories) - # 保证自定义工具分类存在(仅当功能启用) - if self.custom_tools_enabled and "custom" not in self.tool_categories_map: - self.tool_categories_map["custom"] = type(next(iter(TOOL_CATEGORIES.values())))( - label="自定义工具", - tools=[], - default_enabled=True, - silent_when_disabled=False, - ) - # 重新构建启用状态映射,保留已有值 - new_states: Dict[str, bool] = {} - for key, cat in self.tool_categories_map.items(): - if key in self.tool_category_states: - new_states[key] = self.tool_category_states[key] - else: - new_states[key] = cat.default_enabled - self.tool_category_states = new_states - # 清理已被移除的类别 - for removed in list(self.tool_category_states.keys()): - if removed not in self.tool_categories_map: - self.tool_category_states.pop(removed, None) - - self.admin_forced_category_states = forced_category_states or {} - self.admin_disabled_models = disabled_models or [] - self._refresh_disabled_tools() - - def get_tool_settings_snapshot(self) -> List[Dict[str, object]]: - """获取工具类别状态快照 / Return tool category states snapshot.""" - snapshot: List[Dict[str, object]] = [] - categories = self.tool_categories_map - for key, category in categories.items(): - forced = self.admin_forced_category_states.get(key) - enabled = self.tool_category_states.get(key, category.default_enabled) - if isinstance(forced, bool): - enabled = forced - snapshot.append({ - "id": key, - "label": category.label, - "enabled": enabled, - "tools": list(category.tools), - "locked": isinstance(forced, bool), - "locked_state": forced if isinstance(forced, bool) else None, - }) - return snapshot - - def _refresh_disabled_tools(self) -> None: - """刷新禁用工具列表 / Refresh disabled tool set.""" - disabled: Set[str] = set() - notice: Set[str] = set() - categories = self.tool_categories_map - for key, category in categories.items(): - state = self.tool_category_states.get(key, category.default_enabled) - forced = self.admin_forced_category_states.get(key) - 
if isinstance(forced, bool): - state = forced - if not state: - disabled.update(category.tools) - if not getattr(category, "silent_when_disabled", False): - notice.update(category.tools) - self.disabled_tools = disabled - self.disabled_notice_tools = notice - - def _format_disabled_tool_notice(self) -> Optional[str]: - """生成禁用工具提示信息 / Format disabled tool notice.""" - if getattr(self, "silent_tool_disable", False): - return None - if not self.disabled_notice_tools: - return None - - lines = ["=== 工具可用性提醒 ==="] - for tool_name in sorted(self.disabled_notice_tools): - lines.append(f"{tool_name}:已被用户禁用") - lines.append("=== 提示结束 ===") - return "\n".join(lines) - - def _inject_intent(self, properties: Dict[str, Any]) -> Dict[str, Any]: - """在工具参数中注入 intent(简短意图说明),仅当开关启用时。 - - 字段含义:要求模型用不超过15个中文字符对即将执行的动作做简要说明,供前端展示。 - """ - if not self.tool_intent_enabled: - return properties - if not isinstance(properties, dict): - return properties - intent_field = { - "intent": { - "type": "string", - "description": "用不超过15个字向用户说明你要做什么,例如:等待下载完成/创建日志文件" - } - } - # 将 intent 放在最前面以提高模型关注度 - return {**intent_field, **properties} - - def _apply_intent_to_tools(self, tools: List[Dict]) -> List[Dict]: - """遍历工具列表,为缺少 intent 的工具补充字段(开关启用时生效)。""" - if not self.tool_intent_enabled: - return tools - intent_field = { - "intent": { - "type": "string", - "description": "用不超过15个字向用户说明你要做什么,例如:等待下载完成/创建日志文件/搜索最新新闻" - } - } - for tool in tools: - func = tool.get("function") or {} - params = func.get("parameters") or {} - if not isinstance(params, dict): - continue - if params.get("type") != "object": - continue - props = params.get("properties") - if not isinstance(props, dict): - continue - # 补充 intent 属性 - if "intent" not in props: - params["properties"] = {**intent_field, **props} - # 将 intent 加入必填 - required_list = params.get("required") - if isinstance(required_list, list): - if "intent" not in required_list: - required_list.insert(0, "intent") - params["required"] = required_list - else: - 
params["required"] = ["intent"] - return tools - - def _build_custom_tools(self) -> List[Dict]: - if not (self.custom_tools_enabled and getattr(self, "user_role", "user") == "admin"): - return [] - try: - definitions = self.custom_tool_registry.reload() - except Exception: - definitions = self.custom_tool_registry.list_tools() - if not definitions: - # 更新分类为空列表,避免旧缓存 - if "custom" in self.tool_categories_map: - self.tool_categories_map["custom"].tools = [] - return [] - - tools: List[Dict] = [] - tool_ids: List[str] = [] - for item in definitions: - tool_id = item.get("id") - if not tool_id: - continue - if item.get("invalid_id"): - # 跳过不合法的工具 ID,避免供应商严格校验时报错 - continue - tool_ids.append(tool_id) - params = item.get("parameters") or {"type": "object", "properties": {}} - if isinstance(params, dict) and params.get("type") != "object": - params = {"type": "object", "properties": {}} - required = item.get("required") - if isinstance(required, list): - params = dict(params) - params["required"] = required - - tools.append({ - "type": "function", - "function": { - "name": tool_id, - "description": item.get("description") or f"自定义工具: {tool_id}", - "parameters": params - } - }) - - # 覆盖 custom 分类的工具列表 - if "custom" in self.tool_categories_map: - self.tool_categories_map["custom"].tools = tool_ids - - return tools - - def define_tools(self) -> List[Dict]: - """定义可用工具(添加确认工具)""" - current_time = datetime.now().strftime("%Y-%m-%d %H") - - tools = [ - { - "type": "function", - "function": { - "name": "sleep", - "description": "等待指定的秒数,用于短暂延迟/节奏控制(例如让终端产生更多输出、或在两次快照之间留出间隔)。命令是否完成必须用 terminal_snapshot 确认;需要强制超时终止请使用 run_command。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "seconds": { - "type": "number", - "description": "等待的秒数,可以是小数(如0.2秒)。建议范围:0.1-10秒" - }, - "reason": { - "type": "string", - "description": "等待的原因说明(可选)" - } - }), - "required": ["seconds"] - } - } - }, - { - "type": "function", - "function": { - "name": "create_file", - 
"description": "创建新文件(仅创建空文件,正文请使用 write_file 或 edit_file 写入/替换)", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "path": {"type": "string", "description": "文件路径"}, - "file_type": {"type": "string", "enum": ["txt", "py", "md"], "description": "文件类型"}, - "annotation": {"type": "string", "description": "文件备注"} - }), - "required": ["path", "file_type", "annotation"] - } - } - }, - { - "type": "function", - "function": { - "name": "write_file", - "description": "将内容写入本地文件系统;append 为 False 时覆盖原文件,True 时追加到末尾。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "file_path": { - "type": "string", - "description": "要写入的相对路径" - }, - "content": { - "type": "string", - "description": "要写入文件的内容" - }, - "append": { - "type": "boolean", - "description": "是否追加到文件而不是覆盖它", - "default": False - } - }), - "required": ["file_path", "content"] - } - } - }, - { - "type": "function", - "function": { - "name": "read_file", - "description": "读取/搜索/抽取 UTF-8 文本文件内容。通过 type 参数选择 read(阅读)、search(搜索)、extract(具体行段),支持限制返回字符数。若文件非 UTF-8 或过大,请改用 run_python。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "path": {"type": "string", "description": "文件路径"}, - "type": { - "type": "string", - "enum": ["read", "search", "extract"], - "description": "读取模式:read=阅读、search=搜索、extract=按行抽取" - }, - "max_chars": { - "type": "integer", - "description": "返回内容的最大字符数,默认与 config 一致" - }, - "start_line": { - "type": "integer", - "description": "[read] 可选的起始行号(1开始)" - }, - "end_line": { - "type": "integer", - "description": "[read] 可选的结束行号(>=start_line)" - }, - "query": { - "type": "string", - "description": "[search] 搜索关键词" - }, - "max_matches": { - "type": "integer", - "description": "[search] 最多返回多少条命中(默认5,最大50)" - }, - "context_before": { - "type": "integer", - "description": "[search] 命中行向上追加的行数(默认1,最大3)" - }, - "context_after": { - "type": "integer", - "description": "[search] 命中行向下追加的行数(默认1,最大5)" - }, - "case_sensitive": 
{ - "type": "boolean", - "description": "[search] 是否区分大小写,默认 false" - }, - "segments": { - "type": "array", - "description": "[extract] 需要抽取的行区间", - "items": { - "type": "object", - "properties": { - "label": { - "type": "string", - "description": "该片段的标签(可选)" - }, - "start_line": { - "type": "integer", - "description": "起始行号(>=1)" - }, - "end_line": { - "type": "integer", - "description": "结束行号(>=start_line)" - } - }, - "required": ["start_line", "end_line"] - }, - "minItems": 1 - } - }), - "required": ["path", "type"] - } - } - }, - { - "type": "function", - "function": { - "name": "edit_file", - "description": "在文件中执行精确的字符串替换;建议先使用 read_file 获取最新内容以确保精确匹配。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "file_path": { - "type": "string", - "description": "要修改文件的相对路径" - }, - "old_string": { - "type": "string", - "description": "要替换的文本(需与文件内容精确匹配,保留缩进)" - }, - "new_string": { - "type": "string", - "description": "用于替换的新文本(必须不同于 old_string)" - } - }), - "required": ["file_path", "old_string", "new_string"] - } - } - }, - { - "type": "function", - "function": { - "name": "vlm_analyze", - "description": "使用大参数视觉语言模型(Qwen3.5)理解图片:文字、物体、布局、表格等,仅支持本地路径。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "path": {"type": "string", "description": "项目内的图片相对路径"}, - "prompt": {"type": "string", "description": "传递给 VLM 的中文提示词,如“请总结这张图的内容”“表格的总金额是多少”“图中是什么车?”。"} - }), - "required": ["path", "prompt"] - } - } - }, - { - "type": "function", - "function": { - "name": "delete_file", - "description": "删除文件", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "path": {"type": "string", "description": "文件路径"} - }), - "required": ["path"] - } - } - }, - { - "type": "function", - "function": { - "name": "rename_file", - "description": "重命名文件", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "old_path": {"type": "string", "description": "原文件路径"}, - "new_path": {"type": 
"string", "description": "新文件路径"} - }), - "required": ["old_path", "new_path"] - } - } - }, - { - "type": "function", - "function": { - "name": "create_folder", - "description": "创建文件夹", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "path": {"type": "string", "description": "文件夹路径"} - }), - "required": ["path"] - } - } - }, - { - "type": "function", - "function": { - "name": "terminal_session", - "description": "管理持久化终端会话,可打开、关闭、列出或切换终端。请在授权工作区内执行命令,禁止启动需要完整 TTY 的程序(python REPL、vim、top 等)。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "action": { - "type": "string", - "enum": ["open", "close", "list", "reset"], - "description": "操作类型:open-打开新终端,close-关闭终端,list-列出所有终端,reset-重置终端" - }, - "session_name": { - "type": "string", - "description": "终端会话名称(open、close、reset时需要)" - }, - "working_dir": { - "type": "string", - "description": "工作目录,相对于项目路径(open时可选)" - } - }), - "required": ["action"] - } - } - }, - { - "type": "function", - "function": { - "name": "terminal_input", - "description": "向指定终端发送命令或输入。禁止启动会占用终端界面的程序(python/node/nano/vim 等);如遇卡死请结合 terminal_snapshot 并使用 terminal_session 的 reset 恢复。timeout 必填:传入数字(秒,最大300)表示本次等待输出的时长,不会封装命令、不会强杀进程;在等待窗口内若检测到命令已完成会提前返回,否则在超时后返回已产生的输出并保持命令继续运行。需要强制超时终止请使用 run_command。\n若不确定上一条命令是否结束,先用 terminal_snapshot 确认后再继续输入。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "command": { - "type": "string", - "description": "要执行的命令或发送的输入" - }, - "session_name": { - "type": "string", - "description": "目标终端会话名称(必填)" - }, - "timeout": { - "type": "number", - "description": "等待输出的最长秒数,必填,最大300;不会封装命令、不会中断进程" - } - }), - "required": ["command", "timeout", "session_name"] - } - } - }, - { - "type": "function", - "function": { - "name": "terminal_snapshot", - "description": "获取指定终端最近的输出快照,用于判断当前状态。默认返回末尾的50行,可通过参数调整。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "session_name": { - "type": "string", - 
"description": "目标终端会话名称(可选,默认活动终端)" - }, - "lines": { - "type": "integer", - "description": "返回的最大行数(可选)" - }, - "max_chars": { - "type": "integer", - "description": "返回的最大字符数(可选)" - } - }) - } - } - }, - { - "type": "function", - "function": { - "name": "web_search", - "description": f"当现有资料不足时搜索外部信息(当前时间 {current_time})。调用前说明目的,精准撰写 query,并合理设置时间/主题参数;避免重复或无意义的搜索。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "query": { - "type": "string", - "description": "搜索查询内容(不要包含日期或时间范围)" - }, - "max_results": { - "type": "integer", - "description": "最大结果数,可选" - }, - "topic": { - "type": "string", - "description": "搜索主题,可选值:general(默认)/news/finance" - }, - "time_range": { - "type": "string", - "description": "相对时间范围,可选 day/week/month/year,支持缩写 d/w/m/y;与 days 和 start_date/end_date 互斥" - }, - "days": { - "type": "integer", - "description": "最近 N 天,仅当 topic=news 时可用;与 time_range、start_date/end_date 互斥" - }, - "start_date": { - "type": "string", - "description": "开始日期,YYYY-MM-DD;必须与 end_date 同时提供,与 time_range、days 互斥" - }, - "end_date": { - "type": "string", - "description": "结束日期,YYYY-MM-DD;必须与 start_date 同时提供,与 time_range、days 互斥" - }, - "country": { - "type": "string", - "description": "国家过滤,仅 topic=general 可用,使用英文小写国名" - }, - "include_domains": { - "type": "array", - "description": "仅包含这些域名(可选,最多300个)", - "items": { - "type": "string" - } - } - }), - "required": ["query"] - } - } - }, - { - "type": "function", - "function": { - "name": "extract_webpage", - "description": "在 web_search 结果不够详细时提取网页正文。调用前说明用途,注意提取内容会消耗大量 token,超过80000字符将被拒绝。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "url": {"type": "string", "description": "要提取内容的网页URL"} - }), - "required": ["url"] - } - } - }, - { - "type": "function", - "function": { - "name": "save_webpage", - "description": "提取网页内容并保存为纯文本文件,适合需要长期留存的长文档。请提供网址与目标路径(含 .txt 后缀),落地后请通过终端命令查看。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - 
"url": {"type": "string", "description": "要保存的网页URL"}, - "target_path": {"type": "string", "description": "保存位置,包含文件名,相对于项目根目录"} - }), - "required": ["url", "target_path"] - } - } - }, - { - "type": "function", - "function": { - "name": "run_python", - "description": "执行一次性 Python 脚本,可用于处理二进制或非 UTF-8 文件(如 Excel、Word、PDF、图片),或进行数据分析与验证。必须提供 timeout(最长60秒);一旦超时,脚本会被打断且无法继续执行(需要重新运行),并返回已捕获输出。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "code": {"type": "string", "description": "Python代码"}, - "timeout": { - "type": "number", - "description": "超时时长(秒),必填,最大60" - } - }), - "required": ["code", "timeout"] - } - } - }, - { - "type": "function", - "function": { - "name": "run_command", - "description": "执行一次性终端命令,适合查看文件信息(file/ls/stat/iconv 等)、转换编码或调用 CLI 工具。禁止启动交互式程序;对已聚焦文件仅允许使用 grep -n 等定位命令。必须提供 timeout(最长30秒);一旦超时,命令**一定会被打断**且无法继续执行(需要重新运行),并返回已捕获输出;输出超过10000字符将被截断或拒绝。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "command": {"type": "string", "description": "终端命令"}, - "timeout": { - "type": "number", - "description": "超时时长(秒),必填,最大30" - } - }), - "required": ["command", "timeout"] - } - } - }, - { - "type": "function", - "function": { - "name": "update_memory", - "description": "按条目更新记忆列表(自动编号)。append 追加新条目;replace 用序号替换;delete 用序号删除。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "memory_type": {"type": "string", "enum": ["main", "task"], "description": "记忆类型"}, - "content": {"type": "string", "description": "条目内容。append/replace 时必填"}, - "operation": {"type": "string", "enum": ["append", "replace", "delete"], "description": "操作类型"}, - "index": {"type": "integer", "description": "要替换/删除的序号(从1开始)"} - }), - "required": ["memory_type", "operation"] - } - } - }, - { - "type": "function", - "function": { - "name": "todo_create", - "description": "创建待办列表,最多 8 条任务;若已有列表将被覆盖。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "overview": 
{"type": "string", "description": "一句话概述待办清单要完成的目标,50 字以内。"}, - "tasks": { - "type": "array", - "description": "任务列表,1~8 条,每条写清“动词+对象+目标”。", - "items": { - "type": "object", - "properties": { - "title": {"type": "string", "description": "单个任务描述,写成可执行的步骤"} - }, - "required": ["title"] - }, - "minItems": 1, - "maxItems": 8 - } - }), - "required": ["overview", "tasks"] - } - } - }, - { - "type": "function", - "function": { - "name": "todo_update_task", - "description": "批量勾选或取消任务(支持单个或多个任务);全部勾选时提示所有任务已完成。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "task_index": {"type": "integer", "description": "任务序号(1-8),兼容旧参数"}, - "task_indices": { - "type": "array", - "items": {"type": "integer"}, - "minItems": 1, - "maxItems": 8, - "description": "要更新的任务序号列表(1-8),可一次勾选多个" - }, - "completed": {"type": "boolean", "description": "true=打勾,false=取消"} - }), - "required": ["completed"] - } - } - }, - { - "type": "function", - "function": { - "name": "close_sub_agent", - "description": "强制关闭指定子智能体,适用于长时间无响应、超时或卡死的任务。使用前请确认必要的日志/文件已保留,操作会立即终止该任务。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "task_id": {"type": "string", "description": "子智能体任务ID"}, - "agent_id": {"type": "integer", "description": "子智能体编号(1~5),若缺少 task_id 可用"} - }) - } - } - }, - { - "type": "function", - "function": { - "name": "create_sub_agent", - "description": "创建新的子智能体任务。适合大规模信息搜集、网页提取与多文档总结等会占用大量上下文的工作,需要提供任务摘要、详细要求、交付目录以及参考文件。注意:同一时间最多运行5个子智能体。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "agent_id": {"type": "integer", "description": "子智能体代号(1~5)"}, - "summary": {"type": "string", "description": "任务摘要,简要说明目标"}, - "task": {"type": "string", "description": "任务详细要求"}, - "target_dir": {"type": "string", "description": "项目下用于接收交付的相对目录"}, - "reference_files": { - "type": "array", - "description": "提供给子智能体的参考文件列表(相对路径),禁止在summary和task中直接告知子智能体引用图片的路径,必须使用本参数提供", - "items": {"type": "string"}, - "maxItems": 10 - }, - 
"timeout_seconds": {"type": "integer", "description": "子智能体最大运行秒数:单/双次搜索建议180秒,多轮搜索整理建议300秒,深度调研或长篇分析可设600秒"} - }), - "required": ["agent_id", "summary", "task", "target_dir"] - } - } - }, - { - "type": "function", - "function": { - "name": "wait_sub_agent", - "description": "等待指定子智能体任务结束(或超时)。任务完成后会返回交付目录,并将结果复制到指定的项目文件夹。调用时 `timeout_seconds` 应不少于对应子智能体的 `timeout_seconds`,否则可能提前终止等待。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "task_id": {"type": "string", "description": "子智能体任务ID"}, - "agent_id": {"type": "integer", "description": "子智能体代号(可选,用于缺省 task_id 的情况)"}, - "timeout_seconds": {"type": "integer", "description": "本次等待的超时时长(秒)"} - }), - "required": [] - } - } - }, - { - "type": "function", - "function": { - "name": "trigger_easter_egg", - "description": "触发隐藏彩蛋,用于展示非功能性特效。需指定 effect 参数,例如 flood(灌水)或 snake(贪吃蛇)。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "effect": { - "type": "string", - "description": "彩蛋标识,目前支持 flood(灌水)与 snake(贪吃蛇)。" - } - }), - "required": ["effect"] - } - } - } - ] - # 视觉模型(Qwen3.5 / Kimi-k2.5)自带多模态能力,不再暴露 vlm_analyze,改为 view_image / view_video - if getattr(self, "model_key", None) in {"qwen3-vl-plus", "kimi-k2.5"}: - tools = [ - tool for tool in tools - if (tool.get("function") or {}).get("name") != "vlm_analyze" - ] - tools.append({ - "type": "function", - "function": { - "name": "view_image", - "description": "将指定本地图片附加到工具结果中(tool 消息携带 image_url),便于模型主动查看图片内容。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "path": { - "type": "string", - "description": "项目内的图片相对路径(不要以 /workspace 开头);宿主机模式可用绝对路径。支持 png/jpg/webp/gif/bmp/svg。" - } - }), - "required": ["path"] - } - } - }) - tools.append({ - "type": "function", - "function": { - "name": "view_video", - "description": "将指定本地视频附加到工具结果中(tool 消息携带 video_url),便于模型查看视频内容。", - "parameters": { - "type": "object", - "properties": self._inject_intent({ - "path": { - "type": "string", - 
"description": "项目内的视频相对路径(不要以 /workspace 开头);宿主机模式可用绝对路径。支持 mp4/mov/mkv/avi/webm。" - } - }), - "required": ["path"] - } - } - }) - # 附加自定义工具(仅管理员可见) - custom_tools = self._build_custom_tools() - if custom_tools: - tools.extend(custom_tools) - if self.disabled_tools: - tools = [ - tool for tool in tools - if tool.get("function", {}).get("name") not in self.disabled_tools - ] - return self._apply_intent_to_tools(tools) - - async def handle_tool_call(self, tool_name: str, arguments: Dict) -> str: - """处理工具调用(添加参数预检查和改进错误处理)""" - # 导入字符限制配置 - from config import ( - MAX_READ_FILE_CHARS, - MAX_RUN_COMMAND_CHARS, MAX_EXTRACT_WEBPAGE_CHARS - ) - - # 检查是否需要确认 - if tool_name in NEED_CONFIRMATION: - if not await self.confirm_action(tool_name, arguments): - return json.dumps({"success": False, "error": "用户取消操作"}) - - # === 新增:预检查参数大小和格式 === - try: - # 检查参数总大小 - arguments_str = json.dumps(arguments, ensure_ascii=False) - if len(arguments_str) > 200000: # 200KB限制 - return json.dumps({ - "success": False, - "error": f"参数过大({len(arguments_str)}字符),超过200KB限制", - "suggestion": "请分块处理或减少参数内容" - }, ensure_ascii=False) - - # 针对特定工具的内容检查 - if tool_name == "write_file": - content = arguments.get("content", "") - length_limit = 200000 - if not DISABLE_LENGTH_CHECK and len(content) > length_limit: - return json.dumps({ - "success": False, - "error": f"文件内容过长({len(content)}字符),超过{length_limit}字符限制", - "suggestion": "请分块写入,或设置 append=true 多次写入" - }, ensure_ascii=False) - if '\\' in content and content.count('\\') > len(content) / 10: - print(f"{OUTPUT_FORMATS['warning']} 检测到大量转义字符,可能存在格式问题") - - except Exception as e: - return json.dumps({ - "success": False, - "error": f"参数预检查失败: {str(e)}" - }, ensure_ascii=False) - - # 自定义工具预解析(仅管理员) - custom_tool = None - if self.custom_tools_enabled and getattr(self, "user_role", "user") == "admin": - try: - self.custom_tool_registry.reload() - except Exception: - pass - custom_tool = self.custom_tool_registry.get_tool(tool_name) - - try: - if 
custom_tool: - result = await self.custom_tool_executor.run(tool_name, arguments) - elif tool_name == "read_file": - result = self._handle_read_tool(arguments) - elif tool_name in {"vlm_analyze", "ocr_image"}: - path = arguments.get("path") - prompt = arguments.get("prompt") - if not path: - return json.dumps({"success": False, "error": "缺少 path 参数", "warnings": []}, ensure_ascii=False) - result = self.ocr_client.vlm_analyze(path=path, prompt=prompt or "") - elif tool_name == "view_image": - path = (arguments.get("path") or "").strip() - if not path: - return json.dumps({"success": False, "error": "path 不能为空"}, ensure_ascii=False) - host_unrestricted = self._is_host_mode() - if path.startswith("/workspace"): - if host_unrestricted: - path = path.split("/workspace", 1)[1].lstrip("/") - else: - return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用不带/workspace的相对路径"}, ensure_ascii=False) - if host_unrestricted and (Path(path).is_absolute() or (len(path) > 1 and path[1] == ":")): - abs_path = Path(path).expanduser().resolve() - else: - abs_path = (Path(self.context_manager.project_path) / path).resolve() - if not host_unrestricted: - try: - abs_path.relative_to(Path(self.context_manager.project_path).resolve()) - except Exception: - return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用不带/workspace的相对路径"}, ensure_ascii=False) - if not abs_path.exists() or not abs_path.is_file(): - return json.dumps({"success": False, "error": f"图片不存在: {path}"}, ensure_ascii=False) - if abs_path.stat().st_size > 10 * 1024 * 1024: - return json.dumps({"success": False, "error": "图片过大,需 <= 10MB"}, ensure_ascii=False) - allowed_ext = {".png", ".jpg", ".jpeg", ".webp", ".gif", ".bmp", ".svg"} - if abs_path.suffix.lower() not in allowed_ext: - return json.dumps({"success": False, "error": f"不支持的图片格式: {abs_path.suffix}"}, ensure_ascii=False) - # 记录待附加图片,供上层将图片附加到工具结果 - self.pending_image_view = { - "path": str(path) - } - result = {"success": True, "message": 
"图片已附加到工具结果中,将随 tool 返回。", "path": path} - elif tool_name == "view_video": - path = (arguments.get("path") or "").strip() - if not path: - return json.dumps({"success": False, "error": "path 不能为空"}, ensure_ascii=False) - host_unrestricted = self._is_host_mode() - if path.startswith("/workspace"): - if host_unrestricted: - path = path.split("/workspace", 1)[1].lstrip("/") - else: - return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用相对路径"}, ensure_ascii=False) - if host_unrestricted and (Path(path).is_absolute() or (len(path) > 1 and path[1] == ":")): - abs_path = Path(path).expanduser().resolve() - else: - abs_path = (Path(self.context_manager.project_path) / path).resolve() - if not host_unrestricted: - try: - abs_path.relative_to(Path(self.context_manager.project_path).resolve()) - except Exception: - return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用相对路径"}, ensure_ascii=False) - if not abs_path.exists() or not abs_path.is_file(): - return json.dumps({"success": False, "error": f"视频不存在: {path}"}, ensure_ascii=False) - allowed_ext = {".mp4", ".mov", ".mkv", ".avi", ".webm"} - if abs_path.suffix.lower() not in allowed_ext: - return json.dumps({"success": False, "error": f"不支持的视频格式: {abs_path.suffix}"}, ensure_ascii=False) - if abs_path.stat().st_size > 50 * 1024 * 1024: - return json.dumps({"success": False, "error": "视频过大,需 <= 50MB"}, ensure_ascii=False) - self.pending_video_view = {"path": str(path)} - result = { - "success": True, - "message": "视频已附加到工具结果中,将随 tool 返回。", - "path": path - } - - # 终端会话管理工具 - elif tool_name == "terminal_session": - action = arguments["action"] - - if action == "open": - result = self.terminal_manager.open_terminal( - session_name=arguments.get("session_name", "default"), - working_dir=arguments.get("working_dir"), - make_active=True - ) - if result["success"]: - print(f"{OUTPUT_FORMATS['session']} 终端会话已打开: {arguments.get('session_name', 'default')}") - - elif action == "close": - result = 
self.terminal_manager.close_terminal( - session_name=arguments.get("session_name", "default") - ) - if result["success"]: - print(f"{OUTPUT_FORMATS['session']} 终端会话已关闭: {arguments.get('session_name', 'default')}") - - elif action == "list": - result = self.terminal_manager.list_terminals() - - elif action == "reset": - result = self.terminal_manager.reset_terminal( - session_name=arguments.get("session_name") - ) - if result["success"]: - print(f"{OUTPUT_FORMATS['session']} 终端会话已重置: {result['session']}") - - else: - result = {"success": False, "error": f"未知操作: {action}"} - result["action"] = action - - # 终端输入工具 - elif tool_name == "terminal_input": - result = self.terminal_manager.send_to_terminal( - command=arguments["command"], - session_name=arguments.get("session_name"), - timeout=arguments.get("timeout") - ) - if result["success"]: - print(f"{OUTPUT_FORMATS['terminal']} 执行命令: {arguments['command']}") - - elif tool_name == "terminal_snapshot": - result = self.terminal_manager.get_terminal_snapshot( - session_name=arguments.get("session_name"), - lines=arguments.get("lines"), - max_chars=arguments.get("max_chars") - ) - - # sleep工具 - elif tool_name == "sleep": - seconds = arguments.get("seconds", 1) - reason = arguments.get("reason", "等待操作完成") - - # 限制最大等待时间 - max_sleep = 600 # 最多等待60秒 - if seconds > max_sleep: - result = { - "success": False, - "error": f"等待时间过长,最多允许 {max_sleep} 秒", - "suggestion": f"建议分多次等待或减少等待时间" - } - else: - # 确保秒数为正数 - if seconds <= 0: - result = { - "success": False, - "error": "等待时间必须大于0" - } - else: - print(f"{OUTPUT_FORMATS['info']} 等待 {seconds} 秒: {reason}") - - # 执行等待 - import asyncio - await asyncio.sleep(seconds) - - result = { - "success": True, - "message": f"已等待 {seconds} 秒", - "reason": reason, - "timestamp": datetime.now().isoformat() - } - - print(f"{OUTPUT_FORMATS['success']} 等待完成") - - elif tool_name == "create_file": - result = self.file_manager.create_file( - path=arguments["path"], - file_type=arguments["file_type"] - ) 
- # 添加备注 - if result["success"] and arguments.get("annotation"): - self.context_manager.update_annotation( - result["path"], - arguments["annotation"] - ) - if result.get("success"): - result["message"] = ( - f"已创建空文件: {result['path']}。请使用 write_file 写入内容,或使用 edit_file 进行替换。" - ) - - elif tool_name == "delete_file": - result = self.file_manager.delete_file(arguments["path"]) - # 如果删除成功,同时删除备注 - if result.get("success") and result.get("action") == "deleted": - deleted_path = result.get("path") - # 删除备注 - if deleted_path in self.context_manager.file_annotations: - del self.context_manager.file_annotations[deleted_path] - self.context_manager.save_annotations() - print(f"🧹 已删除文件备注: {deleted_path}") - - elif tool_name == "rename_file": - result = self.file_manager.rename_file( - arguments["old_path"], - arguments["new_path"] - ) - # 如果重命名成功,更新备注和聚焦的key - # 如果重命名成功,更新备注 - if result.get("success") and result.get("action") == "renamed": - old_path = result.get("old_path") - new_path = result.get("new_path") - # 更新备注 - if old_path in self.context_manager.file_annotations: - annotation = self.context_manager.file_annotations[old_path] - del self.context_manager.file_annotations[old_path] - self.context_manager.file_annotations[new_path] = annotation - self.context_manager.save_annotations() - print(f"📝 已更新文件备注: {old_path} -> {new_path}") - - elif tool_name == "write_file": - path = arguments.get("file_path") - content = arguments.get("content", "") - append_flag = bool(arguments.get("append", False)) - if not path: - result = {"success": False, "error": "缺少必要参数: file_path"} - else: - mode = "a" if append_flag else "w" - result = self.file_manager.write_file(path, content, mode=mode) - - elif tool_name == "edit_file": - path = arguments.get("file_path") - old_text = arguments.get("old_string") - new_text = arguments.get("new_string") - if not path: - result = {"success": False, "error": "缺少必要参数: file_path"} - elif old_text is None or new_text is None: - result = {"success": 
False, "error": "缺少必要参数: old_string/new_string"} - elif old_text == new_text: - result = {"success": False, "error": "old_string 与 new_string 相同,无法执行替换"} - elif not old_text: - result = {"success": False, "error": "old_string 不能为空,请从 read_file 内容中精确复制"} - else: - result = self.file_manager.replace_in_file(path, old_text, new_text) - elif tool_name == "create_folder": - result = self.file_manager.create_folder(arguments["path"]) - - elif tool_name == "web_search": - allowed, quota_info = self.record_search_call() - if not allowed: - return json.dumps({ - "success": False, - "error": f"搜索配额已用尽,将在 {quota_info.get('reset_at')} 重置。请向用户说明情况并提供替代方案。", - "quota": quota_info - }, ensure_ascii=False) - search_response = await self.search_engine.search_with_summary( - query=arguments["query"], - max_results=arguments.get("max_results"), - topic=arguments.get("topic"), - time_range=arguments.get("time_range"), - days=arguments.get("days"), - start_date=arguments.get("start_date"), - end_date=arguments.get("end_date"), - country=arguments.get("country"), - include_domains=arguments.get("include_domains") - ) - - if search_response["success"]: - result = { - "success": True, - "summary": search_response["summary"], - "filters": search_response.get("filters", {}), - "query": search_response.get("query"), - "results": search_response.get("results", []), - "total_results": search_response.get("total_results", 0) - } - else: - result = { - "success": False, - "error": search_response.get("error", "搜索失败"), - "filters": search_response.get("filters", {}), - "query": search_response.get("query"), - "results": search_response.get("results", []), - "total_results": search_response.get("total_results", 0) - } - - elif tool_name == "extract_webpage": - url = arguments["url"] - try: - # 从config获取API密钥 - from config import TAVILY_API_KEY - full_content, _ = await extract_webpage_content( - urls=url, - api_key=TAVILY_API_KEY, - extract_depth="basic", - max_urls=1 - ) - - # 字符数检查 - char_count 
= len(full_content) - if char_count > MAX_EXTRACT_WEBPAGE_CHARS: - result = { - "success": False, - "error": f"网页提取返回了过长的{char_count}字符,请不要提取这个网页,可以使用网页保存功能,然后使用read工具查找或查看网页", - "char_count": char_count, - "limit": MAX_EXTRACT_WEBPAGE_CHARS, - "url": url - } - else: - result = { - "success": True, - "url": url, - "content": full_content - } - except Exception as e: - result = { - "success": False, - "error": f"网页提取失败: {str(e)}", - "url": url - } - - elif tool_name == "save_webpage": - url = arguments["url"] - target_path = arguments["target_path"] - try: - from config import TAVILY_API_KEY - except ImportError: - TAVILY_API_KEY = None - - if not TAVILY_API_KEY or TAVILY_API_KEY == "your-tavily-api-key": - result = { - "success": False, - "error": "Tavily API密钥未配置,无法保存网页", - "url": url, - "path": target_path - } - else: - try: - extract_result = await tavily_extract( - urls=url, - api_key=TAVILY_API_KEY, - extract_depth="basic", - max_urls=1 - ) - - if not extract_result or "error" in extract_result: - error_message = extract_result.get("error", "提取失败,未返回任何内容") if isinstance(extract_result, dict) else "提取失败" - result = { - "success": False, - "error": error_message, - "url": url, - "path": target_path - } - else: - results_list = extract_result.get("results", []) if isinstance(extract_result, dict) else [] - - primary_result = None - for item in results_list: - if item.get("raw_content"): - primary_result = item - break - if primary_result is None and results_list: - primary_result = results_list[0] - - if not primary_result: - failed_list = extract_result.get("failed_results", []) if isinstance(extract_result, dict) else [] - result = { - "success": False, - "error": "提取成功结果为空,无法保存", - "url": url, - "path": target_path, - "failed": failed_list - } - else: - content_to_save = primary_result.get("raw_content") or primary_result.get("content") or "" - - if not content_to_save: - result = { - "success": False, - "error": "网页内容为空,未写入文件", - "url": url, - "path": 
target_path - } - else: - write_result = self.file_manager.write_file(target_path, content_to_save, mode="w") - - if not write_result.get("success"): - result = { - "success": False, - "error": write_result.get("error", "写入文件失败"), - "url": url, - "path": target_path - } - else: - char_count = len(content_to_save) - byte_size = len(content_to_save.encode("utf-8")) - result = { - "success": True, - "url": url, - "path": write_result.get("path", target_path), - "char_count": char_count, - "byte_size": byte_size, - "message": f"网页内容已以纯文本保存到 {write_result.get('path', target_path)},可用 read_file 的 search/extract 查看,必要时再用终端命令。" - } - - if isinstance(extract_result, dict) and extract_result.get("failed_results"): - result["warnings"] = extract_result["failed_results"] - - except Exception as e: - result = { - "success": False, - "error": f"网页保存失败: {str(e)}", - "url": url, - "path": target_path - } - - elif tool_name == "run_python": - result = await self.terminal_ops.run_python_code( - arguments["code"], - timeout=arguments.get("timeout") - ) - - elif tool_name == "run_command": - result = await self.terminal_ops.run_command( - arguments["command"], - timeout=arguments.get("timeout") - ) - - # 字符数检查 - if result.get("success") and "output" in result: - char_count = len(result["output"]) - if char_count > MAX_RUN_COMMAND_CHARS: - result = { - "success": False, - "error": f"结果内容过大,有{char_count}字符,请使用限制字符数的获取内容方式,根据程度选择10k以内的数", - "char_count": char_count, - "limit": MAX_RUN_COMMAND_CHARS, - "command": arguments["command"] - } - - elif tool_name == "update_memory": - memory_type = arguments["memory_type"] - operation = arguments["operation"] - content = arguments.get("content") - index = arguments.get("index") - - # 参数校验 - if operation == "append" and (not content or not str(content).strip()): - result = {"success": False, "error": "append 操作需要 content"} - elif operation == "replace" and (index is None or index <= 0 or not content or not str(content).strip()): - result = 
{"success": False, "error": "replace 操作需要有效的 index 和 content"} - elif operation == "delete" and (index is None or index <= 0): - result = {"success": False, "error": "delete 操作需要有效的 index"} - else: - result = self.memory_manager.update_entries( - memory_type=memory_type, - operation=operation, - content=content, - index=index - ) - - elif tool_name == "todo_create": - result = self.todo_manager.create_todo_list( - overview=arguments.get("overview", ""), - tasks=arguments.get("tasks", []) - ) - - elif tool_name == "todo_update_task": - task_indices = arguments.get("task_indices") - if task_indices is None: - task_indices = arguments.get("task_index") - result = self.todo_manager.update_task_status( - task_indices=task_indices, - completed=arguments.get("completed", True) - ) - - elif tool_name == "create_sub_agent": - result = self.sub_agent_manager.create_sub_agent( - agent_id=arguments.get("agent_id"), - summary=arguments.get("summary", ""), - task=arguments.get("task", ""), - target_dir=arguments.get("target_dir", ""), - reference_files=arguments.get("reference_files", []), - timeout_seconds=arguments.get("timeout_seconds"), - conversation_id=self.context_manager.current_conversation_id - ) - - elif tool_name == "wait_sub_agent": - wait_timeout = arguments.get("timeout_seconds") - if not wait_timeout: - task_ref = self.sub_agent_manager.lookup_task( - task_id=arguments.get("task_id"), - agent_id=arguments.get("agent_id") - ) - if task_ref: - wait_timeout = task_ref.get("timeout_seconds") - result = self.sub_agent_manager.wait_for_completion( - task_id=arguments.get("task_id"), - agent_id=arguments.get("agent_id"), - timeout_seconds=wait_timeout - ) - - elif tool_name == "close_sub_agent": - result = self.sub_agent_manager.terminate_sub_agent( - task_id=arguments.get("task_id"), - agent_id=arguments.get("agent_id") - ) - - elif tool_name == "trigger_easter_egg": - result = self.easter_egg_manager.trigger_effect(arguments.get("effect")) - - else: - result = 
{"success": False, "error": f"未知工具: {tool_name}"} - - except Exception as e: - logger.error(f"工具执行失败: {tool_name} - {e}") - result = {"success": False, "error": f"工具执行异常: {str(e)}"} - - return json.dumps(result, ensure_ascii=False) - - async def confirm_action(self, action: str, arguments: Dict) -> bool: - """确认危险操作""" - print(f"\n{OUTPUT_FORMATS['confirm']} 需要确认的操作:") - print(f" 操作: {action}") - print(f" 参数: {json.dumps(arguments, ensure_ascii=False, indent=2)}") - - response = input("\n是否继续? (y/n): ").strip().lower() - return response == 'y' diff --git a/core/main_terminal_parts/tools_definition.py b/core/main_terminal_parts/tools_definition.py new file mode 100644 index 0000000..7e37b30 --- /dev/null +++ b/core/main_terminal_parts/tools_definition.py @@ -0,0 +1,789 @@ +import asyncio +import json +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Optional, Set + +try: + from config import ( + OUTPUT_FORMATS, DATA_DIR, PROMPTS_DIR, NEED_CONFIRMATION, + MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE, + MAX_READ_FILE_CHARS, READ_TOOL_DEFAULT_MAX_CHARS, + READ_TOOL_DEFAULT_CONTEXT_BEFORE, READ_TOOL_DEFAULT_CONTEXT_AFTER, + READ_TOOL_MAX_CONTEXT_BEFORE, READ_TOOL_MAX_CONTEXT_AFTER, + READ_TOOL_DEFAULT_MAX_MATCHES, READ_TOOL_MAX_MATCHES, + READ_TOOL_MAX_FILE_SIZE, + TERMINAL_SANDBOX_MOUNT_PATH, + TERMINAL_SANDBOX_MODE, + TERMINAL_SANDBOX_CPUS, + TERMINAL_SANDBOX_MEMORY, + PROJECT_MAX_STORAGE_MB, + CUSTOM_TOOLS_ENABLED, + ) +except ImportError: + import sys + project_root = Path(__file__).resolve().parents[2] + if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + from config import ( + OUTPUT_FORMATS, DATA_DIR, PROMPTS_DIR, NEED_CONFIRMATION, + MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE, + MAX_READ_FILE_CHARS, READ_TOOL_DEFAULT_MAX_CHARS, + READ_TOOL_DEFAULT_CONTEXT_BEFORE, READ_TOOL_DEFAULT_CONTEXT_AFTER, + READ_TOOL_MAX_CONTEXT_BEFORE, READ_TOOL_MAX_CONTEXT_AFTER, + 
READ_TOOL_DEFAULT_MAX_MATCHES, READ_TOOL_MAX_MATCHES, + READ_TOOL_MAX_FILE_SIZE, + TERMINAL_SANDBOX_MOUNT_PATH, + TERMINAL_SANDBOX_MODE, + TERMINAL_SANDBOX_CPUS, + TERMINAL_SANDBOX_MEMORY, + PROJECT_MAX_STORAGE_MB, + CUSTOM_TOOLS_ENABLED, + ) + +from modules.file_manager import FileManager +from modules.search_engine import SearchEngine +from modules.terminal_ops import TerminalOperator +from modules.memory_manager import MemoryManager +from modules.terminal_manager import TerminalManager +from modules.todo_manager import TodoManager +from modules.sub_agent_manager import SubAgentManager +from modules.webpage_extractor import extract_webpage_content, tavily_extract +from modules.ocr_client import OCRClient +from modules.easter_egg_manager import EasterEggManager +from modules.personalization_manager import ( + load_personalization_config, + build_personalization_prompt, +) +from modules.skills_manager import ( + get_skills_catalog, + build_skills_list, + merge_enabled_skills, + build_skills_prompt, +) +from modules.custom_tool_registry import CustomToolRegistry, build_default_tool_category +from modules.custom_tool_executor import CustomToolExecutor + +try: + from config.limits import THINKING_FAST_INTERVAL +except ImportError: + THINKING_FAST_INTERVAL = 10 + +from modules.container_monitor import collect_stats, inspect_state +from core.tool_config import TOOL_CATEGORIES +from utils.api_client import DeepSeekClient +from utils.context_manager import ContextManager +from utils.tool_result_formatter import format_tool_result_for_context +from utils.logger import setup_logger +from config.model_profiles import ( + get_model_profile, + get_model_prompt_replacements, + get_model_context_window, +) + +logger = setup_logger(__name__) +DISABLE_LENGTH_CHECK = True + +class MainTerminalToolsDefinitionMixin: + def _inject_intent(self, properties: Dict[str, Any]) -> Dict[str, Any]: + """在工具参数中注入 intent(简短意图说明),仅当开关启用时。 + + 字段含义:要求模型用不超过15个中文字符对即将执行的动作做简要说明,供前端展示。 + """ + if 
not self.tool_intent_enabled: + return properties + if not isinstance(properties, dict): + return properties + intent_field = { + "intent": { + "type": "string", + "description": "用不超过15个字向用户说明你要做什么,例如:等待下载完成/创建日志文件" + } + } + # 将 intent 放在最前面以提高模型关注度 + return {**intent_field, **properties} + + def _apply_intent_to_tools(self, tools: List[Dict]) -> List[Dict]: + """遍历工具列表,为缺少 intent 的工具补充字段(开关启用时生效)。""" + if not self.tool_intent_enabled: + return tools + intent_field = { + "intent": { + "type": "string", + "description": "用不超过15个字向用户说明你要做什么,例如:等待下载完成/创建日志文件/搜索最新新闻" + } + } + for tool in tools: + func = tool.get("function") or {} + params = func.get("parameters") or {} + if not isinstance(params, dict): + continue + if params.get("type") != "object": + continue + props = params.get("properties") + if not isinstance(props, dict): + continue + # 补充 intent 属性 + if "intent" not in props: + params["properties"] = {**intent_field, **props} + # 将 intent 加入必填 + required_list = params.get("required") + if isinstance(required_list, list): + if "intent" not in required_list: + required_list.insert(0, "intent") + params["required"] = required_list + else: + params["required"] = ["intent"] + return tools + + def _build_custom_tools(self) -> List[Dict]: + if not (self.custom_tools_enabled and getattr(self, "user_role", "user") == "admin"): + return [] + try: + definitions = self.custom_tool_registry.reload() + except Exception: + definitions = self.custom_tool_registry.list_tools() + if not definitions: + # 更新分类为空列表,避免旧缓存 + if "custom" in self.tool_categories_map: + self.tool_categories_map["custom"].tools = [] + return [] + + tools: List[Dict] = [] + tool_ids: List[str] = [] + for item in definitions: + tool_id = item.get("id") + if not tool_id: + continue + if item.get("invalid_id"): + # 跳过不合法的工具 ID,避免供应商严格校验时报错 + continue + tool_ids.append(tool_id) + params = item.get("parameters") or {"type": "object", "properties": {}} + if isinstance(params, dict) and params.get("type") != 
"object": + params = {"type": "object", "properties": {}} + required = item.get("required") + if isinstance(required, list): + params = dict(params) + params["required"] = required + + tools.append({ + "type": "function", + "function": { + "name": tool_id, + "description": item.get("description") or f"自定义工具: {tool_id}", + "parameters": params + } + }) + + # 覆盖 custom 分类的工具列表 + if "custom" in self.tool_categories_map: + self.tool_categories_map["custom"].tools = tool_ids + + return tools + + def define_tools(self) -> List[Dict]: + """定义可用工具(添加确认工具)""" + current_time = datetime.now().strftime("%Y-%m-%d %H") + + tools = [ + { + "type": "function", + "function": { + "name": "sleep", + "description": "等待指定的秒数,用于短暂延迟/节奏控制(例如让终端产生更多输出、或在两次快照之间留出间隔)。命令是否完成必须用 terminal_snapshot 确认;需要强制超时终止请使用 run_command。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "seconds": { + "type": "number", + "description": "等待的秒数,可以是小数(如0.2秒)。建议范围:0.1-10秒" + }, + "reason": { + "type": "string", + "description": "等待的原因说明(可选)" + } + }), + "required": ["seconds"] + } + } + }, + { + "type": "function", + "function": { + "name": "create_file", + "description": "创建新文件(仅创建空文件,正文请使用 write_file 或 edit_file 写入/替换)", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "path": {"type": "string", "description": "文件路径"}, + "file_type": {"type": "string", "enum": ["txt", "py", "md"], "description": "文件类型"}, + "annotation": {"type": "string", "description": "文件备注"} + }), + "required": ["path", "file_type", "annotation"] + } + } + }, + { + "type": "function", + "function": { + "name": "write_file", + "description": "将内容写入本地文件系统;append 为 False 时覆盖原文件,True 时追加到末尾。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "file_path": { + "type": "string", + "description": "要写入的相对路径" + }, + "content": { + "type": "string", + "description": "要写入文件的内容" + }, + "append": { + "type": "boolean", + "description": "是否追加到文件而不是覆盖它", + "default": 
False + } + }), + "required": ["file_path", "content"] + } + } + }, + { + "type": "function", + "function": { + "name": "read_file", + "description": "读取/搜索/抽取 UTF-8 文本文件内容。通过 type 参数选择 read(阅读)、search(搜索)、extract(具体行段),支持限制返回字符数。若文件非 UTF-8 或过大,请改用 run_python。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "path": {"type": "string", "description": "文件路径"}, + "type": { + "type": "string", + "enum": ["read", "search", "extract"], + "description": "读取模式:read=阅读、search=搜索、extract=按行抽取" + }, + "max_chars": { + "type": "integer", + "description": "返回内容的最大字符数,默认与 config 一致" + }, + "start_line": { + "type": "integer", + "description": "[read] 可选的起始行号(1开始)" + }, + "end_line": { + "type": "integer", + "description": "[read] 可选的结束行号(>=start_line)" + }, + "query": { + "type": "string", + "description": "[search] 搜索关键词" + }, + "max_matches": { + "type": "integer", + "description": "[search] 最多返回多少条命中(默认5,最大50)" + }, + "context_before": { + "type": "integer", + "description": "[search] 命中行向上追加的行数(默认1,最大3)" + }, + "context_after": { + "type": "integer", + "description": "[search] 命中行向下追加的行数(默认1,最大5)" + }, + "case_sensitive": { + "type": "boolean", + "description": "[search] 是否区分大小写,默认 false" + }, + "segments": { + "type": "array", + "description": "[extract] 需要抽取的行区间", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string", + "description": "该片段的标签(可选)" + }, + "start_line": { + "type": "integer", + "description": "起始行号(>=1)" + }, + "end_line": { + "type": "integer", + "description": "结束行号(>=start_line)" + } + }, + "required": ["start_line", "end_line"] + }, + "minItems": 1 + } + }), + "required": ["path", "type"] + } + } + }, + { + "type": "function", + "function": { + "name": "edit_file", + "description": "在文件中执行精确的字符串替换;建议先使用 read_file 获取最新内容以确保精确匹配。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "file_path": { + "type": "string", + "description": "要修改文件的相对路径" + }, + "old_string": { + 
"type": "string", + "description": "要替换的文本(需与文件内容精确匹配,保留缩进)" + }, + "new_string": { + "type": "string", + "description": "用于替换的新文本(必须不同于 old_string)" + } + }), + "required": ["file_path", "old_string", "new_string"] + } + } + }, + { + "type": "function", + "function": { + "name": "vlm_analyze", + "description": "使用大参数视觉语言模型(Qwen3.5)理解图片:文字、物体、布局、表格等,仅支持本地路径。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "path": {"type": "string", "description": "项目内的图片相对路径"}, + "prompt": {"type": "string", "description": "传递给 VLM 的中文提示词,如“请总结这张图的内容”“表格的总金额是多少”“图中是什么车?”。"} + }), + "required": ["path", "prompt"] + } + } + }, + { + "type": "function", + "function": { + "name": "delete_file", + "description": "删除文件", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "path": {"type": "string", "description": "文件路径"} + }), + "required": ["path"] + } + } + }, + { + "type": "function", + "function": { + "name": "rename_file", + "description": "重命名文件", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "old_path": {"type": "string", "description": "原文件路径"}, + "new_path": {"type": "string", "description": "新文件路径"} + }), + "required": ["old_path", "new_path"] + } + } + }, + { + "type": "function", + "function": { + "name": "create_folder", + "description": "创建文件夹", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "path": {"type": "string", "description": "文件夹路径"} + }), + "required": ["path"] + } + } + }, + { + "type": "function", + "function": { + "name": "terminal_session", + "description": "管理持久化终端会话,可打开、关闭、列出或切换终端。请在授权工作区内执行命令,禁止启动需要完整 TTY 的程序(python REPL、vim、top 等)。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "action": { + "type": "string", + "enum": ["open", "close", "list", "reset"], + "description": "操作类型:open-打开新终端,close-关闭终端,list-列出所有终端,reset-重置终端" + }, + "session_name": { + "type": "string", + "description": 
"终端会话名称(open、close、reset时需要)" + }, + "working_dir": { + "type": "string", + "description": "工作目录,相对于项目路径(open时可选)" + } + }), + "required": ["action"] + } + } + }, + { + "type": "function", + "function": { + "name": "terminal_input", + "description": "向指定终端发送命令或输入。禁止启动会占用终端界面的程序(python/node/nano/vim 等);如遇卡死请结合 terminal_snapshot 并使用 terminal_session 的 reset 恢复。timeout 必填:传入数字(秒,最大300)表示本次等待输出的时长,不会封装命令、不会强杀进程;在等待窗口内若检测到命令已完成会提前返回,否则在超时后返回已产生的输出并保持命令继续运行。需要强制超时终止请使用 run_command。\n若不确定上一条命令是否结束,先用 terminal_snapshot 确认后再继续输入。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "command": { + "type": "string", + "description": "要执行的命令或发送的输入" + }, + "session_name": { + "type": "string", + "description": "目标终端会话名称(必填)" + }, + "timeout": { + "type": "number", + "description": "等待输出的最长秒数,必填,最大300;不会封装命令、不会中断进程" + } + }), + "required": ["command", "timeout", "session_name"] + } + } + }, + { + "type": "function", + "function": { + "name": "terminal_snapshot", + "description": "获取指定终端最近的输出快照,用于判断当前状态。默认返回末尾的50行,可通过参数调整。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "session_name": { + "type": "string", + "description": "目标终端会话名称(可选,默认活动终端)" + }, + "lines": { + "type": "integer", + "description": "返回的最大行数(可选)" + }, + "max_chars": { + "type": "integer", + "description": "返回的最大字符数(可选)" + } + }) + } + } + }, + { + "type": "function", + "function": { + "name": "web_search", + "description": f"当现有资料不足时搜索外部信息(当前时间 {current_time})。调用前说明目的,精准撰写 query,并合理设置时间/主题参数;避免重复或无意义的搜索。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "query": { + "type": "string", + "description": "搜索查询内容(不要包含日期或时间范围)" + }, + "max_results": { + "type": "integer", + "description": "最大结果数,可选" + }, + "topic": { + "type": "string", + "description": "搜索主题,可选值:general(默认)/news/finance" + }, + "time_range": { + "type": "string", + "description": "相对时间范围,可选 day/week/month/year,支持缩写 d/w/m/y;与 days 和 start_date/end_date 互斥" + }, + 
"days": { + "type": "integer", + "description": "最近 N 天,仅当 topic=news 时可用;与 time_range、start_date/end_date 互斥" + }, + "start_date": { + "type": "string", + "description": "开始日期,YYYY-MM-DD;必须与 end_date 同时提供,与 time_range、days 互斥" + }, + "end_date": { + "type": "string", + "description": "结束日期,YYYY-MM-DD;必须与 start_date 同时提供,与 time_range、days 互斥" + }, + "country": { + "type": "string", + "description": "国家过滤,仅 topic=general 可用,使用英文小写国名" + }, + "include_domains": { + "type": "array", + "description": "仅包含这些域名(可选,最多300个)", + "items": { + "type": "string" + } + } + }), + "required": ["query"] + } + } + }, + { + "type": "function", + "function": { + "name": "extract_webpage", + "description": "在 web_search 结果不够详细时提取网页正文。调用前说明用途,注意提取内容会消耗大量 token,超过80000字符将被拒绝。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "url": {"type": "string", "description": "要提取内容的网页URL"} + }), + "required": ["url"] + } + } + }, + { + "type": "function", + "function": { + "name": "save_webpage", + "description": "提取网页内容并保存为纯文本文件,适合需要长期留存的长文档。请提供网址与目标路径(含 .txt 后缀),落地后请通过终端命令查看。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "url": {"type": "string", "description": "要保存的网页URL"}, + "target_path": {"type": "string", "description": "保存位置,包含文件名,相对于项目根目录"} + }), + "required": ["url", "target_path"] + } + } + }, + { + "type": "function", + "function": { + "name": "run_python", + "description": "执行一次性 Python 脚本,可用于处理二进制或非 UTF-8 文件(如 Excel、Word、PDF、图片),或进行数据分析与验证。必须提供 timeout(最长60秒);一旦超时,脚本会被打断且无法继续执行(需要重新运行),并返回已捕获输出。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "code": {"type": "string", "description": "Python代码"}, + "timeout": { + "type": "number", + "description": "超时时长(秒),必填,最大60" + } + }), + "required": ["code", "timeout"] + } + } + }, + { + "type": "function", + "function": { + "name": "run_command", + "description": "执行一次性终端命令,适合查看文件信息(file/ls/stat/iconv 等)、转换编码或调用 CLI 工具。禁止启动交互式程序;对已聚焦文件仅允许使用 grep -n 
等定位命令。必须提供 timeout(最长30秒);一旦超时,命令**一定会被打断**且无法继续执行(需要重新运行),并返回已捕获输出;输出超过10000字符将被截断或拒绝。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "command": {"type": "string", "description": "终端命令"}, + "timeout": { + "type": "number", + "description": "超时时长(秒),必填,最大30" + } + }), + "required": ["command", "timeout"] + } + } + }, + { + "type": "function", + "function": { + "name": "update_memory", + "description": "按条目更新记忆列表(自动编号)。append 追加新条目;replace 用序号替换;delete 用序号删除。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "memory_type": {"type": "string", "enum": ["main", "task"], "description": "记忆类型"}, + "content": {"type": "string", "description": "条目内容。append/replace 时必填"}, + "operation": {"type": "string", "enum": ["append", "replace", "delete"], "description": "操作类型"}, + "index": {"type": "integer", "description": "要替换/删除的序号(从1开始)"} + }), + "required": ["memory_type", "operation"] + } + } + }, + { + "type": "function", + "function": { + "name": "todo_create", + "description": "创建待办列表,最多 8 条任务;若已有列表将被覆盖。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "overview": {"type": "string", "description": "一句话概述待办清单要完成的目标,50 字以内。"}, + "tasks": { + "type": "array", + "description": "任务列表,1~8 条,每条写清“动词+对象+目标”。", + "items": { + "type": "object", + "properties": { + "title": {"type": "string", "description": "单个任务描述,写成可执行的步骤"} + }, + "required": ["title"] + }, + "minItems": 1, + "maxItems": 8 + } + }), + "required": ["overview", "tasks"] + } + } + }, + { + "type": "function", + "function": { + "name": "todo_update_task", + "description": "批量勾选或取消任务(支持单个或多个任务);全部勾选时提示所有任务已完成。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "task_index": {"type": "integer", "description": "任务序号(1-8),兼容旧参数"}, + "task_indices": { + "type": "array", + "items": {"type": "integer"}, + "minItems": 1, + "maxItems": 8, + "description": "要更新的任务序号列表(1-8),可一次勾选多个" + }, + "completed": {"type": 
"boolean", "description": "true=打勾,false=取消"} + }), + "required": ["completed"] + } + } + }, + { + "type": "function", + "function": { + "name": "close_sub_agent", + "description": "强制关闭指定子智能体,适用于长时间无响应、超时或卡死的任务。使用前请确认必要的日志/文件已保留,操作会立即终止该任务。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "task_id": {"type": "string", "description": "子智能体任务ID"}, + "agent_id": {"type": "integer", "description": "子智能体编号(1~5),若缺少 task_id 可用"} + }) + } + } + }, + { + "type": "function", + "function": { + "name": "create_sub_agent", + "description": "创建新的子智能体任务。适合大规模信息搜集、网页提取与多文档总结等会占用大量上下文的工作,需要提供任务摘要、详细要求、交付目录以及参考文件。注意:同一时间最多运行5个子智能体。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "agent_id": {"type": "integer", "description": "子智能体代号(1~5)"}, + "summary": {"type": "string", "description": "任务摘要,简要说明目标"}, + "task": {"type": "string", "description": "任务详细要求"}, + "target_dir": {"type": "string", "description": "项目下用于接收交付的相对目录"}, + "reference_files": { + "type": "array", + "description": "提供给子智能体的参考文件列表(相对路径),禁止在summary和task中直接告知子智能体引用图片的路径,必须使用本参数提供", + "items": {"type": "string"}, + "maxItems": 10 + }, + "timeout_seconds": {"type": "integer", "description": "子智能体最大运行秒数:单/双次搜索建议180秒,多轮搜索整理建议300秒,深度调研或长篇分析可设600秒"} + }), + "required": ["agent_id", "summary", "task", "target_dir"] + } + } + }, + { + "type": "function", + "function": { + "name": "wait_sub_agent", + "description": "等待指定子智能体任务结束(或超时)。任务完成后会返回交付目录,并将结果复制到指定的项目文件夹。调用时 `timeout_seconds` 应不少于对应子智能体的 `timeout_seconds`,否则可能提前终止等待。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "task_id": {"type": "string", "description": "子智能体任务ID"}, + "agent_id": {"type": "integer", "description": "子智能体代号(可选,用于缺省 task_id 的情况)"}, + "timeout_seconds": {"type": "integer", "description": "本次等待的超时时长(秒)"} + }), + "required": [] + } + } + }, + { + "type": "function", + "function": { + "name": "trigger_easter_egg", + "description": "触发隐藏彩蛋,用于展示非功能性特效。需指定 
effect 参数,例如 flood(灌水)或 snake(贪吃蛇)。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "effect": { + "type": "string", + "description": "彩蛋标识,目前支持 flood(灌水)与 snake(贪吃蛇)。" + } + }), + "required": ["effect"] + } + } + } + ] + # 视觉模型(Qwen3.5 / Kimi-k2.5)自带多模态能力,不再暴露 vlm_analyze,改为 view_image / view_video + if getattr(self, "model_key", None) in {"qwen3-vl-plus", "kimi-k2.5"}: + tools = [ + tool for tool in tools + if (tool.get("function") or {}).get("name") != "vlm_analyze" + ] + tools.append({ + "type": "function", + "function": { + "name": "view_image", + "description": "将指定本地图片附加到工具结果中(tool 消息携带 image_url),便于模型主动查看图片内容。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "path": { + "type": "string", + "description": "项目内的图片相对路径(不要以 /workspace 开头);宿主机模式可用绝对路径。支持 png/jpg/webp/gif/bmp/svg。" + } + }), + "required": ["path"] + } + } + }) + tools.append({ + "type": "function", + "function": { + "name": "view_video", + "description": "将指定本地视频附加到工具结果中(tool 消息携带 video_url),便于模型查看视频内容。", + "parameters": { + "type": "object", + "properties": self._inject_intent({ + "path": { + "type": "string", + "description": "项目内的视频相对路径(不要以 /workspace 开头);宿主机模式可用绝对路径。支持 mp4/mov/mkv/avi/webm。" + } + }), + "required": ["path"] + } + } + }) + # 附加自定义工具(仅管理员可见) + custom_tools = self._build_custom_tools() + if custom_tools: + tools.extend(custom_tools) + if self.disabled_tools: + tools = [ + tool for tool in tools + if tool.get("function", {}).get("name") not in self.disabled_tools + ] + return self._apply_intent_to_tools(tools) diff --git a/core/main_terminal_parts/tools_execution.py b/core/main_terminal_parts/tools_execution.py new file mode 100644 index 0000000..ac442c9 --- /dev/null +++ b/core/main_terminal_parts/tools_execution.py @@ -0,0 +1,674 @@ +import asyncio +import json +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Optional, Set + +try: + from config import ( + OUTPUT_FORMATS, 
DATA_DIR, PROMPTS_DIR, NEED_CONFIRMATION, + MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE, + MAX_READ_FILE_CHARS, READ_TOOL_DEFAULT_MAX_CHARS, + READ_TOOL_DEFAULT_CONTEXT_BEFORE, READ_TOOL_DEFAULT_CONTEXT_AFTER, + READ_TOOL_MAX_CONTEXT_BEFORE, READ_TOOL_MAX_CONTEXT_AFTER, + READ_TOOL_DEFAULT_MAX_MATCHES, READ_TOOL_MAX_MATCHES, + READ_TOOL_MAX_FILE_SIZE, + TERMINAL_SANDBOX_MOUNT_PATH, + TERMINAL_SANDBOX_MODE, + TERMINAL_SANDBOX_CPUS, + TERMINAL_SANDBOX_MEMORY, + PROJECT_MAX_STORAGE_MB, + CUSTOM_TOOLS_ENABLED, + ) +except ImportError: + import sys + project_root = Path(__file__).resolve().parents[2] + if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + from config import ( + OUTPUT_FORMATS, DATA_DIR, PROMPTS_DIR, NEED_CONFIRMATION, + MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE, + MAX_READ_FILE_CHARS, READ_TOOL_DEFAULT_MAX_CHARS, + READ_TOOL_DEFAULT_CONTEXT_BEFORE, READ_TOOL_DEFAULT_CONTEXT_AFTER, + READ_TOOL_MAX_CONTEXT_BEFORE, READ_TOOL_MAX_CONTEXT_AFTER, + READ_TOOL_DEFAULT_MAX_MATCHES, READ_TOOL_MAX_MATCHES, + READ_TOOL_MAX_FILE_SIZE, + TERMINAL_SANDBOX_MOUNT_PATH, + TERMINAL_SANDBOX_MODE, + TERMINAL_SANDBOX_CPUS, + TERMINAL_SANDBOX_MEMORY, + PROJECT_MAX_STORAGE_MB, + CUSTOM_TOOLS_ENABLED, + ) + +from modules.file_manager import FileManager +from modules.search_engine import SearchEngine +from modules.terminal_ops import TerminalOperator +from modules.memory_manager import MemoryManager +from modules.terminal_manager import TerminalManager +from modules.todo_manager import TodoManager +from modules.sub_agent_manager import SubAgentManager +from modules.webpage_extractor import extract_webpage_content, tavily_extract +from modules.ocr_client import OCRClient +from modules.easter_egg_manager import EasterEggManager +from modules.personalization_manager import ( + load_personalization_config, + build_personalization_prompt, +) +from modules.skills_manager import ( + get_skills_catalog, + build_skills_list, + 
class MainTerminalToolsExecutionMixin:
    """Tool-execution side of the main terminal.

    Provides:
      * ``_record_sub_agent_message`` — persist sub-agent status notices as
        ``system`` conversation entries (deduplicated per task id);
      * ``handle_tool_call`` — the central dispatcher that validates tool
        arguments and routes each named tool to the matching manager/helper,
        always returning a JSON string;
      * ``confirm_action`` — interactive confirmation for dangerous tools.

    Expects the host class to supply the collaborating managers referenced
    below (``terminal_manager``, ``file_manager``, ``search_engine``,
    ``sub_agent_manager``, ``context_manager``, ``custom_tool_registry``,
    ``custom_tool_executor``, ``ocr_client``, ``memory_manager``,
    ``todo_manager``, ``easter_egg_manager``, ``terminal_ops``) plus flags
    such as ``custom_tools_enabled`` and ``disabled_tools``.
    """

    def _record_sub_agent_message(self, message: Optional[str], task_id: Optional[str] = None, inline: bool = False) -> None:
        """Record a sub-agent status update as a ``system`` conversation message.

        Args:
            message: Notice text; a falsy value is silently ignored.
            task_id: Optional sub-agent task id. Each task id is announced at
                most once — repeats are dropped via
                ``self._announced_sub_agent_tasks``.
            inline: Forwarded into the message metadata unchanged.
        """
        if not message:
            return
        if task_id and task_id in self._announced_sub_agent_tasks:
            return  # already announced for this task — avoid duplicate notices
        if task_id:
            self._announced_sub_agent_tasks.add(task_id)
        logger.info(
            "[SubAgent] record message | task=%s | inline=%s | content=%s",
            task_id,
            inline,
            # keep the log entry single-line and bounded in length
            message.replace("\n", "\\n")[:200],
        )
        metadata = {"sub_agent_notice": True, "inline": inline}
        if task_id:
            metadata["task_id"] = task_id
        self.context_manager.add_conversation("system", message, metadata=metadata)
        print(f"{OUTPUT_FORMATS['info']} {message}")

    async def handle_tool_call(self, tool_name: str, arguments: Dict) -> str:
        """Dispatch a single tool call (with argument pre-checks and improved error handling).

        Routes ``tool_name`` to the corresponding manager/helper and returns a
        JSON-encoded result string. Tools listed in ``NEED_CONFIRMATION`` are
        confirmed interactively first; oversized argument payloads are
        rejected before dispatch; any exception during execution is converted
        into a ``{"success": False, ...}`` JSON result rather than raised.

        Args:
            tool_name: Name of the tool the model requested.
            arguments: Parsed tool arguments (already JSON-decoded).

        Returns:
            A JSON string; on success the tool-specific payload, on failure
            an object with ``success=False`` and an ``error`` message.
        """
        # Import per-tool character limits from config.
        from config import (
            MAX_READ_FILE_CHARS,
            MAX_RUN_COMMAND_CHARS, MAX_EXTRACT_WEBPAGE_CHARS
        )

        # Ask the user first for tools flagged as dangerous.
        if tool_name in NEED_CONFIRMATION:
            if not await self.confirm_action(tool_name, arguments):
                return json.dumps({"success": False, "error": "用户取消操作"})

        # === Pre-check argument size and format before dispatching ===
        try:
            # Reject argument payloads above ~200KB of JSON text.
            arguments_str = json.dumps(arguments, ensure_ascii=False)
            if len(arguments_str) > 200000:
                return json.dumps({
                    "success": False,
                    "error": f"参数过大({len(arguments_str)}字符),超过200KB限制",
                    "suggestion": "请分块处理或减少参数内容"
                }, ensure_ascii=False)

            # Tool-specific content checks.
            if tool_name == "write_file":
                content = arguments.get("content", "")
                length_limit = 200000
                # Length check is currently disabled via the module-level
                # DISABLE_LENGTH_CHECK flag (True), so this branch is inert.
                if not DISABLE_LENGTH_CHECK and len(content) > length_limit:
                    return json.dumps({
                        "success": False,
                        "error": f"文件内容过长({len(content)}字符),超过{length_limit}字符限制",
                        "suggestion": "请分块写入,或设置 append=true 多次写入"
                    }, ensure_ascii=False)
                # Heuristic: a very high ratio of backslashes suggests the
                # model double-escaped the content; warn but do not block.
                if '\\' in content and content.count('\\') > len(content) / 10:
                    print(f"{OUTPUT_FORMATS['warning']} 检测到大量转义字符,可能存在格式问题")

        except Exception as e:
            return json.dumps({
                "success": False,
                "error": f"参数预检查失败: {str(e)}"
            }, ensure_ascii=False)

        # Pre-resolve custom tools (admin only).
        custom_tool = None
        if self.custom_tools_enabled and getattr(self, "user_role", "user") == "admin":
            try:
                self.custom_tool_registry.reload()
            except Exception:
                pass  # best-effort reload; a stale registry is acceptable
            custom_tool = self.custom_tool_registry.get_tool(tool_name)

        try:
            if custom_tool:
                result = await self.custom_tool_executor.run(tool_name, arguments)
            elif tool_name == "read_file":
                result = self._handle_read_tool(arguments)
            elif tool_name in {"vlm_analyze", "ocr_image"}:
                path = arguments.get("path")
                prompt = arguments.get("prompt")
                if not path:
                    return json.dumps({"success": False, "error": "缺少 path 参数", "warnings": []}, ensure_ascii=False)
                result = self.ocr_client.vlm_analyze(path=path, prompt=prompt or "")
            elif tool_name == "view_image":
                path = (arguments.get("path") or "").strip()
                if not path:
                    return json.dumps({"success": False, "error": "path 不能为空"}, ensure_ascii=False)
                host_unrestricted = self._is_host_mode()
                if path.startswith("/workspace"):
                    # Host mode: strip the sandbox mount prefix; sandbox
                    # mode: reject paths that try to escape via /workspace.
                    if host_unrestricted:
                        path = path.split("/workspace", 1)[1].lstrip("/")
                    else:
                        return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用不带/workspace的相对路径"}, ensure_ascii=False)
                # `path[1] == ":"` catches Windows drive-letter absolute paths.
                if host_unrestricted and (Path(path).is_absolute() or (len(path) > 1 and path[1] == ":")):
                    abs_path = Path(path).expanduser().resolve()
                else:
                    abs_path = (Path(self.context_manager.project_path) / path).resolve()
                if not host_unrestricted:
                    # Containment check: resolved path must stay under the
                    # project root (relative_to raises otherwise).
                    try:
                        abs_path.relative_to(Path(self.context_manager.project_path).resolve())
                    except Exception:
                        return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用不带/workspace的相对路径"}, ensure_ascii=False)
                if not abs_path.exists() or not abs_path.is_file():
                    return json.dumps({"success": False, "error": f"图片不存在: {path}"}, ensure_ascii=False)
                if abs_path.stat().st_size > 10 * 1024 * 1024:
                    return json.dumps({"success": False, "error": "图片过大,需 <= 10MB"}, ensure_ascii=False)
                allowed_ext = {".png", ".jpg", ".jpeg", ".webp", ".gif", ".bmp", ".svg"}
                if abs_path.suffix.lower() not in allowed_ext:
                    return json.dumps({"success": False, "error": f"不支持的图片格式: {abs_path.suffix}"}, ensure_ascii=False)
                # Record the pending image so the caller can attach it to the
                # tool result message.
                self.pending_image_view = {
                    "path": str(path)
                }
                result = {"success": True, "message": "图片已附加到工具结果中,将随 tool 返回。", "path": path}
            elif tool_name == "view_video":
                # Same path-validation pipeline as view_image, with video
                # extensions and a 50MB size cap.
                path = (arguments.get("path") or "").strip()
                if not path:
                    return json.dumps({"success": False, "error": "path 不能为空"}, ensure_ascii=False)
                host_unrestricted = self._is_host_mode()
                if path.startswith("/workspace"):
                    if host_unrestricted:
                        path = path.split("/workspace", 1)[1].lstrip("/")
                    else:
                        return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用相对路径"}, ensure_ascii=False)
                if host_unrestricted and (Path(path).is_absolute() or (len(path) > 1 and path[1] == ":")):
                    abs_path = Path(path).expanduser().resolve()
                else:
                    abs_path = (Path(self.context_manager.project_path) / path).resolve()
                if not host_unrestricted:
                    try:
                        abs_path.relative_to(Path(self.context_manager.project_path).resolve())
                    except Exception:
                        return json.dumps({"success": False, "error": "非法路径,超出项目根目录,请使用相对路径"}, ensure_ascii=False)
                if not abs_path.exists() or not abs_path.is_file():
                    return json.dumps({"success": False, "error": f"视频不存在: {path}"}, ensure_ascii=False)
                allowed_ext = {".mp4", ".mov", ".mkv", ".avi", ".webm"}
                if abs_path.suffix.lower() not in allowed_ext:
                    return json.dumps({"success": False, "error": f"不支持的视频格式: {abs_path.suffix}"}, ensure_ascii=False)
                if abs_path.stat().st_size > 50 * 1024 * 1024:
                    return json.dumps({"success": False, "error": "视频过大,需 <= 50MB"}, ensure_ascii=False)
                self.pending_video_view = {"path": str(path)}
                result = {
                    "success": True,
                    "message": "视频已附加到工具结果中,将随 tool 返回。",
                    "path": path
                }

            # Terminal session management tool.
            elif tool_name == "terminal_session":
                action = arguments["action"]

                if action == "open":
                    result = self.terminal_manager.open_terminal(
                        session_name=arguments.get("session_name", "default"),
                        working_dir=arguments.get("working_dir"),
                        make_active=True
                    )
                    if result["success"]:
                        print(f"{OUTPUT_FORMATS['session']} 终端会话已打开: {arguments.get('session_name', 'default')}")

                elif action == "close":
                    result = self.terminal_manager.close_terminal(
                        session_name=arguments.get("session_name", "default")
                    )
                    if result["success"]:
                        print(f"{OUTPUT_FORMATS['session']} 终端会话已关闭: {arguments.get('session_name', 'default')}")

                elif action == "list":
                    result = self.terminal_manager.list_terminals()

                elif action == "reset":
                    result = self.terminal_manager.reset_terminal(
                        session_name=arguments.get("session_name")
                    )
                    if result["success"]:
                        print(f"{OUTPUT_FORMATS['session']} 终端会话已重置: {result['session']}")

                else:
                    result = {"success": False, "error": f"未知操作: {action}"}
                # Echo the requested action back in the result payload.
                result["action"] = action

            # Terminal input tool.
            elif tool_name == "terminal_input":
                result = self.terminal_manager.send_to_terminal(
                    command=arguments["command"],
                    session_name=arguments.get("session_name"),
                    timeout=arguments.get("timeout")
                )
                if result["success"]:
                    print(f"{OUTPUT_FORMATS['terminal']} 执行命令: {arguments['command']}")

            elif tool_name == "terminal_snapshot":
                result = self.terminal_manager.get_terminal_snapshot(
                    session_name=arguments.get("session_name"),
                    lines=arguments.get("lines"),
                    max_chars=arguments.get("max_chars")
                )

            # sleep tool.
            elif tool_name == "sleep":
                seconds = arguments.get("seconds", 1)
                reason = arguments.get("reason", "等待操作完成")

                # Cap the maximum wait. NOTE: the value is 600 seconds even
                # though an earlier comment said 60 — the error message below
                # reports the actual cap.
                max_sleep = 600
                if seconds > max_sleep:
                    result = {
                        "success": False,
                        "error": f"等待时间过长,最多允许 {max_sleep} 秒",
                        "suggestion": f"建议分多次等待或减少等待时间"
                    }
                else:
                    # Reject non-positive durations.
                    if seconds <= 0:
                        result = {
                            "success": False,
                            "error": "等待时间必须大于0"
                        }
                    else:
                        print(f"{OUTPUT_FORMATS['info']} 等待 {seconds} 秒: {reason}")

                        # Perform the wait. (Local import is redundant —
                        # asyncio is already imported at module level.)
                        import asyncio
                        await asyncio.sleep(seconds)

                        result = {
                            "success": True,
                            "message": f"已等待 {seconds} 秒",
                            "reason": reason,
                            "timestamp": datetime.now().isoformat()
                        }

                        print(f"{OUTPUT_FORMATS['success']} 等待完成")

            elif tool_name == "create_file":
                result = self.file_manager.create_file(
                    path=arguments["path"],
                    file_type=arguments["file_type"]
                )
                # Attach the optional annotation to the new file.
                if result["success"] and arguments.get("annotation"):
                    self.context_manager.update_annotation(
                        result["path"],
                        arguments["annotation"]
                    )
                if result.get("success"):
                    result["message"] = (
                        f"已创建空文件: {result['path']}。请使用 write_file 写入内容,或使用 edit_file 进行替换。"
                    )

            elif tool_name == "delete_file":
                result = self.file_manager.delete_file(arguments["path"])
                # On successful deletion, drop the file's annotation too.
                if result.get("success") and result.get("action") == "deleted":
                    deleted_path = result.get("path")
                    if deleted_path in self.context_manager.file_annotations:
                        del self.context_manager.file_annotations[deleted_path]
                        self.context_manager.save_annotations()
                        print(f"🧹 已删除文件备注: {deleted_path}")

            elif tool_name == "rename_file":
                result = self.file_manager.rename_file(
                    arguments["old_path"],
                    arguments["new_path"]
                )
                # On successful rename, move the annotation to the new path.
                if result.get("success") and result.get("action") == "renamed":
                    old_path = result.get("old_path")
                    new_path = result.get("new_path")
                    if old_path in self.context_manager.file_annotations:
                        annotation = self.context_manager.file_annotations[old_path]
                        del self.context_manager.file_annotations[old_path]
                        self.context_manager.file_annotations[new_path] = annotation
                        self.context_manager.save_annotations()
                        print(f"📝 已更新文件备注: {old_path} -> {new_path}")

            elif tool_name == "write_file":
                path = arguments.get("file_path")
                content = arguments.get("content", "")
                append_flag = bool(arguments.get("append", False))
                if not path:
                    result = {"success": False, "error": "缺少必要参数: file_path"}
                else:
                    mode = "a" if append_flag else "w"
                    result = self.file_manager.write_file(path, content, mode=mode)

            elif tool_name == "edit_file":
                path = arguments.get("file_path")
                old_text = arguments.get("old_string")
                new_text = arguments.get("new_string")
                if not path:
                    result = {"success": False, "error": "缺少必要参数: file_path"}
                elif old_text is None or new_text is None:
                    result = {"success": False, "error": "缺少必要参数: old_string/new_string"}
                elif old_text == new_text:
                    result = {"success": False, "error": "old_string 与 new_string 相同,无法执行替换"}
                elif not old_text:
                    result = {"success": False, "error": "old_string 不能为空,请从 read_file 内容中精确复制"}
                else:
                    result = self.file_manager.replace_in_file(path, old_text, new_text)
            elif tool_name == "create_folder":
                result = self.file_manager.create_folder(arguments["path"])

            elif tool_name == "web_search":
                # Enforce the per-period search quota before hitting the API.
                allowed, quota_info = self.record_search_call()
                if not allowed:
                    return json.dumps({
                        "success": False,
                        "error": f"搜索配额已用尽,将在 {quota_info.get('reset_at')} 重置。请向用户说明情况并提供替代方案。",
                        "quota": quota_info
                    }, ensure_ascii=False)
                search_response = await self.search_engine.search_with_summary(
                    query=arguments["query"],
                    max_results=arguments.get("max_results"),
                    topic=arguments.get("topic"),
                    time_range=arguments.get("time_range"),
                    days=arguments.get("days"),
                    start_date=arguments.get("start_date"),
                    end_date=arguments.get("end_date"),
                    country=arguments.get("country"),
                    include_domains=arguments.get("include_domains")
                )

                if search_response["success"]:
                    result = {
                        "success": True,
                        "summary": search_response["summary"],
                        "filters": search_response.get("filters", {}),
                        "query": search_response.get("query"),
                        "results": search_response.get("results", []),
                        "total_results": search_response.get("total_results", 0)
                    }
                else:
                    result = {
                        "success": False,
                        "error": search_response.get("error", "搜索失败"),
                        "filters": search_response.get("filters", {}),
                        "query": search_response.get("query"),
                        "results": search_response.get("results", []),
                        "total_results": search_response.get("total_results", 0)
                    }

            elif tool_name == "extract_webpage":
                url = arguments["url"]
                try:
                    # Fetch the API key from config at call time.
                    from config import TAVILY_API_KEY
                    full_content, _ = await extract_webpage_content(
                        urls=url,
                        api_key=TAVILY_API_KEY,
                        extract_depth="basic",
                        max_urls=1
                    )

                    # Reject pages whose extracted text exceeds the limit.
                    char_count = len(full_content)
                    if char_count > MAX_EXTRACT_WEBPAGE_CHARS:
                        result = {
                            "success": False,
                            "error": f"网页提取返回了过长的{char_count}字符,请不要提取这个网页,可以使用网页保存功能,然后使用read工具查找或查看网页",
                            "char_count": char_count,
                            "limit": MAX_EXTRACT_WEBPAGE_CHARS,
                            "url": url
                        }
                    else:
                        result = {
                            "success": True,
                            "url": url,
                            "content": full_content
                        }
                except Exception as e:
                    result = {
                        "success": False,
                        "error": f"网页提取失败: {str(e)}",
                        "url": url
                    }

            elif tool_name == "save_webpage":
                url = arguments["url"]
                target_path = arguments["target_path"]
                try:
                    from config import TAVILY_API_KEY
                except ImportError:
                    TAVILY_API_KEY = None

                # Treat the placeholder value as "not configured".
                if not TAVILY_API_KEY or TAVILY_API_KEY == "your-tavily-api-key":
                    result = {
                        "success": False,
                        "error": "Tavily API密钥未配置,无法保存网页",
                        "url": url,
                        "path": target_path
                    }
                else:
                    try:
                        extract_result = await tavily_extract(
                            urls=url,
                            api_key=TAVILY_API_KEY,
                            extract_depth="basic",
                            max_urls=1
                        )

                        if not extract_result or "error" in extract_result:
                            error_message = extract_result.get("error", "提取失败,未返回任何内容") if isinstance(extract_result, dict) else "提取失败"
                            result = {
                                "success": False,
                                "error": error_message,
                                "url": url,
                                "path": target_path
                            }
                        else:
                            results_list = extract_result.get("results", []) if isinstance(extract_result, dict) else []

                            # Prefer the first entry that carries raw_content;
                            # fall back to the first entry at all.
                            primary_result = None
                            for item in results_list:
                                if item.get("raw_content"):
                                    primary_result = item
                                    break
                            if primary_result is None and results_list:
                                primary_result = results_list[0]

                            if not primary_result:
                                failed_list = extract_result.get("failed_results", []) if isinstance(extract_result, dict) else []
                                result = {
                                    "success": False,
                                    "error": "提取成功结果为空,无法保存",
                                    "url": url,
                                    "path": target_path,
                                    "failed": failed_list
                                }
                            else:
                                content_to_save = primary_result.get("raw_content") or primary_result.get("content") or ""

                                if not content_to_save:
                                    result = {
                                        "success": False,
                                        "error": "网页内容为空,未写入文件",
                                        "url": url,
                                        "path": target_path
                                    }
                                else:
                                    write_result = self.file_manager.write_file(target_path, content_to_save, mode="w")

                                    if not write_result.get("success"):
                                        result = {
                                            "success": False,
                                            "error": write_result.get("error", "写入文件失败"),
                                            "url": url,
                                            "path": target_path
                                        }
                                    else:
                                        char_count = len(content_to_save)
                                        byte_size = len(content_to_save.encode("utf-8"))
                                        result = {
                                            "success": True,
                                            "url": url,
                                            "path": write_result.get("path", target_path),
                                            "char_count": char_count,
                                            "byte_size": byte_size,
                                            "message": f"网页内容已以纯文本保存到 {write_result.get('path', target_path)},可用 read_file 的 search/extract 查看,必要时再用终端命令。"
                                        }

                        # Surface partially-failed URLs as warnings.
                        if isinstance(extract_result, dict) and extract_result.get("failed_results"):
                            result["warnings"] = extract_result["failed_results"]

                    except Exception as e:
                        result = {
                            "success": False,
                            "error": f"网页保存失败: {str(e)}",
                            "url": url,
                            "path": target_path
                        }

            elif tool_name == "run_python":
                result = await self.terminal_ops.run_python_code(
                    arguments["code"],
                    timeout=arguments.get("timeout")
                )

            elif tool_name == "run_command":
                result = await self.terminal_ops.run_command(
                    arguments["command"],
                    timeout=arguments.get("timeout")
                )

                # Reject oversized command output.
                if result.get("success") and "output" in result:
                    char_count = len(result["output"])
                    if char_count > MAX_RUN_COMMAND_CHARS:
                        result = {
                            "success": False,
                            "error": f"结果内容过大,有{char_count}字符,请使用限制字符数的获取内容方式,根据程度选择10k以内的数",
                            "char_count": char_count,
                            "limit": MAX_RUN_COMMAND_CHARS,
                            "command": arguments["command"]
                        }

            elif tool_name == "update_memory":
                memory_type = arguments["memory_type"]
                operation = arguments["operation"]
                content = arguments.get("content")
                index = arguments.get("index")

                # Validate parameters per operation before touching memory.
                if operation == "append" and (not content or not str(content).strip()):
                    result = {"success": False, "error": "append 操作需要 content"}
                elif operation == "replace" and (index is None or index <= 0 or not content or not str(content).strip()):
                    result = {"success": False, "error": "replace 操作需要有效的 index 和 content"}
                elif operation == "delete" and (index is None or index <= 0):
                    result = {"success": False, "error": "delete 操作需要有效的 index"}
                else:
                    result = self.memory_manager.update_entries(
                        memory_type=memory_type,
                        operation=operation,
                        content=content,
                        index=index
                    )

            elif tool_name == "todo_create":
                result = self.todo_manager.create_todo_list(
                    overview=arguments.get("overview", ""),
                    tasks=arguments.get("tasks", [])
                )

            elif tool_name == "todo_update_task":
                # Prefer the list form; fall back to the legacy scalar param.
                task_indices = arguments.get("task_indices")
                if task_indices is None:
                    task_indices = arguments.get("task_index")
                result = self.todo_manager.update_task_status(
                    task_indices=task_indices,
                    completed=arguments.get("completed", True)
                )

            elif tool_name == "create_sub_agent":
                result = self.sub_agent_manager.create_sub_agent(
                    agent_id=arguments.get("agent_id"),
                    summary=arguments.get("summary", ""),
                    task=arguments.get("task", ""),
                    target_dir=arguments.get("target_dir", ""),
                    reference_files=arguments.get("reference_files", []),
                    timeout_seconds=arguments.get("timeout_seconds"),
                    conversation_id=self.context_manager.current_conversation_id
                )

            elif tool_name == "wait_sub_agent":
                # Default the wait timeout to the task's own timeout if the
                # caller did not supply one.
                wait_timeout = arguments.get("timeout_seconds")
                if not wait_timeout:
                    task_ref = self.sub_agent_manager.lookup_task(
                        task_id=arguments.get("task_id"),
                        agent_id=arguments.get("agent_id")
                    )
                    if task_ref:
                        wait_timeout = task_ref.get("timeout_seconds")
                result = self.sub_agent_manager.wait_for_completion(
                    task_id=arguments.get("task_id"),
                    agent_id=arguments.get("agent_id"),
                    timeout_seconds=wait_timeout
                )

            elif tool_name == "close_sub_agent":
                result = self.sub_agent_manager.terminate_sub_agent(
                    task_id=arguments.get("task_id"),
                    agent_id=arguments.get("agent_id")
                )

            elif tool_name == "trigger_easter_egg":
                result = self.easter_egg_manager.trigger_effect(arguments.get("effect"))

            else:
                result = {"success": False, "error": f"未知工具: {tool_name}"}

        except Exception as e:
            # Any tool failure is logged and converted to an error payload
            # instead of propagating to the caller.
            logger.error(f"工具执行失败: {tool_name} - {e}")
            result = {"success": False, "error": f"工具执行异常: {str(e)}"}

        return json.dumps(result, ensure_ascii=False)

    async def confirm_action(self, action: str, arguments: Dict) -> bool:
        """Interactively confirm a dangerous operation.

        Prints the action name and its JSON-encoded arguments, then blocks on
        ``input()``. Returns True only for a lowercase ``y`` answer.
        """
        print(f"\n{OUTPUT_FORMATS['confirm']} 需要确认的操作:")
        print(f"  操作: {action}")
        print(f"  参数: {json.dumps(arguments, ensure_ascii=False, indent=2)}")

        response = input("\n是否继续? (y/n): ").strip().lower()
        return response == 'y'
class MainTerminalToolsPolicyMixin:
    """Tool-availability policy for the main terminal.

    Manages per-category enable/disable state, admin-forced overrides, and the
    derived ``disabled_tools`` / ``disabled_notice_tools`` sets, plus applying
    persisted personalization preferences. Expects the host class to provide
    ``tool_categories_map``, ``tool_category_states``,
    ``admin_forced_category_states``, ``context_manager``, ``data_dir``, and
    the ``set_model`` / ``set_run_mode`` methods referenced below.
    """

    def apply_personalization_preferences(self, config: Optional[Dict[str, Any]] = None) -> None:
        """Apply persisted personalization settings that affect runtime behavior.

        Args:
            config: Optional pre-loaded personalization dict; when omitted the
                config is loaded from ``self.data_dir`` (failures fall back to
                an empty config, i.e. all defaults).
        """
        try:
            effective_config = config or load_personalization_config(self.data_dir)
        except Exception:
            effective_config = {}

        # Tool-intent toggle.
        self.tool_intent_enabled = bool(effective_config.get("tool_intent_enabled"))

        # Thinking interval: accept only positive ints, else use the default.
        interval = effective_config.get("thinking_interval")
        if isinstance(interval, int) and interval > 0:
            self.thinking_fast_interval = interval
        else:
            self.thinking_fast_interval = THINKING_FAST_INTERVAL

        # Keep only disabled-category keys that actually exist.
        disabled_categories = []
        raw_disabled = effective_config.get("disabled_tool_categories")
        if isinstance(raw_disabled, list):
            disabled_categories = [
                key for key in raw_disabled
                if isinstance(key, str) and key in self.tool_categories_map
            ]
        self.default_disabled_tool_categories = disabled_categories

        # Forward the image-compression mode to the context manager.
        img_mode = effective_config.get("image_compression")
        if isinstance(img_mode, str):
            self.context_manager.image_compression_mode = img_mode

        # Reset category states to defaults before applying overrides.
        for key, category in self.tool_categories_map.items():
            self.tool_category_states[key] = False if key in disabled_categories else category.default_enabled
        self._refresh_disabled_tools()

        # Default model preference (applied before the run mode, since
        # switching models may affect which run modes are valid).
        preferred_model = effective_config.get("default_model")
        if isinstance(preferred_model, str) and preferred_model != self.model_key:
            try:
                self.set_model(preferred_model)
            except Exception as exc:
                logger.warning("忽略无效默认模型: %s (%s)", preferred_model, exc)

        preferred_mode = effective_config.get("default_run_mode")
        if isinstance(preferred_mode, str):
            normalized_mode = preferred_mode.strip().lower()
            if normalized_mode in {"fast", "thinking", "deep"} and normalized_mode != self.run_mode:
                try:
                    self.set_run_mode(normalized_mode)
                except ValueError:
                    logger.warning("忽略无效默认运行模式: %s", preferred_mode)

        # Silence the "tool disabled" notice if the user asked for it.
        self.silent_tool_disable = bool(effective_config.get("silent_tool_disable"))

    def set_tool_category_enabled(self, category: str, enabled: bool) -> None:
        """Toggle tool category enablement.

        Raises:
            ValueError: If the category is unknown, or an admin policy forces
                this category to the opposite state.
        """
        categories = self.tool_categories_map
        if category not in categories:
            raise ValueError(f"未知的工具类别: {category}")
        forced = self.admin_forced_category_states.get(category)
        if isinstance(forced, bool) and forced != enabled:
            raise ValueError("该类别被管理员强制为启用/禁用,无法修改")
        self.tool_category_states[category] = bool(enabled)
        self._refresh_disabled_tools()

    def set_admin_policy(
        self,
        categories: Optional[Dict[str, "ToolCategory"]] = None,
        forced_category_states: Optional[Dict[str, Optional[bool]]] = None,
        disabled_models: Optional[List[str]] = None,
    ) -> None:
        """Apply the admin policy (tool categories, forced toggles, model bans).

        Args:
            categories: Replacement category map; when provided, per-category
                enablement is rebuilt, preserving existing states.
            forced_category_states: Category -> forced bool (or None) map.
            disabled_models: Model keys the admin has disabled.
        """
        if categories:
            self.tool_categories_map = dict(categories)
            # Guarantee the custom-tools category exists (only when the
            # feature is enabled). Instantiated via the same class as the
            # existing category entries.
            if self.custom_tools_enabled and "custom" not in self.tool_categories_map:
                self.tool_categories_map["custom"] = type(next(iter(TOOL_CATEGORIES.values())))(
                    label="自定义工具",
                    tools=[],
                    default_enabled=True,
                    silent_when_disabled=False,
                )
            # Rebuild the enablement map, keeping previously-set values.
            new_states: Dict[str, bool] = {}
            for key, cat in self.tool_categories_map.items():
                if key in self.tool_category_states:
                    new_states[key] = self.tool_category_states[key]
                else:
                    new_states[key] = cat.default_enabled
            self.tool_category_states = new_states
            # Drop states for categories that no longer exist.
            for removed in list(self.tool_category_states.keys()):
                if removed not in self.tool_categories_map:
                    self.tool_category_states.pop(removed, None)

        self.admin_forced_category_states = forced_category_states or {}
        self.admin_disabled_models = disabled_models or []
        self._refresh_disabled_tools()

    def get_tool_settings_snapshot(self) -> List[Dict[str, object]]:
        """Return a snapshot of tool category states.

        Each entry reports the category id, label, effective enabled flag
        (admin-forced state wins over the user state), the tool names in the
        category, and whether/how the admin has locked it.
        """
        snapshot: List[Dict[str, object]] = []
        categories = self.tool_categories_map
        for key, category in categories.items():
            forced = self.admin_forced_category_states.get(key)
            enabled = self.tool_category_states.get(key, category.default_enabled)
            if isinstance(forced, bool):
                enabled = forced
            snapshot.append({
                "id": key,
                "label": category.label,
                "enabled": enabled,
                "tools": list(category.tools),
                "locked": isinstance(forced, bool),
                "locked_state": forced if isinstance(forced, bool) else None,
            })
        return snapshot

    def _refresh_disabled_tools(self) -> None:
        """Recompute the disabled tool set from category states.

        Populates ``self.disabled_tools`` (all tools in disabled categories)
        and ``self.disabled_notice_tools`` (the subset whose category is not
        marked ``silent_when_disabled``). Admin-forced states override the
        user's per-category state.
        """
        disabled: Set[str] = set()
        notice: Set[str] = set()
        categories = self.tool_categories_map
        for key, category in categories.items():
            state = self.tool_category_states.get(key, category.default_enabled)
            forced = self.admin_forced_category_states.get(key)
            if isinstance(forced, bool):
                state = forced
            if not state:
                disabled.update(category.tools)
                if not getattr(category, "silent_when_disabled", False):
                    notice.update(category.tools)
        self.disabled_tools = disabled
        self.disabled_notice_tools = notice

    def _format_disabled_tool_notice(self) -> Optional[str]:
        """Format the disabled-tool notice block, or None.

        Returns None when the user opted into silent disabling or when no
        tool warrants a notice; otherwise a multi-line block listing the
        disabled tools in sorted order.
        """
        if getattr(self, "silent_tool_disable", False):
            return None
        if not self.disabled_notice_tools:
            return None

        lines = ["=== 工具可用性提醒 ==="]
        for tool_name in sorted(self.disabled_notice_tools):
            lines.append(f"{tool_name}:已被用户禁用")
        lines.append("=== 提示结束 ===")
        return "\n".join(lines)
NEED_CONFIRMATION, + MAX_TERMINALS, TERMINAL_BUFFER_SIZE, TERMINAL_DISPLAY_SIZE, + MAX_READ_FILE_CHARS, READ_TOOL_DEFAULT_MAX_CHARS, + READ_TOOL_DEFAULT_CONTEXT_BEFORE, READ_TOOL_DEFAULT_CONTEXT_AFTER, + READ_TOOL_MAX_CONTEXT_BEFORE, READ_TOOL_MAX_CONTEXT_AFTER, + READ_TOOL_DEFAULT_MAX_MATCHES, READ_TOOL_MAX_MATCHES, + READ_TOOL_MAX_FILE_SIZE, + TERMINAL_SANDBOX_MOUNT_PATH, + TERMINAL_SANDBOX_MODE, + TERMINAL_SANDBOX_CPUS, + TERMINAL_SANDBOX_MEMORY, + PROJECT_MAX_STORAGE_MB, + CUSTOM_TOOLS_ENABLED, + ) + +from modules.file_manager import FileManager +from modules.search_engine import SearchEngine +from modules.terminal_ops import TerminalOperator +from modules.memory_manager import MemoryManager +from modules.terminal_manager import TerminalManager +from modules.todo_manager import TodoManager +from modules.sub_agent_manager import SubAgentManager +from modules.webpage_extractor import extract_webpage_content, tavily_extract +from modules.ocr_client import OCRClient +from modules.easter_egg_manager import EasterEggManager +from modules.personalization_manager import ( + load_personalization_config, + build_personalization_prompt, +) +from modules.skills_manager import ( + get_skills_catalog, + build_skills_list, + merge_enabled_skills, + build_skills_prompt, +) +from modules.custom_tool_registry import CustomToolRegistry, build_default_tool_category +from modules.custom_tool_executor import CustomToolExecutor + +try: + from config.limits import THINKING_FAST_INTERVAL +except ImportError: + THINKING_FAST_INTERVAL = 10 + +from modules.container_monitor import collect_stats, inspect_state +from core.tool_config import TOOL_CATEGORIES +from utils.api_client import DeepSeekClient +from utils.context_manager import ContextManager +from utils.tool_result_formatter import format_tool_result_for_context +from utils.logger import setup_logger +from config.model_profiles import ( + get_model_profile, + get_model_prompt_replacements, + get_model_context_window, +) + +logger = 
setup_logger(__name__) +DISABLE_LENGTH_CHECK = True + +class MainTerminalToolsReadMixin: + def _clamp_int(value, default, min_value=None, max_value=None): + """将输入转换为整数并限制范围。""" + if value is None: + return default + try: + num = int(value) + except (TypeError, ValueError): + return default + if min_value is not None: + num = max(min_value, num) + if max_value is not None: + num = min(max_value, num) + return num + + def _parse_optional_line(value, field_name: str): + """解析可选的行号参数。""" + if value is None: + return None, None + try: + number = int(value) + except (TypeError, ValueError): + return None, f"{field_name} 必须是整数" + if number < 1: + return None, f"{field_name} 必须大于等于1" + return number, None + + def _truncate_text_block(text: str, max_chars: int): + """对单段文本应用字符限制。""" + if max_chars and len(text) > max_chars: + return text[:max_chars], True, max_chars + return text, False, len(text) + + def _limit_text_chunks(chunks: List[Dict], text_key: str, max_chars: int): + """对多个文本片段应用全局字符限制。""" + if max_chars is None or max_chars <= 0: + return chunks, False, sum(len(chunk.get(text_key, "") or "") for chunk in chunks) + + remaining = max_chars + limited_chunks: List[Dict] = [] + truncated = False + consumed = 0 + + for chunk in chunks: + snippet = chunk.get(text_key, "") or "" + snippet_len = len(snippet) + chunk_copy = dict(chunk) + + if remaining <= 0: + truncated = True + break + + if snippet_len > remaining: + chunk_copy[text_key] = snippet[:remaining] + chunk_copy["truncated"] = True + consumed += remaining + limited_chunks.append(chunk_copy) + truncated = True + remaining = 0 + break + + limited_chunks.append(chunk_copy) + consumed += snippet_len + remaining -= snippet_len + + return limited_chunks, truncated, consumed + + def _handle_read_tool(self, arguments: Dict) -> Dict: + """集中处理 read_file 工具的三种模式。""" + file_path = arguments.get("path") + if not file_path: + return {"success": False, "error": "缺少文件路径参数"} + + read_type = (arguments.get("type") or 
"read").lower() + if read_type not in {"read", "search", "extract"}: + return {"success": False, "error": f"未知的读取类型: {read_type}"} + + max_chars = self._clamp_int( + arguments.get("max_chars"), + READ_TOOL_DEFAULT_MAX_CHARS, + 1, + MAX_READ_FILE_CHARS + ) + + base_result = { + "success": True, + "type": read_type, + "path": None, + "encoding": "utf-8", + "max_chars": max_chars, + "truncated": False + } + + if read_type == "read": + start_line, error = self._parse_optional_line(arguments.get("start_line"), "start_line") + if error: + return {"success": False, "error": error} + end_line_val = arguments.get("end_line") + end_line = None + if end_line_val is not None: + end_line, error = self._parse_optional_line(end_line_val, "end_line") + if error: + return {"success": False, "error": error} + if start_line and end_line < start_line: + return {"success": False, "error": "end_line 必须大于等于 start_line"} + + read_result = self.file_manager.read_text_segment( + file_path, + start_line=start_line, + end_line=end_line, + size_limit=READ_TOOL_MAX_FILE_SIZE + ) + if not read_result.get("success"): + return read_result + + content, truncated, char_count = self._truncate_text_block(read_result["content"], max_chars) + base_result.update({ + "path": read_result["path"], + "content": content, + "line_start": read_result["line_start"], + "line_end": read_result["line_end"], + "total_lines": read_result["total_lines"], + "file_size": read_result["size"], + "char_count": char_count, + "message": f"已读取 {read_result['path']} 的内容(行 {read_result['line_start']}~{read_result['line_end']})" + }) + base_result["truncated"] = truncated + self.context_manager.load_file(read_result["path"]) + return base_result + + if read_type == "search": + query = arguments.get("query") + if not query: + return {"success": False, "error": "搜索模式需要提供 query 参数"} + + max_matches = self._clamp_int( + arguments.get("max_matches"), + READ_TOOL_DEFAULT_MAX_MATCHES, + 1, + READ_TOOL_MAX_MATCHES + ) + context_before = 
self._clamp_int( + arguments.get("context_before"), + READ_TOOL_DEFAULT_CONTEXT_BEFORE, + 0, + READ_TOOL_MAX_CONTEXT_BEFORE + ) + context_after = self._clamp_int( + arguments.get("context_after"), + READ_TOOL_DEFAULT_CONTEXT_AFTER, + 0, + READ_TOOL_MAX_CONTEXT_AFTER + ) + case_sensitive = bool(arguments.get("case_sensitive")) + + search_result = self.file_manager.search_text( + file_path, + query=query, + max_matches=max_matches, + context_before=context_before, + context_after=context_after, + case_sensitive=case_sensitive, + size_limit=READ_TOOL_MAX_FILE_SIZE + ) + if not search_result.get("success"): + return search_result + + matches = search_result["matches"] + limited_matches, truncated, char_count = self._limit_text_chunks(matches, "snippet", max_chars) + + base_result.update({ + "path": search_result["path"], + "file_size": search_result["size"], + "query": query, + "max_matches": max_matches, + "actual_matches": len(matches), + "returned_matches": len(limited_matches), + "context_before": context_before, + "context_after": context_after, + "case_sensitive": case_sensitive, + "matches": limited_matches, + "char_count": char_count, + "message": f"在 {search_result['path']} 中搜索 \"{query}\",返回 {len(limited_matches)} 条结果" + }) + base_result["truncated"] = truncated + return base_result + + # extract + segments = arguments.get("segments") + if not isinstance(segments, list) or not segments: + return {"success": False, "error": "extract 模式需要提供 segments 数组"} + + extract_result = self.file_manager.extract_segments( + file_path, + segments=segments, + size_limit=READ_TOOL_MAX_FILE_SIZE + ) + if not extract_result.get("success"): + return extract_result + + limited_segments, truncated, char_count = self._limit_text_chunks( + extract_result["segments"], + "content", + max_chars + ) + + base_result.update({ + "path": extract_result["path"], + "segments": limited_segments, + "file_size": extract_result["size"], + "total_lines": extract_result["total_lines"], + 
"segment_count": len(limited_segments), + "char_count": char_count, + "message": f"已从 {extract_result['path']} 抽取 {len(limited_segments)} 个片段" + }) + base_result["truncated"] = truncated + self.context_manager.load_file(extract_result["path"]) + return base_result diff --git a/server/chat_flow_runner.py b/server/chat_flow_runner.py index 5fda4a1..64d4203 100644 --- a/server/chat_flow_runner.py +++ b/server/chat_flow_runner.py @@ -1,2215 +1,21 @@ from __future__ import annotations -import asyncio -import json -import time -import re -import zipfile -from collections import defaultdict, Counter, deque -from datetime import datetime, timedelta -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple - -from werkzeug.utils import secure_filename - -from config import ( - OUTPUT_FORMATS, - AUTO_FIX_TOOL_CALL, - AUTO_FIX_MAX_ATTEMPTS, - MAX_ITERATIONS_PER_TASK, - MAX_CONSECUTIVE_SAME_TOOL, - MAX_TOTAL_TOOL_CALLS, - TOOL_CALL_COOLDOWN, - MAX_UPLOAD_SIZE, - DEFAULT_CONVERSATIONS_LIMIT, - MAX_CONVERSATIONS_LIMIT, - CONVERSATIONS_DIR, - DEFAULT_RESPONSE_MAX_TOKENS, - DEFAULT_PROJECT_PATH, - LOGS_DIR, - AGENT_VERSION, - THINKING_FAST_INTERVAL, - PROJECT_MAX_STORAGE_MB, - PROJECT_MAX_STORAGE_BYTES, - UPLOAD_SCAN_LOG_SUBDIR, +from .chat_flow_runtime import ( + generate_conversation_title_background, + mark_force_thinking, + apply_thinking_schedule, + update_thinking_after_call, + maybe_mark_failure_from_message, + detect_malformed_tool_call, ) -from modules.personalization_manager import ( - load_personalization_config, - save_personalization_config, - THINKING_INTERVAL_MIN, - THINKING_INTERVAL_MAX, -) -from modules.upload_security import UploadSecurityError -from modules.user_manager import UserWorkspace -from modules.usage_tracker import QUOTA_DEFAULTS -from core.web_terminal import WebTerminal -from utils.tool_result_formatter import format_tool_result_for_context -from utils.conversation_manager import ConversationManager -from config.model_profiles 
import get_model_context_window, get_model_profile +from .chat_flow_task_runner import handle_task_with_sender -from .auth_helpers import api_login_required, resolve_admin_policy, get_current_user_record, get_current_username -from .context import with_terminal, get_gui_manager, get_upload_guard, build_upload_error_response, ensure_conversation_loaded, reset_system_state, get_user_resources, get_or_create_usage_tracker -from .utils_common import ( - build_review_lines, - debug_log, - log_backend_chunk, - log_frontend_chunk, - log_streaming_debug_entry, - brief_log, - DEBUG_LOG_FILE, - CHUNK_BACKEND_LOG_FILE, - CHUNK_FRONTEND_LOG_FILE, - STREAMING_DEBUG_LOG_FILE, -) -from .security import rate_limited, format_tool_result_notice, compact_web_search_result, consume_socket_token, prune_socket_tokens, validate_csrf_request, requires_csrf_protection, get_csrf_token -from .monitor import cache_monitor_snapshot, get_cached_monitor_snapshot -from .extensions import socketio -from .state import ( - MONITOR_FILE_TOOLS, - MONITOR_MEMORY_TOOLS, - MONITOR_SNAPSHOT_CHAR_LIMIT, - MONITOR_MEMORY_ENTRY_LIMIT, - RATE_LIMIT_BUCKETS, - FAILURE_TRACKERS, - pending_socket_tokens, - usage_trackers, - MONITOR_SNAPSHOT_CACHE, - MONITOR_SNAPSHOT_CACHE_LIMIT, - PROJECT_STORAGE_CACHE, - PROJECT_STORAGE_CACHE_TTL_SECONDS, - RECENT_UPLOAD_EVENT_LIMIT, - RECENT_UPLOAD_FEED_LIMIT, - THINKING_FAILURE_KEYWORDS, - TITLE_PROMPT_PATH, - get_last_active_ts, - user_manager, - container_manager, - custom_tool_registry, - user_terminals, - terminal_rooms, - connection_users, - stop_flags, - get_stop_flag, - set_stop_flag, - clear_stop_flag, -) -from .chat_flow_helpers import ( - detect_malformed_tool_call as _detect_malformed_tool_call, - detect_tool_failure, - get_thinking_state, - mark_force_thinking as _mark_force_thinking, - mark_suppress_thinking, - apply_thinking_schedule as _apply_thinking_schedule, - update_thinking_after_call as _update_thinking_after_call, - maybe_mark_failure_from_message as 
_maybe_mark_failure_from_message, - generate_conversation_title_background as _generate_conversation_title_background, -) - - -from .chat_flow_runner_helpers import ( - extract_intent_from_partial, - resolve_monitor_path, - resolve_monitor_memory, - capture_monitor_snapshot, -) - - -def generate_conversation_title_background(web_terminal: WebTerminal, conversation_id: str, user_message: str, username: str): - return _generate_conversation_title_background( - web_terminal=web_terminal, - conversation_id=conversation_id, - user_message=user_message, - username=username, - socketio_instance=socketio, - title_prompt_path=TITLE_PROMPT_PATH, - debug_logger=debug_log, - ) - - -def mark_force_thinking(terminal: WebTerminal, reason: str = ""): - return _mark_force_thinking(terminal, reason=reason, debug_logger=debug_log) - - -def apply_thinking_schedule(terminal: WebTerminal): - return _apply_thinking_schedule(terminal, default_interval=THINKING_FAST_INTERVAL, debug_logger=debug_log) - - -def update_thinking_after_call(terminal: WebTerminal): - return _update_thinking_after_call(terminal, debug_logger=debug_log) - - -def maybe_mark_failure_from_message(terminal: WebTerminal, content: Optional[str]): - return _maybe_mark_failure_from_message( - terminal, - content, - failure_keywords=THINKING_FAILURE_KEYWORDS, - debug_logger=debug_log, - ) - - -def detect_malformed_tool_call(text): - return _detect_malformed_tool_call(text) - - -async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspace, message, images, sender, client_sid, username: str, videos=None): - """处理任务并发送消息 - 集成token统计版本""" - web_terminal = terminal - conversation_id = getattr(web_terminal.context_manager, "current_conversation_id", None) - videos = videos or [] - raw_sender = sender - - def sender(event_type, data): - """为关键事件补充会话标识,便于前端定位报错归属。""" - if not isinstance(data, dict): - raw_sender(event_type, data) - return - payload = data - if event_type in {"error", "quota_exceeded", 
"task_stopped", "task_complete"}: - payload = dict(data) - current_conv = conversation_id or getattr(web_terminal.context_manager, "current_conversation_id", None) - if current_conv: - payload.setdefault("conversation_id", current_conv) - task_id = getattr(web_terminal, "task_id", None) or client_sid - if task_id: - payload.setdefault("task_id", task_id) - if client_sid: - payload.setdefault("client_sid", client_sid) - raw_sender(event_type, payload) - - # 如果是思考模式,重置状态 - if web_terminal.thinking_mode: - web_terminal.api_client.start_new_task(force_deep=web_terminal.deep_thinking_mode) - state = get_thinking_state(web_terminal) - state["fast_streak"] = 0 - state["force_next"] = False - state["suppress_next"] = False - - # 添加到对话历史 - history_len_before = len(getattr(web_terminal.context_manager, "conversation_history", []) or []) - is_first_user_message = history_len_before == 0 - web_terminal.context_manager.add_conversation("user", message, images=images, videos=videos) - - if is_first_user_message and getattr(web_terminal, "context_manager", None): - try: - personal_config = load_personalization_config(workspace.data_dir) - except Exception: - personal_config = {} - auto_title_enabled = personal_config.get("auto_generate_title", True) - if auto_title_enabled: - conv_id = getattr(web_terminal.context_manager, "current_conversation_id", None) - socketio.start_background_task( - generate_conversation_title_background, - web_terminal, - conv_id, - message, - username - ) - - # === 移除:不在这里计算输入token,改为在每次API调用前计算 === - - # 构建上下文和消息(用于API调用) - context = web_terminal.build_context() - messages = web_terminal.build_messages(context, message) - tools = web_terminal.define_tools() - try: - profile = get_model_profile(getattr(web_terminal, "model_key", None) or "kimi-k2.5") - web_terminal.apply_model_profile(profile) - except Exception as exc: - debug_log(f"更新模型配置失败: {exc}") - - # === 上下文预算与安全校验(避免超出模型上下文) === - max_context_tokens = 
get_model_context_window(getattr(web_terminal, "model_key", None) or "kimi-k2.5") - current_tokens = web_terminal.context_manager.get_current_context_tokens(conversation_id) - # 提前同步给底层客户端,动态收缩 max_tokens - web_terminal.api_client.update_context_budget(current_tokens, max_context_tokens) - if max_context_tokens: - if current_tokens >= max_context_tokens: - err_msg = ( - f"当前对话上下文已达 {current_tokens} tokens,超过模型上限 " - f"{max_context_tokens},请先使用压缩功能或清理对话后再试。" - ) - debug_log(err_msg) - web_terminal.context_manager.add_conversation("system", err_msg) - sender('error', { - 'message': err_msg, - 'status_code': 400, - 'error_type': 'context_overflow' - }) - return - usage_percent = (current_tokens / max_context_tokens) * 100 - warned = web_terminal.context_manager.conversation_metadata.get("context_warning_sent", False) - if usage_percent >= 70 and not warned: - warn_msg = ( - f"当前对话上下文约占 {usage_percent:.1f}%({current_tokens}/{max_context_tokens})," - "建议使用压缩功能。" - ) - web_terminal.context_manager.conversation_metadata["context_warning_sent"] = True - web_terminal.context_manager.auto_save_conversation(force=True) - sender('context_warning', { - 'title': '上下文过长', - 'message': warn_msg, - 'type': 'warning', - 'conversation_id': conversation_id - }) - - # 开始新的AI消息 - sender('ai_message_start', {}) - - # 增量保存相关变量 - accumulated_response = "" # 累积的响应内容 - is_first_iteration = True # 是否是第一次迭代 - - # 统计和限制变量 - total_iterations = 0 - total_tool_calls = 0 - consecutive_same_tool = defaultdict(int) - last_tool_name = "" - auto_fix_attempts = 0 - last_tool_call_time = 0 - detected_tool_intent: Dict[str, str] = {} - - # 设置最大迭代次数(API 可覆盖);None 表示不限制 - max_iterations_override = getattr(web_terminal, "max_iterations_override", None) - max_iterations = max_iterations_override if max_iterations_override is not None else MAX_ITERATIONS_PER_TASK - max_api_retries = 4 - retry_delay_seconds = 10 - - pending_append = None # {"path": str, "tool_call_id": str, "buffer": str, ...} - 
append_probe_buffer = "" - pending_modify = None # {"path": str, "tool_call_id": str, "buffer": str, ...} - modify_probe_buffer = "" - - async def finalize_pending_append(response_text: str, stream_completed: bool, finish_reason: str = None) -> Dict: - """在流式输出结束后处理追加写入""" - nonlocal pending_append, append_probe_buffer - - result = { - "handled": False, - "success": False, - "summary": None, - "summary_message": None, - "tool_content": None, - "tool_call_id": None, - "path": None, - "forced": False, - "error": None, - "assistant_content": response_text, - "lines": 0, - "bytes": 0, - "finish_reason": finish_reason, - "appended_content": "", - "assistant_metadata": None - } - - if not pending_append: - return result - - state = pending_append - path = state.get("path") - tool_call_id = state.get("tool_call_id") - buffer = state.get("buffer", "") - start_marker = state.get("start_marker") - end_marker = state.get("end_marker") - start_idx = state.get("content_start") - end_idx = state.get("end_index") - - display_id = state.get("display_id") - - result.update({ - "handled": True, - "path": path, - "tool_call_id": tool_call_id, - "display_id": display_id - }) - - if path is None or tool_call_id is None: - error_msg = "append_to_file 状态不完整,缺少路径或ID。" - debug_log(error_msg) - result["error"] = error_msg - result["summary_message"] = error_msg - result["tool_content"] = json.dumps({ - "success": False, - "error": error_msg - }, ensure_ascii=False) - if display_id: - sender('update_action', { - 'id': display_id, - 'status': 'failed', - 'preparing_id': tool_call_id, - 'message': error_msg - }) - pending_append = None - return result - - if start_idx is None: - error_msg = f"未检测到格式正确的开始标识 {start_marker}。" - debug_log(error_msg) - result["error"] = error_msg - result["summary_message"] = error_msg - result["tool_content"] = json.dumps({ - "success": False, - "path": path, - "error": error_msg - }, ensure_ascii=False) - if display_id: - sender('update_action', { - 'id': 
display_id, - 'status': 'failed', - 'preparing_id': tool_call_id, - 'message': error_msg - }) - pending_append = None - return result - - forced = False - if end_idx is None: - forced = True - # 查找下一个<<<,否则使用整个缓冲结尾 - remaining = buffer[start_idx:] - next_marker = remaining.find("<<<", len(end_marker)) - if next_marker != -1: - end_idx = start_idx + next_marker - else: - end_idx = len(buffer) - - content = buffer[start_idx:end_idx] - if content.startswith('\n'): - content = content[1:] - - if not content: - error_msg = "未检测到需要追加的内容,请严格按照<<>>...<<>>格式输出。" - debug_log(error_msg) - result["error"] = error_msg - result["forced"] = forced - result["tool_content"] = json.dumps({ - "success": False, - "path": path, - "error": error_msg - }, ensure_ascii=False) - if display_id: - sender('update_action', { - 'id': display_id, - 'status': 'failed', - 'preparing_id': tool_call_id, - 'message': error_msg - }) - pending_append = None - return result - - assistant_message_lines = [] - if start_marker: - assistant_message_lines.append(start_marker) - assistant_message_lines.append(content) - if not forced and end_marker: - assistant_message_lines.append(end_marker) - assistant_message_text = "\n".join(assistant_message_lines) - result["assistant_content"] = assistant_message_text - assistant_metadata = { - "append_payload": { - "path": path, - "tool_call_id": tool_call_id, - "forced": forced, - "has_end_marker": not forced - } - } - result["assistant_metadata"] = assistant_metadata - - write_result = web_terminal.file_manager.append_file(path, content) - if write_result.get("success"): - bytes_written = len(content.encode('utf-8')) - line_count = content.count('\n') - if content and not content.endswith('\n'): - line_count += 1 - - summary = f"已向 {path} 追加 {line_count} 行({bytes_written} 字节)" - if forced: - summary += "。未检测到 <<>> 标记,系统已在流结束处完成写入。如内容未完成,请重新调用 append_to_file 并按标准格式补充;如已完成,可继续后续步骤。" - - result.update({ - "success": True, - "summary": summary, - "summary_message": 
summary, - "forced": forced, - "lines": line_count, - "bytes": bytes_written, - "appended_content": content, - "tool_content": json.dumps({ - "success": True, - "path": path, - "lines": line_count, - "bytes": bytes_written, - "forced": forced, - "message": summary, - "finish_reason": finish_reason - }, ensure_ascii=False) - }) - - assistant_meta_payload = result["assistant_metadata"]["append_payload"] - assistant_meta_payload["lines"] = line_count - assistant_meta_payload["bytes"] = bytes_written - assistant_meta_payload["success"] = True - - summary_payload = { - "success": True, - "path": path, - "lines": line_count, - "bytes": bytes_written, - "forced": forced, - "message": summary - } - - if display_id: - sender('update_action', { - 'id': display_id, - 'status': 'completed', - 'result': summary_payload, - 'preparing_id': tool_call_id, - 'message': summary - }) - - debug_log(f"追加写入完成: {summary}") - else: - error_msg = write_result.get("error", "追加写入失败") - result.update({ - "error": error_msg, - "summary_message": error_msg, - "forced": forced, - "appended_content": content, - "tool_content": json.dumps({ - "success": False, - "path": path, - "error": error_msg, - "finish_reason": finish_reason - }, ensure_ascii=False) - }) - debug_log(f"追加写入失败: {error_msg}") - - if result["assistant_metadata"]: - assistant_meta_payload = result["assistant_metadata"]["append_payload"] - assistant_meta_payload["lines"] = content.count('\n') + (0 if content.endswith('\n') or not content else 1) - assistant_meta_payload["bytes"] = len(content.encode('utf-8')) - assistant_meta_payload["success"] = False - - failure_payload = { - "success": False, - "path": path, - "error": error_msg, - "forced": forced - } - - if display_id: - sender('update_action', { - 'id': display_id, - 'status': 'completed', - 'result': failure_payload, - 'preparing_id': tool_call_id, - 'message': error_msg - }) - - pending_append = None - append_probe_buffer = "" - if hasattr(web_terminal, 
"pending_append_request"): - web_terminal.pending_append_request = None - return result - - async def finalize_pending_modify(response_text: str, stream_completed: bool, finish_reason: str = None) -> Dict: - """在流式输出结束后处理修改写入""" - nonlocal pending_modify, modify_probe_buffer - - result = { - "handled": False, - "success": False, - "path": None, - "tool_call_id": None, - "display_id": None, - "total_blocks": 0, - "completed_blocks": [], - "failed_blocks": [], - "forced": False, - "details": [], - "error": None, - "assistant_content": response_text, - "assistant_metadata": None, - "tool_content": None, - "summary_message": None, - "finish_reason": finish_reason - } - - if not pending_modify: - return result - - state = pending_modify - path = state.get("path") - tool_call_id = state.get("tool_call_id") - display_id = state.get("display_id") - start_marker = state.get("start_marker") - end_marker = state.get("end_marker") - buffer = state.get("buffer", "") - raw_buffer = state.get("raw_buffer", "") - end_index = state.get("end_index") - - result.update({ - "handled": True, - "path": path, - "tool_call_id": tool_call_id, - "display_id": display_id - }) - - if not state.get("start_seen"): - error_msg = "未检测到格式正确的 <<>> 标记。" - debug_log(error_msg) - result["error"] = error_msg - result["summary_message"] = error_msg - result["tool_content"] = json.dumps({ - "success": False, - "path": path, - "error": error_msg, - "finish_reason": finish_reason - }, ensure_ascii=False) - if display_id: - sender('update_action', { - 'id': display_id, - 'status': 'failed', - 'preparing_id': tool_call_id, - 'message': error_msg - }) - if hasattr(web_terminal, "pending_modify_request"): - web_terminal.pending_modify_request = None - pending_modify = None - modify_probe_buffer = "" - return result - - forced = end_index is None - apply_text = buffer if forced else buffer[:end_index] - raw_content = raw_buffer if forced else raw_buffer[:len(start_marker) + end_index + len(end_marker)] - if 
raw_content: - result["assistant_content"] = raw_content - - blocks_info = [] - block_reports = {} - detected_indices = set() - block_pattern = re.compile(r"\[replace:(\d+)\](.*?)\[/replace\]", re.DOTALL) - structure_warnings: List[str] = [] - structure_detail_entries: List[Dict] = [] - - def record_structure_warning(message: str, hint: Optional[str] = None): - """记录结构性缺陷,便于给出更具体的反馈。""" - if message in structure_warnings: - return - structure_warnings.append(message) - structure_detail_entries.append({ - "index": 0, - "status": "failed", - "reason": message, - "removed_lines": 0, - "added_lines": 0, - "hint": hint or "请严格按照模板输出:[replace:n] + <>/<> + [/replace],并使用 <<>> 收尾。" - }) - - def extract_segment(body: str, tag: str): - marker = f"<<{tag}>>" - end_tag = "<>" - start_pos = body.find(marker) - if start_pos == -1: - return None, f"缺少 {marker}" - start_pos += len(marker) - if body[start_pos:start_pos+2] == "\r\n": - start_pos += 2 - elif body[start_pos:start_pos+1] == "\n": - start_pos += 1 - end_pos = body.find(end_tag, start_pos) - if end_pos == -1: - return None, f"缺少 {end_tag}" - segment = body[start_pos:end_pos] - return segment, None - - for match in block_pattern.finditer(apply_text): - try: - index = int(match.group(1)) - except ValueError: - continue - body = match.group(2) - if index in detected_indices: - continue - detected_indices.add(index) - block_reports[index] = { - "index": index, - "status": "pending", - "reason": None, - "removed_lines": 0, - "added_lines": 0, - "hint": None - } - old_content, old_error = extract_segment(body, "OLD") - new_content, new_error = extract_segment(body, "NEW") - if old_error or new_error: - reason = old_error or new_error - block_reports[index]["status"] = "failed" - block_reports[index]["reason"] = reason - blocks_info.append({ - "index": index, - "old": old_content, - "new": new_content, - "error": old_error or new_error - }) - - if not blocks_info: - has_replace_start = bool(re.search(r"\[replace:\s*\d+\]", 
apply_text)) - has_replace_end = "[/replace]" in apply_text - has_old_tag = "<>" in apply_text - has_new_tag = "<>" in apply_text - - if has_replace_start and not has_replace_end: - record_structure_warning("检测到 [replace:n] 标记但缺少对应的 [/replace] 结束标记。") - if has_replace_end and not has_replace_start: - record_structure_warning("检测到 [/replace] 结束标记但缺少对应的 [replace:n] 起始标记。") - - old_tags = len(re.findall(r"<>", apply_text)) - completed_old_tags = len(re.findall(r"<>[\s\S]*?<>", apply_text)) - if old_tags and completed_old_tags < old_tags: - record_structure_warning("检测到 <> 段落但未看到对应的 <> 结束标记。") - - new_tags = len(re.findall(r"<>", apply_text)) - completed_new_tags = len(re.findall(r"<>[\s\S]*?<>", apply_text)) - if new_tags and completed_new_tags < new_tags: - record_structure_warning("检测到 <> 段落但未看到对应的 <> 结束标记。") - - if (has_replace_start or has_replace_end or has_old_tag or has_new_tag) and not structure_warnings: - record_structure_warning("检测到部分补丁标记,但整体结构不完整,请严格按照模板填写所有标记。") - - total_blocks = len(blocks_info) - result["total_blocks"] = total_blocks - if forced: - debug_log("未检测到 <<>>,将在流结束处执行已识别的修改块。") - result["forced"] = True - - blocks_to_apply = [ - {"index": block["index"], "old": block["old"], "new": block["new"]} - for block in blocks_info - if block["error"] is None and block["old"] is not None and block["new"] is not None - ] - - # 记录格式残缺的块 - for block in blocks_info: - if block["error"]: - idx = block["index"] - block_reports[idx]["status"] = "failed" - block_reports[idx]["reason"] = block["error"] - block_reports[idx]["hint"] = "请检查补丁块的 OLD/NEW 标记是否完整,必要时复用 terminal_snapshot 或终端命令重新调整。" - - apply_result = {} - if blocks_to_apply: - apply_result = web_terminal.file_manager.apply_modify_blocks(path, blocks_to_apply) - else: - apply_result = {"success": False, "completed": [], "failed": [], "results": [], "write_performed": False, "error": None} - - block_result_map = {item["index"]: item for item in apply_result.get("results", [])} - - for block in 
blocks_info: - idx = block["index"] - report = block_reports.get(idx) - if report is None: - continue - if report["status"] == "failed": - continue - block_apply = block_result_map.get(idx) - if not block_apply: - report["status"] = "failed" - report["reason"] = "未执行,可能未找到匹配原文" - report["hint"] = report.get("hint") or "请确认 OLD 文本与文件内容完全一致;若多次失败,可改用终端命令/Python 进行精准替换。" - continue - status = block_apply.get("status") - report["removed_lines"] = block_apply.get("removed_lines", 0) - report["added_lines"] = block_apply.get("added_lines", 0) - if block_apply.get("hint"): - report["hint"] = block_apply.get("hint") - if status == "success": - report["status"] = "completed" - elif status == "not_found": - report["status"] = "failed" - report["reason"] = block_apply.get("reason") or "未找到匹配的原文" - if not report.get("hint"): - report["hint"] = "请使用 terminal_snapshot/grep -n 校验原文,或在说明后改用 run_command/python 精确替换。" - else: - report["status"] = "failed" - report["reason"] = block_apply.get("reason") or "替换失败" - if not report.get("hint"): - report["hint"] = block_apply.get("hint") or "若多次尝试仍失败,可考虑利用终端命令或 Python 小脚本完成此次修改。" - - completed_blocks = sorted([idx for idx, rep in block_reports.items() if rep["status"] == "completed"]) - failed_blocks = sorted([idx for idx, rep in block_reports.items() if rep["status"] != "completed"]) - - result["completed_blocks"] = completed_blocks - result["failed_blocks"] = failed_blocks - details = sorted(block_reports.values(), key=lambda x: x["index"]) - if structure_detail_entries: - details = structure_detail_entries + details - result["details"] = details - - summary_parts = [] - if total_blocks == 0: - summary_parts.append("未检测到有效的修改块,未执行任何修改。") - summary_parts.extend(structure_warnings) - else: - if not completed_blocks and failed_blocks: - summary_parts.append(f"共检测到 {total_blocks} 个修改块,全部未执行。") - elif completed_blocks and not failed_blocks: - summary_parts.append(f"共 {total_blocks} 个修改块全部完成。") - else: - summary_parts.append( - f"共检测到 
{total_blocks} 个修改块,其中成功 {len(completed_blocks)} 个,失败 {len(failed_blocks)} 个。" - ) - if forced: - summary_parts.append("未检测到 <<>> 标记,系统已在流结束处执行补丁。") - if apply_result.get("error"): - summary_parts.append(apply_result["error"]) - - matching_note = "提示:补丁匹配基于完整文本,包含注释和空白符,请确保 <<>> 段落与文件内容逐字一致。如果修改成功,请忽略,如果失败,请明确原文后再次尝试。" - summary_parts.append(matching_note) - summary_message = " ".join(summary_parts).strip() - result["summary_message"] = summary_message - result["success"] = bool(completed_blocks) and not failed_blocks and apply_result.get("error") is None - - tool_payload = { - "success": result["success"], - "path": path, - "total_blocks": total_blocks, - "completed": completed_blocks, - "failed": [ - { - "index": rep["index"], - "reason": rep.get("reason"), - "hint": rep.get("hint") - } - for rep in result["details"] if rep["status"] != "completed" - ], - "forced": forced, - "message": summary_message, - "finish_reason": finish_reason, - "details": result["details"] - } - if apply_result.get("error"): - tool_payload["error"] = apply_result["error"] - - result["tool_content"] = json.dumps(tool_payload, ensure_ascii=False) - result["assistant_metadata"] = { - "modify_payload": { - "path": path, - "total_blocks": total_blocks, - "completed": completed_blocks, - "failed": failed_blocks, - "forced": forced, - "details": result["details"] - } - } - - if display_id: - sender('update_action', { - 'id': display_id, - 'status': 'completed' if result["success"] else 'failed', - 'result': tool_payload, - 'preparing_id': tool_call_id, - 'message': summary_message - }) - - pending_modify = None - modify_probe_buffer = "" - if hasattr(web_terminal, "pending_modify_request"): - web_terminal.pending_modify_request = None - return result - - async def process_sub_agent_updates( - messages: List[Dict], - inline: bool = False, - after_tool_call_id: Optional[str] = None - ): - """轮询子智能体任务并通知前端,并把结果插入当前对话上下文。""" - manager = getattr(web_terminal, "sub_agent_manager", None) - if not 
manager: - return - try: - updates = manager.poll_updates() - debug_log(f"[SubAgent] poll inline={inline} updates={len(updates)}") - except Exception as exc: - debug_log(f"子智能体状态检查失败: {exc}") - return - for update in updates: - message = update.get("system_message") - if not message: - continue - task_id = update.get("task_id") - debug_log(f"[SubAgent] update task={task_id} inline={inline} msg={message}") - web_terminal._record_sub_agent_message(message, task_id, inline=inline) - debug_log(f"[SubAgent] recorded task={task_id}, 计算插入位置") - - insert_index = len(messages) - if after_tool_call_id: - for idx, msg in enumerate(messages): - if msg.get("role") == "tool" and msg.get("tool_call_id") == after_tool_call_id: - insert_index = idx + 1 - break - - messages.insert(insert_index, { - "role": "system", - "content": message, - "metadata": {"sub_agent_notice": True, "inline": inline, "task_id": task_id} - }) - debug_log(f"[SubAgent] 插入系统消息位置: {insert_index}") - sender('system_message', { - 'content': message, - 'inline': inline - }) - maybe_mark_failure_from_message(web_terminal, message) - - async def _wait_retry_delay(delay_seconds: int) -> bool: - """等待重试间隔,同时检查是否收到停止请求。""" - if delay_seconds <= 0: - return False - deadline = time.time() + delay_seconds - while time.time() < deadline: - client_stop_info = get_stop_flag(client_sid, username) - if client_stop_info: - stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info - if stop_requested: - sender('task_stopped', { - 'message': '命令执行被用户取消', - 'reason': 'user_stop' - }) - clear_stop_flag(client_sid, username) - return True - await asyncio.sleep(0.2) - return False - - iteration = 0 - while max_iterations is None or iteration < max_iterations: - current_iteration = iteration + 1 - iteration += 1 - total_iterations += 1 - iteration_limit_label = max_iterations if max_iterations is not None else "∞" - debug_log(f"\n--- 迭代 
{current_iteration}/{iteration_limit_label} 开始 ---") - - # 检查是否超过总工具调用限制 - if MAX_TOTAL_TOOL_CALLS is not None and total_tool_calls >= MAX_TOTAL_TOOL_CALLS: - debug_log(f"已达到最大工具调用次数限制 ({MAX_TOTAL_TOOL_CALLS})") - sender('system_message', { - 'content': f'⚠️ 已达到最大工具调用次数限制 ({MAX_TOTAL_TOOL_CALLS}),任务结束。' - }) - mark_force_thinking(web_terminal, reason="tool_limit") - break - - apply_thinking_schedule(web_terminal) - - full_response = "" - tool_calls = [] - current_thinking = "" - detected_tools = {} - last_usage_payload = None - - # 状态标志 - in_thinking = False - thinking_started = False - thinking_ended = False - text_started = False - text_has_content = False - text_streaming = False - text_chunk_index = 0 - last_text_chunk_time: Optional[float] = None - - # 计数器 - chunk_count = 0 - reasoning_chunks = 0 - content_chunks = 0 - tool_chunks = 0 - append_break_triggered = False - append_result = {"handled": False} - modify_break_triggered = False - modify_result = {"handled": False} - last_finish_reason = None - - def _cancel_pending_tools(tool_calls_list): - """为尚未返回结果的工具生成取消结果,防止缺失 tool_call_id 造成后续 400。""" - if not tool_calls_list: - return - for tc in tool_calls_list: - tc_id = tc.get("id") - func_name = tc.get("function", {}).get("name") - sender('update_action', { - 'preparing_id': tc_id, - 'status': 'cancelled', - 'result': { - "success": False, - "status": "cancelled", - "message": "命令执行被用户取消", - "tool": func_name - } - }) - if tc_id: - messages.append({ - "role": "tool", - "tool_call_id": tc_id, - "name": func_name, - "content": "命令执行被用户取消", - "metadata": {"status": "cancelled"} - }) - - thinking_expected = web_terminal.api_client.get_current_thinking_mode() - debug_log(f"思考模式: {thinking_expected}") - quota_allowed = True - quota_info = {} - if hasattr(web_terminal, "record_model_call"): - quota_allowed, quota_info = web_terminal.record_model_call(bool(thinking_expected)) - if not quota_allowed: - quota_type = 'thinking' if thinking_expected else 'fast' - 
socketio.emit('quota_notice', { - 'type': quota_type, - 'reset_at': quota_info.get('reset_at'), - 'limit': quota_info.get('limit'), - 'count': quota_info.get('count') - }, room=f"user_{getattr(web_terminal, 'username', '')}") - sender('quota_exceeded', { - 'type': quota_type, - 'reset_at': quota_info.get('reset_at') - }) - sender('error', { - 'message': "配额已达到上限,暂时无法继续调用模型。", - 'quota': quota_info - }) - return - - tool_call_limit_label = MAX_TOTAL_TOOL_CALLS if MAX_TOTAL_TOOL_CALLS is not None else "∞" - print(f"[API] 第{current_iteration}次调用 (总工具调用: {total_tool_calls}/{tool_call_limit_label})") - - api_error = None - for api_attempt in range(max_api_retries + 1): - api_error = None - if api_attempt > 0: - full_response = "" - tool_calls = [] - current_thinking = "" - detected_tools = {} - last_usage_payload = None - in_thinking = False - thinking_started = False - thinking_ended = False - text_started = False - text_has_content = False - text_streaming = False - text_chunk_index = 0 - last_text_chunk_time = None - chunk_count = 0 - reasoning_chunks = 0 - content_chunks = 0 - tool_chunks = 0 - append_break_triggered = False - append_result = {"handled": False} - modify_break_triggered = False - modify_result = {"handled": False} - last_finish_reason = None - - # 收集流式响应 - async for chunk in web_terminal.api_client.chat(messages, tools, stream=True): - chunk_count += 1 - - # 检查停止标志 - client_stop_info = get_stop_flag(client_sid, username) - if client_stop_info: - stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info - if stop_requested: - debug_log(f"检测到停止请求,中断流处理") - if pending_append: - append_result = await finalize_pending_append(full_response, False, finish_reason="user_stop") - if pending_modify: - modify_result = await finalize_pending_modify(full_response, False, finish_reason="user_stop") - _cancel_pending_tools(tool_calls) - sender('task_stopped', { - 'message': '命令执行被用户取消', - 'reason': 'user_stop' - 
}) - clear_stop_flag(client_sid, username) - return - - if isinstance(chunk, dict) and chunk.get("error"): - api_error = chunk.get("error") - break - - # 先尝试记录 usage(有些平台会在最后一个 chunk 里携带 usage 但 choices 为空) - usage_info = chunk.get("usage") - if usage_info: - last_usage_payload = usage_info - - if "choices" not in chunk: - debug_log(f"Chunk {chunk_count}: 无choices字段") - continue - if not chunk.get("choices"): - debug_log(f"Chunk {chunk_count}: choices为空列表") - continue - choice = chunk["choices"][0] - if not usage_info and isinstance(choice, dict) and choice.get("usage"): - # 兼容部分供应商将 usage 放在 choice 内的格式(例如部分 Kimi/Qwen 返回) - last_usage_payload = choice.get("usage") - delta = choice.get("delta", {}) - finish_reason = choice.get("finish_reason") - if finish_reason: - last_finish_reason = finish_reason - - # 处理思考内容(兼容 reasoning_content / reasoning_details) - reasoning_content = "" - if "reasoning_content" in delta: - reasoning_content = delta.get("reasoning_content") or "" - elif "reasoning_details" in delta: - details = delta.get("reasoning_details") - if isinstance(details, list): - parts = [] - for item in details: - if isinstance(item, dict): - text = item.get("text") - if text: - parts.append(text) - if parts: - reasoning_content = "".join(parts) - if reasoning_content: - reasoning_chunks += 1 - debug_log(f" 思考内容 #{reasoning_chunks}: {len(reasoning_content)} 字符") - - if not thinking_started: - in_thinking = True - thinking_started = True - sender('thinking_start', {}) - await asyncio.sleep(0.05) - - current_thinking += reasoning_content - sender('thinking_chunk', {'content': reasoning_content}) - - # 处理正常内容 - if "content" in delta: - content = delta["content"] - if content: - content_chunks += 1 - debug_log(f" 正式内容 #{content_chunks}: {repr(content[:100] if content else 'None')}") - - if in_thinking and not thinking_ended: - in_thinking = False - thinking_ended = True - sender('thinking_end', {'full_content': current_thinking}) - await asyncio.sleep(0.1) - - - 
expecting_modify = bool(pending_modify) or bool(getattr(web_terminal, "pending_modify_request", None)) - expecting_append = bool(pending_append) or bool(getattr(web_terminal, "pending_append_request", None)) - - if pending_modify: - if not pending_modify.get("start_seen"): - probe_buffer = pending_modify.get("probe_buffer", "") + content - if len(probe_buffer) > 10000: - probe_buffer = probe_buffer[-10000:] - marker = pending_modify.get("start_marker") - marker_index = probe_buffer.find(marker) - if marker_index == -1: - pending_modify["probe_buffer"] = probe_buffer - continue - after_marker = marker_index + len(marker) - remainder = probe_buffer[after_marker:] - pending_modify["buffer"] = remainder - pending_modify["raw_buffer"] = marker + remainder - pending_modify["start_seen"] = True - pending_modify["detected_blocks"] = set() - pending_modify["probe_buffer"] = "" - if pending_modify.get("display_id"): - sender('update_action', { - 'id': pending_modify["display_id"], - 'status': 'running', - 'preparing_id': pending_modify.get("tool_call_id"), - 'message': f"正在修改 {pending_modify['path']}..." - }) - else: - pending_modify["buffer"] += content - pending_modify["raw_buffer"] += content - - if pending_modify.get("start_seen"): - block_text = pending_modify["buffer"] - for match in re.finditer(r"\[replace:(\d+)\]", block_text): - try: - block_index = int(match.group(1)) - except ValueError: - continue - detected_blocks = pending_modify.setdefault("detected_blocks", set()) - if block_index not in detected_blocks: - detected_blocks.add(block_index) - if pending_modify.get("display_id"): - sender('update_action', { - 'id': pending_modify["display_id"], - 'status': 'running', - 'preparing_id': pending_modify.get("tool_call_id"), - 'message': f"正在对 {pending_modify['path']} 进行第 {block_index} 处修改..." 
- }) - - if pending_modify.get("start_seen"): - end_pos = pending_modify["buffer"].find(pending_modify["end_marker"]) - if end_pos != -1: - pending_modify["end_index"] = end_pos - modify_break_triggered = True - debug_log("检测到<<>>,即将终止流式输出并应用修改") - break - continue - elif expecting_modify: - modify_probe_buffer += content - if len(modify_probe_buffer) > 10000: - modify_probe_buffer = modify_probe_buffer[-10000:] - - marker_match = re.search(r"<<>>", modify_probe_buffer) - if marker_match: - detected_raw_path = marker_match.group(1) - detected_path = detected_raw_path.strip() - marker_full = marker_match.group(0) - after_marker_index = modify_probe_buffer.find(marker_full) + len(marker_full) - remainder = modify_probe_buffer[after_marker_index:] - modify_probe_buffer = "" - - if not detected_path: - debug_log("检测到 MODIFY 起始标记但路径为空,忽略。") - continue - - pending_modify = { - "path": detected_path, - "tool_call_id": None, - "buffer": remainder, - "raw_buffer": marker_full + remainder, - "start_marker": marker_full, - "end_marker": "<<>>", - "start_seen": True, - "end_index": None, - "display_id": None, - "detected_blocks": set() - } - if hasattr(web_terminal, "pending_modify_request"): - web_terminal.pending_modify_request = {"path": detected_path} - debug_log(f"直接检测到modify起始标记,构建修改缓冲: {detected_path}") - - end_pos = pending_modify["buffer"].find(pending_modify["end_marker"]) - if end_pos != -1: - pending_modify["end_index"] = end_pos - modify_break_triggered = True - debug_log("检测到<<>>,即将终止流式输出并应用修改") - break - continue - - if pending_append: - pending_append["buffer"] += content - - if pending_append.get("content_start") is None: - marker_index = pending_append["buffer"].find(pending_append["start_marker"]) - if marker_index != -1: - pending_append["content_start"] = marker_index + len(pending_append["start_marker"]) - debug_log(f"检测到追加起始标识: {pending_append['start_marker']}") - - if pending_append.get("content_start") is not None: - end_index = 
pending_append["buffer"].find( - pending_append["end_marker"], - pending_append["content_start"] - ) - if end_index != -1: - pending_append["end_index"] = end_index - append_break_triggered = True - debug_log("检测到<<>>,即将终止流式输出并写入文件") - break - - # 继续累积追加内容 - continue - elif expecting_append: - append_probe_buffer += content - # 限制缓冲区大小防止过长 - if len(append_probe_buffer) > 10000: - append_probe_buffer = append_probe_buffer[-10000:] - - marker_match = re.search(r"<<>>", append_probe_buffer) - if marker_match: - detected_raw_path = marker_match.group(1) - detected_path = detected_raw_path.strip() - if not detected_path: - append_probe_buffer = append_probe_buffer[marker_match.end():] - continue - marker_full = marker_match.group(0) - after_marker_index = append_probe_buffer.find(marker_full) + len(marker_full) - remainder = append_probe_buffer[after_marker_index:] - append_probe_buffer = "" - pending_append = { - "path": detected_path, - "tool_call_id": None, - "buffer": remainder, - "start_marker": marker_full, - "end_marker": "<<>>", - "content_start": 0, - "end_index": None, - "display_id": None - } - if hasattr(web_terminal, "pending_append_request"): - web_terminal.pending_append_request = {"path": detected_path} - debug_log(f"直接检测到append起始标记,构建追加缓冲: {detected_path}") - # 检查是否立即包含结束标记 - if pending_append["buffer"]: - end_index = pending_append["buffer"].find(pending_append["end_marker"], pending_append["content_start"]) - if end_index != -1: - pending_append["end_index"] = end_index - append_break_triggered = True - debug_log("检测到<<>>,即将终止流式输出并写入文件") - break - continue - - if not text_started: - text_started = True - text_streaming = True - sender('text_start', {}) - brief_log("模型输出了内容") - await asyncio.sleep(0.05) - - if not pending_append: - full_response += content - accumulated_response += content - text_has_content = True - emit_time = time.time() - elapsed = 0.0 if last_text_chunk_time is None else emit_time - last_text_chunk_time - last_text_chunk_time = 
emit_time - text_chunk_index += 1 - log_backend_chunk( - conversation_id, - current_iteration, - text_chunk_index, - elapsed, - len(content), - content[:32] - ) - sender('text_chunk', { - 'content': content, - 'index': text_chunk_index, - 'elapsed': elapsed - }) - - # 收集工具调用 - 实时发送准备状态 - if "tool_calls" in delta: - tool_chunks += 1 - for tc in delta["tool_calls"]: - found = False - for existing in tool_calls: - if existing.get("index") == tc.get("index"): - if "function" in tc and "arguments" in tc["function"]: - arg_chunk = tc["function"]["arguments"] - existing_fn = existing.get("function", {}) - existing_args = existing_fn.get("arguments", "") - existing_fn["arguments"] = (existing_args or "") + arg_chunk - existing["function"] = existing_fn - - combined_args = existing_fn.get("arguments", "") - tool_id = existing.get("id") or tc.get("id") - tool_name = ( - existing_fn.get("name") - or tc.get("function", {}).get("name", "") - ) - intent_value = extract_intent_from_partial(combined_args) - if ( - intent_value - and tool_id - and detected_tool_intent.get(tool_id) != intent_value - ): - detected_tool_intent[tool_id] = intent_value - brief_log(f"[intent] 增量提取 {tool_name}: {intent_value}") - sender('tool_intent', { - 'id': tool_id, - 'name': tool_name, - 'intent': intent_value, - 'conversation_id': conversation_id - }) - debug_log(f" 发送工具意图: {tool_name} -> {intent_value}") - await asyncio.sleep(0.01) - found = True - break - - if not found and tc.get("id"): - tool_id = tc["id"] - tool_name = tc.get("function", {}).get("name", "") - arguments_str = tc.get("function", {}).get("arguments", "") or "" - - # 新工具检测到,立即发送准备事件 - if tool_id not in detected_tools and tool_name: - detected_tools[tool_id] = tool_name - - # 尝试提前提取 intent - intent_value = None - if arguments_str: - intent_value = extract_intent_from_partial(arguments_str) - if intent_value: - detected_tool_intent[tool_id] = intent_value - brief_log(f"[intent] 预提取 {tool_name}: {intent_value}") - - # 立即发送工具准备中事件 - 
brief_log(f"[tool] 准备调用 {tool_name} (id={tool_id}) intent={intent_value or '-'}") - sender('tool_preparing', { - 'id': tool_id, - 'name': tool_name, - 'message': f'准备调用 {tool_name}...', - 'intent': intent_value, - 'conversation_id': conversation_id - }) - debug_log(f" 发送工具准备事件: {tool_name}") - await asyncio.sleep(0.1) - - tool_calls.append({ - "id": tool_id, - "index": tc.get("index"), - "type": "function", - "function": { - "name": tool_name, - "arguments": arguments_str - } - }) - # 尝试从增量参数中抽取 intent,并单独推送 - if tool_id and arguments_str: - intent_value = extract_intent_from_partial(arguments_str) - if intent_value and detected_tool_intent.get(tool_id) != intent_value: - detected_tool_intent[tool_id] = intent_value - sender('tool_intent', { - 'id': tool_id, - 'name': tool_name, - 'intent': intent_value, - 'conversation_id': conversation_id - }) - debug_log(f" 发送工具意图: {tool_name} -> {intent_value}") - await asyncio.sleep(0.01) - debug_log(f" 新工具: {tool_name}") - - # 检查是否被停止 - client_stop_info = get_stop_flag(client_sid, username) - if client_stop_info: - stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info - if stop_requested: - debug_log("任务在流处理完成后检测到停止状态") - sender('task_stopped', { - 'message': '命令执行被用户取消', - 'reason': 'user_stop' - }) - _cancel_pending_tools(tool_calls) - clear_stop_flag(client_sid, username) - return - - # === API响应完成后只计算输出token === - if last_usage_payload: - try: - web_terminal.context_manager.apply_usage_statistics(last_usage_payload) - debug_log( - f"Usage统计: prompt={last_usage_payload.get('prompt_tokens', 0)}, " - f"completion={last_usage_payload.get('completion_tokens', 0)}, " - f"total={last_usage_payload.get('total_tokens', 0)}" - ) - except Exception as e: - debug_log(f"Usage统计更新失败: {e}") - else: - debug_log("未获取到usage字段,跳过token统计更新") - - - if api_error: - try: - debug_log(f"API错误原始数据: {json.dumps(api_error, ensure_ascii=False)}") - except Exception: - 
debug_log(f"API错误原始数据(不可序列化): {repr(api_error)}") - error_message = "" - error_status = None - error_type = None - error_code = None - error_text = "" - request_dump = None - error_base_url = None - error_model_id = None - if isinstance(api_error, dict): - error_status = api_error.get("status_code") - error_type = api_error.get("error_type") or api_error.get("type") - error_code = api_error.get("error_code") or api_error.get("code") - error_text = api_error.get("error_text") or "" - error_message = ( - api_error.get("error_message") - or api_error.get("message") - or error_text - or "" - ) - request_dump = api_error.get("request_dump") - error_base_url = api_error.get("base_url") - error_model_id = api_error.get("model_id") - elif isinstance(api_error, str): - error_message = api_error - if not error_message: - if error_status: - error_message = f"API 请求失败(HTTP {error_status})" - else: - error_message = "API 请求失败" - # 若命中阿里云配额错误,立即写入状态并切换到官方 API - try: - from utils.aliyun_fallback import compute_disabled_until, set_disabled_until - disabled_until, reason = compute_disabled_until(error_message) - if disabled_until and reason: - set_disabled_until(getattr(web_terminal, "model_key", None) or "kimi-k2.5", disabled_until, reason) - profile = get_model_profile(getattr(web_terminal, "model_key", None) or "kimi-k2.5") - web_terminal.apply_model_profile(profile) - except Exception as exc: - debug_log(f"处理阿里云配额回退失败: {exc}") - can_retry = ( - api_attempt < max_api_retries - and not full_response - and not tool_calls - and not current_thinking - and not pending_append - and not pending_modify - ) - sender('error', { - 'message': error_message, - 'status_code': error_status, - 'error_type': error_type, - 'error_code': error_code, - 'error_text': error_text, - 'request_dump': request_dump, - 'base_url': error_base_url, - 'model_id': error_model_id, - 'retry': bool(can_retry), - 'retry_in': retry_delay_seconds if can_retry else None, - 'attempt': api_attempt + 1, - 
'max_attempts': max_api_retries + 1 - }) - if can_retry: - try: - profile = get_model_profile(getattr(web_terminal, "model_key", None) or "kimi-k2.5") - web_terminal.apply_model_profile(profile) - except Exception as exc: - debug_log(f"重试前更新模型配置失败: {exc}") - cancelled = await _wait_retry_delay(retry_delay_seconds) - if cancelled: - return - continue - _cancel_pending_tools(tool_calls) - return - break - - # 流结束后的处理 - debug_log(f"\n流结束统计:") - debug_log(f" 总chunks: {chunk_count}") - debug_log(f" 思考chunks: {reasoning_chunks}") - debug_log(f" 内容chunks: {content_chunks}") - debug_log(f" 工具chunks: {tool_chunks}") - debug_log(f" 收集到的思考: {len(current_thinking)} 字符") - debug_log(f" 收集到的正文: {len(full_response)} 字符") - debug_log(f" 收集到的工具: {len(tool_calls)} 个") - - if not append_result["handled"] and pending_append: - append_result = await finalize_pending_append(full_response, True, finish_reason=last_finish_reason) - if not modify_result["handled"] and pending_modify: - modify_result = await finalize_pending_modify(full_response, True, finish_reason=last_finish_reason) - - # 结束未完成的流 - if in_thinking and not thinking_ended: - sender('thinking_end', {'full_content': current_thinking}) - await asyncio.sleep(0.1) - - - # 确保text_end事件被发送 - if text_started and text_has_content and not append_result["handled"] and not modify_result["handled"]: - debug_log(f"发送text_end事件,完整内容长度: {len(full_response)}") - sender('text_end', {'full_content': full_response}) - await asyncio.sleep(0.1) - text_streaming = False - - if full_response.strip(): - debug_log(f"流式文本内容长度: {len(full_response)} 字符") - - if append_result["handled"]: - append_metadata = append_result.get("assistant_metadata") - append_content_text = append_result.get("assistant_content") - if append_content_text: - web_terminal.context_manager.add_conversation( - "assistant", - append_content_text, - metadata=append_metadata - ) - debug_log("💾 增量保存:追加正文快照") - - payload_info = append_metadata.get("append_payload") if append_metadata 
else {} - sender('append_payload', { - 'path': payload_info.get("path") or append_result.get("path"), - 'forced': payload_info.get("forced", False), - 'lines': payload_info.get("lines"), - 'bytes': payload_info.get("bytes"), - 'tool_call_id': payload_info.get("tool_call_id") or append_result.get("tool_call_id"), - 'success': payload_info.get("success", append_result.get("success", False)), - 'conversation_id': conversation_id - }) - - if append_result["tool_content"]: - tool_call_id = append_result.get("tool_call_id") or f"append_{int(time.time() * 1000)}" - system_notice = format_tool_result_notice("append_to_file", tool_call_id, append_result["tool_content"]) - web_terminal.context_manager.add_conversation("system", system_notice) - append_result["tool_call_id"] = tool_call_id - debug_log("💾 增量保存:append_to_file 工具结果(system 通知)") - - finish_reason = append_result.get("finish_reason") - path_for_prompt = append_result.get("path") - need_follow_prompt = ( - finish_reason == "length" or - append_result.get("forced") or - not append_result.get("success") - ) - - if need_follow_prompt and path_for_prompt: - prompt_lines = [ - f"append_to_file 在处理 {path_for_prompt} 时未完成,需要重新发起写入。" - ] - if finish_reason == "length": - prompt_lines.append( - "上一次输出达到系统单次输出上限,已写入的内容已保存。" - ) - if append_result.get("forced"): - prompt_lines.append( - "收到的内容缺少 <<>> 标记,系统依据流式结束位置落盘。" - ) - if not append_result.get("success"): - prompt_lines.append("系统未能识别有效的追加标记。") - prompt_lines.append( - "请再次调用 append_to_file 工具获取新的写入窗口,并在工具调用的输出中遵循以下格式:" - ) - prompt_lines.append(f"<<>>") - prompt_lines.append("...填写剩余正文,如内容已完成可留空...") - prompt_lines.append("<<>>") - prompt_lines.append("不要在普通回复中粘贴上述标记,必须通过 append_to_file 工具发送。") - follow_prompt = "\n".join(prompt_lines) - messages.append({ - "role": "system", - "content": follow_prompt - }) - web_terminal.context_manager.add_conversation("system", follow_prompt) - debug_log("已注入追加任务提示") - - if append_result["handled"] and append_result.get("forced") and 
append_result.get("success"): - mark_force_thinking(web_terminal, reason="append_forced_finish") - if append_result["handled"] and not append_result.get("success"): - sender('system_message', { - 'content': f'⚠️ 追加写入失败:{append_result.get("error")}' - }) - maybe_mark_failure_from_message(web_terminal, f'⚠️ 追加写入失败:{append_result.get("error")}') - mark_force_thinking(web_terminal, reason="append_failed") - - if modify_result["handled"]: - modify_metadata = modify_result.get("assistant_metadata") - modify_content_text = modify_result.get("assistant_content") - if modify_content_text: - web_terminal.context_manager.add_conversation( - "assistant", - modify_content_text, - metadata=modify_metadata - ) - debug_log("💾 增量保存:修改正文快照") - - payload_info = modify_metadata.get("modify_payload") if modify_metadata else {} - sender('modify_payload', { - 'path': payload_info.get("path") or modify_result.get("path"), - 'total': payload_info.get("total_blocks") or modify_result.get("total_blocks"), - 'completed': payload_info.get("completed") or modify_result.get("completed_blocks"), - 'failed': payload_info.get("failed") or modify_result.get("failed_blocks"), - 'forced': payload_info.get("forced", modify_result.get("forced", False)), - 'success': modify_result.get("success", False), - 'conversation_id': conversation_id - }) - - if modify_result["tool_content"]: - tool_call_id = modify_result.get("tool_call_id") or f"modify_{int(time.time() * 1000)}" - system_notice = format_tool_result_notice("modify_file", tool_call_id, modify_result["tool_content"]) - web_terminal.context_manager.add_conversation("system", system_notice) - modify_result["tool_call_id"] = tool_call_id - debug_log("💾 增量保存:modify_file 工具结果(system 通知)") - - path_for_prompt = modify_result.get("path") - failed_blocks = modify_result.get("failed_blocks") or [] - need_follow_prompt = modify_result.get("forced") or bool(failed_blocks) - - if need_follow_prompt and path_for_prompt: - prompt_lines = [ - f"modify_file 在处理 
{path_for_prompt} 时未完成,需要重新发起补丁。" - ] - if modify_result.get("forced"): - prompt_lines.append( - "刚才的内容缺少 <<>> 标记,系统仅应用了已识别的部分。" - ) - if failed_blocks: - failed_text = "、".join(str(idx) for idx in failed_blocks) - prompt_lines.append(f"以下补丁未成功:第 {failed_text} 处。") - prompt_lines.append( - "请再次调用 modify_file 工具,并在新的工具调用中按以下模板提供完整补丁:" - ) - prompt_lines.append(f"<<>>") - prompt_lines.append("[replace:序号]") - prompt_lines.append("<>") - prompt_lines.append("...原文(必须逐字匹配,包含全部缩进、空格和换行)...") - prompt_lines.append("<>") - prompt_lines.append("<>") - prompt_lines.append("...新内容,可留空表示清空,注意保持结构完整...") - prompt_lines.append("<>") - prompt_lines.append("[/replace]") - prompt_lines.append("<<>>") - prompt_lines.append("请勿在普通回复中直接粘贴补丁,必须通过 modify_file 工具发送。") - follow_prompt = "\n".join(prompt_lines) - messages.append({ - "role": "system", - "content": follow_prompt - }) - web_terminal.context_manager.add_conversation("system", follow_prompt) - debug_log("已注入修改任务提示") - - if modify_result["handled"] and modify_result.get("failed_blocks"): - mark_force_thinking(web_terminal, reason="modify_partial_failure") - if modify_result["handled"] and modify_result.get("forced") and modify_result.get("success"): - mark_force_thinking(web_terminal, reason="modify_forced_finish") - if modify_result["handled"] and not modify_result.get("success"): - error_message = modify_result.get("summary_message") or modify_result.get("error") or "修改操作未成功,请根据提示重新执行。" - sender('system_message', { - 'content': f'⚠️ 修改操作存在未完成的内容:{error_message}' - }) - maybe_mark_failure_from_message(web_terminal, f'⚠️ 修改操作存在未完成的内容:{error_message}') - mark_force_thinking(web_terminal, reason="modify_failed") - - if web_terminal.api_client.last_call_used_thinking and current_thinking: - web_terminal.api_client.current_task_thinking = current_thinking or "" - if web_terminal.api_client.current_task_first_call: - web_terminal.api_client.current_task_first_call = False - update_thinking_after_call(web_terminal) - - # 
检测是否有格式错误的工具调用 - if not tool_calls and full_response and AUTO_FIX_TOOL_CALL and not append_result["handled"] and not modify_result["handled"]: - if detect_malformed_tool_call(full_response): - auto_fix_attempts += 1 - - if auto_fix_attempts <= AUTO_FIX_MAX_ATTEMPTS: - debug_log(f"检测到格式错误的工具调用,尝试自动修复 (尝试 {auto_fix_attempts}/{AUTO_FIX_MAX_ATTEMPTS})") - - fix_message = "你使用了错误的格式输出工具调用。请使用正确的工具调用格式而不是直接输出JSON。根据当前进度继续执行任务。" - - sender('system_message', { - 'content': f'⚠️ 自动修复: {fix_message}' - }) - maybe_mark_failure_from_message(web_terminal, f'⚠️ 自动修复: {fix_message}') - - messages.append({ - "role": "user", - "content": fix_message - }) - - await asyncio.sleep(1) - continue - else: - debug_log(f"自动修复尝试已达上限 ({AUTO_FIX_MAX_ATTEMPTS})") - sender('system_message', { - 'content': f'⌘ 工具调用格式错误,自动修复失败。请手动检查并重试。' - }) - maybe_mark_failure_from_message(web_terminal, '⌘ 工具调用格式错误,自动修复失败。请手动检查并重试。') - break - - # 构建助手消息(用于API继续对话) - assistant_content_parts = [] - - if full_response: - assistant_content_parts.append(full_response) - elif append_result["handled"] and append_result["assistant_content"]: - assistant_content_parts.append(append_result["assistant_content"]) - elif modify_result["handled"] and modify_result.get("assistant_content"): - assistant_content_parts.append(modify_result["assistant_content"]) - - assistant_content = "\n".join(assistant_content_parts) if assistant_content_parts else "" - - # 添加到消息历史(用于API继续对话,不保存到文件) - assistant_message = { - "role": "assistant", - "content": assistant_content, - "tool_calls": tool_calls - } - if current_thinking: - assistant_message["reasoning_content"] = current_thinking - - messages.append(assistant_message) - if assistant_content or current_thinking or tool_calls: - web_terminal.context_manager.add_conversation( - "assistant", - assistant_content, - tool_calls=tool_calls if tool_calls else None, - reasoning_content=current_thinking or None - ) - - # 为下一轮迭代重置流状态标志,但保留 full_response 供上面保存使用 - text_streaming = False - 
text_started = False - text_has_content = False - full_response = "" - - if append_result["handled"] and append_result.get("tool_content"): - tool_call_id = append_result.get("tool_call_id") or f"append_{int(time.time() * 1000)}" - system_notice = format_tool_result_notice("append_to_file", tool_call_id, append_result["tool_content"]) - messages.append({ - "role": "system", - "content": system_notice - }) - append_result["tool_call_id"] = tool_call_id - debug_log("已将 append_to_file 工具结果以 system 形式追加到对话上下文") - if modify_result["handled"] and modify_result.get("tool_content"): - tool_call_id = modify_result.get("tool_call_id") or f"modify_{int(time.time() * 1000)}" - system_notice = format_tool_result_notice("modify_file", tool_call_id, modify_result["tool_content"]) - messages.append({ - "role": "system", - "content": system_notice - }) - modify_result["tool_call_id"] = tool_call_id - debug_log("已将 modify_file 工具结果以 system 形式追加到对话上下文") - - force_continue = append_result["handled"] or modify_result["handled"] - if force_continue: - if append_result["handled"]: - debug_log("append_to_file 已处理,继续下一轮以让模型返回确认回复") - elif modify_result["handled"]: - debug_log("modify_file 已处理,继续下一轮以让模型返回确认回复") - else: - debug_log("补丁处理完成,继续下一轮以获取模型回复") - continue - - if not tool_calls: - debug_log("没有工具调用,结束迭代") - break - - # 检查连续相同工具调用 - for tc in tool_calls: - tool_name = tc["function"]["name"] - - if tool_name == last_tool_name: - consecutive_same_tool[tool_name] += 1 - - if ( - MAX_CONSECUTIVE_SAME_TOOL is not None - and consecutive_same_tool[tool_name] >= MAX_CONSECUTIVE_SAME_TOOL - ): - debug_log(f"警告: 连续调用相同工具 {tool_name} 已达 {MAX_CONSECUTIVE_SAME_TOOL} 次") - sender('system_message', { - 'content': f'⚠️ 检测到重复调用 {tool_name} 工具 {MAX_CONSECUTIVE_SAME_TOOL} 次,可能存在循环。' - }) - maybe_mark_failure_from_message(web_terminal, f'⚠️ 检测到重复调用 {tool_name} 工具 {MAX_CONSECUTIVE_SAME_TOOL} 次,可能存在循环。') - - if consecutive_same_tool[tool_name] >= MAX_CONSECUTIVE_SAME_TOOL + 2: - debug_log(f"终止: 工具 
{tool_name} 调用次数过多") - sender('system_message', { - 'content': f'⌘ 工具 {tool_name} 重复调用过多,任务终止。' - }) - maybe_mark_failure_from_message(web_terminal, f'⌘ 工具 {tool_name} 重复调用过多,任务终止。') - break - else: - consecutive_same_tool.clear() - consecutive_same_tool[tool_name] = 1 - - last_tool_name = tool_name - # 更新统计 - total_tool_calls += len(tool_calls) - - # 执行每个工具 - for tool_call in tool_calls: - # 检查停止标志 - client_stop_info = get_stop_flag(client_sid, username) - if client_stop_info: - stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info - if stop_requested: - debug_log("在工具调用过程中检测到停止状态") - tool_call_id = tool_call.get("id") - function_name = tool_call.get("function", {}).get("name") - # 通知前端该工具已被取消,避免界面卡住 - sender('update_action', { - 'preparing_id': tool_call_id, - 'status': 'cancelled', - 'result': { - "success": False, - "status": "cancelled", - "message": "命令执行被用户取消", - "tool": function_name - } - }) - # 在消息列表中记录取消结果,防止重新加载时仍显示运行中 - if tool_call_id: - messages.append({ - "role": "tool", - "tool_call_id": tool_call_id, - "name": function_name, - "content": "命令执行被用户取消", - "metadata": {"status": "cancelled"} - }) - sender('task_stopped', { - 'message': '命令执行被用户取消', - 'reason': 'user_stop' - }) - clear_stop_flag(client_sid, username) - return - - # 工具调用间隔控制 - current_time = time.time() - if last_tool_call_time > 0: - elapsed = current_time - last_tool_call_time - if elapsed < TOOL_CALL_COOLDOWN: - await asyncio.sleep(TOOL_CALL_COOLDOWN - elapsed) - last_tool_call_time = time.time() - - function_name = tool_call["function"]["name"] - arguments_str = tool_call["function"]["arguments"] - tool_call_id = tool_call["id"] - - - debug_log(f"准备解析JSON,工具: {function_name}, 参数长度: {len(arguments_str)}") - debug_log(f"JSON参数前200字符: {arguments_str[:200]}") - debug_log(f"JSON参数后200字符: {arguments_str[-200:]}") - - # 使用改进的参数解析方法 - if hasattr(web_terminal, 'api_client') and hasattr(web_terminal.api_client, 
'_safe_tool_arguments_parse'): - success, arguments, error_msg = web_terminal.api_client._safe_tool_arguments_parse(arguments_str, function_name) - if not success: - debug_log(f"安全解析失败: {error_msg}") - error_text = f'工具参数解析失败: {error_msg}' - error_payload = { - "success": False, - "error": error_text, - "error_type": "parameter_format_error", - "tool_name": function_name, - "tool_call_id": tool_call_id, - "message": error_text - } - sender('error', {'message': error_text}) - sender('update_action', { - 'preparing_id': tool_call_id, - 'status': 'completed', - 'result': error_payload, - 'message': error_text - }) - error_content = json.dumps(error_payload, ensure_ascii=False) - web_terminal.context_manager.add_conversation( - "tool", - error_content, - tool_call_id=tool_call_id, - name=function_name - ) - messages.append({ - "role": "tool", - "tool_call_id": tool_call_id, - "name": function_name, - "content": error_content - }) - continue - debug_log(f"使用安全解析成功,参数键: {list(arguments.keys())}") - else: - # 回退到带有基本修复逻辑的解析 - try: - arguments = json.loads(arguments_str) if arguments_str.strip() else {} - debug_log(f"直接JSON解析成功,参数键: {list(arguments.keys())}") - except json.JSONDecodeError as e: - debug_log(f"原始JSON解析失败: {e}") - # 尝试基本的JSON修复 - repaired_str = arguments_str.strip() - repair_attempts = [] - - # 修复1: 未闭合字符串 - if repaired_str.count('"') % 2 == 1: - repaired_str += '"' - repair_attempts.append("添加闭合引号") - - # 修复2: 未闭合JSON对象 - if repaired_str.startswith('{') and not repaired_str.rstrip().endswith('}'): - repaired_str = repaired_str.rstrip() + '}' - repair_attempts.append("添加闭合括号") - - # 修复3: 截断的JSON(移除不完整的最后一个键值对) - if not repair_attempts: # 如果前面的修复都没用上 - last_comma = repaired_str.rfind(',') - if last_comma > 0: - repaired_str = repaired_str[:last_comma] + '}' - repair_attempts.append("移除不完整的键值对") - - # 尝试解析修复后的JSON - try: - arguments = json.loads(repaired_str) - debug_log(f"JSON修复成功: {', '.join(repair_attempts)}") - debug_log(f"修复后参数键: {list(arguments.keys())}") 
- except json.JSONDecodeError as repair_error: - debug_log(f"JSON修复也失败: {repair_error}") - debug_log(f"修复尝试: {repair_attempts}") - debug_log(f"修复后内容前100字符: {repaired_str[:100]}") - error_text = f'工具参数解析失败: {e}' - error_payload = { - "success": False, - "error": error_text, - "error_type": "parameter_format_error", - "tool_name": function_name, - "tool_call_id": tool_call_id, - "message": error_text - } - sender('error', {'message': error_text}) - sender('update_action', { - 'preparing_id': tool_call_id, - 'status': 'completed', - 'result': error_payload, - 'message': error_text - }) - error_content = json.dumps(error_payload, ensure_ascii=False) - web_terminal.context_manager.add_conversation( - "tool", - error_content, - tool_call_id=tool_call_id, - name=function_name - ) - messages.append({ - "role": "tool", - "tool_call_id": tool_call_id, - "name": function_name, - "content": error_content - }) - continue - - debug_log(f"执行工具: {function_name} (ID: {tool_call_id})") - - # 发送工具开始事件 - tool_display_id = f"tool_{iteration}_{function_name}_{time.time()}" - monitor_snapshot = None - snapshot_path = None - memory_snapshot_type = None - if function_name in MONITOR_FILE_TOOLS: - snapshot_path = resolve_monitor_path(arguments) - monitor_snapshot = capture_monitor_snapshot(web_terminal.file_manager, snapshot_path, MONITOR_SNAPSHOT_CHAR_LIMIT, debug_log) - if monitor_snapshot: - cache_monitor_snapshot(tool_display_id, 'before', monitor_snapshot) - elif function_name in MONITOR_MEMORY_TOOLS: - memory_snapshot_type = (arguments.get('memory_type') or 'main').lower() - before_entries = None - try: - before_entries = resolve_monitor_memory(web_terminal.memory_manager._read_entries(memory_snapshot_type), MONITOR_MEMORY_ENTRY_LIMIT) - except Exception as exc: - debug_log(f"[MonitorSnapshot] 读取记忆失败: {memory_snapshot_type} ({exc})") - if before_entries is not None: - monitor_snapshot = { - 'memory_type': memory_snapshot_type, - 'entries': before_entries - } - 
cache_monitor_snapshot(tool_display_id, 'before', monitor_snapshot) - - sender('tool_start', { - 'id': tool_display_id, - 'name': function_name, - 'arguments': arguments, - 'preparing_id': tool_call_id, - 'monitor_snapshot': monitor_snapshot, - 'conversation_id': conversation_id - }) - brief_log(f"调用了工具: {function_name}") - - await asyncio.sleep(0.3) - start_time = time.time() - - # 执行工具 - tool_result = await web_terminal.handle_tool_call(function_name, arguments) - debug_log(f"工具结果: {tool_result[:200]}...") - - execution_time = time.time() - start_time - if execution_time < 1.5: - await asyncio.sleep(1.5 - execution_time) - - # 更新工具状态 - result_data = {} - try: - result_data = json.loads(tool_result) - except: - result_data = {'output': tool_result} - tool_failed = detect_tool_failure(result_data) - - action_status = 'completed' - action_message = None - awaiting_flag = False - - if function_name in {"write_file", "edit_file"}: - diff_path = result_data.get("path") or arguments.get("file_path") - summary = result_data.get("summary") or result_data.get("message") - if summary: - action_message = summary - debug_log(f"{function_name} 执行完成: {summary or '无摘要'}") - - if function_name == "wait_sub_agent": - system_msg = result_data.get("system_message") - if system_msg: - messages.append({ - "role": "system", - "content": system_msg - }) - sender('system_message', { - 'content': system_msg, - 'inline': False - }) - maybe_mark_failure_from_message(web_terminal, system_msg) - monitor_snapshot_after = None - if function_name in MONITOR_FILE_TOOLS: - result_path = None - if isinstance(result_data, dict): - result_path = resolve_monitor_path(result_data) - if not result_path: - candidate_path = result_data.get('path') - if isinstance(candidate_path, str) and candidate_path.strip(): - result_path = candidate_path.strip() - if not result_path: - result_path = resolve_monitor_path(arguments, snapshot_path) or snapshot_path - monitor_snapshot_after = 
capture_monitor_snapshot(web_terminal.file_manager, result_path, MONITOR_SNAPSHOT_CHAR_LIMIT, debug_log) - elif function_name in MONITOR_MEMORY_TOOLS: - memory_after_type = str( - arguments.get('memory_type') - or (isinstance(result_data, dict) and result_data.get('memory_type')) - or memory_snapshot_type - or 'main' - ).lower() - after_entries = None - try: - after_entries = resolve_monitor_memory(web_terminal.memory_manager._read_entries(memory_after_type), MONITOR_MEMORY_ENTRY_LIMIT) - except Exception as exc: - debug_log(f"[MonitorSnapshot] 读取记忆失败(after): {memory_after_type} ({exc})") - if after_entries is not None: - monitor_snapshot_after = { - 'memory_type': memory_after_type, - 'entries': after_entries - } - - update_payload = { - 'id': tool_display_id, - 'status': action_status, - 'result': result_data, - 'preparing_id': tool_call_id, - 'conversation_id': conversation_id - } - if action_message: - update_payload['message'] = action_message - if awaiting_flag: - update_payload['awaiting_content'] = True - if monitor_snapshot_after: - update_payload['monitor_snapshot_after'] = monitor_snapshot_after - cache_monitor_snapshot(tool_display_id, 'after', monitor_snapshot_after) - - sender('update_action', update_payload) - - if function_name in ['create_file', 'delete_file', 'rename_file', 'create_folder']: - if not web_terminal.context_manager._is_host_mode_without_safety(): - structure = web_terminal.context_manager.get_project_structure() - sender('file_tree_update', structure) - - # ===== 增量保存:立即保存工具结果 ===== - metadata_payload = None - tool_images = None - tool_videos = None - if isinstance(result_data, dict): - # 特殊处理 web_search:保留可供前端渲染的精简结构,以便历史记录复现搜索结果 - if function_name == "web_search": - try: - tool_result_content = json.dumps(compact_web_search_result(result_data), ensure_ascii=False) - except Exception: - tool_result_content = tool_result - else: - tool_result_content = format_tool_result_for_context(function_name, result_data, tool_result) - 
metadata_payload = {"tool_payload": result_data} - else: - tool_result_content = tool_result - tool_message_content = tool_result_content - - # view_image: 将图片直接附加到 tool 结果中(不再插入 user 消息) - if function_name == "view_image" and getattr(web_terminal, "pending_image_view", None): - inj = web_terminal.pending_image_view - web_terminal.pending_image_view = None - if ( - not tool_failed - and isinstance(result_data, dict) - and result_data.get("success") is not False - ): - img_path = inj.get("path") if isinstance(inj, dict) else None - if img_path: - text_part = tool_result_content if isinstance(tool_result_content, str) else "" - tool_message_content = web_terminal.context_manager._build_content_with_images( - text_part, - [img_path] - ) - tool_images = [img_path] - if metadata_payload is None: - metadata_payload = {} - metadata_payload["tool_image_path"] = img_path - sender('system_message', { - 'content': f'系统已按模型请求将图片附加到工具结果: {img_path}' - }) - - # view_video: 将视频直接附加到 tool 结果中(不再插入 user 消息) - if function_name == "view_video" and getattr(web_terminal, "pending_video_view", None): - inj = web_terminal.pending_video_view - web_terminal.pending_video_view = None - if ( - not tool_failed - and isinstance(result_data, dict) - and result_data.get("success") is not False - ): - video_path = inj.get("path") if isinstance(inj, dict) else None - if video_path: - text_part = tool_result_content if isinstance(tool_result_content, str) else "" - video_payload = [video_path] - tool_message_content = web_terminal.context_manager._build_content_with_images( - text_part, - [], - video_payload - ) - tool_videos = [video_path] - if metadata_payload is None: - metadata_payload = {} - metadata_payload["tool_video_path"] = video_path - sender('system_message', { - 'content': f'系统已按模型请求将视频附加到工具结果: {video_path}' - }) - - # 立即保存工具结果 - web_terminal.context_manager.add_conversation( - "tool", - tool_result_content, - tool_call_id=tool_call_id, - name=function_name, - 
metadata=metadata_payload, - images=tool_images, - videos=tool_videos - ) - debug_log(f"💾 增量保存:工具结果 {function_name}") - system_message = result_data.get("system_message") if isinstance(result_data, dict) else None - if system_message: - web_terminal._record_sub_agent_message(system_message, result_data.get("task_id"), inline=False) - maybe_mark_failure_from_message(web_terminal, system_message) - - # 添加到消息历史(用于API继续对话) - messages.append({ - "role": "tool", - "tool_call_id": tool_call_id, - "name": function_name, - "content": tool_message_content - }) - - if function_name not in {'write_file', 'edit_file'}: - await process_sub_agent_updates(messages, inline=True, after_tool_call_id=tool_call_id) - - await asyncio.sleep(0.2) - - if tool_failed: - mark_force_thinking(web_terminal, reason=f"{function_name}_failed") - - # 标记不再是第一次迭代 - is_first_iteration = False - - - # 最终统计 - debug_log(f"\n{'='*40}") - debug_log(f"任务完成统计:") - debug_log(f" 总迭代次数: {total_iterations}") - debug_log(f" 总工具调用: {total_tool_calls}") - debug_log(f" 自动修复尝试: {auto_fix_attempts}") - debug_log(f" 累积响应: {len(accumulated_response)} 字符") - debug_log(f"{'='*40}\n") - - # 发送完成事件 - sender('task_complete', { - 'total_iterations': total_iterations, - 'total_tool_calls': total_tool_calls, - 'auto_fix_attempts': auto_fix_attempts - }) +__all__ = [ + "generate_conversation_title_background", + "mark_force_thinking", + "apply_thinking_schedule", + "update_thinking_after_call", + "maybe_mark_failure_from_message", + "detect_malformed_tool_call", + "handle_task_with_sender", +] diff --git a/server/chat_flow_runtime.py b/server/chat_flow_runtime.py new file mode 100644 index 0000000..f2449be --- /dev/null +++ b/server/chat_flow_runtime.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +import asyncio +import json +import time +import re +import zipfile +from collections import defaultdict, Counter, deque +from datetime import datetime, timedelta +from pathlib import Path +from typing import Any, Dict, 
List, Optional, Tuple + +from werkzeug.utils import secure_filename + +from config import ( + OUTPUT_FORMATS, + AUTO_FIX_TOOL_CALL, + AUTO_FIX_MAX_ATTEMPTS, + MAX_ITERATIONS_PER_TASK, + MAX_CONSECUTIVE_SAME_TOOL, + MAX_TOTAL_TOOL_CALLS, + TOOL_CALL_COOLDOWN, + MAX_UPLOAD_SIZE, + DEFAULT_CONVERSATIONS_LIMIT, + MAX_CONVERSATIONS_LIMIT, + CONVERSATIONS_DIR, + DEFAULT_RESPONSE_MAX_TOKENS, + DEFAULT_PROJECT_PATH, + LOGS_DIR, + AGENT_VERSION, + THINKING_FAST_INTERVAL, + PROJECT_MAX_STORAGE_MB, + PROJECT_MAX_STORAGE_BYTES, + UPLOAD_SCAN_LOG_SUBDIR, +) +from modules.personalization_manager import ( + load_personalization_config, + save_personalization_config, + THINKING_INTERVAL_MIN, + THINKING_INTERVAL_MAX, +) +from modules.upload_security import UploadSecurityError +from modules.user_manager import UserWorkspace +from modules.usage_tracker import QUOTA_DEFAULTS +from core.web_terminal import WebTerminal +from utils.tool_result_formatter import format_tool_result_for_context +from utils.conversation_manager import ConversationManager +from config.model_profiles import get_model_context_window, get_model_profile + +from .auth_helpers import api_login_required, resolve_admin_policy, get_current_user_record, get_current_username +from .context import with_terminal, get_gui_manager, get_upload_guard, build_upload_error_response, ensure_conversation_loaded, reset_system_state, get_user_resources, get_or_create_usage_tracker +from .utils_common import ( + build_review_lines, + debug_log, + log_backend_chunk, + log_frontend_chunk, + log_streaming_debug_entry, + brief_log, + DEBUG_LOG_FILE, + CHUNK_BACKEND_LOG_FILE, + CHUNK_FRONTEND_LOG_FILE, + STREAMING_DEBUG_LOG_FILE, +) +from .security import rate_limited, format_tool_result_notice, compact_web_search_result, consume_socket_token, prune_socket_tokens, validate_csrf_request, requires_csrf_protection, get_csrf_token +from .monitor import cache_monitor_snapshot, get_cached_monitor_snapshot +from .extensions import socketio +from 
.state import ( + MONITOR_FILE_TOOLS, + MONITOR_MEMORY_TOOLS, + MONITOR_SNAPSHOT_CHAR_LIMIT, + MONITOR_MEMORY_ENTRY_LIMIT, + RATE_LIMIT_BUCKETS, + FAILURE_TRACKERS, + pending_socket_tokens, + usage_trackers, + MONITOR_SNAPSHOT_CACHE, + MONITOR_SNAPSHOT_CACHE_LIMIT, + PROJECT_STORAGE_CACHE, + PROJECT_STORAGE_CACHE_TTL_SECONDS, + RECENT_UPLOAD_EVENT_LIMIT, + RECENT_UPLOAD_FEED_LIMIT, + THINKING_FAILURE_KEYWORDS, + TITLE_PROMPT_PATH, + get_last_active_ts, + user_manager, + container_manager, + custom_tool_registry, + user_terminals, + terminal_rooms, + connection_users, + stop_flags, + get_stop_flag, + set_stop_flag, + clear_stop_flag, +) +from .chat_flow_helpers import ( + detect_malformed_tool_call as _detect_malformed_tool_call, + detect_tool_failure, + get_thinking_state, + mark_force_thinking as _mark_force_thinking, + mark_suppress_thinking, + apply_thinking_schedule as _apply_thinking_schedule, + update_thinking_after_call as _update_thinking_after_call, + maybe_mark_failure_from_message as _maybe_mark_failure_from_message, + generate_conversation_title_background as _generate_conversation_title_background, +) + + +from .chat_flow_runner_helpers import ( + extract_intent_from_partial, + resolve_monitor_path, + resolve_monitor_memory, + capture_monitor_snapshot, +) + + +def generate_conversation_title_background(web_terminal: WebTerminal, conversation_id: str, user_message: str, username: str): + return _generate_conversation_title_background( + web_terminal=web_terminal, + conversation_id=conversation_id, + user_message=user_message, + username=username, + socketio_instance=socketio, + title_prompt_path=TITLE_PROMPT_PATH, + debug_logger=debug_log, + ) + +def mark_force_thinking(terminal: WebTerminal, reason: str = ""): + return _mark_force_thinking(terminal, reason=reason, debug_logger=debug_log) + +def apply_thinking_schedule(terminal: WebTerminal): + return _apply_thinking_schedule(terminal, default_interval=THINKING_FAST_INTERVAL, debug_logger=debug_log) + 
+def update_thinking_after_call(terminal: WebTerminal): + return _update_thinking_after_call(terminal, debug_logger=debug_log) + +def maybe_mark_failure_from_message(terminal: WebTerminal, content: Optional[str]): + return _maybe_mark_failure_from_message( + terminal, + content, + failure_keywords=THINKING_FAILURE_KEYWORDS, + debug_logger=debug_log, + ) + +def detect_malformed_tool_call(text): + return _detect_malformed_tool_call(text) diff --git a/server/chat_flow_task_runner.py b/server/chat_flow_task_runner.py new file mode 100644 index 0000000..52964f5 --- /dev/null +++ b/server/chat_flow_task_runner.py @@ -0,0 +1,2187 @@ +from __future__ import annotations + +import asyncio +import json +import time +import re +import zipfile +from collections import defaultdict, Counter, deque +from datetime import datetime, timedelta +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple + +from werkzeug.utils import secure_filename + +from config import ( + OUTPUT_FORMATS, + AUTO_FIX_TOOL_CALL, + AUTO_FIX_MAX_ATTEMPTS, + MAX_ITERATIONS_PER_TASK, + MAX_CONSECUTIVE_SAME_TOOL, + MAX_TOTAL_TOOL_CALLS, + TOOL_CALL_COOLDOWN, + MAX_UPLOAD_SIZE, + DEFAULT_CONVERSATIONS_LIMIT, + MAX_CONVERSATIONS_LIMIT, + CONVERSATIONS_DIR, + DEFAULT_RESPONSE_MAX_TOKENS, + DEFAULT_PROJECT_PATH, + LOGS_DIR, + AGENT_VERSION, + THINKING_FAST_INTERVAL, + PROJECT_MAX_STORAGE_MB, + PROJECT_MAX_STORAGE_BYTES, + UPLOAD_SCAN_LOG_SUBDIR, +) +from modules.personalization_manager import ( + load_personalization_config, + save_personalization_config, + THINKING_INTERVAL_MIN, + THINKING_INTERVAL_MAX, +) +from modules.upload_security import UploadSecurityError +from modules.user_manager import UserWorkspace +from modules.usage_tracker import QUOTA_DEFAULTS +from core.web_terminal import WebTerminal +from utils.tool_result_formatter import format_tool_result_for_context +from utils.conversation_manager import ConversationManager +from config.model_profiles import get_model_context_window, 
get_model_profile + +from .auth_helpers import api_login_required, resolve_admin_policy, get_current_user_record, get_current_username +from .context import with_terminal, get_gui_manager, get_upload_guard, build_upload_error_response, ensure_conversation_loaded, reset_system_state, get_user_resources, get_or_create_usage_tracker +from .utils_common import ( + build_review_lines, + debug_log, + log_backend_chunk, + log_frontend_chunk, + log_streaming_debug_entry, + brief_log, + DEBUG_LOG_FILE, + CHUNK_BACKEND_LOG_FILE, + CHUNK_FRONTEND_LOG_FILE, + STREAMING_DEBUG_LOG_FILE, +) +from .security import rate_limited, format_tool_result_notice, compact_web_search_result, consume_socket_token, prune_socket_tokens, validate_csrf_request, requires_csrf_protection, get_csrf_token +from .monitor import cache_monitor_snapshot, get_cached_monitor_snapshot +from .extensions import socketio +from .state import ( + MONITOR_FILE_TOOLS, + MONITOR_MEMORY_TOOLS, + MONITOR_SNAPSHOT_CHAR_LIMIT, + MONITOR_MEMORY_ENTRY_LIMIT, + RATE_LIMIT_BUCKETS, + FAILURE_TRACKERS, + pending_socket_tokens, + usage_trackers, + MONITOR_SNAPSHOT_CACHE, + MONITOR_SNAPSHOT_CACHE_LIMIT, + PROJECT_STORAGE_CACHE, + PROJECT_STORAGE_CACHE_TTL_SECONDS, + RECENT_UPLOAD_EVENT_LIMIT, + RECENT_UPLOAD_FEED_LIMIT, + THINKING_FAILURE_KEYWORDS, + TITLE_PROMPT_PATH, + get_last_active_ts, + user_manager, + container_manager, + custom_tool_registry, + user_terminals, + terminal_rooms, + connection_users, + stop_flags, + get_stop_flag, + set_stop_flag, + clear_stop_flag, +) +from .chat_flow_helpers import ( + detect_malformed_tool_call as _detect_malformed_tool_call, + detect_tool_failure, + get_thinking_state, + mark_force_thinking as _mark_force_thinking, + mark_suppress_thinking, + apply_thinking_schedule as _apply_thinking_schedule, + update_thinking_after_call as _update_thinking_after_call, + maybe_mark_failure_from_message as _maybe_mark_failure_from_message, + generate_conversation_title_background as 
_generate_conversation_title_background, +) + + +from .chat_flow_runner_helpers import ( + extract_intent_from_partial, + resolve_monitor_path, + resolve_monitor_memory, + capture_monitor_snapshot, +) + + +from .chat_flow_runtime import ( + generate_conversation_title_background, + mark_force_thinking, + apply_thinking_schedule, + update_thinking_after_call, + maybe_mark_failure_from_message, + detect_malformed_tool_call, +) + +async def handle_task_with_sender(terminal: WebTerminal, workspace: UserWorkspace, message, images, sender, client_sid, username: str, videos=None): + """处理任务并发送消息 - 集成token统计版本""" + web_terminal = terminal + conversation_id = getattr(web_terminal.context_manager, "current_conversation_id", None) + videos = videos or [] + raw_sender = sender + + def sender(event_type, data): + """为关键事件补充会话标识,便于前端定位报错归属。""" + if not isinstance(data, dict): + raw_sender(event_type, data) + return + payload = data + if event_type in {"error", "quota_exceeded", "task_stopped", "task_complete"}: + payload = dict(data) + current_conv = conversation_id or getattr(web_terminal.context_manager, "current_conversation_id", None) + if current_conv: + payload.setdefault("conversation_id", current_conv) + task_id = getattr(web_terminal, "task_id", None) or client_sid + if task_id: + payload.setdefault("task_id", task_id) + if client_sid: + payload.setdefault("client_sid", client_sid) + raw_sender(event_type, payload) + + # 如果是思考模式,重置状态 + if web_terminal.thinking_mode: + web_terminal.api_client.start_new_task(force_deep=web_terminal.deep_thinking_mode) + state = get_thinking_state(web_terminal) + state["fast_streak"] = 0 + state["force_next"] = False + state["suppress_next"] = False + + # 添加到对话历史 + history_len_before = len(getattr(web_terminal.context_manager, "conversation_history", []) or []) + is_first_user_message = history_len_before == 0 + web_terminal.context_manager.add_conversation("user", message, images=images, videos=videos) + + if is_first_user_message and 
getattr(web_terminal, "context_manager", None): + try: + personal_config = load_personalization_config(workspace.data_dir) + except Exception: + personal_config = {} + auto_title_enabled = personal_config.get("auto_generate_title", True) + if auto_title_enabled: + conv_id = getattr(web_terminal.context_manager, "current_conversation_id", None) + socketio.start_background_task( + generate_conversation_title_background, + web_terminal, + conv_id, + message, + username + ) + + # === 移除:不在这里计算输入token,改为在每次API调用前计算 === + + # 构建上下文和消息(用于API调用) + context = web_terminal.build_context() + messages = web_terminal.build_messages(context, message) + tools = web_terminal.define_tools() + try: + profile = get_model_profile(getattr(web_terminal, "model_key", None) or "kimi-k2.5") + web_terminal.apply_model_profile(profile) + except Exception as exc: + debug_log(f"更新模型配置失败: {exc}") + + # === 上下文预算与安全校验(避免超出模型上下文) === + max_context_tokens = get_model_context_window(getattr(web_terminal, "model_key", None) or "kimi-k2.5") + current_tokens = web_terminal.context_manager.get_current_context_tokens(conversation_id) + # 提前同步给底层客户端,动态收缩 max_tokens + web_terminal.api_client.update_context_budget(current_tokens, max_context_tokens) + if max_context_tokens: + if current_tokens >= max_context_tokens: + err_msg = ( + f"当前对话上下文已达 {current_tokens} tokens,超过模型上限 " + f"{max_context_tokens},请先使用压缩功能或清理对话后再试。" + ) + debug_log(err_msg) + web_terminal.context_manager.add_conversation("system", err_msg) + sender('error', { + 'message': err_msg, + 'status_code': 400, + 'error_type': 'context_overflow' + }) + return + usage_percent = (current_tokens / max_context_tokens) * 100 + warned = web_terminal.context_manager.conversation_metadata.get("context_warning_sent", False) + if usage_percent >= 70 and not warned: + warn_msg = ( + f"当前对话上下文约占 {usage_percent:.1f}%({current_tokens}/{max_context_tokens})," + "建议使用压缩功能。" + ) + web_terminal.context_manager.conversation_metadata["context_warning_sent"] = True + 
web_terminal.context_manager.auto_save_conversation(force=True) + sender('context_warning', { + 'title': '上下文过长', + 'message': warn_msg, + 'type': 'warning', + 'conversation_id': conversation_id + }) + + # 开始新的AI消息 + sender('ai_message_start', {}) + + # 增量保存相关变量 + accumulated_response = "" # 累积的响应内容 + is_first_iteration = True # 是否是第一次迭代 + + # 统计和限制变量 + total_iterations = 0 + total_tool_calls = 0 + consecutive_same_tool = defaultdict(int) + last_tool_name = "" + auto_fix_attempts = 0 + last_tool_call_time = 0 + detected_tool_intent: Dict[str, str] = {} + + # 设置最大迭代次数(API 可覆盖);None 表示不限制 + max_iterations_override = getattr(web_terminal, "max_iterations_override", None) + max_iterations = max_iterations_override if max_iterations_override is not None else MAX_ITERATIONS_PER_TASK + max_api_retries = 4 + retry_delay_seconds = 10 + + pending_append = None # {"path": str, "tool_call_id": str, "buffer": str, ...} + append_probe_buffer = "" + pending_modify = None # {"path": str, "tool_call_id": str, "buffer": str, ...} + modify_probe_buffer = "" + + async def finalize_pending_append(response_text: str, stream_completed: bool, finish_reason: str = None) -> Dict: + """在流式输出结束后处理追加写入""" + nonlocal pending_append, append_probe_buffer + + result = { + "handled": False, + "success": False, + "summary": None, + "summary_message": None, + "tool_content": None, + "tool_call_id": None, + "path": None, + "forced": False, + "error": None, + "assistant_content": response_text, + "lines": 0, + "bytes": 0, + "finish_reason": finish_reason, + "appended_content": "", + "assistant_metadata": None + } + + if not pending_append: + return result + + state = pending_append + path = state.get("path") + tool_call_id = state.get("tool_call_id") + buffer = state.get("buffer", "") + start_marker = state.get("start_marker") + end_marker = state.get("end_marker") + start_idx = state.get("content_start") + end_idx = state.get("end_index") + + display_id = state.get("display_id") + + result.update({ + 
"handled": True, + "path": path, + "tool_call_id": tool_call_id, + "display_id": display_id + }) + + if path is None or tool_call_id is None: + error_msg = "append_to_file 状态不完整,缺少路径或ID。" + debug_log(error_msg) + result["error"] = error_msg + result["summary_message"] = error_msg + result["tool_content"] = json.dumps({ + "success": False, + "error": error_msg + }, ensure_ascii=False) + if display_id: + sender('update_action', { + 'id': display_id, + 'status': 'failed', + 'preparing_id': tool_call_id, + 'message': error_msg + }) + pending_append = None + return result + + if start_idx is None: + error_msg = f"未检测到格式正确的开始标识 {start_marker}。" + debug_log(error_msg) + result["error"] = error_msg + result["summary_message"] = error_msg + result["tool_content"] = json.dumps({ + "success": False, + "path": path, + "error": error_msg + }, ensure_ascii=False) + if display_id: + sender('update_action', { + 'id': display_id, + 'status': 'failed', + 'preparing_id': tool_call_id, + 'message': error_msg + }) + pending_append = None + return result + + forced = False + if end_idx is None: + forced = True + # 查找下一个<<<,否则使用整个缓冲结尾 + remaining = buffer[start_idx:] + next_marker = remaining.find("<<<", len(end_marker)) + if next_marker != -1: + end_idx = start_idx + next_marker + else: + end_idx = len(buffer) + + content = buffer[start_idx:end_idx] + if content.startswith('\n'): + content = content[1:] + + if not content: + error_msg = "未检测到需要追加的内容,请严格按照<<>>...<<>>格式输出。" + debug_log(error_msg) + result["error"] = error_msg + result["forced"] = forced + result["tool_content"] = json.dumps({ + "success": False, + "path": path, + "error": error_msg + }, ensure_ascii=False) + if display_id: + sender('update_action', { + 'id': display_id, + 'status': 'failed', + 'preparing_id': tool_call_id, + 'message': error_msg + }) + pending_append = None + return result + + assistant_message_lines = [] + if start_marker: + assistant_message_lines.append(start_marker) + 
assistant_message_lines.append(content) + if not forced and end_marker: + assistant_message_lines.append(end_marker) + assistant_message_text = "\n".join(assistant_message_lines) + result["assistant_content"] = assistant_message_text + assistant_metadata = { + "append_payload": { + "path": path, + "tool_call_id": tool_call_id, + "forced": forced, + "has_end_marker": not forced + } + } + result["assistant_metadata"] = assistant_metadata + + write_result = web_terminal.file_manager.append_file(path, content) + if write_result.get("success"): + bytes_written = len(content.encode('utf-8')) + line_count = content.count('\n') + if content and not content.endswith('\n'): + line_count += 1 + + summary = f"已向 {path} 追加 {line_count} 行({bytes_written} 字节)" + if forced: + summary += "。未检测到 <<>> 标记,系统已在流结束处完成写入。如内容未完成,请重新调用 append_to_file 并按标准格式补充;如已完成,可继续后续步骤。" + + result.update({ + "success": True, + "summary": summary, + "summary_message": summary, + "forced": forced, + "lines": line_count, + "bytes": bytes_written, + "appended_content": content, + "tool_content": json.dumps({ + "success": True, + "path": path, + "lines": line_count, + "bytes": bytes_written, + "forced": forced, + "message": summary, + "finish_reason": finish_reason + }, ensure_ascii=False) + }) + + assistant_meta_payload = result["assistant_metadata"]["append_payload"] + assistant_meta_payload["lines"] = line_count + assistant_meta_payload["bytes"] = bytes_written + assistant_meta_payload["success"] = True + + summary_payload = { + "success": True, + "path": path, + "lines": line_count, + "bytes": bytes_written, + "forced": forced, + "message": summary + } + + if display_id: + sender('update_action', { + 'id': display_id, + 'status': 'completed', + 'result': summary_payload, + 'preparing_id': tool_call_id, + 'message': summary + }) + + debug_log(f"追加写入完成: {summary}") + else: + error_msg = write_result.get("error", "追加写入失败") + result.update({ + "error": error_msg, + "summary_message": error_msg, + "forced": 
forced, + "appended_content": content, + "tool_content": json.dumps({ + "success": False, + "path": path, + "error": error_msg, + "finish_reason": finish_reason + }, ensure_ascii=False) + }) + debug_log(f"追加写入失败: {error_msg}") + + if result["assistant_metadata"]: + assistant_meta_payload = result["assistant_metadata"]["append_payload"] + assistant_meta_payload["lines"] = content.count('\n') + (0 if content.endswith('\n') or not content else 1) + assistant_meta_payload["bytes"] = len(content.encode('utf-8')) + assistant_meta_payload["success"] = False + + failure_payload = { + "success": False, + "path": path, + "error": error_msg, + "forced": forced + } + + if display_id: + sender('update_action', { + 'id': display_id, + 'status': 'completed', + 'result': failure_payload, + 'preparing_id': tool_call_id, + 'message': error_msg + }) + + pending_append = None + append_probe_buffer = "" + if hasattr(web_terminal, "pending_append_request"): + web_terminal.pending_append_request = None + return result + + async def finalize_pending_modify(response_text: str, stream_completed: bool, finish_reason: str = None) -> Dict: + """在流式输出结束后处理修改写入""" + nonlocal pending_modify, modify_probe_buffer + + result = { + "handled": False, + "success": False, + "path": None, + "tool_call_id": None, + "display_id": None, + "total_blocks": 0, + "completed_blocks": [], + "failed_blocks": [], + "forced": False, + "details": [], + "error": None, + "assistant_content": response_text, + "assistant_metadata": None, + "tool_content": None, + "summary_message": None, + "finish_reason": finish_reason + } + + if not pending_modify: + return result + + state = pending_modify + path = state.get("path") + tool_call_id = state.get("tool_call_id") + display_id = state.get("display_id") + start_marker = state.get("start_marker") + end_marker = state.get("end_marker") + buffer = state.get("buffer", "") + raw_buffer = state.get("raw_buffer", "") + end_index = state.get("end_index") + + result.update({ + 
"handled": True, + "path": path, + "tool_call_id": tool_call_id, + "display_id": display_id + }) + + if not state.get("start_seen"): + error_msg = "未检测到格式正确的 <<>> 标记。" + debug_log(error_msg) + result["error"] = error_msg + result["summary_message"] = error_msg + result["tool_content"] = json.dumps({ + "success": False, + "path": path, + "error": error_msg, + "finish_reason": finish_reason + }, ensure_ascii=False) + if display_id: + sender('update_action', { + 'id': display_id, + 'status': 'failed', + 'preparing_id': tool_call_id, + 'message': error_msg + }) + if hasattr(web_terminal, "pending_modify_request"): + web_terminal.pending_modify_request = None + pending_modify = None + modify_probe_buffer = "" + return result + + forced = end_index is None + apply_text = buffer if forced else buffer[:end_index] + raw_content = raw_buffer if forced else raw_buffer[:len(start_marker) + end_index + len(end_marker)] + if raw_content: + result["assistant_content"] = raw_content + + blocks_info = [] + block_reports = {} + detected_indices = set() + block_pattern = re.compile(r"\[replace:(\d+)\](.*?)\[/replace\]", re.DOTALL) + structure_warnings: List[str] = [] + structure_detail_entries: List[Dict] = [] + + def record_structure_warning(message: str, hint: Optional[str] = None): + """记录结构性缺陷,便于给出更具体的反馈。""" + if message in structure_warnings: + return + structure_warnings.append(message) + structure_detail_entries.append({ + "index": 0, + "status": "failed", + "reason": message, + "removed_lines": 0, + "added_lines": 0, + "hint": hint or "请严格按照模板输出:[replace:n] + <>/<> + [/replace],并使用 <<>> 收尾。" + }) + + def extract_segment(body: str, tag: str): + marker = f"<<{tag}>>" + end_tag = "<>" + start_pos = body.find(marker) + if start_pos == -1: + return None, f"缺少 {marker}" + start_pos += len(marker) + if body[start_pos:start_pos+2] == "\r\n": + start_pos += 2 + elif body[start_pos:start_pos+1] == "\n": + start_pos += 1 + end_pos = body.find(end_tag, start_pos) + if end_pos == -1: + 
return None, f"缺少 {end_tag}" + segment = body[start_pos:end_pos] + return segment, None + + for match in block_pattern.finditer(apply_text): + try: + index = int(match.group(1)) + except ValueError: + continue + body = match.group(2) + if index in detected_indices: + continue + detected_indices.add(index) + block_reports[index] = { + "index": index, + "status": "pending", + "reason": None, + "removed_lines": 0, + "added_lines": 0, + "hint": None + } + old_content, old_error = extract_segment(body, "OLD") + new_content, new_error = extract_segment(body, "NEW") + if old_error or new_error: + reason = old_error or new_error + block_reports[index]["status"] = "failed" + block_reports[index]["reason"] = reason + blocks_info.append({ + "index": index, + "old": old_content, + "new": new_content, + "error": old_error or new_error + }) + + if not blocks_info: + has_replace_start = bool(re.search(r"\[replace:\s*\d+\]", apply_text)) + has_replace_end = "[/replace]" in apply_text + has_old_tag = "<>" in apply_text + has_new_tag = "<>" in apply_text + + if has_replace_start and not has_replace_end: + record_structure_warning("检测到 [replace:n] 标记但缺少对应的 [/replace] 结束标记。") + if has_replace_end and not has_replace_start: + record_structure_warning("检测到 [/replace] 结束标记但缺少对应的 [replace:n] 起始标记。") + + old_tags = len(re.findall(r"<>", apply_text)) + completed_old_tags = len(re.findall(r"<>[\s\S]*?<>", apply_text)) + if old_tags and completed_old_tags < old_tags: + record_structure_warning("检测到 <> 段落但未看到对应的 <> 结束标记。") + + new_tags = len(re.findall(r"<>", apply_text)) + completed_new_tags = len(re.findall(r"<>[\s\S]*?<>", apply_text)) + if new_tags and completed_new_tags < new_tags: + record_structure_warning("检测到 <> 段落但未看到对应的 <> 结束标记。") + + if (has_replace_start or has_replace_end or has_old_tag or has_new_tag) and not structure_warnings: + record_structure_warning("检测到部分补丁标记,但整体结构不完整,请严格按照模板填写所有标记。") + + total_blocks = len(blocks_info) + result["total_blocks"] = total_blocks + if forced: 
+ debug_log("未检测到 <<>>,将在流结束处执行已识别的修改块。") + result["forced"] = True + + blocks_to_apply = [ + {"index": block["index"], "old": block["old"], "new": block["new"]} + for block in blocks_info + if block["error"] is None and block["old"] is not None and block["new"] is not None + ] + + # 记录格式残缺的块 + for block in blocks_info: + if block["error"]: + idx = block["index"] + block_reports[idx]["status"] = "failed" + block_reports[idx]["reason"] = block["error"] + block_reports[idx]["hint"] = "请检查补丁块的 OLD/NEW 标记是否完整,必要时复用 terminal_snapshot 或终端命令重新调整。" + + apply_result = {} + if blocks_to_apply: + apply_result = web_terminal.file_manager.apply_modify_blocks(path, blocks_to_apply) + else: + apply_result = {"success": False, "completed": [], "failed": [], "results": [], "write_performed": False, "error": None} + + block_result_map = {item["index"]: item for item in apply_result.get("results", [])} + + for block in blocks_info: + idx = block["index"] + report = block_reports.get(idx) + if report is None: + continue + if report["status"] == "failed": + continue + block_apply = block_result_map.get(idx) + if not block_apply: + report["status"] = "failed" + report["reason"] = "未执行,可能未找到匹配原文" + report["hint"] = report.get("hint") or "请确认 OLD 文本与文件内容完全一致;若多次失败,可改用终端命令/Python 进行精准替换。" + continue + status = block_apply.get("status") + report["removed_lines"] = block_apply.get("removed_lines", 0) + report["added_lines"] = block_apply.get("added_lines", 0) + if block_apply.get("hint"): + report["hint"] = block_apply.get("hint") + if status == "success": + report["status"] = "completed" + elif status == "not_found": + report["status"] = "failed" + report["reason"] = block_apply.get("reason") or "未找到匹配的原文" + if not report.get("hint"): + report["hint"] = "请使用 terminal_snapshot/grep -n 校验原文,或在说明后改用 run_command/python 精确替换。" + else: + report["status"] = "failed" + report["reason"] = block_apply.get("reason") or "替换失败" + if not report.get("hint"): + report["hint"] = block_apply.get("hint") or 
"若多次尝试仍失败,可考虑利用终端命令或 Python 小脚本完成此次修改。" + + completed_blocks = sorted([idx for idx, rep in block_reports.items() if rep["status"] == "completed"]) + failed_blocks = sorted([idx for idx, rep in block_reports.items() if rep["status"] != "completed"]) + + result["completed_blocks"] = completed_blocks + result["failed_blocks"] = failed_blocks + details = sorted(block_reports.values(), key=lambda x: x["index"]) + if structure_detail_entries: + details = structure_detail_entries + details + result["details"] = details + + summary_parts = [] + if total_blocks == 0: + summary_parts.append("未检测到有效的修改块,未执行任何修改。") + summary_parts.extend(structure_warnings) + else: + if not completed_blocks and failed_blocks: + summary_parts.append(f"共检测到 {total_blocks} 个修改块,全部未执行。") + elif completed_blocks and not failed_blocks: + summary_parts.append(f"共 {total_blocks} 个修改块全部完成。") + else: + summary_parts.append( + f"共检测到 {total_blocks} 个修改块,其中成功 {len(completed_blocks)} 个,失败 {len(failed_blocks)} 个。" + ) + if forced: + summary_parts.append("未检测到 <<>> 标记,系统已在流结束处执行补丁。") + if apply_result.get("error"): + summary_parts.append(apply_result["error"]) + + matching_note = "提示:补丁匹配基于完整文本,包含注释和空白符,请确保 <<>> 段落与文件内容逐字一致。如果修改成功,请忽略,如果失败,请明确原文后再次尝试。" + summary_parts.append(matching_note) + summary_message = " ".join(summary_parts).strip() + result["summary_message"] = summary_message + result["success"] = bool(completed_blocks) and not failed_blocks and apply_result.get("error") is None + + tool_payload = { + "success": result["success"], + "path": path, + "total_blocks": total_blocks, + "completed": completed_blocks, + "failed": [ + { + "index": rep["index"], + "reason": rep.get("reason"), + "hint": rep.get("hint") + } + for rep in result["details"] if rep["status"] != "completed" + ], + "forced": forced, + "message": summary_message, + "finish_reason": finish_reason, + "details": result["details"] + } + if apply_result.get("error"): + tool_payload["error"] = apply_result["error"] + + 
result["tool_content"] = json.dumps(tool_payload, ensure_ascii=False) + result["assistant_metadata"] = { + "modify_payload": { + "path": path, + "total_blocks": total_blocks, + "completed": completed_blocks, + "failed": failed_blocks, + "forced": forced, + "details": result["details"] + } + } + + if display_id: + sender('update_action', { + 'id': display_id, + 'status': 'completed' if result["success"] else 'failed', + 'result': tool_payload, + 'preparing_id': tool_call_id, + 'message': summary_message + }) + + pending_modify = None + modify_probe_buffer = "" + if hasattr(web_terminal, "pending_modify_request"): + web_terminal.pending_modify_request = None + return result + + async def process_sub_agent_updates( + messages: List[Dict], + inline: bool = False, + after_tool_call_id: Optional[str] = None + ): + """轮询子智能体任务并通知前端,并把结果插入当前对话上下文。""" + manager = getattr(web_terminal, "sub_agent_manager", None) + if not manager: + return + try: + updates = manager.poll_updates() + debug_log(f"[SubAgent] poll inline={inline} updates={len(updates)}") + except Exception as exc: + debug_log(f"子智能体状态检查失败: {exc}") + return + for update in updates: + message = update.get("system_message") + if not message: + continue + task_id = update.get("task_id") + debug_log(f"[SubAgent] update task={task_id} inline={inline} msg={message}") + web_terminal._record_sub_agent_message(message, task_id, inline=inline) + debug_log(f"[SubAgent] recorded task={task_id}, 计算插入位置") + + insert_index = len(messages) + if after_tool_call_id: + for idx, msg in enumerate(messages): + if msg.get("role") == "tool" and msg.get("tool_call_id") == after_tool_call_id: + insert_index = idx + 1 + break + + messages.insert(insert_index, { + "role": "system", + "content": message, + "metadata": {"sub_agent_notice": True, "inline": inline, "task_id": task_id} + }) + debug_log(f"[SubAgent] 插入系统消息位置: {insert_index}") + sender('system_message', { + 'content': message, + 'inline': inline + }) + 
maybe_mark_failure_from_message(web_terminal, message) + + async def _wait_retry_delay(delay_seconds: int) -> bool: + """等待重试间隔,同时检查是否收到停止请求。""" + if delay_seconds <= 0: + return False + deadline = time.time() + delay_seconds + while time.time() < deadline: + client_stop_info = get_stop_flag(client_sid, username) + if client_stop_info: + stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info + if stop_requested: + sender('task_stopped', { + 'message': '命令执行被用户取消', + 'reason': 'user_stop' + }) + clear_stop_flag(client_sid, username) + return True + await asyncio.sleep(0.2) + return False + + iteration = 0 + while max_iterations is None or iteration < max_iterations: + current_iteration = iteration + 1 + iteration += 1 + total_iterations += 1 + iteration_limit_label = max_iterations if max_iterations is not None else "∞" + debug_log(f"\n--- 迭代 {current_iteration}/{iteration_limit_label} 开始 ---") + + # 检查是否超过总工具调用限制 + if MAX_TOTAL_TOOL_CALLS is not None and total_tool_calls >= MAX_TOTAL_TOOL_CALLS: + debug_log(f"已达到最大工具调用次数限制 ({MAX_TOTAL_TOOL_CALLS})") + sender('system_message', { + 'content': f'⚠️ 已达到最大工具调用次数限制 ({MAX_TOTAL_TOOL_CALLS}),任务结束。' + }) + mark_force_thinking(web_terminal, reason="tool_limit") + break + + apply_thinking_schedule(web_terminal) + + full_response = "" + tool_calls = [] + current_thinking = "" + detected_tools = {} + last_usage_payload = None + + # 状态标志 + in_thinking = False + thinking_started = False + thinking_ended = False + text_started = False + text_has_content = False + text_streaming = False + text_chunk_index = 0 + last_text_chunk_time: Optional[float] = None + + # 计数器 + chunk_count = 0 + reasoning_chunks = 0 + content_chunks = 0 + tool_chunks = 0 + append_break_triggered = False + append_result = {"handled": False} + modify_break_triggered = False + modify_result = {"handled": False} + last_finish_reason = None + + def _cancel_pending_tools(tool_calls_list): + 
"""为尚未返回结果的工具生成取消结果,防止缺失 tool_call_id 造成后续 400。""" + if not tool_calls_list: + return + for tc in tool_calls_list: + tc_id = tc.get("id") + func_name = tc.get("function", {}).get("name") + sender('update_action', { + 'preparing_id': tc_id, + 'status': 'cancelled', + 'result': { + "success": False, + "status": "cancelled", + "message": "命令执行被用户取消", + "tool": func_name + } + }) + if tc_id: + messages.append({ + "role": "tool", + "tool_call_id": tc_id, + "name": func_name, + "content": "命令执行被用户取消", + "metadata": {"status": "cancelled"} + }) + + thinking_expected = web_terminal.api_client.get_current_thinking_mode() + debug_log(f"思考模式: {thinking_expected}") + quota_allowed = True + quota_info = {} + if hasattr(web_terminal, "record_model_call"): + quota_allowed, quota_info = web_terminal.record_model_call(bool(thinking_expected)) + if not quota_allowed: + quota_type = 'thinking' if thinking_expected else 'fast' + socketio.emit('quota_notice', { + 'type': quota_type, + 'reset_at': quota_info.get('reset_at'), + 'limit': quota_info.get('limit'), + 'count': quota_info.get('count') + }, room=f"user_{getattr(web_terminal, 'username', '')}") + sender('quota_exceeded', { + 'type': quota_type, + 'reset_at': quota_info.get('reset_at') + }) + sender('error', { + 'message': "配额已达到上限,暂时无法继续调用模型。", + 'quota': quota_info + }) + return + + tool_call_limit_label = MAX_TOTAL_TOOL_CALLS if MAX_TOTAL_TOOL_CALLS is not None else "∞" + print(f"[API] 第{current_iteration}次调用 (总工具调用: {total_tool_calls}/{tool_call_limit_label})") + + api_error = None + for api_attempt in range(max_api_retries + 1): + api_error = None + if api_attempt > 0: + full_response = "" + tool_calls = [] + current_thinking = "" + detected_tools = {} + last_usage_payload = None + in_thinking = False + thinking_started = False + thinking_ended = False + text_started = False + text_has_content = False + text_streaming = False + text_chunk_index = 0 + last_text_chunk_time = None + chunk_count = 0 + reasoning_chunks = 0 + 
content_chunks = 0 + tool_chunks = 0 + append_break_triggered = False + append_result = {"handled": False} + modify_break_triggered = False + modify_result = {"handled": False} + last_finish_reason = None + + # 收集流式响应 + async for chunk in web_terminal.api_client.chat(messages, tools, stream=True): + chunk_count += 1 + + # 检查停止标志 + client_stop_info = get_stop_flag(client_sid, username) + if client_stop_info: + stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info + if stop_requested: + debug_log(f"检测到停止请求,中断流处理") + if pending_append: + append_result = await finalize_pending_append(full_response, False, finish_reason="user_stop") + if pending_modify: + modify_result = await finalize_pending_modify(full_response, False, finish_reason="user_stop") + _cancel_pending_tools(tool_calls) + sender('task_stopped', { + 'message': '命令执行被用户取消', + 'reason': 'user_stop' + }) + clear_stop_flag(client_sid, username) + return + + if isinstance(chunk, dict) and chunk.get("error"): + api_error = chunk.get("error") + break + + # 先尝试记录 usage(有些平台会在最后一个 chunk 里携带 usage 但 choices 为空) + usage_info = chunk.get("usage") + if usage_info: + last_usage_payload = usage_info + + if "choices" not in chunk: + debug_log(f"Chunk {chunk_count}: 无choices字段") + continue + if not chunk.get("choices"): + debug_log(f"Chunk {chunk_count}: choices为空列表") + continue + choice = chunk["choices"][0] + if not usage_info and isinstance(choice, dict) and choice.get("usage"): + # 兼容部分供应商将 usage 放在 choice 内的格式(例如部分 Kimi/Qwen 返回) + last_usage_payload = choice.get("usage") + delta = choice.get("delta", {}) + finish_reason = choice.get("finish_reason") + if finish_reason: + last_finish_reason = finish_reason + + # 处理思考内容(兼容 reasoning_content / reasoning_details) + reasoning_content = "" + if "reasoning_content" in delta: + reasoning_content = delta.get("reasoning_content") or "" + elif "reasoning_details" in delta: + details = delta.get("reasoning_details") + if 
isinstance(details, list): + parts = [] + for item in details: + if isinstance(item, dict): + text = item.get("text") + if text: + parts.append(text) + if parts: + reasoning_content = "".join(parts) + if reasoning_content: + reasoning_chunks += 1 + debug_log(f" 思考内容 #{reasoning_chunks}: {len(reasoning_content)} 字符") + + if not thinking_started: + in_thinking = True + thinking_started = True + sender('thinking_start', {}) + await asyncio.sleep(0.05) + + current_thinking += reasoning_content + sender('thinking_chunk', {'content': reasoning_content}) + + # 处理正常内容 + if "content" in delta: + content = delta["content"] + if content: + content_chunks += 1 + debug_log(f" 正式内容 #{content_chunks}: {repr(content[:100] if content else 'None')}") + + if in_thinking and not thinking_ended: + in_thinking = False + thinking_ended = True + sender('thinking_end', {'full_content': current_thinking}) + await asyncio.sleep(0.1) + + + expecting_modify = bool(pending_modify) or bool(getattr(web_terminal, "pending_modify_request", None)) + expecting_append = bool(pending_append) or bool(getattr(web_terminal, "pending_append_request", None)) + + if pending_modify: + if not pending_modify.get("start_seen"): + probe_buffer = pending_modify.get("probe_buffer", "") + content + if len(probe_buffer) > 10000: + probe_buffer = probe_buffer[-10000:] + marker = pending_modify.get("start_marker") + marker_index = probe_buffer.find(marker) + if marker_index == -1: + pending_modify["probe_buffer"] = probe_buffer + continue + after_marker = marker_index + len(marker) + remainder = probe_buffer[after_marker:] + pending_modify["buffer"] = remainder + pending_modify["raw_buffer"] = marker + remainder + pending_modify["start_seen"] = True + pending_modify["detected_blocks"] = set() + pending_modify["probe_buffer"] = "" + if pending_modify.get("display_id"): + sender('update_action', { + 'id': pending_modify["display_id"], + 'status': 'running', + 'preparing_id': pending_modify.get("tool_call_id"), + 
'message': f"正在修改 {pending_modify['path']}..." + }) + else: + pending_modify["buffer"] += content + pending_modify["raw_buffer"] += content + + if pending_modify.get("start_seen"): + block_text = pending_modify["buffer"] + for match in re.finditer(r"\[replace:(\d+)\]", block_text): + try: + block_index = int(match.group(1)) + except ValueError: + continue + detected_blocks = pending_modify.setdefault("detected_blocks", set()) + if block_index not in detected_blocks: + detected_blocks.add(block_index) + if pending_modify.get("display_id"): + sender('update_action', { + 'id': pending_modify["display_id"], + 'status': 'running', + 'preparing_id': pending_modify.get("tool_call_id"), + 'message': f"正在对 {pending_modify['path']} 进行第 {block_index} 处修改..." + }) + + if pending_modify.get("start_seen"): + end_pos = pending_modify["buffer"].find(pending_modify["end_marker"]) + if end_pos != -1: + pending_modify["end_index"] = end_pos + modify_break_triggered = True + debug_log("检测到<<>>,即将终止流式输出并应用修改") + break + continue + elif expecting_modify: + modify_probe_buffer += content + if len(modify_probe_buffer) > 10000: + modify_probe_buffer = modify_probe_buffer[-10000:] + + marker_match = re.search(r"<<>>", modify_probe_buffer) + if marker_match: + detected_raw_path = marker_match.group(1) + detected_path = detected_raw_path.strip() + marker_full = marker_match.group(0) + after_marker_index = modify_probe_buffer.find(marker_full) + len(marker_full) + remainder = modify_probe_buffer[after_marker_index:] + modify_probe_buffer = "" + + if not detected_path: + debug_log("检测到 MODIFY 起始标记但路径为空,忽略。") + continue + + pending_modify = { + "path": detected_path, + "tool_call_id": None, + "buffer": remainder, + "raw_buffer": marker_full + remainder, + "start_marker": marker_full, + "end_marker": "<<>>", + "start_seen": True, + "end_index": None, + "display_id": None, + "detected_blocks": set() + } + if hasattr(web_terminal, "pending_modify_request"): + web_terminal.pending_modify_request = 
{"path": detected_path} + debug_log(f"直接检测到modify起始标记,构建修改缓冲: {detected_path}") + + end_pos = pending_modify["buffer"].find(pending_modify["end_marker"]) + if end_pos != -1: + pending_modify["end_index"] = end_pos + modify_break_triggered = True + debug_log("检测到<<>>,即将终止流式输出并应用修改") + break + continue + + if pending_append: + pending_append["buffer"] += content + + if pending_append.get("content_start") is None: + marker_index = pending_append["buffer"].find(pending_append["start_marker"]) + if marker_index != -1: + pending_append["content_start"] = marker_index + len(pending_append["start_marker"]) + debug_log(f"检测到追加起始标识: {pending_append['start_marker']}") + + if pending_append.get("content_start") is not None: + end_index = pending_append["buffer"].find( + pending_append["end_marker"], + pending_append["content_start"] + ) + if end_index != -1: + pending_append["end_index"] = end_index + append_break_triggered = True + debug_log("检测到<<>>,即将终止流式输出并写入文件") + break + + # 继续累积追加内容 + continue + elif expecting_append: + append_probe_buffer += content + # 限制缓冲区大小防止过长 + if len(append_probe_buffer) > 10000: + append_probe_buffer = append_probe_buffer[-10000:] + + marker_match = re.search(r"<<>>", append_probe_buffer) + if marker_match: + detected_raw_path = marker_match.group(1) + detected_path = detected_raw_path.strip() + if not detected_path: + append_probe_buffer = append_probe_buffer[marker_match.end():] + continue + marker_full = marker_match.group(0) + after_marker_index = append_probe_buffer.find(marker_full) + len(marker_full) + remainder = append_probe_buffer[after_marker_index:] + append_probe_buffer = "" + pending_append = { + "path": detected_path, + "tool_call_id": None, + "buffer": remainder, + "start_marker": marker_full, + "end_marker": "<<>>", + "content_start": 0, + "end_index": None, + "display_id": None + } + if hasattr(web_terminal, "pending_append_request"): + web_terminal.pending_append_request = {"path": detected_path} + 
debug_log(f"直接检测到append起始标记,构建追加缓冲: {detected_path}") + # 检查是否立即包含结束标记 + if pending_append["buffer"]: + end_index = pending_append["buffer"].find(pending_append["end_marker"], pending_append["content_start"]) + if end_index != -1: + pending_append["end_index"] = end_index + append_break_triggered = True + debug_log("检测到<<>>,即将终止流式输出并写入文件") + break + continue + + if not text_started: + text_started = True + text_streaming = True + sender('text_start', {}) + brief_log("模型输出了内容") + await asyncio.sleep(0.05) + + if not pending_append: + full_response += content + accumulated_response += content + text_has_content = True + emit_time = time.time() + elapsed = 0.0 if last_text_chunk_time is None else emit_time - last_text_chunk_time + last_text_chunk_time = emit_time + text_chunk_index += 1 + log_backend_chunk( + conversation_id, + current_iteration, + text_chunk_index, + elapsed, + len(content), + content[:32] + ) + sender('text_chunk', { + 'content': content, + 'index': text_chunk_index, + 'elapsed': elapsed + }) + + # 收集工具调用 - 实时发送准备状态 + if "tool_calls" in delta: + tool_chunks += 1 + for tc in delta["tool_calls"]: + found = False + for existing in tool_calls: + if existing.get("index") == tc.get("index"): + if "function" in tc and "arguments" in tc["function"]: + arg_chunk = tc["function"]["arguments"] + existing_fn = existing.get("function", {}) + existing_args = existing_fn.get("arguments", "") + existing_fn["arguments"] = (existing_args or "") + arg_chunk + existing["function"] = existing_fn + + combined_args = existing_fn.get("arguments", "") + tool_id = existing.get("id") or tc.get("id") + tool_name = ( + existing_fn.get("name") + or tc.get("function", {}).get("name", "") + ) + intent_value = extract_intent_from_partial(combined_args) + if ( + intent_value + and tool_id + and detected_tool_intent.get(tool_id) != intent_value + ): + detected_tool_intent[tool_id] = intent_value + brief_log(f"[intent] 增量提取 {tool_name}: {intent_value}") + sender('tool_intent', { + 
'id': tool_id, + 'name': tool_name, + 'intent': intent_value, + 'conversation_id': conversation_id + }) + debug_log(f" 发送工具意图: {tool_name} -> {intent_value}") + await asyncio.sleep(0.01) + found = True + break + + if not found and tc.get("id"): + tool_id = tc["id"] + tool_name = tc.get("function", {}).get("name", "") + arguments_str = tc.get("function", {}).get("arguments", "") or "" + + # 新工具检测到,立即发送准备事件 + if tool_id not in detected_tools and tool_name: + detected_tools[tool_id] = tool_name + + # 尝试提前提取 intent + intent_value = None + if arguments_str: + intent_value = extract_intent_from_partial(arguments_str) + if intent_value: + detected_tool_intent[tool_id] = intent_value + brief_log(f"[intent] 预提取 {tool_name}: {intent_value}") + + # 立即发送工具准备中事件 + brief_log(f"[tool] 准备调用 {tool_name} (id={tool_id}) intent={intent_value or '-'}") + sender('tool_preparing', { + 'id': tool_id, + 'name': tool_name, + 'message': f'准备调用 {tool_name}...', + 'intent': intent_value, + 'conversation_id': conversation_id + }) + debug_log(f" 发送工具准备事件: {tool_name}") + await asyncio.sleep(0.1) + + tool_calls.append({ + "id": tool_id, + "index": tc.get("index"), + "type": "function", + "function": { + "name": tool_name, + "arguments": arguments_str + } + }) + # 尝试从增量参数中抽取 intent,并单独推送 + if tool_id and arguments_str: + intent_value = extract_intent_from_partial(arguments_str) + if intent_value and detected_tool_intent.get(tool_id) != intent_value: + detected_tool_intent[tool_id] = intent_value + sender('tool_intent', { + 'id': tool_id, + 'name': tool_name, + 'intent': intent_value, + 'conversation_id': conversation_id + }) + debug_log(f" 发送工具意图: {tool_name} -> {intent_value}") + await asyncio.sleep(0.01) + debug_log(f" 新工具: {tool_name}") + + # 检查是否被停止 + client_stop_info = get_stop_flag(client_sid, username) + if client_stop_info: + stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info + if stop_requested: + debug_log("任务在流处理完成后检测到停止状态") 
+ sender('task_stopped', { + 'message': '命令执行被用户取消', + 'reason': 'user_stop' + }) + _cancel_pending_tools(tool_calls) + clear_stop_flag(client_sid, username) + return + + # === API响应完成后只计算输出token === + if last_usage_payload: + try: + web_terminal.context_manager.apply_usage_statistics(last_usage_payload) + debug_log( + f"Usage统计: prompt={last_usage_payload.get('prompt_tokens', 0)}, " + f"completion={last_usage_payload.get('completion_tokens', 0)}, " + f"total={last_usage_payload.get('total_tokens', 0)}" + ) + except Exception as e: + debug_log(f"Usage统计更新失败: {e}") + else: + debug_log("未获取到usage字段,跳过token统计更新") + + + if api_error: + try: + debug_log(f"API错误原始数据: {json.dumps(api_error, ensure_ascii=False)}") + except Exception: + debug_log(f"API错误原始数据(不可序列化): {repr(api_error)}") + error_message = "" + error_status = None + error_type = None + error_code = None + error_text = "" + request_dump = None + error_base_url = None + error_model_id = None + if isinstance(api_error, dict): + error_status = api_error.get("status_code") + error_type = api_error.get("error_type") or api_error.get("type") + error_code = api_error.get("error_code") or api_error.get("code") + error_text = api_error.get("error_text") or "" + error_message = ( + api_error.get("error_message") + or api_error.get("message") + or error_text + or "" + ) + request_dump = api_error.get("request_dump") + error_base_url = api_error.get("base_url") + error_model_id = api_error.get("model_id") + elif isinstance(api_error, str): + error_message = api_error + if not error_message: + if error_status: + error_message = f"API 请求失败(HTTP {error_status})" + else: + error_message = "API 请求失败" + # 若命中阿里云配额错误,立即写入状态并切换到官方 API + try: + from utils.aliyun_fallback import compute_disabled_until, set_disabled_until + disabled_until, reason = compute_disabled_until(error_message) + if disabled_until and reason: + set_disabled_until(getattr(web_terminal, "model_key", None) or "kimi-k2.5", disabled_until, reason) + profile = 
get_model_profile(getattr(web_terminal, "model_key", None) or "kimi-k2.5") + web_terminal.apply_model_profile(profile) + except Exception as exc: + debug_log(f"处理阿里云配额回退失败: {exc}") + can_retry = ( + api_attempt < max_api_retries + and not full_response + and not tool_calls + and not current_thinking + and not pending_append + and not pending_modify + ) + sender('error', { + 'message': error_message, + 'status_code': error_status, + 'error_type': error_type, + 'error_code': error_code, + 'error_text': error_text, + 'request_dump': request_dump, + 'base_url': error_base_url, + 'model_id': error_model_id, + 'retry': bool(can_retry), + 'retry_in': retry_delay_seconds if can_retry else None, + 'attempt': api_attempt + 1, + 'max_attempts': max_api_retries + 1 + }) + if can_retry: + try: + profile = get_model_profile(getattr(web_terminal, "model_key", None) or "kimi-k2.5") + web_terminal.apply_model_profile(profile) + except Exception as exc: + debug_log(f"重试前更新模型配置失败: {exc}") + cancelled = await _wait_retry_delay(retry_delay_seconds) + if cancelled: + return + continue + _cancel_pending_tools(tool_calls) + return + break + + # 流结束后的处理 + debug_log(f"\n流结束统计:") + debug_log(f" 总chunks: {chunk_count}") + debug_log(f" 思考chunks: {reasoning_chunks}") + debug_log(f" 内容chunks: {content_chunks}") + debug_log(f" 工具chunks: {tool_chunks}") + debug_log(f" 收集到的思考: {len(current_thinking)} 字符") + debug_log(f" 收集到的正文: {len(full_response)} 字符") + debug_log(f" 收集到的工具: {len(tool_calls)} 个") + + if not append_result["handled"] and pending_append: + append_result = await finalize_pending_append(full_response, True, finish_reason=last_finish_reason) + if not modify_result["handled"] and pending_modify: + modify_result = await finalize_pending_modify(full_response, True, finish_reason=last_finish_reason) + + # 结束未完成的流 + if in_thinking and not thinking_ended: + sender('thinking_end', {'full_content': current_thinking}) + await asyncio.sleep(0.1) + + + # 确保text_end事件被发送 + if text_started and 
text_has_content and not append_result["handled"] and not modify_result["handled"]: + debug_log(f"发送text_end事件,完整内容长度: {len(full_response)}") + sender('text_end', {'full_content': full_response}) + await asyncio.sleep(0.1) + text_streaming = False + + if full_response.strip(): + debug_log(f"流式文本内容长度: {len(full_response)} 字符") + + if append_result["handled"]: + append_metadata = append_result.get("assistant_metadata") + append_content_text = append_result.get("assistant_content") + if append_content_text: + web_terminal.context_manager.add_conversation( + "assistant", + append_content_text, + metadata=append_metadata + ) + debug_log("💾 增量保存:追加正文快照") + + payload_info = append_metadata.get("append_payload") if append_metadata else {} + sender('append_payload', { + 'path': payload_info.get("path") or append_result.get("path"), + 'forced': payload_info.get("forced", False), + 'lines': payload_info.get("lines"), + 'bytes': payload_info.get("bytes"), + 'tool_call_id': payload_info.get("tool_call_id") or append_result.get("tool_call_id"), + 'success': payload_info.get("success", append_result.get("success", False)), + 'conversation_id': conversation_id + }) + + if append_result["tool_content"]: + tool_call_id = append_result.get("tool_call_id") or f"append_{int(time.time() * 1000)}" + system_notice = format_tool_result_notice("append_to_file", tool_call_id, append_result["tool_content"]) + web_terminal.context_manager.add_conversation("system", system_notice) + append_result["tool_call_id"] = tool_call_id + debug_log("💾 增量保存:append_to_file 工具结果(system 通知)") + + finish_reason = append_result.get("finish_reason") + path_for_prompt = append_result.get("path") + need_follow_prompt = ( + finish_reason == "length" or + append_result.get("forced") or + not append_result.get("success") + ) + + if need_follow_prompt and path_for_prompt: + prompt_lines = [ + f"append_to_file 在处理 {path_for_prompt} 时未完成,需要重新发起写入。" + ] + if finish_reason == "length": + prompt_lines.append( + 
"上一次输出达到系统单次输出上限,已写入的内容已保存。" + ) + if append_result.get("forced"): + prompt_lines.append( + "收到的内容缺少 <<>> 标记,系统依据流式结束位置落盘。" + ) + if not append_result.get("success"): + prompt_lines.append("系统未能识别有效的追加标记。") + prompt_lines.append( + "请再次调用 append_to_file 工具获取新的写入窗口,并在工具调用的输出中遵循以下格式:" + ) + prompt_lines.append(f"<<>>") + prompt_lines.append("...填写剩余正文,如内容已完成可留空...") + prompt_lines.append("<<>>") + prompt_lines.append("不要在普通回复中粘贴上述标记,必须通过 append_to_file 工具发送。") + follow_prompt = "\n".join(prompt_lines) + messages.append({ + "role": "system", + "content": follow_prompt + }) + web_terminal.context_manager.add_conversation("system", follow_prompt) + debug_log("已注入追加任务提示") + + if append_result["handled"] and append_result.get("forced") and append_result.get("success"): + mark_force_thinking(web_terminal, reason="append_forced_finish") + if append_result["handled"] and not append_result.get("success"): + sender('system_message', { + 'content': f'⚠️ 追加写入失败:{append_result.get("error")}' + }) + maybe_mark_failure_from_message(web_terminal, f'⚠️ 追加写入失败:{append_result.get("error")}') + mark_force_thinking(web_terminal, reason="append_failed") + + if modify_result["handled"]: + modify_metadata = modify_result.get("assistant_metadata") + modify_content_text = modify_result.get("assistant_content") + if modify_content_text: + web_terminal.context_manager.add_conversation( + "assistant", + modify_content_text, + metadata=modify_metadata + ) + debug_log("💾 增量保存:修改正文快照") + + payload_info = modify_metadata.get("modify_payload") if modify_metadata else {} + sender('modify_payload', { + 'path': payload_info.get("path") or modify_result.get("path"), + 'total': payload_info.get("total_blocks") or modify_result.get("total_blocks"), + 'completed': payload_info.get("completed") or modify_result.get("completed_blocks"), + 'failed': payload_info.get("failed") or modify_result.get("failed_blocks"), + 'forced': payload_info.get("forced", modify_result.get("forced", False)), + 'success': 
modify_result.get("success", False), + 'conversation_id': conversation_id + }) + + if modify_result["tool_content"]: + tool_call_id = modify_result.get("tool_call_id") or f"modify_{int(time.time() * 1000)}" + system_notice = format_tool_result_notice("modify_file", tool_call_id, modify_result["tool_content"]) + web_terminal.context_manager.add_conversation("system", system_notice) + modify_result["tool_call_id"] = tool_call_id + debug_log("💾 增量保存:modify_file 工具结果(system 通知)") + + path_for_prompt = modify_result.get("path") + failed_blocks = modify_result.get("failed_blocks") or [] + need_follow_prompt = modify_result.get("forced") or bool(failed_blocks) + + if need_follow_prompt and path_for_prompt: + prompt_lines = [ + f"modify_file 在处理 {path_for_prompt} 时未完成,需要重新发起补丁。" + ] + if modify_result.get("forced"): + prompt_lines.append( + "刚才的内容缺少 <<>> 标记,系统仅应用了已识别的部分。" + ) + if failed_blocks: + failed_text = "、".join(str(idx) for idx in failed_blocks) + prompt_lines.append(f"以下补丁未成功:第 {failed_text} 处。") + prompt_lines.append( + "请再次调用 modify_file 工具,并在新的工具调用中按以下模板提供完整补丁:" + ) + prompt_lines.append(f"<<>>") + prompt_lines.append("[replace:序号]") + prompt_lines.append("<>") + prompt_lines.append("...原文(必须逐字匹配,包含全部缩进、空格和换行)...") + prompt_lines.append("<>") + prompt_lines.append("<>") + prompt_lines.append("...新内容,可留空表示清空,注意保持结构完整...") + prompt_lines.append("<>") + prompt_lines.append("[/replace]") + prompt_lines.append("<<>>") + prompt_lines.append("请勿在普通回复中直接粘贴补丁,必须通过 modify_file 工具发送。") + follow_prompt = "\n".join(prompt_lines) + messages.append({ + "role": "system", + "content": follow_prompt + }) + web_terminal.context_manager.add_conversation("system", follow_prompt) + debug_log("已注入修改任务提示") + + if modify_result["handled"] and modify_result.get("failed_blocks"): + mark_force_thinking(web_terminal, reason="modify_partial_failure") + if modify_result["handled"] and modify_result.get("forced") and modify_result.get("success"): + mark_force_thinking(web_terminal, 
reason="modify_forced_finish") + if modify_result["handled"] and not modify_result.get("success"): + error_message = modify_result.get("summary_message") or modify_result.get("error") or "修改操作未成功,请根据提示重新执行。" + sender('system_message', { + 'content': f'⚠️ 修改操作存在未完成的内容:{error_message}' + }) + maybe_mark_failure_from_message(web_terminal, f'⚠️ 修改操作存在未完成的内容:{error_message}') + mark_force_thinking(web_terminal, reason="modify_failed") + + if web_terminal.api_client.last_call_used_thinking and current_thinking: + web_terminal.api_client.current_task_thinking = current_thinking or "" + if web_terminal.api_client.current_task_first_call: + web_terminal.api_client.current_task_first_call = False + update_thinking_after_call(web_terminal) + + # 检测是否有格式错误的工具调用 + if not tool_calls and full_response and AUTO_FIX_TOOL_CALL and not append_result["handled"] and not modify_result["handled"]: + if detect_malformed_tool_call(full_response): + auto_fix_attempts += 1 + + if auto_fix_attempts <= AUTO_FIX_MAX_ATTEMPTS: + debug_log(f"检测到格式错误的工具调用,尝试自动修复 (尝试 {auto_fix_attempts}/{AUTO_FIX_MAX_ATTEMPTS})") + + fix_message = "你使用了错误的格式输出工具调用。请使用正确的工具调用格式而不是直接输出JSON。根据当前进度继续执行任务。" + + sender('system_message', { + 'content': f'⚠️ 自动修复: {fix_message}' + }) + maybe_mark_failure_from_message(web_terminal, f'⚠️ 自动修复: {fix_message}') + + messages.append({ + "role": "user", + "content": fix_message + }) + + await asyncio.sleep(1) + continue + else: + debug_log(f"自动修复尝试已达上限 ({AUTO_FIX_MAX_ATTEMPTS})") + sender('system_message', { + 'content': f'⌘ 工具调用格式错误,自动修复失败。请手动检查并重试。' + }) + maybe_mark_failure_from_message(web_terminal, '⌘ 工具调用格式错误,自动修复失败。请手动检查并重试。') + break + + # 构建助手消息(用于API继续对话) + assistant_content_parts = [] + + if full_response: + assistant_content_parts.append(full_response) + elif append_result["handled"] and append_result["assistant_content"]: + assistant_content_parts.append(append_result["assistant_content"]) + elif modify_result["handled"] and modify_result.get("assistant_content"): + 
assistant_content_parts.append(modify_result["assistant_content"]) + + assistant_content = "\n".join(assistant_content_parts) if assistant_content_parts else "" + + # 添加到消息历史(用于API继续对话,不保存到文件) + assistant_message = { + "role": "assistant", + "content": assistant_content, + "tool_calls": tool_calls + } + if current_thinking: + assistant_message["reasoning_content"] = current_thinking + + messages.append(assistant_message) + if assistant_content or current_thinking or tool_calls: + web_terminal.context_manager.add_conversation( + "assistant", + assistant_content, + tool_calls=tool_calls if tool_calls else None, + reasoning_content=current_thinking or None + ) + + # 为下一轮迭代重置流状态标志,但保留 full_response 供上面保存使用 + text_streaming = False + text_started = False + text_has_content = False + full_response = "" + + if append_result["handled"] and append_result.get("tool_content"): + tool_call_id = append_result.get("tool_call_id") or f"append_{int(time.time() * 1000)}" + system_notice = format_tool_result_notice("append_to_file", tool_call_id, append_result["tool_content"]) + messages.append({ + "role": "system", + "content": system_notice + }) + append_result["tool_call_id"] = tool_call_id + debug_log("已将 append_to_file 工具结果以 system 形式追加到对话上下文") + if modify_result["handled"] and modify_result.get("tool_content"): + tool_call_id = modify_result.get("tool_call_id") or f"modify_{int(time.time() * 1000)}" + system_notice = format_tool_result_notice("modify_file", tool_call_id, modify_result["tool_content"]) + messages.append({ + "role": "system", + "content": system_notice + }) + modify_result["tool_call_id"] = tool_call_id + debug_log("已将 modify_file 工具结果以 system 形式追加到对话上下文") + + force_continue = append_result["handled"] or modify_result["handled"] + if force_continue: + if append_result["handled"]: + debug_log("append_to_file 已处理,继续下一轮以让模型返回确认回复") + elif modify_result["handled"]: + debug_log("modify_file 已处理,继续下一轮以让模型返回确认回复") + else: + debug_log("补丁处理完成,继续下一轮以获取模型回复") + continue + 
+ if not tool_calls: + debug_log("没有工具调用,结束迭代") + break + + # 检查连续相同工具调用 + for tc in tool_calls: + tool_name = tc["function"]["name"] + + if tool_name == last_tool_name: + consecutive_same_tool[tool_name] += 1 + + if ( + MAX_CONSECUTIVE_SAME_TOOL is not None + and consecutive_same_tool[tool_name] >= MAX_CONSECUTIVE_SAME_TOOL + ): + debug_log(f"警告: 连续调用相同工具 {tool_name} 已达 {MAX_CONSECUTIVE_SAME_TOOL} 次") + sender('system_message', { + 'content': f'⚠️ 检测到重复调用 {tool_name} 工具 {MAX_CONSECUTIVE_SAME_TOOL} 次,可能存在循环。' + }) + maybe_mark_failure_from_message(web_terminal, f'⚠️ 检测到重复调用 {tool_name} 工具 {MAX_CONSECUTIVE_SAME_TOOL} 次,可能存在循环。') + + if consecutive_same_tool[tool_name] >= MAX_CONSECUTIVE_SAME_TOOL + 2: + debug_log(f"终止: 工具 {tool_name} 调用次数过多") + sender('system_message', { + 'content': f'⌘ 工具 {tool_name} 重复调用过多,任务终止。' + }) + maybe_mark_failure_from_message(web_terminal, f'⌘ 工具 {tool_name} 重复调用过多,任务终止。') + break + else: + consecutive_same_tool.clear() + consecutive_same_tool[tool_name] = 1 + + last_tool_name = tool_name + # 更新统计 + total_tool_calls += len(tool_calls) + + # 执行每个工具 + for tool_call in tool_calls: + # 检查停止标志 + client_stop_info = get_stop_flag(client_sid, username) + if client_stop_info: + stop_requested = client_stop_info.get('stop', False) if isinstance(client_stop_info, dict) else client_stop_info + if stop_requested: + debug_log("在工具调用过程中检测到停止状态") + tool_call_id = tool_call.get("id") + function_name = tool_call.get("function", {}).get("name") + # 通知前端该工具已被取消,避免界面卡住 + sender('update_action', { + 'preparing_id': tool_call_id, + 'status': 'cancelled', + 'result': { + "success": False, + "status": "cancelled", + "message": "命令执行被用户取消", + "tool": function_name + } + }) + # 在消息列表中记录取消结果,防止重新加载时仍显示运行中 + if tool_call_id: + messages.append({ + "role": "tool", + "tool_call_id": tool_call_id, + "name": function_name, + "content": "命令执行被用户取消", + "metadata": {"status": "cancelled"} + }) + sender('task_stopped', { + 'message': '命令执行被用户取消', + 'reason': 'user_stop' + }) 
+ clear_stop_flag(client_sid, username) + return + + # 工具调用间隔控制 + current_time = time.time() + if last_tool_call_time > 0: + elapsed = current_time - last_tool_call_time + if elapsed < TOOL_CALL_COOLDOWN: + await asyncio.sleep(TOOL_CALL_COOLDOWN - elapsed) + last_tool_call_time = time.time() + + function_name = tool_call["function"]["name"] + arguments_str = tool_call["function"]["arguments"] + tool_call_id = tool_call["id"] + + + debug_log(f"准备解析JSON,工具: {function_name}, 参数长度: {len(arguments_str)}") + debug_log(f"JSON参数前200字符: {arguments_str[:200]}") + debug_log(f"JSON参数后200字符: {arguments_str[-200:]}") + + # 使用改进的参数解析方法 + if hasattr(web_terminal, 'api_client') and hasattr(web_terminal.api_client, '_safe_tool_arguments_parse'): + success, arguments, error_msg = web_terminal.api_client._safe_tool_arguments_parse(arguments_str, function_name) + if not success: + debug_log(f"安全解析失败: {error_msg}") + error_text = f'工具参数解析失败: {error_msg}' + error_payload = { + "success": False, + "error": error_text, + "error_type": "parameter_format_error", + "tool_name": function_name, + "tool_call_id": tool_call_id, + "message": error_text + } + sender('error', {'message': error_text}) + sender('update_action', { + 'preparing_id': tool_call_id, + 'status': 'completed', + 'result': error_payload, + 'message': error_text + }) + error_content = json.dumps(error_payload, ensure_ascii=False) + web_terminal.context_manager.add_conversation( + "tool", + error_content, + tool_call_id=tool_call_id, + name=function_name + ) + messages.append({ + "role": "tool", + "tool_call_id": tool_call_id, + "name": function_name, + "content": error_content + }) + continue + debug_log(f"使用安全解析成功,参数键: {list(arguments.keys())}") + else: + # 回退到带有基本修复逻辑的解析 + try: + arguments = json.loads(arguments_str) if arguments_str.strip() else {} + debug_log(f"直接JSON解析成功,参数键: {list(arguments.keys())}") + except json.JSONDecodeError as e: + debug_log(f"原始JSON解析失败: {e}") + # 尝试基本的JSON修复 + repaired_str = arguments_str.strip() 
+ repair_attempts = [] + + # 修复1: 未闭合字符串 + if repaired_str.count('"') % 2 == 1: + repaired_str += '"' + repair_attempts.append("添加闭合引号") + + # 修复2: 未闭合JSON对象 + if repaired_str.startswith('{') and not repaired_str.rstrip().endswith('}'): + repaired_str = repaired_str.rstrip() + '}' + repair_attempts.append("添加闭合括号") + + # 修复3: 截断的JSON(移除不完整的最后一个键值对) + if not repair_attempts: # 如果前面的修复都没用上 + last_comma = repaired_str.rfind(',') + if last_comma > 0: + repaired_str = repaired_str[:last_comma] + '}' + repair_attempts.append("移除不完整的键值对") + + # 尝试解析修复后的JSON + try: + arguments = json.loads(repaired_str) + debug_log(f"JSON修复成功: {', '.join(repair_attempts)}") + debug_log(f"修复后参数键: {list(arguments.keys())}") + except json.JSONDecodeError as repair_error: + debug_log(f"JSON修复也失败: {repair_error}") + debug_log(f"修复尝试: {repair_attempts}") + debug_log(f"修复后内容前100字符: {repaired_str[:100]}") + error_text = f'工具参数解析失败: {e}' + error_payload = { + "success": False, + "error": error_text, + "error_type": "parameter_format_error", + "tool_name": function_name, + "tool_call_id": tool_call_id, + "message": error_text + } + sender('error', {'message': error_text}) + sender('update_action', { + 'preparing_id': tool_call_id, + 'status': 'completed', + 'result': error_payload, + 'message': error_text + }) + error_content = json.dumps(error_payload, ensure_ascii=False) + web_terminal.context_manager.add_conversation( + "tool", + error_content, + tool_call_id=tool_call_id, + name=function_name + ) + messages.append({ + "role": "tool", + "tool_call_id": tool_call_id, + "name": function_name, + "content": error_content + }) + continue + + debug_log(f"执行工具: {function_name} (ID: {tool_call_id})") + + # 发送工具开始事件 + tool_display_id = f"tool_{iteration}_{function_name}_{time.time()}" + monitor_snapshot = None + snapshot_path = None + memory_snapshot_type = None + if function_name in MONITOR_FILE_TOOLS: + snapshot_path = resolve_monitor_path(arguments) + monitor_snapshot = 
capture_monitor_snapshot(web_terminal.file_manager, snapshot_path, MONITOR_SNAPSHOT_CHAR_LIMIT, debug_log) + if monitor_snapshot: + cache_monitor_snapshot(tool_display_id, 'before', monitor_snapshot) + elif function_name in MONITOR_MEMORY_TOOLS: + memory_snapshot_type = (arguments.get('memory_type') or 'main').lower() + before_entries = None + try: + before_entries = resolve_monitor_memory(web_terminal.memory_manager._read_entries(memory_snapshot_type), MONITOR_MEMORY_ENTRY_LIMIT) + except Exception as exc: + debug_log(f"[MonitorSnapshot] 读取记忆失败: {memory_snapshot_type} ({exc})") + if before_entries is not None: + monitor_snapshot = { + 'memory_type': memory_snapshot_type, + 'entries': before_entries + } + cache_monitor_snapshot(tool_display_id, 'before', monitor_snapshot) + + sender('tool_start', { + 'id': tool_display_id, + 'name': function_name, + 'arguments': arguments, + 'preparing_id': tool_call_id, + 'monitor_snapshot': monitor_snapshot, + 'conversation_id': conversation_id + }) + brief_log(f"调用了工具: {function_name}") + + await asyncio.sleep(0.3) + start_time = time.time() + + # 执行工具 + tool_result = await web_terminal.handle_tool_call(function_name, arguments) + debug_log(f"工具结果: {tool_result[:200]}...") + + execution_time = time.time() - start_time + if execution_time < 1.5: + await asyncio.sleep(1.5 - execution_time) + + # 更新工具状态 + result_data = {} + try: + result_data = json.loads(tool_result) + except: + result_data = {'output': tool_result} + tool_failed = detect_tool_failure(result_data) + + action_status = 'completed' + action_message = None + awaiting_flag = False + + if function_name in {"write_file", "edit_file"}: + diff_path = result_data.get("path") or arguments.get("file_path") + summary = result_data.get("summary") or result_data.get("message") + if summary: + action_message = summary + debug_log(f"{function_name} 执行完成: {summary or '无摘要'}") + + if function_name == "wait_sub_agent": + system_msg = result_data.get("system_message") + if system_msg: 
+ messages.append({ + "role": "system", + "content": system_msg + }) + sender('system_message', { + 'content': system_msg, + 'inline': False + }) + maybe_mark_failure_from_message(web_terminal, system_msg) + monitor_snapshot_after = None + if function_name in MONITOR_FILE_TOOLS: + result_path = None + if isinstance(result_data, dict): + result_path = resolve_monitor_path(result_data) + if not result_path: + candidate_path = result_data.get('path') + if isinstance(candidate_path, str) and candidate_path.strip(): + result_path = candidate_path.strip() + if not result_path: + result_path = resolve_monitor_path(arguments, snapshot_path) or snapshot_path + monitor_snapshot_after = capture_monitor_snapshot(web_terminal.file_manager, result_path, MONITOR_SNAPSHOT_CHAR_LIMIT, debug_log) + elif function_name in MONITOR_MEMORY_TOOLS: + memory_after_type = str( + arguments.get('memory_type') + or (isinstance(result_data, dict) and result_data.get('memory_type')) + or memory_snapshot_type + or 'main' + ).lower() + after_entries = None + try: + after_entries = resolve_monitor_memory(web_terminal.memory_manager._read_entries(memory_after_type), MONITOR_MEMORY_ENTRY_LIMIT) + except Exception as exc: + debug_log(f"[MonitorSnapshot] 读取记忆失败(after): {memory_after_type} ({exc})") + if after_entries is not None: + monitor_snapshot_after = { + 'memory_type': memory_after_type, + 'entries': after_entries + } + + update_payload = { + 'id': tool_display_id, + 'status': action_status, + 'result': result_data, + 'preparing_id': tool_call_id, + 'conversation_id': conversation_id + } + if action_message: + update_payload['message'] = action_message + if awaiting_flag: + update_payload['awaiting_content'] = True + if monitor_snapshot_after: + update_payload['monitor_snapshot_after'] = monitor_snapshot_after + cache_monitor_snapshot(tool_display_id, 'after', monitor_snapshot_after) + + sender('update_action', update_payload) + + if function_name in ['create_file', 'delete_file', 'rename_file', 
'create_folder']: + if not web_terminal.context_manager._is_host_mode_without_safety(): + structure = web_terminal.context_manager.get_project_structure() + sender('file_tree_update', structure) + + # ===== 增量保存:立即保存工具结果 ===== + metadata_payload = None + tool_images = None + tool_videos = None + if isinstance(result_data, dict): + # 特殊处理 web_search:保留可供前端渲染的精简结构,以便历史记录复现搜索结果 + if function_name == "web_search": + try: + tool_result_content = json.dumps(compact_web_search_result(result_data), ensure_ascii=False) + except Exception: + tool_result_content = tool_result + else: + tool_result_content = format_tool_result_for_context(function_name, result_data, tool_result) + metadata_payload = {"tool_payload": result_data} + else: + tool_result_content = tool_result + tool_message_content = tool_result_content + + # view_image: 将图片直接附加到 tool 结果中(不再插入 user 消息) + if function_name == "view_image" and getattr(web_terminal, "pending_image_view", None): + inj = web_terminal.pending_image_view + web_terminal.pending_image_view = None + if ( + not tool_failed + and isinstance(result_data, dict) + and result_data.get("success") is not False + ): + img_path = inj.get("path") if isinstance(inj, dict) else None + if img_path: + text_part = tool_result_content if isinstance(tool_result_content, str) else "" + tool_message_content = web_terminal.context_manager._build_content_with_images( + text_part, + [img_path] + ) + tool_images = [img_path] + if metadata_payload is None: + metadata_payload = {} + metadata_payload["tool_image_path"] = img_path + sender('system_message', { + 'content': f'系统已按模型请求将图片附加到工具结果: {img_path}' + }) + + # view_video: 将视频直接附加到 tool 结果中(不再插入 user 消息) + if function_name == "view_video" and getattr(web_terminal, "pending_video_view", None): + inj = web_terminal.pending_video_view + web_terminal.pending_video_view = None + if ( + not tool_failed + and isinstance(result_data, dict) + and result_data.get("success") is not False + ): + video_path = inj.get("path") 
if isinstance(inj, dict) else None + if video_path: + text_part = tool_result_content if isinstance(tool_result_content, str) else "" + video_payload = [video_path] + tool_message_content = web_terminal.context_manager._build_content_with_images( + text_part, + [], + video_payload + ) + tool_videos = [video_path] + if metadata_payload is None: + metadata_payload = {} + metadata_payload["tool_video_path"] = video_path + sender('system_message', { + 'content': f'系统已按模型请求将视频附加到工具结果: {video_path}' + }) + + # 立即保存工具结果 + web_terminal.context_manager.add_conversation( + "tool", + tool_result_content, + tool_call_id=tool_call_id, + name=function_name, + metadata=metadata_payload, + images=tool_images, + videos=tool_videos + ) + debug_log(f"💾 增量保存:工具结果 {function_name}") + system_message = result_data.get("system_message") if isinstance(result_data, dict) else None + if system_message: + web_terminal._record_sub_agent_message(system_message, result_data.get("task_id"), inline=False) + maybe_mark_failure_from_message(web_terminal, system_message) + + # 添加到消息历史(用于API继续对话) + messages.append({ + "role": "tool", + "tool_call_id": tool_call_id, + "name": function_name, + "content": tool_message_content + }) + + if function_name not in {'write_file', 'edit_file'}: + await process_sub_agent_updates(messages, inline=True, after_tool_call_id=tool_call_id) + + await asyncio.sleep(0.2) + + if tool_failed: + mark_force_thinking(web_terminal, reason=f"{function_name}_failed") + + # 标记不再是第一次迭代 + is_first_iteration = False + + + # 最终统计 + debug_log(f"\n{'='*40}") + debug_log(f"任务完成统计:") + debug_log(f" 总迭代次数: {total_iterations}") + debug_log(f" 总工具调用: {total_tool_calls}") + debug_log(f" 自动修复尝试: {auto_fix_attempts}") + debug_log(f" 累积响应: {len(accumulated_response)} 字符") + debug_log(f"{'='*40}\n") + + # 发送完成事件 + sender('task_complete', { + 'total_iterations': total_iterations, + 'total_tool_calls': total_tool_calls, + 'auto_fix_attempts': auto_fix_attempts + })