- Refactor 6000+ line web_server.py into server/ module - Create separate modules: auth, chat, conversation, files, admin, etc. - Keep web_server.py as backward-compatible entry point - Add container running status field in user_container_manager - Improve admin dashboard API with credentials and debug support 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
1176 lines
44 KiB
Python
1176 lines
44 KiB
Python
from __future__ import annotations
|
||
import sys, os
|
||
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
|
||
if PROJECT_ROOT not in sys.path:
|
||
sys.path.insert(0, PROJECT_ROOT)
|
||
|
||
import asyncio, json, time, re, os
|
||
from datetime import datetime, timedelta
|
||
from pathlib import Path
|
||
from collections import defaultdict, Counter, deque
|
||
from typing import Dict, Any, Optional, List, Tuple
|
||
|
||
from flask import Blueprint, request, jsonify, session
|
||
from werkzeug.utils import secure_filename
|
||
import zipfile
|
||
|
||
from config import (
|
||
OUTPUT_FORMATS,
|
||
AUTO_FIX_TOOL_CALL,
|
||
AUTO_FIX_MAX_ATTEMPTS,
|
||
MAX_ITERATIONS_PER_TASK,
|
||
MAX_CONSECUTIVE_SAME_TOOL,
|
||
MAX_TOTAL_TOOL_CALLS,
|
||
TOOL_CALL_COOLDOWN,
|
||
MAX_UPLOAD_SIZE,
|
||
DEFAULT_CONVERSATIONS_LIMIT,
|
||
MAX_CONVERSATIONS_LIMIT,
|
||
CONVERSATIONS_DIR,
|
||
DEFAULT_RESPONSE_MAX_TOKENS,
|
||
DEFAULT_PROJECT_PATH,
|
||
LOGS_DIR,
|
||
AGENT_VERSION,
|
||
THINKING_FAST_INTERVAL,
|
||
PROJECT_MAX_STORAGE_MB,
|
||
PROJECT_MAX_STORAGE_BYTES,
|
||
UPLOAD_SCAN_LOG_SUBDIR,
|
||
)
|
||
from modules.personalization_manager import (
|
||
load_personalization_config,
|
||
save_personalization_config,
|
||
THINKING_INTERVAL_MIN,
|
||
THINKING_INTERVAL_MAX,
|
||
)
|
||
from modules.upload_security import UploadSecurityError
|
||
from modules.user_manager import UserWorkspace
|
||
from modules.usage_tracker import QUOTA_DEFAULTS
|
||
from core.web_terminal import WebTerminal
|
||
from utils.tool_result_formatter import format_tool_result_for_context
|
||
from utils.conversation_manager import ConversationManager
|
||
from utils.api_client import DeepSeekClient
|
||
|
||
from .auth_helpers import api_login_required, resolve_admin_policy, get_current_user_record, get_current_username
|
||
from .context import with_terminal, get_gui_manager, get_upload_guard, build_upload_error_response, ensure_conversation_loaded, reset_system_state, get_user_resources, get_or_create_usage_tracker
|
||
from .utils_common import (
|
||
build_review_lines,
|
||
debug_log,
|
||
log_backend_chunk,
|
||
log_frontend_chunk,
|
||
log_streaming_debug_entry,
|
||
brief_log,
|
||
DEBUG_LOG_FILE,
|
||
CHUNK_BACKEND_LOG_FILE,
|
||
CHUNK_FRONTEND_LOG_FILE,
|
||
STREAMING_DEBUG_LOG_FILE,
|
||
)
|
||
from .extensions import socketio
|
||
from .state import (
|
||
RECENT_UPLOAD_EVENT_LIMIT,
|
||
RECENT_UPLOAD_FEED_LIMIT,
|
||
user_manager,
|
||
container_manager,
|
||
get_last_active_ts,
|
||
)
|
||
|
||
conversation_bp = Blueprint('conversation', __name__)
|
||
|
||
|
||
# === Conversation APIs — background conversation-title generation (split out of app_legacy) ===
|
||
@conversation_bp.route('/api/conversations', methods=['GET'])
@api_login_required
@with_terminal
def get_conversations(terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Return a paginated list of the user's conversations.

    Query params:
        limit:  page size, clamped to [1, MAX_CONVERSATIONS_LIMIT].
        offset: pagination offset, clamped to >= 0.
    """
    try:
        # Fix: the defaults/clamp were hard-coded (20 / 100) while the config
        # constants imported at the top of this module went unused.
        limit = request.args.get('limit', DEFAULT_CONVERSATIONS_LIMIT, type=int)
        offset = request.args.get('offset', 0, type=int)

        limit = max(1, min(limit, MAX_CONVERSATIONS_LIMIT))
        offset = max(0, offset)

        result = terminal.get_conversations_list(limit=limit, offset=offset)

        if result["success"]:
            return jsonify({
                "success": True,
                "data": result["data"]
            })
        return jsonify({
            "success": False,
            "error": result.get("error", "Unknown error"),
            "message": result.get("message", "获取对话列表失败")
        }), 500

    except Exception as e:
        print(f"[API] 获取对话列表错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "获取对话列表时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations', methods=['POST'])
@api_login_required
@with_terminal
def create_conversation(terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Create a new conversation, optionally preserving the current modes."""
    try:
        payload = request.get_json() or {}
        # The frontend expects "new conversation" to fall back to the user's
        # configured default model/mode; honour the incoming values only when
        # the client explicitly asks to preserve the current mode.
        keep_modes = bool(payload.get('preserve_mode'))
        thinking = payload.get('thinking_mode') if keep_modes and 'thinking_mode' in payload else None
        mode = payload.get('mode') if keep_modes and 'mode' in payload else None

        result = terminal.create_new_conversation(thinking_mode=thinking, run_mode=mode)

        if not result["success"]:
            return jsonify(result), 500

        session['run_mode'] = terminal.run_mode
        session['thinking_mode'] = terminal.thinking_mode

        room = f"user_{username}"
        # Tell this user's clients that the list changed...
        socketio.emit('conversation_list_update', {
            'action': 'created',
            'conversation_id': result["conversation_id"]
        }, room=room)
        # ...and that the active conversation switched.
        socketio.emit('conversation_changed', {
            'conversation_id': result["conversation_id"],
            'title': "新对话"
        }, room=room)

        return jsonify(result), 201

    except Exception as e:
        print(f"[API] 创建对话错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "创建对话时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>', methods=['GET'])
@api_login_required
@with_terminal
def get_conversation_info(terminal: WebTerminal, workspace: UserWorkspace, username: str, conversation_id):
    """Return summary metadata for one conversation (no message bodies)."""
    try:
        record = terminal.context_manager.conversation_manager.load_conversation(conversation_id)

        if not record:
            return jsonify({
                "success": False,
                "error": "Conversation not found",
                "message": f"对话 {conversation_id} 不存在"
            }), 404

        # Summary only — shipping full messages would bloat the payload.
        summary = {
            "id": record["id"],
            "title": record["title"],
            "created_at": record["created_at"],
            "updated_at": record["updated_at"],
            "metadata": record["metadata"],
            "messages_count": len(record.get("messages", []))
        }
        return jsonify({
            "success": True,
            "data": summary
        })

    except Exception as e:
        print(f"[API] 获取对话信息错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "获取对话信息时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/load', methods=['PUT'])
@api_login_required
@with_terminal
def load_conversation(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Switch the user's terminal to the given conversation and notify clients."""
    try:
        result = terminal.load_conversation(conversation_id)

        if not result["success"]:
            return jsonify(result), 404 if "不存在" in result.get("message", "") else 500

        room = f"user_{username}"
        # Announce the switch...
        socketio.emit('conversation_changed', {
            'conversation_id': conversation_id,
            'title': result.get("title", "未知对话"),
            'messages_count': result.get("messages_count", 0)
        }, room=room)
        # ...push a fresh status snapshot (the active conversation changed)...
        socketio.emit('status_update', terminal.get_status(), room=room)
        # ...and ask the frontend to reset its UI state.
        socketio.emit('conversation_loaded', {
            'conversation_id': conversation_id,
            'clear_ui': True
        }, room=room)

        return jsonify(result)

    except Exception as e:
        print(f"[API] 加载对话错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "加载对话时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>', methods=['DELETE'])
@api_login_required
@with_terminal
def delete_conversation(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Delete a conversation and broadcast the resulting state changes."""
    try:
        # Remember whether the target is the active conversation before deleting.
        deleting_current = (terminal.context_manager.current_conversation_id == conversation_id)

        result = terminal.delete_conversation(conversation_id)

        if not result["success"]:
            return jsonify(result), 404 if "不存在" in result.get("message", "") else 500

        room = f"user_{username}"
        socketio.emit('conversation_list_update', {
            'action': 'deleted',
            'conversation_id': conversation_id
        }, room=room)

        # Deleting the active conversation must also clear it client-side.
        if deleting_current:
            socketio.emit('conversation_changed', {
                'conversation_id': None,
                'title': None,
                'cleared': True
            }, room=room)

        socketio.emit('status_update', terminal.get_status(), room=room)

        return jsonify(result)

    except Exception as e:
        print(f"[API] 删除对话错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "删除对话时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations/search', methods=['GET'])
@api_login_required
@with_terminal
def search_conversations(terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Search the user's conversations by keyword (?q=...&limit=...)."""
    try:
        query = request.args.get('q', '').strip()
        limit = request.args.get('limit', 20, type=int)

        if not query:
            return jsonify({
                "success": False,
                "error": "Missing query parameter",
                "message": "请提供搜索关键词"
            }), 400

        limit = max(1, min(limit, 50))  # clamp to a sane page size

        found = terminal.search_conversations(query, limit)

        return jsonify({
            "success": True,
            "data": {
                "results": found["results"],
                "count": found["count"],
                "query": query
            }
        })

    except Exception as e:
        print(f"[API] 搜索对话错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "搜索对话时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/messages', methods=['GET'])
@api_login_required
@with_terminal
def get_conversation_messages(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Return a conversation's message history (debugging / detail view).

    Query params:
        limit: if given and positive, return only the last *limit* messages.
    """
    try:
        conversation_data = terminal.context_manager.conversation_manager.load_conversation(conversation_id)

        if conversation_data:
            messages = conversation_data.get("messages", [])

            # Fix: a negative ?limit used to flip the slice — messages[-limit:]
            # with limit < 0 drops the *first* messages instead of keeping the
            # last N. Only honour strictly positive limits.
            limit = request.args.get('limit', type=int)
            if limit and limit > 0:
                messages = messages[-limit:]  # keep the last N messages

            return jsonify({
                "success": True,
                "data": {
                    "conversation_id": conversation_id,
                    "messages": messages,
                    "total_count": len(conversation_data.get("messages", []))
                }
            })
        else:
            return jsonify({
                "success": False,
                "error": "Conversation not found",
                "message": f"对话 {conversation_id} 不存在"
            }), 404

    except Exception as e:
        print(f"[API] 获取对话消息错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "获取对话消息时发生异常"
        }), 500
|
||
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/compress', methods=['POST'])
@api_login_required
@with_terminal
def compress_conversation(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Compress a conversation's bulky messages into a new compressed copy."""
    try:
        policy = resolve_admin_policy(get_current_user_record())
        if policy.get("ui_blocks", {}).get("block_compress_conversation"):
            return jsonify({"success": False, "error": "压缩对话已被管理员禁用"}), 403

        # Accept ids with or without the "conv_" prefix.
        normalized_id = conversation_id if conversation_id.startswith('conv_') else f"conv_{conversation_id}"
        result = terminal.context_manager.compress_conversation(normalized_id)

        if not result.get("success"):
            return jsonify(result), 404 if "不存在" in result.get("error", "") else 400

        new_id = result["compressed_conversation_id"]
        load_result = terminal.load_conversation(new_id)

        # Broadcast only if we actually switched to the compressed copy.
        if load_result.get("success"):
            room = f"user_{username}"
            socketio.emit('conversation_list_update', {
                'action': 'compressed',
                'conversation_id': new_id
            }, room=room)
            socketio.emit('conversation_changed', {
                'conversation_id': new_id,
                'title': load_result.get('title', '压缩后的对话'),
                'messages_count': load_result.get('messages_count', 0)
            }, room=room)
            socketio.emit('conversation_loaded', {
                'conversation_id': new_id,
                'clear_ui': True
            }, room=room)

        return jsonify({
            "success": True,
            "compressed_conversation_id": new_id,
            "compressed_types": result.get("compressed_types", []),
            "system_message": result.get("system_message"),
            "load_result": load_result
        })

    except Exception as e:
        print(f"[API] 压缩对话错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "压缩对话时发生异常"
        }), 500
|
||
|
||
|
||
@conversation_bp.route('/api/sub_agents', methods=['GET'])
@api_login_required
@with_terminal
def list_sub_agents(terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """List sub-agent tasks belonging to the current conversation."""
    manager = getattr(terminal, "sub_agent_manager", None)
    if not manager:
        # No sub-agent subsystem available — report an empty list, not an error.
        return jsonify({"success": True, "data": []})
    try:
        overview = manager.get_overview(
            conversation_id=terminal.context_manager.current_conversation_id
        )
        return jsonify({"success": True, "data": overview})
    except Exception as exc:
        return jsonify({"success": False, "error": str(exc)}), 500
|
||
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/duplicate', methods=['POST'])
@api_login_required
@with_terminal
def duplicate_conversation(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Clone a conversation and switch the user over to the new copy."""
    try:
        result = terminal.context_manager.duplicate_conversation(conversation_id)

        if not result.get("success"):
            return jsonify(result), 404 if "不存在" in result.get("error", "") else 400

        new_id = result["duplicate_conversation_id"]
        load_result = terminal.load_conversation(new_id)

        # Broadcast only if we actually switched to the duplicate.
        if load_result.get("success"):
            room = f"user_{username}"
            socketio.emit('conversation_list_update', {
                'action': 'duplicated',
                'conversation_id': new_id
            }, room=room)
            socketio.emit('conversation_changed', {
                'conversation_id': new_id,
                'title': load_result.get('title', '复制的对话'),
                'messages_count': load_result.get('messages_count', 0)
            }, room=room)
            socketio.emit('conversation_loaded', {
                'conversation_id': new_id,
                'clear_ui': True
            }, room=room)

        return jsonify({
            "success": True,
            "duplicate_conversation_id": new_id,
            "load_result": load_result
        })

    except Exception as e:
        print(f"[API] 复制对话错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "复制对话时发生异常"
        }), 500
|
||
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/review_preview', methods=['GET'])
@api_login_required
@with_terminal
def review_conversation_preview(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Build a review preview — first N text lines only, nothing written to disk."""
    policy = resolve_admin_policy(get_current_user_record())
    if policy.get("ui_blocks", {}).get("block_conversation_review"):
        return jsonify({"success": False, "error": "对话引用已被管理员禁用"}), 403
    try:
        # The active conversation cannot reference itself.
        if conversation_id == terminal.context_manager.current_conversation_id:
            return jsonify({
                "success": False,
                "message": "无法引用当前对话"
            }), 400

        record = terminal.context_manager.conversation_manager.load_conversation(conversation_id)
        if not record:
            return jsonify({
                "success": False,
                "error": "Conversation not found",
                "message": f"对话 {conversation_id} 不存在"
            }), 404

        limit = request.args.get('limit', default=20, type=int) or 20
        preview_lines = build_review_lines(record.get("messages", []), limit=limit)

        return jsonify({
            "success": True,
            "data": {
                "preview": preview_lines,
                "count": len(preview_lines)
            }
        })
    except Exception as e:
        print(f"[API] 对话回顾预览错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "生成预览时发生异常"
        }), 500
|
||
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/review', methods=['POST'])
@api_login_required
@with_terminal
def review_conversation(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Generate a full conversation-review Markdown file in the uploads dir.

    Returns the workspace-relative path of the generated file and its size
    in characters.
    """
    policy = resolve_admin_policy(get_current_user_record())
    if policy.get("ui_blocks", {}).get("block_conversation_review"):
        return jsonify({"success": False, "error": "对话引用已被管理员禁用"}), 403
    try:
        current_id = terminal.context_manager.current_conversation_id
        if conversation_id == current_id:
            return jsonify({
                "success": False,
                "message": "无法引用当前对话"
            }), 400

        conversation_data = terminal.context_manager.conversation_manager.load_conversation(conversation_id)
        if not conversation_data:
            return jsonify({
                "success": False,
                "error": "Conversation not found",
                "message": f"对话 {conversation_id} 不存在"
            }), 404

        messages = conversation_data.get("messages", [])
        lines = build_review_lines(messages)
        content = "\n".join(lines) + "\n"
        char_count = len(content)

        uploads_dir = workspace.uploads_dir / "review"
        uploads_dir.mkdir(parents=True, exist_ok=True)

        # Build a collision-resistant filename from the sanitized title + timestamp.
        title = conversation_data.get("title") or "untitled"
        safe_title = _sanitize_filename_component(title)
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        filename = f"review_{safe_title}_{timestamp}.md"
        target = uploads_dir / filename

        target.write_text(content, encoding='utf-8')

        return jsonify({
            "success": True,
            "data": {
                # Fix: the response previously contained a literal placeholder
                # instead of the generated file's name, so clients could not
                # locate the file that was just written.
                "path": f"user_upload/review/{filename}",
                "char_count": char_count
            }
        })
    except Exception as e:
        print(f"[API] 对话回顾生成错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "生成对话回顾时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations/statistics', methods=['GET'])
@api_login_required
@with_terminal
def get_conversations_statistics(terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Return aggregate statistics over the user's conversations."""
    try:
        return jsonify({
            "success": True,
            "data": terminal.context_manager.get_conversation_statistics()
        })
    except Exception as e:
        print(f"[API] 获取对话统计错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "获取对话统计时发生异常"
        }), 500
|
||
|
||
@conversation_bp.route('/api/conversations/current', methods=['GET'])
@api_login_required
@with_terminal
def get_current_conversation(terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Describe the conversation the user's terminal currently points at."""
    current_id = terminal.context_manager.current_conversation_id

    # Missing or "temp_" ids represent a not-yet-persisted conversation.
    if not current_id or current_id.startswith('temp_'):
        return jsonify({
            "success": True,
            "data": {
                "id": current_id,
                "title": "新对话",
                "messages_count": 0,
                "is_temporary": True
            }
        })

    # Real conversation id — look up the persisted record.
    try:
        record = terminal.context_manager.conversation_manager.load_conversation(current_id)
        if not record:
            return jsonify({
                "success": False,
                "error": "对话不存在"
            }), 404
        return jsonify({
            "success": True,
            "data": {
                "id": current_id,
                "title": record.get("title", "未知对话"),
                "messages_count": len(record.get("messages", [])),
                "is_temporary": False
            }
        })
    except Exception as e:
        print(f"[API] 获取当前对话错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||
|
||
@socketio.on('send_command')
def handle_command(data):
    """Handle a slash-style system command sent over the socket.

    Supported commands: ``clear`` (reset conversation history), ``status``
    (system + terminal status), ``terminals`` (list terminal sessions).
    Anything else yields an "unknown command" result.

    NOTE(review): ``get_terminal_for_sid``, ``record_user_activity`` and
    ``emit`` are not among this module's visible imports — presumably provided
    elsewhere (e.g. flask_socketio's ``emit``); confirm the imports exist.
    NOTE(review): an empty command string would make ``parts[0]`` raise
    IndexError — verify the client never sends one.
    """
    command = data.get('command', '')

    username, terminal, _ = get_terminal_for_sid(request.sid)
    if not terminal:
        emit('error', {'message': 'System not initialized'})
        return
    record_user_activity(username)

    # Accept both "/status" and "status".
    if command.startswith('/'):
        command = command[1:]

    parts = command.split(maxsplit=1)
    cmd = parts[0].lower()

    if cmd == "clear":
        terminal.context_manager.conversation_history.clear()
        # In thinking mode, also reset the API client's task state.
        if terminal.thinking_mode:
            terminal.api_client.start_new_task(force_deep=terminal.deep_thinking_mode)
        emit('command_result', {
            'command': cmd,
            'success': True,
            'message': '对话已清除'
        })
    elif cmd == "status":
        status = terminal.get_status()
        # Enrich the status payload with terminal-session info when available.
        if terminal.terminal_manager:
            terminal_status = terminal.terminal_manager.list_terminals()
            status['terminals'] = terminal_status
        emit('command_result', {
            'command': cmd,
            'success': True,
            'data': status
        })
    elif cmd == "terminals":
        # List terminal sessions (requires the terminal subsystem).
        if terminal.terminal_manager:
            result = terminal.terminal_manager.list_terminals()
            emit('command_result', {
                'command': cmd,
                'success': True,
                'data': result
            })
        else:
            emit('command_result', {
                'command': cmd,
                'success': False,
                'message': '终端系统未初始化'
            })
    else:
        emit('command_result', {
            'command': cmd,
            'success': False,
            'message': f'未知命令: {cmd}'
        })
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/token-statistics', methods=['GET'])
@api_login_required
@with_terminal
def get_conversation_token_statistics(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Return token usage statistics recorded for one conversation."""
    try:
        stats = terminal.context_manager.get_conversation_token_statistics(conversation_id)

        if not stats:
            return jsonify({
                "success": False,
                "error": "Conversation not found",
                "message": f"对话 {conversation_id} 不存在"
            }), 404

        return jsonify({
            "success": True,
            "data": stats
        })

    except Exception as e:
        print(f"[API] 获取token统计错误: {e}")
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "获取token统计时发生异常"
        }), 500
|
||
|
||
|
||
@conversation_bp.route('/api/conversations/<conversation_id>/tokens', methods=['GET'])
@api_login_required
@with_terminal
def get_conversation_tokens(conversation_id, terminal: WebTerminal, workspace: UserWorkspace, username: str):
    """Current full-context token count for a conversation (dynamic content included)."""
    try:
        token_count = terminal.context_manager.get_current_context_tokens(conversation_id)
        return jsonify({
            "success": True,
            "data": {
                "total_tokens": token_count
            }
        })
    except Exception as e:
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
|
||
|
||
def calculate_directory_size(root: Path) -> int:
    """Total size in bytes of regular files under *root*.

    Symlinks are skipped entirely; unreadable or vanished entries are ignored.
    Uses an explicit stack instead of recursion to cope with deep trees.
    """
    if not root.exists():
        return 0

    size = 0
    pending = [root]
    while pending:
        directory = pending.pop()
        try:
            with os.scandir(directory) as entries:
                for item in entries:
                    try:
                        if item.is_symlink():
                            continue
                        if item.is_file(follow_symlinks=False):
                            size += item.stat(follow_symlinks=False).st_size
                        elif item.is_dir(follow_symlinks=False):
                            pending.append(Path(item.path))
                    except (OSError, FileNotFoundError, PermissionError):
                        # Entry disappeared or is unreadable — skip it.
                        continue
        except (NotADirectoryError, FileNotFoundError, PermissionError, OSError):
            # Directory itself unreadable or gone — skip it.
            continue
    return size
|
||
|
||
|
||
def iso_datetime_from_epoch(epoch: Optional[float]) -> Optional[str]:
    """Convert a Unix timestamp to a ``YYYY-MM-DDTHH:MM:SSZ`` UTC string.

    Returns None for falsy input (including 0 — kept for backward
    compatibility) or for out-of-range timestamps.
    """
    if not epoch:
        return None
    # Local import: the module header only pulls datetime/timedelta.
    from datetime import timezone
    try:
        # datetime.utcfromtimestamp is deprecated since Python 3.12; build an
        # aware UTC datetime and strip tzinfo to preserve the trailing "Z"
        # format exactly as before.
        dt = datetime.fromtimestamp(epoch, tz=timezone.utc)
        return dt.replace(microsecond=0, tzinfo=None).isoformat() + "Z"
    except (ValueError, OSError, OverflowError):
        return None
|
||
|
||
|
||
def compute_workspace_storage(workspace: UserWorkspace) -> Dict[str, Any]:
    """Measure on-disk usage of each workspace area and rate it against quota.

    The quota (``PROJECT_MAX_STORAGE_BYTES``) applies to the project area
    only; ``status`` escalates to "warning" at 80% and "critical" at 95%.
    """
    sizes = {
        "project_bytes": calculate_directory_size(workspace.project_path),
        "data_bytes": calculate_directory_size(workspace.data_dir),
        "logs_bytes": calculate_directory_size(workspace.logs_dir),
        "quarantine_bytes": calculate_directory_size(workspace.quarantine_dir),
        "uploads_bytes": calculate_directory_size(workspace.uploads_dir),
        "backups_bytes": calculate_directory_size(workspace.data_dir / "backups"),
    }

    usage_percent = None
    if PROJECT_MAX_STORAGE_BYTES:
        project_bytes = sizes["project_bytes"]
        usage_percent = round(project_bytes / PROJECT_MAX_STORAGE_BYTES * 100, 2) if project_bytes else 0.0

    if usage_percent is None:
        status = "ok"
    elif usage_percent >= 95:
        status = "critical"
    elif usage_percent >= 80:
        status = "warning"
    else:
        status = "ok"

    return {
        **sizes,
        # NOTE(review): uploads/backups are deliberately excluded from the
        # total — presumably they live inside areas already summed; confirm.
        "total_bytes": sizes["project_bytes"] + sizes["data_bytes"]
        + sizes["logs_bytes"] + sizes["quarantine_bytes"],
        "limit_bytes": PROJECT_MAX_STORAGE_BYTES,
        "usage_percent": usage_percent,
        "status": status,
    }
|
||
|
||
|
||
def collect_usage_snapshot(username: str, workspace: UserWorkspace, role: Optional[str]) -> Dict[str, Any]:
    """Summarise per-metric usage windows and quotas for one user.

    For each of the "fast"/"thinking"/"search" metrics, combine the tracker's
    live window counters with the configured quota (falling back to the
    project-wide defaults), plus the user's effective role.
    """
    tracker = get_or_create_usage_tracker(username, workspace)
    stats = tracker.get_stats()
    quotas = stats.get("quotas") or {}
    windows = stats.get("windows") or {}

    snapshot: Dict[str, Any] = {}
    for metric in ("fast", "thinking", "search"):
        window = windows.get(metric) or {}
        quota = quotas.get(metric) or {}
        fallback_limit = QUOTA_DEFAULTS.get("default", {}).get(metric, {}).get("limit", 0)
        snapshot[metric] = {
            "count": int(window.get("count", 0) or 0),
            "window_start": window.get("window_start"),
            "reset_at": window.get("reset_at") or quota.get("reset_at"),
            "limit": quota.get("limit", fallback_limit),
        }

    snapshot["role"] = role or quotas.get("role") or "user"
    return snapshot
|
||
|
||
|
||
def _read_token_totals_file(workspace: UserWorkspace) -> Dict[str, int]:
    """Read cumulative token counters from the workspace's token_totals.json.

    Accepts both the current key names and the legacy ``total_*`` variants;
    falls back to all-zero counters when the file is missing or unreadable.
    """
    totals_path = workspace.data_dir / "token_totals.json"
    if not totals_path.exists():
        return {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
    try:
        with open(totals_path, 'r', encoding='utf-8') as fh:
            raw = json.load(fh) or {}
        in_tok = int(raw.get("input_tokens") or raw.get("total_input_tokens") or 0)
        out_tok = int(raw.get("output_tokens") or raw.get("total_output_tokens") or 0)
        return {
            "input_tokens": in_tok,
            "output_tokens": out_tok,
            # Derive the total when the file doesn't record one explicitly.
            "total_tokens": int(raw.get("total_tokens") or (in_tok + out_tok)),
        }
    except (OSError, json.JSONDecodeError, ValueError) as exc:
        print(f"[admin] 解析 token_totals.json 失败 ({workspace.username}): {exc}")
        return {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
|
||
|
||
|
||
def _collect_conversation_token_totals(workspace: UserWorkspace) -> Dict[str, int]:
    """Aggregate legacy per-conversation token statistics for a workspace.

    Best-effort: any failure is logged and reported as all-zero counters.
    """
    try:
        stats = ConversationManager(base_dir=workspace.data_dir).get_statistics() or {}
        token_stats = stats.get("token_statistics") or {}
        in_tok = int(token_stats.get("total_input_tokens") or 0)
        out_tok = int(token_stats.get("total_output_tokens") or 0)
        return {
            "input_tokens": in_tok,
            "output_tokens": out_tok,
            # Derive the total when the stats don't record one explicitly.
            "total_tokens": int(token_stats.get("total_tokens") or (in_tok + out_tok)),
        }
    except Exception as exc:
        print(f"[admin] 读取 legacy token 统计失败 ({workspace.username}): {exc}")
        return {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
|
||
|
||
|
||
def collect_user_token_statistics(workspace: UserWorkspace) -> Dict[str, int]:
    """Merge file-based and legacy conversation token totals for one user.

    The two sources may lag each other, so the larger counter wins per key.
    """
    from_file = _read_token_totals_file(workspace)
    from_legacy = _collect_conversation_token_totals(workspace)
    return {
        key: max(from_file[key], from_legacy[key])
        for key in ("input_tokens", "output_tokens", "total_tokens")
    }
|
||
|
||
|
||
def compute_usage_leaders(users: List[Dict[str, Any]], metric: str, top_n: int = 5) -> List[Dict[str, Any]]:
    """Top *top_n* users by usage count for *metric*; zero-count rows dropped.

    Each returned row carries ``username``, ``count`` and ``limit`` (None when
    no limit is recorded for the metric).
    """
    rows: List[Dict[str, Any]] = []
    for entry in users:
        meta = entry.get("usage", {}).get(metric, {})
        rows.append({
            "username": entry["username"],
            "count": meta.get("count", 0),
            "limit": meta.get("limit"),
        })
    # Stable descending sort keeps input order among equal counts.
    rows.sort(key=lambda row: row["count"], reverse=True)
    return [row for row in rows[:top_n] if row["count"]]
|
||
|
||
|
||
def collect_user_snapshots(handle_map: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
    """Build per-user admin-dashboard snapshots plus fleet-wide aggregates.

    Args:
        handle_map: live session/container handles keyed by username; a user
            present here counts as "online". Handles may carry ``last_active``
            and ``mode`` — presumably produced by the container layer; confirm
            against the caller.

    Returns:
        Dict with the username-sorted ``items`` list and aggregate counters:
        role histogram, usage totals, token totals, storage byte totals, and
        active/total user counts.
    """
    user_map = user_manager.list_users()
    items: List[Dict[str, Any]] = []
    role_counter: Counter = Counter()
    usage_totals = {"fast": 0, "thinking": 0, "search": 0}
    token_totals = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
    storage_total_bytes = 0
    quarantine_total_bytes = 0
    now = time.time()

    for username, record in user_map.items():
        # Per-user measurements (these may touch disk for every user).
        workspace = user_manager.ensure_user_workspace(username)
        storage = compute_workspace_storage(workspace)
        usage = collect_usage_snapshot(username, workspace, record.role)
        tokens = collect_user_token_statistics(workspace)
        # Fold into fleet-wide totals.
        storage_total_bytes += storage["total_bytes"]
        quarantine_total_bytes += storage["quarantine_bytes"]
        for metric in usage_totals:
            usage_totals[metric] += usage.get(metric, {}).get("count", 0)
        for key in token_totals:
            token_totals[key] += tokens.get(key, 0)
        normalized_role = (record.role or "user").lower()
        role_counter[normalized_role] += 1
        # Online status / idle time come from the live handle, if any.
        handle = handle_map.get(username)
        handle_last = handle.get("last_active") if handle else None
        last_active = get_last_active_ts(username, handle_last)
        idle_seconds = max(0.0, now - last_active) if last_active else None
        items.append({
            "username": username,
            "email": record.email,
            "role": record.role or "user",
            "created_at": record.created_at,
            "invite_code": record.invite_code,
            "storage": storage,
            "usage": usage,
            "tokens": tokens,
            "workspace": {
                "project_path": str(workspace.project_path),
                "data_dir": str(workspace.data_dir),
                "logs_dir": str(workspace.logs_dir),
                "uploads_dir": str(workspace.uploads_dir),
            },
            "status": {
                "online": handle is not None,
                "container_mode": handle.get("mode") if handle else None,
                "last_active": iso_datetime_from_epoch(last_active),
                "idle_seconds": idle_seconds,
            },
        })

    # Deterministic ordering for the dashboard table.
    items.sort(key=lambda entry: entry["username"])
    return {
        "items": items,
        "roles": dict(role_counter),
        "usage_totals": usage_totals,
        "token_totals": token_totals,
        "storage_total_bytes": storage_total_bytes,
        "quarantine_total_bytes": quarantine_total_bytes,
        "active_users": sum(1 for entry in items if entry["status"]["online"]),
        "total_users": len(items),
    }
|
||
|
||
|
||
def collect_container_snapshots(handle_map: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
    """Collect per-user container status plus fleet-wide resource aggregates.

    Args:
        handle_map: Mapping of username -> container handle dict as returned by
            ``container_manager.list_containers()``.

    Returns:
        ``{"items": [...], "summary": {...}}`` where ``items`` holds one status
        entry per user (sorted by username) and ``summary`` holds fleet totals
        (counts, averaged CPU/memory percentages, summed memory and network I/O).
    """
    items: List[Dict[str, Any]] = []
    # Percent samples are averaged at the end; byte counters are summed.
    cpu_values: List[float] = []
    mem_percent_values: List[float] = []
    total_mem_used = 0
    total_mem_limit = 0
    total_net_rx = 0
    total_net_tx = 0
    docker_count = 0
    failure_count = 0
    now = time.time()

    for username, handle in sorted(handle_map.items()):
        try:
            status = container_manager.get_container_status(username)
        except Exception as exc:
            # Status probe failed: synthesize a minimal record from the cached
            # handle so the user still appears in the dashboard, with the error.
            status = {
                "username": username,
                "mode": handle.get("mode"),
                "error": str(exc),
                "workspace_path": handle.get("workspace_path"),
            }
        stats = status.get("stats") or {}
        state = status.get("state") or {}
        if status.get("mode") == "docker":
            docker_count += 1
        last_active = get_last_active_ts(username, handle.get("last_active"))
        idle_seconds = max(0.0, now - last_active) if last_active else None
        entry = {
            "username": username,
            # Live status wins; fall back to the cached handle values.
            "mode": status.get("mode", handle.get("mode")),
            "workspace_path": status.get("workspace_path") or handle.get("workspace_path"),
            "container_name": status.get("container_name") or handle.get("container_name"),
            "created_at": iso_datetime_from_epoch(status.get("created_at") or handle.get("created_at")),
            "last_active": iso_datetime_from_epoch(status.get("last_active") or last_active),
            "idle_seconds": idle_seconds,
            "stats": stats,
            "state": state,
            "error": status.get("error"),
        }
        # An entry counts as an issue when the probe reported an error, or when
        # state is present but says the container is not running.
        if entry["error"] or (state and not state.get("running", True)):
            failure_count += 1
        mem_info = stats.get("memory") or {}
        net_info = stats.get("net_io") or {}
        cpu_val = stats.get("cpu_percent")
        mem_percent = mem_info.get("percent")
        mem_used = mem_info.get("used_bytes")
        mem_limit = mem_info.get("limit_bytes")
        rx_bytes = net_info.get("rx_bytes")
        tx_bytes = net_info.get("tx_bytes")
        # Stats dicts may be partial (host mode, failed probes); only numeric
        # values contribute to the fleet aggregates.
        if isinstance(cpu_val, (int, float)):
            cpu_values.append(cpu_val)
        if isinstance(mem_percent, (int, float)):
            mem_percent_values.append(mem_percent)
        if isinstance(mem_used, (int, float)):
            total_mem_used += mem_used
        if isinstance(mem_limit, (int, float)):
            total_mem_limit += mem_limit
        if isinstance(rx_bytes, (int, float)):
            total_net_rx += rx_bytes
        if isinstance(tx_bytes, (int, float)):
            total_net_tx += tx_bytes
        items.append(entry)

    active_total = len(handle_map)
    summary = {
        "active": active_total,
        "docker": docker_count,
        # Non-docker handles run directly on the host.
        "host": active_total - docker_count,
        "issues": failure_count,
        "max_containers": container_manager.max_containers,
        # max_containers <= 0 means "unlimited" -> no slot count.
        "available_slots": max(0, container_manager.max_containers - active_total) if container_manager.max_containers > 0 else None,
        "avg_cpu_percent": round(sum(cpu_values) / len(cpu_values), 2) if cpu_values else None,
        "avg_mem_percent": round(sum(mem_percent_values) / len(mem_percent_values), 2) if mem_percent_values else None,
        "total_mem_used_bytes": total_mem_used,
        "total_mem_limit_bytes": total_mem_limit,
        "net_rx_bytes": total_net_rx,
        "net_tx_bytes": total_net_tx,
    }
    return {"items": items, "summary": summary}
|
||
|
||
|
||
def parse_upload_line(line: str) -> Optional[Dict[str, Any]]:
    """Extract the JSON payload from an ``UPLOAD_AUDIT`` log line.

    Returns the decoded event dict, augmented with a ``_dt`` key holding the
    parsed ``timestamp`` as a :class:`datetime` (``None`` when the timestamp is
    missing or unparseable). Returns ``None`` when the line has no marker or
    carries invalid JSON.
    """
    marker = "UPLOAD_AUDIT "
    _, found, payload = line.partition(marker)
    if not found:
        return None
    try:
        data = json.loads(payload.strip())
    except json.JSONDecodeError:
        return None
    parsed_ts: Optional[datetime] = None
    raw_ts = data.get("timestamp")
    if isinstance(raw_ts, str):
        try:
            parsed_ts = datetime.fromisoformat(raw_ts)
        except ValueError:
            parsed_ts = None
    data["_dt"] = parsed_ts
    return data
|
||
|
||
|
||
def collect_upload_events(limit: int = RECENT_UPLOAD_EVENT_LIMIT) -> List[Dict[str, Any]]:
    """Gather the most recent ``UPLOAD_AUDIT`` events across all scan logs.

    Reads every ``*.log`` file under the upload-scan log directory, keeping at
    most ``limit`` matching lines per file (a bounded deque tail), then merges
    and sorts all parsed events newest-first and returns the top ``limit``.
    Unreadable files are skipped silently.
    """
    log_root = (Path(LOGS_DIR).expanduser().resolve() / UPLOAD_SCAN_LOG_SUBDIR).resolve()
    if not log_root.exists():
        return []
    events: List[Dict[str, Any]] = []
    for log_path in sorted(log_root.glob('*.log')):
        tail: deque = deque(maxlen=limit)
        try:
            with open(log_path, 'r', encoding='utf-8') as stream:
                tail.extend(raw.strip() for raw in stream if 'UPLOAD_AUDIT' in raw)
        except OSError:
            continue
        events.extend(evt for evt in map(parse_upload_line, tail) if evt)
    # Newest first; events without a parsed timestamp sort to the end via the
    # datetime.min sentinel.
    events.sort(key=lambda item: item.get('_dt') or datetime.min, reverse=True)
    return events[:limit]
|
||
|
||
|
||
def summarize_upload_events(events: List[Dict[str, Any]], quarantine_total_bytes: int) -> Dict[str, Any]:
    """Build the admin-facing upload statistics block and recent-event feed.

    ``events`` is the newest-first list from :func:`collect_upload_events`;
    entries carry an internal ``_dt`` datetime that is stripped before events
    are exposed in the feed.
    """
    # NOTE(review): utcnow() is naive by design — ``_dt`` values come from
    # fromisoformat() and are typically naive too; a timezone-aware cutoff
    # would make the >= comparison raise. utcnow() is deprecated in 3.12+,
    # but keep it until the log timestamps become timezone-aware.
    cutoff = datetime.utcnow() - timedelta(hours=24)
    recent = [evt for evt in events if evt.get('_dt') and evt['_dt'] >= cutoff]
    accepted_count = sum(1 for evt in recent if evt.get('accepted'))
    skipped_count = sum(
        1 for evt in recent if (evt.get('scan') or {}).get('status') == 'skipped'
    )
    source_counts = Counter((evt.get('source') or 'unknown') for evt in events)
    # Expose only the most recent slice, with the internal ``_dt`` key removed.
    feed = [
        {key: value for key, value in evt.items() if key != '_dt'}
        for evt in events[:RECENT_UPLOAD_FEED_LIMIT]
    ]
    return {
        "stats": {
            "total_tracked": len(events),
            "last_24h": len(recent),
            "accepted_last_24h": accepted_count,
            "blocked_last_24h": len(recent) - accepted_count,
            "skipped_scan_last_24h": skipped_count,
            "quarantine_bytes": quarantine_total_bytes,
        },
        "recent_events": feed,
        "sources": [
            {"source": src, "count": count} for src, count in source_counts.most_common()
        ],
    }
|
||
|
||
|
||
def summarize_invite_codes(codes: List[Dict[str, Any]]) -> Dict[str, int]:
    """Bucket invite codes into active / consumed / unlimited counts.

    A code with ``remaining`` of ``None`` is unlimited, a positive remaining
    count is active, and zero (or negative) is consumed.
    """
    buckets = {"active": 0, "consumed": 0, "unlimited": 0}
    for code in codes:
        remaining = code.get('remaining')
        if remaining is None:
            buckets["unlimited"] += 1
        elif remaining > 0:
            buckets["active"] += 1
        else:
            buckets["consumed"] += 1
    return {"total": len(codes), **buckets}
|
||
|
||
|
||
def build_admin_dashboard_snapshot() -> Dict[str, Any]:
    """Assemble the complete admin dashboard payload.

    Combines per-user snapshots, container fleet status, invite-code summary,
    and upload-audit statistics into one JSON-serializable dict with an
    ``overview`` section (aggregates) plus detailed ``users``, ``containers``,
    ``invites``, and ``uploads`` sections.

    Returns:
        Dict with keys ``generated_at``, ``overview``, ``users``,
        ``containers``, ``invites``, ``uploads``.
    """
    handle_map = container_manager.list_containers()
    user_data = collect_user_snapshots(handle_map)
    container_data = collect_container_snapshots(handle_map)
    invite_codes = user_manager.list_invite_codes()
    # Fix: summarize once and reuse — the original recomputed this for both
    # the overview and the invites section.
    invite_summary = summarize_invite_codes(invite_codes)
    upload_events = collect_upload_events()
    uploads_summary = summarize_upload_events(upload_events, user_data['quarantine_total_bytes'])
    overview = {
        # NOTE(review): utcnow() is deprecated in 3.12+; kept for consistency
        # with the naive timestamps used by the upload-event pipeline.
        "generated_at": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
        "totals": {
            "users": user_data['total_users'],
            "active_users": user_data['active_users'],
            "containers_active": container_data['summary']['active'],
            "containers_max": container_data['summary']['max_containers'],
            "available_container_slots": container_data['summary']['available_slots'],
        },
        "roles": user_data['roles'],
        "usage_totals": user_data['usage_totals'],
        "token_totals": user_data['token_totals'],
        "usage_leaders": {
            metric: compute_usage_leaders(user_data['items'], metric)
            for metric in ("fast", "thinking", "search")
        },
        "storage": {
            "total_bytes": user_data['storage_total_bytes'],
            "per_user_limit_bytes": PROJECT_MAX_STORAGE_BYTES,
            "project_max_mb": PROJECT_MAX_STORAGE_MB,
            # Users whose storage status is anything other than 'ok'
            # (approaching or exceeding their quota).
            "warning_users": [
                {
                    "username": entry['username'],
                    "usage_percent": entry['storage']['usage_percent'],
                    "status": entry['storage']['status'],
                }
                for entry in user_data['items']
                if entry['storage']['status'] != 'ok'
            ],
        },
        "containers": container_data['summary'],
        "invites": invite_summary,
        "uploads": uploads_summary['stats'],
    }
    return {
        "generated_at": overview['generated_at'],
        "overview": overview,
        "users": user_data['items'],
        "containers": container_data['items'],
        "invites": {
            "summary": invite_summary,
            "codes": invite_codes,
        },
        "uploads": uploads_summary,
    }