feat: add project-level model config and dynamic max_tokens

This commit is contained in:
JOJO 2026-02-28 21:50:32 +08:00
parent 1f456a732a
commit 1775b47b34
8 changed files with 311 additions and 82 deletions

33
models.json Normal file
View File

@ -0,0 +1,33 @@
{
"tavily_api_key": "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",
"default_model": "kimi-k2.5",
"models": [
{
"url": "https://api.moonshot.cn/v1",
"name": "kimi-k2.5",
"apikey": "sk-xW0xjfQM6Mp9ZCWMLlnHiRJcpEOIZPTkXcN0dQ15xpZSuw2y",
"modes": "快速,思考",
"multimodal": "图片,视频",
"max_output": 32000,
"max_context": 256000
},
{
"url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"name": "qwen3.5-plus",
"apikey": "sk-64af1343e67d46d7a902ef5bcf6817ad",
"modes": "快速,思考",
"multimodal": "图片",
"max_output": 32768,
"max_context": 256000
},
{
"url": "https://api.minimaxi.com/v1",
"name": "minimax-m2.5",
"apikey": "sk-api-iUGqWBvtdqbm74ch4wy__PGZE5xsWzYiHZsWS9naQCWILeRiH9SNWJmnFPbGYDEF37lNjO4GYJPYilB5Z82FmUyVXuKzwNUgk9BvJY5v-lMtRJy0CDrqWCw",
"modes": "思考",
"multimodal": "",
"max_output": 65536,
"max_context": 204800
}
]
}

View File

@ -2,7 +2,7 @@
const { createConversation, loadConversation, listConversations, updateConversation } = require('../storage/conversation_store'); const { createConversation, loadConversation, listConversations, updateConversation } = require('../storage/conversation_store');
const { formatRelativeTime } = require('../utils/time'); const { formatRelativeTime } = require('../utils/time');
const { maskKey } = require('../config'); const { maskKey, getModelByKey } = require('../config');
const { runSelect } = require('../ui/select_prompt'); const { runSelect } = require('../ui/select_prompt');
const { runResumeMenu } = require('../ui/resume_menu'); const { runResumeMenu } = require('../ui/resume_menu');
const { buildFinalLine, formatResultLines, printResultLines } = require('../ui/tool_display'); const { buildFinalLine, formatResultLines, printResultLines } = require('../ui/tool_display');
@ -30,6 +30,17 @@ function printNotice(message) {
console.log(''); console.log('');
} }
/**
 * Point `state` at the model identified by `modelKey` and derive its thinking mode.
 * Leaves `state` untouched and returns false when the key resolves to no model
 * or to an invalid one; returns true on success.
 * @param {object} state - mutable session state (modelKey/modelId/thinkingMode are written)
 * @param {object} config - runtime config consulted via getModelByKey
 * @param {string} modelKey - key of the model to activate
 * @param {boolean} preferredThinking - caller's preference, honoured only for dual-mode models
 * @returns {boolean} whether the switch was applied
 */
function applyModelState(state, config, modelKey, preferredThinking) {
  const model = getModelByKey(config, modelKey);
  const usable = Boolean(model) && model.valid;
  if (!usable) return false;

  state.modelKey = model.key;
  state.modelId = model.model_id || model.name || model.key;

  // Single-mode models force the switch; dual-mode models honour the caller's preference.
  switch (model.modes) {
    case 'fast':
      state.thinkingMode = false;
      break;
    case 'thinking':
      state.thinkingMode = true;
      break;
    default:
      state.thinkingMode = Boolean(preferredThinking);
      break;
  }
  return true;
}
async function handleCommand(input, ctx) { async function handleCommand(input, ctx) {
const { rl, state, config, workspace, statusBar } = ctx; const { rl, state, config, workspace, statusBar } = ctx;
const persist = () => { const persist = () => {
@ -83,7 +94,10 @@ async function handleCommand(input, ctx) {
} }
state.conversation = conv; state.conversation = conv;
state.messages = conv.messages || []; state.messages = conv.messages || [];
state.thinkingMode = !!conv.metadata?.thinking_mode; const ok = applyModelState(state, config, conv.metadata?.model_key || state.modelKey, !!conv.metadata?.thinking_mode);
if (!ok) {
printNotice('对话中的模型不可用,已保留当前模型');
}
state.allowMode = conv.metadata?.allow_mode || state.allowMode; state.allowMode = conv.metadata?.allow_mode || state.allowMode;
state.tokenUsage = normalizeTokenUsage(conv.metadata?.token_usage); state.tokenUsage = normalizeTokenUsage(conv.metadata?.token_usage);
printNotice(`已加载对话: ${conv.id}`); printNotice(`已加载对话: ${conv.id}`);
@ -133,7 +147,10 @@ async function handleCommand(input, ctx) {
} }
state.conversation = conv; state.conversation = conv;
state.messages = conv.messages || []; state.messages = conv.messages || [];
state.thinkingMode = !!conv.metadata?.thinking_mode; const ok = applyModelState(state, config, conv.metadata?.model_key || state.modelKey, !!conv.metadata?.thinking_mode);
if (!ok) {
printNotice('对话中的模型不可用,已保留当前模型');
}
state.allowMode = conv.metadata?.allow_mode || state.allowMode; state.allowMode = conv.metadata?.allow_mode || state.allowMode;
state.tokenUsage = normalizeTokenUsage(conv.metadata?.token_usage); state.tokenUsage = normalizeTokenUsage(conv.metadata?.token_usage);
printNotice(`已加载对话: ${conv.id}`); printNotice(`已加载对话: ${conv.id}`);
@ -163,15 +180,39 @@ async function handleCommand(input, ctx) {
} }
if (cmd === '/model') { if (cmd === '/model') {
const modelChoices = [ const models = config.valid_models || [];
{ name: `1. Kimi${state.modelKey === 'kimi-k2.5' ? ' (current)' : ''}`, value: 'kimi-k2.5' }, if (!models.length) {
]; printNotice('未找到可用模型,请先完善 models.json');
const model = await runSelect({ rl, message: '', choices: modelChoices, pageSize: 6 }); return { exit: false };
if (!model) return { exit: false }; }
state.modelKey = model; const modelChoices = models.map((model, idx) => ({
state.modelId = config.model_id || 'kimi-k2.5'; name: `${idx + 1}. ${model.name}${state.modelKey === model.key ? ' (current)' : ''}`,
value: model.key,
}));
const modelKey = await runSelect({ rl, message: '', choices: modelChoices, pageSize: 6 });
if (!modelKey) return { exit: false };
const selected = getModelByKey(config, modelKey);
if (!selected || !selected.valid) {
printNotice('模型配置无效');
return { exit: false };
}
state.modelKey = selected.key;
state.modelId = selected.model_id || selected.name || selected.key;
printNotice(`模型已切换为: ${state.modelKey}`); printNotice(`模型已切换为: ${state.modelKey}`);
if (selected.modes === 'fast') {
state.thinkingMode = false;
printNotice('思考模式: fast');
persist();
return { exit: false };
}
if (selected.modes === 'thinking') {
state.thinkingMode = true;
printNotice('思考模式: thinking');
persist();
return { exit: false };
}
const thinkingChoices = [ const thinkingChoices = [
{ name: `1. Fast${!state.thinkingMode ? ' (current)' : ''}`, value: 'fast' }, { name: `1. Fast${!state.thinkingMode ? ' (current)' : ''}`, value: 'fast' },
{ name: `2. Thinking${state.thinkingMode ? ' (current)' : ''}`, value: 'thinking' }, { name: `2. Thinking${state.thinkingMode ? ' (current)' : ''}`, value: 'thinking' },
@ -186,24 +227,41 @@ async function handleCommand(input, ctx) {
} }
if (cmd === '/status') { if (cmd === '/status') {
const usage = normalizeTokenUsage(state.tokenUsage);
const model = getModelByKey(config, state.modelKey);
const title = 'Status'; const title = 'Status';
const maxContext = model && model.max_context ? model.max_context : '';
const maxOutput = model && model.max_output ? model.max_output : '';
const lines = [ const lines = [
`model: ${state.modelKey}`, `model: ${state.modelKey}`,
`thinking: ${state.thinkingMode ? 'thinking' : 'fast'}`, `thinking: ${state.thinkingMode ? 'thinking' : 'fast'}`,
`workspace: ${workspace}`, `workspace: ${workspace}`,
`allow: ${state.allowMode}`, `allow: ${state.allowMode}`,
`conversation: ${state.conversation?.id || 'none'}`, `conversation: ${state.conversation?.id || 'none'}`,
`token usage: ${state.tokenUsage}`, `tokens(in): ${usage.prompt}`,
`tokens(out): ${usage.completion}`,
`tokens(total): ${usage.total}`,
`max_context: ${maxContext}`,
`max_output: ${maxOutput}`,
]; ];
renderBox({ title, lines }); renderBox({ title, lines });
return { exit: false }; return { exit: false };
} }
if (cmd === '/config') { if (cmd === '/config') {
const model = getModelByKey(config, state.modelKey);
console.log(''); console.log('');
console.log(`base_url: ${config.base_url}`); console.log(`config: ${config.path || ''}`);
console.log(`modelname: ${config.model_id || 'kimi-k2.5'}`); if (model) {
console.log(`apikey: ${maskKey(config.api_key)}`); console.log(`base_url: ${model.base_url || ''}`);
console.log(`modelname: ${model.model_id || model.name || ''}`);
console.log(`apikey: ${maskKey(model.api_key)}`);
console.log(`modes: ${model.modes || ''}`);
console.log(`multimodal: ${model.multimodal || ''}`);
console.log(`max_output: ${model.max_output || ''}`);
console.log(`max_context: ${model.max_context || ''}`);
}
console.log(`tavily_api_key: ${maskKey(config.tavily_api_key)}`);
console.log(''); console.log('');
return { exit: false }; return { exit: false };
} }

View File

@ -34,6 +34,12 @@ const IMAGE_EXTS = new Set(['.png', '.jpg', '.jpeg', '.gif', '.webp', '.bmp', '.
const VIDEO_EXTS = new Set(['.mp4', '.mov', '.avi', '.mkv', '.webm', '.m4v']); const VIDEO_EXTS = new Set(['.mp4', '.mov', '.avi', '.mkv', '.webm', '.m4v']);
const config = ensureConfig(); const config = ensureConfig();
if (!config.valid_models || config.valid_models.length === 0) {
console.log('');
console.log(`未找到可用模型,请先在 ${config.path} 填写完整模型信息。`);
console.log('');
process.exit(1);
}
const state = createState(config, WORKSPACE); const state = createState(config, WORKSPACE);
state.conversation = createConversation(WORKSPACE, { state.conversation = createConversation(WORKSPACE, {
model_key: state.modelKey, model_key: state.modelKey,
@ -640,7 +646,16 @@ async function runAssistantLoop() {
const streamController = new AbortController(); const streamController = new AbortController();
activeStreamController = streamController; activeStreamController = streamController;
try { try {
for await (const chunk of streamChat({ config, messages, tools, thinkingMode: state.thinkingMode, abortSignal: streamController.signal })) { const currentContextTokens = normalizeTokenUsage(state.tokenUsage).total || 0;
for await (const chunk of streamChat({
config,
modelKey: state.modelKey,
messages,
tools,
thinkingMode: state.thinkingMode,
currentContextTokens,
abortSignal: streamController.signal,
})) {
const choice = chunk.choices && chunk.choices[0]; const choice = chunk.choices && chunk.choices[0];
if (!choice) continue; if (!choice) continue;
const usage = (choice && (choice.usage || choice.delta?.usage)) || chunk.usage; const usage = (choice && (choice.usage || choice.delta?.usage)) || chunk.usage;
@ -651,10 +666,29 @@ async function runAssistantLoop() {
} }
const delta = choice.delta || {}; const delta = choice.delta || {};
if (delta.reasoning_content || delta.reasoning_details) { if (delta.reasoning_content || delta.reasoning_details || choice.reasoning_details) {
thinkingActive = true; thinkingActive = true;
showThinkingLabel = true; showThinkingLabel = true;
const rc = delta.reasoning_content || (Array.isArray(delta.reasoning_details) ? delta.reasoning_details.map((d) => d.text || '').join('') : ''); let rc = '';
if (delta.reasoning_content) {
rc = delta.reasoning_content;
} else if (delta.reasoning_details) {
if (Array.isArray(delta.reasoning_details)) {
rc = delta.reasoning_details.map((d) => d.text || '').join('');
} else if (typeof delta.reasoning_details === 'string') {
rc = delta.reasoning_details;
} else if (delta.reasoning_details && typeof delta.reasoning_details.text === 'string') {
rc = delta.reasoning_details.text;
}
} else if (choice.reasoning_details) {
if (Array.isArray(choice.reasoning_details)) {
rc = choice.reasoning_details.map((d) => d.text || '').join('');
} else if (typeof choice.reasoning_details === 'string') {
rc = choice.reasoning_details;
} else if (choice.reasoning_details && typeof choice.reasoning_details.text === 'string') {
rc = choice.reasoning_details.text;
}
}
fullThinkingBuffer += rc; fullThinkingBuffer += rc;
thinkingBuffer = truncateThinking(fullThinkingBuffer); thinkingBuffer = truncateThinking(fullThinkingBuffer);
} }

View File

@ -1,61 +1,116 @@
'use strict'; 'use strict';
const fs = require('fs'); const fs = require('fs');
const os = require('os');
const path = require('path'); const path = require('path');
const DEFAULT_ENV_PATH = '/Users/jojo/Desktop/agents/正在修复中/agents/.env'; const DEFAULT_CONFIG_NAME = 'models.json';
const DEFAULT_CONFIG_PATH = path.resolve(__dirname, '..', DEFAULT_CONFIG_NAME);
function parseEnv(content) { function isNonEmptyString(value) {
const out = {}; return typeof value === 'string' && value.trim().length > 0;
const lines = content.split(/\r?\n/);
for (const line of lines) {
const trimmed = line.trim();
if (!trimmed || trimmed.startsWith('#')) continue;
const idx = trimmed.indexOf('=');
if (idx === -1) continue;
const key = trimmed.slice(0, idx).trim();
const val = trimmed.slice(idx + 1).trim();
out[key] = val;
}
return out;
} }
function loadEnvFile(envPath) { function parsePositiveInt(value) {
try { if (value === null || value === undefined || value === '') return null;
if (fs.existsSync(envPath)) { const num = Number(value);
const content = fs.readFileSync(envPath, 'utf8'); if (!Number.isFinite(num)) return null;
return parseEnv(content); const out = Math.floor(num);
} return out > 0 ? out : null;
} catch (_) {}
return {};
} }
function getDefaultConfig() { function normalizeModes(value) {
const env = loadEnvFile(DEFAULT_ENV_PATH); if (!value) return null;
const baseUrl = env.API_BASE_KIMI || env.AGENT_API_BASE_URL || 'https://api.moonshot.cn/v1'; const text = String(value).trim().toLowerCase();
const apiKey = env.API_KEY_KIMI || env.AGENT_API_KEY || 'sk-xW0xjfQM6Mp9ZCWMLlnHiRJcpEOIZPTkXcN0dQ15xpZSuw2y'; if (!text) return null;
const modelId = env.MODEL_KIMI_25 || 'kimi-k2.5'; const parts = text.split(/[,\s]+/).filter(Boolean);
const tavilyKey = env.AGENT_TAVILY_API_KEY || env.TAVILY_API_KEY || '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'; const hasFast = parts.some((p) => p.includes('fast') || p.includes('快速'));
const hasThinking = parts.some((p) => p.includes('thinking') || p.includes('思考'));
if (hasFast && hasThinking) return 'fast+thinking';
if (hasThinking) return 'thinking';
if (hasFast) return 'fast';
return null;
}
/**
 * Normalize a free-form multimodal capability string (Chinese or English)
 * into one of: 'none', 'image', 'image+video', or null for unrecognized input
 * (null marks the model entry as invalid downstream).
 * @param {*} value - raw "multimodal" field from models.json
 * @returns {string|null} canonical capability tag, or null when unrecognized
 */
function normalizeMultimodal(value) {
  if (value === null || value === undefined) return 'none';
  const text = String(value).trim().toLowerCase();
  if (!text) return 'none';
  // NOTE(review): substring match means any value containing "no" is treated
  // as "none" — preserved from the original; confirm this is intended.
  if (text.includes('无') || text.includes('none') || text.includes('no')) return 'none';
  const tokens = text.split(/[,\s]+/).filter(Boolean);
  const mentionsVideo = tokens.some((t) => t.includes('视频') || t.includes('video'));
  if (mentionsVideo) return 'image+video'; // video capability implies image support here
  const mentionsImage = tokens.some((t) => t.includes('图片') || t.includes('image'));
  if (mentionsImage) return 'image';
  return null;
}
function normalizeModel(raw) {
const name = String(raw.name || raw.model_name || raw.model || '').trim();
const url = String(raw.url || raw.base_url || '').trim();
const apiKey = String(raw.apikey || raw.api_key || '').trim();
const modes = normalizeModes(raw.modes || raw.mode || raw.supported_modes);
const multimodal = normalizeMultimodal(raw.multimodal || raw.multi_modal || raw.multi);
const maxOutput = parsePositiveInt(raw.max_output ?? raw.max_tokens ?? raw.max_output_tokens);
const maxContext = parsePositiveInt(raw.max_context ?? raw.context_window ?? raw.max_context_tokens);
const valid = Boolean(
isNonEmptyString(name)
&& isNonEmptyString(url)
&& isNonEmptyString(apiKey)
&& modes
&& multimodal
&& maxOutput
&& maxContext
);
return { return {
base_url: baseUrl, key: name,
name,
model_id: name,
base_url: url,
api_key: apiKey, api_key: apiKey,
default_model_key: 'kimi-k2.5', modes,
model_id: modelId, multimodal,
tavily_api_key: tavilyKey, max_output: maxOutput,
max_context: maxContext,
valid,
};
}
/**
 * Turn the raw parsed models.json payload into the runtime config object.
 * Normalizes every model entry, indexes them by key, and resolves the
 * default model key (falling back to the first valid model when the
 * requested default is missing or invalid).
 * @param {object} raw - parsed JSON content of models.json
 * @param {string} filePath - absolute path the config was loaded from
 * @returns {object} config with path, tavily_api_key, models, valid_models,
 *   model_map (Map keyed by model key) and default_model_key
 */
function buildConfig(raw, filePath) {
  const rawModels = Array.isArray(raw.models) ? raw.models : [];
  const models = rawModels.map((entry) => normalizeModel(entry || {}));

  // Later duplicates of the same key overwrite earlier ones in the lookup map.
  const modelMap = new Map();
  for (const model of models) {
    if (model.key) modelMap.set(model.key, model);
  }

  const validModels = models.filter((m) => m.valid);
  const requestedDefault = String(raw.default_model || raw.default_model_key || '').trim();
  let defaultKey = '';
  if (modelMap.get(requestedDefault)?.valid) {
    defaultKey = requestedDefault;
  } else if (validModels.length > 0) {
    defaultKey = validModels[0].key;
  }

  return {
    path: filePath,
    tavily_api_key: String(raw.tavily_api_key || '').trim(),
    models,
    valid_models: validModels,
    model_map: modelMap,
    default_model_key: defaultKey,
  };
}
function ensureConfig() { function ensureConfig() {
const dir = path.join(os.homedir(), '.easyagent'); const file = DEFAULT_CONFIG_PATH;
const file = path.join(dir, 'config.json');
if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
if (!fs.existsSync(file)) { if (!fs.existsSync(file)) {
const cfg = getDefaultConfig(); const template = {
fs.writeFileSync(file, JSON.stringify(cfg, null, 2), 'utf8'); tavily_api_key: '',
default_model: '',
models: [],
};
fs.writeFileSync(file, JSON.stringify(template, null, 2), 'utf8');
} }
const content = fs.readFileSync(file, 'utf8'); const content = fs.readFileSync(file, 'utf8');
return JSON.parse(content); const raw = JSON.parse(content);
return buildConfig(raw, file);
} }
function maskKey(key) { function maskKey(key) {
@ -64,4 +119,15 @@ function maskKey(key) {
return `${key.slice(0, 3)}...${key.slice(-3)}`; return `${key.slice(0, 3)}...${key.slice(-3)}`;
} }
module.exports = { ensureConfig, maskKey }; function getModelByKey(config, key) {
if (!config || !key) return null;
if (config.model_map && typeof config.model_map.get === 'function') {
return config.model_map.get(key) || null;
}
if (Array.isArray(config.models)) {
return config.models.find((m) => m && m.key === key) || null;
}
return null;
}
module.exports = { ensureConfig, maskKey, getModelByKey };

View File

@ -1,12 +1,25 @@
'use strict'; 'use strict';
/**
 * Resolve the startup model selection from config.default_model_key.
 * When the key is absent from model_map (or no map exists), falls back to
 * the bare key with thinking mode off.
 * @param {object} config - runtime config (default_model_key, model_map)
 * @returns {{modelKey: string, modelId: string, thinkingMode: boolean}}
 */
function resolveDefaultModel(config) {
  const key = config.default_model_key || '';
  let model = null;
  if (config.model_map && typeof config.model_map.get === 'function') {
    model = config.model_map.get(key);
  }
  const modelKey = (model && model.key) ? model.key : key;
  let modelId = modelKey;
  if (model && (model.model_id || model.name)) {
    modelId = model.model_id || model.name;
  }
  // Thinking defaults on for any model that supports it at all.
  let thinkingMode = false;
  if (model) {
    thinkingMode = model.modes === 'thinking' || model.modes === 'fast+thinking';
  }
  return { modelKey, modelId, thinkingMode };
}
function createState(config, workspace) { function createState(config, workspace) {
const resolved = resolveDefaultModel(config);
return { return {
workspace, workspace,
allowMode: 'full_access', allowMode: 'full_access',
modelKey: config.default_model_key || 'kimi-k2.5', modelKey: resolved.modelKey,
modelId: config.model_id || 'kimi-k2.5', modelId: resolved.modelId,
thinkingMode: true, thinkingMode: resolved.thinkingMode,
tokenUsage: { prompt: 0, completion: 0, total: 0 }, tokenUsage: { prompt: 0, completion: 0, total: 0 },
conversation: null, conversation: null,
messages: [], messages: [],

View File

@ -2,8 +2,19 @@
const { getModelProfile } = require('./model_profiles'); const { getModelProfile } = require('./model_profiles');
async function* streamChat({ config, messages, tools, thinkingMode, abortSignal }) { function computeMaxTokens(profile, currentContextTokens) {
const profile = getModelProfile(config); const baseMax = Number.isFinite(profile.max_output) ? Math.max(1, Math.floor(profile.max_output)) : null;
if (!baseMax) return null;
const maxContext = Number.isFinite(profile.max_context) ? Math.max(1, Math.floor(profile.max_context)) : null;
if (!maxContext) return baseMax;
const used = Number.isFinite(currentContextTokens) ? Math.max(0, Math.floor(currentContextTokens)) : 0;
const available = maxContext - used;
if (available <= 0) return 1;
return Math.min(baseMax, available);
}
async function* streamChat({ config, modelKey, messages, tools, thinkingMode, currentContextTokens, abortSignal }) {
const profile = getModelProfile(config, modelKey);
const url = `${profile.base_url}/chat/completions`; const url = `${profile.base_url}/chat/completions`;
const headers = { const headers = {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
@ -18,8 +29,12 @@ async function* streamChat({ config, messages, tools, thinkingMode, abortSignal
stream: true, stream: true,
stream_options: { include_usage: true }, stream_options: { include_usage: true },
}; };
payload.reasoning_split = true;
const maxTokens = computeMaxTokens(profile, currentContextTokens);
if (maxTokens) payload.max_tokens = maxTokens;
if (thinkingMode) { const useThinking = thinkingMode && profile.supports_thinking;
if (useThinking) {
Object.assign(payload, profile.thinking_params.thinking); Object.assign(payload, profile.thinking_params.thinking);
} else { } else {
Object.assign(payload, profile.thinking_params.fast); Object.assign(payload, profile.thinking_params.fast);

View File

@ -1,25 +1,35 @@
'use strict'; 'use strict';
function buildKimiProfile(config) { const { getModelByKey } = require('../config');
const baseUrl = config.base_url;
const apiKey = config.api_key; function buildProfile(model) {
const modelId = config.model_id || 'kimi-k2.5'; const supportsThinking = model.modes === 'thinking' || model.modes === 'fast+thinking';
return { return {
key: 'kimi-k2.5', key: model.key,
name: 'Kimi-k2.5', name: model.name,
base_url: baseUrl, base_url: model.base_url,
api_key: apiKey, api_key: model.api_key,
model_id: modelId, model_id: model.model_id || model.name,
supports_thinking: true, modes: model.modes,
thinking_params: { multimodal: model.multimodal,
fast: { thinking: { type: 'disabled' } }, max_output: Number.isFinite(model.max_output) ? model.max_output : null,
thinking: { thinking: { type: 'enabled' } }, max_context: Number.isFinite(model.max_context) ? model.max_context : null,
}, supports_thinking: supportsThinking,
thinking_params: supportsThinking
? {
fast: { thinking: { type: 'disabled' } },
thinking: { thinking: { type: 'enabled' } },
}
: { fast: {}, thinking: {} },
}; };
} }
function getModelProfile(config) { function getModelProfile(config, modelKey) {
return buildKimiProfile(config); const model = getModelByKey(config, modelKey);
if (!model || !model.valid) {
throw new Error(`模型配置无效或不存在: ${modelKey || ''}`);
}
return buildProfile(model);
} }
module.exports = { getModelProfile }; module.exports = { getModelProfile };

View File

@ -74,7 +74,7 @@ function saveConversation(workspace, conversation) {
total_tools: countTools(conversation.messages || []), total_tools: countTools(conversation.messages || []),
thinking_mode: conversation.metadata?.thinking_mode || false, thinking_mode: conversation.metadata?.thinking_mode || false,
run_mode: conversation.metadata?.thinking_mode ? 'thinking' : 'fast', run_mode: conversation.metadata?.thinking_mode ? 'thinking' : 'fast',
model_key: conversation.metadata?.model_key || 'kimi-k2.5', model_key: conversation.metadata?.model_key || '',
}; };
saveIndex(indexFile, index); saveIndex(indexFile, index);
} }
@ -88,8 +88,8 @@ function createConversation(workspace, metadata = {}) {
created_at: now, created_at: now,
updated_at: now, updated_at: now,
metadata: { metadata: {
model_key: metadata.model_key || 'kimi-k2.5', model_key: metadata.model_key || '',
model_id: metadata.model_id || 'kimi-k2.5', model_id: metadata.model_id || '',
thinking_mode: !!metadata.thinking_mode, thinking_mode: !!metadata.thinking_mode,
allow_mode: metadata.allow_mode || 'full_access', allow_mode: metadata.allow_mode || 'full_access',
token_usage: normalizeTokenUsage(metadata.token_usage), token_usage: normalizeTokenUsage(metadata.token_usage),