diff --git a/models.json b/models.json new file mode 100644 index 0000000..1afd5c7 --- /dev/null +++ b/models.json @@ -0,0 +1,33 @@ +{ + "tavily_api_key": "tvly-REPLACE_ME", + "default_model": "kimi-k2.5", + "models": [ + { + "url": "https://api.moonshot.cn/v1", + "name": "kimi-k2.5", + "apikey": "sk-REPLACE_ME", + "modes": "快速,思考", + "multimodal": "图片,视频", + "max_output": 32000, + "max_context": 256000 + }, + { + "url": "https://dashscope.aliyuncs.com/compatible-mode/v1", + "name": "qwen3.5-plus", + "apikey": "sk-REPLACE_ME", + "modes": "快速,思考", + "multimodal": "图片", + "max_output": 32768, + "max_context": 256000 + }, + { + "url": "https://api.minimaxi.com/v1", + "name": "minimax-m2.5", + "apikey": "sk-REPLACE_ME", + "modes": "思考", + "multimodal": "", + "max_output": 65536, + "max_context": 204800 + } + ] +} diff --git a/src/cli/commands.js b/src/cli/commands.js index b41c9b6..7581225 100644 --- a/src/cli/commands.js +++ b/src/cli/commands.js @@ -2,7 +2,7 @@ const { createConversation, loadConversation, listConversations, updateConversation } = require('../storage/conversation_store'); const { formatRelativeTime } = require('../utils/time'); -const { maskKey } = require('../config'); +const { maskKey, getModelByKey } = require('../config'); const { runSelect } = require('../ui/select_prompt'); const { runResumeMenu } = require('../ui/resume_menu'); const { buildFinalLine, formatResultLines, printResultLines } = require('../ui/tool_display'); @@ -30,6 +30,17 @@ function printNotice(message) { console.log(''); } +function applyModelState(state, config, modelKey, preferredThinking) { + const model = getModelByKey(config, modelKey); + if (!model || !model.valid) return false; + state.modelKey = model.key; + state.modelId = model.model_id || model.name || model.key; + if 
(model.modes === 'fast') state.thinkingMode = false; + else if (model.modes === 'thinking') state.thinkingMode = true; + else state.thinkingMode = !!preferredThinking; + return true; +} + async function handleCommand(input, ctx) { const { rl, state, config, workspace, statusBar } = ctx; const persist = () => { @@ -83,7 +94,10 @@ async function handleCommand(input, ctx) { } state.conversation = conv; state.messages = conv.messages || []; - state.thinkingMode = !!conv.metadata?.thinking_mode; + const ok = applyModelState(state, config, conv.metadata?.model_key || state.modelKey, !!conv.metadata?.thinking_mode); + if (!ok) { + printNotice('对话中的模型不可用,已保留当前模型'); + } state.allowMode = conv.metadata?.allow_mode || state.allowMode; state.tokenUsage = normalizeTokenUsage(conv.metadata?.token_usage); printNotice(`已加载对话: ${conv.id}`); @@ -133,7 +147,10 @@ async function handleCommand(input, ctx) { } state.conversation = conv; state.messages = conv.messages || []; - state.thinkingMode = !!conv.metadata?.thinking_mode; + const ok = applyModelState(state, config, conv.metadata?.model_key || state.modelKey, !!conv.metadata?.thinking_mode); + if (!ok) { + printNotice('对话中的模型不可用,已保留当前模型'); + } state.allowMode = conv.metadata?.allow_mode || state.allowMode; state.tokenUsage = normalizeTokenUsage(conv.metadata?.token_usage); printNotice(`已加载对话: ${conv.id}`); @@ -163,15 +180,39 @@ async function handleCommand(input, ctx) { } if (cmd === '/model') { - const modelChoices = [ - { name: `1. Kimi${state.modelKey === 'kimi-k2.5' ? ' (current)' : ''}`, value: 'kimi-k2.5' }, - ]; - const model = await runSelect({ rl, message: '', choices: modelChoices, pageSize: 6 }); - if (!model) return { exit: false }; - state.modelKey = model; - state.modelId = config.model_id || 'kimi-k2.5'; + const models = config.valid_models || []; + if (!models.length) { + printNotice('未找到可用模型,请先完善 models.json'); + return { exit: false }; + } + const modelChoices = models.map((model, idx) => ({ + name: `${idx + 1}. 
${model.name}${state.modelKey === model.key ? ' (current)' : ''}`, + value: model.key, + })); + const modelKey = await runSelect({ rl, message: '', choices: modelChoices, pageSize: 6 }); + if (!modelKey) return { exit: false }; + const selected = getModelByKey(config, modelKey); + if (!selected || !selected.valid) { + printNotice('模型配置无效'); + return { exit: false }; + } + state.modelKey = selected.key; + state.modelId = selected.model_id || selected.name || selected.key; printNotice(`模型已切换为: ${state.modelKey}`); + if (selected.modes === 'fast') { + state.thinkingMode = false; + printNotice('思考模式: fast'); + persist(); + return { exit: false }; + } + if (selected.modes === 'thinking') { + state.thinkingMode = true; + printNotice('思考模式: thinking'); + persist(); + return { exit: false }; + } + const thinkingChoices = [ { name: `1. Fast${!state.thinkingMode ? ' (current)' : ''}`, value: 'fast' }, { name: `2. Thinking${state.thinkingMode ? ' (current)' : ''}`, value: 'thinking' }, @@ -186,24 +227,41 @@ async function handleCommand(input, ctx) { } if (cmd === '/status') { + const usage = normalizeTokenUsage(state.tokenUsage); + const model = getModelByKey(config, state.modelKey); const title = 'Status'; + const maxContext = model && model.max_context ? model.max_context : ''; + const maxOutput = model && model.max_output ? model.max_output : ''; const lines = [ `model: ${state.modelKey}`, `thinking: ${state.thinkingMode ? 
'thinking' : 'fast'}`, `workspace: ${workspace}`, `allow: ${state.allowMode}`, `conversation: ${state.conversation?.id || 'none'}`, - `token usage: ${state.tokenUsage}`, + `tokens(in): ${usage.prompt}`, + `tokens(out): ${usage.completion}`, + `tokens(total): ${usage.total}`, + `max_context: ${maxContext}`, + `max_output: ${maxOutput}`, ]; renderBox({ title, lines }); return { exit: false }; } if (cmd === '/config') { + const model = getModelByKey(config, state.modelKey); console.log(''); - console.log(`base_url: ${config.base_url}`); - console.log(`modelname: ${config.model_id || 'kimi-k2.5'}`); - console.log(`apikey: ${maskKey(config.api_key)}`); + console.log(`config: ${config.path || ''}`); + if (model) { + console.log(`base_url: ${model.base_url || ''}`); + console.log(`modelname: ${model.model_id || model.name || ''}`); + console.log(`apikey: ${maskKey(model.api_key)}`); + console.log(`modes: ${model.modes || ''}`); + console.log(`multimodal: ${model.multimodal || ''}`); + console.log(`max_output: ${model.max_output || ''}`); + console.log(`max_context: ${model.max_context || ''}`); + } + console.log(`tavily_api_key: ${maskKey(config.tavily_api_key)}`); console.log(''); return { exit: false }; } diff --git a/src/cli/index.js b/src/cli/index.js index 78231ad..7d4a8a1 100644 --- a/src/cli/index.js +++ b/src/cli/index.js @@ -34,6 +34,12 @@ const IMAGE_EXTS = new Set(['.png', '.jpg', '.jpeg', '.gif', '.webp', '.bmp', '. 
const VIDEO_EXTS = new Set(['.mp4', '.mov', '.avi', '.mkv', '.webm', '.m4v']); const config = ensureConfig(); +if (!config.valid_models || config.valid_models.length === 0) { + console.log(''); + console.log(`未找到可用模型,请先在 ${config.path} 填写完整模型信息。`); + console.log(''); + process.exit(1); +} const state = createState(config, WORKSPACE); state.conversation = createConversation(WORKSPACE, { model_key: state.modelKey, @@ -640,7 +646,16 @@ async function runAssistantLoop() { const streamController = new AbortController(); activeStreamController = streamController; try { - for await (const chunk of streamChat({ config, messages, tools, thinkingMode: state.thinkingMode, abortSignal: streamController.signal })) { + const currentContextTokens = normalizeTokenUsage(state.tokenUsage).total || 0; + for await (const chunk of streamChat({ + config, + modelKey: state.modelKey, + messages, + tools, + thinkingMode: state.thinkingMode, + currentContextTokens, + abortSignal: streamController.signal, + })) { const choice = chunk.choices && chunk.choices[0]; if (!choice) continue; const usage = (choice && (choice.usage || choice.delta?.usage)) || chunk.usage; @@ -651,10 +666,29 @@ async function runAssistantLoop() { } const delta = choice.delta || {}; - if (delta.reasoning_content || delta.reasoning_details) { + if (delta.reasoning_content || delta.reasoning_details || choice.reasoning_details) { thinkingActive = true; showThinkingLabel = true; - const rc = delta.reasoning_content || (Array.isArray(delta.reasoning_details) ? 
delta.reasoning_details.map((d) => d.text || '').join('') : ''); + let rc = ''; + if (delta.reasoning_content) { + rc = delta.reasoning_content; + } else if (delta.reasoning_details) { + if (Array.isArray(delta.reasoning_details)) { + rc = delta.reasoning_details.map((d) => d.text || '').join(''); + } else if (typeof delta.reasoning_details === 'string') { + rc = delta.reasoning_details; + } else if (delta.reasoning_details && typeof delta.reasoning_details.text === 'string') { + rc = delta.reasoning_details.text; + } + } else if (choice.reasoning_details) { + if (Array.isArray(choice.reasoning_details)) { + rc = choice.reasoning_details.map((d) => d.text || '').join(''); + } else if (typeof choice.reasoning_details === 'string') { + rc = choice.reasoning_details; + } else if (choice.reasoning_details && typeof choice.reasoning_details.text === 'string') { + rc = choice.reasoning_details.text; + } + } fullThinkingBuffer += rc; thinkingBuffer = truncateThinking(fullThinkingBuffer); } diff --git a/src/config.js b/src/config.js index 0e18ef0..78538b9 100644 --- a/src/config.js +++ b/src/config.js @@ -1,61 +1,116 @@ 'use strict'; const fs = require('fs'); -const os = require('os'); const path = require('path'); -const DEFAULT_ENV_PATH = '/Users/jojo/Desktop/agents/正在修复中/agents/.env'; +const DEFAULT_CONFIG_NAME = 'models.json'; +const DEFAULT_CONFIG_PATH = path.resolve(__dirname, '..', DEFAULT_CONFIG_NAME); -function parseEnv(content) { - const out = {}; - const lines = content.split(/\r?\n/); - for (const line of lines) { - const trimmed = line.trim(); - if (!trimmed || trimmed.startsWith('#')) continue; - const idx = trimmed.indexOf('='); - if (idx === -1) continue; - const key = trimmed.slice(0, idx).trim(); - const val = trimmed.slice(idx + 1).trim(); - out[key] = val; - } - return out; +function isNonEmptyString(value) { + return typeof value === 'string' && value.trim().length > 0; } -function loadEnvFile(envPath) { - try { - if (fs.existsSync(envPath)) { - const 
content = fs.readFileSync(envPath, 'utf8'); - return parseEnv(content); - } - } catch (_) {} - return {}; +function parsePositiveInt(value) { + if (value === null || value === undefined || value === '') return null; + const num = Number(value); + if (!Number.isFinite(num)) return null; + const out = Math.floor(num); + return out > 0 ? out : null; } -function getDefaultConfig() { - const env = loadEnvFile(DEFAULT_ENV_PATH); - const baseUrl = env.API_BASE_KIMI || env.AGENT_API_BASE_URL || 'https://api.moonshot.cn/v1'; - const apiKey = env.API_KEY_KIMI || env.AGENT_API_KEY || 'sk-xW0xjfQM6Mp9ZCWMLlnHiRJcpEOIZPTkXcN0dQ15xpZSuw2y'; - const modelId = env.MODEL_KIMI_25 || 'kimi-k2.5'; - const tavilyKey = env.AGENT_TAVILY_API_KEY || env.TAVILY_API_KEY || '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'; +function normalizeModes(value) { + if (!value) return null; + const text = String(value).trim().toLowerCase(); + if (!text) return null; + const parts = text.split(/[,,\s]+/).filter(Boolean); + const hasFast = parts.some((p) => p.includes('fast') || p.includes('快速')); + const hasThinking = parts.some((p) => p.includes('thinking') || p.includes('思考')); + if (hasFast && hasThinking) return 'fast+thinking'; + if (hasThinking) return 'thinking'; + if (hasFast) return 'fast'; + return null; +} + +function normalizeMultimodal(value) { + if (value === null || value === undefined) return 'none'; + const text = String(value).trim().toLowerCase(); + if (!text) return 'none'; + if (text.includes('无') || text.includes('none') || text.includes('no')) return 'none'; + const parts = text.split(/[,,\s]+/).filter(Boolean); + const hasImage = parts.some((p) => p.includes('图片') || p.includes('image')); + const hasVideo = parts.some((p) => p.includes('视频') || p.includes('video')); + if (hasVideo && hasImage) return 'image+video'; + if (hasVideo) return 'image+video'; + if (hasImage) return 'image'; + return null; +} + +function normalizeModel(raw) { + const name = String(raw.name || raw.model_name 
|| raw.model || '').trim(); + const url = String(raw.url || raw.base_url || '').trim(); + const apiKey = String(raw.apikey || raw.api_key || '').trim(); + const modes = normalizeModes(raw.modes || raw.mode || raw.supported_modes); + const multimodal = normalizeMultimodal(raw.multimodal || raw.multi_modal || raw.multi); + const maxOutput = parsePositiveInt(raw.max_output ?? raw.max_tokens ?? raw.max_output_tokens); + const maxContext = parsePositiveInt(raw.max_context ?? raw.context_window ?? raw.max_context_tokens); + const valid = Boolean( + isNonEmptyString(name) + && isNonEmptyString(url) + && isNonEmptyString(apiKey) + && modes + && multimodal + && maxOutput + && maxContext + ); return { - base_url: baseUrl, + key: name, + name, + model_id: name, + base_url: url, api_key: apiKey, - default_model_key: 'kimi-k2.5', - model_id: modelId, - tavily_api_key: tavilyKey, + modes, + multimodal, + max_output: maxOutput, + max_context: maxContext, + valid, + }; +} + +function buildConfig(raw, filePath) { + const modelsRaw = Array.isArray(raw.models) ? raw.models : []; + const models = modelsRaw.map((item) => normalizeModel(item || {})); + const modelMap = new Map(); + models.forEach((model) => { + if (model.key) modelMap.set(model.key, model); + }); + const validModels = models.filter((model) => model.valid); + const defaultModelKey = String(raw.default_model || raw.default_model_key || '').trim(); + const resolvedDefault = modelMap.get(defaultModelKey)?.valid + ? defaultModelKey + : (validModels[0] ? 
validModels[0].key : ''); + return { + path: filePath, + tavily_api_key: String(raw.tavily_api_key || '').trim(), + models, + valid_models: validModels, + model_map: modelMap, + default_model_key: resolvedDefault, }; } function ensureConfig() { - const dir = path.join(os.homedir(), '.easyagent'); - const file = path.join(dir, 'config.json'); - if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + const file = DEFAULT_CONFIG_PATH; if (!fs.existsSync(file)) { - const cfg = getDefaultConfig(); - fs.writeFileSync(file, JSON.stringify(cfg, null, 2), 'utf8'); + const template = { + tavily_api_key: '', + default_model: '', + models: [], + }; + fs.writeFileSync(file, JSON.stringify(template, null, 2), 'utf8'); } const content = fs.readFileSync(file, 'utf8'); - return JSON.parse(content); + const raw = JSON.parse(content); + return buildConfig(raw, file); } function maskKey(key) { @@ -64,4 +119,15 @@ function maskKey(key) { return `${key.slice(0, 3)}...${key.slice(-3)}`; } -module.exports = { ensureConfig, maskKey }; +function getModelByKey(config, key) { + if (!config || !key) return null; + if (config.model_map && typeof config.model_map.get === 'function') { + return config.model_map.get(key) || null; + } + if (Array.isArray(config.models)) { + return config.models.find((m) => m && m.key === key) || null; + } + return null; +} + +module.exports = { ensureConfig, maskKey, getModelByKey }; diff --git a/src/core/state.js b/src/core/state.js index 04c8ba0..dea2990 100644 --- a/src/core/state.js +++ b/src/core/state.js @@ -1,12 +1,25 @@ 'use strict'; +function resolveDefaultModel(config) { + const key = config.default_model_key || ''; + const model = config.model_map && typeof config.model_map.get === 'function' + ? config.model_map.get(key) + : null; + const modelKey = model && model.key ? model.key : key; + const modelId = model && (model.model_id || model.name) ? (model.model_id || model.name) : modelKey; + const supportsThinking = model ? 
(model.modes === 'thinking' || model.modes === 'fast+thinking') : false; + const thinkingMode = supportsThinking && model.modes !== 'fast'; + return { modelKey, modelId, thinkingMode }; +} + function createState(config, workspace) { + const resolved = resolveDefaultModel(config); return { workspace, allowMode: 'full_access', - modelKey: config.default_model_key || 'kimi-k2.5', - modelId: config.model_id || 'kimi-k2.5', - thinkingMode: true, + modelKey: resolved.modelKey, + modelId: resolved.modelId, + thinkingMode: resolved.thinkingMode, tokenUsage: { prompt: 0, completion: 0, total: 0 }, conversation: null, messages: [], diff --git a/src/model/client.js b/src/model/client.js index 0627661..8fbc71f 100644 --- a/src/model/client.js +++ b/src/model/client.js @@ -2,8 +2,19 @@ const { getModelProfile } = require('./model_profiles'); -async function* streamChat({ config, messages, tools, thinkingMode, abortSignal }) { - const profile = getModelProfile(config); +function computeMaxTokens(profile, currentContextTokens) { + const baseMax = Number.isFinite(profile.max_output) ? Math.max(1, Math.floor(profile.max_output)) : null; + if (!baseMax) return null; + const maxContext = Number.isFinite(profile.max_context) ? Math.max(1, Math.floor(profile.max_context)) : null; + if (!maxContext) return baseMax; + const used = Number.isFinite(currentContextTokens) ? 
Math.max(0, Math.floor(currentContextTokens)) : 0; + const available = maxContext - used; + if (available <= 0) return 1; + return Math.min(baseMax, available); +} + +async function* streamChat({ config, modelKey, messages, tools, thinkingMode, currentContextTokens, abortSignal }) { + const profile = getModelProfile(config, modelKey); const url = `${profile.base_url}/chat/completions`; const headers = { 'Content-Type': 'application/json', @@ -18,8 +29,12 @@ async function* streamChat({ config, messages, tools, thinkingMode, abortSignal stream: true, stream_options: { include_usage: true }, }; + payload.reasoning_split = true; + const maxTokens = computeMaxTokens(profile, currentContextTokens); + if (maxTokens) payload.max_tokens = maxTokens; - if (thinkingMode) { + const useThinking = thinkingMode && profile.supports_thinking; + if (useThinking) { Object.assign(payload, profile.thinking_params.thinking); } else { Object.assign(payload, profile.thinking_params.fast); diff --git a/src/model/model_profiles.js b/src/model/model_profiles.js index cad27aa..f37c452 100644 --- a/src/model/model_profiles.js +++ b/src/model/model_profiles.js @@ -1,25 +1,35 @@ 'use strict'; -function buildKimiProfile(config) { - const baseUrl = config.base_url; - const apiKey = config.api_key; - const modelId = config.model_id || 'kimi-k2.5'; +const { getModelByKey } = require('../config'); + +function buildProfile(model) { + const supportsThinking = model.modes === 'thinking' || model.modes === 'fast+thinking'; return { - key: 'kimi-k2.5', - name: 'Kimi-k2.5', - base_url: baseUrl, - api_key: apiKey, - model_id: modelId, - supports_thinking: true, - thinking_params: { - fast: { thinking: { type: 'disabled' } }, - thinking: { thinking: { type: 'enabled' } }, - }, + key: model.key, + name: model.name, + base_url: model.base_url, + api_key: model.api_key, + model_id: model.model_id || model.name, + modes: model.modes, + multimodal: model.multimodal, + max_output: 
Number.isFinite(model.max_output) ? model.max_output : null, + max_context: Number.isFinite(model.max_context) ? model.max_context : null, + supports_thinking: supportsThinking, + thinking_params: supportsThinking + ? { + fast: { thinking: { type: 'disabled' } }, + thinking: { thinking: { type: 'enabled' } }, + } + : { fast: {}, thinking: {} }, }; } -function getModelProfile(config) { - return buildKimiProfile(config); +function getModelProfile(config, modelKey) { + const model = getModelByKey(config, modelKey); + if (!model || !model.valid) { + throw new Error(`模型配置无效或不存在: ${modelKey || ''}`); + } + return buildProfile(model); } module.exports = { getModelProfile }; diff --git a/src/storage/conversation_store.js b/src/storage/conversation_store.js index a251637..c00ea4d 100644 --- a/src/storage/conversation_store.js +++ b/src/storage/conversation_store.js @@ -74,7 +74,7 @@ function saveConversation(workspace, conversation) { total_tools: countTools(conversation.messages || []), thinking_mode: conversation.metadata?.thinking_mode || false, run_mode: conversation.metadata?.thinking_mode ? 'thinking' : 'fast', - model_key: conversation.metadata?.model_key || 'kimi-k2.5', + model_key: conversation.metadata?.model_key || '', }; saveIndex(indexFile, index); } @@ -88,8 +88,8 @@ function createConversation(workspace, metadata = {}) { created_at: now, updated_at: now, metadata: { - model_key: metadata.model_key || 'kimi-k2.5', - model_id: metadata.model_id || 'kimi-k2.5', + model_key: metadata.model_key || '', + model_id: metadata.model_id || '', thinking_mode: !!metadata.thinking_mode, allow_mode: metadata.allow_mode || 'full_access', token_usage: normalizeTokenUsage(metadata.token_usage),