fix: improve usage parsing and ignore local artifacts
This commit is contained in:
parent
327932f7a1
commit
bcf83e3672
8
.gitignore
vendored
Normal file
8
.gitignore
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
.DS_Store
|
||||
node_modules/
|
||||
logs/
|
||||
.easyagent/
|
||||
*.log
|
||||
1.txt
|
||||
2.txt
|
||||
edited_records.txt
|
||||
@ -29,6 +29,7 @@ async function* streamChat({ config, modelKey, messages, tools, thinkingMode, cu
|
||||
stream: true,
|
||||
stream_options: { include_usage: true },
|
||||
};
|
||||
payload.include_usage = true;
|
||||
payload.reasoning_split = true;
|
||||
const maxTokens = computeMaxTokens(profile, currentContextTokens);
|
||||
if (maxTokens) payload.max_tokens = maxTokens;
|
||||
|
||||
@ -25,4 +25,15 @@ function applyUsage(base, usage) {
|
||||
return normalized;
|
||||
}
|
||||
|
||||
module.exports = { normalizeTokenUsage, applyUsage };
|
||||
// Normalize a provider "usage" object into { prompt_tokens, completion_tokens, total_tokens }.
// Accepts several vendor field spellings (OpenAI-style *_tokens, input/output variants,
// bare prompt/completion/total). Returns null when the input is not an object or when it
// carries no usable token counts at all.
function normalizeUsagePayload(usage) {
  if (!usage || typeof usage !== 'object') return null;

  // Coerce to a finite number; anything else (undefined, NaN, Infinity, non-numeric strings) counts as 0.
  const asCount = (raw) => {
    const n = Number(raw);
    return Number.isFinite(n) ? n : 0;
  };

  const promptTokens = asCount(usage.prompt_tokens ?? usage.input_tokens ?? usage.prompt ?? usage.input);
  const completionTokens = asCount(usage.completion_tokens ?? usage.output_tokens ?? usage.completion ?? usage.output);
  let totalTokens = asCount(usage.total_tokens ?? usage.total);

  // Derive the total when the provider omitted it but reported component counts.
  if (!totalTokens && (promptTokens || completionTokens)) {
    totalTokens = promptTokens + completionTokens;
  }

  // All-zero usage carries no information; treat it as absent.
  if (!promptTokens && !completionTokens && !totalTokens) return null;

  return {
    prompt_tokens: promptTokens,
    completion_tokens: completionTokens,
    total_tokens: totalTokens,
  };
}
|
||||
|
||||
module.exports = { normalizeTokenUsage, applyUsage, normalizeUsagePayload };
|
||||
|
||||
Loading…
Reference in New Issue
Block a user