From a191945ed696c2e0a61d33995a474e5fd9446b24 Mon Sep 17 00:00:00 2001
From: ae
Date: Mon, 11 Aug 2025 07:19:15 -0700
Subject: [PATCH] fix: token usage display and context calculation (#2117)

In a recent conversation the one-liner reported 11M tokens used, but 10M of
those were cached input tokens, so the displayed total was misleading — this
looks like a regression.

- Use blended total tokens for the chat composer usage display
- Compute remaining context using the tokens_in_context_window helper

https://chatgpt.com/codex/tasks/task_i_68981a16c0a4832cbf416017390930e5
---
 codex-rs/tui/src/bottom_pane/chat_composer.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/codex-rs/tui/src/bottom_pane/chat_composer.rs b/codex-rs/tui/src/bottom_pane/chat_composer.rs
index 2743ada5..09ff8b7a 100644
--- a/codex-rs/tui/src/bottom_pane/chat_composer.rs
+++ b/codex-rs/tui/src/bottom_pane/chat_composer.rs
@@ -698,14 +698,15 @@ impl WidgetRef for &ChatComposer {
                 let token_usage = &token_usage_info.total_token_usage;
                 hint.push(Span::from(" "));
                 hint.push(
-                    Span::from(format!("{} tokens used", token_usage.total_tokens))
+                    Span::from(format!("{} tokens used", token_usage.blended_total()))
                         .style(Style::default().add_modifier(Modifier::DIM)),
                 );
                 let last_token_usage = &token_usage_info.last_token_usage;
                 if let Some(context_window) = token_usage_info.model_context_window {
                     let percent_remaining: u8 = if context_window > 0 {
                         let percent = 100.0
-                            - (last_token_usage.total_tokens as f32 / context_window as f32
+                            - (last_token_usage.tokens_in_context_window() as f32
+                                / context_window as f32
                                 * 100.0);
                         percent.clamp(0.0, 100.0) as u8
                     } else {