Move token usage/context information to session level (#3221)
Move context information into the main loop so it can be used to interrupt the loop or start auto-compaction.
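
A minimal sketch of the intent, not part of the change itself: once usage is tracked in session state as a TokenUsageInfo, the loop that drives turns can inspect it and decide when to compact. The helper name should_auto_compact and the 90% threshold below are invented for illustration; only TokenUsageInfo, tokens_in_context_window(), and BASELINE_TOKENS come from this commit.

    // Hypothetical helper (illustration only, not in this commit).
    fn should_auto_compact(info: &TokenUsageInfo) -> bool {
        let Some(window) = info.model_context_window else {
            return false; // context window unknown: never auto-compact
        };
        // The last turn's usage approximates what currently occupies the
        // context window; BASELINE_TOKENS reserves room for prompts, tools
        // and the compact call itself.
        let used = info.last_token_usage.tokens_in_context_window() + BASELINE_TOKENS;
        // Compact once roughly 90% of the window is consumed.
        used >= window.saturating_mul(9) / 10
    }
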
@@ -398,9 +398,15 @@ impl From<ResponseCompletedUsage> for TokenUsage {
     fn from(val: ResponseCompletedUsage) -> Self {
         TokenUsage {
             input_tokens: val.input_tokens,
-            cached_input_tokens: val.input_tokens_details.map(|d| d.cached_tokens),
+            cached_input_tokens: val
+                .input_tokens_details
+                .map(|d| d.cached_tokens)
+                .unwrap_or(0),
             output_tokens: val.output_tokens,
-            reasoning_output_tokens: val.output_tokens_details.map(|d| d.reasoning_tokens),
+            reasoning_output_tokens: val
+                .output_tokens_details
+                .map(|d| d.reasoning_tokens)
+                .unwrap_or(0),
             total_tokens: val.total_tokens,
         }
     }
@@ -99,6 +99,7 @@ use crate::protocol::SessionConfiguredEvent;
 use crate::protocol::StreamErrorEvent;
 use crate::protocol::Submission;
 use crate::protocol::TaskCompleteEvent;
+use crate::protocol::TokenUsageInfo;
 use crate::protocol::TurnDiffEvent;
 use crate::protocol::WebSearchBeginEvent;
 use crate::rollout::RolloutRecorder;
@@ -261,6 +262,7 @@ struct State {
     pending_approvals: HashMap<String, oneshot::Sender<ReviewDecision>>,
     pending_input: Vec<ResponseInputItem>,
     history: ConversationHistory,
+    token_info: Option<TokenUsageInfo>,
 }

 /// Context for an initialized model agent
@@ -1767,15 +1769,23 @@ async fn try_run_turn(
                 response_id: _,
                 token_usage,
             } => {
-                if let Some(token_usage) = token_usage {
-                    sess.tx_event
-                        .send(Event {
-                            id: sub_id.to_string(),
-                            msg: EventMsg::TokenCount(token_usage),
-                        })
-                        .await
-                        .ok();
-                }
+                let info = {
+                    let mut st = sess.state.lock_unchecked();
+                    let info = TokenUsageInfo::new_or_append(
+                        &st.token_info,
+                        &token_usage,
+                        turn_context.client.get_model_context_window(),
+                    );
+                    st.token_info = info.clone();
+                    info
+                };
+                sess.tx_event
+                    .send(Event {
+                        id: sub_id.to_string(),
+                        msg: EventMsg::TokenCount(crate::protocol::TokenCountEvent { info }),
+                    })
+                    .await
+                    .ok();

                 let unified_diff = turn_diff_tracker.get_unified_diff();
                 if let Ok(Some(unified_diff)) = unified_diff {
@@ -2841,13 +2851,21 @@ async fn drain_to_completed(
                 response_id: _,
                 token_usage,
             }) => {
-                // some providers don't return token usage, so we default
-                // TODO: consider approximate token usage
-                let token_usage = token_usage.unwrap_or_default();
+                let info = {
+                    let mut st = sess.state.lock_unchecked();
+                    let info = TokenUsageInfo::new_or_append(
+                        &st.token_info,
+                        &token_usage,
+                        turn_context.client.get_model_context_window(),
+                    );
+                    st.token_info = info.clone();
+                    info
+                };
+
                 sess.tx_event
                     .send(Event {
                         id: sub_id.to_string(),
-                        msg: EventMsg::TokenCount(token_usage),
+                        msg: EventMsg::TokenCount(crate::protocol::TokenCountEvent { info }),
                     })
                     .await
                     .ok();
@@ -189,8 +189,14 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                 }
                 return CodexStatus::InitiateShutdown;
             }
-            EventMsg::TokenCount(token_usage) => {
-                ts_println!(self, "tokens used: {}", token_usage.blended_total());
+            EventMsg::TokenCount(ev) => {
+                if let Some(usage_info) = ev.info {
+                    ts_println!(
+                        self,
+                        "tokens used: {}",
+                        usage_info.total_token_usage.blended_total()
+                    );
+                }
             }
             EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
                 if !self.answer_started {
@@ -417,9 +417,9 @@ pub enum EventMsg {
     /// Agent has completed all actions
     TaskComplete(TaskCompleteEvent),

-    /// Token count event, sent periodically to report the number of tokens
-    /// used in the current session.
-    TokenCount(TokenUsage),
+    /// Usage update for the current session, including totals and last turn.
+    /// Optional means unknown — UIs should not display when `None`.
+    TokenCount(TokenCountEvent),

     /// Agent text output message
     AgentMessage(AgentMessageEvent),
@@ -521,12 +521,54 @@ pub struct TaskStartedEvent {
 #[derive(Debug, Clone, Deserialize, Serialize, Default)]
 pub struct TokenUsage {
     pub input_tokens: u64,
-    pub cached_input_tokens: Option<u64>,
+    pub cached_input_tokens: u64,
     pub output_tokens: u64,
-    pub reasoning_output_tokens: Option<u64>,
+    pub reasoning_output_tokens: u64,
     pub total_tokens: u64,
 }

+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct TokenUsageInfo {
+    pub total_token_usage: TokenUsage,
+    pub last_token_usage: TokenUsage,
+    pub model_context_window: Option<u64>,
+}
+
+impl TokenUsageInfo {
+    pub fn new_or_append(
+        info: &Option<TokenUsageInfo>,
+        last: &Option<TokenUsage>,
+        model_context_window: Option<u64>,
+    ) -> Option<Self> {
+        if info.is_none() && last.is_none() {
+            return None;
+        }
+
+        let mut info = match info {
+            Some(info) => info.clone(),
+            None => Self {
+                total_token_usage: TokenUsage::default(),
+                last_token_usage: TokenUsage::default(),
+                model_context_window,
+            },
+        };
+        if let Some(last) = last {
+            info.append_last_usage(last);
+        }
+        Some(info)
+    }
+
+    pub fn append_last_usage(&mut self, last: &TokenUsage) {
+        self.total_token_usage.add_assign(last);
+        self.last_token_usage = last.clone();
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct TokenCountEvent {
+    pub info: Option<TokenUsageInfo>,
+}
+
 // Includes prompts, tools and space to call compact.
 const BASELINE_TOKENS: u64 = 12000;
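
To make the accumulation semantics above concrete, a small usage sketch follows (the token counts and the 128_000 window are invented; the types and methods are the ones added in the hunk above):

    // Illustration only: fold one turn's usage into the session-level info.
    let mut info: Option<TokenUsageInfo> = None;
    let first_turn = TokenUsage {
        input_tokens: 900,
        cached_input_tokens: 0,
        output_tokens: 100,
        reasoning_output_tokens: 0,
        total_tokens: 1000,
    };
    // The first call creates the info; total and last usage are both the first turn.
    info = TokenUsageInfo::new_or_append(&info, &Some(first_turn), Some(128_000));
    // Subsequent calls add to total_token_usage and replace last_token_usage.
    // If both the existing info and the new usage are None, the result stays
    // None, which is why the event carries Option<TokenUsageInfo>.
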
@@ -536,7 +578,7 @@ impl TokenUsage {
     }

     pub fn cached_input(&self) -> u64 {
-        self.cached_input_tokens.unwrap_or(0)
+        self.cached_input_tokens
     }

     pub fn non_cached_input(&self) -> u64 {
@@ -554,7 +596,7 @@ impl TokenUsage {
     /// This will be off for the current turn and pending function calls.
     pub fn tokens_in_context_window(&self) -> u64 {
         self.total_tokens
-            .saturating_sub(self.reasoning_output_tokens.unwrap_or(0))
+            .saturating_sub(self.reasoning_output_tokens)
     }

     /// Estimate the remaining user-controllable percentage of the model's context window.
@@ -579,6 +621,15 @@ impl TokenUsage {
         let remaining = effective_window.saturating_sub(used);
         ((remaining as f32 / effective_window as f32) * 100.0).clamp(0.0, 100.0) as u8
     }
+
+    /// In-place element-wise sum of token counts.
+    pub fn add_assign(&mut self, other: &TokenUsage) {
+        self.input_tokens += other.input_tokens;
+        self.cached_input_tokens += other.cached_input_tokens;
+        self.output_tokens += other.output_tokens;
+        self.reasoning_output_tokens += other.reasoning_output_tokens;
+        self.total_tokens += other.total_tokens;
+    }
 }

 #[derive(Debug, Clone, Deserialize, Serialize)]
@@ -606,10 +657,11 @@ impl fmt::Display for FinalOutput {
                 String::new()
             },
             token_usage.output_tokens,
-            token_usage
-                .reasoning_output_tokens
-                .map(|r| format!(" (reasoning {r})"))
-                .unwrap_or_default()
+            if token_usage.reasoning_output_tokens > 0 {
+                format!(" (reasoning {})", token_usage.reasoning_output_tokens)
+            } else {
+                String::new()
+            }
         )
     }
 }
@@ -1,4 +1,4 @@
-use codex_core::protocol::TokenUsage;
+use codex_core::protocol::TokenUsageInfo;
 use crossterm::event::KeyCode;
 use crossterm::event::KeyEvent;
 use crossterm::event::KeyEventKind;
@@ -63,12 +63,6 @@ struct AttachedImage {
     path: PathBuf,
 }

-struct TokenUsageInfo {
-    total_token_usage: TokenUsage,
-    last_token_usage: TokenUsage,
-    model_context_window: Option<u64>,
-}
-
 pub(crate) struct ChatComposer {
     textarea: TextArea,
     textarea_state: RefCell<TextAreaState>,
@@ -166,17 +160,8 @@ impl ChatComposer {
     /// Update the cached *context-left* percentage and refresh the placeholder
     /// text. The UI relies on the placeholder to convey the remaining
     /// context when the composer is empty.
-    pub(crate) fn set_token_usage(
-        &mut self,
-        total_token_usage: TokenUsage,
-        last_token_usage: TokenUsage,
-        model_context_window: Option<u64>,
-    ) {
-        self.token_usage_info = Some(TokenUsageInfo {
-            total_token_usage,
-            last_token_usage,
-            model_context_window,
-        });
+    pub(crate) fn set_token_usage(&mut self, token_info: Option<TokenUsageInfo>) {
+        self.token_usage_info = token_info;
     }

     /// Record the history metadata advertised by `SessionConfiguredEvent` so
@@ -1290,11 +1275,16 @@ impl WidgetRef for ChatComposer {
             } else {
                 100
             };
+            let context_style = if percent_remaining < 20 {
+                Style::default().fg(Color::Yellow)
+            } else {
+                Style::default().add_modifier(Modifier::DIM)
+            };
             hint.push(" ".into());
-            hint.push(
-                Span::from(format!("{percent_remaining}% context left"))
-                    .style(Style::default().add_modifier(Modifier::DIM)),
-            );
+            hint.push(Span::styled(
+                format!("{percent_remaining}% context left"),
+                context_style,
+            ));
         }
     }

@@ -5,7 +5,7 @@ use crate::app_event_sender::AppEventSender;
 use crate::tui::FrameRequester;
 use crate::user_approval_widget::ApprovalRequest;
 use bottom_pane_view::BottomPaneView;
-use codex_core::protocol::TokenUsage;
+use codex_core::protocol::TokenUsageInfo;
 use codex_file_search::FileMatch;
 use crossterm::event::KeyEvent;
 use ratatui::buffer::Buffer;
@@ -358,14 +358,8 @@ impl BottomPane {

     /// Update the *context-window remaining* indicator in the composer. This
     /// is forwarded directly to the underlying `ChatComposer`.
-    pub(crate) fn set_token_usage(
-        &mut self,
-        total_token_usage: TokenUsage,
-        last_token_usage: TokenUsage,
-        model_context_window: Option<u64>,
-    ) {
-        self.composer
-            .set_token_usage(total_token_usage, last_token_usage, model_context_window);
+    pub(crate) fn set_token_usage(&mut self, token_info: Option<TokenUsageInfo>) {
+        self.composer.set_token_usage(token_info);
         self.request_redraw();
     }

@@ -29,6 +29,7 @@ use codex_core::protocol::PatchApplyBeginEvent;
 use codex_core::protocol::StreamErrorEvent;
 use codex_core::protocol::TaskCompleteEvent;
 use codex_core::protocol::TokenUsage;
+use codex_core::protocol::TokenUsageInfo;
 use codex_core::protocol::TurnAbortReason;
 use codex_core::protocol::TurnDiffEvent;
 use codex_core::protocol::UserMessageEvent;
@@ -109,8 +110,7 @@ pub(crate) struct ChatWidget {
     active_exec_cell: Option<ExecCell>,
     config: Config,
     initial_user_message: Option<UserMessage>,
-    total_token_usage: TokenUsage,
-    last_token_usage: TokenUsage,
+    token_info: Option<TokenUsageInfo>,
     // Stream lifecycle controller
     stream: StreamController,
     running_commands: HashMap<String, RunningCommand>,
@@ -259,16 +259,10 @@ impl ChatWidget {
         self.maybe_send_next_queued_input();
     }

-    fn on_token_count(&mut self, token_usage: TokenUsage) {
-        self.total_token_usage = add_token_usage(&self.total_token_usage, &token_usage);
-        self.last_token_usage = token_usage;
-        self.bottom_pane.set_token_usage(
-            self.total_token_usage.clone(),
-            self.last_token_usage.clone(),
-            self.config.model_context_window,
-        );
+    pub(crate) fn set_token_info(&mut self, info: Option<TokenUsageInfo>) {
+        self.bottom_pane.set_token_usage(info.clone());
+        self.token_info = info;
     }

     /// Finalize any active exec as failed, push an error message into history,
     /// and stop/clear running UI state.
     fn finalize_turn_with_error_message(&mut self, message: String) {
@@ -659,8 +653,7 @@ impl ChatWidget {
                 initial_prompt.unwrap_or_default(),
                 initial_images,
             ),
-            total_token_usage: TokenUsage::default(),
-            last_token_usage: TokenUsage::default(),
+            token_info: None,
             stream: StreamController::new(config),
             running_commands: HashMap::new(),
             task_complete_pending: false,
@@ -712,8 +705,7 @@ impl ChatWidget {
                 initial_prompt.unwrap_or_default(),
                 initial_images,
             ),
-            total_token_usage: TokenUsage::default(),
-            last_token_usage: TokenUsage::default(),
+            token_info: None,
             stream: StreamController::new(config),
             running_commands: HashMap::new(),
             task_complete_pending: false,
@@ -1050,7 +1042,7 @@ impl ChatWidget {
             EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(),
             EventMsg::TaskStarted(_) => self.on_task_started(),
             EventMsg::TaskComplete(TaskCompleteEvent { .. }) => self.on_task_complete(),
-            EventMsg::TokenCount(token_usage) => self.on_token_count(token_usage),
+            EventMsg::TokenCount(ev) => self.set_token_info(ev.info),
             EventMsg::Error(ErrorEvent { message }) => self.on_error(message),
             EventMsg::TurnAborted(ev) => match ev.reason {
                 TurnAbortReason::Interrupted => {
@@ -1157,9 +1149,16 @@ impl ChatWidget {
     }

     pub(crate) fn add_status_output(&mut self) {
+        let default_usage;
+        let usage_ref = if let Some(ti) = &self.token_info {
+            &ti.total_token_usage
+        } else {
+            default_usage = TokenUsage::default();
+            &default_usage
+        };
         self.add_to_history(history_cell::new_status_output(
             &self.config,
-            &self.total_token_usage,
+            usage_ref,
             &self.session_id,
         ));
     }
@@ -1352,8 +1351,11 @@ impl ChatWidget {
         self.submit_user_message(text.into());
     }

-    pub(crate) fn token_usage(&self) -> &TokenUsage {
-        &self.total_token_usage
+    pub(crate) fn token_usage(&self) -> TokenUsage {
+        self.token_info
+            .as_ref()
+            .map(|ti| ti.total_token_usage.clone())
+            .unwrap_or_default()
     }

     pub(crate) fn session_id(&self) -> Option<Uuid> {
@@ -1367,12 +1369,8 @@ impl ChatWidget {
     }

     pub(crate) fn clear_token_usage(&mut self) {
-        self.total_token_usage = TokenUsage::default();
-        self.bottom_pane.set_token_usage(
-            self.total_token_usage.clone(),
-            self.last_token_usage.clone(),
-            self.config.model_context_window,
-        );
+        self.token_info = None;
+        self.bottom_pane.set_token_usage(None);
     }

     pub fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> {
@@ -1405,34 +1403,6 @@ const EXAMPLE_PROMPTS: [&str; 6] = [
     "Improve documentation in @filename",
 ];

-fn add_token_usage(current_usage: &TokenUsage, new_usage: &TokenUsage) -> TokenUsage {
-    let cached_input_tokens = match (
-        current_usage.cached_input_tokens,
-        new_usage.cached_input_tokens,
-    ) {
-        (Some(current), Some(new)) => Some(current + new),
-        (Some(current), None) => Some(current),
-        (None, Some(new)) => Some(new),
-        (None, None) => None,
-    };
-    let reasoning_output_tokens = match (
-        current_usage.reasoning_output_tokens,
-        new_usage.reasoning_output_tokens,
-    ) {
-        (Some(current), Some(new)) => Some(current + new),
-        (Some(current), None) => Some(current),
-        (None, Some(new)) => Some(new),
-        (None, None) => None,
-    };
-    TokenUsage {
-        input_tokens: current_usage.input_tokens + new_usage.input_tokens,
-        cached_input_tokens,
-        output_tokens: current_usage.output_tokens + new_usage.output_tokens,
-        reasoning_output_tokens,
-        total_tokens: current_usage.total_tokens + new_usage.total_tokens,
-    }
-}
-
 // Extract the first bold (Markdown) element in the form **...** from `s`.
 // Returns the inner text if found; otherwise `None`.
 fn extract_first_bold(s: &str) -> Option<String> {
@@ -221,8 +221,7 @@ fn make_chatwidget_manual() -> (
         active_exec_cell: None,
         config: cfg.clone(),
         initial_user_message: None,
-        total_token_usage: TokenUsage::default(),
-        last_token_usage: TokenUsage::default(),
+        token_info: None,
         stream: StreamController::new(cfg),
         running_commands: HashMap::new(),
         task_complete_pending: false,
@@ -966,9 +966,8 @@ pub(crate) fn new_status_output(
         " • Input: ".into(),
         usage.non_cached_input().to_string().into(),
     ];
-    if let Some(cached) = usage.cached_input_tokens
-        && cached > 0
-    {
+    if usage.cached_input_tokens > 0 {
+        let cached = usage.cached_input_tokens;
         input_line_spans.push(format!(" (+ {cached} cached)").into());
     }
     lines.push(Line::from(input_line_spans));