send context window with task started (#2752)

- Send context window with task started
- Accounting for changing the model per turn
Author: Ahmed Ibrahim
Date: 2025-08-27 00:04:21 -07:00
Committed by: GitHub
Parent: 4b6c6ce98f
Commit: d0e06f74e2
7 changed files with 44 additions and 10 deletions
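Downstream, the change is mostly mechanical: `EventMsg::TaskStarted` becomes a tuple variant carrying a `TaskStartedEvent`, so every consumer either ignores the payload with `TaskStarted(_)` or destructures it. A minimal sketch of that shape, using local stand-ins for the types rather than the real `codex_protocol` definitions:

// Stand-ins mirroring the shapes introduced in this commit; the real
// definitions live in codex_protocol::protocol.
struct TaskStartedEvent {
    model_context_window: Option<u64>,
}

enum EventMsg {
    TaskStarted(TaskStartedEvent), // was a unit variant: EventMsg::TaskStarted
    TaskComplete,
}

fn handle(msg: &EventMsg) {
    match msg {
        EventMsg::TaskStarted(TaskStartedEvent { model_context_window }) => {
            match model_context_window {
                Some(window) => println!("task started; context window: {window} tokens"),
                None => println!("task started; context window unknown"),
            }
        }
        EventMsg::TaskComplete => println!("task complete"),
    }
}

fn main() {
    handle(&EventMsg::TaskStarted(TaskStartedEvent {
        model_context_window: Some(128_000), // illustrative value
    }));
    handle(&EventMsg::TaskStarted(TaskStartedEvent {
        model_context_window: None,
    }));
    handle(&EventMsg::TaskComplete);
}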

View File

@@ -35,6 +35,7 @@ use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_family::ModelFamily;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::openai_model_info::get_model_info;
use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::TokenUsage;
use crate::user_agent::get_codex_user_agent;
@@ -90,6 +91,12 @@ impl ModelClient {
}
}
pub fn get_model_context_window(&self) -> Option<u64> {
self.config
.model_context_window
.or_else(|| get_model_info(&self.config.model_family).map(|info| info.context_window))
}
/// Dispatches to either the Responses or Chat implementation depending on
/// the provider config. Public callers always invoke `stream()`; the
/// specialised helpers are private to avoid accidental misuse.
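The new accessor prefers an explicit `model_context_window` from the config and only falls back to the per-family lookup when it is unset. A self-contained sketch of that fallback, with simplified stand-ins for the config and model-info types (the model name and window size are illustrative, not the real table):

struct ModelInfo {
    context_window: u64,
}

struct Config {
    model_family: String,
    /// Explicit override; `None` means "derive it from the model family".
    model_context_window: Option<u64>,
}

// Stand-in for crate::openai_model_info::get_model_info; values illustrative.
fn get_model_info(model_family: &str) -> Option<ModelInfo> {
    match model_family {
        "gpt-4o" => Some(ModelInfo { context_window: 128_000 }),
        _ => None,
    }
}

struct ModelClient {
    config: Config,
}

impl ModelClient {
    /// Explicit config value first, then the per-family lookup, else `None`.
    fn get_model_context_window(&self) -> Option<u64> {
        self.config
            .model_context_window
            .or_else(|| get_model_info(&self.config.model_family).map(|info| info.context_window))
    }
}

fn main() {
    let client = ModelClient {
        config: Config {
            model_family: "gpt-4o".to_string(),
            model_context_window: None,
        },
    };
    // Falls through to the lookup because the config leaves the field unset.
    assert_eq!(client.get_model_context_window(), Some(128_000));
}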

View File

@@ -15,6 +15,7 @@ use codex_apply_patch::MaybeApplyPatchVerified;
use codex_apply_patch::maybe_parse_apply_patch_verified;
use codex_login::AuthManager;
use codex_protocol::protocol::ConversationHistoryResponseEvent;
use codex_protocol::protocol::TaskStartedEvent;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use futures::prelude::*;
@@ -62,6 +63,7 @@ use crate::exec_env::create_env;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::mcp_tool_call::handle_mcp_tool_call;
use crate::model_family::find_family_for_model;
use crate::openai_model_info::get_model_info;
use crate::openai_tools::ApplyPatchToolArgs;
use crate::openai_tools::ToolsConfig;
use crate::openai_tools::ToolsConfigParams;
@@ -1079,6 +1081,9 @@ async fn submission_loop(
let mut updated_config = (*config).clone();
updated_config.model = effective_model.clone();
updated_config.model_family = effective_family.clone();
if let Some(model_info) = get_model_info(&effective_family) {
updated_config.model_context_window = Some(model_info.context_window);
}
let client = ModelClient::new(
Arc::new(updated_config),
@@ -1162,6 +1167,9 @@ async fn submission_loop(
let mut per_turn_config = (*config).clone();
per_turn_config.model = model.clone();
per_turn_config.model_family = model_family.clone();
if let Some(model_info) = get_model_info(&model_family) {
per_turn_config.model_context_window = Some(model_info.context_window);
}
// Build a new client with per-turn reasoning settings.
// Reuse the same provider and session id; auth defaults to env/API key.
@@ -1370,7 +1378,9 @@ async fn run_task(
}
let event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskStarted,
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
}),
};
if sess.tx_event.send(event).await.is_err() {
return;
@@ -1817,9 +1827,12 @@ async fn run_compact_task(
input: Vec<InputItem>,
compact_instructions: String,
) {
let model_context_window = turn_context.client.get_model_context_window();
let start_event = Event {
id: sub_id.clone(),
msg: EventMsg::TaskStarted,
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window,
}),
};
if sess.tx_event.send(start_event).await.is_err() {
return;
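Both per-turn hunks above apply the same recipe: clone the base config, swap in the turn's model and family, and refresh `model_context_window` from the model info so the window reported by `TaskStarted` tracks the model actually used for that turn. A rough sketch of that recipe with stand-in types (model names and window sizes are illustrative):

#[derive(Clone)]
struct Config {
    model: String,
    model_family: String,
    model_context_window: Option<u64>,
}

struct ModelInfo {
    context_window: u64,
}

// Stand-in lookup; the real values come from crate::openai_model_info.
fn get_model_info(model_family: &str) -> Option<ModelInfo> {
    match model_family {
        "o3" => Some(ModelInfo { context_window: 200_000 }), // illustrative number
        _ => None,
    }
}

/// Clone the session config and make it consistent with the model chosen for
/// this turn, including the context window that model implies.
fn per_turn_config(base: &Config, model: &str, model_family: &str) -> Config {
    let mut per_turn = base.clone();
    per_turn.model = model.to_string();
    per_turn.model_family = model_family.to_string();
    if let Some(info) = get_model_info(model_family) {
        per_turn.model_context_window = Some(info.context_window);
    }
    per_turn
}

fn main() {
    let base = Config {
        model: "gpt-4o".to_string(),
        model_family: "gpt-4o".to_string(),
        model_context_window: Some(128_000),
    };
    let turn = per_turn_config(&base, "o3", "o3");
    println!("per-turn model: {} ({})", turn.model, turn.model_family);
    // Without the refresh, the turn would still report the old 128k window.
    assert_eq!(turn.model_context_window, Some(200_000));
}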

View File

@@ -179,7 +179,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::StreamError(StreamErrorEvent { message }) => {
ts_println!(self, "{}", message.style(self.dimmed));
}
EventMsg::TaskStarted => {
EventMsg::TaskStarted(_) => {
// Ignore.
}
EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => {

View File

@@ -257,7 +257,7 @@ async fn run_codex_tool_session_inner(
}
EventMsg::AgentReasoningRawContent(_)
| EventMsg::AgentReasoningRawContentDelta(_)
| EventMsg::TaskStarted
| EventMsg::TaskStarted(_)
| EventMsg::TokenCount(_)
| EventMsg::AgentReasoning(_)
| EventMsg::AgentReasoningSectionBreak(_)

View File

@@ -401,7 +401,7 @@ pub enum EventMsg {
Error(ErrorEvent),
/// Agent has started a task
TaskStarted,
TaskStarted(TaskStartedEvent),
/// Agent has completed all actions
TaskComplete(TaskCompleteEvent),
@@ -494,6 +494,11 @@ pub struct TaskCompleteEvent {
pub last_agent_message: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TaskStartedEvent {
pub model_context_window: Option<u64>,
}
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
pub struct TokenUsage {
pub input_tokens: u64,
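Because `TaskStartedEvent` derives `Serialize`/`Deserialize`, the optional window appears in the JSON payload as either a number or `null`. A small stand-alone sketch of the struct, assuming `serde` and `serde_json` as dependencies (the surrounding `EventMsg` enum and its tagging are not reproduced here):

use serde::{Deserialize, Serialize};

// Mirrors the struct added to codex_protocol::protocol in this commit.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct TaskStartedEvent {
    model_context_window: Option<u64>,
}

fn main() -> serde_json::Result<()> {
    let known = TaskStartedEvent { model_context_window: Some(128_000) };
    let unknown = TaskStartedEvent { model_context_window: None };

    // {"model_context_window":128000}
    println!("{}", serde_json::to_string(&known)?);
    // {"model_context_window":null}
    println!("{}", serde_json::to_string(&unknown)?);

    // Round-trips back into the struct as well.
    let parsed: TaskStartedEvent = serde_json::from_str(r#"{"model_context_window":272000}"#)?;
    assert_eq!(parsed.model_context_window, Some(272_000));
    Ok(())
}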

View File

@@ -938,7 +938,7 @@ impl ChatWidget {
self.on_agent_reasoning_final()
}
EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(),
EventMsg::TaskStarted => self.on_task_started(),
EventMsg::TaskStarted(_) => self.on_task_started(),
EventMsg::TaskComplete(TaskCompleteEvent { .. }) => self.on_task_complete(),
EventMsg::TokenCount(token_usage) => self.on_token_count(token_usage),
EventMsg::Error(ErrorEvent { message }) => self.on_error(message),
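The TUI match arm ignores the payload for now (`TaskStarted(_)`), but one plausible use of the new field is to pair it with the `TokenCount` updates that flow through the same match and report how much of the context window a session has consumed. The helper below is hypothetical, purely to illustrate why the window is worth sending:

/// Hypothetical helper: percent of the context window used so far, if the
/// window for the current model is known.
fn context_used_percent(tokens_used: u64, model_context_window: Option<u64>) -> Option<f64> {
    let window = model_context_window?;
    if window == 0 {
        return None;
    }
    Some((tokens_used as f64 / window as f64) * 100.0)
}

fn main() {
    // A TaskStarted event reported a 128k window (illustrative), and token
    // counts so far add up to 32k.
    match context_used_percent(32_000, Some(128_000)) {
        Some(pct) => println!("context used: {pct:.1}%"),
        None => println!("context window unknown"),
    }
}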

View File

@@ -22,6 +22,7 @@ use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::StreamErrorEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TaskStartedEvent;
use codex_login::CodexAuth;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
@@ -829,7 +830,9 @@ fn ui_snapshots_small_heights_task_running() {
// Activate status line
chat.handle_codex_event(Event {
id: "task-1".into(),
msg: EventMsg::TaskStarted,
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
chat.handle_codex_event(Event {
id: "task-1".into(),
@@ -858,7 +861,9 @@ fn status_widget_and_approval_modal_snapshot() {
// Begin a running task so the status indicator would be active.
chat.handle_codex_event(Event {
id: "task-1".into(),
msg: EventMsg::TaskStarted,
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
// Provide a deterministic header for the status line.
chat.handle_codex_event(Event {
@@ -898,7 +903,9 @@ fn status_widget_active_snapshot() {
// Activate the status indicator by simulating a task start.
chat.handle_codex_event(Event {
id: "task-1".into(),
msg: EventMsg::TaskStarted,
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
// Provide a deterministic header via a bold reasoning chunk.
chat.handle_codex_event(Event {
@@ -1346,7 +1353,9 @@ fn multiple_agent_messages_in_single_turn_emit_multiple_headers() {
// Begin turn
chat.handle_codex_event(Event {
id: "s1".into(),
msg: EventMsg::TaskStarted,
msg: EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: None,
}),
});
// First finalized assistant message