Send limits when getting rate limited (#4102)

Users need visibility into their rate limits when they are being rate limited. This change parses the rate-limit headers on 429 responses, attaches the snapshot to UsageLimitReachedError, and forwards it to the client through the TokenCount event.
Author: Ahmed Ibrahim
Date: 2025-09-23 15:56:34 -07:00
Committed by: GitHub
Parent: fdb8dadcae
Commit: 8227a5ba1b
8 changed files with 186 additions and 46 deletions

View File

@@ -42,7 +42,7 @@ use crate::model_provider_info::ModelProviderInfo;
 use crate::model_provider_info::WireApi;
 use crate::openai_model_info::get_model_info;
 use crate::openai_tools::create_tools_json_for_responses_api;
-use crate::protocol::RateLimitSnapshotEvent;
+use crate::protocol::RateLimitSnapshot;
 use crate::protocol::TokenUsage;
 use crate::token_data::PlanType;
 use crate::util::backoff;
@@ -330,6 +330,7 @@ impl ModelClient {
         }
         if status == StatusCode::TOO_MANY_REQUESTS {
+            let rate_limit_snapshot = parse_rate_limit_snapshot(res.headers());
             let body = res.json::<ErrorResponse>().await.ok();
             if let Some(ErrorResponse { error }) = body {
                 if error.r#type.as_deref() == Some("usage_limit_reached") {
@@ -343,6 +344,7 @@ impl ModelClient {
                     return Err(CodexErr::UsageLimitReached(UsageLimitReachedError {
                         plan_type,
                         resets_in_seconds,
+                        rate_limits: rate_limit_snapshot,
                     }));
                 } else if error.r#type.as_deref() == Some("usage_not_included") {
                     return Err(CodexErr::UsageNotIncluded);
@@ -485,7 +487,7 @@ fn attach_item_ids(payload_json: &mut Value, original_items: &[ResponseItem]) {
     }
 }

-fn parse_rate_limit_snapshot(headers: &HeaderMap) -> Option<RateLimitSnapshotEvent> {
+fn parse_rate_limit_snapshot(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
     let primary_used_percent = parse_header_f64(headers, "x-codex-primary-used-percent")?;
     let secondary_used_percent = parse_header_f64(headers, "x-codex-secondary-used-percent")?;
     let primary_to_secondary_ratio_percent =
@@ -493,7 +495,7 @@ fn parse_rate_limit_snapshot(headers: &HeaderMap) -> Option<RateLimitSnapshotEve
     let primary_window_minutes = parse_header_u64(headers, "x-codex-primary-window-minutes")?;
     let secondary_window_minutes = parse_header_u64(headers, "x-codex-secondary-window-minutes")?;
-    Some(RateLimitSnapshotEvent {
+    Some(RateLimitSnapshot {
         primary_used_percent,
         secondary_used_percent,
         primary_to_secondary_ratio_percent,
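
For reference, a self-contained sketch of the header parsing above. The crate's parse_header_f64 / parse_header_u64 helpers are not shown in this diff, and the header name feeding primary_to_secondary_ratio_percent is cut off in the hunk, so both are stand-ins here:

// Sketch only: mirrors parse_rate_limit_snapshot above using local helpers.
use reqwest::header::HeaderMap;

#[derive(Debug, Clone)]
pub struct RateLimitSnapshot {
    pub primary_used_percent: f64,
    pub secondary_used_percent: f64,
    pub primary_to_secondary_ratio_percent: f64,
    pub primary_window_minutes: u64,
    pub secondary_window_minutes: u64,
}

// Assumed equivalents of the crate's parse_header_f64 / parse_header_u64 helpers.
fn header_f64(headers: &HeaderMap, name: &str) -> Option<f64> {
    headers.get(name)?.to_str().ok()?.trim().parse().ok()
}

fn header_u64(headers: &HeaderMap, name: &str) -> Option<u64> {
    headers.get(name)?.to_str().ok()?.trim().parse().ok()
}

fn snapshot_from_headers(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
    Some(RateLimitSnapshot {
        primary_used_percent: header_f64(headers, "x-codex-primary-used-percent")?,
        secondary_used_percent: header_f64(headers, "x-codex-secondary-used-percent")?,
        // The real header name for this field is truncated in the hunk above;
        // this one is a placeholder.
        primary_to_secondary_ratio_percent: header_f64(
            headers,
            "x-codex-primary-to-secondary-ratio-percent",
        )?,
        primary_window_minutes: header_u64(headers, "x-codex-primary-window-minutes")?,
        secondary_window_minutes: header_u64(headers, "x-codex-secondary-window-minutes")?,
    })
}

A missing or malformed header makes the whole snapshot None, matching the ? early returns in the real function.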

View File

@@ -1,7 +1,7 @@
 use crate::error::Result;
 use crate::model_family::ModelFamily;
 use crate::openai_tools::OpenAiTool;
-use crate::protocol::RateLimitSnapshotEvent;
+use crate::protocol::RateLimitSnapshot;
 use crate::protocol::TokenUsage;
 use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
 use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
@@ -83,7 +83,7 @@ pub enum ResponseEvent {
     WebSearchCallBegin {
         call_id: String,
     },
-    RateLimits(RateLimitSnapshotEvent),
+    RateLimits(RateLimitSnapshot),
 }

 #[derive(Debug, Serialize)]
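
A minimal, self-contained illustration of consuming the new variant; the enum and struct below are trimmed stand-ins, not the crate's real definitions:

// Trimmed stand-ins for the types this diff touches.
#[derive(Debug, Clone)]
struct RateLimitSnapshot {
    primary_used_percent: f64,
    secondary_used_percent: f64,
}

#[derive(Debug)]
enum ResponseEvent {
    RateLimits(RateLimitSnapshot),          // new in this commit
    Completed { token_usage: Option<u64> }, // token usage type simplified
}

fn handle(event: ResponseEvent, latest: &mut Option<RateLimitSnapshot>) {
    match event {
        // May arrive before Completed: just record it.
        ResponseEvent::RateLimits(snapshot) => *latest = Some(snapshot),
        // Report usage together with whatever snapshot was last seen.
        ResponseEvent::Completed { token_usage } => {
            println!("usage: {token_usage:?}, rate limits: {latest:?}");
        }
    }
}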

View File

@@ -100,7 +100,7 @@ use crate::protocol::ListCustomPromptsResponseEvent;
 use crate::protocol::Op;
 use crate::protocol::PatchApplyBeginEvent;
 use crate::protocol::PatchApplyEndEvent;
-use crate::protocol::RateLimitSnapshotEvent;
+use crate::protocol::RateLimitSnapshot;
 use crate::protocol::ReviewDecision;
 use crate::protocol::ReviewOutputEvent;
 use crate::protocol::SandboxPolicy;
@@ -261,7 +261,7 @@ struct State {
     pending_input: Vec<ResponseInputItem>,
     history: ConversationHistory,
     token_info: Option<TokenUsageInfo>,
-    latest_rate_limits: Option<RateLimitSnapshotEvent>,
+    latest_rate_limits: Option<RateLimitSnapshot>,
 }

 /// Context for an initialized model agent
@@ -739,31 +739,42 @@ impl Session {
     async fn update_token_usage_info(
         &self,
+        sub_id: &str,
         turn_context: &TurnContext,
         token_usage: Option<&TokenUsage>,
     ) {
-        let mut state = self.state.lock().await;
-        if let Some(token_usage) = token_usage {
-            let info = TokenUsageInfo::new_or_append(
-                &state.token_info,
-                &Some(token_usage.clone()),
-                turn_context.client.get_model_context_window(),
-            );
-            state.token_info = info;
+        {
+            let mut state = self.state.lock().await;
+            if let Some(token_usage) = token_usage {
+                let info = TokenUsageInfo::new_or_append(
+                    &state.token_info,
+                    &Some(token_usage.clone()),
+                    turn_context.client.get_model_context_window(),
+                );
+                state.token_info = info;
+            }
         }
+        self.send_token_count_event(sub_id).await;
     }

-    async fn update_rate_limits(&self, new_rate_limits: RateLimitSnapshotEvent) {
-        let mut state = self.state.lock().await;
-        state.latest_rate_limits = Some(new_rate_limits);
+    async fn update_rate_limits(&self, sub_id: &str, new_rate_limits: RateLimitSnapshot) {
+        {
+            let mut state = self.state.lock().await;
+            state.latest_rate_limits = Some(new_rate_limits);
+        }
+        self.send_token_count_event(sub_id).await;
     }

-    async fn get_token_count_event(&self) -> TokenCountEvent {
-        let state = self.state.lock().await;
-        TokenCountEvent {
-            info: state.token_info.clone(),
-            rate_limits: state.latest_rate_limits.clone(),
-        }
+    async fn send_token_count_event(&self, sub_id: &str) {
+        let (info, rate_limits) = {
+            let state = self.state.lock().await;
+            (state.token_info.clone(), state.latest_rate_limits.clone())
+        };
+        let event = Event {
+            id: sub_id.to_string(),
+            msg: EventMsg::TokenCount(TokenCountEvent { info, rate_limits }),
+        };
+        self.send_event(event).await;
     }

     /// Record a user input item to conversation history and also persist a
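
The new brace scopes above keep the state lock narrow: the guard is dropped before send_token_count_event runs, and that helper clones what it needs under the lock and sends afterwards. A minimal sketch of the pattern, assuming tokio's async Mutex and a stand-in State type:

use std::sync::Arc;
use tokio::sync::Mutex;

#[derive(Default)]
struct State {
    latest_rate_limits: Option<String>, // stand-in for Option<RateLimitSnapshot>
}

async fn update_rate_limits(state: Arc<Mutex<State>>, snapshot: String) {
    {
        // Mutate shared state inside a short scope...
        let mut guard = state.lock().await;
        guard.latest_rate_limits = Some(snapshot);
    } // ...so the guard is dropped here, before the send below.
    send_token_count_event(&state).await;
}

async fn send_token_count_event(state: &Arc<Mutex<State>>) {
    // Clone what the event needs while holding the lock, then send without it.
    let rate_limits = { state.lock().await.latest_rate_limits.clone() };
    println!("TokenCount {{ rate_limits: {rate_limits:?} }}");
}

In codex.rs the event goes through Session::send_event and carries TokenUsageInfo plus the RateLimitSnapshot, but the shape is the same: mutate or clone under the lock, release it, then send.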
@@ -1957,9 +1968,14 @@ async fn run_turn(
             Ok(output) => return Ok(output),
             Err(CodexErr::Interrupted) => return Err(CodexErr::Interrupted),
             Err(CodexErr::EnvVar(var)) => return Err(CodexErr::EnvVar(var)),
-            Err(e @ (CodexErr::UsageLimitReached(_) | CodexErr::UsageNotIncluded)) => {
-                return Err(e);
+            Err(CodexErr::UsageLimitReached(e)) => {
+                let rate_limits = e.rate_limits.clone();
+                if let Some(rate_limits) = rate_limits {
+                    sess.update_rate_limits(&sub_id, rate_limits).await;
+                }
+                return Err(CodexErr::UsageLimitReached(e));
             }
+            Err(CodexErr::UsageNotIncluded) => return Err(CodexErr::UsageNotIncluded),
             Err(e) => {
                 // Use the configured provider-specific stream retry budget.
                 let max_retries = turn_context.client.get_provider().stream_max_retries();
@@ -2132,20 +2148,13 @@ async fn try_run_turn(
             ResponseEvent::RateLimits(snapshot) => {
                 // Update internal state with latest rate limits, but defer sending until
                 // token usage is available to avoid duplicate TokenCount events.
-                sess.update_rate_limits(snapshot).await;
+                sess.update_rate_limits(sub_id, snapshot).await;
             }
             ResponseEvent::Completed {
                 response_id: _,
                 token_usage,
             } => {
-                sess.update_token_usage_info(turn_context, token_usage.as_ref())
-                    .await;
-                let token_event = sess.get_token_count_event().await;
-                let _ = sess
-                    .send_event(Event {
-                        id: sub_id.to_string(),
-                        msg: EventMsg::TokenCount(token_event),
-                    })
+                sess.update_token_usage_info(sub_id, turn_context, token_usage.as_ref())
                     .await;

                 let unified_diff = turn_diff_tracker.get_unified_diff();
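
End to end, the TokenCount event that reaches the front end now carries the latest rate-limit snapshot alongside token usage. A hypothetical consumer, with the protocol types trimmed to fields visible in this diff (the real TUI handling is not part of this commit):

// Trimmed stand-ins for the protocol types referenced in this diff.
#[derive(Debug, Clone)]
struct RateLimitSnapshot {
    primary_used_percent: f64,
    secondary_used_percent: f64,
    primary_window_minutes: u64,
    secondary_window_minutes: u64,
}

#[derive(Debug, Clone)]
struct TokenCountEvent {
    info: Option<String>, // stand-in for Option<TokenUsageInfo>
    rate_limits: Option<RateLimitSnapshot>,
}

fn on_token_count(ev: TokenCountEvent) {
    if let Some(limits) = ev.rate_limits {
        println!(
            "primary window ({} min): {}% used; secondary window ({} min): {}% used",
            limits.primary_window_minutes,
            limits.primary_used_percent,
            limits.secondary_window_minutes,
            limits.secondary_used_percent,
        );
    }
    if let Some(info) = ev.info {
        println!("token usage: {info}");
    }
}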

View File

@@ -2,6 +2,7 @@ use crate::exec::ExecToolCallOutput;
 use crate::token_data::KnownPlan;
 use crate::token_data::PlanType;
 use codex_protocol::mcp_protocol::ConversationId;
+use codex_protocol::protocol::RateLimitSnapshot;
 use reqwest::StatusCode;
 use serde_json;
 use std::io;
@@ -138,6 +139,7 @@ pub enum CodexErr {
 pub struct UsageLimitReachedError {
     pub(crate) plan_type: Option<PlanType>,
     pub(crate) resets_in_seconds: Option<u64>,
+    pub(crate) rate_limits: Option<RateLimitSnapshot>,
 }

 impl std::fmt::Display for UsageLimitReachedError {
@@ -266,11 +268,22 @@ pub fn get_error_message_ui(e: &CodexErr) -> String {
 mod tests {
     use super::*;

+    fn rate_limit_snapshot() -> RateLimitSnapshot {
+        RateLimitSnapshot {
+            primary_used_percent: 0.5,
+            secondary_used_percent: 0.3,
+            primary_to_secondary_ratio_percent: 0.7,
+            primary_window_minutes: 60,
+            secondary_window_minutes: 120,
+        }
+    }
+
     #[test]
     fn usage_limit_reached_error_formats_plus_plan() {
         let err = UsageLimitReachedError {
             plan_type: Some(PlanType::Known(KnownPlan::Plus)),
             resets_in_seconds: None,
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -283,6 +296,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: Some(PlanType::Known(KnownPlan::Free)),
             resets_in_seconds: Some(3600),
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -295,6 +309,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: None,
             resets_in_seconds: None,
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -307,6 +322,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: Some(PlanType::Known(KnownPlan::Team)),
             resets_in_seconds: Some(3600),
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -319,6 +335,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: Some(PlanType::Known(KnownPlan::Business)),
             resets_in_seconds: None,
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -331,6 +348,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: Some(PlanType::Known(KnownPlan::Pro)),
             resets_in_seconds: None,
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -343,6 +361,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: None,
             resets_in_seconds: Some(5 * 60),
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -355,6 +374,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: Some(PlanType::Known(KnownPlan::Plus)),
             resets_in_seconds: Some(3 * 3600 + 32 * 60),
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -367,6 +387,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: None,
             resets_in_seconds: Some(2 * 86_400 + 3 * 3600 + 5 * 60),
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),
@@ -379,6 +400,7 @@ mod tests {
         let err = UsageLimitReachedError {
             plan_type: None,
             resets_in_seconds: Some(30),
+            rate_limits: Some(rate_limit_snapshot()),
         };
         assert_eq!(
             err.to_string(),