feat: add model nudge for queries (#6286)

This commit is contained in:
Ahmed Ibrahim
2025-11-05 19:42:59 -08:00
committed by GitHub
parent 229d18f4d2
commit 63e1ef25af
3 changed files with 209 additions and 0 deletions

View File

@@ -132,6 +132,8 @@ struct RunningCommand {
}
const RATE_LIMIT_WARNING_THRESHOLDS: [f64; 3] = [75.0, 90.0, 95.0];
const NUDGE_MODEL_SLUG: &str = "gpt-5-codex";
const RATE_LIMIT_SWITCH_PROMPT_THRESHOLD: f64 = 90.0;
#[derive(Default)]
struct RateLimitWarningState {
@@ -230,6 +232,14 @@ pub(crate) struct ChatWidgetInit {
pub(crate) feedback: codex_feedback::CodexFeedback,
}
/// One-shot state machine for the "switch to a cheaper model" nudge.
/// A threshold crossing arms it (`Pending`); showing the popup retires it
/// (`Shown`) so the prompt appears at most once per session.
#[derive(Default)]
enum RateLimitSwitchPromptState {
    /// Nothing queued: either the threshold has not been crossed yet, or no
    /// suitable lower-cost preset was available when we tried to show it.
    #[default]
    Idle,
    /// Threshold crossed; show the prompt at the next quiet moment
    /// (task completion / interruption).
    Pending,
    /// Prompt was displayed once already; never re-arm this session.
    Shown,
}
pub(crate) struct ChatWidget {
app_event_tx: AppEventSender,
codex_op_tx: UnboundedSender<Op>,
@@ -242,6 +252,7 @@ pub(crate) struct ChatWidget {
token_info: Option<TokenUsageInfo>,
rate_limit_snapshot: Option<RateLimitSnapshotDisplay>,
rate_limit_warnings: RateLimitWarningState,
rate_limit_switch_prompt: RateLimitSwitchPromptState,
// Stream lifecycle controller
stream_controller: Option<StreamController>,
running_commands: HashMap<String, RunningCommand>,
@@ -463,6 +474,8 @@ impl ChatWidget {
self.notify(Notification::AgentTurnComplete {
response: last_agent_message.unwrap_or_default(),
});
self.maybe_show_pending_rate_limit_prompt();
}
pub(crate) fn set_token_info(&mut self, info: Option<TokenUsageInfo>) {
@@ -497,6 +510,27 @@ impl ChatWidget {
.and_then(|window| window.window_minutes),
);
let high_usage = snapshot
.secondary
.as_ref()
.map(|w| w.used_percent >= RATE_LIMIT_SWITCH_PROMPT_THRESHOLD)
.unwrap_or(false)
|| snapshot
.primary
.as_ref()
.map(|w| w.used_percent >= RATE_LIMIT_SWITCH_PROMPT_THRESHOLD)
.unwrap_or(false);
if high_usage
&& self.config.model != NUDGE_MODEL_SLUG
&& !matches!(
self.rate_limit_switch_prompt,
RateLimitSwitchPromptState::Shown
)
{
self.rate_limit_switch_prompt = RateLimitSwitchPromptState::Pending;
}
let display = crate::status::rate_limit_snapshot_display(&snapshot, Local::now());
self.rate_limit_snapshot = Some(display);
@@ -518,6 +552,7 @@ impl ChatWidget {
self.bottom_pane.set_task_running(false);
self.running_commands.clear();
self.stream_controller = None;
self.maybe_show_pending_rate_limit_prompt();
}
fn on_error(&mut self, message: String) {
@@ -1001,6 +1036,7 @@ impl ChatWidget {
token_info: None,
rate_limit_snapshot: None,
rate_limit_warnings: RateLimitWarningState::default(),
rate_limit_switch_prompt: RateLimitSwitchPromptState::default(),
stream_controller: None,
running_commands: HashMap::new(),
task_complete_pending: false,
@@ -1067,6 +1103,7 @@ impl ChatWidget {
token_info: None,
rate_limit_snapshot: None,
rate_limit_warnings: RateLimitWarningState::default(),
rate_limit_switch_prompt: RateLimitSwitchPromptState::default(),
stream_controller: None,
running_commands: HashMap::new(),
task_complete_pending: false,
@@ -1666,6 +1703,85 @@ impl ChatWidget {
));
}
/// Resolve the built-in preset for the lower-cost nudge model, if the
/// current auth mode offers one.
fn lower_cost_preset(&self) -> Option<ModelPreset> {
    let mode = self.auth_manager.auth().map(|auth| auth.mode);
    for preset in builtin_model_presets(mode) {
        if preset.model == NUDGE_MODEL_SLUG {
            return Some(preset);
        }
    }
    None
}
/// Surface the queued rate-limit nudge, if any.
///
/// Only acts when the prompt is `Pending`: shows the popup and marks it
/// `Shown` when a lower-cost preset exists, otherwise resets to `Idle`.
fn maybe_show_pending_rate_limit_prompt(&mut self) {
    let RateLimitSwitchPromptState::Pending = self.rate_limit_switch_prompt else {
        return;
    };
    self.rate_limit_switch_prompt = match self.lower_cost_preset() {
        Some(preset) => {
            self.open_rate_limit_switch_prompt(preset);
            RateLimitSwitchPromptState::Shown
        }
        // No preset for this auth mode — disarm so we can re-check later.
        None => RateLimitSwitchPromptState::Idle,
    };
}
/// Build and display the bottom-pane popup offering a switch to the
/// lower-cost `preset` versus keeping the current model.
fn open_rate_limit_switch_prompt(&mut self, preset: ModelPreset) {
    let switch_model = preset.model.to_string();
    let display_name = preset.display_name.to_string();
    let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;

    // Choosing "switch" overrides the turn context, then broadcasts the
    // model/effort change to the rest of the app.
    let switch_actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
        let override_op = Op::OverrideTurnContext {
            cwd: None,
            approval_policy: None,
            sandbox_policy: None,
            model: Some(switch_model.clone()),
            effort: Some(Some(default_effort)),
            summary: None,
        };
        tx.send(AppEvent::CodexOp(override_op));
        tx.send(AppEvent::UpdateModel(switch_model.clone()));
        tx.send(AppEvent::UpdateReasoningEffort(Some(default_effort)));
    })];
    // Keeping the current model requires no follow-up work.
    let keep_actions: Vec<SelectionAction> = Vec::new();

    // Prefer the preset's own blurb; fall back to a generic one.
    let description = Some(if preset.description.is_empty() {
        "Uses fewer credits for upcoming turns.".to_string()
    } else {
        preset.description.to_string()
    });

    let switch_item = SelectionItem {
        name: format!("Switch to {display_name}"),
        description,
        selected_description: None,
        is_current: false,
        actions: switch_actions,
        dismiss_on_select: true,
        ..Default::default()
    };
    let keep_item = SelectionItem {
        name: "Keep current model".to_string(),
        description: None,
        selected_description: None,
        is_current: false,
        actions: keep_actions,
        dismiss_on_select: true,
        ..Default::default()
    };

    self.bottom_pane.show_selection_view(SelectionViewParams {
        title: Some("Approaching rate limits".to_string()),
        subtitle: Some(format!(
            "You've used over 90% of your limit. Switch to {display_name} for lower credit usage?"
        )),
        footer_hint: Some(standard_popup_hint_line()),
        items: vec![switch_item, keep_item],
        ..Default::default()
    });
}
/// Open a popup to choose the model (stage 1). After selecting a model,
/// a second popup is shown to choose the reasoning effort.
pub(crate) fn open_model_popup(&mut self) {

View File

@@ -0,0 +1,12 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 474
expression: popup
---
Approaching rate limits
You've used over 90% of your limit. Switch to gpt-5-codex for lower credit u
1. Switch to gpt-5-codex Optimized for coding tasks with many tools.
2. Keep current model
Press enter to confirm or esc to go back

View File

@@ -26,6 +26,7 @@ use codex_core::protocol::FileChange;
use codex_core::protocol::Op;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::RateLimitWindow;
use codex_core::protocol::ReviewCodeLocation;
use codex_core::protocol::ReviewFinding;
use codex_core::protocol::ReviewLineRange;
@@ -102,6 +103,17 @@ fn upgrade_event_payload_for_tests(mut payload: serde_json::Value) -> serde_json
payload
}
/// Build a rate-limit snapshot with a single primary hourly window at the
/// given usage percentage (no secondary window, no reset timestamp).
fn snapshot(percent: f64) -> RateLimitSnapshot {
    let primary = RateLimitWindow {
        used_percent: percent,
        window_minutes: Some(60),
        resets_at: None,
    };
    RateLimitSnapshot {
        primary: Some(primary),
        secondary: None,
    }
}
#[test]
fn resumed_initial_messages_render_history() {
let (mut chat, mut rx, _ops) = make_chatwidget_manual();
@@ -285,6 +297,7 @@ fn make_chatwidget_manual() -> (
token_info: None,
rate_limit_snapshot: None,
rate_limit_warnings: RateLimitWarningState::default(),
rate_limit_switch_prompt: RateLimitSwitchPromptState::default(),
stream_controller: None,
running_commands: HashMap::new(),
task_complete_pending: false,
@@ -393,6 +406,74 @@ fn test_rate_limit_warnings_monthly() {
);
}
/// Already on the nudge model: crossing the threshold must not arm a prompt.
#[test]
fn rate_limit_switch_prompt_skips_when_on_lower_cost_model() {
    let (mut widget, _, _) = make_chatwidget_manual();
    widget.config.model = NUDGE_MODEL_SLUG.to_string();

    widget.on_rate_limit_snapshot(Some(snapshot(95.0)));

    assert!(matches!(
        widget.rate_limit_switch_prompt,
        RateLimitSwitchPromptState::Idle
    ));
}
/// The prompt is surfaced at most once: after it has been shown, further
/// threshold crossings leave the state in `Shown`.
#[test]
fn rate_limit_switch_prompt_shows_once_per_session() {
    let (mut widget, _, _) = make_chatwidget_manual();
    widget.config.model = "gpt-5".to_string();

    widget.on_rate_limit_snapshot(Some(snapshot(90.0)));
    assert!(
        widget.rate_limit_warnings.primary_index >= 1,
        "warnings not emitted"
    );

    widget.maybe_show_pending_rate_limit_prompt();
    assert!(matches!(
        widget.rate_limit_switch_prompt,
        RateLimitSwitchPromptState::Shown
    ));

    // A second threshold crossing must not re-arm the prompt.
    widget.on_rate_limit_snapshot(Some(snapshot(95.0)));
    assert!(matches!(
        widget.rate_limit_switch_prompt,
        RateLimitSwitchPromptState::Shown
    ));
}
/// While a task is running the prompt only becomes `Pending`; it is shown
/// once the task finishes and the pending check runs.
#[test]
fn rate_limit_switch_prompt_defers_until_task_complete() {
    let (mut widget, _, _) = make_chatwidget_manual();
    widget.config.model = "gpt-5".to_string();
    widget.bottom_pane.set_task_running(true);

    widget.on_rate_limit_snapshot(Some(snapshot(90.0)));
    assert!(matches!(
        widget.rate_limit_switch_prompt,
        RateLimitSwitchPromptState::Pending
    ));

    widget.bottom_pane.set_task_running(false);
    widget.maybe_show_pending_rate_limit_prompt();
    assert!(matches!(
        widget.rate_limit_switch_prompt,
        RateLimitSwitchPromptState::Shown
    ));
}
/// Snapshot-test the rendered contents of the rate-limit switch popup.
#[test]
fn rate_limit_switch_prompt_popup_snapshot() {
    let (mut widget, _rx, _op_rx) = make_chatwidget_manual();
    widget.config.model = "gpt-5".to_string();

    widget.on_rate_limit_snapshot(Some(snapshot(92.0)));
    widget.maybe_show_pending_rate_limit_prompt();

    let popup = render_bottom_popup(&widget, 80);
    assert_snapshot!("rate_limit_switch_prompt_popup", popup);
}
// (removed experimental resize snapshot test)
#[test]