Make model switcher two-stage (#4178)

https://github.com/user-attachments/assets/16d5c67c-e580-4a29-983c-a315f95424ee
easong-openai
2025-10-02 12:38:24 -07:00
committed by GitHub
parent 45936f8fbd
commit 06e34d4607
7 changed files with 265 additions and 50 deletions


@@ -20,49 +20,49 @@ const PRESETS: &[ModelPreset] = &[
ModelPreset {
id: "gpt-5-codex-low",
label: "gpt-5-codex low",
description: "",
description: "Fastest responses with limited reasoning",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::Low),
},
ModelPreset {
id: "gpt-5-codex-medium",
label: "gpt-5-codex medium",
description: "",
description: "Dynamically adjusts reasoning based on the task",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::Medium),
},
ModelPreset {
id: "gpt-5-codex-high",
label: "gpt-5-codex high",
description: "",
description: "Maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::High),
},
ModelPreset {
id: "gpt-5-minimal",
label: "gpt-5 minimal",
description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
description: "Fastest responses with little reasoning",
model: "gpt-5",
effort: Some(ReasoningEffort::Minimal),
},
ModelPreset {
id: "gpt-5-low",
label: "gpt-5 low",
description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
model: "gpt-5",
effort: Some(ReasoningEffort::Low),
},
ModelPreset {
id: "gpt-5-medium",
label: "gpt-5 medium",
description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
model: "gpt-5",
effort: Some(ReasoningEffort::Medium),
},
ModelPreset {
id: "gpt-5-high",
label: "gpt-5 high",
description: "— maximizes reasoning depth for complex or ambiguous problems",
description: "Maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5",
effort: Some(ReasoningEffort::High),
},
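
For reference, a minimal sketch of the preset shape these entries assume. Field names are inferred from the diff; the real definition lives in codex_common::model_presets and may carry more fields or different derives.

#[derive(Clone, Copy, Debug, PartialEq)]
enum ReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
}

#[derive(Clone)]
struct ModelPreset {
    id: &'static str,
    label: &'static str,
    description: &'static str,
    model: &'static str,
    effort: Option<ReasoningEffort>,
}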


@@ -320,24 +320,28 @@ impl App {
self.config.model_family = family;
}
}
AppEvent::OpenReasoningPopup { model, presets } => {
self.chat_widget.open_reasoning_popup(model, presets);
}
AppEvent::PersistModelSelection { model, effort } => {
let profile = self.active_profile.as_deref();
match persist_model_selection(&self.config.codex_home, profile, &model, effort)
.await
{
Ok(()) => {
let effort_label = effort
.map(|eff| format!(" with {eff} reasoning"))
.unwrap_or_else(|| " with default reasoning".to_string());
if let Some(profile) = profile {
self.chat_widget.add_info_message(
format!("Model changed to {model}{reasoning_effort} for {profile} profile", reasoning_effort = effort.map(|e| format!(" {e}")).unwrap_or_default()),
format!(
"Model changed to {model}{effort_label} for {profile} profile"
),
None,
);
} else {
self.chat_widget.add_info_message(
format!(
"Model changed to {model}{reasoning_effort}",
reasoning_effort =
effort.map(|e| format!(" {e}")).unwrap_or_default()
),
format!("Model changed to {model}{effort_label}"),
None,
);
}
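
The effort_label built above folds the optional effort into user-facing text once, instead of repeating the inline map/unwrap in each format! call. A self-contained sketch of that behavior, substituting a plain &str where the real code uses ReasoningEffort:

fn effort_label(effort: Option<&str>) -> String {
    effort
        .map(|eff| format!(" with {eff} reasoning"))
        .unwrap_or_else(|| " with default reasoning".to_string())
}

fn main() {
    // Mirrors the two branches of the info message above.
    assert_eq!(effort_label(Some("high")), " with high reasoning");
    assert_eq!(effort_label(None), " with default reasoning");
}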


@@ -1,5 +1,6 @@
use std::path::PathBuf;
use codex_common::model_presets::ModelPreset;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::Event;
use codex_file_search::FileMatch;
@@ -60,6 +61,12 @@ pub(crate) enum AppEvent {
effort: Option<ReasoningEffort>,
},
/// Open the reasoning selection popup after picking a model.
OpenReasoningPopup {
model: String,
presets: Vec<ModelPreset>,
},
/// Update the current approval policy in the running app and widget.
UpdateAskForApprovalPolicy(AskForApproval),
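
A condensed sketch of the handoff this new variant enables: the stage-1 model action posts OpenReasoningPopup instead of committing a selection, and the app loop routes it back into the widget. Types are reduced to stubs here; the real flow is in app.rs and chatwidget.rs above.

struct ModelPreset;
struct ChatWidget;

enum AppEvent {
    OpenReasoningPopup { model: String, presets: Vec<ModelPreset> },
}

impl ChatWidget {
    fn open_reasoning_popup(&mut self, model: String, presets: Vec<ModelPreset>) {
        // Stage 2: show the per-model reasoning choices.
        println!("model {model}: {} effort presets", presets.len());
    }
}

fn main() {
    let mut chat_widget = ChatWidget;
    let event = AppEvent::OpenReasoningPopup {
        model: "gpt-5-codex".to_string(),
        presets: vec![ModelPreset, ModelPreset, ModelPreset],
    };
    match event {
        AppEvent::OpenReasoningPopup { model, presets } => {
            chat_widget.open_reasoning_popup(model, presets);
        }
    }
}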


@@ -1,3 +1,4 @@
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::path::PathBuf;
@@ -111,6 +112,7 @@ use codex_git_tooling::GhostCommit;
use codex_git_tooling::GitToolingError;
use codex_git_tooling::create_ghost_commit;
use codex_git_tooling::restore_ghost_commit;
use strum::IntoEnumIterator;
const MAX_TRACKED_GHOST_COMMITS: usize = 20;
@@ -283,6 +285,16 @@ fn create_initial_user_message(text: String, image_paths: Vec<PathBuf>) -> Optio
}
impl ChatWidget {
fn model_description_for(slug: &str) -> Option<&'static str> {
if slug.starts_with("gpt-5-codex") {
Some("Optimized for coding tasks with many tools.")
} else if slug.starts_with("gpt-5") {
Some("Broad world knowledge with strong general reasoning.")
} else {
None
}
}
fn flush_answer_stream_with_separator(&mut self) {
if let Some(mut controller) = self.stream_controller.take()
&& let Some(cell) = controller.finalize()
@@ -1579,47 +1591,38 @@ impl ChatWidget {
));
}
/// Open a popup to choose the model preset (model + reasoning effort).
/// Open a popup to choose the model (stage 1). After selecting a model,
/// a second popup is shown to choose the reasoning effort.
pub(crate) fn open_model_popup(&mut self) {
let current_model = self.config.model.clone();
let current_effort = self.config.model_reasoning_effort;
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let presets: Vec<ModelPreset> = builtin_model_presets(auth_mode);
let mut grouped: BTreeMap<&str, Vec<ModelPreset>> = BTreeMap::new();
for preset in presets.into_iter() {
grouped.entry(preset.model).or_default().push(preset);
}
let mut items: Vec<SelectionItem> = Vec::new();
for preset in presets.iter() {
let name = preset.label.to_string();
let description = Some(preset.description.to_string());
let is_current = preset.model == current_model && preset.effort == current_effort;
let model_slug = preset.model.to_string();
let effort = preset.effort;
let current_model = current_model.clone();
for (model_slug, entries) in grouped.into_iter() {
let name = model_slug.to_string();
let description = Self::model_description_for(model_slug)
.map(std::string::ToString::to_string)
.or_else(|| {
entries
.iter()
.find(|preset| !preset.description.is_empty())
.map(|preset| preset.description.to_string())
})
.or_else(|| entries.first().map(|preset| preset.description.to_string()));
let is_current = model_slug == current_model;
let model_slug_string = model_slug.to_string();
let presets_for_model = entries.clone();
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some(model_slug.clone()),
effort: Some(effort),
summary: None,
}));
tx.send(AppEvent::UpdateModel(model_slug.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort));
tx.send(AppEvent::PersistModelSelection {
model: model_slug.clone(),
effort,
tx.send(AppEvent::OpenReasoningPopup {
model: model_slug_string.clone(),
presets: presets_for_model.clone(),
});
tracing::info!(
"New model: {}, New effort: {}, Current model: {}, Current effort: {}",
model_slug.clone(),
effort
.map(|effort| effort.to_string())
.unwrap_or_else(|| "none".to_string()),
current_model,
current_effort
.map(|effort| effort.to_string())
.unwrap_or_else(|| "none".to_string())
);
})];
items.push(SelectionItem {
name,
@@ -1632,10 +1635,127 @@ impl ChatWidget {
}
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some("Select model and reasoning level".to_string()),
subtitle: Some(
"Switch between OpenAI models for this and future Codex CLI session".to_string(),
),
title: Some("Select Model".to_string()),
subtitle: Some("Switch the model for this and future Codex CLI sessions".to_string()),
footer_hint: Some(standard_popup_hint_line()),
items,
..Default::default()
});
}
/// Open a popup to choose the reasoning effort (stage 2) for the given model.
pub(crate) fn open_reasoning_popup(&mut self, model_slug: String, presets: Vec<ModelPreset>) {
let default_effort = ReasoningEffortConfig::default();
let has_none_choice = presets.iter().any(|preset| preset.effort.is_none());
struct EffortChoice {
stored: Option<ReasoningEffortConfig>,
display: ReasoningEffortConfig,
}
let mut choices: Vec<EffortChoice> = Vec::new();
for effort in ReasoningEffortConfig::iter() {
if presets.iter().any(|preset| preset.effort == Some(effort)) {
choices.push(EffortChoice {
stored: Some(effort),
display: effort,
});
}
if has_none_choice && default_effort == effort {
choices.push(EffortChoice {
stored: None,
display: effort,
});
}
}
if choices.is_empty() {
choices.push(EffortChoice {
stored: Some(default_effort),
display: default_effort,
});
}
let default_choice: Option<ReasoningEffortConfig> = if has_none_choice {
None
} else if choices
.iter()
.any(|choice| choice.stored == Some(default_effort))
{
Some(default_effort)
} else {
choices
.iter()
.find_map(|choice| choice.stored)
.or(Some(default_effort))
};
let is_current_model = self.config.model == model_slug;
let highlight_choice = if is_current_model {
self.config.model_reasoning_effort
} else {
default_choice
};
let mut items: Vec<SelectionItem> = Vec::new();
for choice in choices.iter() {
let effort = choice.display;
let mut effort_label = effort.to_string();
if let Some(first) = effort_label.get_mut(0..1) {
first.make_ascii_uppercase();
}
if choice.stored == default_choice {
effort_label.push_str(" (default)");
}
let description = presets
.iter()
.find(|preset| preset.effort == choice.stored && !preset.description.is_empty())
.map(|preset| preset.description.to_string())
.or_else(|| {
presets
.iter()
.find(|preset| preset.effort == choice.stored)
.map(|preset| preset.description.to_string())
});
let model_for_action = model_slug.clone();
let effort_for_action = choice.stored;
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some(model_for_action.clone()),
effort: Some(effort_for_action),
summary: None,
}));
tx.send(AppEvent::UpdateModel(model_for_action.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort_for_action));
tx.send(AppEvent::PersistModelSelection {
model: model_for_action.clone(),
effort: effort_for_action,
});
tracing::info!(
"Selected model: {}, Selected effort: {}",
model_for_action,
effort_for_action
.map(|e| e.to_string())
.unwrap_or_else(|| "default".to_string())
);
})];
items.push(SelectionItem {
name: effort_label,
description,
is_current: is_current_model && choice.stored == highlight_choice,
actions,
dismiss_on_select: true,
search_value: None,
});
}
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some("Select Reasoning Level".to_string()),
subtitle: Some(format!("Reasoning for model {model_slug}")),
footer_hint: Some(standard_popup_hint_line()),
items,
..Default::default()
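
The core of stage 1 is the BTreeMap grouping above: the flat preset list collapses to one entry per model slug, and picking a slug defers everything else to stage 2. A condensed, runnable sketch using (model, effort) pairs in place of ModelPreset values:

use std::collections::BTreeMap;

fn main() {
    let presets = [
        ("gpt-5-codex", "low"),
        ("gpt-5-codex", "medium"),
        ("gpt-5-codex", "high"),
        ("gpt-5", "minimal"),
        ("gpt-5", "low"),
        ("gpt-5", "medium"),
        ("gpt-5", "high"),
    ];

    let mut grouped: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
    for (model, effort) in presets {
        grouped.entry(model).or_default().push(effort);
    }

    // BTreeMap iterates in key order, so gpt-5 lists before gpt-5-codex,
    // matching the model-selection snapshot below.
    for (model, efforts) in &grouped {
        println!("{model}: {efforts:?}");
    }

    // Stage 2 uppercases the first letter of each effort label in place,
    // as open_reasoning_popup does above.
    let mut label = String::from("medium");
    if let Some(first) = label.get_mut(0..1) {
        first.make_ascii_uppercase();
    }
    assert_eq!(label, "Medium");
}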


@@ -0,0 +1,13 @@
---
source: tui/src/chatwidget/tests.rs
expression: popup
---
Select Reasoning Level
Reasoning for model gpt-5-codex
1. Low Fastest responses with limited reasoning
2. Medium (default) Dynamically adjusts reasoning based on the task
3. High (current) Maximizes reasoning depth for complex or ambiguous
problems
Press enter to confirm or esc to go back
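
The "(default)" tag on Medium here comes from the fallback in open_reasoning_popup: gpt-5-codex ships no preset with effort None, so the highlighted default falls back to the global default effort, while "(current)" marks the session's configured effort (High in this test). A simplified sketch of the no-None branch, with the effort enum local to the example and Medium as the global default per this snapshot:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Effort {
    Low,
    Medium,
    High,
}

fn default_choice(available: &[Effort], global_default: Effort) -> Option<Effort> {
    // Prefer the global default when the model offers it; otherwise take
    // the first available effort, falling back to the global default.
    if available.contains(&global_default) {
        Some(global_default)
    } else {
        available.first().copied().or(Some(global_default))
    }
}

fn main() {
    let available = [Effort::Low, Effort::Medium, Effort::High];
    assert_eq!(default_choice(&available, Effort::Medium), Some(Effort::Medium));
}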


@@ -0,0 +1,12 @@
---
source: tui/src/chatwidget/tests.rs
expression: popup
---
Select Model
Switch the model for this and future Codex CLI sessions
1. gpt-5 Broad world knowledge with strong general
reasoning.
2. gpt-5-codex (current) Optimized for coding tasks with many tools.
Press enter to confirm or esc to go back


@@ -936,6 +936,65 @@ fn render_bottom_first_row(chat: &ChatWidget, width: u16) -> String {
String::new()
}
fn render_bottom_popup(chat: &ChatWidget, width: u16) -> String {
let height = chat.desired_height(width);
let area = Rect::new(0, 0, width, height);
let mut buf = Buffer::empty(area);
(chat).render_ref(area, &mut buf);
let mut lines: Vec<String> = (0..area.height)
.map(|row| {
let mut line = String::new();
for col in 0..area.width {
let symbol = buf[(area.x + col, area.y + row)].symbol();
if symbol.is_empty() {
line.push(' ');
} else {
line.push_str(symbol);
}
}
line.trim_end().to_string()
})
.collect();
while lines.first().is_some_and(|line| line.trim().is_empty()) {
lines.remove(0);
}
while lines.last().is_some_and(|line| line.trim().is_empty()) {
lines.pop();
}
lines.join("\n")
}
#[test]
fn model_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.config.model = "gpt-5-codex".to_string();
chat.open_model_popup();
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("model_selection_popup", popup);
}
#[test]
fn model_reasoning_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.config.model = "gpt-5-codex".to_string();
chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High);
let presets = builtin_model_presets(None)
.into_iter()
.filter(|preset| preset.model == "gpt-5-codex")
.collect::<Vec<_>>();
chat.open_reasoning_popup("gpt-5-codex".to_string(), presets);
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn exec_history_extends_previous_when_consecutive() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();