feat: support models with single reasoning effort (#6300)

This commit is contained in:
Thibault Sottiaux
2025-11-05 23:06:45 -08:00
committed by GitHub
parent 63e1ef25af
commit 667e841d3e
8 changed files with 112 additions and 11 deletions

View File

@@ -1544,7 +1544,8 @@ impl CodexMessageProcessor {
async fn list_models(&self, request_id: RequestId, params: ModelListParams) {
let ModelListParams { limit, cursor } = params;
let models = supported_models();
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let models = supported_models(auth_mode);
let total = models.len();
if total == 0 {

View File

@@ -1,11 +1,12 @@
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models() -> Vec<Model> {
builtin_model_presets(None)
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
builtin_model_presets(auth_mode)
.into_iter()
.map(model_from_preset)
.collect()

View File

@@ -49,7 +49,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for coding tasks with many tools.".to_string(),
description: "Optimized for codex.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,

View File

@@ -34,7 +34,7 @@ const PRESETS: &[ModelPreset] = &[
id: "gpt-5-codex",
model: "gpt-5-codex",
display_name: "gpt-5-codex",
description: "Optimized for coding tasks with many tools.",
description: "Optimized for codex.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
@@ -52,6 +52,24 @@ const PRESETS: &[ModelPreset] = &[
],
is_default: true,
},
ModelPreset {
id: "desertfox",
model: "desertfox",
display_name: "desertfox",
description: "???",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
},
ModelPreset {
id: "gpt-5",
model: "gpt-5",
@@ -80,8 +98,13 @@ const PRESETS: &[ModelPreset] = &[
},
];
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.to_vec()
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
    // The "desertfox" preset is gated behind ChatGPT authentication; every
    // other preset is offered regardless of how the user is signed in.
    let chatgpt_auth = matches!(auth_mode, Some(AuthMode::ChatGPT));
    let mut presets = Vec::with_capacity(PRESETS.len());
    for preset in PRESETS {
        if chatgpt_auth || preset.id != "desertfox" {
            presets.push(*preset);
        }
    }
    presets
}
#[cfg(test)]

View File

@@ -1798,6 +1798,7 @@ impl ChatWidget {
};
let is_current = preset.model == current_model;
let preset_for_action = preset;
let single_supported_effort = preset_for_action.supported_reasoning_efforts.len() == 1;
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::OpenReasoningPopup {
model: preset_for_action,
@@ -1808,7 +1809,7 @@ impl ChatWidget {
description,
is_current,
actions,
dismiss_on_select: false,
dismiss_on_select: single_supported_effort,
..Default::default()
});
}
@@ -1847,6 +1848,15 @@ impl ChatWidget {
});
}
if choices.len() == 1 {
if let Some(effort) = choices.first().and_then(|c| c.stored) {
self.apply_model_and_effort(preset.model.to_string(), Some(effort));
} else {
self.apply_model_and_effort(preset.model.to_string(), None);
}
return;
}
let default_choice: Option<ReasoningEffortConfig> = choices
.iter()
.any(|choice| choice.stored == Some(default_effort))
@@ -1885,7 +1895,7 @@ impl ChatWidget {
let warning = "⚠ High reasoning effort can quickly consume Plus plan rate limits.";
let show_warning =
preset.model == "gpt-5-codex" && effort == ReasoningEffortConfig::High;
preset.model.starts_with("gpt-5-codex") && effort == ReasoningEffortConfig::High;
let selected_description = show_warning.then(|| {
description
.as_ref()
@@ -1942,6 +1952,32 @@ impl ChatWidget {
});
}
/// Applies a model/effort selection end-to-end without showing a picker:
/// overrides the turn context for the active session, syncs the widget's
/// displayed model and effort, and emits a persistence event.
/// `effort == None` means "use the model's default reasoning effort".
fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
    self.app_event_tx
        .send(AppEvent::CodexOp(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: None,
            sandbox_policy: None,
            model: Some(model.clone()),
            // Outer `Some` marks an explicit override even when the inner
            // effort is `None` (i.e. "reset to the model default").
            effort: Some(effort),
            summary: None,
        }));
    // Keep the UI state in sync with the override sent above.
    self.app_event_tx.send(AppEvent::UpdateModel(model.clone()));
    self.app_event_tx
        .send(AppEvent::UpdateReasoningEffort(effort));
    // NOTE(review): presumably stores the choice for future sessions —
    // confirm against the PersistModelSelection handler.
    self.app_event_tx.send(AppEvent::PersistModelSelection {
        model: model.clone(),
        effort,
    });
    tracing::info!(
        "Selected model: {}, Selected effort: {}",
        model,
        effort
            .map(|e| e.to_string())
            .unwrap_or_else(|| "default".to_string())
    );
}
/// Open a popup to choose the approvals mode (ask for approval policy + sandbox policy).
pub(crate) fn open_approvals_popup(&mut self) {
let current_approval = self.config.approval_policy;

View File

@@ -5,7 +5,7 @@ expression: popup
Select Model and Effort
Switch the model for this and future Codex CLI sessions
1. gpt-5-codex (current) Optimized for coding tasks with many tools.
1. gpt-5-codex (current) Optimized for codex.
2. gpt-5 Broad world knowledge with strong general
reasoning.

View File

@@ -6,7 +6,7 @@ expression: popup
Approaching rate limits
You've used over 90% of your limit. Switch to gpt-5-codex for lower credit u
1. Switch to gpt-5-codex Optimized for coding tasks with many tools.
1. Switch to gpt-5-codex Optimized for codex.
2. Keep current model
Press enter to confirm or esc to go back

View File

@@ -5,6 +5,8 @@ use crate::test_backend::VT100Backend;
use crate::tui::FrameRequester;
use assert_matches::assert_matches;
use codex_common::approval_presets::builtin_approval_presets;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::config::Config;
@@ -1491,6 +1493,44 @@ fn model_reasoning_selection_popup_snapshot() {
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn single_reasoning_option_skips_selection() {
    // When a preset supports exactly one reasoning effort, opening the
    // reasoning popup should bypass the selection UI and apply that effort
    // directly.
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
    // A preset with a single supported effort (High). `static` because
    // `supported_reasoning_efforts` borrows with a 'static lifetime.
    static SINGLE_EFFORT: [ReasoningEffortPreset; 1] = [ReasoningEffortPreset {
        effort: ReasoningEffortConfig::High,
        description: "Maximizes reasoning depth for complex or ambiguous problems",
    }];
    let preset = ModelPreset {
        id: "model-with-single-reasoning",
        model: "model-with-single-reasoning",
        display_name: "model-with-single-reasoning",
        description: "",
        default_reasoning_effort: ReasoningEffortConfig::High,
        supported_reasoning_efforts: &SINGLE_EFFORT,
        is_default: false,
    };
    chat.open_reasoning_popup(preset);
    let popup = render_bottom_popup(&chat, 80);
    // The selection popup must not be rendered for a single-option preset.
    assert!(
        !popup.contains("Select Reasoning Level"),
        "expected reasoning selection popup to be skipped"
    );
    // Drain every pending app event so we can inspect what was emitted.
    let mut events = Vec::new();
    while let Ok(ev) = rx.try_recv() {
        events.push(ev);
    }
    // The lone supported effort (High) should have been applied automatically.
    assert!(
        events
            .iter()
            .any(|ev| matches!(ev, AppEvent::UpdateReasoningEffort(Some(effort)) if *effort == ReasoningEffortConfig::High)),
        "expected reasoning effort to be applied automatically; events: {events:?}"
    );
}
#[test]
fn feedback_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();