diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index ec96d5dc..e280e563 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -1544,7 +1544,8 @@ impl CodexMessageProcessor { async fn list_models(&self, request_id: RequestId, params: ModelListParams) { let ModelListParams { limit, cursor } = params; - let models = supported_models(); + let auth_mode = self.auth_manager.auth().map(|auth| auth.mode); + let models = supported_models(auth_mode); let total = models.len(); if total == 0 { diff --git a/codex-rs/app-server/src/models.rs b/codex-rs/app-server/src/models.rs index be47cbc2..d03795c2 100644 --- a/codex-rs/app-server/src/models.rs +++ b/codex-rs/app-server/src/models.rs @@ -1,11 +1,12 @@ +use codex_app_server_protocol::AuthMode; use codex_app_server_protocol::Model; use codex_app_server_protocol::ReasoningEffortOption; use codex_common::model_presets::ModelPreset; use codex_common::model_presets::ReasoningEffortPreset; use codex_common::model_presets::builtin_model_presets; -pub fn supported_models() -> Vec<Model> { - builtin_model_presets(None) +pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> { + builtin_model_presets(auth_mode) .into_iter() .map(model_from_preset) .collect() diff --git a/codex-rs/app-server/tests/suite/model_list.rs b/codex-rs/app-server/tests/suite/model_list.rs index b652121f..66f60777 100644 --- a/codex-rs/app-server/tests/suite/model_list.rs +++ b/codex-rs/app-server/tests/suite/model_list.rs @@ -49,7 +49,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> { id: "gpt-5-codex".to_string(), model: "gpt-5-codex".to_string(), display_name: "gpt-5-codex".to_string(), - description: "Optimized for coding tasks with many tools.".to_string(), + description: "Optimized for codex.".to_string(), supported_reasoning_efforts: vec![ ReasoningEffortOption { reasoning_effort: ReasoningEffort::Low, 
diff --git a/codex-rs/common/src/model_presets.rs b/codex-rs/common/src/model_presets.rs index b6d06438..5337f68b 100644 --- a/codex-rs/common/src/model_presets.rs +++ b/codex-rs/common/src/model_presets.rs @@ -34,7 +34,7 @@ const PRESETS: &[ModelPreset] = &[ id: "gpt-5-codex", model: "gpt-5-codex", display_name: "gpt-5-codex", - description: "Optimized for coding tasks with many tools.", + description: "Optimized for codex.", default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: &[ ReasoningEffortPreset { @@ -52,6 +52,24 @@ ], is_default: true, }, + ModelPreset { + id: "desertfox", + model: "desertfox", + display_name: "desertfox", + description: "???", + default_reasoning_effort: ReasoningEffort::Medium, + supported_reasoning_efforts: &[ + ReasoningEffortPreset { + effort: ReasoningEffort::Medium, + description: "Dynamically adjusts reasoning based on the task", + }, + ReasoningEffortPreset { + effort: ReasoningEffort::High, + description: "Maximizes reasoning depth for complex or ambiguous problems", + }, + ], + is_default: false, + }, ModelPreset { id: "gpt-5", model: "gpt-5", @@ -80,8 +98,13 @@ const PRESETS: &[ModelPreset] = &[ }, ]; -pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> { - PRESETS.to_vec() +pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> { + let allow_desertfox = matches!(auth_mode, Some(AuthMode::ChatGPT)); + PRESETS + .iter() + .filter(|preset| allow_desertfox || preset.id != "desertfox") + .copied() + .collect() } #[cfg(test)] diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index aa043c6d..f852ae9f 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -1798,6 +1798,7 @@ impl ChatWidget { }; let is_current = preset.model == current_model; let preset_for_action = preset; + let single_supported_effort = preset_for_action.supported_reasoning_efforts.len() == 1; let actions: Vec = vec![Box::new(move |tx| { 
tx.send(AppEvent::OpenReasoningPopup { model: preset_for_action, @@ -1808,7 +1809,7 @@ impl ChatWidget { }); } description, is_current, actions, - dismiss_on_select: false, + dismiss_on_select: single_supported_effort, ..Default::default() }); } @@ -1847,6 +1848,15 @@ impl ChatWidget { }); } + if choices.len() == 1 { + if let Some(effort) = choices.first().and_then(|c| c.stored) { + self.apply_model_and_effort(preset.model.to_string(), Some(effort)); + } else { + self.apply_model_and_effort(preset.model.to_string(), None); + } + return; + } + let default_choice: Option = choices .iter() .any(|choice| choice.stored == Some(default_effort)) @@ -1885,7 +1895,7 @@ impl ChatWidget { let warning = "⚠ High reasoning effort can quickly consume Plus plan rate limits."; let show_warning = - preset.model == "gpt-5-codex" && effort == ReasoningEffortConfig::High; + preset.model.starts_with("gpt-5-codex") && effort == ReasoningEffortConfig::High; let selected_description = show_warning.then(|| { description .as_ref() @@ -1942,6 +1952,32 @@ impl ChatWidget { }); } + fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) { + self.app_event_tx + .send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: Some(model.clone()), + effort: Some(effort), + summary: None, + })); + self.app_event_tx.send(AppEvent::UpdateModel(model.clone())); + self.app_event_tx + .send(AppEvent::UpdateReasoningEffort(effort)); + self.app_event_tx.send(AppEvent::PersistModelSelection { + model: model.clone(), + effort, + }); + tracing::info!( + "Selected model: {}, Selected effort: {}", + model, + effort + .map(|e| e.to_string()) + .unwrap_or_else(|| "default".to_string()) + ); + } + /// Open a popup to choose the approvals mode (ask for approval policy + sandbox policy). 
pub(crate) fn open_approvals_popup(&mut self) { let current_approval = self.config.approval_policy; diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap index d4ca0491..70587b9b 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap +++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap @@ -5,7 +5,7 @@ expression: popup Select Model and Effort Switch the model for this and future Codex CLI sessions -› 1. gpt-5-codex (current) Optimized for coding tasks with many tools. +› 1. gpt-5-codex (current) Optimized for codex. 2. gpt-5 Broad world knowledge with strong general reasoning. diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__rate_limit_switch_prompt_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__rate_limit_switch_prompt_popup.snap index 39b07ee2..32fb3a7f 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__rate_limit_switch_prompt_popup.snap +++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__rate_limit_switch_prompt_popup.snap @@ -6,7 +6,7 @@ expression: popup Approaching rate limits You've used over 90% of your limit. Switch to gpt-5-codex for lower credit u -› 1. Switch to gpt-5-codex Optimized for coding tasks with many tools. +› 1. Switch to gpt-5-codex Optimized for codex. 2. 
Keep current model Press enter to confirm or esc to go back diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs index 91d66acd..22cc1e92 100644 --- a/codex-rs/tui/src/chatwidget/tests.rs +++ b/codex-rs/tui/src/chatwidget/tests.rs @@ -5,6 +5,8 @@ use crate::test_backend::VT100Backend; use crate::tui::FrameRequester; use assert_matches::assert_matches; use codex_common::approval_presets::builtin_approval_presets; +use codex_common::model_presets::ModelPreset; +use codex_common::model_presets::ReasoningEffortPreset; use codex_core::AuthManager; use codex_core::CodexAuth; use codex_core::config::Config; @@ -1491,6 +1493,44 @@ fn model_reasoning_selection_popup_snapshot() { assert_snapshot!("model_reasoning_selection_popup", popup); } +#[test] +fn single_reasoning_option_skips_selection() { + let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(); + + static SINGLE_EFFORT: [ReasoningEffortPreset; 1] = [ReasoningEffortPreset { + effort: ReasoningEffortConfig::High, + description: "Maximizes reasoning depth for complex or ambiguous problems", + }]; + let preset = ModelPreset { + id: "model-with-single-reasoning", + model: "model-with-single-reasoning", + display_name: "model-with-single-reasoning", + description: "", + default_reasoning_effort: ReasoningEffortConfig::High, + supported_reasoning_efforts: &SINGLE_EFFORT, + is_default: false, + }; + chat.open_reasoning_popup(preset); + + let popup = render_bottom_popup(&chat, 80); + assert!( + !popup.contains("Select Reasoning Level"), + "expected reasoning selection popup to be skipped" + ); + + let mut events = Vec::new(); + while let Ok(ev) = rx.try_recv() { + events.push(ev); + } + + assert!( + events + .iter() + .any(|ev| matches!(ev, AppEvent::UpdateReasoningEffort(Some(effort)) if *effort == ReasoningEffortConfig::High)), + "expected reasoning effort to be applied automatically; events: {events:?}" + ); +} + #[test] fn feedback_selection_popup_snapshot() { let (mut chat, _rx, 
_op_rx) = make_chatwidget_manual();