diff --git a/codex-rs/common/src/model_presets.rs b/codex-rs/common/src/model_presets.rs
index cf536119..8606fcaf 100644
--- a/codex-rs/common/src/model_presets.rs
+++ b/codex-rs/common/src/model_presets.rs
@@ -50,10 +50,24 @@ pub fn builtin_model_presets() -> &'static [ModelPreset] {
            effort: Some(ReasoningEffort::High),
        },
        ModelPreset {
-            id: "gpt-5-high-new",
-            label: "gpt-5 high new",
-            description: "— our latest release tuned to rely on the model's built-in reasoning defaults",
-            model: "gpt-5-high-new",
+            id: "swiftfox-low",
+            label: "swiftfox low",
+            description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
+            model: "swiftfox-low",
+            effort: None,
+        },
+        ModelPreset {
+            id: "swiftfox-medium",
+            label: "swiftfox medium",
+            description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
+            model: "swiftfox-medium",
+            effort: None,
+        },
+        ModelPreset {
+            id: "swiftfox-high",
+            label: "swiftfox high",
+            description: "— maximizes reasoning depth for complex or ambiguous problems",
+            model: "swiftfox-high",
            effort: None,
        },
    ];
diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs
index 53e246d2..4e010ae7 100644
--- a/codex-rs/core/src/config.rs
+++ b/codex-rs/core/src/config.rs
@@ -33,7 +33,7 @@ use toml_edit::DocumentMut;
 
 const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
 const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5";
-pub const GPT5_HIGH_MODEL: &str = "gpt-5-high-new";
+pub const SWIFTFOX_MEDIUM_MODEL: &str = "swiftfox-medium";
 
 /// Maximum number of bytes of the documentation that will be embedded. Larger
 /// files are *silently truncated* to this size so we do not take up too much of
@@ -1184,7 +1184,7 @@ exclude_slash_tmp = true
         persist_model_selection(
             codex_home.path(),
             None,
-            "gpt-5-high-new",
+            "swiftfox-high",
             Some(ReasoningEffort::High),
         )
         .await?;
@@ -1193,7 +1193,7 @@ exclude_slash_tmp = true
             tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
         let parsed: ConfigToml = toml::from_str(&serialized)?;
 
-        assert_eq!(parsed.model.as_deref(), Some("gpt-5-high-new"));
+        assert_eq!(parsed.model.as_deref(), Some("swiftfox-high"));
         assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));
 
         Ok(())
@@ -1247,8 +1247,8 @@ model = "gpt-4.1"
 
         persist_model_selection(
             codex_home.path(),
             Some("dev"),
-            "gpt-5-high-new",
-            Some(ReasoningEffort::Low),
+            "swiftfox-medium",
+            Some(ReasoningEffort::Medium),
         )
         .await?;
@@ -1260,8 +1260,11 @@ model = "gpt-4.1"
             .get("dev")
             .expect("profile should be created");
 
-        assert_eq!(profile.model.as_deref(), Some("gpt-5-high-new"));
-        assert_eq!(profile.model_reasoning_effort, Some(ReasoningEffort::Low));
+        assert_eq!(profile.model.as_deref(), Some("swiftfox-medium"));
+        assert_eq!(
+            profile.model_reasoning_effort,
+            Some(ReasoningEffort::Medium)
+        );
 
         Ok(())
     }
diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs
index 7971ccf4..a9528dc8 100644
--- a/codex-rs/tui/src/lib.rs
+++ b/codex-rs/tui/src/lib.rs
@@ -11,7 +11,7 @@ use codex_core::RolloutRecorder;
 use codex_core::config::Config;
 use codex_core::config::ConfigOverrides;
 use codex_core::config::ConfigToml;
-use codex_core::config::GPT5_HIGH_MODEL;
+use codex_core::config::SWIFTFOX_MEDIUM_MODEL;
 use codex_core::config::find_codex_home;
 use codex_core::config::load_config_as_toml_with_cli_overrides;
 use codex_core::config::persist_model_selection;
@@ -380,7 +380,7 @@ async fn run_ratatui_app(
 
     let switch_to_new_model = upgrade_decision == ModelUpgradeDecision::Switch;
     if switch_to_new_model {
-        config.model = GPT5_HIGH_MODEL.to_owned();
+        config.model = SWIFTFOX_MEDIUM_MODEL.to_owned();
         config.model_reasoning_effort = None;
         if let Err(e) = persist_model_selection(
             &config.codex_home,
diff --git a/codex-rs/tui/src/new_model_popup.rs b/codex-rs/tui/src/new_model_popup.rs
index ff09573e..0d709284 100644
--- a/codex-rs/tui/src/new_model_popup.rs
+++ b/codex-rs/tui/src/new_model_popup.rs
@@ -1,7 +1,7 @@
 use crate::tui::FrameRequester;
 use crate::tui::Tui;
 use crate::tui::TuiEvent;
-use codex_core::config::GPT5_HIGH_MODEL;
+use codex_core::config::SWIFTFOX_MEDIUM_MODEL;
 use color_eyre::eyre::Result;
 use crossterm::event::KeyCode;
 use crossterm::event::KeyEvent;
@@ -83,7 +83,8 @@ impl WidgetRef for &ModelUpgradePopup {
 
         let mut lines: Vec<Line<'static>> = vec![
             String::new().into(),
-            format!(" Codex is now powered by {GPT5_HIGH_MODEL}, a new model that is").into(),
+            format!(" Codex is now powered by {SWIFTFOX_MEDIUM_MODEL}, a new model that is")
+                .into(),
             Line::from(vec![
                 " ".into(),
                 "faster, a better collaborator, ".bold(),
@@ -108,7 +109,7 @@
         lines.push(create_option(
             0,
             ModelUpgradeOption::TryNewModel,
-            &format!("Yes, switch me to {GPT5_HIGH_MODEL}"),
+            &format!("Yes, switch me to {SWIFTFOX_MEDIUM_MODEL}"),
         ));
         lines.push(create_option(
             1,