diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs
index 2fe52bf4..55405736 100644
--- a/codex-rs/core/src/config.rs
+++ b/codex-rs/core/src/config.rs
@@ -46,7 +46,10 @@ use toml_edit::DocumentMut;
 use toml_edit::Item as TomlItem;
 use toml_edit::Table as TomlTable;
 
-const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
+#[cfg(target_os = "windows")]
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
+#[cfg(not(target_os = "windows"))]
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
 const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
 
 pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";
diff --git a/codex-rs/core/tests/suite/compact_resume_fork.rs b/codex-rs/core/tests/suite/compact_resume_fork.rs
index 690e1aab..8197bed1 100644
--- a/codex-rs/core/tests/suite/compact_resume_fork.rs
+++ b/codex-rs/core/tests/suite/compact_resume_fork.rs
@@ -17,6 +17,7 @@ use codex_core::NewConversation;
 use codex_core::built_in_model_providers;
 use codex_core::codex::compact::SUMMARIZATION_PROMPT;
 use codex_core::config::Config;
+use codex_core::config::OPENAI_DEFAULT_MODEL;
 use codex_core::protocol::ConversationPathResponseEvent;
 use codex_core::protocol::EventMsg;
 use codex_core::protocol::InputItem;
@@ -131,9 +132,10 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         .as_str()
         .unwrap_or_default()
         .to_string();
+    let expected_model = OPENAI_DEFAULT_MODEL;
     let user_turn_1 = json!(
       {
-        "model": "gpt-5-codex",
+        "model": expected_model,
         "instructions": prompt,
         "input": [
           {
@@ -182,7 +184,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
     });
     let compact_1 = json!(
       {
-        "model": "gpt-5-codex",
+        "model": expected_model,
         "instructions": prompt,
         "input": [
           {
@@ -251,7 +253,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
     });
     let user_turn_2_after_compact = json!(
       {
-        "model": "gpt-5-codex",
+        "model": expected_model,
         "instructions": prompt,
         "input": [
           {
@@ -316,7 +318,7 @@ SUMMARY_ONLY_CONTEXT"
     });
     let usert_turn_3_after_resume = json!(
       {
-        "model": "gpt-5-codex",
+        "model": expected_model,
         "instructions": prompt,
         "input": [
           {
@@ -401,7 +403,7 @@ SUMMARY_ONLY_CONTEXT"
     });
     let user_turn_3_after_fork = json!(
       {
-        "model": "gpt-5-codex",
+        "model": expected_model,
         "instructions": prompt,
         "input": [
           {
diff --git a/codex-rs/core/tests/suite/prompt_caching.rs b/codex-rs/core/tests/suite/prompt_caching.rs
index bc66be18..21df40b7 100644
--- a/codex-rs/core/tests/suite/prompt_caching.rs
+++ b/codex-rs/core/tests/suite/prompt_caching.rs
@@ -4,6 +4,7 @@ use codex_core::CodexAuth;
 use codex_core::ConversationManager;
 use codex_core::ModelProviderInfo;
 use codex_core::built_in_model_providers;
+use codex_core::config::OPENAI_DEFAULT_MODEL;
 use codex_core::model_family::find_family_for_model;
 use codex_core::protocol::AskForApproval;
 use codex_core::protocol::EventMsg;
@@ -18,6 +19,7 @@ use core_test_support::load_default_config_for_test;
 use core_test_support::load_sse_fixture_with_id;
 use core_test_support::skip_if_no_network;
 use core_test_support::wait_for_event;
+use std::collections::HashMap;
 use tempfile::TempDir;
 use wiremock::Mock;
 use wiremock::MockServer;
@@ -219,13 +221,26 @@ async fn prompt_tools_are_consistent_across_requests() {
 
     // our internal implementation is responsible for keeping tools in sync
     // with the OpenAI schema, so we just verify the tool presence here
-    let expected_tools_names: &[&str] = &[
-        "shell",
-        "update_plan",
-        "apply_patch",
-        "read_file",
-        "view_image",
-    ];
+    let tools_by_model: HashMap<&'static str, Vec<&'static str>> = HashMap::from([
+        (
+            "gpt-5",
+            vec!["shell", "update_plan", "apply_patch", "view_image"],
+        ),
+        (
+            "gpt-5-codex",
+            vec![
+                "shell",
+                "update_plan",
+                "apply_patch",
+                "read_file",
+                "view_image",
+            ],
+        ),
+    ]);
+    let expected_tools_names = tools_by_model
+        .get(OPENAI_DEFAULT_MODEL)
+        .unwrap_or_else(|| panic!("expected tools to be defined for model {OPENAI_DEFAULT_MODEL}"))
+        .as_slice();
     let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
     assert_eq!(
         body0["instructions"],
diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs
index 2071832f..ef24c82f 100644
--- a/codex-rs/tui/src/chatwidget/tests.rs
+++ b/codex-rs/tui/src/chatwidget/tests.rs
@@ -8,6 +8,7 @@ use codex_core::CodexAuth;
 use codex_core::config::Config;
 use codex_core::config::ConfigOverrides;
 use codex_core::config::ConfigToml;
+use codex_core::config::OPENAI_DEFAULT_MODEL;
 use codex_core::protocol::AgentMessageDeltaEvent;
 use codex_core::protocol::AgentMessageEvent;
 use codex_core::protocol::AgentReasoningDeltaEvent;
@@ -1101,6 +1102,11 @@ fn disabled_slash_command_while_task_running_snapshot() {
 
 #[tokio::test]
 async fn binary_size_transcript_snapshot() {
+    // The snapshot in this test depends on gpt-5-codex. Skip for now; we may
+    // consider creating snapshots for other models in the future.
+    if OPENAI_DEFAULT_MODEL != "gpt-5-codex" {
+        return;
+    }
     let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
 
     // Set up a VT100 test terminal to capture ANSI visual output