set gpt-5 as default model for Windows users (#4676)
Codex isn’t great yet on Windows outside of WSL. While we’ve merged https://github.com/openai/codex/pull/4269 to reduce repetitive manual approvals on read-only commands, users still seem to hit more issues with GPT-5-Codex than with GPT-5 on Windows. This change makes GPT-5 the default for Windows users while we continue to improve the CLI harness and the GPT-5-Codex model on Windows.
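The mechanism, visible in the first hunk below, is Rust's conditional compilation: the same constant is declared twice, each behind a `cfg` attribute, so exactly one definition is compiled for any given target and call sites don't change. A minimal, self-contained sketch of the pattern (the `main` function is illustrative only, not part of this change):

    // Exactly one of these definitions is compiled, selected by the build
    // target; callers simply reference OPENAI_DEFAULT_MODEL and get the
    // platform-appropriate default.
    #[cfg(target_os = "windows")]
    pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
    #[cfg(not(target_os = "windows"))]
    pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";

    fn main() {
        // cfg!() evaluates the same condition as a boolean expression, which
        // is how tests can branch on the platform at runtime.
        println!(
            "default model: {OPENAI_DEFAULT_MODEL} (windows: {})",
            cfg!(target_os = "windows")
        );
    }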
@@ -46,7 +46,10 @@ use toml_edit::DocumentMut;
 use toml_edit::Item as TomlItem;
 use toml_edit::Table as TomlTable;
 
-const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
+#[cfg(target_os = "windows")]
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
+#[cfg(not(target_os = "windows"))]
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
 const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
 pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";
 
@@ -17,6 +17,7 @@ use codex_core::NewConversation;
 use codex_core::built_in_model_providers;
 use codex_core::codex::compact::SUMMARIZATION_PROMPT;
 use codex_core::config::Config;
+use codex_core::config::OPENAI_DEFAULT_MODEL;
 use codex_core::protocol::ConversationPathResponseEvent;
 use codex_core::protocol::EventMsg;
 use codex_core::protocol::InputItem;
@@ -131,9 +132,10 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         .as_str()
         .unwrap_or_default()
         .to_string();
+    let expected_model = OPENAI_DEFAULT_MODEL;
     let user_turn_1 = json!(
         {
-            "model": "gpt-5-codex",
+            "model": expected_model,
             "instructions": prompt,
             "input": [
                 {
@@ -182,7 +184,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
     });
     let compact_1 = json!(
         {
-            "model": "gpt-5-codex",
+            "model": expected_model,
             "instructions": prompt,
             "input": [
                 {
@@ -251,7 +253,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
     });
     let user_turn_2_after_compact = json!(
         {
-            "model": "gpt-5-codex",
+            "model": expected_model,
             "instructions": prompt,
             "input": [
                 {
@@ -316,7 +318,7 @@ SUMMARY_ONLY_CONTEXT"
     });
     let usert_turn_3_after_resume = json!(
         {
-            "model": "gpt-5-codex",
+            "model": expected_model,
             "instructions": prompt,
             "input": [
                 {
@@ -401,7 +403,7 @@ SUMMARY_ONLY_CONTEXT"
     });
     let user_turn_3_after_fork = json!(
         {
-            "model": "gpt-5-codex",
+            "model": expected_model,
             "instructions": prompt,
             "input": [
                 {
@@ -4,6 +4,7 @@ use codex_core::CodexAuth;
 use codex_core::ConversationManager;
 use codex_core::ModelProviderInfo;
 use codex_core::built_in_model_providers;
+use codex_core::config::OPENAI_DEFAULT_MODEL;
 use codex_core::model_family::find_family_for_model;
 use codex_core::protocol::AskForApproval;
 use codex_core::protocol::EventMsg;
@@ -18,6 +19,7 @@ use core_test_support::load_default_config_for_test;
 use core_test_support::load_sse_fixture_with_id;
 use core_test_support::skip_if_no_network;
 use core_test_support::wait_for_event;
+use std::collections::HashMap;
 use tempfile::TempDir;
 use wiremock::Mock;
 use wiremock::MockServer;
@@ -219,13 +221,26 @@ async fn prompt_tools_are_consistent_across_requests() {
 
     // our internal implementation is responsible for keeping tools in sync
     // with the OpenAI schema, so we just verify the tool presence here
-    let expected_tools_names: &[&str] = &[
-        "shell",
-        "update_plan",
-        "apply_patch",
-        "read_file",
-        "view_image",
-    ];
+    let tools_by_model: HashMap<&'static str, Vec<&'static str>> = HashMap::from([
+        (
+            "gpt-5",
+            vec!["shell", "update_plan", "apply_patch", "view_image"],
+        ),
+        (
+            "gpt-5-codex",
+            vec![
+                "shell",
+                "update_plan",
+                "apply_patch",
+                "read_file",
+                "view_image",
+            ],
+        ),
+    ]);
+    let expected_tools_names = tools_by_model
+        .get(OPENAI_DEFAULT_MODEL)
+        .unwrap_or_else(|| panic!("expected tools to be defined for model {OPENAI_DEFAULT_MODEL}"))
+        .as_slice();
     let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
     assert_eq!(
         body0["instructions"],
@@ -8,6 +8,7 @@ use codex_core::CodexAuth;
 use codex_core::config::Config;
 use codex_core::config::ConfigOverrides;
 use codex_core::config::ConfigToml;
+use codex_core::config::OPENAI_DEFAULT_MODEL;
 use codex_core::protocol::AgentMessageDeltaEvent;
 use codex_core::protocol::AgentMessageEvent;
 use codex_core::protocol::AgentReasoningDeltaEvent;
@@ -1101,6 +1102,11 @@ fn disabled_slash_command_while_task_running_snapshot() {
 
 #[tokio::test]
 async fn binary_size_transcript_snapshot() {
+    // the snapshot in this test depends on gpt-5-codex. Skip for now. We will consider
+    // creating snapshots for other models in the future.
+    if OPENAI_DEFAULT_MODEL != "gpt-5-codex" {
+        return;
+    }
     let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
 
     // Set up a VT100 test terminal to capture ANSI visual output
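On the test side, the recurring pattern is to derive expectations from the compile-time default instead of hard-coding "gpt-5-codex". A self-contained sketch of the lookup used in the prompt-tools test above (the literal default_model value here is a stand-in for codex_core::config::OPENAI_DEFAULT_MODEL):

    use std::collections::HashMap;

    fn main() {
        let default_model = "gpt-5"; // stand-in for OPENAI_DEFAULT_MODEL
        // Expected tool sets are keyed by model, so switching the default to a
        // model without an entry panics loudly instead of passing silently.
        let tools_by_model: HashMap<&'static str, Vec<&'static str>> = HashMap::from([
            ("gpt-5", vec!["shell", "update_plan", "apply_patch", "view_image"]),
            ("gpt-5-codex", vec!["shell", "update_plan", "apply_patch", "read_file", "view_image"]),
        ]);
        let expected_tools_names = tools_by_model
            .get(default_model)
            .unwrap_or_else(|| panic!("expected tools to be defined for model {default_model}"))
            .as_slice();
        assert_eq!(expected_tools_names, ["shell", "update_plan", "apply_patch", "view_image"]);
    }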