feat: change the behavior of SetDefaultModel RPC so None clears the value. (#3529)
It turns out that we want slightly different behavior for the `SetDefaultModel` RPC: some models (such as GPT-4.1) do not work with reasoning, so passing `None` should explicitly clear the persisted value rather than leave it untouched. Verified in `codex-rs/mcp-server/tests/suite/set_default_model.rs`.
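The rename in the diff below captures the semantic change: the old helper skipped `None` entries, leaving any previously persisted value in place, while the new one treats `None` as an instruction to delete the key. The following is a minimal sketch of the two policies over a plain map; it stands in for the real `codex_core::config_edit` TOML handling, and all names in it are illustrative:

use std::collections::BTreeMap;

// Old policy: None entries are skipped, so a stale value survives.
fn persist_non_null(config: &mut BTreeMap<String, String>, overrides: &[(&str, Option<&str>)]) {
    for (key, value) in overrides {
        if let Some(v) = value {
            config.insert((*key).to_string(), (*v).to_string());
        }
    }
}

// New policy: None entries clear the key outright.
fn persist_and_clear_if_none(config: &mut BTreeMap<String, String>, overrides: &[(&str, Option<&str>)]) {
    for (key, value) in overrides {
        match value {
            Some(v) => {
                config.insert((*key).to_string(), (*v).to_string());
            }
            None => {
                config.remove(*key);
            }
        }
    }
}

fn main() {
    let initial = || {
        BTreeMap::from([
            ("model".to_string(), "gpt-5".to_string()),
            ("model_reasoning_effort".to_string(), "medium".to_string()),
        ])
    };
    let request = [("model", Some("gpt-4.1")), ("model_reasoning_effort", None)];

    // Old helper: the None entry is ignored, so "medium" sticks around.
    let mut kept = initial();
    persist_non_null(&mut kept, &request);
    assert_eq!(kept.get("model_reasoning_effort").map(String::as_str), Some("medium"));

    // New helper: the None entry deletes the key, which is what GPT-4.1 needs.
    let mut cleared = initial();
    persist_and_clear_if_none(&mut cleared, &request);
    assert_eq!(cleared.get("model_reasoning_effort"), None);
}
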
@@ -20,7 +20,7 @@ use codex_core::config::ConfigToml;
 use codex_core::config::load_config_as_toml;
 use codex_core::config_edit::CONFIG_KEY_EFFORT;
 use codex_core::config_edit::CONFIG_KEY_MODEL;
-use codex_core::config_edit::persist_non_null_overrides;
+use codex_core::config_edit::persist_overrides_and_clear_if_none;
 use codex_core::default_client::get_codex_user_agent;
 use codex_core::exec::ExecParams;
 use codex_core::exec_env::create_env;

@@ -519,7 +519,7 @@ impl CodexMessageProcessor {
             (&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
         ];

-        match persist_non_null_overrides(
+        match persist_overrides_and_clear_if_none(
             &self.config.codex_home,
             self.config.active_profile.as_deref(),
             &overrides,
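For reference, each entry in `overrides` at this call site pairs a config key path with an `Option<&str>`; with `persist_overrides_and_clear_if_none`, a `Some` value is written and a `None` value removes the key from `config.toml`. Below is a sketch of what feeds this call, inferred from the visible lines (only the `CONFIG_KEY_EFFORT` entry and the closing `];` appear in the hunk, so the rest is an assumption):

// Inferred shape only; the real construction in CodexMessageProcessor may differ.
let model_str: Option<String> = Some("gpt-4.1".to_string()); // taken from the request's `model` field
let effort_str: Option<String> = None;                       // the request cleared the reasoning effort
let overrides = [
    (&[CONFIG_KEY_MODEL], model_str.as_deref()),
    (&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
];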

codex-rs/mcp-server/tests/suite/set_default_model.rs:

@@ -1,5 +1,6 @@
 use std::path::Path;

+use codex_core::config::ConfigToml;
 use codex_protocol::config_types::ReasoningEffort;
 use codex_protocol::mcp_protocol::SetDefaultModelParams;
 use codex_protocol::mcp_protocol::SetDefaultModelResponse;
 use mcp_test_support::McpProcess;

@@ -14,7 +15,8 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs

 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn set_default_model_persists_overrides() {
-    let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
+    let codex_home = TempDir::new().expect("create tempdir");
+    create_config_toml(codex_home.path()).expect("write config.toml");

     let mut mcp = McpProcess::new(codex_home.path())
         .await

@@ -25,8 +27,8 @@ async fn set_default_model_persists_overrides() {
         .expect("init failed");

     let params = SetDefaultModelParams {
-        model: Some("o4-mini".to_string()),
-        reasoning_effort: Some(ReasoningEffort::High),
+        model: Some("gpt-4.1".to_string()),
+        reasoning_effort: None,
     };

     let request_id = mcp

@@ -53,10 +55,22 @@ async fn set_default_model_persists_overrides() {

     assert_eq!(
         ConfigToml {
-            model: Some("o4-mini".to_string()),
-            model_reasoning_effort: Some(ReasoningEffort::High),
+            model: Some("gpt-4.1".to_string()),
+            model_reasoning_effort: None,
             ..Default::default()
         },
         config_toml,
     );
 }
+
+// Helper to create a config.toml; mirrors create_conversation.rs
+fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
+    let config_toml = codex_home.join("config.toml");
+    std::fs::write(
+        config_toml,
+        r#"
+model = "gpt-5"
+model_reasoning_effort = "medium"
+"#,
+    )
+}
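
Concretely, `create_config_toml` seeds the config with both a model and a reasoning effort, and the assertion above expects the RPC to overwrite the former and drop the latter. In terms of the file on disk, the test starts from the first snippet and should end with something equivalent to the second (the exact formatting of the rewritten file is not asserted, only the parsed `ConfigToml`):

# config.toml as written by create_config_toml, before the RPC
model = "gpt-5"
model_reasoning_effort = "medium"

# expected state after SetDefaultModel with model = "gpt-4.1" and reasoning_effort = None:
# the model is overwritten and the effort key is gone entirely
model = "gpt-4.1"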