It turns out we want slightly different behavior for the `SetDefaultModel` RPC: some models (like GPT-4.1) do not work with reasoning, so callers must be able to explicitly clear the persisted reasoning effort. Verified in `codex-rs/mcp-server/tests/suite/set_default_model.rs`.
```rust
use std::path::Path;

use codex_core::config::ConfigToml;
use codex_protocol::mcp_protocol::SetDefaultModelParams;
use codex_protocol::mcp_protocol::SetDefaultModelResponse;
use mcp_test_support::McpProcess;
use mcp_test_support::to_response;
use mcp_types::JSONRPCResponse;
use mcp_types::RequestId;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn set_default_model_persists_overrides() {
    let codex_home = TempDir::new().expect("create tempdir");
    create_config_toml(codex_home.path()).expect("write config.toml");

    let mut mcp = McpProcess::new(codex_home.path())
        .await
        .expect("spawn mcp process");
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
        .await
        .expect("init timeout")
        .expect("init failed");

    let params = SetDefaultModelParams {
        model: Some("gpt-4.1".to_string()),
        reasoning_effort: None,
    };

    let request_id = mcp
        .send_set_default_model_request(params)
        .await
        .expect("send setDefaultModel");

    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await
    .expect("setDefaultModel timeout")
    .expect("setDefaultModel response");

    let _: SetDefaultModelResponse =
        to_response(resp).expect("deserialize setDefaultModel response");

    let config_path = codex_home.path().join("config.toml");
    let config_contents = tokio::fs::read_to_string(&config_path)
        .await
        .expect("read config.toml");
    let config_toml: ConfigToml = toml::from_str(&config_contents).expect("parse config.toml");

    assert_eq!(
        ConfigToml {
            model: Some("gpt-4.1".to_string()),
            model_reasoning_effort: None,
            ..Default::default()
        },
        config_toml,
    );
}

// Helper to create a config.toml; mirrors create_conversation.rs
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        r#"
model = "gpt-5"
model_reasoning_effort = "medium"
"#,
    )
}
```
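
The behavior the test pins down: the params are applied to the persisted config wholesale, so `reasoning_effort: None` erases the `"medium"` value seeded by the helper rather than leaving it in place. Below is a minimal, self-contained sketch of that "None means clear" semantics, using toy stand-ins for the real `ConfigToml` and `SetDefaultModelParams` types (illustrative only, not the actual codex-rs handler):

```rust
// Toy stand-ins mirroring the field names used in the test above;
// the real types live in codex_core / codex_protocol.
#[derive(Debug, Default, PartialEq)]
struct ConfigToml {
    model: Option<String>,
    model_reasoning_effort: Option<String>,
}

struct SetDefaultModelParams {
    model: Option<String>,
    reasoning_effort: Option<String>,
}

// Hypothetical apply step: overwrite unconditionally, so `None` clears a
// previously persisted value instead of preserving it.
fn apply(config: &mut ConfigToml, params: SetDefaultModelParams) {
    config.model = params.model;
    config.model_reasoning_effort = params.reasoning_effort;
}

fn main() {
    // Start from the state seeded by create_config_toml().
    let mut config = ConfigToml {
        model: Some("gpt-5".into()),
        model_reasoning_effort: Some("medium".into()),
    };
    apply(
        &mut config,
        SetDefaultModelParams {
            model: Some("gpt-4.1".into()),
            reasoning_effort: None, // explicitly clears the stored effort
        },
    );
    assert_eq!(config.model_reasoning_effort, None);
}
```

Contrast this with merge-style semantics, where `None` would mean "leave unchanged": the test's final `assert_eq!` only passes under the overwrite interpretation, since the seeded config already contained `model_reasoning_effort = "medium"`.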