We continue the separation between `codex app-server` and `codex mcp-server`. In particular, we introduce a new crate, `codex-app-server-protocol`, and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it `codex-rs/app-server-protocol/src/protocol.rs`. Because `ConversationId` was defined in `mcp_protocol.rs`, we move it into its own file, `codex-rs/protocol/src/conversation_id.rs`; because it is referenced in many places, this PR has to touch a lot of files. We also decide to move away from strict JSON-RPC 2.0 semantics, so we introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which is essentially the same `JSONRPCMessage` type defined in `mcp-types` except with all of the `"jsonrpc": "2.0"` fields removed. Dropping `"jsonrpc": "2.0"` makes our serialization logic considerably simpler, as we can lean more heavily on serde to serialize directly into the wire format we now use.
77 lines · 2.3 KiB · Rust
use std::path::Path;
|
|
|
|
use app_test_support::McpProcess;
|
|
use app_test_support::to_response;
|
|
use codex_app_server_protocol::JSONRPCResponse;
|
|
use codex_app_server_protocol::RequestId;
|
|
use codex_app_server_protocol::SetDefaultModelParams;
|
|
use codex_app_server_protocol::SetDefaultModelResponse;
|
|
use codex_core::config::ConfigToml;
|
|
use pretty_assertions::assert_eq;
|
|
use tempfile::TempDir;
|
|
use tokio::time::timeout;
|
|
|
|
/// Maximum time to wait for any single step (initialize, response read) from
/// the spawned app-server process before the test fails with a timeout.
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
|
|
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
|
async fn set_default_model_persists_overrides() {
|
|
let codex_home = TempDir::new().expect("create tempdir");
|
|
create_config_toml(codex_home.path()).expect("write config.toml");
|
|
|
|
let mut mcp = McpProcess::new(codex_home.path())
|
|
.await
|
|
.expect("spawn mcp process");
|
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
|
|
.await
|
|
.expect("init timeout")
|
|
.expect("init failed");
|
|
|
|
let params = SetDefaultModelParams {
|
|
model: Some("gpt-4.1".to_string()),
|
|
reasoning_effort: None,
|
|
};
|
|
|
|
let request_id = mcp
|
|
.send_set_default_model_request(params)
|
|
.await
|
|
.expect("send setDefaultModel");
|
|
|
|
let resp: JSONRPCResponse = timeout(
|
|
DEFAULT_READ_TIMEOUT,
|
|
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
|
)
|
|
.await
|
|
.expect("setDefaultModel timeout")
|
|
.expect("setDefaultModel response");
|
|
|
|
let _: SetDefaultModelResponse =
|
|
to_response(resp).expect("deserialize setDefaultModel response");
|
|
|
|
let config_path = codex_home.path().join("config.toml");
|
|
let config_contents = tokio::fs::read_to_string(&config_path)
|
|
.await
|
|
.expect("read config.toml");
|
|
let config_toml: ConfigToml = toml::from_str(&config_contents).expect("parse config.toml");
|
|
|
|
assert_eq!(
|
|
ConfigToml {
|
|
model: Some("gpt-4.1".to_string()),
|
|
model_reasoning_effort: None,
|
|
..Default::default()
|
|
},
|
|
config_toml,
|
|
);
|
|
}
|
|
|
|
// Helper to create a config.toml; mirrors create_conversation.rs
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
    // Baseline settings the test later expects to see overridden.
    let contents = r#"
model = "gpt-5-codex"
model_reasoning_effort = "medium"
"#;
    std::fs::write(codex_home.join("config.toml"), contents)
}
|