[mcp-server] Update read config interface (#3093)

## Summary
Follow-up to #3056

This PR updates the mcp-server interface for reading the config settings
saved by the user. At the risk of introducing _another_ Config struct, I
think it makes sense to avoid tying our protocol to ConfigToml, as it's
become a bit unwieldy. GetConfigTomlResponse was already a de facto
struct for this - better to make it explicit, in my opinion.
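
For illustration, here is roughly what the renamed endpoint looks like on the wire - a minimal sketch assuming a plain JSON-RPC client, with method and field names taken from the serde attributes in this diff (camelCase keys, `None` fields omitted):

```rust
// Sketch only: builds the request and a representative response by hand
// with serde_json; the real client/server types live in codex_protocol.
use serde_json::json;

fn main() {
    // The former `getConfigToml` method is now `getUserSavedConfig`.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "getUserSavedConfig"
    });

    // The previously flat GetConfigTomlResponse fields now sit under a
    // single `config` object (UserSavedConfig), serialized in camelCase;
    // `profiles` defaults to an empty map rather than being omitted.
    let response = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "result": {
            "config": {
                "approvalPolicy": "on-request",
                "sandboxMode": "workspace-write",
                "model": "gpt-5",
                "modelReasoningEffort": "high",
                "profile": "test",
                "profiles": {}
            }
        }
    });

    println!("{request}\n{response}");
}
```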

This is technically a breaking change to the mcp-server protocol, but
given that the previous interface was introduced only recently in #2725
and we have not yet started calling it, I propose proceeding with the
breaking change - though I am open to preserving the old endpoint.

## Testing
- [x] Added additional integration test coverage
Commit 82ed7bd285 (parent 1c04e1314d) by Dylan, 2025-09-04 16:26:41 -07:00, committed by GitHub.
9 changed files with 246 additions and 72 deletions

View File

@@ -20,6 +20,8 @@ use codex_protocol::config_types::ReasoningSummary;
 use codex_protocol::config_types::SandboxMode;
 use codex_protocol::config_types::Verbosity;
 use codex_protocol::mcp_protocol::AuthMode;
+use codex_protocol::mcp_protocol::Tools;
+use codex_protocol::mcp_protocol::UserSavedConfig;
 use dirs::home_dir;
 use serde::Deserialize;
 use std::collections::HashMap;
@@ -503,6 +505,29 @@ pub struct ConfigToml {
     pub disable_paste_burst: Option<bool>,
 }
 
+impl From<ConfigToml> for UserSavedConfig {
+    fn from(config_toml: ConfigToml) -> Self {
+        let profiles = config_toml
+            .profiles
+            .into_iter()
+            .map(|(k, v)| (k, v.into()))
+            .collect();
+
+        Self {
+            approval_policy: config_toml.approval_policy,
+            sandbox_mode: config_toml.sandbox_mode,
+            sandbox_settings: config_toml.sandbox_workspace_write.map(From::from),
+            model: config_toml.model,
+            model_reasoning_effort: config_toml.model_reasoning_effort,
+            model_reasoning_summary: config_toml.model_reasoning_summary,
+            model_verbosity: config_toml.model_verbosity,
+            tools: config_toml.tools.map(From::from),
+            profile: config_toml.profile,
+            profiles,
+        }
+    }
+}
+
 #[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
 pub struct ProjectConfig {
     pub trust_level: Option<String>,
@@ -518,6 +543,15 @@ pub struct ToolsToml {
     pub view_image: Option<bool>,
 }
 
+impl From<ToolsToml> for Tools {
+    fn from(tools_toml: ToolsToml) -> Self {
+        Self {
+            web_search: tools_toml.web_search,
+            view_image: tools_toml.view_image,
+        }
+    }
+}
+
 impl ConfigToml {
     /// Derive the effective sandbox policy from the configuration.
     fn derive_sandbox_policy(&self, sandbox_mode_override: Option<SandboxMode>) -> SandboxPolicy {

View File

@@ -22,3 +22,18 @@ pub struct ConfigProfile {
     pub chatgpt_base_url: Option<String>,
     pub experimental_instructions_file: Option<PathBuf>,
 }
+
+impl From<ConfigProfile> for codex_protocol::mcp_protocol::Profile {
+    fn from(config_profile: ConfigProfile) -> Self {
+        Self {
+            model: config_profile.model,
+            model_provider: config_profile.model_provider,
+            approval_policy: config_profile.approval_policy,
+            disable_response_storage: config_profile.disable_response_storage,
+            model_reasoning_effort: config_profile.model_reasoning_effort,
+            model_reasoning_summary: config_profile.model_reasoning_summary,
+            model_verbosity: config_profile.model_verbosity,
+            chatgpt_base_url: config_profile.chatgpt_base_url,
+        }
+    }
+}

View File

@@ -88,6 +88,17 @@ pub struct SandboxWorkspaceWrite {
     pub exclude_slash_tmp: bool,
 }
 
+impl From<SandboxWorkspaceWrite> for codex_protocol::mcp_protocol::SandboxSettings {
+    fn from(sandbox_workspace_write: SandboxWorkspaceWrite) -> Self {
+        Self {
+            writable_roots: sandbox_workspace_write.writable_roots,
+            network_access: Some(sandbox_workspace_write.network_access),
+            exclude_tmpdir_env_var: Some(sandbox_workspace_write.exclude_tmpdir_env_var),
+            exclude_slash_tmp: Some(sandbox_workspace_write.exclude_slash_tmp),
+        }
+    }
+}
+
 #[derive(Deserialize, Debug, Clone, PartialEq, Default)]
 #[serde(rename_all = "kebab-case")]
 pub enum ShellEnvironmentPolicyInherit {

View File

@@ -45,7 +45,7 @@ use codex_protocol::mcp_protocol::ExecArbitraryCommandResponse;
 use codex_protocol::mcp_protocol::ExecCommandApprovalParams;
 use codex_protocol::mcp_protocol::ExecCommandApprovalResponse;
 use codex_protocol::mcp_protocol::ExecOneOffCommandParams;
-use codex_protocol::mcp_protocol::GetConfigTomlResponse;
+use codex_protocol::mcp_protocol::GetUserSavedConfigResponse;
 use codex_protocol::mcp_protocol::GitDiffToRemoteResponse;
 use codex_protocol::mcp_protocol::InputItem as WireInputItem;
 use codex_protocol::mcp_protocol::InterruptConversationParams;
@@ -61,6 +61,7 @@ use codex_protocol::mcp_protocol::SendUserMessageResponse;
 use codex_protocol::mcp_protocol::SendUserTurnParams;
 use codex_protocol::mcp_protocol::SendUserTurnResponse;
 use codex_protocol::mcp_protocol::ServerNotification;
+use codex_protocol::mcp_protocol::UserSavedConfig;
 use mcp_types::JSONRPCErrorError;
 use mcp_types::RequestId;
 use tokio::sync::Mutex;
@@ -153,8 +154,8 @@ impl CodexMessageProcessor {
             ClientRequest::GetAuthStatus { request_id, params } => {
                 self.get_auth_status(request_id, params).await;
             }
-            ClientRequest::GetConfigToml { request_id } => {
-                self.get_config_toml(request_id).await;
+            ClientRequest::GetUserSavedConfig { request_id } => {
+                self.get_user_saved_config(request_id).await;
             }
             ClientRequest::ExecOneOffCommand { request_id, params } => {
                 self.exec_one_off_command(request_id, params).await;
@@ -371,7 +372,7 @@ impl CodexMessageProcessor {
         self.outgoing.send_response(request_id, response).await;
     }
 
-    async fn get_config_toml(&self, request_id: RequestId) {
+    async fn get_user_saved_config(&self, request_id: RequestId) {
         let toml_value = match load_config_as_toml(&self.config.codex_home) {
             Ok(val) => val,
             Err(err) => {
@@ -398,32 +399,11 @@ impl CodexMessageProcessor {
             }
         };
 
-        let profiles: HashMap<String, codex_protocol::config_types::ConfigProfile> = cfg
-            .profiles
-            .into_iter()
-            .map(|(k, v)| {
-                (
-                    k,
-                    // Define this explicitly here to avoid the need to
-                    // implement `From<codex_core::config_profile::ConfigProfile>`
-                    // for the `ConfigProfile` type and introduce a dependency on codex_core
-                    codex_protocol::config_types::ConfigProfile {
-                        model: v.model,
-                        approval_policy: v.approval_policy,
-                        model_reasoning_effort: v.model_reasoning_effort,
-                    },
-                )
-            })
-            .collect();
+        let user_saved_config: UserSavedConfig = cfg.into();
 
-        let response = GetConfigTomlResponse {
-            approval_policy: cfg.approval_policy,
-            sandbox_mode: cfg.sandbox_mode,
-            model_reasoning_effort: cfg.model_reasoning_effort,
-            profile: cfg.profile,
-            profiles: Some(profiles),
+        let response = GetUserSavedConfigResponse {
+            config: user_saved_config,
         };
 
         self.outgoing.send_response(request_id, response).await;
     }

View File

@@ -240,9 +240,9 @@ impl McpProcess {
self.send_request("getAuthStatus", params).await
}
/// Send a `getConfigToml` JSON-RPC request.
pub async fn send_get_config_toml_request(&mut self) -> anyhow::Result<i64> {
self.send_request("getConfigToml", None).await
/// Send a `getUserSavedConfig` JSON-RPC request.
pub async fn send_get_user_saved_config_request(&mut self) -> anyhow::Result<i64> {
self.send_request("getUserSavedConfig", None).await
}
/// Send a `loginChatGpt` JSON-RPC request.

View File

@@ -2,10 +2,15 @@ use std::collections::HashMap;
 use std::path::Path;
 
 use codex_core::protocol::AskForApproval;
-use codex_protocol::config_types::ConfigProfile;
 use codex_protocol::config_types::ReasoningEffort;
+use codex_protocol::config_types::ReasoningSummary;
 use codex_protocol::config_types::SandboxMode;
-use codex_protocol::mcp_protocol::GetConfigTomlResponse;
+use codex_protocol::config_types::Verbosity;
+use codex_protocol::mcp_protocol::GetUserSavedConfigResponse;
+use codex_protocol::mcp_protocol::Profile;
+use codex_protocol::mcp_protocol::SandboxSettings;
+use codex_protocol::mcp_protocol::Tools;
+use codex_protocol::mcp_protocol::UserSavedConfig;
 use mcp_test_support::McpProcess;
 use mcp_test_support::to_response;
 use mcp_types::JSONRPCResponse;
@@ -21,22 +26,39 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
     std::fs::write(
         config_toml,
         r#"
+model = "gpt-5"
 approval_policy = "on-request"
 sandbox_mode = "workspace-write"
+model_reasoning_summary = "detailed"
 model_reasoning_effort = "high"
+model_verbosity = "medium"
 profile = "test"
 
+[sandbox_workspace_write]
+writable_roots = ["/tmp"]
+network_access = true
+exclude_tmpdir_env_var = true
+exclude_slash_tmp = true
+
+[tools]
+web_search = false
+view_image = true
+
 [profiles.test]
 model = "gpt-4o"
 approval_policy = "on-request"
 model_reasoning_effort = "high"
+model_reasoning_summary = "detailed"
+model_verbosity = "medium"
+model_provider = "openai"
+disable_response_storage = false
+chatgpt_base_url = "https://api.chatgpt.com"
 "#,
     )
 }
 
-#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn get_config_toml_returns_subset() {
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn get_config_toml_parses_all_fields() {
     let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
     create_config_toml(codex_home.path()).expect("write config.toml");
@@ -49,32 +71,95 @@ async fn get_config_toml_returns_subset() {
.expect("init failed");
let request_id = mcp
.send_get_config_toml_request()
.send_get_user_saved_config_request()
.await
.expect("send getConfigToml");
.expect("send getUserSavedConfig");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.expect("getConfigToml timeout")
.expect("getConfigToml response");
.expect("getUserSavedConfig timeout")
.expect("getUserSavedConfig response");
let config: GetConfigTomlResponse = to_response(resp).expect("deserialize config");
let expected = GetConfigTomlResponse {
approval_policy: Some(AskForApproval::OnRequest),
sandbox_mode: Some(SandboxMode::WorkspaceWrite),
model_reasoning_effort: Some(ReasoningEffort::High),
profile: Some("test".to_string()),
profiles: Some(HashMap::from([(
"test".into(),
ConfigProfile {
model: Some("gpt-4o".into()),
approval_policy: Some(AskForApproval::OnRequest),
model_reasoning_effort: Some(ReasoningEffort::High),
},
)])),
let config: GetUserSavedConfigResponse = to_response(resp).expect("deserialize config");
let expected = GetUserSavedConfigResponse {
config: UserSavedConfig {
approval_policy: Some(AskForApproval::OnRequest),
sandbox_mode: Some(SandboxMode::WorkspaceWrite),
sandbox_settings: Some(SandboxSettings {
writable_roots: vec!["/tmp".into()],
network_access: Some(true),
exclude_tmpdir_env_var: Some(true),
exclude_slash_tmp: Some(true),
}),
model: Some("gpt-5".into()),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),
tools: Some(Tools {
web_search: Some(false),
view_image: Some(true),
}),
profile: Some("test".to_string()),
profiles: HashMap::from([(
"test".into(),
Profile {
model: Some("gpt-4o".into()),
approval_policy: Some(AskForApproval::OnRequest),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),
model_provider: Some("openai".into()),
disable_response_storage: Some(false),
chatgpt_base_url: Some("https://api.chatgpt.com".into()),
},
)]),
},
};
assert_eq!(expected, config);
assert_eq!(config, expected);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_config_toml_empty() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let request_id = mcp
.send_get_user_saved_config_request()
.await
.expect("send getUserSavedConfig");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.expect("getUserSavedConfig timeout")
.expect("getUserSavedConfig response");
let config: GetUserSavedConfigResponse = to_response(resp).expect("deserialize config");
let expected = GetUserSavedConfigResponse {
config: UserSavedConfig {
approval_policy: None,
sandbox_mode: None,
sandbox_settings: None,
model: None,
model_reasoning_effort: None,
model_reasoning_summary: None,
model_verbosity: None,
tools: None,
profile: None,
profiles: HashMap::new(),
},
};
assert_eq!(config, expected);
}

View File

@@ -46,6 +46,7 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
     codex_protocol::mcp_protocol::ApplyPatchApprovalResponse::export_all_to(out_dir)?;
     codex_protocol::mcp_protocol::ExecCommandApprovalParams::export_all_to(out_dir)?;
     codex_protocol::mcp_protocol::ExecCommandApprovalResponse::export_all_to(out_dir)?;
+    codex_protocol::mcp_protocol::GetUserSavedConfigResponse::export_all_to(out_dir)?;
     codex_protocol::mcp_protocol::ServerNotification::export_all_to(out_dir)?;
 
     generate_index_ts(out_dir)?;

View File

@@ -4,8 +4,6 @@ use strum_macros::Display;
 use strum_macros::EnumIter;
 use ts_rs::TS;
 
-use crate::protocol::AskForApproval;
-
 /// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
 #[derive(
     Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display, TS, EnumIter,
@@ -37,7 +35,7 @@ pub enum ReasoningSummary {
 /// Controls output length/detail on GPT-5 models via the Responses API.
 /// Serialized with lowercase values to match the OpenAI API.
-#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
+#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display, TS)]
 #[serde(rename_all = "lowercase")]
 #[strum(serialize_all = "lowercase")]
 pub enum Verbosity {
@@ -61,13 +59,3 @@ pub enum SandboxMode {
#[serde(rename = "danger-full-access")]
DangerFullAccess,
}
/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`. Currently only a subset of the fields are supported.
#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct ConfigProfile {
pub model: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub model_reasoning_effort: Option<ReasoningEffort>,
}

View File

@@ -2,10 +2,10 @@ use std::collections::HashMap;
 use std::fmt::Display;
 use std::path::PathBuf;
 
-use crate::config_types::ConfigProfile;
 use crate::config_types::ReasoningEffort;
 use crate::config_types::ReasoningSummary;
 use crate::config_types::SandboxMode;
+use crate::config_types::Verbosity;
 use crate::protocol::AskForApproval;
 use crate::protocol::FileChange;
 use crate::protocol::ReviewDecision;
@@ -102,7 +102,7 @@ pub enum ClientRequest {
         request_id: RequestId,
         params: GetAuthStatusParams,
     },
-    GetConfigToml {
+    GetUserSavedConfig {
         #[serde(rename = "id")]
         request_id: RequestId,
     },
@@ -260,22 +260,82 @@ pub struct GetAuthStatusResponse {
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
 #[serde(rename_all = "camelCase")]
-pub struct GetConfigTomlResponse {
+pub struct GetUserSavedConfigResponse {
+    pub config: UserSavedConfig,
+}
+
+/// UserSavedConfig contains a subset of the config. It is meant to expose mcp
+/// client-configurable settings that can be specified in the NewConversation
+/// and SendUserTurn requests.
+#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct UserSavedConfig {
+    /// Approvals
     #[serde(skip_serializing_if = "Option::is_none")]
     pub approval_policy: Option<AskForApproval>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub sandbox_mode: Option<SandboxMode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sandbox_settings: Option<SandboxSettings>,
-    /// Relevant model configuration
+    /// Model-specific configuration
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub model: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub model_reasoning_effort: Option<ReasoningEffort>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub model_reasoning_summary: Option<ReasoningSummary>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub model_verbosity: Option<Verbosity>,
+    /// Tools
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tools: Option<Tools>,
     /// Profiles
     #[serde(skip_serializing_if = "Option::is_none")]
     pub profile: Option<String>,
+    #[serde(default)]
+    pub profiles: HashMap<String, Profile>,
+}
+
+/// MCP representation of a [`codex_core::config_profile::ConfigProfile`].
+#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct Profile {
+    pub model: Option<String>,
+    /// The key in the `model_providers` map identifying the
+    /// [`ModelProviderInfo`] to use.
+    pub model_provider: Option<String>,
+    pub approval_policy: Option<AskForApproval>,
+    pub disable_response_storage: Option<bool>,
+    pub model_reasoning_effort: Option<ReasoningEffort>,
+    pub model_reasoning_summary: Option<ReasoningSummary>,
+    pub model_verbosity: Option<Verbosity>,
+    pub chatgpt_base_url: Option<String>,
+}
+
+/// MCP representation of a [`codex_core::config::ToolsToml`].
+#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct Tools {
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub profiles: Option<HashMap<String, ConfigProfile>>,
+    pub web_search: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub view_image: Option<bool>,
+}
+
+/// MCP representation of a [`codex_core::config_types::SandboxWorkspaceWrite`].
+#[derive(Deserialize, Debug, Clone, PartialEq, Serialize, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct SandboxSettings {
+    #[serde(default)]
+    pub writable_roots: Vec<PathBuf>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub network_access: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub exclude_tmpdir_env_var: Option<bool>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub exclude_slash_tmp: Option<bool>,
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]