use crate::config_profile::ConfigProfile; use crate::config_types::History; use crate::config_types::McpServerConfig; use crate::config_types::ReasoningSummaryFormat; use crate::config_types::SandboxWorkspaceWrite; use crate::config_types::ShellEnvironmentPolicy; use crate::config_types::ShellEnvironmentPolicyToml; use crate::config_types::Tui; use crate::config_types::UriBasedFileOpener; use crate::git_info::resolve_root_git_project_for_trust; use crate::model_family::ModelFamily; use crate::model_family::find_family_for_model; use crate::model_provider_info::ModelProviderInfo; use crate::model_provider_info::built_in_model_providers; use crate::openai_model_info::get_model_info; use crate::protocol::AskForApproval; use crate::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningEffort; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::config_types::SandboxMode; use codex_protocol::config_types::Verbosity; use codex_protocol::mcp_protocol::AuthMode; use codex_protocol::mcp_protocol::Tools; use codex_protocol::mcp_protocol::UserSavedConfig; use dirs::home_dir; use serde::Deserialize; use std::collections::HashMap; use std::path::Path; use std::path::PathBuf; use tempfile::NamedTempFile; use toml::Value as TomlValue; use toml_edit::DocumentMut; const OPENAI_DEFAULT_MODEL: &str = "gpt-5"; /// Maximum number of bytes of the documentation that will be embedded. Larger /// files are *silently truncated* to this size so we do not take up too much of /// the context window. pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB pub(crate) const CONFIG_TOML_FILE: &str = "config.toml"; /// Application configuration loaded from disk and merged with overrides. #[derive(Debug, Clone, PartialEq)] pub struct Config { /// Optional override of model selection. pub model: String, pub model_family: ModelFamily, /// Size of the context window for the model, in tokens. pub model_context_window: Option, /// Maximum number of output tokens. pub model_max_output_tokens: Option, /// Key into the model_providers map that specifies which provider to use. pub model_provider_id: String, /// Info needed to make an API request to the model. pub model_provider: ModelProviderInfo, /// Approval policy for executing commands. pub approval_policy: AskForApproval, pub sandbox_policy: SandboxPolicy, pub shell_environment_policy: ShellEnvironmentPolicy, /// When `true`, `AgentReasoning` events emitted by the backend will be /// suppressed from the frontend output. This can reduce visual noise when /// users are only interested in the final agent responses. pub hide_agent_reasoning: bool, /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. /// Defaults to `false`. pub show_raw_agent_reasoning: bool, /// User-provided instructions from AGENTS.md. pub user_instructions: Option, /// Base instructions override. pub base_instructions: Option, /// Optional external notifier command. When set, Codex will spawn this /// program after each completed *turn* (i.e. when the agent finishes /// processing a user submission). The value must be the full command /// broken into argv tokens **without** the trailing JSON argument - Codex /// appends one extra argument containing a JSON payload describing the /// event. /// /// Example `~/.codex/config.toml` snippet: /// /// ```toml /// notify = ["notify-send", "Codex"] /// ``` /// /// which will be invoked as: /// /// ```shell /// notify-send Codex '{"type":"agent-turn-complete","turn-id":"12345"}' /// ``` /// /// If unset the feature is disabled. pub notify: Option>, /// The directory that should be treated as the current working directory /// for the session. All relative paths inside the business-logic layer are /// resolved against this path. pub cwd: PathBuf, /// Definition for MCP servers that Codex can reach out to for tool calls. pub mcp_servers: HashMap, /// Combined provider map (defaults merged with user-defined overrides). pub model_providers: HashMap, /// Maximum number of bytes to include from an AGENTS.md project doc file. pub project_doc_max_bytes: usize, /// Directory containing all Codex state (defaults to `~/.codex` but can be /// overridden by the `CODEX_HOME` environment variable). pub codex_home: PathBuf, /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. pub history: History, /// Optional URI-based file opener. If set, citations to files in the model /// output will be hyperlinked using the specified URI scheme. pub file_opener: UriBasedFileOpener, /// Collection of settings that are specific to the TUI. pub tui: Tui, /// Path to the `codex-linux-sandbox` executable. This must be set if /// [`crate::exec::SandboxType::LinuxSeccomp`] is used. Note that this /// cannot be set in the config file: it must be set in code via /// [`ConfigOverrides`]. /// /// When this program is invoked, arg0 will be set to `codex-linux-sandbox`. pub codex_linux_sandbox_exe: Option, /// Value to use for `reasoning.effort` when making a request using the /// Responses API. pub model_reasoning_effort: ReasoningEffort, /// If not "none", the value to use for `reasoning.summary` when making a /// request using the Responses API. pub model_reasoning_summary: ReasoningSummary, /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). pub model_verbosity: Option, /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). pub chatgpt_base_url: String, /// Experimental rollout resume path (absolute path to .jsonl; undocumented). pub experimental_resume: Option, /// Include an experimental plan tool that the model can use to update its current plan and status of each step. pub include_plan_tool: bool, /// Include the `apply_patch` tool for models that benefit from invoking /// file edits as a structured tool call. When unset, this falls back to the /// model family's default preference. pub include_apply_patch_tool: bool, pub tools_web_search_request: bool, /// If set to `true`, the API key will be signed with the `originator` header. pub preferred_auth_method: AuthMode, pub use_experimental_streamable_shell_tool: bool, /// Include the `view_image` tool that lets the agent attach a local image path to context. pub include_view_image_tool: bool, /// The active profile name used to derive this `Config` (if any). pub active_profile: Option, /// When true, disables burst-paste detection for typed input entirely. /// All characters are inserted as they are received, and no buffering /// or placeholder replacement will occur for fast keypress bursts. pub disable_paste_burst: bool, } impl Config { /// Load configuration with *generic* CLI overrides (`-c key=value`) applied /// **in between** the values parsed from `config.toml` and the /// strongly-typed overrides specified via [`ConfigOverrides`]. /// /// The precedence order is therefore: `config.toml` < `-c` overrides < /// `ConfigOverrides`. pub fn load_with_cli_overrides( cli_overrides: Vec<(String, TomlValue)>, overrides: ConfigOverrides, ) -> std::io::Result { // Resolve the directory that stores Codex state (e.g. ~/.codex or the // value of $CODEX_HOME) so we can embed it into the resulting // `Config` instance. let codex_home = find_codex_home()?; // Step 1: parse `config.toml` into a generic JSON value. let mut root_value = load_config_as_toml(&codex_home)?; // Step 2: apply the `-c` overrides. for (path, value) in cli_overrides.into_iter() { apply_toml_override(&mut root_value, &path, value); } // Step 3: deserialize into `ConfigToml` so that Serde can enforce the // correct types. let cfg: ConfigToml = root_value.try_into().map_err(|e| { tracing::error!("Failed to deserialize overridden config: {e}"); std::io::Error::new(std::io::ErrorKind::InvalidData, e) })?; // Step 4: merge with the strongly-typed overrides. Self::load_from_base_config_with_overrides(cfg, overrides, codex_home) } } pub fn load_config_as_toml_with_cli_overrides( codex_home: &Path, cli_overrides: Vec<(String, TomlValue)>, ) -> std::io::Result { let mut root_value = load_config_as_toml(codex_home)?; for (path, value) in cli_overrides.into_iter() { apply_toml_override(&mut root_value, &path, value); } let cfg: ConfigToml = root_value.try_into().map_err(|e| { tracing::error!("Failed to deserialize overridden config: {e}"); std::io::Error::new(std::io::ErrorKind::InvalidData, e) })?; Ok(cfg) } /// Read `CODEX_HOME/config.toml` and return it as a generic TOML value. Returns /// an empty TOML table when the file does not exist. pub fn load_config_as_toml(codex_home: &Path) -> std::io::Result { let config_path = codex_home.join(CONFIG_TOML_FILE); match std::fs::read_to_string(&config_path) { Ok(contents) => match toml::from_str::(&contents) { Ok(val) => Ok(val), Err(e) => { tracing::error!("Failed to parse config.toml: {e}"); Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e)) } }, Err(e) if e.kind() == std::io::ErrorKind::NotFound => { tracing::info!("config.toml not found, using defaults"); Ok(TomlValue::Table(Default::default())) } Err(e) => { tracing::error!("Failed to read config.toml: {e}"); Err(e) } } } /// Patch `CODEX_HOME/config.toml` project state. /// Use with caution. pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Result<()> { let config_path = codex_home.join(CONFIG_TOML_FILE); // Parse existing config if present; otherwise start a new document. let mut doc = match std::fs::read_to_string(config_path.clone()) { Ok(s) => s.parse::()?, Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(), Err(e) => return Err(e.into()), }; // Ensure we render a human-friendly structure: // // [projects] // [projects."/path/to/project"] // trust_level = "trusted" // // rather than inline tables like: // // [projects] // "/path/to/project" = { trust_level = "trusted" } let project_key = project_path.to_string_lossy().to_string(); // Ensure top-level `projects` exists as a non-inline, explicit table. If it // exists but was previously represented as a non-table (e.g., inline), // replace it with an explicit table. let mut created_projects_table = false; { let root = doc.as_table_mut(); let needs_table = !root.contains_key("projects") || root.get("projects").and_then(|i| i.as_table()).is_none(); if needs_table { root.insert("projects", toml_edit::table()); created_projects_table = true; } } let Some(projects_tbl) = doc["projects"].as_table_mut() else { return Err(anyhow::anyhow!( "projects table missing after initialization" )); }; // If we created the `projects` table ourselves, keep it implicit so we // don't render a standalone `[projects]` header. if created_projects_table { projects_tbl.set_implicit(true); } // Ensure the per-project entry is its own explicit table. If it exists but // is not a table (e.g., an inline table), replace it with an explicit table. let needs_proj_table = !projects_tbl.contains_key(project_key.as_str()) || projects_tbl .get(project_key.as_str()) .and_then(|i| i.as_table()) .is_none(); if needs_proj_table { projects_tbl.insert(project_key.as_str(), toml_edit::table()); } let Some(proj_tbl) = projects_tbl .get_mut(project_key.as_str()) .and_then(|i| i.as_table_mut()) else { return Err(anyhow::anyhow!("project table missing for {}", project_key)); }; proj_tbl.set_implicit(false); proj_tbl["trust_level"] = toml_edit::value("trusted"); // ensure codex_home exists std::fs::create_dir_all(codex_home)?; // create a tmp_file let tmp_file = NamedTempFile::new_in(codex_home)?; std::fs::write(tmp_file.path(), doc.to_string())?; // atomically move the tmp file into config.toml tmp_file.persist(config_path)?; Ok(()) } /// Apply a single dotted-path override onto a TOML value. fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) { use toml::value::Table; let segments: Vec<&str> = path.split('.').collect(); let mut current = root; for (idx, segment) in segments.iter().enumerate() { let is_last = idx == segments.len() - 1; if is_last { match current { TomlValue::Table(table) => { table.insert(segment.to_string(), value); } _ => { let mut table = Table::new(); table.insert(segment.to_string(), value); *current = TomlValue::Table(table); } } return; } // Traverse or create intermediate object. match current { TomlValue::Table(table) => { current = table .entry(segment.to_string()) .or_insert_with(|| TomlValue::Table(Table::new())); } _ => { *current = TomlValue::Table(Table::new()); if let TomlValue::Table(tbl) = current { current = tbl .entry(segment.to_string()) .or_insert_with(|| TomlValue::Table(Table::new())); } } } } } /// Base config deserialized from ~/.codex/config.toml. #[derive(Deserialize, Debug, Clone, Default)] pub struct ConfigToml { /// Optional override of model selection. pub model: Option, /// Provider to use from the model_providers map. pub model_provider: Option, /// Size of the context window for the model, in tokens. pub model_context_window: Option, /// Maximum number of output tokens. pub model_max_output_tokens: Option, /// Default approval policy for executing commands. pub approval_policy: Option, #[serde(default)] pub shell_environment_policy: ShellEnvironmentPolicyToml, /// Sandbox mode to use. pub sandbox_mode: Option, /// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`. pub sandbox_workspace_write: Option, /// Optional external command to spawn for end-user notifications. #[serde(default)] pub notify: Option>, /// System instructions. pub instructions: Option, /// Definition for MCP servers that Codex can reach out to for tool calls. #[serde(default)] pub mcp_servers: HashMap, /// User-defined provider entries that extend/override the built-in list. #[serde(default)] pub model_providers: HashMap, /// Maximum number of bytes to include from an AGENTS.md project doc file. pub project_doc_max_bytes: Option, /// Profile to use from the `profiles` map. pub profile: Option, /// Named profiles to facilitate switching between different configurations. #[serde(default)] pub profiles: HashMap, /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. #[serde(default)] pub history: Option, /// Optional URI-based file opener. If set, citations to files in the model /// output will be hyperlinked using the specified URI scheme. pub file_opener: Option, /// Collection of settings that are specific to the TUI. pub tui: Option, /// When set to `true`, `AgentReasoning` events will be hidden from the /// UI/output. Defaults to `false`. pub hide_agent_reasoning: Option, /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. /// Defaults to `false`. pub show_raw_agent_reasoning: Option, pub model_reasoning_effort: Option, pub model_reasoning_summary: Option, /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). pub model_verbosity: Option, /// Override to force-enable reasoning summaries for the configured model. pub model_supports_reasoning_summaries: Option, /// Override to force reasoning summary format for the configured model. pub model_reasoning_summary_format: Option, /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). pub chatgpt_base_url: Option, /// Experimental rollout resume path (absolute path to .jsonl; undocumented). pub experimental_resume: Option, /// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS. pub experimental_instructions_file: Option, pub experimental_use_exec_command_tool: Option, pub projects: Option>, /// If set to `true`, the API key will be signed with the `originator` header. pub preferred_auth_method: Option, /// Nested tools section for feature toggles pub tools: Option, /// When true, disables burst-paste detection for typed input entirely. /// All characters are inserted as they are received, and no buffering /// or placeholder replacement will occur for fast keypress bursts. pub disable_paste_burst: Option, } impl From for UserSavedConfig { fn from(config_toml: ConfigToml) -> Self { let profiles = config_toml .profiles .into_iter() .map(|(k, v)| (k, v.into())) .collect(); Self { approval_policy: config_toml.approval_policy, sandbox_mode: config_toml.sandbox_mode, sandbox_settings: config_toml.sandbox_workspace_write.map(From::from), model: config_toml.model, model_reasoning_effort: config_toml.model_reasoning_effort, model_reasoning_summary: config_toml.model_reasoning_summary, model_verbosity: config_toml.model_verbosity, tools: config_toml.tools.map(From::from), profile: config_toml.profile, profiles, } } } #[derive(Deserialize, Debug, Clone, PartialEq, Eq)] pub struct ProjectConfig { pub trust_level: Option, } #[derive(Deserialize, Debug, Clone, Default)] pub struct ToolsToml { #[serde(default, alias = "web_search_request")] pub web_search: Option, /// Enable the `view_image` tool that lets the agent attach local images. #[serde(default)] pub view_image: Option, } impl From for Tools { fn from(tools_toml: ToolsToml) -> Self { Self { web_search: tools_toml.web_search, view_image: tools_toml.view_image, } } } impl ConfigToml { /// Derive the effective sandbox policy from the configuration. fn derive_sandbox_policy(&self, sandbox_mode_override: Option) -> SandboxPolicy { let resolved_sandbox_mode = sandbox_mode_override .or(self.sandbox_mode) .unwrap_or_default(); match resolved_sandbox_mode { SandboxMode::ReadOnly => SandboxPolicy::new_read_only_policy(), SandboxMode::WorkspaceWrite => match self.sandbox_workspace_write.as_ref() { Some(SandboxWorkspaceWrite { writable_roots, network_access, exclude_tmpdir_env_var, exclude_slash_tmp, }) => SandboxPolicy::WorkspaceWrite { writable_roots: writable_roots.clone(), network_access: *network_access, exclude_tmpdir_env_var: *exclude_tmpdir_env_var, exclude_slash_tmp: *exclude_slash_tmp, }, None => SandboxPolicy::new_workspace_write_policy(), }, SandboxMode::DangerFullAccess => SandboxPolicy::DangerFullAccess, } } pub fn is_cwd_trusted(&self, resolved_cwd: &Path) -> bool { let projects = self.projects.clone().unwrap_or_default(); let is_path_trusted = |path: &Path| { let path_str = path.to_string_lossy().to_string(); projects .get(&path_str) .map(|p| p.trust_level.as_deref() == Some("trusted")) .unwrap_or(false) }; // Fast path: exact cwd match if is_path_trusted(resolved_cwd) { return true; } // If cwd lives inside a git worktree, check whether the root git project // (the primary repository working directory) is trusted. This lets // worktrees inherit trust from the main project. if let Some(root_project) = resolve_root_git_project_for_trust(resolved_cwd) { return is_path_trusted(&root_project); } false } pub fn get_config_profile( &self, override_profile: Option, ) -> Result { let profile = override_profile.or_else(|| self.profile.clone()); match profile { Some(key) => { if let Some(profile) = self.profiles.get(key.as_str()) { return Ok(profile.clone()); } Err(std::io::Error::new( std::io::ErrorKind::NotFound, format!("config profile `{key}` not found"), )) } None => Ok(ConfigProfile::default()), } } } /// Optional overrides for user configuration (e.g., from CLI flags). #[derive(Default, Debug, Clone)] pub struct ConfigOverrides { pub model: Option, pub cwd: Option, pub approval_policy: Option, pub sandbox_mode: Option, pub model_provider: Option, pub config_profile: Option, pub codex_linux_sandbox_exe: Option, pub base_instructions: Option, pub include_plan_tool: Option, pub include_apply_patch_tool: Option, pub include_view_image_tool: Option, pub show_raw_agent_reasoning: Option, pub tools_web_search_request: Option, } impl Config { /// Meant to be used exclusively for tests: `load_with_overrides()` should /// be used in all other cases. pub fn load_from_base_config_with_overrides( cfg: ConfigToml, overrides: ConfigOverrides, codex_home: PathBuf, ) -> std::io::Result { let user_instructions = Self::load_instructions(Some(&codex_home)); // Destructure ConfigOverrides fully to ensure all overrides are applied. let ConfigOverrides { model, cwd, approval_policy, sandbox_mode, model_provider, config_profile: config_profile_key, codex_linux_sandbox_exe, base_instructions, include_plan_tool, include_apply_patch_tool, include_view_image_tool, show_raw_agent_reasoning, tools_web_search_request: override_tools_web_search_request, } = overrides; let active_profile_name = config_profile_key .as_ref() .or(cfg.profile.as_ref()) .cloned(); let config_profile = match active_profile_name.as_ref() { Some(key) => cfg .profiles .get(key) .ok_or_else(|| { std::io::Error::new( std::io::ErrorKind::NotFound, format!("config profile `{key}` not found"), ) })? .clone(), None => ConfigProfile::default(), }; let sandbox_policy = cfg.derive_sandbox_policy(sandbox_mode); let mut model_providers = built_in_model_providers(); // Merge user-defined providers into the built-in list. for (key, provider) in cfg.model_providers.into_iter() { model_providers.entry(key).or_insert(provider); } let model_provider_id = model_provider .or(config_profile.model_provider) .or(cfg.model_provider) .unwrap_or_else(|| "openai".to_string()); let model_provider = model_providers .get(&model_provider_id) .ok_or_else(|| { std::io::Error::new( std::io::ErrorKind::NotFound, format!("Model provider `{model_provider_id}` not found"), ) })? .clone(); let shell_environment_policy = cfg.shell_environment_policy.into(); let resolved_cwd = { use std::env; match cwd { None => { tracing::info!("cwd not set, using current dir"); env::current_dir()? } Some(p) if p.is_absolute() => p, Some(p) => { // Resolve relative path against the current working directory. tracing::info!("cwd is relative, resolving against current dir"); let mut current = env::current_dir()?; current.push(p); current } } }; let history = cfg.history.unwrap_or_default(); let tools_web_search_request = override_tools_web_search_request .or(cfg.tools.as_ref().and_then(|t| t.web_search)) .unwrap_or(false); let include_view_image_tool = include_view_image_tool .or(cfg.tools.as_ref().and_then(|t| t.view_image)) .unwrap_or(true); let model = model .or(config_profile.model) .or(cfg.model) .unwrap_or_else(default_model); let mut model_family = find_family_for_model(&model).unwrap_or_else(|| ModelFamily { slug: model.clone(), family: model.clone(), needs_special_apply_patch_instructions: false, supports_reasoning_summaries: false, reasoning_summary_format: ReasoningSummaryFormat::None, uses_local_shell_tool: false, apply_patch_tool_type: None, }); if let Some(supports_reasoning_summaries) = cfg.model_supports_reasoning_summaries { model_family.supports_reasoning_summaries = supports_reasoning_summaries; } if let Some(model_reasoning_summary_format) = cfg.model_reasoning_summary_format { model_family.reasoning_summary_format = model_reasoning_summary_format; } let openai_model_info = get_model_info(&model_family); let model_context_window = cfg .model_context_window .or_else(|| openai_model_info.as_ref().map(|info| info.context_window)); let model_max_output_tokens = cfg.model_max_output_tokens.or_else(|| { openai_model_info .as_ref() .map(|info| info.max_output_tokens) }); let experimental_resume = cfg.experimental_resume; // Load base instructions override from a file if specified. If the // path is relative, resolve it against the effective cwd so the // behaviour matches other path-like config values. let experimental_instructions_path = config_profile .experimental_instructions_file .as_ref() .or(cfg.experimental_instructions_file.as_ref()); let file_base_instructions = Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?; let base_instructions = base_instructions.or(file_base_instructions); let config = Self { model, model_family, model_context_window, model_max_output_tokens, model_provider_id, model_provider, cwd: resolved_cwd, approval_policy: approval_policy .or(config_profile.approval_policy) .or(cfg.approval_policy) .unwrap_or_else(AskForApproval::default), sandbox_policy, shell_environment_policy, notify: cfg.notify, user_instructions, base_instructions, mcp_servers: cfg.mcp_servers, model_providers, project_doc_max_bytes: cfg.project_doc_max_bytes.unwrap_or(PROJECT_DOC_MAX_BYTES), codex_home, history, file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode), tui: cfg.tui.unwrap_or_default(), codex_linux_sandbox_exe, hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false), show_raw_agent_reasoning: cfg .show_raw_agent_reasoning .or(show_raw_agent_reasoning) .unwrap_or(false), model_reasoning_effort: config_profile .model_reasoning_effort .or(cfg.model_reasoning_effort) .unwrap_or_default(), model_reasoning_summary: config_profile .model_reasoning_summary .or(cfg.model_reasoning_summary) .unwrap_or_default(), model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity), chatgpt_base_url: config_profile .chatgpt_base_url .or(cfg.chatgpt_base_url) .unwrap_or("https://chatgpt.com/backend-api/".to_string()), experimental_resume, include_plan_tool: include_plan_tool.unwrap_or(false), include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false), tools_web_search_request, preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT), use_experimental_streamable_shell_tool: cfg .experimental_use_exec_command_tool .unwrap_or(false), include_view_image_tool, active_profile: active_profile_name, disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false), }; Ok(config) } fn load_instructions(codex_dir: Option<&Path>) -> Option { let mut p = match codex_dir { Some(p) => p.to_path_buf(), None => return None, }; p.push("AGENTS.md"); std::fs::read_to_string(&p).ok().and_then(|s| { let s = s.trim(); if s.is_empty() { None } else { Some(s.to_string()) } }) } fn get_base_instructions( path: Option<&PathBuf>, cwd: &Path, ) -> std::io::Result> { let p = match path.as_ref() { None => return Ok(None), Some(p) => p, }; // Resolve relative paths against the provided cwd to make CLI // overrides consistent regardless of where the process was launched // from. let full_path = if p.is_relative() { cwd.join(p) } else { p.to_path_buf() }; let contents = std::fs::read_to_string(&full_path).map_err(|e| { std::io::Error::new( e.kind(), format!( "failed to read experimental instructions file {}: {e}", full_path.display() ), ) })?; let s = contents.trim().to_string(); if s.is_empty() { Err(std::io::Error::new( std::io::ErrorKind::InvalidData, format!( "experimental instructions file is empty: {}", full_path.display() ), )) } else { Ok(Some(s)) } } } fn default_model() -> String { OPENAI_DEFAULT_MODEL.to_string() } /// Returns the path to the Codex configuration directory, which can be /// specified by the `CODEX_HOME` environment variable. If not set, defaults to /// `~/.codex`. /// /// - If `CODEX_HOME` is set, the value will be canonicalized and this /// function will Err if the path does not exist. /// - If `CODEX_HOME` is not set, this function does not verify that the /// directory exists. pub fn find_codex_home() -> std::io::Result { // Honor the `CODEX_HOME` environment variable when it is set to allow users // (and tests) to override the default location. if let Ok(val) = std::env::var("CODEX_HOME") && !val.is_empty() { return PathBuf::from(val).canonicalize(); } let mut p = home_dir().ok_or_else(|| { std::io::Error::new( std::io::ErrorKind::NotFound, "Could not find home directory", ) })?; p.push(".codex"); Ok(p) } /// Returns the path to the folder where Codex logs are stored. Does not verify /// that the directory exists. pub fn log_dir(cfg: &Config) -> std::io::Result { let mut p = cfg.codex_home.clone(); p.push("log"); Ok(p) } #[cfg(test)] mod tests { use crate::config_types::HistoryPersistence; use super::*; use pretty_assertions::assert_eq; use tempfile::TempDir; #[test] fn test_toml_parsing() { let history_with_persistence = r#" [history] persistence = "save-all" "#; let history_with_persistence_cfg = toml::from_str::(history_with_persistence) .expect("TOML deserialization should succeed"); assert_eq!( Some(History { persistence: HistoryPersistence::SaveAll, max_bytes: None, }), history_with_persistence_cfg.history ); let history_no_persistence = r#" [history] persistence = "none" "#; let history_no_persistence_cfg = toml::from_str::(history_no_persistence) .expect("TOML deserialization should succeed"); assert_eq!( Some(History { persistence: HistoryPersistence::None, max_bytes: None, }), history_no_persistence_cfg.history ); } #[test] fn test_sandbox_config_parsing() { let sandbox_full_access = r#" sandbox_mode = "danger-full-access" [sandbox_workspace_write] network_access = false # This should be ignored. "#; let sandbox_full_access_cfg = toml::from_str::(sandbox_full_access) .expect("TOML deserialization should succeed"); let sandbox_mode_override = None; assert_eq!( SandboxPolicy::DangerFullAccess, sandbox_full_access_cfg.derive_sandbox_policy(sandbox_mode_override) ); let sandbox_read_only = r#" sandbox_mode = "read-only" [sandbox_workspace_write] network_access = true # This should be ignored. "#; let sandbox_read_only_cfg = toml::from_str::(sandbox_read_only) .expect("TOML deserialization should succeed"); let sandbox_mode_override = None; assert_eq!( SandboxPolicy::ReadOnly, sandbox_read_only_cfg.derive_sandbox_policy(sandbox_mode_override) ); let sandbox_workspace_write = r#" sandbox_mode = "workspace-write" [sandbox_workspace_write] writable_roots = [ "/my/workspace", ] exclude_tmpdir_env_var = true exclude_slash_tmp = true "#; let sandbox_workspace_write_cfg = toml::from_str::(sandbox_workspace_write) .expect("TOML deserialization should succeed"); let sandbox_mode_override = None; assert_eq!( SandboxPolicy::WorkspaceWrite { writable_roots: vec![PathBuf::from("/my/workspace")], network_access: false, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }, sandbox_workspace_write_cfg.derive_sandbox_policy(sandbox_mode_override) ); } struct PrecedenceTestFixture { cwd: TempDir, codex_home: TempDir, cfg: ConfigToml, model_provider_map: HashMap, openai_provider: ModelProviderInfo, openai_chat_completions_provider: ModelProviderInfo, } impl PrecedenceTestFixture { fn cwd(&self) -> PathBuf { self.cwd.path().to_path_buf() } fn codex_home(&self) -> PathBuf { self.codex_home.path().to_path_buf() } } fn create_test_fixture() -> std::io::Result { let toml = r#" model = "o3" approval_policy = "untrusted" # Can be used to determine which profile to use if not specified by # `ConfigOverrides`. profile = "gpt3" [model_providers.openai-chat-completions] name = "OpenAI using Chat Completions" base_url = "https://api.openai.com/v1" env_key = "OPENAI_API_KEY" wire_api = "chat" request_max_retries = 4 # retry failed HTTP requests stream_max_retries = 10 # retry dropped SSE streams stream_idle_timeout_ms = 300000 # 5m idle timeout [profiles.o3] model = "o3" model_provider = "openai" approval_policy = "never" model_reasoning_effort = "high" model_reasoning_summary = "detailed" [profiles.gpt3] model = "gpt-3.5-turbo" model_provider = "openai-chat-completions" [profiles.zdr] model = "o3" model_provider = "openai" approval_policy = "on-failure" [profiles.gpt5] model = "gpt-5" model_provider = "openai" approval_policy = "on-failure" model_reasoning_effort = "high" model_reasoning_summary = "detailed" model_verbosity = "high" "#; let cfg: ConfigToml = toml::from_str(toml).expect("TOML deserialization should succeed"); // Use a temporary directory for the cwd so it does not contain an // AGENTS.md file. let cwd_temp_dir = TempDir::new().unwrap(); let cwd = cwd_temp_dir.path().to_path_buf(); // Make it look like a Git repo so it does not search for AGENTS.md in // a parent folder, either. std::fs::write(cwd.join(".git"), "gitdir: nowhere")?; let codex_home_temp_dir = TempDir::new().unwrap(); let openai_chat_completions_provider = ModelProviderInfo { name: "OpenAI using Chat Completions".to_string(), base_url: Some("https://api.openai.com/v1".to_string()), env_key: Some("OPENAI_API_KEY".to_string()), wire_api: crate::WireApi::Chat, env_key_instructions: None, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(4), stream_max_retries: Some(10), stream_idle_timeout_ms: Some(300_000), requires_openai_auth: false, }; let model_provider_map = { let mut model_provider_map = built_in_model_providers(); model_provider_map.insert( "openai-chat-completions".to_string(), openai_chat_completions_provider.clone(), ); model_provider_map }; let openai_provider = model_provider_map .get("openai") .expect("openai provider should exist") .clone(); Ok(PrecedenceTestFixture { cwd: cwd_temp_dir, codex_home: codex_home_temp_dir, cfg, model_provider_map, openai_provider, openai_chat_completions_provider, }) } /// Users can specify config values at multiple levels that have the /// following precedence: /// /// 1. custom command-line argument, e.g. `--model o3` /// 2. as part of a profile, where the `--profile` is specified via a CLI /// (or in the config file itself) /// 3. as an entry in `config.toml`, e.g. `model = "o3"` /// 4. the default value for a required field defined in code, e.g., /// `crate::flags::OPENAI_DEFAULT_MODEL` /// /// Note that profiles are the recommended way to specify a group of /// configuration options together. #[test] fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let o3_profile_overrides = ConfigOverrides { config_profile: Some("o3".to_string()), cwd: Some(fixture.cwd()), ..Default::default() }; let o3_profile_config: Config = Config::load_from_base_config_with_overrides( fixture.cfg.clone(), o3_profile_overrides, fixture.codex_home(), )?; assert_eq!( Config { model: "o3".to_string(), model_family: find_family_for_model("o3").expect("known model slug"), model_context_window: Some(200_000), model_max_output_tokens: Some(100_000), model_provider_id: "openai".to_string(), model_provider: fixture.openai_provider.clone(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::new_read_only_policy(), shell_environment_policy: ShellEnvironmentPolicy::default(), user_instructions: None, notify: None, cwd: fixture.cwd(), mcp_servers: HashMap::new(), model_providers: fixture.model_provider_map.clone(), project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, codex_home: fixture.codex_home(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, tui: Tui::default(), codex_linux_sandbox_exe: None, hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: ReasoningEffort::High, model_reasoning_summary: ReasoningSummary::Detailed, model_verbosity: None, chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), experimental_resume: None, base_instructions: None, include_plan_tool: false, include_apply_patch_tool: false, tools_web_search_request: false, preferred_auth_method: AuthMode::ChatGPT, use_experimental_streamable_shell_tool: false, include_view_image_tool: true, active_profile: Some("o3".to_string()), disable_paste_burst: false, }, o3_profile_config ); Ok(()) } #[test] fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let gpt3_profile_overrides = ConfigOverrides { config_profile: Some("gpt3".to_string()), cwd: Some(fixture.cwd()), ..Default::default() }; let gpt3_profile_config = Config::load_from_base_config_with_overrides( fixture.cfg.clone(), gpt3_profile_overrides, fixture.codex_home(), )?; let expected_gpt3_profile_config = Config { model: "gpt-3.5-turbo".to_string(), model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"), model_context_window: Some(16_385), model_max_output_tokens: Some(4_096), model_provider_id: "openai-chat-completions".to_string(), model_provider: fixture.openai_chat_completions_provider.clone(), approval_policy: AskForApproval::UnlessTrusted, sandbox_policy: SandboxPolicy::new_read_only_policy(), shell_environment_policy: ShellEnvironmentPolicy::default(), user_instructions: None, notify: None, cwd: fixture.cwd(), mcp_servers: HashMap::new(), model_providers: fixture.model_provider_map.clone(), project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, codex_home: fixture.codex_home(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, tui: Tui::default(), codex_linux_sandbox_exe: None, hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: ReasoningEffort::default(), model_reasoning_summary: ReasoningSummary::default(), model_verbosity: None, chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), experimental_resume: None, base_instructions: None, include_plan_tool: false, include_apply_patch_tool: false, tools_web_search_request: false, preferred_auth_method: AuthMode::ChatGPT, use_experimental_streamable_shell_tool: false, include_view_image_tool: true, active_profile: Some("gpt3".to_string()), disable_paste_burst: false, }; assert_eq!(expected_gpt3_profile_config, gpt3_profile_config); // Verify that loading without specifying a profile in ConfigOverrides // uses the default profile from the config file (which is "gpt3"). let default_profile_overrides = ConfigOverrides { cwd: Some(fixture.cwd()), ..Default::default() }; let default_profile_config = Config::load_from_base_config_with_overrides( fixture.cfg.clone(), default_profile_overrides, fixture.codex_home(), )?; assert_eq!(expected_gpt3_profile_config, default_profile_config); Ok(()) } #[test] fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let zdr_profile_overrides = ConfigOverrides { config_profile: Some("zdr".to_string()), cwd: Some(fixture.cwd()), ..Default::default() }; let zdr_profile_config = Config::load_from_base_config_with_overrides( fixture.cfg.clone(), zdr_profile_overrides, fixture.codex_home(), )?; let expected_zdr_profile_config = Config { model: "o3".to_string(), model_family: find_family_for_model("o3").expect("known model slug"), model_context_window: Some(200_000), model_max_output_tokens: Some(100_000), model_provider_id: "openai".to_string(), model_provider: fixture.openai_provider.clone(), approval_policy: AskForApproval::OnFailure, sandbox_policy: SandboxPolicy::new_read_only_policy(), shell_environment_policy: ShellEnvironmentPolicy::default(), user_instructions: None, notify: None, cwd: fixture.cwd(), mcp_servers: HashMap::new(), model_providers: fixture.model_provider_map.clone(), project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, codex_home: fixture.codex_home(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, tui: Tui::default(), codex_linux_sandbox_exe: None, hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: ReasoningEffort::default(), model_reasoning_summary: ReasoningSummary::default(), model_verbosity: None, chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), experimental_resume: None, base_instructions: None, include_plan_tool: false, include_apply_patch_tool: false, tools_web_search_request: false, preferred_auth_method: AuthMode::ChatGPT, use_experimental_streamable_shell_tool: false, include_view_image_tool: true, active_profile: Some("zdr".to_string()), disable_paste_burst: false, }; assert_eq!(expected_zdr_profile_config, zdr_profile_config); Ok(()) } #[test] fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let gpt5_profile_overrides = ConfigOverrides { config_profile: Some("gpt5".to_string()), cwd: Some(fixture.cwd()), ..Default::default() }; let gpt5_profile_config = Config::load_from_base_config_with_overrides( fixture.cfg.clone(), gpt5_profile_overrides, fixture.codex_home(), )?; let expected_gpt5_profile_config = Config { model: "gpt-5".to_string(), model_family: find_family_for_model("gpt-5").expect("known model slug"), model_context_window: Some(272_000), model_max_output_tokens: Some(128_000), model_provider_id: "openai".to_string(), model_provider: fixture.openai_provider.clone(), approval_policy: AskForApproval::OnFailure, sandbox_policy: SandboxPolicy::new_read_only_policy(), shell_environment_policy: ShellEnvironmentPolicy::default(), user_instructions: None, notify: None, cwd: fixture.cwd(), mcp_servers: HashMap::new(), model_providers: fixture.model_provider_map.clone(), project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, codex_home: fixture.codex_home(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, tui: Tui::default(), codex_linux_sandbox_exe: None, hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: ReasoningEffort::High, model_reasoning_summary: ReasoningSummary::Detailed, model_verbosity: Some(Verbosity::High), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), experimental_resume: None, base_instructions: None, include_plan_tool: false, include_apply_patch_tool: false, tools_web_search_request: false, preferred_auth_method: AuthMode::ChatGPT, use_experimental_streamable_shell_tool: false, include_view_image_tool: true, active_profile: Some("gpt5".to_string()), disable_paste_burst: false, }; assert_eq!(expected_gpt5_profile_config, gpt5_profile_config); Ok(()) } #[test] fn test_set_project_trusted_writes_explicit_tables() -> anyhow::Result<()> { let codex_home = TempDir::new().unwrap(); let project_dir = TempDir::new().unwrap(); // Call the function under test set_project_trusted(codex_home.path(), project_dir.path())?; // Read back the generated config.toml and assert exact contents let config_path = codex_home.path().join(CONFIG_TOML_FILE); let contents = std::fs::read_to_string(&config_path)?; let raw_path = project_dir.path().to_string_lossy(); let path_str = if raw_path.contains('\\') { format!("'{raw_path}'") } else { format!("\"{raw_path}\"") }; let expected = format!( r#"[projects.{path_str}] trust_level = "trusted" "# ); assert_eq!(contents, expected); Ok(()) } #[test] fn test_set_project_trusted_converts_inline_to_explicit() -> anyhow::Result<()> { let codex_home = TempDir::new().unwrap(); let project_dir = TempDir::new().unwrap(); // Seed config.toml with an inline project entry under [projects] let config_path = codex_home.path().join(CONFIG_TOML_FILE); let raw_path = project_dir.path().to_string_lossy(); let path_str = if raw_path.contains('\\') { format!("'{raw_path}'") } else { format!("\"{raw_path}\"") }; // Use a quoted key so backslashes don't require escaping on Windows let initial = format!( r#"[projects] {path_str} = {{ trust_level = "untrusted" }} "# ); std::fs::create_dir_all(codex_home.path())?; std::fs::write(&config_path, initial)?; // Run the function; it should convert to explicit tables and set trusted set_project_trusted(codex_home.path(), project_dir.path())?; let contents = std::fs::read_to_string(&config_path)?; // Assert exact output after conversion to explicit table let expected = format!( r#"[projects] [projects.{path_str}] trust_level = "trusted" "# ); assert_eq!(contents, expected); Ok(()) } }