diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock
index aa22911b..15a62983 100644
--- a/codex-rs/Cargo.lock
+++ b/codex-rs/Cargo.lock
@@ -531,6 +531,7 @@ dependencies = [
  "patch",
  "path-absolutize",
  "predicates",
+ "pretty_assertions",
  "rand",
  "reqwest",
  "seccompiler",
diff --git a/codex-rs/README.md b/codex-rs/README.md
index 827a5659..fa724420 100644
--- a/codex-rs/README.md
+++ b/codex-rs/README.md
@@ -109,6 +109,52 @@ approval_policy = "on-failure"
 approval_policy = "never"
 ```
 
+### profiles
+
+A _profile_ is a collection of configuration values that can be set together. Multiple profiles can be defined in `config.toml` and you can specify the one you
+want to use at runtime via the `--profile` flag.
+
+Here is an example of a `config.toml` that defines multiple profiles:
+
+```toml
+model = "o3"
+approval_policy = "unless-allow-listed"
+sandbox_permissions = ["disk-full-read-access"]
+disable_response_storage = false
+
+# Setting `profile` is equivalent to specifying `--profile o3` on the command
+# line, though the `--profile` flag can still be used to override this value.
+profile = "o3"
+
+[model_providers.openai-chat-completions]
+name = "OpenAI using Chat Completions"
+base_url = "https://api.openai.com/v1"
+env_key = "OPENAI_API_KEY"
+wire_api = "chat"
+
+[profiles.o3]
+model = "o3"
+model_provider = "openai"
+approval_policy = "never"
+
+[profiles.gpt3]
+model = "gpt-3.5-turbo"
+model_provider = "openai-chat-completions"
+
+[profiles.zdr]
+model = "o3"
+model_provider = "openai"
+approval_policy = "on-failure"
+disable_response_storage = true
+```
+
+Users can specify config values at multiple levels. Order of precedence is as follows:
+
+1. custom command-line argument, e.g., `--model o3`
+2. as part of a profile, where the profile is specified via the `--profile` CLI flag (or in the config file itself)
+3. as an entry in `config.toml`, e.g., `model = "o3"`
+4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `o4-mini`)
+
 ### sandbox_permissions
 
 List of permissions to grant to the sandbox that Codex uses to execute untrusted commands:
diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml
index c04bcb6a..6154d91d 100644
--- a/codex-rs/core/Cargo.toml
+++ b/codex-rs/core/Cargo.toml
@@ -58,5 +58,6 @@ openssl-sys = { version = "*", features = ["vendored"] }
 [dev-dependencies]
 assert_cmd = "2"
 predicates = "3"
+pretty_assertions = "1.4.1"
 tempfile = "3"
 wiremock = "0.6"
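The README hunk above describes four precedence levels for configuration values. The sketch below restates that resolution order as self-contained Rust; the function and parameter names are illustrative rather than the crate's actual API, but the `Option::or` chaining mirrors what the `config.rs` changes further down in this patch actually do.

```rust
/// Resolution order: CLI override, then the selected profile, then the
/// top-level `config.toml` entry, then the built-in default.
fn resolve_model(
    cli_override: Option<String>,      // 1. e.g. `--model o3`
    profile_value: Option<String>,     // 2. `[profiles.<name>] model = ...`
    config_toml_value: Option<String>, // 3. top-level `model = ...`
) -> String {
    cli_override
        .or(profile_value)
        .or(config_toml_value)
        .unwrap_or_else(|| "o4-mini".to_string()) // 4. built-in default
}

fn main() {
    // With no CLI flag and no profile value, the config.toml entry wins.
    assert_eq!(resolve_model(None, None, Some("o3".into())), "o3");
    // A profile value beats the config.toml entry; a CLI flag beats both.
    assert_eq!(
        resolve_model(None, Some("gpt-3.5-turbo".into()), Some("o3".into())),
        "gpt-3.5-turbo"
    );
    assert_eq!(
        resolve_model(Some("o3".into()), Some("gpt-3.5-turbo".into()), None),
        "o3"
    );
}
```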
diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs
index 4c815ad0..42c1684a 100644
--- a/codex-rs/core/src/config.rs
+++ b/codex-rs/core/src/config.rs
@@ -1,3 +1,4 @@
+use crate::config_profile::ConfigProfile;
 use crate::flags::OPENAI_DEFAULT_MODEL;
 use crate::mcp_server_config::McpServerConfig;
 use crate::model_provider_info::ModelProviderInfo;
@@ -8,6 +9,7 @@ use crate::protocol::SandboxPolicy;
 use dirs::home_dir;
 use serde::Deserialize;
 use std::collections::HashMap;
+use std::path::Path;
 use std::path::PathBuf;
 
 /// Maximum number of bytes of the documentation that will be embedded. Larger
@@ -16,7 +18,7 @@ use std::path::PathBuf;
 pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB
 
 /// Application configuration loaded from disk and merged with overrides.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct Config {
     /// Optional override of model selection.
     pub model: String,
@@ -117,6 +119,13 @@ pub struct ConfigToml {
     /// Maximum number of bytes to include from an AGENTS.md project doc file.
     pub project_doc_max_bytes: Option<usize>,
+
+    /// Profile to use from the `profiles` map.
+    pub profile: Option<String>,
+
+    /// Named profiles to facilitate switching between different configurations.
+    #[serde(default)]
+    pub profiles: HashMap<String, ConfigProfile>,
 }
 
 impl ConfigToml {
@@ -176,7 +185,8 @@ pub struct ConfigOverrides {
     pub approval_policy: Option<AskForApproval>,
     pub sandbox_policy: Option<SandboxPolicy>,
     pub disable_response_storage: Option<bool>,
-    pub provider: Option<String>,
+    pub model_provider: Option<String>,
+    pub config_profile: Option<String>,
 }
 
 impl Config {
@@ -186,14 +196,16 @@
     pub fn load_with_overrides(overrides: ConfigOverrides) -> std::io::Result<Self> {
         let cfg: ConfigToml = ConfigToml::load_from_toml()?;
         tracing::warn!("Config parsed from config.toml: {cfg:?}");
-        Self::load_from_base_config_with_overrides(cfg, overrides)
+        let codex_dir = codex_dir().ok();
+        Self::load_from_base_config_with_overrides(cfg, overrides, codex_dir.as_deref())
     }
 
     fn load_from_base_config_with_overrides(
         cfg: ConfigToml,
         overrides: ConfigOverrides,
+        codex_dir: Option<&Path>,
     ) -> std::io::Result<Self> {
-        let instructions = Self::load_instructions();
+        let instructions = Self::load_instructions(codex_dir);
 
         // Destructure ConfigOverrides fully to ensure all overrides are applied.
         let ConfigOverrides {
@@ -202,9 +214,24 @@
             approval_policy,
             sandbox_policy,
             disable_response_storage,
-            provider,
+            model_provider,
+            config_profile: config_profile_key,
         } = overrides;
 
+        let config_profile = match config_profile_key.or(cfg.profile) {
+            Some(key) => cfg
+                .profiles
+                .get(&key)
+                .ok_or_else(|| {
+                    std::io::Error::new(
+                        std::io::ErrorKind::NotFound,
+                        format!("config profile `{key}` not found"),
+                    )
+                })?
+                .clone(),
+            None => ConfigProfile::default(),
+        };
+
         let sandbox_policy = match sandbox_policy {
             Some(sandbox_policy) => sandbox_policy,
             None => {
@@ -226,7 +253,8 @@
             model_providers.entry(key).or_insert(provider);
         }
 
-        let model_provider_id = provider
+        let model_provider_id = model_provider
+            .or(config_profile.model_provider)
             .or(cfg.model_provider)
             .unwrap_or_else(|| "openai".to_string());
         let model_provider = model_providers
@@ -259,15 +287,20 @@
         };
 
         let config = Self {
-            model: model.or(cfg.model).unwrap_or_else(default_model),
+            model: model
+                .or(config_profile.model)
+                .or(cfg.model)
+                .unwrap_or_else(default_model),
             model_provider_id,
             model_provider,
             cwd: resolved_cwd,
             approval_policy: approval_policy
+                .or(config_profile.approval_policy)
                 .or(cfg.approval_policy)
                 .unwrap_or_else(AskForApproval::default),
             sandbox_policy,
             disable_response_storage: disable_response_storage
+                .or(config_profile.disable_response_storage)
                 .or(cfg.disable_response_storage)
                 .unwrap_or(false),
             notify: cfg.notify,
@@ -279,8 +312,12 @@
         Ok(config)
     }
 
-    fn load_instructions() -> Option<String> {
-        let mut p = codex_dir().ok()?;
+    fn load_instructions(codex_dir: Option<&Path>) -> Option<String> {
+        let mut p = match codex_dir {
+            Some(p) => p.to_path_buf(),
+            None => return None,
+        };
+
         p.push("instructions.md");
         std::fs::read_to_string(&p).ok().and_then(|s| {
             let s = s.trim();
@@ -299,6 +336,7 @@
         Self::load_from_base_config_with_overrides(
             ConfigToml::default(),
             ConfigOverrides::default(),
+            None,
         )
         .expect("defaults for test should always succeed")
     }
@@ -377,6 +415,8 @@ pub fn parse_sandbox_permission_with_base_path(
 mod tests {
     #![allow(clippy::expect_used, clippy::unwrap_used)]
     use super::*;
+    use pretty_assertions::assert_eq;
+    use tempfile::TempDir;
 
     /// Verify that the `sandbox_permissions` field on `ConfigToml` correctly
     /// differentiates between a value that is completely absent in the
@@ -429,4 +469,173 @@ mod tests {
         let msg = err.to_string();
         assert!(msg.contains("not-a-real-permission"));
     }
+
+    /// Users can specify config values at multiple levels that have the
+    /// following precedence:
+    ///
+    /// 1. custom command-line argument, e.g. `--model o3`
+    /// 2. as part of a profile, where the profile is specified via the
+    ///    `--profile` CLI flag (or in the config file itself)
+    /// 3. as an entry in `config.toml`, e.g. `model = "o3"`
+    /// 4. the default value for a required field defined in code, e.g.,
+    ///    `crate::flags::OPENAI_DEFAULT_MODEL`
+    ///
+    /// Note that profiles are the recommended way to specify a group of
+    /// configuration options together.
+    #[test]
+    fn test_precedence_overrides_then_profile_then_config_toml() -> std::io::Result<()> {
+        let toml = r#"
+model = "o3"
+approval_policy = "unless-allow-listed"
+sandbox_permissions = ["disk-full-read-access"]
+disable_response_storage = false
+
+# Can be used to determine which profile to use if not specified by
+# `ConfigOverrides`.
+profile = "gpt3"
+
+[model_providers.openai-chat-completions]
+name = "OpenAI using Chat Completions"
+base_url = "https://api.openai.com/v1"
+env_key = "OPENAI_API_KEY"
+wire_api = "chat"
+
+[profiles.o3]
+model = "o3"
+model_provider = "openai"
+approval_policy = "never"
+
+[profiles.gpt3]
+model = "gpt-3.5-turbo"
+model_provider = "openai-chat-completions"
+
+[profiles.zdr]
+model = "o3"
+model_provider = "openai"
+approval_policy = "on-failure"
+disable_response_storage = true
+"#;
+
+        let cfg: ConfigToml = toml::from_str(toml).expect("TOML deserialization should succeed");
+
+        // Use a temporary directory for the cwd so it does not contain an
+        // AGENTS.md file.
+        let cwd_temp_dir = TempDir::new().unwrap();
+        let cwd = cwd_temp_dir.path().to_path_buf();
+        // Make it look like a Git repo so it does not search for AGENTS.md in
+        // a parent folder, either.
+        std::fs::write(cwd.join(".git"), "gitdir: nowhere")?;
+
+        let openai_chat_completions_provider = ModelProviderInfo {
+            name: "OpenAI using Chat Completions".to_string(),
+            base_url: "https://api.openai.com/v1".to_string(),
+            env_key: Some("OPENAI_API_KEY".to_string()),
+            wire_api: crate::WireApi::Chat,
+            env_key_instructions: None,
+        };
+        let model_provider_map = {
+            let mut model_provider_map = built_in_model_providers();
+            model_provider_map.insert(
+                "openai-chat-completions".to_string(),
+                openai_chat_completions_provider.clone(),
+            );
+            model_provider_map
+        };
+
+        let openai_provider = model_provider_map
+            .get("openai")
+            .expect("openai provider should exist")
+            .clone();
+
+        let o3_profile_overrides = ConfigOverrides {
+            config_profile: Some("o3".to_string()),
+            cwd: Some(cwd.clone()),
+            ..Default::default()
+        };
+        let o3_profile_config =
+            Config::load_from_base_config_with_overrides(cfg.clone(), o3_profile_overrides, None)?;
+        assert_eq!(
+            Config {
+                model: "o3".to_string(),
+                model_provider_id: "openai".to_string(),
+                model_provider: openai_provider.clone(),
+                approval_policy: AskForApproval::Never,
+                sandbox_policy: SandboxPolicy::new_read_only_policy(),
+                disable_response_storage: false,
+                instructions: None,
+                notify: None,
+                cwd: cwd.clone(),
+                mcp_servers: HashMap::new(),
+                model_providers: model_provider_map.clone(),
+                project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
+            },
+            o3_profile_config
+        );
+
+        let gpt3_profile_overrides = ConfigOverrides {
+            config_profile: Some("gpt3".to_string()),
+            cwd: Some(cwd.clone()),
+            ..Default::default()
+        };
+        let gpt3_profile_config = Config::load_from_base_config_with_overrides(
+            cfg.clone(),
+            gpt3_profile_overrides,
+            None,
+        )?;
+        let expected_gpt3_profile_config = Config {
+            model: "gpt-3.5-turbo".to_string(),
+            model_provider_id: "openai-chat-completions".to_string(),
+            model_provider: openai_chat_completions_provider,
+            approval_policy: AskForApproval::UnlessAllowListed,
+            sandbox_policy: SandboxPolicy::new_read_only_policy(),
+            disable_response_storage: false,
+            instructions: None,
+            notify: None,
+            cwd: cwd.clone(),
+            mcp_servers: HashMap::new(),
+            model_providers: model_provider_map.clone(),
+            project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
+        };
+        assert_eq!(expected_gpt3_profile_config.clone(), gpt3_profile_config);
+
+        // Verify that loading without specifying a profile in ConfigOverrides
+        // uses the default profile from the config file.
+        let default_profile_overrides = ConfigOverrides {
+            cwd: Some(cwd.clone()),
+            ..Default::default()
+        };
+        let default_profile_config = Config::load_from_base_config_with_overrides(
+            cfg.clone(),
+            default_profile_overrides,
+            None,
+        )?;
+        assert_eq!(expected_gpt3_profile_config, default_profile_config);
+
+        let zdr_profile_overrides = ConfigOverrides {
+            config_profile: Some("zdr".to_string()),
+            cwd: Some(cwd.clone()),
+            ..Default::default()
+        };
+        let zdr_profile_config =
+            Config::load_from_base_config_with_overrides(cfg.clone(), zdr_profile_overrides, None)?;
+        assert_eq!(
+            Config {
+                model: "o3".to_string(),
+                model_provider_id: "openai".to_string(),
+                model_provider: openai_provider.clone(),
+                approval_policy: AskForApproval::OnFailure,
+                sandbox_policy: SandboxPolicy::new_read_only_policy(),
+                disable_response_storage: true,
+                instructions: None,
+                notify: None,
+                cwd: cwd.clone(),
+                mcp_servers: HashMap::new(),
+                model_providers: model_provider_map.clone(),
+                project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
+            },
+            zdr_profile_config
+        );
+
+        Ok(())
+    }
 }
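The test above exercises the full `Config` type. For a standalone feel of the two mechanisms it relies on, the sketch below reproduces just the profile deserialization and the lookup-or-`NotFound` error, assuming only the `serde` and `toml` crates; `Root` and `Profile` are simplified stand-ins for `ConfigToml` and `ConfigProfile`.

```rust
use std::collections::HashMap;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Profile {
    model: Option<String>,
    model_provider: Option<String>,
}

#[derive(Debug, Deserialize)]
struct Root {
    profile: Option<String>,
    #[serde(default)] // a missing `[profiles.*]` section becomes an empty map
    profiles: HashMap<String, Profile>,
}

fn main() -> std::io::Result<()> {
    let root: Root = toml::from_str(
        r#"
profile = "gpt3"

[profiles.gpt3]
model = "gpt-3.5-turbo"
model_provider = "openai-chat-completions"
"#,
    )
    .expect("TOML deserialization should succeed");

    // Same lookup-or-NotFound shape as `load_from_base_config_with_overrides`.
    let key = root.profile.as_deref().unwrap_or_default();
    let profile = root.profiles.get(key).ok_or_else(|| {
        std::io::Error::new(
            std::io::ErrorKind::NotFound,
            format!("config profile `{key}` not found"),
        )
    })?;
    assert_eq!(profile.model.as_deref(), Some("gpt-3.5-turbo"));
    assert_eq!(profile.model_provider.as_deref(), Some("openai-chat-completions"));
    Ok(())
}
```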
diff --git a/codex-rs/core/src/config_profile.rs b/codex-rs/core/src/config_profile.rs
new file mode 100644
index 00000000..98d73bb5
--- /dev/null
+++ b/codex-rs/core/src/config_profile.rs
@@ -0,0 +1,15 @@
+use serde::Deserialize;
+
+use crate::protocol::AskForApproval;
+
+/// Collection of common configuration options that a user can define as a unit
+/// in `config.toml`.
+#[derive(Debug, Clone, Default, PartialEq, Deserialize)]
+pub struct ConfigProfile {
+    pub model: Option<String>,
+    /// The key in the `model_providers` map identifying the
+    /// [`ModelProviderInfo`] to use.
+    pub model_provider: Option<String>,
+    pub approval_policy: Option<AskForApproval>,
+    pub disable_response_storage: Option<bool>,
+}
diff --git a/codex-rs/core/src/flags.rs b/codex-rs/core/src/flags.rs
index 44198fde..e8cc973c 100644
--- a/codex-rs/core/src/flags.rs
+++ b/codex-rs/core/src/flags.rs
@@ -3,7 +3,7 @@ use std::time::Duration;
 use env_flags::env_flags;
 
 env_flags! {
-    pub OPENAI_DEFAULT_MODEL: &str = "o3";
+    pub OPENAI_DEFAULT_MODEL: &str = "o4-mini";
     pub OPENAI_API_BASE: &str = "https://api.openai.com/v1";
 
     /// Fallback when the provider-specific key is not set.
diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs
index 43c97a87..c4f38026 100644
--- a/codex-rs/core/src/lib.rs
+++ b/codex-rs/core/src/lib.rs
@@ -13,6 +13,7 @@ pub mod codex;
 pub use codex::Codex;
 pub mod codex_wrapper;
 pub mod config;
+pub mod config_profile;
 mod conversation_history;
 pub mod error;
 pub mod exec;
diff --git a/codex-rs/core/src/mcp_server_config.rs b/codex-rs/core/src/mcp_server_config.rs
index 261a75d1..30845431 100644
--- a/codex-rs/core/src/mcp_server_config.rs
+++ b/codex-rs/core/src/mcp_server_config.rs
@@ -2,7 +2,7 @@ use std::collections::HashMap;
 
 use serde::Deserialize;
 
-#[derive(Deserialize, Debug, Clone)]
+#[derive(Deserialize, Debug, Clone, PartialEq)]
 pub struct McpServerConfig {
     pub command: String,
diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs
index 969797cb..186e28d3 100644
--- a/codex-rs/core/src/model_provider_info.rs
+++ b/codex-rs/core/src/model_provider_info.rs
@@ -29,7 +29,7 @@ pub enum WireApi {
 }
 
 /// Serializable representation of a provider definition.
-#[derive(Debug, Clone, Deserialize, Serialize)]
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
 pub struct ModelProviderInfo {
     /// Friendly display name.
     pub name: String,
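The remaining files wire the new flag through each front end. Below is a minimal sketch of the clap pattern they share, with everything except the profile flag stripped away; `MiniCli` is an illustrative name, and the sketch assumes clap's derive feature (which these CLIs use).

```rust
use clap::Parser;

/// Stripped-down stand-in for the real `Cli` structs in the diffs below.
#[derive(Parser, Debug)]
struct MiniCli {
    /// Configuration profile from config.toml to specify default options.
    #[arg(long = "profile", short = 'p')]
    config_profile: Option<String>,
}

fn main() {
    // `--profile o3` and `-p o3` both populate `config_profile`, which each
    // front end then copies into `ConfigOverrides::config_profile`.
    let cli = MiniCli::parse_from(["codex", "--profile", "o3"]);
    assert_eq!(cli.config_profile.as_deref(), Some("o3"));
}
```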
diff --git a/codex-rs/exec/src/cli.rs b/codex-rs/exec/src/cli.rs
index 1248ef3b..dd72b3e9 100644
--- a/codex-rs/exec/src/cli.rs
+++ b/codex-rs/exec/src/cli.rs
@@ -14,6 +14,10 @@ pub struct Cli {
     #[arg(long, short = 'm')]
     pub model: Option<String>,
 
+    /// Configuration profile from config.toml to specify default options.
+    #[arg(long = "profile", short = 'p')]
+    pub config_profile: Option<String>,
+
     /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR)
     #[arg(long = "full-auto", default_value_t = false)]
     pub full_auto: bool,
diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs
index d711388f..348bff08 100644
--- a/codex-rs/exec/src/lib.rs
+++ b/codex-rs/exec/src/lib.rs
@@ -25,6 +25,7 @@ pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
     let Cli {
         images,
         model,
+        config_profile,
         full_auto,
         sandbox,
         cwd,
@@ -52,6 +53,7 @@ pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
     // Load configuration and determine approval policy
     let overrides = ConfigOverrides {
         model,
+        config_profile,
         // This CLI is intended to be headless and has no affordances for asking
         // the user for approval.
         approval_policy: Some(AskForApproval::Never),
@@ -62,7 +64,7 @@ pub async fn run_main(cli: Cli) -> anyhow::Result<()> {
             None
         },
         cwd: cwd.map(|p| p.canonicalize().unwrap_or(p)),
-        provider: None,
+        model_provider: None,
     };
 
     let config = Config::load_with_overrides(overrides)?;
diff --git a/codex-rs/mcp-server/src/codex_tool_config.rs b/codex-rs/mcp-server/src/codex_tool_config.rs
index 78080795..2ddc00fb 100644
--- a/codex-rs/mcp-server/src/codex_tool_config.rs
+++ b/codex-rs/mcp-server/src/codex_tool_config.rs
@@ -22,6 +22,10 @@ pub(crate) struct CodexToolCallParam {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub model: Option<String>,
 
+    /// Configuration profile from config.toml to specify default options.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub profile: Option<String>,
+
     /// Working directory for the session. If relative, it is resolved against
     /// the server process's current working directory.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub cwd: Option<String>,
@@ -144,6 +148,7 @@ impl CodexToolCallParam {
         let Self {
             prompt,
             model,
+            profile,
             cwd,
             approval_policy,
             sandbox_permissions,
@@ -156,11 +161,12 @@ impl CodexToolCallParam {
         // Build ConfigOverrides recognised by codex-core.
         let overrides = codex_core::config::ConfigOverrides {
             model,
+            config_profile: profile,
             cwd: cwd.map(PathBuf::from),
             approval_policy: approval_policy.map(Into::into),
             sandbox_policy,
             disable_response_storage,
-            provider: None,
+            model_provider: None,
         };
 
         let cfg = codex_core::config::Config::load_with_overrides(overrides)?;
@@ -218,6 +224,10 @@ mod tests {
                 "description": "Optional override for the model name (e.g. \"o3\", \"o4-mini\")",
                 "type": "string"
               },
+              "profile": {
+                "description": "Configuration profile from config.toml to specify default options.",
+                "type": "string"
+              },
               "prompt": {
                 "description": "The *initial user prompt* to start the Codex conversation.",
                 "type": "string"
+ #[arg(long = "profile", short = 'p')] + pub config_profile: Option, + /// Configure when the model requires human approval before executing a command. #[arg(long = "ask-for-approval", short = 'a')] pub approval_policy: Option, diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index fe4f9954..e0b6274c 100644 --- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -55,7 +55,8 @@ pub fn run_main(cli: Cli) -> std::io::Result<()> { None }, cwd: cli.cwd.clone().map(|p| p.canonicalize().unwrap_or(p)), - provider: None, + model_provider: None, + config_profile: cli.config_profile.clone(), }; #[allow(clippy::print_stderr)] match Config::load_with_overrides(overrides) {