use crate::config_profile::ConfigProfile;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::config_types::SandboxMode;
use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml;
use crate::config_types::Tui;
use crate::config_types::UriBasedFileOpener;
use crate::flags::OPENAI_DEFAULT_MODEL;
use crate::model_family::ModelFamily;
use crate::model_family::find_family_for_model;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use dirs::home_dir;
use serde::Deserialize;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use toml::Value as TomlValue;

/// Maximum number of bytes of the documentation that will be embedded. Larger
/// files are *silently truncated* to this size so we do not take up too much of
/// the context window.
pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB

/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
    /// The model to use, after all overrides have been applied.
    pub model: String,

    pub model_family: ModelFamily,

    /// Size of the context window for the model, in tokens.
    pub model_context_window: Option<u64>,

    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<u64>,

    /// Key into the model_providers map that specifies which provider to use.
    pub model_provider_id: String,

    /// Info needed to make an API request to the model.
    pub model_provider: ModelProviderInfo,

    /// Approval policy for executing commands.
    pub approval_policy: AskForApproval,

    pub sandbox_policy: SandboxPolicy,

    pub shell_environment_policy: ShellEnvironmentPolicy,

    /// When `true`, `AgentReasoning` events emitted by the backend will be
    /// suppressed from the frontend output. This can reduce visual noise when
    /// users are only interested in the final agent responses.
    pub hide_agent_reasoning: bool,

    /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown
    /// in the UI/output. Defaults to `false`.
    pub show_raw_agent_reasoning: bool,

    /// Disable server-side response storage (sends the full conversation
    /// context with every request). Currently necessary for OpenAI customers
    /// who have opted into Zero Data Retention (ZDR).
    pub disable_response_storage: bool,

    /// User-provided instructions from AGENTS.md.
    pub user_instructions: Option<String>,

    /// Base instructions override.
    pub base_instructions: Option<String>,

    /// Optional external notifier command. When set, Codex will spawn this
    /// program after each completed *turn* (i.e. when the agent finishes
    /// processing a user submission). The value must be the full command
    /// broken into argv tokens **without** the trailing JSON argument; Codex
    /// appends one extra argument containing a JSON payload describing the
    /// event.
    ///
    /// Example `~/.codex/config.toml` snippet:
    ///
    /// ```toml
    /// notify = ["notify-send", "Codex"]
    /// ```
    ///
    /// which will be invoked as:
    ///
    /// ```shell
    /// notify-send Codex '{"type":"agent-turn-complete","turn-id":"12345"}'
    /// ```
    ///
    /// If unset the feature is disabled.
    pub notify: Option<Vec<String>>,

    /// The directory that should be treated as the current working directory
    /// for the session. All relative paths inside the business-logic layer are
    /// resolved against this path.
    pub cwd: PathBuf,

    /// Definition for MCP servers that Codex can reach out to for tool calls.
    pub mcp_servers: HashMap<String, McpServerConfig>,

    /// Combined provider map (defaults merged with user-defined overrides).
    pub model_providers: HashMap<String, ModelProviderInfo>,

    /// Maximum number of bytes to include from an AGENTS.md project doc file.
    pub project_doc_max_bytes: usize,

    /// Directory containing all Codex state (defaults to `~/.codex` but can be
    /// overridden by the `CODEX_HOME` environment variable).
    pub codex_home: PathBuf,

    /// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
    pub history: History,

    /// Optional URI-based file opener. If set, citations to files in the model
    /// output will be hyperlinked using the specified URI scheme.
    pub file_opener: UriBasedFileOpener,

    /// Collection of settings that are specific to the TUI.
    pub tui: Tui,

    /// Path to the `codex-linux-sandbox` executable. This must be set if
    /// [`crate::exec::SandboxType::LinuxSeccomp`] is used. Note that this
    /// cannot be set in the config file: it must be set in code via
    /// [`ConfigOverrides`].
    ///
    /// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
    pub codex_linux_sandbox_exe: Option<PathBuf>,

    /// If not "none", the value to use for `reasoning.effort` when making a
    /// request using the Responses API.
    pub model_reasoning_effort: ReasoningEffort,

    /// If not "none", the value to use for `reasoning.summary` when making a
    /// request using the Responses API.
    pub model_reasoning_summary: ReasoningSummary,

    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: String,

    /// Experimental rollout resume path (absolute path to .jsonl; undocumented).
    pub experimental_resume: Option<PathBuf>,

    /// Include an experimental plan tool that the model can use to update its
    /// current plan and the status of each step.
    pub include_plan_tool: bool,

    /// The value for the `originator` header included with Responses API requests.
    pub internal_originator: Option<String>,
}

impl Config {
    /// Load configuration with *generic* CLI overrides (`-c key=value`) applied
    /// **in between** the values parsed from `config.toml` and the
    /// strongly-typed overrides specified via [`ConfigOverrides`].
    ///
    /// The precedence order is therefore: `config.toml` < `-c` overrides <
    /// `ConfigOverrides`.
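    ///
    /// A minimal sketch of a call site (the override key and value here are
    /// illustrative, not defaults):
    ///
    /// ```ignore
    /// let cli_overrides = vec![(
    ///     "model_reasoning_effort".to_string(),
    ///     toml::Value::String("high".to_string()),
    /// )];
    /// let config = Config::load_with_cli_overrides(cli_overrides, ConfigOverrides::default())?;
    /// ```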
    pub fn load_with_cli_overrides(
        cli_overrides: Vec<(String, TomlValue)>,
        overrides: ConfigOverrides,
    ) -> std::io::Result<Self> {
        // Resolve the directory that stores Codex state (e.g. ~/.codex or the
        // value of $CODEX_HOME) so we can embed it into the resulting
        // `Config` instance.
        let codex_home = find_codex_home()?;

        // Step 1: parse `config.toml` into a generic TOML value.
        let mut root_value = load_config_as_toml(&codex_home)?;

        // Step 2: apply the `-c` overrides.
        for (path, value) in cli_overrides.into_iter() {
            apply_toml_override(&mut root_value, &path, value);
        }

        // Step 3: deserialize into `ConfigToml` so that Serde can enforce the
        // correct types.
        let cfg: ConfigToml = root_value.try_into().map_err(|e| {
            tracing::error!("Failed to deserialize overridden config: {e}");
            std::io::Error::new(std::io::ErrorKind::InvalidData, e)
        })?;

        // Step 4: merge with the strongly-typed overrides.
        Self::load_from_base_config_with_overrides(cfg, overrides, codex_home)
    }
}

/// Read `CODEX_HOME/config.toml` and return it as a generic TOML value. Returns
/// an empty TOML table when the file does not exist.
fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
    let config_path = codex_home.join("config.toml");
    match std::fs::read_to_string(&config_path) {
        Ok(contents) => match toml::from_str::<TomlValue>(&contents) {
            Ok(val) => Ok(val),
            Err(e) => {
                tracing::error!("Failed to parse config.toml: {e}");
                Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e))
            }
        },
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            tracing::info!("config.toml not found, using defaults");
            Ok(TomlValue::Table(Default::default()))
        }
        Err(e) => {
            tracing::error!("Failed to read config.toml: {e}");
            Err(e)
        }
    }
}

/// Apply a single dotted-path override onto a TOML value.
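///
/// A small illustration of the nesting behaviour (the path and value are
/// arbitrary):
///
/// ```ignore
/// let mut root = toml::Value::Table(Default::default());
/// apply_toml_override(&mut root, "sandbox_workspace_write.network_access", true.into());
/// // root now contains: [sandbox_workspace_write] network_access = true
/// ```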
fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
    use toml::value::Table;

    let segments: Vec<&str> = path.split('.').collect();
    let mut current = root;

    for (idx, segment) in segments.iter().enumerate() {
        let is_last = idx == segments.len() - 1;

        if is_last {
            match current {
                TomlValue::Table(table) => {
                    table.insert(segment.to_string(), value);
                }
                _ => {
                    let mut table = Table::new();
                    table.insert(segment.to_string(), value);
                    *current = TomlValue::Table(table);
                }
            }
            return;
        }

        // Traverse into the existing table or create an intermediate one.
        match current {
            TomlValue::Table(table) => {
                current = table
                    .entry(segment.to_string())
                    .or_insert_with(|| TomlValue::Table(Table::new()));
            }
            _ => {
                *current = TomlValue::Table(Table::new());
                if let TomlValue::Table(tbl) = current {
                    current = tbl
                        .entry(segment.to_string())
                        .or_insert_with(|| TomlValue::Table(Table::new()));
                }
            }
        }
    }
}

/// Base config deserialized from `~/.codex/config.toml`.
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ConfigToml {
    /// Optional override of model selection.
    pub model: Option<String>,

    /// Provider to use from the `model_providers` map.
    pub model_provider: Option<String>,

    /// Size of the context window for the model, in tokens.
    pub model_context_window: Option<u64>,

    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<u64>,

    /// Default approval policy for executing commands.
    pub approval_policy: Option<AskForApproval>,

    #[serde(default)]
    pub shell_environment_policy: ShellEnvironmentPolicyToml,

    /// Sandbox mode to use.
    pub sandbox_mode: Option<SandboxMode>,

    /// Sandbox configuration to apply when `sandbox_mode` is `workspace-write`.
    pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,

    /// Disable server-side response storage (sends the full conversation
    /// context with every request). Currently necessary for OpenAI customers
    /// who have opted into Zero Data Retention (ZDR).
    pub disable_response_storage: Option<bool>,

    /// Optional external command to spawn for end-user notifications.
    #[serde(default)]
    pub notify: Option<Vec<String>>,

    /// System instructions.
    pub instructions: Option<String>,

    /// Definition for MCP servers that Codex can reach out to for tool calls.
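    ///
    /// Example `config.toml` entry (the server name and command are
    /// illustrative):
    ///
    /// ```toml
    /// [mcp_servers.docs]
    /// command = "npx"
    /// args = ["-y", "@some/mcp-server"]
    /// ```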
    #[serde(default)]
    pub mcp_servers: HashMap<String, McpServerConfig>,

    /// User-defined provider entries that extend/override the built-in list.
    #[serde(default)]
    pub model_providers: HashMap<String, ModelProviderInfo>,

    /// Maximum number of bytes to include from an AGENTS.md project doc file.
    pub project_doc_max_bytes: Option<usize>,

    /// Profile to use from the `profiles` map.
    pub profile: Option<String>,

    /// Named profiles to facilitate switching between different configurations.
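    ///
    /// Example (the profile name and values are illustrative):
    ///
    /// ```toml
    /// [profiles.strict]
    /// model = "o3"
    /// model_provider = "openai"
    /// approval_policy = "never"
    /// ```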
    #[serde(default)]
    pub profiles: HashMap<String, ConfigProfile>,

    /// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
    #[serde(default)]
    pub history: Option<History>,

    /// Optional URI-based file opener. If set, citations to files in the model
    /// output will be hyperlinked using the specified URI scheme.
    pub file_opener: Option<UriBasedFileOpener>,

    /// Collection of settings that are specific to the TUI.
    pub tui: Option<Tui>,

    /// When set to `true`, `AgentReasoning` events will be hidden from the
    /// UI/output. Defaults to `false`.
    pub hide_agent_reasoning: Option<bool>,

    /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown
    /// in the UI/output. Defaults to `false`.
    pub show_raw_agent_reasoning: Option<bool>,

    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,

    /// Override to force-enable reasoning summaries for the configured model.
    pub model_supports_reasoning_summaries: Option<bool>,

    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: Option<String>,

    /// Experimental rollout resume path (absolute path to .jsonl; undocumented).
    pub experimental_resume: Option<PathBuf>,

    /// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS.
    pub experimental_instructions_file: Option<PathBuf>,

    /// The value for the `originator` header included with Responses API requests.
    pub internal_originator: Option<String>,
}

impl ConfigToml {
    /// Derive the effective sandbox policy from the configuration.
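    ///
    /// Illustrative `config.toml` input that maps to
    /// `SandboxPolicy::WorkspaceWrite` (the values are arbitrary):
    ///
    /// ```toml
    /// sandbox_mode = "workspace-write"
    ///
    /// [sandbox_workspace_write]
    /// writable_roots = ["/extra/dir"]
    /// network_access = true
    /// ```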
    fn derive_sandbox_policy(&self, sandbox_mode_override: Option<SandboxMode>) -> SandboxPolicy {
        let resolved_sandbox_mode = sandbox_mode_override
            .or(self.sandbox_mode)
            .unwrap_or_default();
        match resolved_sandbox_mode {
            SandboxMode::ReadOnly => SandboxPolicy::new_read_only_policy(),
            SandboxMode::WorkspaceWrite => match self.sandbox_workspace_write.as_ref() {
                Some(SandboxWorkspaceWrite {
                    writable_roots,
                    network_access,
                    exclude_tmpdir_env_var,
                    exclude_slash_tmp,
                }) => SandboxPolicy::WorkspaceWrite {
                    writable_roots: writable_roots.clone(),
                    network_access: *network_access,
                    exclude_tmpdir_env_var: *exclude_tmpdir_env_var,
                    exclude_slash_tmp: *exclude_slash_tmp,
                },
                None => SandboxPolicy::new_workspace_write_policy(),
            },
            SandboxMode::DangerFullAccess => SandboxPolicy::DangerFullAccess,
        }
    }
}

/// Optional overrides for user configuration (e.g., from CLI flags).
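///
/// Typically built with struct-update syntax so that unset flags fall through
/// to profile and config-file values (the field values here are illustrative):
///
/// ```ignore
/// let overrides = ConfigOverrides {
///     model: Some("o3".to_string()),
///     sandbox_mode: Some(SandboxMode::ReadOnly),
///     ..Default::default()
/// };
/// ```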
#[derive(Default, Debug, Clone)]
pub struct ConfigOverrides {
    pub model: Option<String>,
    pub cwd: Option<PathBuf>,
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
    pub model_provider: Option<String>,
    pub config_profile: Option<String>,
    pub codex_linux_sandbox_exe: Option<PathBuf>,
    pub base_instructions: Option<String>,
    pub include_plan_tool: Option<bool>,
    pub disable_response_storage: Option<bool>,
    pub show_raw_agent_reasoning: Option<bool>,
}

impl Config {
    /// Meant to be used exclusively for tests: `load_with_cli_overrides()`
    /// should be used in all other cases.
    pub fn load_from_base_config_with_overrides(
        cfg: ConfigToml,
        overrides: ConfigOverrides,
        codex_home: PathBuf,
    ) -> std::io::Result<Self> {
        let user_instructions = Self::load_instructions(Some(&codex_home));

        // Destructure ConfigOverrides fully to ensure all overrides are applied.
        let ConfigOverrides {
            model,
            cwd,
            approval_policy,
            sandbox_mode,
            model_provider,
            config_profile: config_profile_key,
            codex_linux_sandbox_exe,
            base_instructions,
            include_plan_tool,
            disable_response_storage,
            show_raw_agent_reasoning,
        } = overrides;

        let config_profile = match config_profile_key.as_ref().or(cfg.profile.as_ref()) {
            Some(key) => cfg
                .profiles
                .get(key)
                .ok_or_else(|| {
                    std::io::Error::new(
                        std::io::ErrorKind::NotFound,
                        format!("config profile `{key}` not found"),
                    )
                })?
                .clone(),
            None => ConfigProfile::default(),
        };

        let sandbox_policy = cfg.derive_sandbox_policy(sandbox_mode);

        let mut model_providers = built_in_model_providers();
        // Merge user-defined providers into the built-in list; built-in
        // definitions win on key collisions.
        for (key, provider) in cfg.model_providers.into_iter() {
            model_providers.entry(key).or_insert(provider);
        }

        let model_provider_id = model_provider
            .or(config_profile.model_provider)
            .or(cfg.model_provider)
            .unwrap_or_else(|| "openai".to_string());
        let model_provider = model_providers
            .get(&model_provider_id)
            .ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!("Model provider `{model_provider_id}` not found"),
                )
            })?
            .clone();

        let shell_environment_policy = cfg.shell_environment_policy.into();

        let resolved_cwd = {
            use std::env;

            match cwd {
                None => {
                    tracing::info!("cwd not set, using current dir");
                    env::current_dir()?
                }
                Some(p) if p.is_absolute() => p,
                Some(p) => {
                    // Resolve relative path against the current working directory.
                    tracing::info!("cwd is relative, resolving against current dir");
                    let mut current = env::current_dir()?;
                    current.push(p);
                    current
                }
            }
        };

        let history = cfg.history.unwrap_or_default();

        let model = model
            .or(config_profile.model)
            .or(cfg.model)
            .unwrap_or_else(default_model);
        let model_family = find_family_for_model(&model).unwrap_or_else(|| {
            let supports_reasoning_summaries =
                cfg.model_supports_reasoning_summaries.unwrap_or(false);
            ModelFamily {
                slug: model.clone(),
                family: model.clone(),
                needs_special_apply_patch_instructions: false,
                supports_reasoning_summaries,
                uses_local_shell_tool: false,
            }
        });

        let openai_model_info = get_model_info(&model_family);
        let model_context_window = cfg
            .model_context_window
            .or_else(|| openai_model_info.as_ref().map(|info| info.context_window));
        let model_max_output_tokens = cfg.model_max_output_tokens.or_else(|| {
            openai_model_info
                .as_ref()
                .map(|info| info.max_output_tokens)
        });

        let experimental_resume = cfg.experimental_resume;

        // Load base instructions override from a file if specified. If the
        // path is relative, resolve it against the effective cwd so the
        // behaviour matches other path-like config values.
        let experimental_instructions_path = config_profile
            .experimental_instructions_file
            .as_ref()
            .or(cfg.experimental_instructions_file.as_ref());
        let file_base_instructions =
            Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
        let base_instructions = base_instructions.or(file_base_instructions);

        let config = Self {
            model,
            model_family,
            model_context_window,
            model_max_output_tokens,
            model_provider_id,
            model_provider,
            cwd: resolved_cwd,
            approval_policy: approval_policy
                .or(config_profile.approval_policy)
                .or(cfg.approval_policy)
                .unwrap_or_else(AskForApproval::default),
            sandbox_policy,
            shell_environment_policy,
            disable_response_storage: config_profile
                .disable_response_storage
                .or(cfg.disable_response_storage)
                .or(disable_response_storage)
                .unwrap_or(false),
            notify: cfg.notify,
            user_instructions,
            base_instructions,
            mcp_servers: cfg.mcp_servers,
            model_providers,
            project_doc_max_bytes: cfg.project_doc_max_bytes.unwrap_or(PROJECT_DOC_MAX_BYTES),
            codex_home,
            history,
            file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
            tui: cfg.tui.unwrap_or_default(),
            codex_linux_sandbox_exe,
            hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false),
            show_raw_agent_reasoning: cfg
                .show_raw_agent_reasoning
                .or(show_raw_agent_reasoning)
                .unwrap_or(false),
            model_reasoning_effort: config_profile
                .model_reasoning_effort
                .or(cfg.model_reasoning_effort)
                .unwrap_or_default(),
            model_reasoning_summary: config_profile
                .model_reasoning_summary
                .or(cfg.model_reasoning_summary)
                .unwrap_or_default(),
            chatgpt_base_url: config_profile
                .chatgpt_base_url
                .or(cfg.chatgpt_base_url)
                .unwrap_or("https://chatgpt.com/backend-api/".to_string()),
            experimental_resume,
            include_plan_tool: include_plan_tool.unwrap_or(false),
            internal_originator: cfg.internal_originator,
        };
        Ok(config)
    }

    fn load_instructions(codex_dir: Option<&Path>) -> Option<String> {
        let mut p = match codex_dir {
            Some(p) => p.to_path_buf(),
            None => return None,
        };

        p.push("AGENTS.md");
        std::fs::read_to_string(&p).ok().and_then(|s| {
            let s = s.trim();
            if s.is_empty() {
                None
            } else {
                Some(s.to_string())
            }
        })
    }

    fn get_base_instructions(
        path: Option<&PathBuf>,
        cwd: &Path,
    ) -> std::io::Result<Option<String>> {
        let p = match path.as_ref() {
            None => return Ok(None),
            Some(p) => p,
        };

        // Resolve relative paths against the provided cwd to make CLI
        // overrides consistent regardless of where the process was launched
        // from.
        let full_path = if p.is_relative() {
            cwd.join(p)
        } else {
            p.to_path_buf()
        };

        let contents = std::fs::read_to_string(&full_path).map_err(|e| {
            std::io::Error::new(
                e.kind(),
                format!(
                    "failed to read experimental instructions file {}: {e}",
                    full_path.display()
                ),
            )
        })?;

        let s = contents.trim().to_string();
        if s.is_empty() {
            Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!(
                    "experimental instructions file is empty: {}",
                    full_path.display()
                ),
            ))
        } else {
            Ok(Some(s))
        }
    }
}

fn default_model() -> String {
    OPENAI_DEFAULT_MODEL.to_string()
}

/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
///
/// - If `CODEX_HOME` is set, the value will be canonicalized and this
///   function will Err if the path does not exist.
/// - If `CODEX_HOME` is not set, this function does not verify that the
///   directory exists.
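///
/// ```shell
/// # Illustrative: relocate all Codex state for a one-off run.
/// CODEX_HOME=/tmp/codex-home codex
/// ```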
pub fn find_codex_home() -> std::io::Result<PathBuf> {
    // Honor the `CODEX_HOME` environment variable when it is set to allow users
    // (and tests) to override the default location.
    if let Ok(val) = std::env::var("CODEX_HOME") {
        if !val.is_empty() {
            return PathBuf::from(val).canonicalize();
        }
    }

    let mut p = home_dir().ok_or_else(|| {
        std::io::Error::new(
            std::io::ErrorKind::NotFound,
            "Could not find home directory",
        )
    })?;
    p.push(".codex");
    Ok(p)
}

/// Returns the path to the folder where Codex logs are stored. Does not verify
/// that the directory exists.
pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {
    let mut p = cfg.codex_home.clone();
    p.push("log");
    Ok(p)
}

#[cfg(test)]
mod tests {
    #![allow(clippy::expect_used, clippy::unwrap_used)]
    use crate::config_types::HistoryPersistence;

    use super::*;
    use pretty_assertions::assert_eq;
    use tempfile::TempDir;

    #[test]
    fn test_toml_parsing() {
        let history_with_persistence = r#"
            [history]
            persistence = "save-all"
        "#;
        let history_with_persistence_cfg = toml::from_str::<ConfigToml>(history_with_persistence)
            .expect("TOML deserialization should succeed");
        assert_eq!(
            Some(History {
                persistence: HistoryPersistence::SaveAll,
                max_bytes: None,
            }),
            history_with_persistence_cfg.history
        );

        let history_no_persistence = r#"
            [history]
            persistence = "none"
        "#;

        let history_no_persistence_cfg = toml::from_str::<ConfigToml>(history_no_persistence)
            .expect("TOML deserialization should succeed");
        assert_eq!(
            Some(History {
                persistence: HistoryPersistence::None,
                max_bytes: None,
            }),
            history_no_persistence_cfg.history
        );
    }

    #[test]
    fn test_sandbox_config_parsing() {
        let sandbox_full_access = r#"
            sandbox_mode = "danger-full-access"

            [sandbox_workspace_write]
            network_access = false # This should be ignored.
        "#;
        let sandbox_full_access_cfg = toml::from_str::<ConfigToml>(sandbox_full_access)
            .expect("TOML deserialization should succeed");
        let sandbox_mode_override = None;
        assert_eq!(
            SandboxPolicy::DangerFullAccess,
            sandbox_full_access_cfg.derive_sandbox_policy(sandbox_mode_override)
        );

        let sandbox_read_only = r#"
            sandbox_mode = "read-only"

            [sandbox_workspace_write]
            network_access = true # This should be ignored.
        "#;

        let sandbox_read_only_cfg = toml::from_str::<ConfigToml>(sandbox_read_only)
            .expect("TOML deserialization should succeed");
        let sandbox_mode_override = None;
        assert_eq!(
            SandboxPolicy::ReadOnly,
            sandbox_read_only_cfg.derive_sandbox_policy(sandbox_mode_override)
        );

        let sandbox_workspace_write = r#"
            sandbox_mode = "workspace-write"

            [sandbox_workspace_write]
            writable_roots = [
                "/my/workspace",
            ]
            exclude_tmpdir_env_var = true
            exclude_slash_tmp = true
        "#;

        let sandbox_workspace_write_cfg = toml::from_str::<ConfigToml>(sandbox_workspace_write)
            .expect("TOML deserialization should succeed");
        let sandbox_mode_override = None;
        assert_eq!(
            SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![PathBuf::from("/my/workspace")],
                network_access: false,
                exclude_tmpdir_env_var: true,
                exclude_slash_tmp: true,
            },
            sandbox_workspace_write_cfg.derive_sandbox_policy(sandbox_mode_override)
        );
    }

    struct PrecedenceTestFixture {
        cwd: TempDir,
        codex_home: TempDir,
        cfg: ConfigToml,
        model_provider_map: HashMap<String, ModelProviderInfo>,
        openai_provider: ModelProviderInfo,
        openai_chat_completions_provider: ModelProviderInfo,
    }

    impl PrecedenceTestFixture {
        fn cwd(&self) -> PathBuf {
            self.cwd.path().to_path_buf()
        }

        fn codex_home(&self) -> PathBuf {
            self.codex_home.path().to_path_buf()
        }
    }

    fn create_test_fixture() -> std::io::Result<PrecedenceTestFixture> {
        let toml = r#"
model = "o3"
approval_policy = "untrusted"
disable_response_storage = false

# Can be used to determine which profile to use if not specified by
# `ConfigOverrides`.
profile = "gpt3"

[model_providers.openai-chat-completions]
name = "OpenAI using Chat Completions"
base_url = "https://api.openai.com/v1"
env_key = "OPENAI_API_KEY"
wire_api = "chat"
request_max_retries = 4 # retry failed HTTP requests
stream_max_retries = 10 # retry dropped SSE streams
stream_idle_timeout_ms = 300000 # 5m idle timeout

[profiles.o3]
model = "o3"
model_provider = "openai"
approval_policy = "never"
model_reasoning_effort = "high"
model_reasoning_summary = "detailed"

[profiles.gpt3]
model = "gpt-3.5-turbo"
model_provider = "openai-chat-completions"

[profiles.zdr]
model = "o3"
model_provider = "openai"
approval_policy = "on-failure"
disable_response_storage = true
"#;

        let cfg: ConfigToml = toml::from_str(toml).expect("TOML deserialization should succeed");

        // Use a temporary directory for the cwd so it does not contain an
        // AGENTS.md file.
        let cwd_temp_dir = TempDir::new().unwrap();
        let cwd = cwd_temp_dir.path().to_path_buf();
        // Make it look like a Git repo so it does not search for AGENTS.md in
        // a parent folder, either.
        std::fs::write(cwd.join(".git"), "gitdir: nowhere")?;

        let codex_home_temp_dir = TempDir::new().unwrap();

        let openai_chat_completions_provider = ModelProviderInfo {
            name: "OpenAI using Chat Completions".to_string(),
            base_url: Some("https://api.openai.com/v1".to_string()),
            env_key: Some("OPENAI_API_KEY".to_string()),
            wire_api: crate::WireApi::Chat,
            env_key_instructions: None,
            query_params: None,
            http_headers: None,
            env_http_headers: None,
            request_max_retries: Some(4),
            stream_max_retries: Some(10),
            stream_idle_timeout_ms: Some(300_000),
            requires_openai_auth: false,
        };
        let model_provider_map = {
            let mut model_provider_map = built_in_model_providers();
            model_provider_map.insert(
                "openai-chat-completions".to_string(),
                openai_chat_completions_provider.clone(),
            );
            model_provider_map
        };

        let openai_provider = model_provider_map
            .get("openai")
            .expect("openai provider should exist")
            .clone();

        Ok(PrecedenceTestFixture {
            cwd: cwd_temp_dir,
            codex_home: codex_home_temp_dir,
            cfg,
            model_provider_map,
            openai_provider,
            openai_chat_completions_provider,
        })
    }

    /// Users can specify config values at multiple levels that have the
    /// following precedence:
    ///
    /// 1. custom command-line argument, e.g. `--model o3`
    /// 2. as part of a profile, where the `--profile` is specified via a CLI
    ///    flag (or in the config file itself)
    /// 3. as an entry in `config.toml`, e.g. `model = "o3"`
    /// 4. the default value for a required field defined in code, e.g.
    ///    `crate::flags::OPENAI_DEFAULT_MODEL`
    ///
    /// Note that profiles are the recommended way to specify a group of
    /// configuration options together.
    #[test]
    fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> {
        let fixture = create_test_fixture()?;

        let o3_profile_overrides = ConfigOverrides {
            config_profile: Some("o3".to_string()),
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };
        let o3_profile_config: Config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            o3_profile_overrides,
            fixture.codex_home(),
        )?;
        assert_eq!(
            Config {
                model: "o3".to_string(),
                model_family: find_family_for_model("o3").expect("known model slug"),
                model_context_window: Some(200_000),
                model_max_output_tokens: Some(100_000),
                model_provider_id: "openai".to_string(),
                model_provider: fixture.openai_provider.clone(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::new_read_only_policy(),
                shell_environment_policy: ShellEnvironmentPolicy::default(),
                disable_response_storage: false,
                user_instructions: None,
                notify: None,
                cwd: fixture.cwd(),
                mcp_servers: HashMap::new(),
                model_providers: fixture.model_provider_map.clone(),
                project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
                codex_home: fixture.codex_home(),
                history: History::default(),
                file_opener: UriBasedFileOpener::VsCode,
                tui: Tui::default(),
                codex_linux_sandbox_exe: None,
                hide_agent_reasoning: false,
                show_raw_agent_reasoning: false,
                model_reasoning_effort: ReasoningEffort::High,
                model_reasoning_summary: ReasoningSummary::Detailed,
                chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
                experimental_resume: None,
                base_instructions: None,
                include_plan_tool: false,
                internal_originator: None,
            },
            o3_profile_config
        );
        Ok(())
    }

    #[test]
    fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> {
        let fixture = create_test_fixture()?;

        let gpt3_profile_overrides = ConfigOverrides {
            config_profile: Some("gpt3".to_string()),
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };
        let gpt3_profile_config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            gpt3_profile_overrides,
            fixture.codex_home(),
        )?;
        let expected_gpt3_profile_config = Config {
            model: "gpt-3.5-turbo".to_string(),
            model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"),
            model_context_window: Some(16_385),
            model_max_output_tokens: Some(4_096),
            model_provider_id: "openai-chat-completions".to_string(),
            model_provider: fixture.openai_chat_completions_provider.clone(),
            approval_policy: AskForApproval::UnlessTrusted,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            shell_environment_policy: ShellEnvironmentPolicy::default(),
            disable_response_storage: false,
            user_instructions: None,
            notify: None,
            cwd: fixture.cwd(),
            mcp_servers: HashMap::new(),
            model_providers: fixture.model_provider_map.clone(),
            project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
            codex_home: fixture.codex_home(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            tui: Tui::default(),
            codex_linux_sandbox_exe: None,
            hide_agent_reasoning: false,
            show_raw_agent_reasoning: false,
            model_reasoning_effort: ReasoningEffort::default(),
            model_reasoning_summary: ReasoningSummary::default(),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            internal_originator: None,
        };

        assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);

        // Verify that loading without specifying a profile in ConfigOverrides
        // uses the default profile from the config file (which is "gpt3").
        let default_profile_overrides = ConfigOverrides {
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };

        let default_profile_config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            default_profile_overrides,
            fixture.codex_home(),
        )?;

        assert_eq!(expected_gpt3_profile_config, default_profile_config);
        Ok(())
    }

    #[test]
    fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> {
        let fixture = create_test_fixture()?;

        let zdr_profile_overrides = ConfigOverrides {
            config_profile: Some("zdr".to_string()),
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };
        let zdr_profile_config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            zdr_profile_overrides,
            fixture.codex_home(),
        )?;
        let expected_zdr_profile_config = Config {
            model: "o3".to_string(),
            model_family: find_family_for_model("o3").expect("known model slug"),
            model_context_window: Some(200_000),
            model_max_output_tokens: Some(100_000),
            model_provider_id: "openai".to_string(),
            model_provider: fixture.openai_provider.clone(),
            approval_policy: AskForApproval::OnFailure,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            shell_environment_policy: ShellEnvironmentPolicy::default(),
            disable_response_storage: true,
            user_instructions: None,
            notify: None,
            cwd: fixture.cwd(),
            mcp_servers: HashMap::new(),
            model_providers: fixture.model_provider_map.clone(),
            project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
            codex_home: fixture.codex_home(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            tui: Tui::default(),
            codex_linux_sandbox_exe: None,
            hide_agent_reasoning: false,
            show_raw_agent_reasoning: false,
            model_reasoning_effort: ReasoningEffort::default(),
            model_reasoning_summary: ReasoningSummary::default(),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            internal_originator: None,
        };

        assert_eq!(expected_zdr_profile_config, zdr_profile_config);

        Ok(())
    }
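
    /// A minimal sketch exercising `apply_toml_override` directly; the dotted
    /// path and value are illustrative, not from the shipped test suite.
    #[test]
    fn test_apply_toml_override_creates_nested_tables() {
        let mut root = TomlValue::Table(Default::default());
        apply_toml_override(&mut root, "a.b.c", TomlValue::Integer(1));

        let expected: TomlValue = toml::from_str(
            r#"
            [a.b]
            c = 1
        "#,
        )
        .expect("valid TOML");
        assert_eq!(expected, root);
    }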
}