diff --git a/codex-rs/config.md b/codex-rs/config.md
index b5a94a09..eeb9a266 100644
--- a/codex-rs/config.md
+++ b/codex-rs/config.md
@@ -94,15 +94,15 @@ env_http_headers = { "X-Example-Features": "EXAMPLE_FEATURES" }
 
 ## model_provider
 
-Identifies which provider to use from the `model_providers` map. Defaults to `"openai"`.
+Identifies which provider to use from the `model_providers` map. Defaults to `"openai"`. You can override the `base_url` for the built-in `openai` provider via the `OPENAI_BASE_URL` environment variable.
 
 Note that if you override `model_provider`, then you likely want to override
 `model`, as well. For example, if you are running ollama with Mistral locally,
 then you would need to add the following to your config in addition to the new entry in the `model_providers` map:
 
 ```toml
-model = "mistral"
 model_provider = "ollama"
+model = "mistral"
 ```
 
 ## approval_policy
diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs
index 5d51b10f..b38c912d 100644
--- a/codex-rs/core/src/model_provider_info.rs
+++ b/codex-rs/core/src/model_provider_info.rs
@@ -176,7 +176,15 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
         "openai",
         P {
             name: "OpenAI".into(),
-            base_url: "https://api.openai.com/v1".into(),
+            // Allow users to override the default OpenAI endpoint by
+            // exporting `OPENAI_BASE_URL`. This is useful when pointing
+            // Codex at a proxy, mock server, or Azure-style deployment
+            // without requiring a full TOML override for the built-in
+            // OpenAI provider.
+            base_url: std::env::var("OPENAI_BASE_URL")
+                .ok()
+                .filter(|v| !v.trim().is_empty())
+                .unwrap_or_else(|| "https://api.openai.com/v1".to_string()),
             env_key: Some("OPENAI_API_KEY".into()),
             env_key_instructions: Some("Create an API key (https://platform.openai.com) and export it as an environment variable.".into()),
             wire_api: WireApi::Responses,
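
A note on the fallback semantics in the Rust hunk above: the `.filter(|v| !v.trim().is_empty())` step means an `OPENAI_BASE_URL` that is set but empty (or whitespace-only) is treated the same as unset, so the stock endpoint still applies. Below is a minimal, self-contained sketch of that resolution logic; the helper name `resolve_base_url` is hypothetical and exists only for illustration, while the patch itself inlines the expression at the `base_url` field.

```rust
/// Hypothetical helper mirroring the patched `base_url` expression.
/// Takes the raw environment value as an `Option` so the fallback
/// behavior can be exercised without mutating the process environment.
fn resolve_base_url(raw: Option<String>) -> String {
    raw.filter(|v| !v.trim().is_empty())
        .unwrap_or_else(|| "https://api.openai.com/v1".to_string())
}

fn main() {
    // Unset: falls back to the stock OpenAI endpoint.
    assert_eq!(resolve_base_url(None), "https://api.openai.com/v1");

    // Set but whitespace-only: also falls back, thanks to the `filter`.
    assert_eq!(
        resolve_base_url(Some("   ".into())),
        "https://api.openai.com/v1"
    );

    // A real value, e.g. a local proxy or ollama endpoint, wins.
    assert_eq!(
        resolve_base_url(Some("http://localhost:11434/v1".into())),
        "http://localhost:11434/v1"
    );

    // In the patch the raw value comes from `std::env::var`, read when
    // the built-in provider map is constructed.
    println!(
        "effective: {}",
        resolve_base_url(std::env::var("OPENAI_BASE_URL").ok())
    );
}
```

One consequence of reading the variable inside `built_in_model_providers()` is that the override is captured at map-construction time; changing `OPENAI_BASE_URL` later in the same process has no effect on an already-built provider map.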