feat: read model_provider and model_providers from config.toml (#853)

This is the first step toward supporting other model providers in the Rust
CLI. Specifically, this PR adds new entries to `Config` and
`ConfigOverrides` that specify a `ModelProviderInfo`, the basic config
needed for an LLM provider. This PR does not get us all the way there yet
because `client.rs` still unconditionally appends `/responses` to the URL
and expects the endpoint to support the OpenAI Responses API. Will fix
that next!
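For illustration, here is roughly the shape this points at: a hypothetical `config.toml` entry plus a minimal sketch of `ModelProviderInfo`. The field set shown (`name`, `base_url`, `env_key`) is an assumption for the sketch, not necessarily exactly what the PR lands:

```rust
// Hypothetical config.toml entry this commit aims to support
// (key names illustrative):
//
//   model_provider = "openai"
//
//   [model_providers.openai]
//   name = "OpenAI"
//   base_url = "https://api.openai.com/v1"
//   env_key = "OPENAI_API_KEY"

use serde::Deserialize;

/// Minimal sketch of the basic config needed for an LLM provider.
#[derive(Debug, Clone, Deserialize)]
pub struct ModelProviderInfo {
    /// Display name, e.g. "OpenAI".
    pub name: String,
    /// Base URL the client builds request URLs from.
    pub base_url: String,
    /// Env var that holds this provider's API key, if any.
    pub env_key: Option<String>,
}
```

Presumably `ConfigOverrides` then only needs to carry the selected provider key (the `model_provider` value) so a CLI flag can pick one of the configured providers at startup.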
Author: Michael Bolin
Date: 2025-05-07 17:38:28 -07:00
Committed by: GitHub
Parent: cfe50c7107
Commit: 86022f097e
12 changed files with 208 additions and 30 deletions

@@ -2,12 +2,11 @@ use std::time::Duration;
 
 use env_flags::env_flags;
 
-use crate::error::CodexErr;
-use crate::error::Result;
-
 env_flags! {
     pub OPENAI_DEFAULT_MODEL: &str = "o3";
-    pub OPENAI_API_BASE: &str = "https://api.openai.com";
+    pub OPENAI_API_BASE: &str = "https://api.openai.com/v1";
+
+    /// Fallback when the provider-specific key is not set.
     pub OPENAI_API_KEY: Option<&str> = None;
     pub OPENAI_TIMEOUT_MS: Duration = Duration::from_millis(300_000), |value| {
         value.parse().map(Duration::from_millis)
@@ -21,9 +20,6 @@ env_flags! {
         value.parse().map(Duration::from_millis)
     };
 
     /// Fixture path for offline tests (see client.rs).
    pub CODEX_RS_SSE_FIXTURE: Option<&str> = None;
 }
-pub fn get_api_key() -> Result<&'static str> {
-    OPENAI_API_KEY.ok_or_else(|| CodexErr::EnvVar("OPENAI_API_KEY"))
-}
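Two sketches tied to what this diff changes: how the deleted global `get_api_key()` could be replaced by a per-provider lookup with the `OPENAI_API_KEY` fallback documented above, and the URL construction in `client.rs` that the commit message flags as still hard-coding `/responses`. Both are illustrative; `api_key_for` and `responses_url` are hypothetical helpers, not names from the PR:

```rust
use std::env;

// Per-provider replacement for the deleted global get_api_key():
// try the provider's own env var first, then fall back to
// OPENAI_API_KEY per the new doc comment. (Hypothetical helper.)
fn api_key_for(env_key: Option<&str>) -> Option<String> {
    env_key
        .and_then(|key| env::var(key).ok())
        .or_else(|| env::var("OPENAI_API_KEY").ok())
}

// What client.rs effectively still does after this commit: append
// /responses to whatever base URL is configured, assuming every
// provider implements the OpenAI Responses API. (Hypothetical helper.)
fn responses_url(base_url: &str) -> String {
    format!("{}/responses", base_url.trim_end_matches('/'))
}

fn main() {
    // The new default base includes /v1, so the full endpoint is
    // https://api.openai.com/v1/responses.
    assert_eq!(
        responses_url("https://api.openai.com/v1"),
        "https://api.openai.com/v1/responses"
    );
    println!("api key set: {}", api_key_for(None).is_some());
}
```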