diff --git a/llmx-rs/core/src/config/mod.rs b/llmx-rs/core/src/config/mod.rs
index 1ee16cf2..b153dd58 100644
--- a/llmx-rs/core/src/config/mod.rs
+++ b/llmx-rs/core/src/config/mod.rs
@@ -58,12 +58,13 @@ pub mod edit;
 pub mod profile;
 pub mod types;
 
+// Default models for LLMX, in LiteLLM's provider/model format.
 #[cfg(target_os = "windows")]
-pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
+pub const OPENAI_DEFAULT_MODEL: &str = "anthropic/claude-sonnet-4-20250514";
 #[cfg(not(target_os = "windows"))]
-pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
-const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
-pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";
+pub const OPENAI_DEFAULT_MODEL: &str = "anthropic/claude-sonnet-4-20250514";
+const OPENAI_DEFAULT_REVIEW_MODEL: &str = "anthropic/claude-sonnet-4-20250514";
+pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "anthropic/claude-sonnet-4-20250514";
 
 /// Maximum number of bytes of the documentation that will be embedded. Larger
 /// files are *silently truncated* to this size so we do not take up too much of
diff --git a/llmx-rs/core/src/model_provider_info.rs b/llmx-rs/core/src/model_provider_info.rs
index 61c61eb1..297a6c66 100644
--- a/llmx-rs/core/src/model_provider_info.rs
+++ b/llmx-rs/core/src/model_provider_info.rs
@@ -267,10 +267,32 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
     use ModelProviderInfo as P;
 
     // We do not want to be in the business of adjudicating which third-party
-    // providers are bundled with Codex CLI, so we only include the OpenAI and
-    // open source ("oss") providers by default. Users are encouraged to add to
-    // `model_providers` in config.toml to add their own providers.
+    // providers are bundled with LLMX CLI, so we include LiteLLM as the default,
+    // along with the OpenAI and open source ("oss") providers. Users are
+    // encouraged to add their own providers via `model_providers` in config.toml.
     [
+        (
+            "litellm",
+            P {
+                name: "LiteLLM".into(),
+                // Allow users to override the default LiteLLM endpoint.
+                base_url: std::env::var("LITELLM_BASE_URL")
+                    .ok()
+                    .filter(|v| !v.trim().is_empty())
+                    .or_else(|| Some("http://localhost:4000/v1".into())),
+                env_key: Some("LITELLM_API_KEY".into()),
+                env_key_instructions: Some("Set LITELLM_API_KEY to your LiteLLM master key. See https://docs.litellm.ai/ for setup.".into()),
+                experimental_bearer_token: None,
+                wire_api: WireApi::Chat,
+                query_params: None,
+                http_headers: None,
+                env_http_headers: None,
+                request_max_retries: None,
+                stream_max_retries: None,
+                stream_idle_timeout_ms: None,
+                requires_openai_auth: false,
+            },
+        ),
         (
             "openai",
             P {
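
For reference, a minimal standalone sketch of the `base_url` fallback chain the new LiteLLM entry uses: a non-blank `LITELLM_BASE_URL` wins, otherwise the local proxy default applies. The `litellm_base_url` helper name is illustrative only and does not exist in the patch:

```rust
use std::env;

/// Illustrative helper (not part of the patch): resolves the LiteLLM
/// endpoint the same way the provider entry above does. A set, non-blank
/// LITELLM_BASE_URL takes precedence; otherwise the default local
/// LiteLLM proxy address is used.
fn litellm_base_url() -> Option<String> {
    env::var("LITELLM_BASE_URL")
        .ok()
        .filter(|v| !v.trim().is_empty())
        .or_else(|| Some("http://localhost:4000/v1".into()))
}

fn main() {
    // With LITELLM_BASE_URL unset (or blank), this prints the default:
    // Some("http://localhost:4000/v1")
    println!("{:?}", litellm_base_url());
}
```

Note the `.filter(|v| !v.trim().is_empty())` step: it treats a blank `LITELLM_BASE_URL` the same as an unset one, so an empty export in a shell profile cannot silently break the provider.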