fix: when using --oss, ensure the configuration is threaded through correctly (#1859)

This PR started as an investigation with the goal of eliminating the use
of `unsafe { std::env::set_var() }` in `ollama/src/client.rs`, as
setting environment variables in a multithreaded context is indeed
unsafe; as a result, these tests were observed to be flaky.

However, as I dug deeper into the issue, I discovered that the logic for
instantiating `OllamaClient` under test scenarios was not quite right.
In this PR, I aimed to:

- share more code between the two creation codepaths,
`try_from_oss_provider()` and `try_from_provider_with_base_url()`
- use the values from `Config` when setting up Ollama, as we have
various mechanisms for overriding config values, so we should be sure
that we are always using the ultimate `Config` for things such as the
`ModelProviderInfo` associated with the `oss` id

Once this was in place,
`OllamaClient::try_from_provider_with_base_url()` could be used in unit
tests for `OllamaClient`, so that it was possible to create a properly
configured client without having to set environment variables.
This commit is contained in:
Michael Bolin
2025-08-05 13:55:32 -07:00
committed by GitHub
parent 0c5fa271bc
commit d365cae077
6 changed files with 176 additions and 178 deletions

View File

@@ -4,6 +4,7 @@ mod pull;
mod url;
pub use client::OllamaClient;
use codex_core::config::Config;
pub use pull::CliProgressReporter;
pub use pull::PullEvent;
pub use pull::PullProgressReporter;
@@ -15,38 +16,29 @@ pub const DEFAULT_OSS_MODEL: &str = "gpt-oss:20b";
/// Prepare the local OSS environment when `--oss` is selected.
///
/// - Ensures a local Ollama server is reachable.
/// - Selects the final model name (CLI override or default).
/// - Checks if the model exists locally and pulls it if missing.
///
/// Returns the final model name that should be used by the caller.
pub async fn ensure_oss_ready(cli_model: Option<String>) -> std::io::Result<String> {
pub async fn ensure_oss_ready(config: &Config) -> std::io::Result<()> {
// Only download when the requested model is the default OSS model (or when -m is not provided).
let should_download = cli_model
.as_deref()
.map(|name| name == DEFAULT_OSS_MODEL)
.unwrap_or(true);
let model = cli_model.unwrap_or_else(|| DEFAULT_OSS_MODEL.to_string());
let model = config.model.as_ref();
// Verify local Ollama is reachable.
let ollama_client = crate::OllamaClient::try_from_oss_provider().await?;
let ollama_client = crate::OllamaClient::try_from_oss_provider(config).await?;
if should_download {
// If the model is not present locally, pull it.
match ollama_client.fetch_models().await {
Ok(models) => {
if !models.iter().any(|m| m == &model) {
let mut reporter = crate::CliProgressReporter::new();
ollama_client
.pull_with_reporter(&model, &mut reporter)
.await?;
}
}
Err(err) => {
// Not fatal; higher layers may still proceed and surface errors later.
tracing::warn!("Failed to query local models from Ollama: {}.", err);
// If the model is not present locally, pull it.
match ollama_client.fetch_models().await {
Ok(models) => {
if !models.iter().any(|m| m == model) {
let mut reporter = crate::CliProgressReporter::new();
ollama_client
.pull_with_reporter(model, &mut reporter)
.await?;
}
}
Err(err) => {
// Not fatal; higher layers may still proceed and surface errors later.
tracing::warn!("Failed to query local models from Ollama: {}.", err);
}
}
Ok(model)
Ok(())
}