fix: when using --oss, ensure correct configuration is threaded through correctly (#1859)
This PR started as an investigation aimed at eliminating the use
of `unsafe { std::env::set_var() }` in `ollama/src/client.rs`: setting
environment variables in a multithreaded process is unsafe, and the
tests that relied on it were observed to be flaky as a result.
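For context, the problematic test pattern looked roughly like the sketch below (the variable name and test body are illustrative assumptions, not the actual code). Because `std::env::set_var` mutates process-global state, a test running on another thread can observe the variable mid-update, which is why the call is `unsafe` and why the tests raced.

```rust
// Illustrative sketch of the flaky pattern, not the actual test code.
#[test]
fn resolves_base_url_from_env() {
    // Mutates process-wide state; any test running concurrently on another
    // thread sees this value, and racing with getenv can misbehave.
    unsafe { std::env::set_var("OLLAMA_HOST", "http://127.0.0.1:11434") };

    // ... construct the client, which would read the variable internally ...

    // Cleanup is also racy: a parallel test may still be relying on the value.
    unsafe { std::env::remove_var("OLLAMA_HOST") };
}
```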
As I dug deeper, however, I discovered that the logic for
instantiating `OllamaClient` in test scenarios was not quite right.
In this PR, I aimed to:
- share more code between the two creation codepaths,
`try_from_oss_provider()` and `try_from_provider_with_base_url()`
(see the sketch after this list)
- use the values from `Config` when setting up Ollama: since we have
various mechanisms for overriding config values, we should always
consult the final, resolved `Config` for things such as the
`ModelProviderInfo` associated with the `oss` provider id
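To make the intent concrete, here is a minimal, self-contained sketch of the shape I was going for; the struct fields, helper signatures, and the `main` below are hypothetical stand-ins, not the crate's actual API. The point is that both entry points funnel into one constructor, and the production path reads the `oss` provider out of the resolved `Config` rather than out of the environment.

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the real types; the real `Config` and
// `ModelProviderInfo` live in codex-core and carry many more fields.
struct ModelProviderInfo {
    base_url: String,
}

struct Config {
    model_providers: HashMap<String, ModelProviderInfo>,
}

struct OllamaClient {
    base_url: String,
}

impl OllamaClient {
    /// Shared construction path: everything funnels through here.
    fn try_from_provider_with_base_url(base_url: &str) -> std::io::Result<Self> {
        Ok(Self {
            base_url: base_url.trim_end_matches('/').to_owned(),
        })
    }

    /// Production path: look up the `oss` provider in the final `Config`
    /// (after all overrides have been applied) and delegate.
    fn try_from_oss_provider(config: &Config) -> std::io::Result<Self> {
        let provider = config
            .model_providers
            .get("oss")
            .ok_or_else(|| std::io::Error::other("built-in `oss` provider missing"))?;
        Self::try_from_provider_with_base_url(&provider.base_url)
    }
}

fn main() -> std::io::Result<()> {
    let config = Config {
        model_providers: HashMap::from([(
            "oss".to_owned(),
            ModelProviderInfo {
                base_url: "http://localhost:11434/v1/".to_owned(),
            },
        )]),
    };
    let client = OllamaClient::try_from_oss_provider(&config)?;
    println!("oss provider base url: {}", client.base_url);
    Ok(())
}
```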
Once this was in place,
`OllamaClient::try_from_provider_with_base_url()` could be used in the
unit tests for `OllamaClient`, making it possible to create a properly
configured client without setting environment variables.
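Reusing the hypothetical types from the sketch above, a test can then configure the client explicitly and remains safe to run in parallel; the URL and assertion are illustrative, not the actual tests.

```rust
// Hypothetical test shape: no environment variables, so no `unsafe` and no
// cross-test interference when the harness runs tests on multiple threads.
#[test]
fn builds_client_from_explicit_base_url() {
    let client = OllamaClient::try_from_provider_with_base_url("http://127.0.0.1:11434/v1/")
        .expect("client should build from an explicit base URL");
    assert_eq!(client.base_url, "http://127.0.0.1:11434/v1");
}
```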
```diff
@@ -10,6 +10,7 @@ use codex_core::config_types::SandboxMode;
 use codex_core::protocol::AskForApproval;
 use codex_core::util::is_inside_git_repo;
 use codex_login::load_auth;
+use codex_ollama::DEFAULT_OSS_MODEL;
 use log_layer::TuiLogLayer;
 use std::fs::OpenOptions;
 use std::io::Write;
@@ -71,25 +72,27 @@ pub async fn run_main(
         )
     };
 
+    // When using `--oss`, let the bootstrapper pick the model (defaulting to
+    // gpt-oss:20b) and ensure it is present locally. Also, force the built‑in
+    // `oss` model provider.
+    let model = if let Some(model) = &cli.model {
+        Some(model.clone())
+    } else if cli.oss {
+        Some(DEFAULT_OSS_MODEL.to_owned())
+    } else {
+        None // No model specified, will use the default.
+    };
+
+    let model_provider_override = if cli.oss {
+        Some(BUILT_IN_OSS_MODEL_PROVIDER_ID.to_owned())
+    } else {
+        None
+    };
+
     let config = {
         // Load configuration and support CLI overrides.
         let overrides = ConfigOverrides {
-            // When using `--oss`, let the bootstrapper pick the model
-            // (defaulting to gpt-oss:20b) and ensure it is present locally.
-            model: if cli.oss {
-                Some(
-                    codex_ollama::ensure_oss_ready(cli.model.clone())
-                        .await
-                        .map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?,
-                )
-            } else {
-                cli.model.clone()
-            },
+            model,
             approval_policy,
             sandbox_mode,
             cwd: cli.cwd.clone().map(|p| p.canonicalize().unwrap_or(p)),
@@ -154,6 +157,12 @@ pub async fn run_main(
         .with_target(false)
         .with_filter(env_filter());
 
+    if cli.oss {
+        codex_ollama::ensure_oss_ready(&config)
+            .await
+            .map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?;
+    }
+
     // Channel that carries formatted log lines to the UI.
     let (log_tx, log_rx) = tokio::sync::mpsc::unbounded_channel::<String>();
     let tui_layer = TuiLogLayer::new(log_tx.clone(), 120).with_filter(env_filter());
```