Refactor env settings into config (#1601)

## Summary
- add OpenAI retry and timeout fields to Config (see the sketch below)
- inject these settings in tests instead of mutating env vars
- plumb Config values through client and chat completions logic
- document new configuration options
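
A minimal sketch of the new knobs, reconstructed from the diffs below. The field names come straight from the diff; the exact types, doc comments, and surrounding fields are assumptions:

```rust
/// Per-provider settings (abridged; the real struct carries many more fields).
pub struct ModelProviderInfo {
    // ... name, base_url, http_headers, etc. ...
    /// Max retries for a failed HTTP request; `None` falls back to the built-in default.
    pub request_max_retries: Option<u64>,
    /// Max reconnect attempts after an SSE stream drops mid-response.
    pub stream_max_retries: Option<u64>,
    /// How long (in ms) a stream may stay silent before it is treated as dead.
    pub stream_idle_timeout_ms: Option<u64>,
}
```

Because each field is an `Option`, tests can pin exact values (e.g. `Some(0)` to disable retries entirely) while configs that omit the keys fall back to the defaults.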

## Testing
- `cargo test -p codex-core --no-run`

------
https://chatgpt.com/codex/tasks/task_i_68792c5b04cc832195c03050c8b6ea94

---------

Co-authored-by: Michael Bolin <mbolin@openai.com>
aibrahim-oai
2025-07-18 12:12:39 -07:00
committed by GitHub
parent d5a2148deb
commit 9846adeabf
12 changed files with 228 additions and 94 deletions

View File

@@ -173,7 +173,7 @@ async fn integration_creates_and_checks_session_file() {
     // 5. Sessions are written asynchronously; wait briefly for the directory to appear.
     let sessions_dir = home.path().join("sessions");
     let start = Instant::now();
-    while !sessions_dir.exists() && start.elapsed() < Duration::from_secs(2) {
+    while !sessions_dir.exists() && start.elapsed() < Duration::from_secs(3) {
         std::thread::sleep(Duration::from_millis(50));
     }

View File

@@ -49,13 +49,6 @@ async fn includes_session_id_and_model_headers_in_request() {
.mount(&server)
.await;
// Environment
// Update environment `set_var` is `unsafe` starting with the 2024
// edition so we group the calls into a single `unsafe { … }` block.
unsafe {
std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "0");
std::env::set_var("OPENAI_STREAM_MAX_RETRIES", "0");
}
let model_provider = ModelProviderInfo {
name: "openai".into(),
base_url: format!("{}/v1", server.uri()),
@@ -72,6 +65,9 @@ async fn includes_session_id_and_model_headers_in_request() {
.collect(),
),
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: None,
};
// Init session
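
The `Some(0)` values above lean on an unwrap-with-default resolution step on the client side. A minimal sketch of that pattern, with a hypothetical helper name and an assumed default; the real default lives in codex-core:

```rust
/// Assumed built-in default; the real value lives in codex-core.
const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4;

/// Hypothetical helper: use the configured value when present, else the default.
fn effective_request_max_retries(configured: Option<u64>) -> u64 {
    configured.unwrap_or(DEFAULT_REQUEST_MAX_RETRIES)
}

fn main() {
    assert_eq!(effective_request_max_retries(Some(0)), 0); // test pins retries off
    assert_eq!(effective_request_max_retries(None), 4); // key omitted -> default
}
```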

View File

@@ -45,22 +45,10 @@ async fn spawn_codex() -> Result<Codex, CodexErr> {
         "OPENAI_API_KEY must be set for live tests"
     );

-    // Environment tweaks to keep the tests snappy and inexpensive while still
-    // exercising retry/robustness logic.
-    //
-    // NOTE: Starting with the 2024 edition `std::env::set_var` is `unsafe`
-    // because changing the process environment races with any other threads
-    // that might be performing environment look-ups at the same time.
-    // Restrict the unsafety to this tiny block that happens at the very
-    // beginning of the test, before we spawn any background tasks that could
-    // observe the environment.
-    unsafe {
-        std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "2");
-        std::env::set_var("OPENAI_STREAM_MAX_RETRIES", "2");
-    }
     let codex_home = TempDir::new().unwrap();
-    let config = load_default_config_for_test(&codex_home);
+    let mut config = load_default_config_for_test(&codex_home);
+    config.model_provider.request_max_retries = Some(2);
+    config.model_provider.stream_max_retries = Some(2);

     let (agent, _init_id) = Codex::spawn(config, std::sync::Arc::new(Notify::new())).await?;

     Ok(agent)
@@ -79,7 +67,7 @@ async fn live_streaming_and_prev_id_reset() {
     let codex = spawn_codex().await.unwrap();

-    // ---------- Task 1 ----------
+    // ---------- Task 1 ----------
     codex
         .submit(Op::UserInput {
             items: vec![InputItem::Text {
@@ -113,7 +101,7 @@ async fn live_streaming_and_prev_id_reset() {
         "Agent did not stream any AgentMessage before TaskComplete"
     );

-    // ---------- Task 2 (same session) ----------
+    // ---------- Task 2 (same session) ----------
     codex
         .submit(Op::UserInput {
             items: vec![InputItem::Text {
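
Here the live tests inject a retry budget of 2 through the config instead of the old env vars, keeping runs snappy while still exercising the retry path. For context, a bounded retry loop of the kind these knobs control might look like the sketch below, assuming retries are counted after the first attempt and a simple exponential backoff (codex-core's actual policy is not shown in this diff):

```rust
use std::time::Duration;

/// Run `attempt`, retrying up to `max_retries` times after the first failure.
async fn with_retries<T, E, F, Fut>(max_retries: u64, mut attempt: F) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut tries: u64 = 0;
    loop {
        match attempt().await {
            Ok(value) => return Ok(value),
            Err(_) if tries < max_retries => {
                tries += 1;
                // Assumed backoff schedule: 200 ms, 400 ms, 800 ms, ...
                tokio::time::sleep(Duration::from_millis(100 * (1 << tries))).await;
            }
            Err(err) => return Err(err),
        }
    }
}
```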

View File

@@ -88,13 +88,8 @@ async fn keeps_previous_response_id_between_tasks() {
         .mount(&server)
         .await;

-    // Environment
-    // Update environment `set_var` is `unsafe` starting with the 2024
-    // edition so we group the calls into a single `unsafe { … }` block.
-    unsafe {
-        std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "0");
-        std::env::set_var("OPENAI_STREAM_MAX_RETRIES", "0");
-    }
+    // Configure retry behavior explicitly to avoid mutating process-wide
+    // environment variables.
     let model_provider = ModelProviderInfo {
         name: "openai".into(),
         base_url: format!("{}/v1", server.uri()),
@@ -107,6 +102,10 @@ async fn keeps_previous_response_id_between_tasks() {
         query_params: None,
         http_headers: None,
         env_http_headers: None,
+        // disable retries so we don't get duplicate calls in this test
+        request_max_retries: Some(0),
+        stream_max_retries: Some(0),
+        stream_idle_timeout_ms: None,
     };

     // Init session
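
Retries are pinned to zero in this test because it asserts an exact number of upstream calls against the mock server, and a single stray retry would inflate the count. A sketch of that assertion style with `wiremock` (which these tests already use via `.mount(&server)`); the plain-200 responder is a stand-in for the real SSE body:

```rust
use wiremock::matchers::method;
use wiremock::{Mock, MockServer, ResponseTemplate};

/// Mount a strict mock: the test fails unless exactly `calls` requests arrive.
async fn mount_strict_post(server: &MockServer, calls: u64) {
    Mock::given(method("POST"))
        .respond_with(ResponseTemplate::new(200)) // stand-in for the real SSE response
        .expect(calls) // a stray retry would push the count past `calls`
        .mount(server)
        .await;
    // The call-count expectation is verified when the `MockServer` is dropped.
}
```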

View File

@@ -32,8 +32,6 @@ fn sse_completed(id: &str) -> String {
 }

 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-// this test is flaky (has race conditions), so we ignore it for now
-#[ignore]
 async fn retries_on_early_close() {
     #![allow(clippy::unwrap_used)]
@@ -72,19 +70,8 @@ async fn retries_on_early_close() {
         .mount(&server)
         .await;

-    // Environment
-    //
-    // As of Rust 2024 `std::env::set_var` has been made `unsafe` because
-    // mutating the process environment is inherently racy when other threads
-    // are running. We therefore have to wrap every call in an explicit
-    // `unsafe` block. These are limited to the test-setup section so the
-    // scope is very small and clearly delineated.
-    unsafe {
-        std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "0");
-        std::env::set_var("OPENAI_STREAM_MAX_RETRIES", "1");
-        std::env::set_var("OPENAI_STREAM_IDLE_TIMEOUT_MS", "2000");
-    }
+    // Configure retry behavior explicitly to avoid mutating process-wide
+    // environment variables.
     let model_provider = ModelProviderInfo {
         name: "openai".into(),
@@ -98,6 +85,10 @@ async fn retries_on_early_close() {
         query_params: None,
         http_headers: None,
         env_http_headers: None,
+        // exercise retry path: first attempt yields incomplete stream, so allow 1 retry
+        request_max_retries: Some(0),
+        stream_max_retries: Some(1),
+        stream_idle_timeout_ms: Some(2000),
     };

     let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
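
With `stream_max_retries: Some(1)` and a 2000 ms idle timeout, the first, prematurely closed stream times out and the single permitted reconnect completes the task; dropping the `#[ignore]` reflects that the setup is no longer racy now that no env vars are mutated. One plausible shape for such an idle-timeout guard, assuming `tokio` and `tokio-stream` (the crate's internals may differ):

```rust
use std::time::Duration;
use tokio_stream::{Stream, StreamExt};

/// Wait for the next event, giving up if the stream stays silent for too long.
/// A timeout here would be counted against `stream_max_retries` by the caller.
async fn next_with_idle_timeout<S>(
    stream: &mut S,
    idle_timeout_ms: u64,
) -> Result<Option<S::Item>, tokio::time::error::Elapsed>
where
    S: Stream + Unpin,
{
    tokio::time::timeout(Duration::from_millis(idle_timeout_ms), stream.next()).await
}
```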