#![expect(clippy::expect_used)]

use tempfile::TempDir;

use codex_core::CodexConversation;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use regex_lite::Regex;

#[cfg(target_os = "linux")]
use assert_cmd::cargo::cargo_bin;

pub mod responses;
pub mod test_codex;
pub mod test_codex_exec;
/// Asserts that `pattern` matches `actual` and returns the resulting captures
/// so callers can make further assertions on individual capture groups.
#[track_caller]
pub fn assert_regex_match<'s>(pattern: &str, actual: &'s str) -> regex_lite::Captures<'s> {
    let regex = Regex::new(pattern).unwrap_or_else(|err| {
        panic!("failed to compile regex {pattern:?}: {err}");
    });
    regex
        .captures(actual)
        .unwrap_or_else(|| panic!("regex {pattern:?} did not match {actual:?}"))
}
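
// A minimal usage sketch (hypothetical pattern and input, shown for
// illustration only); the returned captures can be inspected directly:
//
//     let caps = assert_regex_match(r"exit (\d+)", "exit 0");
//     assert_eq!(caps.get(1).map(|m| m.as_str()), Some("0"));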

/// Returns a default `Config` whose on-disk state is confined to the provided
/// temporary directory. Using a per-test directory keeps tests hermetic and
/// avoids clobbering a developer’s real `~/.codex`.
pub fn load_default_config_for_test(codex_home: &TempDir) -> Config {
    Config::load_from_base_config_with_overrides(
        ConfigToml::default(),
        default_test_overrides(),
        codex_home.path().to_path_buf(),
    )
    .expect("defaults for test should always succeed")
}
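
// A minimal usage sketch (hypothetical test, shown for illustration only):
// the `TempDir` stands in for `~/.codex`, so nothing leaks between tests.
//
//     let codex_home = TempDir::new().expect("create temp codex home");
//     let config = load_default_config_for_test(&codex_home);
//     // ...exercise the code under test with `config`...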

/// Overrides applied to every test `Config`. On Linux this points
/// `codex_linux_sandbox_exe` at the locally built `codex-linux-sandbox` binary.
#[cfg(target_os = "linux")]
fn default_test_overrides() -> ConfigOverrides {
    ConfigOverrides {
        codex_linux_sandbox_exe: Some(cargo_bin("codex-linux-sandbox")),
        ..ConfigOverrides::default()
    }
}

#[cfg(not(target_os = "linux"))]
fn default_test_overrides() -> ConfigOverrides {
    ConfigOverrides::default()
}

/// Builds an SSE stream body from a JSON fixture.
///
/// The fixture must contain an array of objects where each object represents a
/// single SSE event with at least a `type` field matching the `event:` value.
/// Additional fields become the JSON payload for the `data:` line. An object
/// with only a `type` field results in an event with no `data:` section. This
/// makes it trivial to extend the fixtures as OpenAI adds new event kinds or
/// fields.
pub fn load_sse_fixture(path: impl AsRef<std::path::Path>) -> String {
    let events: Vec<serde_json::Value> =
        serde_json::from_reader(std::fs::File::open(path).expect("read fixture"))
            .expect("parse JSON fixture");
    events
        .into_iter()
        .map(|e| {
            let kind = e
                .get("type")
                .and_then(|v| v.as_str())
                .expect("fixture event missing type");
            if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
                format!("event: {kind}\n\n")
            } else {
                format!("event: {kind}\ndata: {e}\n\n")
            }
        })
        .collect()
}
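
// A minimal sketch of a fixture (hypothetical event names, shown for
// illustration only):
//
//     [
//         { "type": "response.output_text.delta", "delta": "hi" },
//         { "type": "response.completed" }
//     ]
//
// which renders as an SSE body along the lines of:
//
//     event: response.output_text.delta
//     data: {"delta":"hi","type":"response.output_text.delta"}
//
//     event: response.completed
//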

/// Same as [`load_sse_fixture_with_id`], but takes the fixture template as a
/// string rather than reading it from a file.
pub fn load_sse_fixture_with_id_from_str(raw: &str, id: &str) -> String {
    let replaced = raw.replace("__ID__", id);
    let events: Vec<serde_json::Value> =
        serde_json::from_str(&replaced).expect("parse JSON fixture");
    events
        .into_iter()
        .map(|e| {
            let kind = e
                .get("type")
                .and_then(|v| v.as_str())
                .expect("fixture event missing type");
            if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
                format!("event: {kind}\n\n")
            } else {
                format!("event: {kind}\ndata: {e}\n\n")
            }
        })
        .collect()
}

/// Same as [`load_sse_fixture`], but replaces the placeholder `__ID__` in the
/// fixture template with the supplied identifier before parsing. This lets a
/// single JSON template be reused by multiple tests that each need a unique
/// `response_id`.
pub fn load_sse_fixture_with_id(path: impl AsRef<std::path::Path>, id: &str) -> String {
    let raw = std::fs::read_to_string(path).expect("read fixture template");
    let replaced = raw.replace("__ID__", id);
    let events: Vec<serde_json::Value> =
        serde_json::from_str(&replaced).expect("parse JSON fixture");
    events
        .into_iter()
        .map(|e| {
            let kind = e
                .get("type")
                .and_then(|v| v.as_str())
                .expect("fixture event missing type");
            if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
                format!("event: {kind}\n\n")
            } else {
                format!("event: {kind}\ndata: {e}\n\n")
            }
        })
        .collect()
}
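
// A minimal sketch of a fixture template (hypothetical contents, shown for
// illustration only); each caller substitutes its own response id for `__ID__`:
//
//     [
//         { "type": "response.created", "response": { "id": "__ID__" } },
//         { "type": "response.completed", "response": { "id": "__ID__" } }
//     ]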

/// Waits for the next event that satisfies `predicate` and returns it, using a
/// short default timeout (see [`wait_for_event_with_timeout`]).
pub async fn wait_for_event<F>(
    codex: &CodexConversation,
    predicate: F,
) -> codex_core::protocol::EventMsg
where
    F: FnMut(&codex_core::protocol::EventMsg) -> bool,
{
    use tokio::time::Duration;
    wait_for_event_with_timeout(codex, predicate, Duration::from_secs(1)).await
}
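
// A minimal usage sketch (assumes an `EventMsg::TaskComplete` variant and a
// `conversation` handle; shown for illustration only):
//
//     use codex_core::protocol::EventMsg;
//     let msg = wait_for_event(&conversation, |ev| {
//         matches!(ev, EventMsg::TaskComplete(_))
//     })
//     .await;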

/// Waits for the first event for which `matcher` returns `Some` and returns
/// the extracted value.
pub async fn wait_for_event_match<T, F>(codex: &CodexConversation, matcher: F) -> T
where
    F: Fn(&codex_core::protocol::EventMsg) -> Option<T>,
{
    let ev = wait_for_event(codex, |ev| matcher(ev).is_some()).await;
    matcher(&ev).unwrap()
}

/// Waits up to `wait_time` (clamped to at least five seconds) for an event
/// that satisfies `predicate` and returns it, panicking if the timeout elapses
/// or the event stream ends first.
pub async fn wait_for_event_with_timeout<F>(
    codex: &CodexConversation,
    mut predicate: F,
    wait_time: tokio::time::Duration,
) -> codex_core::protocol::EventMsg
where
    F: FnMut(&codex_core::protocol::EventMsg) -> bool,
{
    use tokio::time::Duration;
    use tokio::time::timeout;
    loop {
        // Allow a bit more time to accommodate async startup work (e.g. config IO, tool discovery).
        let ev = timeout(wait_time.max(Duration::from_secs(5)), codex.next_event())
            .await
            .expect("timeout waiting for event")
            .expect("stream ended unexpectedly");
        if predicate(&ev.msg) {
            return ev.msg;
        }
    }
}

/// Name of the environment variable that records which sandbox the test
/// process itself is running under (e.g. `seatbelt`).
pub fn sandbox_env_var() -> &'static str {
    codex_core::spawn::CODEX_SANDBOX_ENV_VAR
}

/// Name of the environment variable that is set when network access is
/// disabled inside the Codex sandbox.
pub fn sandbox_network_env_var() -> &'static str {
    codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
}

pub mod fs_wait {
    //! Filesystem-wait helpers for tests: block until a path exists, or until
    //! a file matching a predicate appears under a directory, with a timeout.

    use anyhow::Result;
    use anyhow::anyhow;
    use notify::RecursiveMode;
    use notify::Watcher;
    use std::path::Path;
    use std::path::PathBuf;
    use std::sync::mpsc;
    use std::sync::mpsc::RecvTimeoutError;
    use std::time::Duration;
    use std::time::Instant;
    use tokio::task;
    use walkdir::WalkDir;

    /// Resolves with `path` once it exists (immediately if it already does),
    /// or errors if `timeout` elapses first.
    pub async fn wait_for_path_exists(
        path: impl Into<PathBuf>,
        timeout: Duration,
    ) -> Result<PathBuf> {
        let path = path.into();
        task::spawn_blocking(move || wait_for_path_exists_blocking(path, timeout)).await?
    }

    /// Resolves with the first file under `root` for which `predicate` returns
    /// `true`, or errors if `timeout` elapses first.
    pub async fn wait_for_matching_file(
        root: impl Into<PathBuf>,
        timeout: Duration,
        predicate: impl FnMut(&Path) -> bool + Send + 'static,
    ) -> Result<PathBuf> {
        let root = root.into();
        task::spawn_blocking(move || {
            let mut predicate = predicate;
            blocking_find_matching_file(root, timeout, &mut predicate)
        })
        .await?
    }
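
    // A minimal usage sketch (hypothetical session directory and file
    // extension, shown for illustration only): wait for a rollout file to
    // appear under the test's codex home.
    //
    //     let rollout = wait_for_matching_file(
    //         codex_home.path().join("sessions"),
    //         Duration::from_secs(10),
    //         |p| p.extension().is_some_and(|ext| ext == "jsonl"),
    //     )
    //     .await?;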

    fn wait_for_path_exists_blocking(path: PathBuf, timeout: Duration) -> Result<PathBuf> {
        if path.exists() {
            return Ok(path);
        }

        // Watch the nearest existing ancestor so we receive an event when the
        // missing path (or one of its parents) is created.
        let watch_root = nearest_existing_ancestor(&path);
        let (tx, rx) = mpsc::channel();
        let mut watcher = notify::recommended_watcher(move |res| {
            let _ = tx.send(res);
        })?;
        watcher.watch(&watch_root, RecursiveMode::Recursive)?;

        let deadline = Instant::now() + timeout;
        loop {
            if path.exists() {
                return Ok(path.clone());
            }
            let now = Instant::now();
            if now >= deadline {
                break;
            }
            let remaining = deadline.saturating_duration_since(now);
            match rx.recv_timeout(remaining) {
                Ok(Ok(_event)) => {
                    if path.exists() {
                        return Ok(path.clone());
                    }
                }
                Ok(Err(err)) => return Err(err.into()),
                Err(RecvTimeoutError::Timeout) => break,
                Err(RecvTimeoutError::Disconnected) => break,
            }
        }

        if path.exists() {
            Ok(path)
        } else {
            Err(anyhow!("timed out waiting for {:?}", path))
        }
    }

    fn blocking_find_matching_file(
        root: PathBuf,
        timeout: Duration,
        predicate: &mut impl FnMut(&Path) -> bool,
    ) -> Result<PathBuf> {
        let root = wait_for_path_exists_blocking(root, timeout)?;

        if let Some(found) = scan_for_match(&root, predicate) {
            return Ok(found);
        }

        // No match yet: watch `root` and re-scan after every filesystem event
        // until the deadline passes.
        let (tx, rx) = mpsc::channel();
        let mut watcher = notify::recommended_watcher(move |res| {
            let _ = tx.send(res);
        })?;
        watcher.watch(&root, RecursiveMode::Recursive)?;

        let deadline = Instant::now() + timeout;

        while Instant::now() < deadline {
            let remaining = deadline.saturating_duration_since(Instant::now());
            match rx.recv_timeout(remaining) {
                Ok(Ok(_event)) => {
                    if let Some(found) = scan_for_match(&root, predicate) {
                        return Ok(found);
                    }
                }
                Ok(Err(err)) => return Err(err.into()),
                Err(RecvTimeoutError::Timeout) => break,
                Err(RecvTimeoutError::Disconnected) => break,
            }
        }

        if let Some(found) = scan_for_match(&root, predicate) {
            Ok(found)
        } else {
            Err(anyhow!("timed out waiting for matching file in {:?}", root))
        }
    }

    fn scan_for_match(root: &Path, predicate: &mut impl FnMut(&Path) -> bool) -> Option<PathBuf> {
        for entry in WalkDir::new(root).into_iter().filter_map(Result::ok) {
            let path = entry.path();
            if !entry.file_type().is_file() {
                continue;
            }
            if predicate(path) {
                return Some(path.to_path_buf());
            }
        }
        None
    }

    fn nearest_existing_ancestor(path: &Path) -> PathBuf {
        let mut current = path;
        loop {
            if current.exists() {
                return current.to_path_buf();
            }
            match current.parent() {
                Some(parent) => current = parent,
                None => return PathBuf::from("."),
            }
        }
    }
}

/// Returns early from the current test when the suite itself is running under
/// the Seatbelt sandbox (i.e. `sandbox_env_var()` is set to `seatbelt`). The
/// optional argument is the value to return from the enclosing function.
#[macro_export]
macro_rules! skip_if_sandbox {
    () => {{
        if ::std::env::var($crate::sandbox_env_var())
            == ::core::result::Result::Ok("seatbelt".to_string())
        {
            eprintln!(
                "{} is set to 'seatbelt', skipping test.",
                $crate::sandbox_env_var()
            );
            return;
        }
    }};
    ($return_value:expr $(,)?) => {{
        if ::std::env::var($crate::sandbox_env_var())
            == ::core::result::Result::Ok("seatbelt".to_string())
        {
            eprintln!(
                "{} is set to 'seatbelt', skipping test.",
                $crate::sandbox_env_var()
            );
            return $return_value;
        }
    }};
}
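
// A minimal usage sketch (hypothetical test, shown for illustration only):
//
//     #[tokio::test]
//     async fn spawns_its_own_sandbox() {
//         skip_if_sandbox!();
//         // ...body that cannot run when the suite is already sandboxed...
//     }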

/// Returns early from the current test when network access is disabled in the
/// Codex sandbox (i.e. `sandbox_network_env_var()` is set). The optional
/// argument is the value to return from the enclosing function.
#[macro_export]
macro_rules! skip_if_no_network {
    () => {{
        if ::std::env::var($crate::sandbox_network_env_var()).is_ok() {
            println!(
                "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
            );
            return;
        }
    }};
    ($return_value:expr $(,)?) => {{
        if ::std::env::var($crate::sandbox_network_env_var()).is_ok() {
            println!(
                "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
            );
            return $return_value;
        }
    }};
}
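
// A minimal usage sketch (hypothetical test returning a `Result`, shown for
// illustration only); the argument form supplies the early-return value:
//
//     #[tokio::test]
//     async fn talks_to_mock_server() -> anyhow::Result<()> {
//         skip_if_no_network!(Ok(()));
//         // ...body that needs (local) network access...
//         Ok(())
//     }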