//! Asynchronous worker that executes a **Codex** tool-call inside a spawned
//! Tokio task. Separated from `message_processor.rs` to keep that file small
//! and to make future feature-growth easier to manage.
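//!
//! A minimal sketch of how this worker is expected to be scheduled
//! (illustrative only; the real call site in `message_processor.rs` may
//! differ, and the local variable names below are hypothetical):
//!
//! ```ignore
//! tokio::spawn(run_codex_tool_session(
//!     request_id,       // mcp_types::RequestId of the `tools/call` request
//!     initial_prompt,   // prompt text extracted from the tool-call arguments
//!     config,           // codex_core::config::Config for this session
//!     outgoing.clone(), // Arc<OutgoingMessageSender> shared with the server
//! ));
//! ```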

use std::sync::Arc;

use codex_core::Codex;
use codex_core::codex_wrapper::init_codex;
use codex_core::config::Config as CodexConfig;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecApprovalRequestEvent;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::ReviewDecision;
use codex_core::protocol::Submission;
use codex_core::protocol::TaskCompleteEvent;
use mcp_types::CallToolResult;
use mcp_types::ContentBlock;
use mcp_types::ElicitRequest;
use mcp_types::ElicitRequestParamsRequestedSchema;
use mcp_types::ModelContextProtocolRequest;
use mcp_types::RequestId;
use mcp_types::TextContent;
use serde::Deserialize;
use serde_json::json;
use tracing::error;

use crate::outgoing_message::OutgoingMessageSender;

/// Run a complete Codex session and stream events back to the client.
///
/// On completion (success or error) the function sends the appropriate
/// `tools/call` response so the LLM can continue the conversation.
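///
/// Exec-approval requests that arrive while the task is running are forwarded
/// to the client as MCP elicitations; the session keeps streaming events while
/// it waits for the client's decision.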
pub async fn run_codex_tool_session(
    id: RequestId,
    initial_prompt: String,
    config: CodexConfig,
    outgoing: Arc<OutgoingMessageSender>,
) {
    let (codex, first_event, _ctrl_c) = match init_codex(config).await {
        Ok(res) => res,
        Err(e) => {
            let result = CallToolResult {
                content: vec![ContentBlock::TextContent(TextContent {
                    r#type: "text".to_string(),
                    text: format!("Failed to start Codex session: {e}"),
                    annotations: None,
                })],
                is_error: Some(true),
                structured_content: None,
            };
            outgoing.send_response(id.clone(), result.into()).await;
            return;
        }
    };
    let codex = Arc::new(codex);

    // Send initial SessionConfigured event.
    outgoing.send_event_as_notification(&first_event).await;

    // Use the original MCP request ID as the `sub_id` for the Codex submission so that
    // any events emitted for this tool-call can be correlated with the
    // originating `tools/call` request.
    let sub_id = match &id {
        RequestId::String(s) => s.clone(),
        RequestId::Integer(n) => n.to_string(),
    };

    let submission = Submission {
        id: sub_id.clone(),
        op: Op::UserInput {
            items: vec![InputItem::Text {
                text: initial_prompt.clone(),
            }],
        },
    };

    if let Err(e) = codex.submit_with_id(submission).await {
        tracing::error!("Failed to submit initial prompt: {e}");
    }

    // Stream events until the task needs to pause for user interaction or
    // completes.
    loop {
        match codex.next_event().await {
            Ok(event) => {
                outgoing.send_event_as_notification(&event).await;

                match event.msg {
                    EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
                        command,
                        cwd,
                        reason: _,
                    }) => {
                        let escaped_command = shlex::try_join(command.iter().map(|s| s.as_str()))
                            .unwrap_or_else(|_| command.join(" "));
                        let message = format!("Allow Codex to run `{escaped_command}` in {cwd:?}?");

                        let params = json!({
                            // These fields are required so that `params`
                            // conforms to ElicitRequestParams.
                            "message": message,
                            "requestedSchema": ElicitRequestParamsRequestedSchema {
                                r#type: "object".to_string(),
                                properties: json!({}),
                                required: None,
                            },

                            // These are additional fields the client can use to
                            // correlate the request with the codex tool call.
                            "codex_elicitation": "exec-approval",
                            "codex_mcp_tool_call_id": sub_id,
                            "codex_event_id": event.id,
                            "codex_command": command,
                            // Could convert it to base64 encoded bytes if we
                            // don't want to use to_string_lossy() here?
                            "codex_cwd": cwd.to_string_lossy().to_string()
                        });
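
                        // For illustration only (hypothetical values): for a
                        // command like ["git", "status"] run in "/repo", the
                        // params above serialize to roughly:
                        //
                        //   {
                        //     "message": "Allow Codex to run `git status` in \"/repo\"?",
                        //     "requestedSchema": { "type": "object", "properties": {} },
                        //     "codex_elicitation": "exec-approval",
                        //     "codex_mcp_tool_call_id": "<sub_id>",
                        //     "codex_event_id": "<event.id>",
                        //     "codex_command": ["git", "status"],
                        //     "codex_cwd": "/repo"
                        //   }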
                        let on_response = outgoing
                            .send_request(ElicitRequest::METHOD, Some(params))
                            .await;

                        // Listen for the response on a separate task so we do
                        // not block the main loop of this function.
                        {
                            let codex = codex.clone();
                            let event_id = event.id.clone();
                            tokio::spawn(async move {
                                on_exec_approval_response(event_id, on_response, codex).await;
                            });
                        }

                        // Continue, don't break so the session continues.
                        continue;
                    }
                    EventMsg::ApplyPatchApprovalRequest(_) => {
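                        // Patch approval is not surfaced as an MCP elicitation
                        // yet; reply to the original tool call with a sentinel
                        // string and keep streaming events for this session.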
                        let result = CallToolResult {
                            content: vec![ContentBlock::TextContent(TextContent {
                                r#type: "text".to_string(),
                                text: "PATCH_APPROVAL_REQUIRED".to_string(),
                                annotations: None,
                            })],
                            is_error: None,
                            structured_content: None,
                        };
                        outgoing.send_response(id.clone(), result.into()).await;
                        // Continue, don't break so the session continues.
                        continue;
                    }
                    EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => {
                        let text = match last_agent_message {
                            Some(msg) => msg.clone(),
                            None => "".to_string(),
                        };
                        let result = CallToolResult {
                            content: vec![ContentBlock::TextContent(TextContent {
                                r#type: "text".to_string(),
                                text,
                                annotations: None,
                            })],
                            is_error: None,
                            structured_content: None,
                        };
                        outgoing.send_response(id.clone(), result.into()).await;
                        break;
                    }
                    EventMsg::SessionConfigured(_) => {
                        tracing::error!("unexpected SessionConfigured event");
                    }
                    EventMsg::AgentMessageDelta(_) => {
                        // TODO: decide how we want to support this over MCP.
                    }
                    EventMsg::AgentReasoningDelta(_) => {
                        // TODO: decide how we want to support this over MCP.
                    }
                    EventMsg::AgentMessage(AgentMessageEvent { .. }) => {
                        // TODO: decide how we want to support this over MCP.
                    }
                    EventMsg::Error(_)
                    | EventMsg::TaskStarted
                    | EventMsg::TokenCount(_)
                    | EventMsg::AgentReasoning(_)
                    | EventMsg::McpToolCallBegin(_)
                    | EventMsg::McpToolCallEnd(_)
                    | EventMsg::ExecCommandBegin(_)
                    | EventMsg::ExecCommandEnd(_)
                    | EventMsg::BackgroundEvent(_)
                    | EventMsg::PatchApplyBegin(_)
                    | EventMsg::PatchApplyEnd(_)
                    | EventMsg::GetHistoryEntryResponse(_) => {
                        // For now, we do not do anything extra for these
                        // events. Note that
                        // `send_event_as_notification(&event)` above has
                        // already dispatched them as notifications, though we
                        // may want to give individual events different
                        // treatment in the future.
                    }
                }
            }
            Err(e) => {
                let result = CallToolResult {
                    content: vec![ContentBlock::TextContent(TextContent {
                        r#type: "text".to_string(),
                        text: format!("Codex runtime error: {e}"),
                        annotations: None,
                    })],
                    is_error: Some(true),
                    // TODO(mbolin): Could present the error in a more
                    // structured way.
                    structured_content: None,
                };
                outgoing.send_response(id.clone(), result.into()).await;
                break;
            }
        }
    }
}
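
/// Awaits the client's reply to an exec-approval elicitation (delivered via
/// the oneshot channel returned by `OutgoingMessageSender::send_request`) and
/// forwards the decision to Codex as an `Op::ExecApproval` submission.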
async fn on_exec_approval_response(
    event_id: String,
    receiver: tokio::sync::oneshot::Receiver<mcp_types::Result>,
    codex: Arc<Codex>,
) {
    let response = receiver.await;
    let value = match response {
        Ok(value) => value,
        Err(err) => {
            error!("request failed: {err:?}");
            return;
        }
    };

    // Try to deserialize `value` and then make the appropriate call to `codex`.
    let response = match serde_json::from_value::<ExecApprovalResponse>(value) {
        Ok(response) => response,
        Err(err) => {
            error!("failed to deserialize ExecApprovalResponse: {err}");
            return;
        }
    };

    if let Err(err) = codex
        .submit(Op::ExecApproval {
            id: event_id,
            decision: response.decision,
        })
        .await
    {
        error!("failed to submit ExecApproval: {err}");
    }
}
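
/// Payload this module expects to deserialize from the client's exec-approval
/// elicitation response. Illustrative example only (assuming `ReviewDecision`
/// serializes to a lower-case string such as "approved"; see
/// `codex_core::protocol` for the authoritative representation):
/// `{ "decision": "approved" }`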
#[derive(Debug, Deserialize)]
pub struct ExecApprovalResponse {
    pub decision: ReviewDecision,
}