2025-09-03 17:05:03 -07:00
|
|
|
use crate::error_code::INTERNAL_ERROR_CODE;
|
|
|
|
|
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
2025-09-29 12:19:09 -07:00
|
|
|
use crate::fuzzy_file_search::run_fuzzy_file_search;
|
2025-10-21 11:15:17 -07:00
|
|
|
use crate::models::supported_models;
|
2025-09-03 17:05:03 -07:00
|
|
|
use crate::outgoing_message::OutgoingMessageSender;
|
|
|
|
|
use crate::outgoing_message::OutgoingNotification;
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
use codex_app_server_protocol::AddConversationListenerParams;
|
|
|
|
|
use codex_app_server_protocol::AddConversationSubscriptionResponse;
|
|
|
|
|
use codex_app_server_protocol::ApplyPatchApprovalParams;
|
|
|
|
|
use codex_app_server_protocol::ApplyPatchApprovalResponse;
|
|
|
|
|
use codex_app_server_protocol::ArchiveConversationParams;
|
|
|
|
|
use codex_app_server_protocol::ArchiveConversationResponse;
|
2025-10-20 14:11:54 -07:00
|
|
|
use codex_app_server_protocol::AuthMode;
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
use codex_app_server_protocol::AuthStatusChangeNotification;
|
|
|
|
|
use codex_app_server_protocol::ClientRequest;
|
|
|
|
|
use codex_app_server_protocol::ConversationSummary;
|
|
|
|
|
use codex_app_server_protocol::ExecCommandApprovalParams;
|
|
|
|
|
use codex_app_server_protocol::ExecCommandApprovalResponse;
|
|
|
|
|
use codex_app_server_protocol::ExecOneOffCommandParams;
|
|
|
|
|
use codex_app_server_protocol::ExecOneOffCommandResponse;
|
|
|
|
|
use codex_app_server_protocol::FuzzyFileSearchParams;
|
|
|
|
|
use codex_app_server_protocol::FuzzyFileSearchResponse;
|
2025-10-20 14:11:54 -07:00
|
|
|
use codex_app_server_protocol::GetAccountRateLimitsResponse;
|
2025-10-27 09:11:45 -07:00
|
|
|
use codex_app_server_protocol::GetConversationSummaryParams;
|
|
|
|
|
use codex_app_server_protocol::GetConversationSummaryResponse;
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
use codex_app_server_protocol::GetUserAgentResponse;
|
|
|
|
|
use codex_app_server_protocol::GetUserSavedConfigResponse;
|
|
|
|
|
use codex_app_server_protocol::GitDiffToRemoteResponse;
|
|
|
|
|
use codex_app_server_protocol::InputItem as WireInputItem;
|
|
|
|
|
use codex_app_server_protocol::InterruptConversationParams;
|
|
|
|
|
use codex_app_server_protocol::InterruptConversationResponse;
|
|
|
|
|
use codex_app_server_protocol::JSONRPCErrorError;
|
|
|
|
|
use codex_app_server_protocol::ListConversationsParams;
|
|
|
|
|
use codex_app_server_protocol::ListConversationsResponse;
|
2025-10-21 11:15:17 -07:00
|
|
|
use codex_app_server_protocol::ListModelsParams;
|
|
|
|
|
use codex_app_server_protocol::ListModelsResponse;
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
use codex_app_server_protocol::LoginApiKeyParams;
|
|
|
|
|
use codex_app_server_protocol::LoginApiKeyResponse;
|
|
|
|
|
use codex_app_server_protocol::LoginChatGptCompleteNotification;
|
|
|
|
|
use codex_app_server_protocol::LoginChatGptResponse;
|
|
|
|
|
use codex_app_server_protocol::NewConversationParams;
|
|
|
|
|
use codex_app_server_protocol::NewConversationResponse;
|
|
|
|
|
use codex_app_server_protocol::RemoveConversationListenerParams;
|
|
|
|
|
use codex_app_server_protocol::RemoveConversationSubscriptionResponse;
|
|
|
|
|
use codex_app_server_protocol::RequestId;
|
|
|
|
|
use codex_app_server_protocol::Result as JsonRpcResult;
|
|
|
|
|
use codex_app_server_protocol::ResumeConversationParams;
|
|
|
|
|
use codex_app_server_protocol::SendUserMessageParams;
|
|
|
|
|
use codex_app_server_protocol::SendUserMessageResponse;
|
|
|
|
|
use codex_app_server_protocol::SendUserTurnParams;
|
|
|
|
|
use codex_app_server_protocol::SendUserTurnResponse;
|
|
|
|
|
use codex_app_server_protocol::ServerNotification;
|
|
|
|
|
use codex_app_server_protocol::ServerRequestPayload;
|
|
|
|
|
use codex_app_server_protocol::SessionConfiguredNotification;
|
|
|
|
|
use codex_app_server_protocol::SetDefaultModelParams;
|
|
|
|
|
use codex_app_server_protocol::SetDefaultModelResponse;
|
2025-10-26 22:53:39 -07:00
|
|
|
use codex_app_server_protocol::UploadFeedbackParams;
|
|
|
|
|
use codex_app_server_protocol::UploadFeedbackResponse;
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
use codex_app_server_protocol::UserInfoResponse;
|
|
|
|
|
use codex_app_server_protocol::UserSavedConfig;
|
2025-10-20 14:11:54 -07:00
|
|
|
use codex_backend_client::Client as BackendClient;
|
2025-09-02 18:36:19 -07:00
|
|
|
use codex_core::AuthManager;
|
2025-08-13 23:00:50 -07:00
|
|
|
use codex_core::CodexConversation;
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
use codex_core::ConversationManager;
|
2025-09-04 16:44:18 -07:00
|
|
|
use codex_core::Cursor as RolloutCursor;
|
2025-10-02 13:06:21 -07:00
|
|
|
use codex_core::INTERACTIVE_SESSION_SOURCES;
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
use codex_core::NewConversation;
|
2025-09-04 16:44:18 -07:00
|
|
|
use codex_core::RolloutRecorder;
|
2025-09-08 14:54:47 -07:00
|
|
|
use codex_core::SessionMeta;
|
2025-09-03 17:05:03 -07:00
|
|
|
use codex_core::auth::CLIENT_ID;
|
2025-09-11 09:16:34 -07:00
|
|
|
use codex_core::auth::login_with_api_key;
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
use codex_core::config::Config;
|
|
|
|
|
use codex_core::config::ConfigOverrides;
|
2025-08-27 09:59:03 -07:00
|
|
|
use codex_core::config::ConfigToml;
|
|
|
|
|
use codex_core::config::load_config_as_toml;
|
2025-09-11 23:44:17 -07:00
|
|
|
use codex_core::config_edit::CONFIG_KEY_EFFORT;
|
|
|
|
|
use codex_core::config_edit::CONFIG_KEY_MODEL;
|
2025-09-12 11:35:51 -07:00
|
|
|
use codex_core::config_edit::persist_overrides_and_clear_if_none;
|
2025-09-08 10:30:13 -07:00
|
|
|
use codex_core::default_client::get_codex_user_agent;
|
2025-09-03 17:05:03 -07:00
|
|
|
use codex_core::exec::ExecParams;
|
|
|
|
|
use codex_core::exec_env::create_env;
|
|
|
|
|
use codex_core::get_platform_sandbox;
|
2025-08-19 19:50:28 -07:00
|
|
|
use codex_core::git_info::git_diff_to_remote;
|
2025-08-13 23:00:50 -07:00
|
|
|
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
|
|
|
|
|
use codex_core::protocol::Event;
|
|
|
|
|
use codex_core::protocol::EventMsg;
|
|
|
|
|
use codex_core::protocol::ExecApprovalRequestEvent;
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
use codex_core::protocol::Op;
|
2025-09-03 17:05:03 -07:00
|
|
|
use codex_core::protocol::ReviewDecision;
|
2025-10-27 09:11:45 -07:00
|
|
|
use codex_core::read_head_for_summary;
|
2025-10-26 22:53:39 -07:00
|
|
|
use codex_feedback::CodexFeedback;
|
2025-08-17 10:03:52 -07:00
|
|
|
use codex_login::ServerOptions as LoginServerOptions;
|
|
|
|
|
use codex_login::ShutdownHandle;
|
|
|
|
|
use codex_login::run_login_server;
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
use codex_protocol::ConversationId;
|
2025-10-20 08:50:54 -07:00
|
|
|
use codex_protocol::config_types::ForcedLoginMethod;
|
2025-10-22 10:14:50 -07:00
|
|
|
use codex_protocol::items::TurnItem;
|
2025-09-08 14:54:47 -07:00
|
|
|
use codex_protocol::models::ResponseItem;
|
2025-10-20 14:11:54 -07:00
|
|
|
use codex_protocol::protocol::RateLimitSnapshot;
|
2025-09-08 14:54:47 -07:00
|
|
|
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
|
2025-10-20 13:34:44 -07:00
|
|
|
use codex_protocol::user_input::UserInput as CoreInputItem;
|
fix: separate `codex mcp` into `codex mcp-server` and `codex app-server` (#4471)
This is a very large PR with some non-backwards-compatible changes.
Historically, `codex mcp` (or `codex mcp serve`) started a JSON-RPC-ish
server that had two overlapping responsibilities:
- Running an MCP server, providing some basic tool calls.
- Running the app server used to power experiences such as the VS Code
extension.
This PR aims to separate these into distinct concepts:
- `codex mcp-server` for the MCP server
- `codex app-server` for the "application server"
Note `codex mcp` still exists because it already has its own subcommands
for MCP management (`list`, `add`, etc.)
The MCP logic continues to live in `codex-rs/mcp-server` whereas the
refactored app server logic is in the new `codex-rs/app-server` folder.
Note that most of the existing integration tests in
`codex-rs/mcp-server/tests/suite` were actually for the app server, so
all the tests have been moved with the exception of
`codex-rs/mcp-server/tests/suite/mod.rs`.
Because this is already a large diff, I tried not to change more than I
had to, so `codex-rs/app-server/tests/common/mcp_process.rs` still uses
the name `McpProcess` for now, but I will do some mechanical renamings
to things like `AppServer` in subsequent PRs.
While `mcp-server` and `app-server` share some overlapping functionality
(like reading streams of JSONL and dispatching based on message types)
and some differences (completely different message types), I ended up
doing a bit of copypasta between the two crates, as both have somewhat
similar `message_processor.rs` and `outgoing_message.rs` files for now,
though I expect them to diverge more in the near future.
One material change is that of the initialize handshake for `codex
app-server`, as we no longer use the MCP types for that handshake.
Instead, we update `codex-rs/protocol/src/mcp_protocol.rs` to add an
`Initialize` variant to `ClientRequest`, which takes the `ClientInfo`
object we need to update the `USER_AGENT_SUFFIX` in
`codex-rs/app-server/src/message_processor.rs`.
One other material change is in
`codex-rs/app-server/src/codex_message_processor.rs` where I eliminated
a use of the `send_event_as_notification()` method I am generally trying
to deprecate (because it blindly maps an `EventMsg` into a
`JSONNotification`) in favor of `send_server_notification()`, which
takes a `ServerNotification`, as that is intended to be a custom enum of
all notification types supported by the app server. So to make this
update, I had to introduce a new variant of `ServerNotification`,
`SessionConfigured`, which is a non-backwards compatible change with the
old `codex mcp`, and clients will have to be updated after the next
release that contains this PR. Note that
`codex-rs/app-server/tests/suite/list_resume.rs` also had to be update
to reflect this change.
I introduced `codex-rs/utils/json-to-toml/src/lib.rs` as a small utility
crate to avoid some of the copying between `mcp-server` and
`app-server`.
2025-09-30 00:06:18 -07:00
|
|
|
use codex_utils_json_to_toml::json_to_toml;
|
2025-09-08 14:54:47 -07:00
|
|
|
use std::collections::HashMap;
|
2025-09-09 08:39:00 -07:00
|
|
|
use std::ffi::OsStr;
|
2025-10-27 09:11:45 -07:00
|
|
|
use std::io::Error as IoError;
|
|
|
|
|
use std::path::Path;
|
2025-09-08 14:54:47 -07:00
|
|
|
use std::path::PathBuf;
|
|
|
|
|
use std::sync::Arc;
|
2025-09-29 12:19:09 -07:00
|
|
|
use std::sync::atomic::AtomicBool;
|
|
|
|
|
use std::sync::atomic::Ordering;
|
2025-09-08 14:54:47 -07:00
|
|
|
use std::time::Duration;
|
2025-09-09 08:39:00 -07:00
|
|
|
use tokio::select;
|
2025-09-03 17:05:03 -07:00
|
|
|
use tokio::sync::Mutex;
|
|
|
|
|
use tokio::sync::oneshot;
|
|
|
|
|
use tracing::error;
|
2025-09-09 08:39:00 -07:00
|
|
|
use tracing::info;
|
|
|
|
|
use tracing::warn;
|
2025-09-03 17:05:03 -07:00
|
|
|
use uuid::Uuid;
|
2025-08-17 10:03:52 -07:00
|
|
|
|
|
|
|
|
/// How long we wait for a ChatGPT login flow to complete before giving up.
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(600); // 10 minutes
|
|
|
|
|
/// State for an in-flight ChatGPT login attempt.
struct ActiveLogin {
    // Handle used to shut down the login flow (shutdown() is invoked when
    // this login is torn down).
    shutdown_handle: ShutdownHandle,
    // Unique identifier for this particular login attempt.
    login_id: Uuid,
}
|
|
|
|
|
|
|
|
|
|
impl ActiveLogin {
|
|
|
|
|
fn drop(&self) {
|
2025-08-18 17:57:04 -07:00
|
|
|
self.shutdown_handle.shutdown();
|
2025-08-17 10:03:52 -07:00
|
|
|
}
|
|
|
|
|
}
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
|
|
|
|
|
/// Handles JSON-RPC messages for Codex conversations.
|
|
|
|
|
pub(crate) struct CodexMessageProcessor {
|
2025-08-22 13:10:11 -07:00
|
|
|
auth_manager: Arc<AuthManager>,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
conversation_manager: Arc<ConversationManager>,
|
|
|
|
|
outgoing: Arc<OutgoingMessageSender>,
|
|
|
|
|
codex_linux_sandbox_exe: Option<PathBuf>,
|
2025-08-20 20:36:34 -07:00
|
|
|
config: Arc<Config>,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
|
2025-08-17 10:03:52 -07:00
|
|
|
active_login: Arc<Mutex<Option<ActiveLogin>>>,
|
2025-08-17 21:40:31 -07:00
|
|
|
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
|
2025-09-07 20:22:25 -07:00
|
|
|
pending_interrupts: Arc<Mutex<HashMap<ConversationId, Vec<RequestId>>>>,
|
2025-09-29 12:19:09 -07:00
|
|
|
pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
|
2025-10-26 22:53:39 -07:00
|
|
|
feedback: CodexFeedback,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl CodexMessageProcessor {
|
|
|
|
|
pub fn new(
|
2025-08-22 13:10:11 -07:00
|
|
|
auth_manager: Arc<AuthManager>,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
conversation_manager: Arc<ConversationManager>,
|
|
|
|
|
outgoing: Arc<OutgoingMessageSender>,
|
|
|
|
|
codex_linux_sandbox_exe: Option<PathBuf>,
|
2025-08-20 20:36:34 -07:00
|
|
|
config: Arc<Config>,
|
2025-10-26 22:53:39 -07:00
|
|
|
feedback: CodexFeedback,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
) -> Self {
|
|
|
|
|
Self {
|
2025-08-22 13:10:11 -07:00
|
|
|
auth_manager,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
conversation_manager,
|
|
|
|
|
outgoing,
|
|
|
|
|
codex_linux_sandbox_exe,
|
2025-08-20 20:36:34 -07:00
|
|
|
config,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
conversation_listeners: HashMap::new(),
|
2025-08-17 10:03:52 -07:00
|
|
|
active_login: Arc::new(Mutex::new(None)),
|
2025-08-17 21:40:31 -07:00
|
|
|
pending_interrupts: Arc::new(Mutex::new(HashMap::new())),
|
2025-09-29 12:19:09 -07:00
|
|
|
pending_fuzzy_searches: Arc::new(Mutex::new(HashMap::new())),
|
2025-10-26 22:53:39 -07:00
|
|
|
feedback,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-13 23:00:50 -07:00
|
|
|
pub async fn process_request(&mut self, request: ClientRequest) {
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
match request {
|
fix: separate `codex mcp` into `codex mcp-server` and `codex app-server` (#4471)
This is a very large PR with some non-backwards-compatible changes.
Historically, `codex mcp` (or `codex mcp serve`) started a JSON-RPC-ish
server that had two overlapping responsibilities:
- Running an MCP server, providing some basic tool calls.
- Running the app server used to power experiences such as the VS Code
extension.
This PR aims to separate these into distinct concepts:
- `codex mcp-server` for the MCP server
- `codex app-server` for the "application server"
Note `codex mcp` still exists because it already has its own subcommands
for MCP management (`list`, `add`, etc.)
The MCP logic continues to live in `codex-rs/mcp-server` whereas the
refactored app server logic is in the new `codex-rs/app-server` folder.
Note that most of the existing integration tests in
`codex-rs/mcp-server/tests/suite` were actually for the app server, so
all the tests have been moved with the exception of
`codex-rs/mcp-server/tests/suite/mod.rs`.
Because this is already a large diff, I tried not to change more than I
had to, so `codex-rs/app-server/tests/common/mcp_process.rs` still uses
the name `McpProcess` for now, but I will do some mechanical renamings
to things like `AppServer` in subsequent PRs.
While `mcp-server` and `app-server` share some overlapping functionality
(like reading streams of JSONL and dispatching based on message types)
and some differences (completely different message types), I ended up
doing a bit of copypasta between the two crates, as both have somewhat
similar `message_processor.rs` and `outgoing_message.rs` files for now,
though I expect them to diverge more in the near future.
One material change is that of the initialize handshake for `codex
app-server`, as we no longer use the MCP types for that handshake.
Instead, we update `codex-rs/protocol/src/mcp_protocol.rs` to add an
`Initialize` variant to `ClientRequest`, which takes the `ClientInfo`
object we need to update the `USER_AGENT_SUFFIX` in
`codex-rs/app-server/src/message_processor.rs`.
One other material change is in
`codex-rs/app-server/src/codex_message_processor.rs` where I eliminated
a use of the `send_event_as_notification()` method I am generally trying
to deprecate (because it blindly maps an `EventMsg` into a
`JSONNotification`) in favor of `send_server_notification()`, which
takes a `ServerNotification`, as that is intended to be a custom enum of
all notification types supported by the app server. So to make this
update, I had to introduce a new variant of `ServerNotification`,
`SessionConfigured`, which is a non-backwards compatible change with the
old `codex mcp`, and clients will have to be updated after the next
release that contains this PR. Note that
`codex-rs/app-server/tests/suite/list_resume.rs` also had to be update
to reflect this change.
I introduced `codex-rs/utils/json-to-toml/src/lib.rs` as a small utility
crate to avoid some of the copying between `mcp-server` and
`app-server`.
2025-09-30 00:06:18 -07:00
|
|
|
ClientRequest::Initialize { .. } => {
|
|
|
|
|
panic!("Initialize should be handled in MessageProcessor");
|
|
|
|
|
}
|
2025-08-13 23:00:50 -07:00
|
|
|
ClientRequest::NewConversation { request_id, params } => {
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
// Do not tokio::spawn() to process new_conversation()
|
|
|
|
|
// asynchronously because we need to ensure the conversation is
|
|
|
|
|
// created before processing any subsequent messages.
|
|
|
|
|
self.process_new_conversation(request_id, params).await;
|
|
|
|
|
}
|
2025-10-27 09:11:45 -07:00
|
|
|
ClientRequest::GetConversationSummary { request_id, params } => {
|
|
|
|
|
self.get_conversation_summary(request_id, params).await;
|
|
|
|
|
}
|
2025-09-04 16:44:18 -07:00
|
|
|
ClientRequest::ListConversations { request_id, params } => {
|
|
|
|
|
self.handle_list_conversations(request_id, params).await;
|
|
|
|
|
}
|
2025-10-21 11:15:17 -07:00
|
|
|
ClientRequest::ListModels { request_id, params } => {
|
|
|
|
|
self.list_models(request_id, params).await;
|
|
|
|
|
}
|
2025-10-22 15:36:11 -07:00
|
|
|
ClientRequest::LoginAccount {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
|
|
|
|
self.send_unimplemented_error(request_id, "account/login")
|
|
|
|
|
.await;
|
|
|
|
|
}
|
|
|
|
|
ClientRequest::LogoutAccount {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
|
|
|
|
self.send_unimplemented_error(request_id, "account/logout")
|
|
|
|
|
.await;
|
|
|
|
|
}
|
|
|
|
|
ClientRequest::GetAccount {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
|
|
|
|
self.send_unimplemented_error(request_id, "account/read")
|
|
|
|
|
.await;
|
|
|
|
|
}
|
2025-09-04 16:44:18 -07:00
|
|
|
ClientRequest::ResumeConversation { request_id, params } => {
|
|
|
|
|
self.handle_resume_conversation(request_id, params).await;
|
|
|
|
|
}
|
2025-09-09 08:39:00 -07:00
|
|
|
ClientRequest::ArchiveConversation { request_id, params } => {
|
|
|
|
|
self.archive_conversation(request_id, params).await;
|
|
|
|
|
}
|
2025-08-13 23:00:50 -07:00
|
|
|
ClientRequest::SendUserMessage { request_id, params } => {
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
self.send_user_message(request_id, params).await;
|
|
|
|
|
}
|
2025-08-15 10:05:58 -07:00
|
|
|
ClientRequest::SendUserTurn { request_id, params } => {
|
|
|
|
|
self.send_user_turn(request_id, params).await;
|
|
|
|
|
}
|
2025-08-13 23:12:03 -07:00
|
|
|
ClientRequest::InterruptConversation { request_id, params } => {
|
|
|
|
|
self.interrupt_conversation(request_id, params).await;
|
|
|
|
|
}
|
2025-08-13 23:00:50 -07:00
|
|
|
ClientRequest::AddConversationListener { request_id, params } => {
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
self.add_conversation_listener(request_id, params).await;
|
|
|
|
|
}
|
2025-08-13 23:00:50 -07:00
|
|
|
ClientRequest::RemoveConversationListener { request_id, params } => {
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
self.remove_conversation_listener(request_id, params).await;
|
|
|
|
|
}
|
2025-08-22 13:10:11 -07:00
|
|
|
ClientRequest::GitDiffToRemote { request_id, params } => {
|
|
|
|
|
self.git_diff_to_origin(request_id, params.cwd).await;
|
|
|
|
|
}
|
2025-09-11 09:16:34 -07:00
|
|
|
ClientRequest::LoginApiKey { request_id, params } => {
|
|
|
|
|
self.login_api_key(request_id, params).await;
|
|
|
|
|
}
|
2025-09-30 12:03:32 -07:00
|
|
|
ClientRequest::LoginChatGpt {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
2025-08-17 10:03:52 -07:00
|
|
|
self.login_chatgpt(request_id).await;
|
|
|
|
|
}
|
|
|
|
|
ClientRequest::CancelLoginChatGpt { request_id, params } => {
|
|
|
|
|
self.cancel_login_chatgpt(request_id, params.login_id).await;
|
|
|
|
|
}
|
2025-09-30 12:03:32 -07:00
|
|
|
ClientRequest::LogoutChatGpt {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
2025-08-20 20:36:34 -07:00
|
|
|
self.logout_chatgpt(request_id).await;
|
|
|
|
|
}
|
2025-08-22 13:10:11 -07:00
|
|
|
ClientRequest::GetAuthStatus { request_id, params } => {
|
|
|
|
|
self.get_auth_status(request_id, params).await;
|
2025-08-19 19:50:28 -07:00
|
|
|
}
|
2025-09-30 12:03:32 -07:00
|
|
|
ClientRequest::GetUserSavedConfig {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
2025-09-04 16:26:41 -07:00
|
|
|
self.get_user_saved_config(request_id).await;
|
2025-08-27 09:59:03 -07:00
|
|
|
}
|
2025-09-11 23:44:17 -07:00
|
|
|
ClientRequest::SetDefaultModel { request_id, params } => {
|
|
|
|
|
self.set_default_model(request_id, params).await;
|
|
|
|
|
}
|
2025-09-30 12:03:32 -07:00
|
|
|
ClientRequest::GetUserAgent {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
2025-09-08 10:30:13 -07:00
|
|
|
self.get_user_agent(request_id).await;
|
|
|
|
|
}
|
2025-09-30 12:03:32 -07:00
|
|
|
ClientRequest::UserInfo {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
2025-09-10 17:03:35 -07:00
|
|
|
self.get_user_info(request_id).await;
|
|
|
|
|
}
|
2025-09-29 12:19:09 -07:00
|
|
|
ClientRequest::FuzzyFileSearch { request_id, params } => {
|
|
|
|
|
self.fuzzy_file_search(request_id, params).await;
|
|
|
|
|
}
|
2025-09-03 17:05:03 -07:00
|
|
|
ClientRequest::ExecOneOffCommand { request_id, params } => {
|
|
|
|
|
self.exec_one_off_command(request_id, params).await;
|
|
|
|
|
}
|
2025-10-20 14:11:54 -07:00
|
|
|
ClientRequest::GetAccountRateLimits {
|
|
|
|
|
request_id,
|
|
|
|
|
params: _,
|
|
|
|
|
} => {
|
|
|
|
|
self.get_account_rate_limits(request_id).await;
|
|
|
|
|
}
|
2025-10-26 22:53:39 -07:00
|
|
|
ClientRequest::UploadFeedback { request_id, params } => {
|
|
|
|
|
self.upload_feedback(request_id, params).await;
|
|
|
|
|
}
|
2025-08-17 10:03:52 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-10-22 15:36:11 -07:00
|
|
|
async fn send_unimplemented_error(&self, request_id: RequestId, method: &str) {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("{method} is not implemented yet"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-11 09:16:34 -07:00
|
|
|
/// Handle a `loginApiKey` request: persist the provided API key and notify
/// clients of the resulting auth state.
///
/// Rejects the request when the configuration forces ChatGPT login; otherwise
/// cancels any in-flight ChatGPT login attempt, saves the key, reloads the
/// in-memory auth cache, sends the success response, and finally emits an
/// `AuthStatusChange` notification with the current auth mode.
async fn login_api_key(&mut self, request_id: RequestId, params: LoginApiKeyParams) {
    // Honor the config-level restriction: if ChatGPT login is forced,
    // API-key login must be refused as an invalid request.
    if matches!(
        self.config.forced_login_method,
        Some(ForcedLoginMethod::Chatgpt)
    ) {
        let error = JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: "API key login is disabled. Use ChatGPT login instead.".to_string(),
            data: None,
        };
        self.outgoing.send_error(request_id, error).await;
        return;
    }

    // Cancel any active ChatGPT login attempt before switching to an API key.
    // The extra scope releases the lock before the (potentially slow) save.
    {
        let mut guard = self.active_login.lock().await;
        if let Some(active) = guard.take() {
            active.drop();
        }
    }

    match login_with_api_key(
        &self.config.codex_home,
        &params.api_key,
        self.config.cli_auth_credentials_store_mode,
    ) {
        Ok(()) => {
            // Refresh the cached auth so subsequent reads see the new key.
            self.auth_manager.reload();
            self.outgoing
                .send_response(request_id, LoginApiKeyResponse {})
                .await;

            // Broadcast the new auth mode to all listeners after the
            // response, mirroring the ordering used by the ChatGPT flow.
            let payload = AuthStatusChangeNotification {
                auth_method: self.auth_manager.auth().map(|auth| auth.mode),
            };
            self.outgoing
                .send_server_notification(ServerNotification::AuthStatusChange(payload))
                .await;
        }
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to save api key: {err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
        }
    }
}
|
|
|
|
|
|
2025-08-17 10:03:52 -07:00
|
|
|
/// Handle a `loginChatGpt` request: start a local OAuth login server and
/// return its auth URL to the client.
///
/// Rejects the request when the configuration forces API-key login. On
/// success the new attempt replaces any previously active login, the
/// response carries a fresh `login_id` plus the browser `auth_url`, and a
/// background task waits (with a timeout) for the flow to finish, emitting
/// `LoginChatGptComplete` and — on success — `AuthStatusChange`
/// notifications before clearing the active-login slot.
async fn login_chatgpt(&mut self, request_id: RequestId) {
    let config = self.config.as_ref();

    // Honor the config-level restriction: if API-key login is forced,
    // ChatGPT login must be refused as an invalid request.
    if matches!(config.forced_login_method, Some(ForcedLoginMethod::Api)) {
        let error = JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: "ChatGPT login is disabled. Use API key login instead.".to_string(),
            data: None,
        };
        self.outgoing.send_error(request_id, error).await;
        return;
    }

    // The app-server client drives the browser itself, so the login server
    // must not try to open one.
    let opts = LoginServerOptions {
        open_browser: false,
        ..LoginServerOptions::new(
            config.codex_home.clone(),
            CLIENT_ID.to_string(),
            config.forced_chatgpt_workspace_id.clone(),
            config.cli_auth_credentials_store_mode,
        )
    };

    // Local helper type so the reply can be computed in one expression and
    // sent at the end regardless of which branch produced it.
    enum LoginChatGptReply {
        Response(LoginChatGptResponse),
        Error(JSONRPCErrorError),
    }

    let reply = match run_login_server(opts) {
        Ok(server) => {
            let login_id = Uuid::new_v4();
            let shutdown_handle = server.cancel_handle();

            // Replace active login if present.
            {
                let mut guard = self.active_login.lock().await;
                if let Some(existing) = guard.take() {
                    existing.drop();
                }
                *guard = Some(ActiveLogin {
                    shutdown_handle: shutdown_handle.clone(),
                    login_id,
                });
            }

            let response = LoginChatGptResponse {
                login_id,
                auth_url: server.auth_url.clone(),
            };

            // Spawn background task to monitor completion.
            let outgoing_clone = self.outgoing.clone();
            let active_login = self.active_login.clone();
            let auth_manager = self.auth_manager.clone();
            tokio::spawn(async move {
                // Wait for the login flow to finish, bounded by
                // LOGIN_CHATGPT_TIMEOUT so an abandoned browser tab cannot
                // leave the server running forever.
                let (success, error_msg) = match tokio::time::timeout(
                    LOGIN_CHATGPT_TIMEOUT,
                    server.block_until_done(),
                )
                .await
                {
                    Ok(Ok(())) => (true, None),
                    Ok(Err(err)) => (false, Some(format!("Login server error: {err}"))),
                    Err(_elapsed) => {
                        // Timeout: cancel server and report
                        shutdown_handle.shutdown();
                        (false, Some("Login timed out".to_string()))
                    }
                };
                let payload = LoginChatGptCompleteNotification {
                    login_id,
                    success,
                    error: error_msg,
                };
                outgoing_clone
                    .send_server_notification(ServerNotification::LoginChatGptComplete(payload))
                    .await;

                // Send an auth status change notification.
                if success {
                    // Update in-memory auth cache now that login completed.
                    auth_manager.reload();

                    // Notify clients with the actual current auth mode.
                    let current_auth_method = auth_manager.auth().map(|a| a.mode);
                    let payload = AuthStatusChangeNotification {
                        auth_method: current_auth_method,
                    };
                    outgoing_clone
                        .send_server_notification(ServerNotification::AuthStatusChange(payload))
                        .await;
                }

                // Clear the active login if it matches this attempt. It may have been replaced or cancelled.
                let mut guard = active_login.lock().await;
                if guard.as_ref().map(|l| l.login_id) == Some(login_id) {
                    *guard = None;
                }
            });

            LoginChatGptReply::Response(response)
        }
        Err(err) => LoginChatGptReply::Error(JSONRPCErrorError {
            code: INTERNAL_ERROR_CODE,
            message: format!("failed to start login server: {err}"),
            data: None,
        }),
    };

    match reply {
        LoginChatGptReply::Response(resp) => {
            self.outgoing.send_response(request_id, resp).await
        }
        LoginChatGptReply::Error(err) => self.outgoing.send_error(request_id, err).await,
    }
}
|
|
|
|
|
|
|
|
|
|
async fn cancel_login_chatgpt(&mut self, request_id: RequestId, login_id: Uuid) {
|
|
|
|
|
let mut guard = self.active_login.lock().await;
|
|
|
|
|
if guard.as_ref().map(|l| l.login_id) == Some(login_id) {
|
|
|
|
|
if let Some(active) = guard.take() {
|
|
|
|
|
active.drop();
|
|
|
|
|
}
|
|
|
|
|
drop(guard);
|
|
|
|
|
self.outgoing
|
|
|
|
|
.send_response(
|
|
|
|
|
request_id,
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
codex_app_server_protocol::CancelLoginChatGptResponse {},
|
2025-08-17 10:03:52 -07:00
|
|
|
)
|
|
|
|
|
.await;
|
|
|
|
|
} else {
|
|
|
|
|
drop(guard);
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("login id not found: {login_id}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-20 20:36:34 -07:00
|
|
|
async fn logout_chatgpt(&mut self, request_id: RequestId) {
|
|
|
|
|
{
|
|
|
|
|
// Cancel any active login attempt.
|
|
|
|
|
let mut guard = self.active_login.lock().await;
|
|
|
|
|
if let Some(active) = guard.take() {
|
|
|
|
|
active.drop();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-22 13:10:11 -07:00
|
|
|
if let Err(err) = self.auth_manager.logout() {
|
2025-08-20 20:36:34 -07:00
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("logout failed: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
self.outgoing
|
|
|
|
|
.send_response(
|
|
|
|
|
request_id,
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
codex_app_server_protocol::LogoutChatGptResponse {},
|
2025-08-20 20:36:34 -07:00
|
|
|
)
|
|
|
|
|
.await;
|
|
|
|
|
|
2025-08-22 13:10:11 -07:00
|
|
|
// Send auth status change notification reflecting the current auth mode
|
2025-09-11 09:16:34 -07:00
|
|
|
// after logout.
|
2025-08-22 13:10:11 -07:00
|
|
|
let current_auth_method = self.auth_manager.auth().map(|auth| auth.mode);
|
|
|
|
|
let payload = AuthStatusChangeNotification {
|
|
|
|
|
auth_method: current_auth_method,
|
|
|
|
|
};
|
2025-08-20 20:36:34 -07:00
|
|
|
self.outgoing
|
|
|
|
|
.send_server_notification(ServerNotification::AuthStatusChange(payload))
|
|
|
|
|
.await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-22 13:10:11 -07:00
|
|
|
/// Handle a `getAuthStatus` request, reporting the active auth method and
/// (optionally) the current token.
///
/// `params.refresh_token` triggers a best-effort token refresh first;
/// refresh failures are logged and do not fail the request.
/// `params.include_token` controls whether the raw token is included in the
/// response.
async fn get_auth_status(
    &self,
    request_id: RequestId,
    params: codex_app_server_protocol::GetAuthStatusParams,
) {
    // Both params are optional on the wire and default to `false`.
    let include_token = params.include_token.unwrap_or(false);
    let do_refresh = params.refresh_token.unwrap_or(false);

    // Best-effort refresh: a failure is logged but the status request
    // still proceeds with whatever auth state is cached.
    if do_refresh && let Err(err) = self.auth_manager.refresh_token().await {
        tracing::warn!("failed to refresh token while getting auth status: {err}");
    }

    // Determine whether auth is required based on the active model provider.
    // If a custom provider is configured with `requires_openai_auth == false`,
    // then no auth step is required; otherwise, default to requiring auth.
    let requires_openai_auth = self.config.model_provider.requires_openai_auth;

    let response = if !requires_openai_auth {
        codex_app_server_protocol::GetAuthStatusResponse {
            auth_method: None,
            auth_token: None,
            requires_openai_auth: Some(false),
        }
    } else {
        match self.auth_manager.auth() {
            Some(auth) => {
                let auth_mode = auth.mode;
                // Only report an auth method when a usable (non-empty)
                // token is actually available; token retrieval errors
                // degrade to "no auth" rather than failing the request.
                let (reported_auth_method, token_opt) = match auth.get_token().await {
                    Ok(token) if !token.is_empty() => {
                        let tok = if include_token { Some(token) } else { None };
                        (Some(auth_mode), tok)
                    }
                    Ok(_) => (None, None),
                    Err(err) => {
                        tracing::warn!("failed to get token for auth status: {err}");
                        (None, None)
                    }
                };
                codex_app_server_protocol::GetAuthStatusResponse {
                    auth_method: reported_auth_method,
                    auth_token: token_opt,
                    requires_openai_auth: Some(true),
                }
            }
            // No cached auth at all: auth is required but absent.
            None => codex_app_server_protocol::GetAuthStatusResponse {
                auth_method: None,
                auth_token: None,
                requires_openai_auth: Some(true),
            },
        }
    };

    self.outgoing.send_response(request_id, response).await;
}
|
|
|
|
|
|
2025-09-08 10:30:13 -07:00
|
|
|
async fn get_user_agent(&self, request_id: RequestId) {
|
2025-09-09 14:23:23 -07:00
|
|
|
let user_agent = get_codex_user_agent();
|
2025-09-08 10:30:13 -07:00
|
|
|
let response = GetUserAgentResponse { user_agent };
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-10-20 14:11:54 -07:00
|
|
|
async fn get_account_rate_limits(&self, request_id: RequestId) {
|
|
|
|
|
match self.fetch_account_rate_limits().await {
|
|
|
|
|
Ok(rate_limits) => {
|
|
|
|
|
let response = GetAccountRateLimitsResponse { rate_limits };
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
Err(error) => {
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async fn fetch_account_rate_limits(&self) -> Result<RateLimitSnapshot, JSONRPCErrorError> {
|
|
|
|
|
let Some(auth) = self.auth_manager.auth() else {
|
|
|
|
|
return Err(JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: "codex account authentication required to read rate limits".to_string(),
|
|
|
|
|
data: None,
|
|
|
|
|
});
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if auth.mode != AuthMode::ChatGPT {
|
|
|
|
|
return Err(JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: "chatgpt authentication required to read rate limits".to_string(),
|
|
|
|
|
data: None,
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let client = BackendClient::from_auth(self.config.chatgpt_base_url.clone(), &auth)
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|err| JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("failed to construct backend client: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
client
|
|
|
|
|
.get_rate_limits()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|err| JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("failed to fetch codex rate limits: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-04 16:26:41 -07:00
|
|
|
async fn get_user_saved_config(&self, request_id: RequestId) {
|
2025-10-03 13:02:26 -07:00
|
|
|
let toml_value = match load_config_as_toml(&self.config.codex_home).await {
|
2025-08-27 09:59:03 -07:00
|
|
|
Ok(val) => val,
|
|
|
|
|
Err(err) => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("failed to load config.toml: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let cfg: ConfigToml = match toml_value.try_into() {
|
|
|
|
|
Ok(cfg) => cfg,
|
|
|
|
|
Err(err) => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("failed to parse config.toml: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2025-09-04 16:26:41 -07:00
|
|
|
let user_saved_config: UserSavedConfig = cfg.into();
|
2025-08-27 09:59:03 -07:00
|
|
|
|
2025-09-04 16:26:41 -07:00
|
|
|
let response = GetUserSavedConfigResponse {
|
|
|
|
|
config: user_saved_config,
|
|
|
|
|
};
|
2025-08-27 09:59:03 -07:00
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-10 17:03:35 -07:00
|
|
|
async fn get_user_info(&self, request_id: RequestId) {
|
2025-10-27 11:01:14 -07:00
|
|
|
// Read alleged user email from cached auth (best-effort; not verified).
|
|
|
|
|
let alleged_user_email = self.auth_manager.auth().and_then(|a| a.get_account_email());
|
2025-09-10 17:03:35 -07:00
|
|
|
|
|
|
|
|
let response = UserInfoResponse { alleged_user_email };
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-11 23:44:17 -07:00
|
|
|
async fn set_default_model(&self, request_id: RequestId, params: SetDefaultModelParams) {
|
|
|
|
|
let SetDefaultModelParams {
|
|
|
|
|
model,
|
|
|
|
|
reasoning_effort,
|
|
|
|
|
} = params;
|
|
|
|
|
let effort_str = reasoning_effort.map(|effort| effort.to_string());
|
|
|
|
|
|
|
|
|
|
let overrides: [(&[&str], Option<&str>); 2] = [
|
|
|
|
|
(&[CONFIG_KEY_MODEL], model.as_deref()),
|
|
|
|
|
(&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
|
|
|
|
|
];
|
|
|
|
|
|
2025-09-12 11:35:51 -07:00
|
|
|
match persist_overrides_and_clear_if_none(
|
2025-09-11 23:44:17 -07:00
|
|
|
&self.config.codex_home,
|
|
|
|
|
self.config.active_profile.as_deref(),
|
|
|
|
|
&overrides,
|
|
|
|
|
)
|
|
|
|
|
.await
|
|
|
|
|
{
|
|
|
|
|
Ok(()) => {
|
|
|
|
|
let response = SetDefaultModelResponse {};
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
Err(err) => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("failed to persist overrides: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-03 17:05:03 -07:00
|
|
|
/// Handle an `execOneOffCommand` request: run a single command under the
/// configured (or request-supplied) sandbox policy and reply with its exit
/// code and captured stdout/stderr.
///
/// Rejects an empty command list up front. The actual execution runs in a
/// spawned task so this handler returns immediately; the response or error
/// is sent from that task when the command finishes.
async fn exec_one_off_command(&self, request_id: RequestId, params: ExecOneOffCommandParams) {
    tracing::debug!("ExecOneOffCommand params: {params:?}");

    if params.command.is_empty() {
        let error = JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: "command must not be empty".to_string(),
            data: None,
        };
        self.outgoing.send_error(request_id, error).await;
        return;
    }

    // Request-level cwd wins; otherwise fall back to the configured cwd.
    let cwd = params.cwd.unwrap_or_else(|| self.config.cwd.clone());
    let env = create_env(&self.config.shell_environment_policy);
    let timeout_ms = params.timeout_ms;
    let exec_params = ExecParams {
        command: params.command,
        cwd,
        timeout_ms,
        env,
        with_escalated_permissions: None,
        justification: None,
        arg0: None,
    };

    // A sandbox policy supplied on the request overrides the configured one.
    let effective_policy = params
        .sandbox_policy
        .unwrap_or_else(|| self.config.sandbox_policy.clone());

    // Full-access runs with no sandbox; any other policy uses the platform
    // sandbox when one is available, falling back to none otherwise.
    let sandbox_type = match &effective_policy {
        codex_core::protocol::SandboxPolicy::DangerFullAccess => {
            codex_core::exec::SandboxType::None
        }
        _ => get_platform_sandbox().unwrap_or(codex_core::exec::SandboxType::None),
    };
    tracing::debug!("Sandbox type: {sandbox_type:?}");
    // Clone everything the spawned task needs so it can be 'static.
    let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
    let outgoing = self.outgoing.clone();
    let req_id = request_id;
    let sandbox_cwd = self.config.cwd.clone();

    tokio::spawn(async move {
        match codex_core::exec::process_exec_tool_call(
            exec_params,
            sandbox_type,
            &effective_policy,
            sandbox_cwd.as_path(),
            &codex_linux_sandbox_exe,
            None,
        )
        .await
        {
            Ok(output) => {
                let response = ExecOneOffCommandResponse {
                    exit_code: output.exit_code,
                    stdout: output.stdout.text,
                    stderr: output.stderr.text,
                };
                outgoing.send_response(req_id, response).await;
            }
            Err(err) => {
                let error = JSONRPCErrorError {
                    code: INTERNAL_ERROR_CODE,
                    message: format!("exec failed: {err}"),
                    data: None,
                };
                outgoing.send_error(req_id, error).await;
            }
        }
    });
}
|
|
|
|
|
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
async fn process_new_conversation(&self, request_id: RequestId, params: NewConversationParams) {
|
2025-10-03 13:02:26 -07:00
|
|
|
let config =
|
|
|
|
|
match derive_config_from_params(params, self.codex_linux_sandbox_exe.clone()).await {
|
|
|
|
|
Ok(config) => config,
|
|
|
|
|
Err(err) => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("error deriving config: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
|
|
|
|
|
match self.conversation_manager.new_conversation(config).await {
|
|
|
|
|
Ok(conversation_id) => {
|
|
|
|
|
let NewConversation {
|
|
|
|
|
conversation_id,
|
|
|
|
|
session_configured,
|
|
|
|
|
..
|
|
|
|
|
} = conversation_id;
|
|
|
|
|
let response = NewConversationResponse {
|
2025-09-07 20:22:25 -07:00
|
|
|
conversation_id,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
model: session_configured.model,
|
2025-09-11 21:04:40 -07:00
|
|
|
reasoning_effort: session_configured.reasoning_effort,
|
2025-09-09 00:11:48 -07:00
|
|
|
rollout_path: session_configured.rollout_path,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
};
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
Err(err) => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("error creating conversation: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-10-27 09:11:45 -07:00
|
|
|
/// Handle a `getConversationSummary` request.
///
/// Resolves the rollout file either from an explicit path (relative paths are
/// resolved against `codex_home`) or by looking the conversation id up under
/// `codex_home`, then reads the summary out of that rollout file and replies
/// with it (or a JSON-RPC error).
async fn get_conversation_summary(
    &self,
    request_id: RequestId,
    params: GetConversationSummaryParams,
) {
    let path = match params {
        GetConversationSummaryParams::RolloutPath { rollout_path } => {
            // Relative paths are interpreted relative to the Codex home dir.
            if rollout_path.is_relative() {
                self.config.codex_home.join(&rollout_path)
            } else {
                rollout_path
            }
        }
        GetConversationSummaryParams::ConversationId { conversation_id } => {
            match codex_core::find_conversation_path_by_id_str(
                &self.config.codex_home,
                &conversation_id.to_string(),
            )
            .await
            {
                Ok(Some(p)) => p,
                // Both "not found" (Ok(None)) and lookup errors are reported
                // to the client as an invalid request.
                _ => {
                    let error = JSONRPCErrorError {
                        code: INVALID_REQUEST_ERROR_CODE,
                        message: format!(
                            "no rollout found for conversation id {conversation_id}"
                        ),
                        data: None,
                    };
                    self.outgoing.send_error(request_id, error).await;
                    return;
                }
            }
        }
    };

    // Used when the rollout itself does not record a model provider.
    let fallback_provider = self.config.model_provider_id.as_str();

    match read_summary_from_rollout(&path, fallback_provider).await {
        Ok(summary) => {
            let response = GetConversationSummaryResponse { summary };
            self.outgoing.send_response(request_id, response).await;
        }
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!(
                    "failed to load conversation summary from {}: {}",
                    path.display(),
                    err
                ),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
        }
    }
}
|
|
|
|
|
|
2025-09-04 16:44:18 -07:00
|
|
|
async fn handle_list_conversations(
|
|
|
|
|
&self,
|
|
|
|
|
request_id: RequestId,
|
|
|
|
|
params: ListConversationsParams,
|
|
|
|
|
) {
|
2025-10-27 02:03:30 -07:00
|
|
|
let ListConversationsParams {
|
|
|
|
|
page_size,
|
|
|
|
|
cursor,
|
|
|
|
|
model_providers: model_provider,
|
|
|
|
|
} = params;
|
|
|
|
|
let page_size = page_size.unwrap_or(25);
|
2025-09-04 16:44:18 -07:00
|
|
|
// Decode the optional cursor string to a Cursor via serde (Cursor implements Deserialize from string)
|
2025-10-27 02:03:30 -07:00
|
|
|
let cursor_obj: Option<RolloutCursor> = match cursor {
|
2025-09-04 16:44:18 -07:00
|
|
|
Some(s) => serde_json::from_str::<RolloutCursor>(&format!("\"{s}\"")).ok(),
|
|
|
|
|
None => None,
|
|
|
|
|
};
|
|
|
|
|
let cursor_ref = cursor_obj.as_ref();
|
2025-10-27 02:03:30 -07:00
|
|
|
let model_provider_filter = match model_provider {
|
|
|
|
|
Some(providers) => {
|
|
|
|
|
if providers.is_empty() {
|
|
|
|
|
None
|
|
|
|
|
} else {
|
|
|
|
|
Some(providers)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
None => Some(vec![self.config.model_provider_id.clone()]),
|
|
|
|
|
};
|
|
|
|
|
let model_provider_slice = model_provider_filter.as_deref();
|
|
|
|
|
let fallback_provider = self.config.model_provider_id.clone();
|
2025-09-04 16:44:18 -07:00
|
|
|
|
|
|
|
|
let page = match RolloutRecorder::list_conversations(
|
|
|
|
|
&self.config.codex_home,
|
|
|
|
|
page_size,
|
|
|
|
|
cursor_ref,
|
2025-10-02 13:06:21 -07:00
|
|
|
INTERACTIVE_SESSION_SOURCES,
|
2025-10-27 02:03:30 -07:00
|
|
|
model_provider_slice,
|
|
|
|
|
fallback_provider.as_str(),
|
2025-09-04 16:44:18 -07:00
|
|
|
)
|
|
|
|
|
.await
|
|
|
|
|
{
|
|
|
|
|
Ok(p) => p,
|
|
|
|
|
Err(err) => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INTERNAL_ERROR_CODE,
|
|
|
|
|
message: format!("failed to list conversations: {err}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2025-09-08 14:54:47 -07:00
|
|
|
let items = page
|
|
|
|
|
.items
|
|
|
|
|
.into_iter()
|
2025-10-27 02:03:30 -07:00
|
|
|
.filter_map(|it| extract_conversation_summary(it.path, &it.head, &fallback_provider))
|
2025-09-08 14:54:47 -07:00
|
|
|
.collect();
|
2025-09-04 16:44:18 -07:00
|
|
|
|
|
|
|
|
// Encode next_cursor as a plain string
|
|
|
|
|
let next_cursor = match page.next_cursor {
|
|
|
|
|
Some(c) => match serde_json::to_value(&c) {
|
|
|
|
|
Ok(serde_json::Value::String(s)) => Some(s),
|
|
|
|
|
_ => None,
|
|
|
|
|
},
|
|
|
|
|
None => None,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let response = ListConversationsResponse { items, next_cursor };
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-10-21 11:15:17 -07:00
|
|
|
async fn list_models(&self, request_id: RequestId, params: ListModelsParams) {
|
|
|
|
|
let ListModelsParams { page_size, cursor } = params;
|
|
|
|
|
let models = supported_models();
|
|
|
|
|
let total = models.len();
|
|
|
|
|
|
|
|
|
|
if total == 0 {
|
|
|
|
|
let response = ListModelsResponse {
|
|
|
|
|
items: Vec::new(),
|
|
|
|
|
next_cursor: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let effective_page_size = page_size.unwrap_or(total).max(1).min(total);
|
|
|
|
|
let start = match cursor {
|
|
|
|
|
Some(cursor) => match cursor.parse::<usize>() {
|
|
|
|
|
Ok(idx) => idx,
|
|
|
|
|
Err(_) => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("invalid cursor: {cursor}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
},
|
|
|
|
|
None => 0,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if start > total {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("cursor {start} exceeds total models {total}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let end = start.saturating_add(effective_page_size).min(total);
|
|
|
|
|
let items = models[start..end].to_vec();
|
|
|
|
|
let next_cursor = if end < total {
|
|
|
|
|
Some(end.to_string())
|
|
|
|
|
} else {
|
|
|
|
|
None
|
|
|
|
|
};
|
|
|
|
|
let response = ListModelsResponse { items, next_cursor };
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-04 16:44:18 -07:00
|
|
|
/// Handle a `resumeConversation` request.
///
/// Locates the rollout file (from an explicit path or by conversation id),
/// rebuilds a `Config` (from the request's overrides when given, otherwise a
/// clone of the server's config), resumes the conversation from the rollout,
/// emits a `SessionConfigured` notification, and finally replies with the
/// conversation id, model, initial messages, and rollout path.
async fn handle_resume_conversation(
    &self,
    request_id: RequestId,
    params: ResumeConversationParams,
) {
    // An explicit path wins over a conversation id; at least one of the two
    // must be provided. Note this partially moves out of `params`; the
    // `overrides` field is still accessed further below.
    let path = match params {
        ResumeConversationParams {
            path: Some(path), ..
        } => path,
        ResumeConversationParams {
            conversation_id: Some(conversation_id),
            ..
        } => {
            match codex_core::find_conversation_path_by_id_str(
                &self.config.codex_home,
                &conversation_id.to_string(),
            )
            .await
            {
                Ok(Some(p)) => p,
                // Both "not found" and lookup errors surface as an invalid
                // request.
                _ => {
                    let error = JSONRPCErrorError {
                        code: INVALID_REQUEST_ERROR_CODE,
                        message: "unable to locate rollout path".to_string(),
                        data: None,
                    };
                    self.outgoing.send_error(request_id, error).await;
                    return;
                }
            }
        }
        _ => {
            let error = JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: "either path or conversation id must be provided".to_string(),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        }
    };

    // Derive a Config using the same logic as new conversation, honoring overrides if provided.
    let config = match params.overrides {
        Some(overrides) => {
            derive_config_from_params(overrides, self.codex_linux_sandbox_exe.clone()).await
        }
        None => Ok(self.config.as_ref().clone()),
    };
    let config = match config {
        Ok(cfg) => cfg,
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("error deriving config: {err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        }
    };

    match self
        .conversation_manager
        .resume_conversation_from_rollout(config, path.clone(), self.auth_manager.clone())
        .await
    {
        Ok(NewConversation {
            conversation_id,
            session_configured,
            ..
        }) => {
            // Notify listeners that the session is configured *before*
            // sending the request's response.
            self.outgoing
                .send_server_notification(ServerNotification::SessionConfigured(
                    SessionConfiguredNotification {
                        session_id: session_configured.session_id,
                        model: session_configured.model.clone(),
                        reasoning_effort: session_configured.reasoning_effort,
                        history_log_id: session_configured.history_log_id,
                        history_entry_count: session_configured.history_entry_count,
                        initial_messages: session_configured.initial_messages.clone(),
                        rollout_path: session_configured.rollout_path.clone(),
                    },
                ))
                .await;
            let initial_messages = session_configured
                .initial_messages
                .map(|msgs| msgs.into_iter().collect());

            // Reply with conversation id + model and initial messages (when present)
            let response = codex_app_server_protocol::ResumeConversationResponse {
                conversation_id,
                model: session_configured.model.clone(),
                initial_messages,
                rollout_path: session_configured.rollout_path.clone(),
            };
            self.outgoing.send_response(request_id, response).await;
        }
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("error resuming conversation: {err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
        }
    }
}
|
|
|
|
|
|
2025-09-09 08:39:00 -07:00
|
|
|
/// Handle an `archiveConversation` request.
///
/// Validates that the supplied rollout path really lives in the sessions
/// directory and matches the conversation id, gracefully shuts down the
/// conversation if it is still active (with a 10s timeout), and then moves
/// the rollout `.jsonl` file into the archived-sessions directory.
async fn archive_conversation(&self, request_id: RequestId, params: ArchiveConversationParams) {
    let ArchiveConversationParams {
        conversation_id,
        rollout_path,
    } = params;

    // Verify that the rollout path is in the sessions directory or else
    // a malicious client could specify an arbitrary path.
    let rollout_folder = self.config.codex_home.join(codex_core::SESSIONS_SUBDIR);
    // Canonicalize to defeat `..`/symlink tricks before the prefix check.
    let canonical_rollout_path = tokio::fs::canonicalize(&rollout_path).await;
    let canonical_rollout_path = if let Ok(path) = canonical_rollout_path
        && path.starts_with(&rollout_folder)
    {
        path
    } else {
        let error = JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!(
                "rollout path `{}` must be in sessions directory",
                rollout_path.display()
            ),
            data: None,
        };
        self.outgoing.send_error(request_id, error).await;
        return;
    };

    // The file must be named `<...><conversation_id>.jsonl` so a client
    // cannot archive one conversation's file under another's id.
    let required_suffix = format!("{conversation_id}.jsonl");
    let Some(file_name) = canonical_rollout_path.file_name().map(OsStr::to_owned) else {
        let error = JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!(
                "rollout path `{}` missing file name",
                rollout_path.display()
            ),
            data: None,
        };
        self.outgoing.send_error(request_id, error).await;
        return;
    };

    if !file_name
        .to_string_lossy()
        .ends_with(required_suffix.as_str())
    {
        let error = JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!(
                "rollout path `{}` does not match conversation id {conversation_id}",
                rollout_path.display()
            ),
            data: None,
        };
        self.outgoing.send_error(request_id, error).await;
        return;
    }

    // If the conversation is still live, take it out of the manager and shut
    // it down before touching its rollout file.
    let removed_conversation = self
        .conversation_manager
        .remove_conversation(&conversation_id)
        .await;
    if let Some(conversation) = removed_conversation {
        info!("conversation {conversation_id} was active; shutting down");
        let conversation_clone = conversation.clone();
        // `notify` lets the main task cancel the listener task on timeout or
        // submission failure.
        let notify = Arc::new(tokio::sync::Notify::new());
        let notify_clone = notify.clone();

        // Establish the listener for ShutdownComplete before submitting
        // Shutdown so it is not missed.
        let is_shutdown = tokio::spawn(async move {
            loop {
                select! {
                    _ = notify_clone.notified() => {
                        break;
                    }
                    event = conversation_clone.next_event() => {
                        if let Ok(event) = event && matches!(event.msg, EventMsg::ShutdownComplete) {
                            break;
                        }
                    }
                }
            }
        });

        // Request shutdown.
        match conversation.submit(Op::Shutdown).await {
            Ok(_) => {
                // Successfully submitted Shutdown; wait before proceeding.
                select! {
                    _ = is_shutdown => {
                        // Normal shutdown: proceed with archive.
                    }
                    _ = tokio::time::sleep(Duration::from_secs(10)) => {
                        warn!("conversation {conversation_id} shutdown timed out; proceeding with archive");
                        notify.notify_one();
                    }
                }
            }
            Err(err) => {
                error!("failed to submit Shutdown to conversation {conversation_id}: {err}");
                notify.notify_one();
                // Perhaps we lost a shutdown race, so let's continue to
                // clean up the .jsonl file.
            }
        }
    }

    // Move the .jsonl file to the archived sessions subdir.
    let result: std::io::Result<()> = async {
        let archive_folder = self
            .config
            .codex_home
            .join(codex_core::ARCHIVED_SESSIONS_SUBDIR);
        tokio::fs::create_dir_all(&archive_folder).await?;
        tokio::fs::rename(&canonical_rollout_path, &archive_folder.join(&file_name)).await?;
        Ok(())
    }
    .await;

    match result {
        Ok(()) => {
            let response = ArchiveConversationResponse {};
            self.outgoing.send_response(request_id, response).await;
        }
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to archive conversation: {err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
        }
    }
}
|
|
|
|
|
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
async fn send_user_message(&self, request_id: RequestId, params: SendUserMessageParams) {
|
|
|
|
|
let SendUserMessageParams {
|
|
|
|
|
conversation_id,
|
|
|
|
|
items,
|
|
|
|
|
} = params;
|
|
|
|
|
let Ok(conversation) = self
|
|
|
|
|
.conversation_manager
|
2025-09-07 20:22:25 -07:00
|
|
|
.get_conversation(conversation_id)
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
.await
|
|
|
|
|
else {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("conversation not found: {conversation_id}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let mapped_items: Vec<CoreInputItem> = items
|
|
|
|
|
.into_iter()
|
|
|
|
|
.map(|item| match item {
|
|
|
|
|
WireInputItem::Text { text } => CoreInputItem::Text { text },
|
|
|
|
|
WireInputItem::Image { image_url } => CoreInputItem::Image { image_url },
|
|
|
|
|
WireInputItem::LocalImage { path } => CoreInputItem::LocalImage { path },
|
|
|
|
|
})
|
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
|
|
// Submit user input to the conversation.
|
|
|
|
|
let _ = conversation
|
|
|
|
|
.submit(Op::UserInput {
|
|
|
|
|
items: mapped_items,
|
|
|
|
|
})
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
// Acknowledge with an empty result.
|
|
|
|
|
self.outgoing
|
|
|
|
|
.send_response(request_id, SendUserMessageResponse {})
|
|
|
|
|
.await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-15 10:05:58 -07:00
|
|
|
async fn send_user_turn(&self, request_id: RequestId, params: SendUserTurnParams) {
|
|
|
|
|
let SendUserTurnParams {
|
|
|
|
|
conversation_id,
|
|
|
|
|
items,
|
|
|
|
|
cwd,
|
|
|
|
|
approval_policy,
|
|
|
|
|
sandbox_policy,
|
|
|
|
|
model,
|
|
|
|
|
effort,
|
|
|
|
|
summary,
|
|
|
|
|
} = params;
|
|
|
|
|
|
|
|
|
|
let Ok(conversation) = self
|
|
|
|
|
.conversation_manager
|
2025-09-07 20:22:25 -07:00
|
|
|
.get_conversation(conversation_id)
|
2025-08-15 10:05:58 -07:00
|
|
|
.await
|
|
|
|
|
else {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("conversation not found: {conversation_id}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let mapped_items: Vec<CoreInputItem> = items
|
|
|
|
|
.into_iter()
|
|
|
|
|
.map(|item| match item {
|
|
|
|
|
WireInputItem::Text { text } => CoreInputItem::Text { text },
|
|
|
|
|
WireInputItem::Image { image_url } => CoreInputItem::Image { image_url },
|
|
|
|
|
WireInputItem::LocalImage { path } => CoreInputItem::LocalImage { path },
|
|
|
|
|
})
|
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
|
|
let _ = conversation
|
|
|
|
|
.submit(Op::UserTurn {
|
|
|
|
|
items: mapped_items,
|
|
|
|
|
cwd,
|
|
|
|
|
approval_policy,
|
|
|
|
|
sandbox_policy,
|
|
|
|
|
model,
|
|
|
|
|
effort,
|
|
|
|
|
summary,
|
2025-09-23 13:59:16 -07:00
|
|
|
final_output_json_schema: None,
|
2025-08-15 10:05:58 -07:00
|
|
|
})
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
self.outgoing
|
|
|
|
|
.send_response(request_id, SendUserTurnResponse {})
|
|
|
|
|
.await;
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-13 23:12:03 -07:00
|
|
|
async fn interrupt_conversation(
|
|
|
|
|
&mut self,
|
|
|
|
|
request_id: RequestId,
|
|
|
|
|
params: InterruptConversationParams,
|
|
|
|
|
) {
|
|
|
|
|
let InterruptConversationParams { conversation_id } = params;
|
|
|
|
|
let Ok(conversation) = self
|
|
|
|
|
.conversation_manager
|
2025-09-07 20:22:25 -07:00
|
|
|
.get_conversation(conversation_id)
|
2025-08-13 23:12:03 -07:00
|
|
|
.await
|
|
|
|
|
else {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("conversation not found: {conversation_id}"),
|
|
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
};
|
|
|
|
|
|
2025-08-17 21:40:31 -07:00
|
|
|
// Record the pending interrupt so we can reply when TurnAborted arrives.
|
|
|
|
|
{
|
|
|
|
|
let mut map = self.pending_interrupts.lock().await;
|
2025-09-07 20:22:25 -07:00
|
|
|
map.entry(conversation_id).or_default().push(request_id);
|
2025-08-17 21:40:31 -07:00
|
|
|
}
|
2025-08-13 23:12:03 -07:00
|
|
|
|
2025-08-17 21:40:31 -07:00
|
|
|
// Submit the interrupt; we'll respond upon TurnAborted.
|
|
|
|
|
let _ = conversation.submit(Op::Interrupt).await;
|
2025-08-13 23:12:03 -07:00
|
|
|
}
|
|
|
|
|
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
async fn add_conversation_listener(
|
|
|
|
|
&mut self,
|
|
|
|
|
request_id: RequestId,
|
|
|
|
|
params: AddConversationListenerParams,
|
|
|
|
|
) {
|
2025-10-24 15:41:52 -07:00
|
|
|
let AddConversationListenerParams {
|
|
|
|
|
conversation_id,
|
|
|
|
|
experimental_raw_events,
|
|
|
|
|
} = params;
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
let Ok(conversation) = self
|
|
|
|
|
.conversation_manager
|
2025-09-07 20:22:25 -07:00
|
|
|
.get_conversation(conversation_id)
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
.await
|
|
|
|
|
else {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
2025-09-07 20:22:25 -07:00
|
|
|
message: format!("conversation not found: {conversation_id}"),
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
data: None,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
return;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let subscription_id = Uuid::new_v4();
|
|
|
|
|
let (cancel_tx, mut cancel_rx) = oneshot::channel();
|
|
|
|
|
self.conversation_listeners
|
|
|
|
|
.insert(subscription_id, cancel_tx);
|
|
|
|
|
let outgoing_for_task = self.outgoing.clone();
|
2025-08-17 21:40:31 -07:00
|
|
|
let pending_interrupts = self.pending_interrupts.clone();
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
tokio::spawn(async move {
|
|
|
|
|
loop {
|
|
|
|
|
tokio::select! {
|
|
|
|
|
_ = &mut cancel_rx => {
|
|
|
|
|
// User has unsubscribed, so exit this task.
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
event = conversation.next_event() => {
|
|
|
|
|
let event = match event {
|
|
|
|
|
Ok(event) => event,
|
|
|
|
|
Err(err) => {
|
|
|
|
|
tracing::warn!("conversation.next_event() failed with: {err}");
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2025-10-24 15:41:52 -07:00
|
|
|
if let EventMsg::RawResponseItem(_) = &event.msg
|
|
|
|
|
&& !experimental_raw_events {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-13 23:00:50 -07:00
|
|
|
// For now, we send a notification for every event,
|
2025-09-04 17:49:50 -07:00
|
|
|
// JSON-serializing the `Event` as-is, but these should
|
|
|
|
|
// be migrated to be variants of `ServerNotification`
|
|
|
|
|
// instead.
|
2025-08-13 17:54:12 -07:00
|
|
|
let method = format!("codex/event/{}", event.msg);
|
2025-08-13 23:00:50 -07:00
|
|
|
let mut params = match serde_json::to_value(event.clone()) {
|
2025-08-13 17:54:12 -07:00
|
|
|
Ok(serde_json::Value::Object(map)) => map,
|
|
|
|
|
Ok(_) => {
|
2025-09-08 14:54:47 -07:00
|
|
|
error!("event did not serialize to an object");
|
2025-08-13 17:54:12 -07:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
Err(err) => {
|
2025-09-08 14:54:47 -07:00
|
|
|
error!("failed to serialize event: {err}");
|
2025-08-13 17:54:12 -07:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
params.insert("conversationId".to_string(), conversation_id.to_string().into());
|
|
|
|
|
|
|
|
|
|
outgoing_for_task.send_notification(OutgoingNotification {
|
|
|
|
|
method,
|
|
|
|
|
params: Some(params.into()),
|
|
|
|
|
})
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
.await;
|
2025-08-13 23:00:50 -07:00
|
|
|
|
2025-08-17 21:40:31 -07:00
|
|
|
apply_bespoke_event_handling(event.clone(), conversation_id, conversation.clone(), outgoing_for_task.clone(), pending_interrupts.clone()).await;
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
let response = AddConversationSubscriptionResponse { subscription_id };
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async fn remove_conversation_listener(
|
|
|
|
|
&mut self,
|
|
|
|
|
request_id: RequestId,
|
|
|
|
|
params: RemoveConversationListenerParams,
|
|
|
|
|
) {
|
|
|
|
|
let RemoveConversationListenerParams { subscription_id } = params;
|
|
|
|
|
match self.conversation_listeners.remove(&subscription_id) {
|
|
|
|
|
Some(sender) => {
|
|
|
|
|
// Signal the spawned task to exit and acknowledge.
|
|
|
|
|
let _ = sender.send(());
|
|
|
|
|
let response = RemoveConversationSubscriptionResponse {};
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
None => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("subscription not found: {subscription_id}"),
|
|
|
|
|
data: None,
|
2025-08-19 19:50:28 -07:00
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async fn git_diff_to_origin(&self, request_id: RequestId, cwd: PathBuf) {
|
|
|
|
|
let diff = git_diff_to_remote(&cwd).await;
|
|
|
|
|
match diff {
|
|
|
|
|
Some(value) => {
|
|
|
|
|
let response = GitDiffToRemoteResponse {
|
|
|
|
|
sha: value.sha,
|
|
|
|
|
diff: value.diff,
|
|
|
|
|
};
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
|
|
|
|
None => {
|
|
|
|
|
let error = JSONRPCErrorError {
|
|
|
|
|
code: INVALID_REQUEST_ERROR_CODE,
|
|
|
|
|
message: format!("failed to compute git diff to remote for cwd: {cwd:?}"),
|
|
|
|
|
data: None,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
};
|
|
|
|
|
self.outgoing.send_error(request_id, error).await;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-09-29 12:19:09 -07:00
|
|
|
|
|
|
|
|
async fn fuzzy_file_search(&mut self, request_id: RequestId, params: FuzzyFileSearchParams) {
|
|
|
|
|
let FuzzyFileSearchParams {
|
|
|
|
|
query,
|
|
|
|
|
roots,
|
|
|
|
|
cancellation_token,
|
|
|
|
|
} = params;
|
|
|
|
|
|
|
|
|
|
let cancel_flag = match cancellation_token.clone() {
|
|
|
|
|
Some(token) => {
|
|
|
|
|
let mut pending_fuzzy_searches = self.pending_fuzzy_searches.lock().await;
|
|
|
|
|
// if a cancellation_token is provided and a pending_request exists for
|
|
|
|
|
// that token, cancel it
|
|
|
|
|
if let Some(existing) = pending_fuzzy_searches.get(&token) {
|
|
|
|
|
existing.store(true, Ordering::Relaxed);
|
|
|
|
|
}
|
|
|
|
|
let flag = Arc::new(AtomicBool::new(false));
|
|
|
|
|
pending_fuzzy_searches.insert(token.clone(), flag.clone());
|
|
|
|
|
flag
|
|
|
|
|
}
|
|
|
|
|
None => Arc::new(AtomicBool::new(false)),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let results = match query.as_str() {
|
|
|
|
|
"" => vec![],
|
|
|
|
|
_ => run_fuzzy_file_search(query, roots, cancel_flag.clone()).await,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if let Some(token) = cancellation_token {
|
|
|
|
|
let mut pending_fuzzy_searches = self.pending_fuzzy_searches.lock().await;
|
|
|
|
|
if let Some(current_flag) = pending_fuzzy_searches.get(&token)
|
|
|
|
|
&& Arc::ptr_eq(current_flag, &cancel_flag)
|
|
|
|
|
{
|
|
|
|
|
pending_fuzzy_searches.remove(&token);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let response = FuzzyFileSearchResponse { files: results };
|
|
|
|
|
self.outgoing.send_response(request_id, response).await;
|
|
|
|
|
}
|
2025-10-26 22:53:39 -07:00
|
|
|
|
|
|
|
|
/// Handles an `uploadFeedback` request: captures a feedback snapshot for the
/// (optional) conversation, uploads it off the async runtime via
/// `spawn_blocking`, and replies with the feedback thread id on success.
async fn upload_feedback(&self, request_id: RequestId, params: UploadFeedbackParams) {
    let UploadFeedbackParams {
        classification,
        reason,
        conversation_id,
        include_logs,
    } = params;

    // Snapshot feedback state now; `thread_id` is cloned up front because
    // `snapshot` is moved into the blocking closure below.
    let snapshot = self.feedback.snapshot(conversation_id);
    let thread_id = snapshot.thread_id.clone();

    // Only attach a rollout (session log) path when the caller asked for
    // logs AND named a conversation whose rollout file we can resolve.
    let validated_rollout_path = if include_logs {
        match conversation_id {
            Some(conv_id) => self.resolve_rollout_path(conv_id).await,
            None => None,
        }
    } else {
        None
    };

    // The upload is blocking work, so run it on the blocking thread pool
    // rather than on the async runtime.
    let upload_result = tokio::task::spawn_blocking(move || {
        let rollout_path_ref = validated_rollout_path.as_deref();
        snapshot.upload_feedback(
            &classification,
            reason.as_deref(),
            include_logs,
            rollout_path_ref,
        )
    })
    .await;

    // First error layer: the blocking task itself failed to complete
    // (JoinError, e.g. panic). Report as an internal error.
    let upload_result = match upload_result {
        Ok(result) => result,
        Err(join_err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to upload feedback: {join_err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
            return;
        }
    };

    // Second error layer: the upload ran but may have failed on its own.
    match upload_result {
        Ok(()) => {
            let response = UploadFeedbackResponse { thread_id };
            self.outgoing.send_response(request_id, response).await;
        }
        Err(err) => {
            let error = JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to upload feedback: {err}"),
                data: None,
            };
            self.outgoing.send_error(request_id, error).await;
        }
    }
}
|
|
|
|
|
|
|
|
|
|
/// Returns the rollout (session log) file path for `conversation_id`, or
/// `None` when the conversation manager does not know the conversation.
async fn resolve_rollout_path(&self, conversation_id: ConversationId) -> Option<PathBuf> {
    self.conversation_manager
        .get_conversation(conversation_id)
        .await
        .ok()
        .map(|conversation| conversation.rollout_path())
}
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
}
|
|
|
|
|
|
2025-08-13 23:00:50 -07:00
|
|
|
/// Applies side effects for events that need more than the generic
/// `codex/event/*` notification emitted by the listener task: approval
/// requests become server→client requests, rate-limit updates become a
/// dedicated server notification, and `TurnAborted` resolves any pending
/// `interruptConversation` requests. All other events fall through untouched.
async fn apply_bespoke_event_handling(
    event: Event,
    conversation_id: ConversationId,
    conversation: Arc<CodexConversation>,
    outgoing: Arc<OutgoingMessageSender>,
    pending_interrupts: Arc<Mutex<HashMap<ConversationId, Vec<RequestId>>>>,
) {
    let Event { id: event_id, msg } = event;
    match msg {
        // Patch approval: forward to the client as a server request and
        // relay the decision back to the conversation from a spawned task.
        EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
            call_id,
            changes,
            reason,
            grant_root,
        }) => {
            let params = ApplyPatchApprovalParams {
                conversation_id,
                call_id,
                file_changes: changes,
                reason,
                grant_root,
            };
            let rx = outgoing
                .send_request(ServerRequestPayload::ApplyPatchApproval(params))
                .await;
            // TODO(mbolin): Enforce a timeout so this task does not live indefinitely?
            tokio::spawn(async move {
                on_patch_approval_response(event_id, rx, conversation).await;
            });
        }
        // Exec approval: same request/response relay pattern as above.
        EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
            call_id,
            command,
            cwd,
            reason,
            risk,
            parsed_cmd,
        }) => {
            let params = ExecCommandApprovalParams {
                conversation_id,
                call_id,
                command,
                cwd,
                reason,
                risk,
                parsed_cmd,
            };
            let rx = outgoing
                .send_request(ServerRequestPayload::ExecCommandApproval(params))
                .await;

            // TODO(mbolin): Enforce a timeout so this task does not live indefinitely?
            tokio::spawn(async move {
                on_exec_approval_response(event_id, rx, conversation).await;
            });
        }
        // Token counts carry optional rate-limit data; only notify when the
        // rate limits are actually present.
        EventMsg::TokenCount(token_count_event) => {
            if let Some(rate_limits) = token_count_event.rate_limits {
                outgoing
                    .send_server_notification(ServerNotification::AccountRateLimitsUpdated(
                        rate_limits,
                    ))
                    .await;
            }
        }
        // If this is a TurnAborted, reply to any pending interrupt requests.
        EventMsg::TurnAborted(turn_aborted_event) => {
            // Take the whole pending list in a short lock scope so no lock
            // is held across the response awaits below.
            let pending = {
                let mut map = pending_interrupts.lock().await;
                map.remove(&conversation_id).unwrap_or_default()
            };
            if !pending.is_empty() {
                let response = InterruptConversationResponse {
                    abort_reason: turn_aborted_event.reason,
                };
                for rid in pending {
                    outgoing.send_response(rid, response.clone()).await;
                }
            }
        }
        _ => {}
    }
}
|
|
|
|
|
|
2025-10-03 13:02:26 -07:00
|
|
|
async fn derive_config_from_params(
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
params: NewConversationParams,
|
|
|
|
|
codex_linux_sandbox_exe: Option<PathBuf>,
|
|
|
|
|
) -> std::io::Result<Config> {
|
|
|
|
|
let NewConversationParams {
|
|
|
|
|
model,
|
2025-10-27 02:33:30 -07:00
|
|
|
model_provider,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
profile,
|
|
|
|
|
cwd,
|
|
|
|
|
approval_policy,
|
2025-08-18 09:36:57 -07:00
|
|
|
sandbox: sandbox_mode,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
config: cli_overrides,
|
|
|
|
|
base_instructions,
|
2025-08-15 11:55:53 -04:00
|
|
|
include_apply_patch_tool,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
} = params;
|
|
|
|
|
let overrides = ConfigOverrides {
|
|
|
|
|
model,
|
Review Mode (Core) (#3401)
## 📝 Review Mode -- Core
This PR introduces the Core implementation for Review mode:
- New op `Op::Review { prompt: String }:` spawns a child review task
with isolated context, a review‑specific system prompt, and a
`Config.review_model`.
- `EnteredReviewMode`: emitted when the child review session starts.
Every event from this point onwards reflects the review session.
- `ExitedReviewMode(Option<ReviewOutputEvent>)`: emitted when the review
finishes or is interrupted, with optional structured findings:
```json
{
"findings": [
{
"title": "<≤ 80 chars, imperative>",
"body": "<valid Markdown explaining *why* this is a problem; cite files/lines/functions>",
"confidence_score": <float 0.0-1.0>,
"priority": <int 0-3>,
"code_location": {
"absolute_file_path": "<file path>",
"line_range": {"start": <int>, "end": <int>}
}
}
],
"overall_correctness": "patch is correct" | "patch is incorrect",
"overall_explanation": "<1-3 sentence explanation justifying the overall_correctness verdict>",
"overall_confidence_score": <float 0.0-1.0>
}
```
## Questions
### Why separate out its own message history?
We want the review thread to match the training of our review models as
much as possible -- that means using a custom prompt, removing user
instructions, and starting a clean chat history.
We also want to make sure the review thread doesn't leak into the parent
thread.
### Why do this as a mode, vs. sub-agents?
1. We want review to be a synchronous task, so it's fine for now to do a
bespoke implementation.
2. We're still unclear about the final structure for sub-agents. We'd
prefer to land this quickly and then refactor into sub-agents without
rushing that implementation.
2025-09-12 16:25:10 -07:00
|
|
|
review_model: None,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
config_profile: profile,
|
|
|
|
|
cwd: cwd.map(PathBuf::from),
|
2025-08-18 09:36:57 -07:00
|
|
|
approval_policy,
|
|
|
|
|
sandbox_mode,
|
2025-10-27 02:33:30 -07:00
|
|
|
model_provider,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
codex_linux_sandbox_exe,
|
|
|
|
|
base_instructions,
|
2025-08-15 11:55:53 -04:00
|
|
|
include_apply_patch_tool,
|
2025-08-27 17:41:23 -07:00
|
|
|
include_view_image_tool: None,
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
show_raw_agent_reasoning: None,
|
2025-08-23 22:58:56 -07:00
|
|
|
tools_web_search_request: None,
|
2025-10-24 17:23:44 -05:00
|
|
|
experimental_sandbox_command_assessment: None,
|
2025-10-18 22:13:53 -07:00
|
|
|
additional_writable_roots: Vec::new(),
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let cli_overrides = cli_overrides
|
|
|
|
|
.unwrap_or_default()
|
|
|
|
|
.into_iter()
|
|
|
|
|
.map(|(k, v)| (k, json_to_toml(v)))
|
|
|
|
|
.collect();
|
|
|
|
|
|
2025-10-03 13:02:26 -07:00
|
|
|
Config::load_with_cli_overrides(cli_overrides, overrides).await
|
feat: support traditional JSON-RPC request/response in MCP server (#2264)
This introduces a new set of request types that our `codex mcp`
supports. Note that these do not conform to MCP tool calls so that
instead of having to send something like this:
```json
{
"jsonrpc": "2.0",
"method": "tools/call",
"id": 42,
"params": {
"name": "newConversation",
"arguments": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
}
```
we can send something like this:
```json
{
"jsonrpc": "2.0",
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5",
"approvalPolicy": "on-request"
}
}
```
Admittedly, this new format is not a valid MCP tool call, but we are OK
with that right now. (That is, not everything we might want to request
of `codex mcp` is something that is appropriate for an autonomous agent
to do.)
To start, this introduces four request types:
- `newConversation`
- `sendUserMessage`
- `addConversationListener`
- `removeConversationListener`
The new `mcp-server/tests/codex_message_processor_flow.rs` shows how
these can be used.
The types are defined on the `CodexRequest` enum, so we introduce a new
`CodexMessageProcessor` that is responsible for dealing with requests
from this enum. The top-level `MessageProcessor` has been updated so
that when `process_request()` is called, it first checks whether the
request conforms to `CodexRequest` and dispatches it to
`CodexMessageProcessor` if so.
Note that I also decided to use `camelCase` for the on-the-wire format,
as that seems to be the convention for MCP.
For the moment, the new protocol is defined in `wire_format.rs` within
the `mcp-server` crate, but in a subsequent PR, I will probably move it
to its own crate to ensure the protocol has minimal dependencies and
that we can codegen a schema from it.
---
[//]: # (BEGIN SAPLING FOOTER)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with [ReviewStack](https://reviewstack.dev/openai/codex/pull/2264).
* #2278
* __->__ #2264
2025-08-13 17:36:29 -07:00
|
|
|
}
|
2025-08-13 23:00:50 -07:00
|
|
|
|
|
|
|
|
async fn on_patch_approval_response(
|
|
|
|
|
event_id: String,
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
receiver: oneshot::Receiver<JsonRpcResult>,
|
2025-08-13 23:00:50 -07:00
|
|
|
codex: Arc<CodexConversation>,
|
|
|
|
|
) {
|
|
|
|
|
let response = receiver.await;
|
|
|
|
|
let value = match response {
|
|
|
|
|
Ok(value) => value,
|
|
|
|
|
Err(err) => {
|
|
|
|
|
error!("request failed: {err:?}");
|
|
|
|
|
if let Err(submit_err) = codex
|
|
|
|
|
.submit(Op::PatchApproval {
|
|
|
|
|
id: event_id.clone(),
|
|
|
|
|
decision: ReviewDecision::Denied,
|
|
|
|
|
})
|
|
|
|
|
.await
|
|
|
|
|
{
|
|
|
|
|
error!("failed to submit denied PatchApproval after request failure: {submit_err}");
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let response =
|
|
|
|
|
serde_json::from_value::<ApplyPatchApprovalResponse>(value).unwrap_or_else(|err| {
|
|
|
|
|
error!("failed to deserialize ApplyPatchApprovalResponse: {err}");
|
|
|
|
|
ApplyPatchApprovalResponse {
|
|
|
|
|
decision: ReviewDecision::Denied,
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
if let Err(err) = codex
|
|
|
|
|
.submit(Op::PatchApproval {
|
|
|
|
|
id: event_id,
|
|
|
|
|
decision: response.decision,
|
|
|
|
|
})
|
|
|
|
|
.await
|
|
|
|
|
{
|
|
|
|
|
error!("failed to submit PatchApproval: {err}");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async fn on_exec_approval_response(
|
|
|
|
|
event_id: String,
|
fix: remove mcp-types from app server protocol (#4537)
We continue the separation between `codex app-server` and `codex
mcp-server`.
In particular, we introduce a new crate, `codex-app-server-protocol`,
and migrate `codex-rs/protocol/src/mcp_protocol.rs` into it, renaming it
`codex-rs/app-server-protocol/src/protocol.rs`.
Because `ConversationId` was defined in `mcp_protocol.rs`, we move it
into its own file, `codex-rs/protocol/src/conversation_id.rs`, and
because it is referenced in a ton of places, we have to touch a lot of
files as part of this PR.
We also decide to get away from proper JSON-RPC 2.0 semantics, so we
also introduce `codex-rs/app-server-protocol/src/jsonrpc_lite.rs`, which
is basically the same `JSONRPCMessage` type defined in `mcp-types`
except with all of the `"jsonrpc": "2.0"` removed.
Getting rid of `"jsonrpc": "2.0"` makes our serialization logic
considerably simpler, as we can lean heavier on serde to serialize
directly into the wire format that we use now.
2025-09-30 19:16:26 -07:00
|
|
|
receiver: oneshot::Receiver<JsonRpcResult>,
|
2025-08-13 23:00:50 -07:00
|
|
|
conversation: Arc<CodexConversation>,
|
|
|
|
|
) {
|
|
|
|
|
let response = receiver.await;
|
|
|
|
|
let value = match response {
|
|
|
|
|
Ok(value) => value,
|
|
|
|
|
Err(err) => {
|
2025-09-08 14:54:47 -07:00
|
|
|
error!("request failed: {err:?}");
|
2025-08-13 23:00:50 -07:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Try to deserialize `value` and then make the appropriate call to `codex`.
|
|
|
|
|
let response =
|
|
|
|
|
serde_json::from_value::<ExecCommandApprovalResponse>(value).unwrap_or_else(|err| {
|
|
|
|
|
error!("failed to deserialize ExecCommandApprovalResponse: {err}");
|
|
|
|
|
// If we cannot deserialize the response, we deny the request to be
|
|
|
|
|
// conservative.
|
|
|
|
|
ExecCommandApprovalResponse {
|
|
|
|
|
decision: ReviewDecision::Denied,
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
if let Err(err) = conversation
|
|
|
|
|
.submit(Op::ExecApproval {
|
|
|
|
|
id: event_id,
|
|
|
|
|
decision: response.decision,
|
|
|
|
|
})
|
|
|
|
|
.await
|
|
|
|
|
{
|
|
|
|
|
error!("failed to submit ExecApproval: {err}");
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-09-04 16:44:18 -07:00
|
|
|
|
2025-10-27 09:11:45 -07:00
|
|
|
async fn read_summary_from_rollout(
|
|
|
|
|
path: &Path,
|
|
|
|
|
fallback_provider: &str,
|
|
|
|
|
) -> std::io::Result<ConversationSummary> {
|
|
|
|
|
let head = read_head_for_summary(path).await?;
|
|
|
|
|
|
|
|
|
|
let Some(first) = head.first() else {
|
|
|
|
|
return Err(IoError::other(format!(
|
|
|
|
|
"rollout at {} is empty",
|
|
|
|
|
path.display()
|
|
|
|
|
)));
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let session_meta = serde_json::from_value::<SessionMeta>(first.clone()).map_err(|_| {
|
|
|
|
|
IoError::other(format!(
|
|
|
|
|
"rollout at {} does not start with session metadata",
|
|
|
|
|
path.display()
|
|
|
|
|
))
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
if let Some(summary) =
|
|
|
|
|
extract_conversation_summary(path.to_path_buf(), &head, fallback_provider)
|
|
|
|
|
{
|
|
|
|
|
return Ok(summary);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let timestamp = if session_meta.timestamp.is_empty() {
|
|
|
|
|
None
|
|
|
|
|
} else {
|
|
|
|
|
Some(session_meta.timestamp.clone())
|
|
|
|
|
};
|
|
|
|
|
let model_provider = session_meta
|
|
|
|
|
.model_provider
|
|
|
|
|
.unwrap_or_else(|| fallback_provider.to_string());
|
|
|
|
|
|
|
|
|
|
Ok(ConversationSummary {
|
|
|
|
|
conversation_id: session_meta.id,
|
|
|
|
|
timestamp,
|
|
|
|
|
path: path.to_path_buf(),
|
|
|
|
|
preview: String::new(),
|
|
|
|
|
model_provider,
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-08 14:54:47 -07:00
|
|
|
fn extract_conversation_summary(
|
|
|
|
|
path: PathBuf,
|
|
|
|
|
head: &[serde_json::Value],
|
2025-10-27 02:03:30 -07:00
|
|
|
fallback_provider: &str,
|
2025-09-08 14:54:47 -07:00
|
|
|
) -> Option<ConversationSummary> {
|
|
|
|
|
let session_meta = match head.first() {
|
2025-09-09 16:52:33 -07:00
|
|
|
Some(first_line) => serde_json::from_value::<SessionMeta>(first_line.clone()).ok()?,
|
2025-09-08 14:54:47 -07:00
|
|
|
None => return None,
|
|
|
|
|
};
|
2025-09-04 16:44:18 -07:00
|
|
|
|
2025-09-08 14:54:47 -07:00
|
|
|
let preview = head
|
|
|
|
|
.iter()
|
|
|
|
|
.filter_map(|value| serde_json::from_value::<ResponseItem>(value.clone()).ok())
|
2025-10-22 10:14:50 -07:00
|
|
|
.find_map(|item| match codex_core::parse_turn_item(&item) {
|
|
|
|
|
Some(TurnItem::UserMessage(user)) => Some(user.message()),
|
2025-09-08 14:54:47 -07:00
|
|
|
_ => None,
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let preview = match preview.find(USER_MESSAGE_BEGIN) {
|
|
|
|
|
Some(idx) => preview[idx + USER_MESSAGE_BEGIN.len()..].trim(),
|
|
|
|
|
None => preview.as_str(),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let timestamp = if session_meta.timestamp.is_empty() {
|
|
|
|
|
None
|
|
|
|
|
} else {
|
|
|
|
|
Some(session_meta.timestamp.clone())
|
|
|
|
|
};
|
2025-10-27 02:03:30 -07:00
|
|
|
let conversation_id = session_meta.id;
|
|
|
|
|
let model_provider = session_meta
|
|
|
|
|
.model_provider
|
|
|
|
|
.unwrap_or_else(|| fallback_provider.to_string());
|
2025-09-08 14:54:47 -07:00
|
|
|
|
|
|
|
|
Some(ConversationSummary {
|
2025-10-27 02:03:30 -07:00
|
|
|
conversation_id,
|
2025-09-08 14:54:47 -07:00
|
|
|
timestamp,
|
|
|
|
|
path,
|
|
|
|
|
preview: preview.to_string(),
|
2025-10-27 02:03:30 -07:00
|
|
|
model_provider,
|
2025-09-08 14:54:47 -07:00
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use anyhow::Result;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use tempfile::TempDir;

    /// The preview should come from the first *real* user message: the
    /// `<user_instructions>` blob is skipped, and only the text after
    /// `USER_MESSAGE_BEGIN` in the prior-context message is kept.
    #[test]
    fn extract_conversation_summary_prefers_plain_user_messages() -> Result<()> {
        let conversation_id = ConversationId::from_string("3f941c35-29b3-493b-b0a4-e25800d9aeb0")?;
        let timestamp = Some("2025-09-05T16:53:11.850Z".to_string());
        let path = PathBuf::from("rollout.jsonl");

        // Head mimics a rollout: session metadata first, then two user
        // messages (instructions blob, then a message with prior context).
        let head = vec![
            json!({
                "id": conversation_id.to_string(),
                "timestamp": timestamp,
                "cwd": "/",
                "originator": "codex",
                "cli_version": "0.0.0",
                "instructions": null,
                "model_provider": "test-provider"
            }),
            json!({
                "type": "message",
                "role": "user",
                "content": [{
                    "type": "input_text",
                    "text": "<user_instructions>\n<AGENTS.md contents>\n</user_instructions>".to_string(),
                }],
            }),
            json!({
                "type": "message",
                "role": "user",
                "content": [{
                    "type": "input_text",
                    "text": format!("<prior context> {USER_MESSAGE_BEGIN}Count to 5"),
                }],
            }),
        ];

        let summary =
            extract_conversation_summary(path.clone(), &head, "test-provider").expect("summary");

        // Preview is the text after the marker, with prior context stripped.
        let expected = ConversationSummary {
            conversation_id,
            timestamp,
            path,
            preview: "Count to 5".to_string(),
            model_provider: "test-provider".to_string(),
        };

        assert_eq!(summary, expected);
        Ok(())
    }

    /// A rollout containing only session metadata (no user message) should
    /// still yield a summary — with an empty preview and the fallback
    /// provider, since the metadata specifies no `model_provider`.
    #[tokio::test]
    async fn read_summary_from_rollout_returns_empty_preview_when_no_user_message() -> Result<()> {
        use codex_protocol::protocol::RolloutItem;
        use codex_protocol::protocol::RolloutLine;
        use codex_protocol::protocol::SessionMetaLine;
        use std::fs;

        let temp_dir = TempDir::new()?;
        let path = temp_dir.path().join("rollout.jsonl");

        let conversation_id = ConversationId::from_string("bfd12a78-5900-467b-9bc5-d3d35df08191")?;
        let timestamp = "2025-09-05T16:53:11.850Z".to_string();

        // model_provider: None forces the fallback-provider code path.
        let session_meta = SessionMeta {
            id: conversation_id,
            timestamp: timestamp.clone(),
            model_provider: None,
            ..SessionMeta::default()
        };

        let line = RolloutLine {
            timestamp: timestamp.clone(),
            item: RolloutItem::SessionMeta(SessionMetaLine {
                meta: session_meta.clone(),
                git: None,
            }),
        };

        // Write a single-line rollout file containing just the metadata.
        fs::write(&path, format!("{}\n", serde_json::to_string(&line)?))?;

        let summary = read_summary_from_rollout(path.as_path(), "fallback").await?;

        let expected = ConversationSummary {
            conversation_id,
            timestamp: Some(timestamp),
            path: path.clone(),
            preview: String::new(),
            model_provider: "fallback".to_string(),
        };

        assert_eq!(summary, expected);
        Ok(())
    }
}
|