use codex_common::elapsed::format_elapsed;
use codex_common::summarize_sandbox_policy;
use codex_core::WireApi;
use codex_core::config::Config;
use codex_core::model_supports_reasoning_summaries;
use codex_core::protocol::AgentMessageDeltaEvent;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::AgentReasoningDeltaEvent;
use codex_core::protocol::BackgroundEventEvent;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecCommandBeginEvent;
use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::FileChange;
use codex_core::protocol::McpToolCallBeginEvent;
use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::protocol::TokenUsage;
use owo_colors::OwoColorize;
use owo_colors::Style;
use shlex::try_join;
use std::collections::HashMap;
use std::io::Write;
use std::time::Instant;

/// This should be configurable. When used in CI, users may not want to impose
/// a limit so they can see the full transcript.
const MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL: usize = 20;
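// For example, with the default above, a command that prints hundreds of lines
// of output is shown as only its first 20 lines in the transcript below.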

pub(crate) struct EventProcessor {
    call_id_to_command: HashMap<String, ExecCommandBegin>,
    call_id_to_patch: HashMap<String, PatchApplyBegin>,

    /// Tracks in-flight MCP tool calls so we can calculate duration and print
    /// a concise summary when the corresponding `McpToolCallEnd` event is
    /// received.
    call_id_to_tool_call: HashMap<String, McpToolCallBegin>,

    // To ensure that --color=never is respected, ANSI escapes _must_ be added
    // using .style() with one of these fields. If you need a new style, add a
    // new field here.
    bold: Style,
    italic: Style,
    dimmed: Style,

    magenta: Style,
    red: Style,
    green: Style,
    cyan: Style,

    /// Whether to include `AgentReasoning` events in the output.
    show_agent_reasoning: bool,
    answer_started: bool,
    reasoning_started: bool,
}
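
// If another color is ever needed, the pattern above suggests (hypothetically)
// adding e.g. a `yellow: Style` field here, initializing it with
// `Style::new().yellow()` in the ANSI arm of the constructor below and with
// `Style::new()` in the plain arm, so `--color=never` output stays escape-free.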

impl EventProcessor {
    pub(crate) fn create_with_ansi(with_ansi: bool, config: &Config) -> Self {
        let call_id_to_command = HashMap::new();
        let call_id_to_patch = HashMap::new();
        let call_id_to_tool_call = HashMap::new();

        if with_ansi {
            Self {
                call_id_to_command,
                call_id_to_patch,
                bold: Style::new().bold(),
                italic: Style::new().italic(),
                dimmed: Style::new().dimmed(),
                magenta: Style::new().magenta(),
                red: Style::new().red(),
                green: Style::new().green(),
                cyan: Style::new().cyan(),
                call_id_to_tool_call,
                show_agent_reasoning: !config.hide_agent_reasoning,
                answer_started: false,
                reasoning_started: false,
            }
        } else {
            Self {
                call_id_to_command,
                call_id_to_patch,
                bold: Style::new(),
                italic: Style::new(),
                dimmed: Style::new(),
                magenta: Style::new(),
                red: Style::new(),
                green: Style::new(),
                cyan: Style::new(),
                call_id_to_tool_call,
                show_agent_reasoning: !config.hide_agent_reasoning,
                answer_started: false,
                reasoning_started: false,
            }
        }
    }
}
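
// Illustrative call site (hypothetical variable names): pass `with_ansi = false`
// when stdout is not a terminal or the user asked for `--color=never`, so every
// `Style` above is a no-op and the output contains no ANSI escapes.
//
//     let mut processor = EventProcessor::create_with_ansi(stdout_is_tty, &config);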

struct ExecCommandBegin {
    command: Vec<String>,
    start_time: Instant,
}

/// Metadata captured when an `McpToolCallBegin` event is received.
struct McpToolCallBegin {
    /// Formatted invocation string, e.g. `server.tool({"city":"sf"})`.
    invocation: String,
    /// Timestamp when the call started so we can compute duration later.
    start_time: Instant,
}

struct PatchApplyBegin {
    start_time: Instant,
    auto_approved: bool,
}

// Timestamped println helper. The timestamp is styled with self.dimmed.
#[macro_export]
macro_rules! ts_println {
    ($self:ident, $($arg:tt)*) => {{
        let now = chrono::Utc::now();
        let formatted = now.format("[%Y-%m-%dT%H:%M:%S]");
        print!("{} ", formatted.style($self.dimmed));
        println!($($arg)*);
    }};
}
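
// Illustrative usage, e.g. from within an EventProcessor method (which has the
// `dimmed` Style field the macro expects):
//
//     ts_println!(self, "{} {}", "exec".style(self.magenta), "ls -la");
//
// prints a dimmed `[YYYY-MM-DDTHH:MM:SS]` UTC timestamp followed by the
// formatted message on the same line.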

impl EventProcessor {
    /// Print a concise summary of the effective configuration that will be used
    /// for the session. This mirrors the information shown in the TUI welcome
    /// screen.
    pub(crate) fn print_config_summary(&mut self, config: &Config, prompt: &str) {
        const VERSION: &str = env!("CARGO_PKG_VERSION");
        ts_println!(
            self,
            "OpenAI Codex v{} (research preview)\n--------",
            VERSION
        );

        let mut entries = vec![
            ("workdir", config.cwd.display().to_string()),
            ("model", config.model.clone()),
            ("provider", config.model_provider_id.clone()),
            ("approval", format!("{:?}", config.approval_policy)),
            ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
        ];
        if config.model_provider.wire_api == WireApi::Responses
            && model_supports_reasoning_summaries(config)
        {
            entries.push((
                "reasoning effort",
                config.model_reasoning_effort.to_string(),
            ));
            entries.push((
                "reasoning summaries",
                config.model_reasoning_summary.to_string(),
            ));
        }

        for (key, value) in entries {
            println!("{} {}", format!("{key}:").style(self.bold), value);
        }

        println!("--------");

        // Echo the prompt that will be sent to the agent so it is visible in the
        // transcript/logs before any events come in. Note the prompt may have been
        // read from stdin, so it may not be visible in the terminal otherwise.
        ts_println!(
            self,
            "{}\n{}",
            "User instructions:".style(self.bold).style(self.cyan),
            prompt
        );
    }
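
    // Illustrative transcript header produced by `print_config_summary` (all
    // values below are hypothetical):
    //
    //     [2025-01-01T00:00:00] OpenAI Codex v0.1.0 (research preview)
    //     --------
    //     workdir: /home/user/project
    //     model: o3
    //     provider: openai
    //     approval: OnFailure
    //     sandbox: read-only
    //     reasoning effort: medium
    //     reasoning summaries: auto
    //     --------
    //     [2025-01-01T00:00:00] User instructions:
    //     fix the failing unit test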

    pub(crate) fn process_event(&mut self, event: Event) {
        let Event { id: _, msg } = event;
        match msg {
            EventMsg::Error(ErrorEvent { message }) => {
                let prefix = "ERROR:".style(self.red);
                ts_println!(self, "{prefix} {message}");
            }
            EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
                ts_println!(self, "{}", message.style(self.dimmed));
            }
            EventMsg::TaskStarted | EventMsg::TaskComplete(_) => {
                // Ignore.
            }
            EventMsg::TokenCount(TokenUsage { total_tokens, .. }) => {
                ts_println!(self, "tokens used: {total_tokens}");
            }
            EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
                if !self.answer_started {
                    ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
                    self.answer_started = true;
                }
                print!("{delta}");
                #[allow(clippy::expect_used)]
                std::io::stdout().flush().expect("could not flush stdout");
            }
            EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
                if !self.show_agent_reasoning {
                    return;
                }
                if !self.reasoning_started {
                    ts_println!(
                        self,
                        "{}\n",
                        "thinking".style(self.italic).style(self.magenta),
                    );
                    self.reasoning_started = true;
                }
                print!("{delta}");
                #[allow(clippy::expect_used)]
                std::io::stdout().flush().expect("could not flush stdout");
            }
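            // Note: the two delta arms above stream text with `print!` (no trailing
            // newline); the non-delta AgentMessage / AgentReasoning arms below emit
            // the closing `println!()` once the full message has finished streaming.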
            EventMsg::AgentMessage(AgentMessageEvent { message }) => {
                // If `answer_started` is still false, no deltas were received for
                // this turn, so print the full message as a new answer. Otherwise
                // the text has already been streamed; just close the block.
                if !self.answer_started {
                    ts_println!(
                        self,
                        "{}\n{}",
                        "codex".style(self.italic).style(self.magenta),
                        message,
                    );
                } else {
                    println!();
                    self.answer_started = false;
                }
            }
            EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
                call_id,
                command,
                cwd,
            }) => {
                self.call_id_to_command.insert(
                    call_id.clone(),
                    ExecCommandBegin {
                        command: command.clone(),
                        start_time: Instant::now(),
                    },
                );
                ts_println!(
                    self,
                    "{} {} in {}",
                    "exec".style(self.magenta),
                    escape_command(&command).style(self.bold),
                    cwd.to_string_lossy(),
                );
            }
            EventMsg::ExecCommandEnd(ExecCommandEndEvent {
                call_id,
                stdout,
                stderr,
                exit_code,
            }) => {
                let exec_command = self.call_id_to_command.remove(&call_id);
                let (duration, call) = if let Some(ExecCommandBegin {
                    command,
                    start_time,
                }) = exec_command
                {
                    (
                        format!(" in {}", format_elapsed(start_time)),
                        format!("{}", escape_command(&command).style(self.bold)),
                    )
                } else {
                    ("".to_string(), format!("exec('{call_id}')"))
                };

                let output = if exit_code == 0 { stdout } else { stderr };
                let truncated_output = output
                    .lines()
                    .take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
                    .collect::<Vec<_>>()
                    .join("\n");
                match exit_code {
                    0 => {
                        let title = format!("{call} succeeded{duration}:");
                        ts_println!(self, "{}", title.style(self.green));
                    }
                    _ => {
                        let title = format!("{call} exited {exit_code}{duration}:");
                        ts_println!(self, "{}", title.style(self.red));
                    }
                }
                println!("{}", truncated_output.style(self.dimmed));
            }
            EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
                call_id,
                server,
                tool,
                arguments,
            }) => {
                // Build fully-qualified tool name: server.tool
                let fq_tool_name = format!("{server}.{tool}");

                // Format arguments as compact JSON so they fit on one line.
                let args_str = arguments
                    .as_ref()
                    .map(|v: &serde_json::Value| {
                        serde_json::to_string(v).unwrap_or_else(|_| v.to_string())
                    })
                    .unwrap_or_default();

                let invocation = if args_str.is_empty() {
                    format!("{fq_tool_name}()")
                } else {
                    format!("{fq_tool_name}({args_str})")
                };

                self.call_id_to_tool_call.insert(
                    call_id.clone(),
                    McpToolCallBegin {
                        invocation: invocation.clone(),
                        start_time: Instant::now(),
                    },
                );

                ts_println!(
                    self,
                    "{} {}",
                    "tool".style(self.magenta),
                    invocation.style(self.bold),
                );
            }
            EventMsg::McpToolCallEnd(tool_call_end_event) => {
                let is_success = tool_call_end_event.is_success();
                let McpToolCallEndEvent { call_id, result } = tool_call_end_event;
                // Retrieve start time and invocation for duration calculation and labeling.
                let info = self.call_id_to_tool_call.remove(&call_id);

                let (duration, invocation) = if let Some(McpToolCallBegin {
                    invocation,
                    start_time,
                    ..
                }) = info
                {
                    (format!(" in {}", format_elapsed(start_time)), invocation)
                } else {
                    (String::new(), format!("tool('{call_id}')"))
                };

                let status_str = if is_success { "success" } else { "failed" };
                let title_style = if is_success { self.green } else { self.red };
                let title = format!("{invocation} {status_str}{duration}:");

                ts_println!(self, "{}", title.style(title_style));

                if let Ok(res) = result {
                    let val: serde_json::Value = res.into();
                    let pretty =
                        serde_json::to_string_pretty(&val).unwrap_or_else(|_| val.to_string());

                    for line in pretty.lines().take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL) {
                        println!("{}", line.style(self.dimmed));
                    }
                }
            }
            EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
                call_id,
                auto_approved,
                changes,
            }) => {
                // Store metadata so we can calculate duration later when we
                // receive the corresponding PatchApplyEnd event.
                self.call_id_to_patch.insert(
                    call_id.clone(),
                    PatchApplyBegin {
                        start_time: Instant::now(),
                        auto_approved,
                    },
                );

                ts_println!(
                    self,
                    "{} auto_approved={}:",
                    "apply_patch".style(self.magenta),
                    auto_approved,
                );

                // Pretty-print the patch summary with colored diff markers so
                // it's easy to scan in the terminal output.
                for (path, change) in changes.iter() {
                    match change {
                        FileChange::Add { content } => {
                            let header = format!(
                                "{} {}",
                                format_file_change(change),
                                path.to_string_lossy()
                            );
                            println!("{}", header.style(self.magenta));
                            for line in content.lines() {
                                println!("{}", line.style(self.green));
                            }
                        }
                        FileChange::Delete => {
                            let header = format!(
                                "{} {}",
                                format_file_change(change),
                                path.to_string_lossy()
                            );
                            println!("{}", header.style(self.magenta));
                        }
                        FileChange::Update {
                            unified_diff,
                            move_path,
                        } => {
                            let header = if let Some(dest) = move_path {
                                format!(
                                    "{} {} -> {}",
                                    format_file_change(change),
                                    path.to_string_lossy(),
                                    dest.to_string_lossy()
                                )
                            } else {
                                format!("{} {}", format_file_change(change), path.to_string_lossy())
                            };
                            println!("{}", header.style(self.magenta));

                            // Colorize diff lines. We keep file header lines
                            // (--- / +++) without extra coloring so they are
                            // still readable.
                            for diff_line in unified_diff.lines() {
                                if diff_line.starts_with('+') && !diff_line.starts_with("+++") {
                                    println!("{}", diff_line.style(self.green));
                                } else if diff_line.starts_with('-')
                                    && !diff_line.starts_with("---")
                                {
                                    println!("{}", diff_line.style(self.red));
                                } else {
                                    println!("{diff_line}");
                                }
                            }
                        }
                    }
                }
            }
            EventMsg::PatchApplyEnd(PatchApplyEndEvent {
                call_id,
                stdout,
                stderr,
                success,
            }) => {
                let patch_begin = self.call_id_to_patch.remove(&call_id);

                // Compute duration and summary label similar to exec commands.
                let (duration, label) = if let Some(PatchApplyBegin {
                    start_time,
                    auto_approved,
                }) = patch_begin
                {
                    (
                        format!(" in {}", format_elapsed(start_time)),
                        format!("apply_patch(auto_approved={auto_approved})"),
                    )
                } else {
                    (String::new(), format!("apply_patch('{call_id}')"))
                };

                let (exit_code, output, title_style) = if success {
                    (0, stdout, self.green)
                } else {
                    (1, stderr, self.red)
                };

                let title = format!("{label} exited {exit_code}{duration}:");
                ts_println!(self, "{}", title.style(title_style));
                for line in output.lines() {
                    println!("{}", line.style(self.dimmed));
                }
            }
            EventMsg::ExecApprovalRequest(_) => {
                // Should we exit?
            }
            EventMsg::ApplyPatchApprovalRequest(_) => {
                // Should we exit?
            }
            EventMsg::AgentReasoning(agent_reasoning_event) => {
                if self.show_agent_reasoning {
                    if !self.reasoning_started {
                        ts_println!(
                            self,
                            "{}\n{}",
                            "codex".style(self.italic).style(self.magenta),
                            agent_reasoning_event.text,
                        );
                    } else {
                        println!();
                        self.reasoning_started = false;
                    }
                }
            }
            EventMsg::SessionConfigured(session_configured_event) => {
                let SessionConfiguredEvent {
                    session_id,
                    model,
                    history_log_id: _,
                    history_entry_count: _,
                } = session_configured_event;

                ts_println!(
                    self,
                    "{} {}",
                    "codex session".style(self.magenta).style(self.bold),
                    session_id.to_string().style(self.dimmed)
                );

                ts_println!(self, "model: {}", model);
                println!();
            }
            EventMsg::GetHistoryEntryResponse(_) => {
                // Currently ignored in exec output.
            }
        }
    }
}

fn escape_command(command: &[String]) -> String {
    try_join(command.iter().map(|s| s.as_str())).unwrap_or_else(|_| command.join(" "))
}
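
// Illustrative behavior (not a doctest): escape_command(&["echo".to_string(),
// "hello world".to_string()]) yields `echo 'hello world'` via shlex quoting,
// falling back to a plain space-join if quoting fails.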

fn format_file_change(change: &FileChange) -> &'static str {
    match change {
        FileChange::Add { .. } => "A",
        FileChange::Delete => "D",
        FileChange::Update {
            move_path: Some(_), ..
        } => "R",
        FileChange::Update {
            move_path: None, ..
        } => "M",
    }
}
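
// Illustrative mapping: an added file is summarized as "A <path>", a deletion as
// "D <path>", a rename as "R <old> -> <new>", and an in-place edit as "M <path>"
// in the apply_patch output above.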