2025-08-11 11:26:15 -07:00
|
|
|
|
use crate::colors::LIGHT_BLUE;
|
2025-08-11 12:31:34 -07:00
|
|
|
|
use crate::diff_render::create_diff_summary;
|
2025-08-06 14:36:48 -07:00
|
|
|
|
use crate::exec_command::relativize_to_home;
|
2025-07-31 00:43:21 -07:00
|
|
|
|
use crate::exec_command::strip_bash_lc_and_escape;
|
2025-08-06 14:36:48 -07:00
|
|
|
|
use crate::slash_command::SlashCommand;
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
use crate::text_block::TextBlock;
|
2025-06-03 14:29:26 -07:00
|
|
|
|
use crate::text_formatting::format_and_truncate_tool_result;
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
use base64::Engine;
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
use codex_ansi_escape::ansi_escape_line;
|
2025-08-05 23:57:52 -07:00
|
|
|
|
use codex_common::create_config_summary_entries;
|
2025-05-06 17:38:56 -07:00
|
|
|
|
use codex_common::elapsed::format_duration;
|
2025-04-27 21:47:50 -07:00
|
|
|
|
use codex_core::config::Config;
|
2025-08-11 11:26:15 -07:00
|
|
|
|
use codex_core::parse_command::ParsedCommand;
|
2025-07-31 13:45:52 -07:00
|
|
|
|
use codex_core::plan_tool::PlanItemArg;
|
|
|
|
|
|
use codex_core::plan_tool::StepStatus;
|
|
|
|
|
|
use codex_core::plan_tool::UpdatePlanArgs;
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
use codex_core::protocol::FileChange;
|
2025-07-30 10:05:40 -07:00
|
|
|
|
use codex_core::protocol::McpInvocation;
|
2025-08-07 04:02:58 -07:00
|
|
|
|
use codex_core::protocol::SandboxPolicy;
|
2025-05-13 19:22:16 -07:00
|
|
|
|
use codex_core::protocol::SessionConfiguredEvent;
|
2025-08-05 23:57:52 -07:00
|
|
|
|
use codex_core::protocol::TokenUsage;
|
2025-08-07 01:27:45 -07:00
|
|
|
|
use codex_login::get_auth_file;
|
|
|
|
|
|
use codex_login::try_read_auth_json;
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
use image::DynamicImage;
|
|
|
|
|
|
use image::ImageReader;
|
2025-06-03 14:29:26 -07:00
|
|
|
|
use mcp_types::EmbeddedResourceResource;
|
2025-07-19 00:09:34 -04:00
|
|
|
|
use mcp_types::ResourceLink;
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
use ratatui::prelude::*;
|
|
|
|
|
|
use ratatui::style::Color;
|
|
|
|
|
|
use ratatui::style::Modifier;
|
|
|
|
|
|
use ratatui::style::Style;
|
2025-08-06 12:03:45 -07:00
|
|
|
|
use ratatui::widgets::Paragraph;
|
|
|
|
|
|
use ratatui::widgets::WidgetRef;
|
2025-08-07 18:38:39 -07:00
|
|
|
|
use ratatui::widgets::Wrap;
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
use std::collections::HashMap;
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
use std::io::Cursor;
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
use std::path::PathBuf;
|
|
|
|
|
|
use std::time::Duration;
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
use tracing::error;
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
|
2025-08-11 12:40:12 -07:00
|
|
|
|
#[derive(Clone)]
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
pub(crate) struct CommandOutput {
|
|
|
|
|
|
pub(crate) exit_code: i32,
|
|
|
|
|
|
pub(crate) stdout: String,
|
|
|
|
|
|
pub(crate) stderr: String,
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
pub(crate) enum PatchEventType {
|
|
|
|
|
|
ApprovalRequest,
|
|
|
|
|
|
ApplyBegin { auto_approved: bool },
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-07-30 10:05:40 -07:00
|
|
|
|
fn span_to_static(span: &Span) -> Span<'static> {
|
|
|
|
|
|
Span {
|
|
|
|
|
|
style: span.style,
|
|
|
|
|
|
content: std::borrow::Cow::Owned(span.content.clone().into_owned()),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
fn line_to_static(line: &Line) -> Line<'static> {
|
|
|
|
|
|
Line {
|
|
|
|
|
|
style: line.style,
|
|
|
|
|
|
alignment: line.alignment,
|
|
|
|
|
|
spans: line.spans.iter().map(span_to_static).collect(),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-11 12:40:12 -07:00
|
|
|
|
pub(crate) struct ExecCell {
|
|
|
|
|
|
pub(crate) command: Vec<String>,
|
|
|
|
|
|
pub(crate) parsed: Vec<ParsedCommand>,
|
|
|
|
|
|
pub(crate) output: Option<CommandOutput>,
|
|
|
|
|
|
}
|
|
|
|
|
|
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
/// Represents an event to display in the conversation history. Returns its
/// `Vec<Line<'static>>` representation to make it easier to display in a
/// scrollable list.
pub(crate) enum HistoryCell {
    /// Welcome message.
    WelcomeMessage {
        view: TextBlock,
    },

    /// Message from the user.
    UserPrompt {
        view: TextBlock,
    },

    /// A shell command execution; `ExecCell` carries the command, its parsed
    /// form, and (optionally) its output — see `plain_lines` for how it is
    /// rendered.
    Exec(ExecCell),

    /// An MCP tool call that has not finished yet.
    ActiveMcpToolCall {
        view: TextBlock,
    },

    /// Completed MCP tool call where we show the result serialized as JSON.
    CompletedMcpToolCall {
        view: TextBlock,
    },

    /// Completed MCP tool call where the result is an image.
    /// Admittedly, [mcp_types::CallToolResult] can have multiple content types,
    /// which could be a mix of text and images, so we need to tighten this up.
    // NOTE: For image output we keep the *original* image around and lazily
    // compute a resized copy that fits the available cell width. Caching the
    // resized version avoids doing the potentially expensive rescale twice
    // because the scroll-view first calls `height()` for layouting and then
    // `render_window()` for painting.
    CompletedMcpToolCallWithImageOutput {
        _image: DynamicImage,
    },

    /// Background event.
    BackgroundEvent {
        view: TextBlock,
    },

    /// Output from the `/diff` command.
    GitDiffOutput {
        view: TextBlock,
    },

    /// Output from the `/status` command.
    StatusOutput {
        view: TextBlock,
    },

    /// Output from the `/prompts` command.
    PromptsOutput {
        view: TextBlock,
    },

    /// Error event from the backend.
    ErrorEvent {
        view: TextBlock,
    },

    /// Info describing the newly-initialized session.
    SessionInfo {
        view: TextBlock,
    },

    /// A pending code patch that is awaiting user approval. Mirrors the
    /// behaviour of `ExecCell` so the user sees *what* patch the
    /// model wants to apply before being prompted to approve or deny it.
    PendingPatch {
        view: TextBlock,
    },

    /// A human‑friendly rendering of the model's current plan and step
    /// statuses provided via the `update_plan` tool.
    PlanUpdate {
        view: TextBlock,
    },

    /// Result of applying a patch (success or failure) with optional output.
    PatchApplyResult {
        view: TextBlock,
    },
}
|
|
|
|
|
|
|
2025-08-10 21:32:56 -07:00
|
|
|
|
// NOTE(review): name suggests this caps the number of tool-call output lines
// shown before truncation — confirm at use sites (outside this view).
const TOOL_CALL_MAX_LINES: usize = 5;
|
2025-05-06 16:12:15 -07:00
|
|
|
|
|
2025-08-07 04:02:58 -07:00
|
|
|
|
/// Upper-case the first character of `s` (Unicode-aware, so it may expand,
/// e.g. 'ß' -> "SS") and ASCII-lower-case the remainder.
///
/// Returns an empty `String` for empty input. Non-ASCII characters in the
/// tail are left untouched (only `to_ascii_lowercase` is applied), matching
/// the previous behavior.
fn title_case(s: &str) -> String {
    // The original had both an `is_empty` guard and a `match … None` arm
    // returning `String::new()`; a single match on the first char covers
    // both cases.
    let mut chars = s.chars();
    match chars.next() {
        Some(first) => first.to_uppercase().collect::<String>() + &chars.as_str().to_ascii_lowercase(),
        None => String::new(),
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
fn pretty_provider_name(id: &str) -> String {
|
|
|
|
|
|
if id.eq_ignore_ascii_case("openai") {
|
|
|
|
|
|
"OpenAI".to_string()
|
|
|
|
|
|
} else {
|
|
|
|
|
|
title_case(id)
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
impl HistoryCell {
|
2025-07-25 01:56:40 -07:00
|
|
|
|
/// Return a cloned, plain representation of the cell's lines suitable for
/// one‑shot insertion into the terminal scrollback. Image cells are
/// represented with a simple placeholder.
/// These lines are also rendered directly by ratatui wrapped in a Paragraph.
pub(crate) fn plain_lines(&self) -> Vec<Line<'static>> {
    match self {
        // Every text-backed variant stores its rendering in a `TextBlock`;
        // clone those lines into owned ('static) lines.
        HistoryCell::WelcomeMessage { view }
        | HistoryCell::UserPrompt { view }
        | HistoryCell::BackgroundEvent { view }
        | HistoryCell::GitDiffOutput { view }
        | HistoryCell::StatusOutput { view }
        | HistoryCell::PromptsOutput { view }
        | HistoryCell::ErrorEvent { view }
        | HistoryCell::SessionInfo { view }
        | HistoryCell::CompletedMcpToolCall { view }
        | HistoryCell::PendingPatch { view }
        | HistoryCell::PlanUpdate { view }
        | HistoryCell::PatchApplyResult { view }
        | HistoryCell::ActiveMcpToolCall { view, .. } => {
            view.lines.iter().map(line_to_static).collect()
        }
        // Exec cells are rendered on demand from the command, its parsed
        // form, and the (optional) output rather than from stored lines.
        HistoryCell::Exec(ExecCell {
            command,
            parsed,
            output,
        }) => HistoryCell::exec_command_lines(command, parsed, output.as_ref()),
        // Images cannot be represented in plain scrollback text; emit a
        // placeholder plus a trailing blank line.
        HistoryCell::CompletedMcpToolCallWithImageOutput { .. } => vec![
            Line::from("tool result (image output omitted)"),
            Line::from(""),
        ],
    }
}
|
2025-08-06 12:03:45 -07:00
|
|
|
|
|
2025-08-12 17:37:28 -07:00
|
|
|
|
pub(crate) fn new_background_event(message: String) -> Self {
|
|
|
|
|
|
let mut lines: Vec<Line<'static>> = Vec::new();
|
|
|
|
|
|
lines.push(Line::from("event".dim()));
|
|
|
|
|
|
lines.extend(message.lines().map(|line| ansi_escape_line(line).dim()));
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
HistoryCell::BackgroundEvent {
|
|
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-06 12:03:45 -07:00
|
|
|
|
pub(crate) fn desired_height(&self, width: u16) -> u16 {
|
2025-08-07 18:38:39 -07:00
|
|
|
|
Paragraph::new(Text::from(self.plain_lines()))
|
|
|
|
|
|
.wrap(Wrap { trim: false })
|
|
|
|
|
|
.line_count(width)
|
|
|
|
|
|
.try_into()
|
|
|
|
|
|
.unwrap_or(0)
|
2025-08-06 12:03:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
2025-05-13 19:22:16 -07:00
|
|
|
|
/// Build the history cell that announces a newly-configured session.
///
/// Behavior by case:
/// * first event of the session — render the welcome banner (cwd shown
///   relative to `~` when possible) plus a short list of starter slash
///   commands;
/// * session model matches the configured model — render an empty
///   `SessionInfo` cell (nothing worth reporting);
/// * otherwise — render a "model changed" notice showing requested vs used.
pub(crate) fn new_session_info(
    config: &Config,
    event: SessionConfiguredEvent,
    is_first_event: bool,
) -> Self {
    // Only `model` is used here; the remaining fields are deliberately
    // discarded.
    let SessionConfiguredEvent {
        model,
        session_id: _,
        history_log_id: _,
        history_entry_count: _,
    } = event;
    if is_first_event {
        // Prefer a "~/…" rendering of the cwd; an empty relative path means
        // the cwd *is* the home directory, and `None` means it is outside
        // the home directory entirely.
        let cwd_str = match relativize_to_home(&config.cwd) {
            Some(rel) if !rel.as_os_str().is_empty() => format!("~/{}", rel.display()),
            Some(_) => "~".to_string(),
            None => config.cwd.display().to_string(),
        };

        let lines: Vec<Line<'static>> = vec![
            Line::from(vec![
                Span::raw(">_ ").dim(),
                Span::styled(
                    "You are using OpenAI Codex in",
                    Style::default().add_modifier(Modifier::BOLD),
                ),
                Span::raw(format!(" {cwd_str}")).dim(),
            ]),
            Line::from("".dim()),
            Line::from(" To get started, describe a task or try one of these commands:".dim()),
            Line::from("".dim()),
            Line::from(format!(" /init - {}", SlashCommand::Init.description()).dim()),
            Line::from(format!(" /status - {}", SlashCommand::Status.description()).dim()),
            Line::from(format!(" /diff - {}", SlashCommand::Diff.description()).dim()),
            Line::from(format!(" /prompts - {}", SlashCommand::Prompts.description()).dim()),
            Line::from("".dim()),
        ];
        HistoryCell::WelcomeMessage {
            view: TextBlock::new(lines),
        }
    } else if config.model == model {
        // Model unchanged: emit an empty cell so nothing extra is shown.
        HistoryCell::SessionInfo {
            view: TextBlock::new(Vec::new()),
        }
    } else {
        let lines = vec![
            Line::from("model changed:".magenta().bold()),
            Line::from(format!("requested: {}", config.model)),
            Line::from(format!("used: {model}")),
            Line::from(""),
        ];
        HistoryCell::SessionInfo {
            view: TextBlock::new(lines),
        }
    }
}
|
|
|
|
|
|
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
pub(crate) fn new_user_prompt(message: String) -> Self {
|
|
|
|
|
|
let mut lines: Vec<Line<'static>> = Vec::new();
|
|
|
|
|
|
lines.push(Line::from("user".cyan().bold()));
|
|
|
|
|
|
lines.extend(message.lines().map(|l| Line::from(l.to_string())));
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
|
2025-05-28 14:03:19 -07:00
|
|
|
|
HistoryCell::UserPrompt {
|
|
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-11 11:26:15 -07:00
|
|
|
|
    /// Create a cell for an exec command that has started but not yet
    /// finished; passing `None` as the output marks it as still running.
    pub(crate) fn new_active_exec_command(
        command: Vec<String>,
        parsed: Vec<ParsedCommand>,
    ) -> Self {
        HistoryCell::new_exec_cell(command, parsed, None)
    }
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
|
2025-08-11 11:26:15 -07:00
|
|
|
|
    /// Create a cell for an exec command that has finished, recording its
    /// captured `output` (exit code and streams) alongside the command.
    pub(crate) fn new_completed_exec_command(
        command: Vec<String>,
        parsed: Vec<ParsedCommand>,
        output: CommandOutput,
    ) -> Self {
        HistoryCell::new_exec_cell(command, parsed, Some(output))
    }
|
|
|
|
|
|
|
|
|
|
|
|
    /// Shared constructor behind the active/completed exec variants:
    /// `output` is `None` while the command is still running and `Some`
    /// once it has completed.
    fn new_exec_cell(
        command: Vec<String>,
        parsed: Vec<ParsedCommand>,
        output: Option<CommandOutput>,
    ) -> Self {
        HistoryCell::Exec(ExecCell {
            command,
            parsed,
            output,
        })
    }
|
|
|
|
|
|
|
|
|
|
|
|
fn exec_command_lines(
|
|
|
|
|
|
command: &[String],
|
|
|
|
|
|
parsed: &[ParsedCommand],
|
|
|
|
|
|
output: Option<&CommandOutput>,
|
|
|
|
|
|
) -> Vec<Line<'static>> {
|
2025-08-11 11:43:58 -07:00
|
|
|
|
match parsed.is_empty() {
|
|
|
|
|
|
true => HistoryCell::new_exec_command_generic(command, output),
|
|
|
|
|
|
false => HistoryCell::new_parsed_command(parsed, output),
|
2025-08-08 10:52:24 -07:00
|
|
|
|
}
|
2025-08-11 11:26:15 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
fn new_parsed_command(
|
|
|
|
|
|
parsed_commands: &[ParsedCommand],
|
|
|
|
|
|
output: Option<&CommandOutput>,
|
|
|
|
|
|
) -> Vec<Line<'static>> {
|
2025-08-13 11:10:48 -07:00
|
|
|
|
let mut lines: Vec<Line> = vec![match output {
|
|
|
|
|
|
None => Line::from("⚙︎ Working".magenta().bold()),
|
|
|
|
|
|
Some(o) if o.exit_code == 0 => Line::from("✓ Completed".green().bold()),
|
|
|
|
|
|
Some(o) => Line::from(format!("✗ Failed (exit {})", o.exit_code).red().bold()),
|
|
|
|
|
|
}];
|
2025-08-11 11:26:15 -07:00
|
|
|
|
|
|
|
|
|
|
for (i, parsed) in parsed_commands.iter().enumerate() {
|
2025-08-11 16:11:46 -07:00
|
|
|
|
let text = match parsed {
|
2025-08-11 11:26:15 -07:00
|
|
|
|
ParsedCommand::Read { name, .. } => format!("📖 {name}"),
|
|
|
|
|
|
ParsedCommand::ListFiles { cmd, path } => match path {
|
|
|
|
|
|
Some(p) => format!("📂 {p}"),
|
|
|
|
|
|
None => format!("📂 {}", shlex_join_safe(cmd)),
|
|
|
|
|
|
},
|
|
|
|
|
|
ParsedCommand::Search { query, path, cmd } => match (query, path) {
|
|
|
|
|
|
(Some(q), Some(p)) => format!("🔎 {q} in {p}"),
|
|
|
|
|
|
(Some(q), None) => format!("🔎 {q}"),
|
|
|
|
|
|
(None, Some(p)) => format!("🔎 {p}"),
|
|
|
|
|
|
(None, None) => format!("🔎 {}", shlex_join_safe(cmd)),
|
|
|
|
|
|
},
|
|
|
|
|
|
ParsedCommand::Format { .. } => "✨ Formatting".to_string(),
|
|
|
|
|
|
ParsedCommand::Test { cmd } => format!("🧪 {}", shlex_join_safe(cmd)),
|
|
|
|
|
|
ParsedCommand::Lint { cmd, .. } => format!("🧹 {}", shlex_join_safe(cmd)),
|
|
|
|
|
|
ParsedCommand::Unknown { cmd } => format!("⌨️ {}", shlex_join_safe(cmd)),
|
|
|
|
|
|
};
|
|
|
|
|
|
|
2025-08-13 19:14:03 -04:00
|
|
|
|
let first_prefix = if i == 0 { " └ " } else { " " };
|
2025-08-11 16:11:46 -07:00
|
|
|
|
for (j, line_text) in text.lines().enumerate() {
|
|
|
|
|
|
let prefix = if j == 0 { first_prefix } else { " " };
|
|
|
|
|
|
lines.push(Line::from(vec![
|
|
|
|
|
|
Span::styled(prefix, Style::default().add_modifier(Modifier::DIM)),
|
|
|
|
|
|
Span::styled(line_text.to_string(), Style::default().fg(LIGHT_BLUE)),
|
|
|
|
|
|
]));
|
|
|
|
|
|
}
|
2025-08-08 10:52:24 -07:00
|
|
|
|
}
|
2025-08-11 11:26:15 -07:00
|
|
|
|
|
|
|
|
|
|
lines.extend(output_lines(output, true, false));
|
2025-08-08 10:52:24 -07:00
|
|
|
|
lines.push(Line::from(""));
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
|
2025-08-11 11:26:15 -07:00
|
|
|
|
lines
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-11 11:26:15 -07:00
|
|
|
|
fn new_exec_command_generic(
|
|
|
|
|
|
command: &[String],
|
|
|
|
|
|
output: Option<&CommandOutput>,
|
|
|
|
|
|
) -> Vec<Line<'static>> {
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
let mut lines: Vec<Line<'static>> = Vec::new();
|
2025-08-11 11:26:15 -07:00
|
|
|
|
let command_escaped = strip_bash_lc_and_escape(command);
|
2025-08-08 10:52:24 -07:00
|
|
|
|
let mut cmd_lines = command_escaped.lines();
|
|
|
|
|
|
if let Some(first) = cmd_lines.next() {
|
|
|
|
|
|
lines.push(Line::from(vec![
|
2025-08-11 11:26:15 -07:00
|
|
|
|
"⚡ Running ".to_string().magenta(),
|
2025-08-08 10:52:24 -07:00
|
|
|
|
first.to_string().into(),
|
|
|
|
|
|
]));
|
|
|
|
|
|
} else {
|
2025-08-11 11:26:15 -07:00
|
|
|
|
lines.push(Line::from("⚡ Running".to_string().magenta()));
|
2025-08-08 10:52:24 -07:00
|
|
|
|
}
|
|
|
|
|
|
for cont in cmd_lines {
|
|
|
|
|
|
lines.push(Line::from(cont.to_string()));
|
|
|
|
|
|
}
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
|
2025-08-11 11:26:15 -07:00
|
|
|
|
lines.extend(output_lines(output, false, true));
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
|
2025-08-11 11:26:15 -07:00
|
|
|
|
lines
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
2025-07-30 10:05:40 -07:00
|
|
|
|
pub(crate) fn new_active_mcp_tool_call(invocation: McpInvocation) -> Self {
|
2025-05-06 16:12:15 -07:00
|
|
|
|
let title_line = Line::from(vec!["tool".magenta(), " running...".dim()]);
|
2025-07-30 10:05:40 -07:00
|
|
|
|
let lines: Vec<Line> = vec![
|
|
|
|
|
|
title_line,
|
|
|
|
|
|
format_mcp_invocation(invocation.clone()),
|
|
|
|
|
|
Line::from(""),
|
|
|
|
|
|
];
|
2025-05-06 16:12:15 -07:00
|
|
|
|
|
|
|
|
|
|
HistoryCell::ActiveMcpToolCall {
|
2025-05-28 14:03:19 -07:00
|
|
|
|
view: TextBlock::new(lines),
|
2025-05-06 16:12:15 -07:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-06-03 14:29:26 -07:00
|
|
|
|
/// If the first content is an image, return a new cell with the image.
|
|
|
|
|
|
/// TODO(rgwood-dd): Handle images properly even if they're not the first result.
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
fn try_new_completed_mcp_tool_call_with_image_output(
|
|
|
|
|
|
result: &Result<mcp_types::CallToolResult, String>,
|
|
|
|
|
|
) -> Option<Self> {
|
|
|
|
|
|
match result {
|
|
|
|
|
|
Ok(mcp_types::CallToolResult { content, .. }) => {
|
2025-07-19 00:09:34 -04:00
|
|
|
|
if let Some(mcp_types::ContentBlock::ImageContent(image)) = content.first() {
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
let raw_data =
|
|
|
|
|
|
match base64::engine::general_purpose::STANDARD.decode(&image.data) {
|
|
|
|
|
|
Ok(data) => data,
|
|
|
|
|
|
Err(e) => {
|
|
|
|
|
|
error!("Failed to decode image data: {e}");
|
|
|
|
|
|
return None;
|
|
|
|
|
|
}
|
|
|
|
|
|
};
|
|
|
|
|
|
let reader = match ImageReader::new(Cursor::new(raw_data)).with_guessed_format()
|
|
|
|
|
|
{
|
|
|
|
|
|
Ok(reader) => reader,
|
|
|
|
|
|
Err(e) => {
|
|
|
|
|
|
error!("Failed to guess image format: {e}");
|
|
|
|
|
|
return None;
|
|
|
|
|
|
}
|
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
let image = match reader.decode() {
|
|
|
|
|
|
Ok(image) => image,
|
|
|
|
|
|
Err(e) => {
|
|
|
|
|
|
error!("Image decoding failed: {e}");
|
|
|
|
|
|
return None;
|
|
|
|
|
|
}
|
|
|
|
|
|
};
|
|
|
|
|
|
|
2025-07-30 10:05:40 -07:00
|
|
|
|
Some(HistoryCell::CompletedMcpToolCallWithImageOutput { _image: image })
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
} else {
|
|
|
|
|
|
None
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
_ => None,
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-05-06 16:12:15 -07:00
|
|
|
|
pub(crate) fn new_completed_mcp_tool_call(
|
2025-08-11 18:32:59 -07:00
|
|
|
|
num_cols: usize,
|
2025-07-30 10:05:40 -07:00
|
|
|
|
invocation: McpInvocation,
|
|
|
|
|
|
duration: Duration,
|
2025-05-06 16:12:15 -07:00
|
|
|
|
success: bool,
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
result: Result<mcp_types::CallToolResult, String>,
|
2025-05-06 16:12:15 -07:00
|
|
|
|
) -> Self {
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
if let Some(cell) = Self::try_new_completed_mcp_tool_call_with_image_output(&result) {
|
|
|
|
|
|
return cell;
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-07-30 10:05:40 -07:00
|
|
|
|
let duration = format_duration(duration);
|
2025-05-06 16:12:15 -07:00
|
|
|
|
let status_str = if success { "success" } else { "failed" };
|
|
|
|
|
|
let title_line = Line::from(vec![
|
|
|
|
|
|
"tool".magenta(),
|
2025-06-03 14:29:26 -07:00
|
|
|
|
" ".into(),
|
|
|
|
|
|
if success {
|
|
|
|
|
|
status_str.green()
|
|
|
|
|
|
} else {
|
|
|
|
|
|
status_str.red()
|
|
|
|
|
|
},
|
2025-08-13 15:50:50 -07:00
|
|
|
|
format!(", duration: {duration}").dim(),
|
2025-05-06 16:12:15 -07:00
|
|
|
|
]);
|
|
|
|
|
|
|
|
|
|
|
|
let mut lines: Vec<Line<'static>> = Vec::new();
|
|
|
|
|
|
lines.push(title_line);
|
2025-07-30 10:05:40 -07:00
|
|
|
|
lines.push(format_mcp_invocation(invocation));
|
2025-06-03 14:29:26 -07:00
|
|
|
|
|
|
|
|
|
|
match result {
|
|
|
|
|
|
Ok(mcp_types::CallToolResult { content, .. }) => {
|
|
|
|
|
|
if !content.is_empty() {
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
|
|
|
|
|
|
for tool_call_result in content {
|
|
|
|
|
|
let line_text = match tool_call_result {
|
2025-07-19 00:09:34 -04:00
|
|
|
|
mcp_types::ContentBlock::TextContent(text) => {
|
2025-06-03 14:29:26 -07:00
|
|
|
|
format_and_truncate_tool_result(
|
|
|
|
|
|
&text.text,
|
|
|
|
|
|
TOOL_CALL_MAX_LINES,
|
2025-08-11 18:32:59 -07:00
|
|
|
|
num_cols,
|
2025-06-03 14:29:26 -07:00
|
|
|
|
)
|
|
|
|
|
|
}
|
2025-07-19 00:09:34 -04:00
|
|
|
|
mcp_types::ContentBlock::ImageContent(_) => {
|
2025-06-03 14:29:26 -07:00
|
|
|
|
// TODO show images even if they're not the first result, will require a refactor of `CompletedMcpToolCall`
|
|
|
|
|
|
"<image content>".to_string()
|
|
|
|
|
|
}
|
2025-07-19 00:09:34 -04:00
|
|
|
|
mcp_types::ContentBlock::AudioContent(_) => {
|
2025-06-03 14:29:26 -07:00
|
|
|
|
"<audio content>".to_string()
|
|
|
|
|
|
}
|
2025-07-19 00:09:34 -04:00
|
|
|
|
mcp_types::ContentBlock::EmbeddedResource(resource) => {
|
2025-06-03 14:29:26 -07:00
|
|
|
|
let uri = match resource.resource {
|
|
|
|
|
|
EmbeddedResourceResource::TextResourceContents(text) => {
|
|
|
|
|
|
text.uri
|
|
|
|
|
|
}
|
|
|
|
|
|
EmbeddedResourceResource::BlobResourceContents(blob) => {
|
|
|
|
|
|
blob.uri
|
|
|
|
|
|
}
|
|
|
|
|
|
};
|
|
|
|
|
|
format!("embedded resource: {uri}")
|
|
|
|
|
|
}
|
2025-07-19 00:09:34 -04:00
|
|
|
|
mcp_types::ContentBlock::ResourceLink(ResourceLink { uri, .. }) => {
|
|
|
|
|
|
format!("link: {uri}")
|
|
|
|
|
|
}
|
2025-06-03 14:29:26 -07:00
|
|
|
|
};
|
2025-08-13 15:50:50 -07:00
|
|
|
|
lines.push(Line::styled(
|
|
|
|
|
|
line_text,
|
|
|
|
|
|
Style::default().add_modifier(Modifier::DIM),
|
|
|
|
|
|
));
|
2025-06-03 14:29:26 -07:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
lines.push(Line::from(""));
|
2025-05-06 16:12:15 -07:00
|
|
|
|
}
|
2025-06-03 14:29:26 -07:00
|
|
|
|
Err(e) => {
|
|
|
|
|
|
lines.push(Line::from(vec![
|
|
|
|
|
|
Span::styled(
|
|
|
|
|
|
"Error: ",
|
|
|
|
|
|
Style::default().fg(Color::Red).add_modifier(Modifier::BOLD),
|
|
|
|
|
|
),
|
|
|
|
|
|
Span::raw(e),
|
|
|
|
|
|
]));
|
2025-05-06 16:12:15 -07:00
|
|
|
|
}
|
2025-06-03 14:29:26 -07:00
|
|
|
|
};
|
2025-05-06 16:12:15 -07:00
|
|
|
|
|
2025-06-03 14:29:26 -07:00
|
|
|
|
HistoryCell::CompletedMcpToolCall {
|
2025-05-28 14:03:19 -07:00
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
2025-05-06 16:12:15 -07:00
|
|
|
|
}
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implementation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
|
2025-06-26 13:03:31 -07:00
|
|
|
|
pub(crate) fn new_diff_output(message: String) -> Self {
|
|
|
|
|
|
let mut lines: Vec<Line<'static>> = Vec::new();
|
|
|
|
|
|
lines.push(Line::from("/diff".magenta()));
|
|
|
|
|
|
|
|
|
|
|
|
if message.trim().is_empty() {
|
|
|
|
|
|
lines.push(Line::from("No changes detected.".italic()));
|
|
|
|
|
|
} else {
|
|
|
|
|
|
lines.extend(message.lines().map(ansi_escape_line));
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
HistoryCell::GitDiffOutput {
|
|
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-05 23:57:52 -07:00
|
|
|
|
/// Build the `/status` cell: a multi-section summary of the current session
/// covering workspace, (optionally) account, model, and token usage.
pub(crate) fn new_status_output(config: &Config, usage: &TokenUsage) -> Self {
    let mut lines: Vec<Line<'static>> = Vec::new();
    lines.push(Line::from("/status".magenta()));

    // Look up a value from the pre-built config summary by key; returns an
    // empty string when the key is absent (callers treat empty as "hide").
    let config_entries = create_config_summary_entries(config);
    let lookup = |k: &str| -> String {
        config_entries
            .iter()
            .find(|(key, _)| *key == k)
            .map(|(_, v)| v.clone())
            .unwrap_or_default()
    };

    // 📂 Workspace
    lines.push(Line::from(vec!["📂 ".into(), "Workspace".bold()]));
    // Path (home-relative, e.g., ~/code/project)
    let cwd_str = match relativize_to_home(&config.cwd) {
        Some(rel) if !rel.as_os_str().is_empty() => format!("~/{}", rel.display()),
        // cwd *is* the home directory.
        Some(_) => "~".to_string(),
        // cwd is outside the home directory: show the absolute path.
        None => config.cwd.display().to_string(),
    };
    lines.push(Line::from(vec![" • Path: ".into(), cwd_str.into()]));
    // Approval mode (as-is)
    lines.push(Line::from(vec![
        " • Approval Mode: ".into(),
        lookup("approval").into(),
    ]));
    // Sandbox (simplified name only)
    let sandbox_name = match &config.sandbox_policy {
        SandboxPolicy::DangerFullAccess => "danger-full-access",
        SandboxPolicy::ReadOnly => "read-only",
        SandboxPolicy::WorkspaceWrite { .. } => "workspace-write",
    };
    lines.push(Line::from(vec![
        " • Sandbox: ".into(),
        sandbox_name.into(),
    ]));

    lines.push(Line::from(""));

    // 👤 Account (only if ChatGPT tokens exist), shown under the first block
    let auth_file = get_auth_file(&config.codex_home);
    if let Ok(auth) = try_read_auth_json(&auth_file) {
        if let Some(tokens) = auth.tokens.clone() {
            lines.push(Line::from(vec!["👤 ".into(), "Account".bold()]));
            lines.push(Line::from(" • Signed in with ChatGPT"));

            let info = tokens.id_token;
            if let Some(email) = &info.email {
                lines.push(Line::from(vec![" • Login: ".into(), email.clone().into()]));
            }

            match auth.openai_api_key.as_deref() {
                // An API key takes precedence over the ChatGPT plan; nudge
                // the user toward `codex login` if they want plan billing.
                Some(key) if !key.is_empty() => {
                    lines.push(Line::from(
                        " • Using API key. Run codex login to use ChatGPT plan",
                    ));
                }
                _ => {
                    let plan_text = info
                        .get_chatgpt_plan_type()
                        .map(|s| title_case(&s))
                        .unwrap_or_else(|| "Unknown".to_string());
                    lines.push(Line::from(vec![" • Plan: ".into(), plan_text.into()]));
                }
            }

            lines.push(Line::from(""));
        }
    }

    // 🧠 Model
    lines.push(Line::from(vec!["🧠 ".into(), "Model".bold()]));
    lines.push(Line::from(vec![
        " • Name: ".into(),
        config.model.clone().into(),
    ]));
    let provider_disp = pretty_provider_name(&config.model_provider_id);
    lines.push(Line::from(vec![
        " • Provider: ".into(),
        provider_disp.into(),
    ]));
    // Only show Reasoning fields if present in config summary
    let reff = lookup("reasoning effort");
    if !reff.is_empty() {
        lines.push(Line::from(vec![
            " • Reasoning Effort: ".into(),
            title_case(&reff).into(),
        ]));
    }
    let rsum = lookup("reasoning summaries");
    if !rsum.is_empty() {
        lines.push(Line::from(vec![
            " • Reasoning Summaries: ".into(),
            title_case(&rsum).into(),
        ]));
    }

    lines.push(Line::from(""));

    // 📊 Token Usage
    lines.push(Line::from(vec!["📊 ".into(), "Token Usage".bold()]));
    // Input: <input> [+ <cached> cached]
    let mut input_line_spans: Vec<Span<'static>> = vec![
        " • Input: ".into(),
        usage.non_cached_input().to_string().into(),
    ];
    if let Some(cached) = usage.cached_input_tokens {
        if cached > 0 {
            input_line_spans.push(format!(" (+ {cached} cached)").into());
        }
    }
    lines.push(Line::from(input_line_spans));
    // Output: <output>
    lines.push(Line::from(vec![
        " • Output: ".into(),
        usage.output_tokens.to_string().into(),
    ]));
    // Total: <total>
    lines.push(Line::from(vec![
        " • Total: ".into(),
        usage.blended_total().to_string().into(),
    ]));

    lines.push(Line::from(""));
    HistoryCell::StatusOutput {
        view: TextBlock::new(lines),
    }
}
|
|
|
|
|
|
|
2025-08-07 03:55:59 -07:00
|
|
|
|
pub(crate) fn new_prompts_output() -> Self {
|
|
|
|
|
|
let lines: Vec<Line<'static>> = vec![
|
|
|
|
|
|
Line::from("/prompts".magenta()),
|
|
|
|
|
|
Line::from(""),
|
|
|
|
|
|
Line::from(" 1. Explain this codebase"),
|
|
|
|
|
|
Line::from(" 2. Summarize recent commits"),
|
|
|
|
|
|
Line::from(" 3. Implement {feature}"),
|
|
|
|
|
|
Line::from(" 4. Find and fix a bug in @filename"),
|
|
|
|
|
|
Line::from(" 5. Write tests for @filename"),
|
|
|
|
|
|
Line::from(" 6. Improve documentation in @filename"),
|
|
|
|
|
|
Line::from(""),
|
|
|
|
|
|
];
|
|
|
|
|
|
HistoryCell::PromptsOutput {
|
|
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-05-08 21:46:06 -07:00
|
|
|
|
pub(crate) fn new_error_event(message: String) -> Self {
|
2025-08-07 00:46:45 -07:00
|
|
|
|
let lines: Vec<Line<'static>> =
|
|
|
|
|
|
vec![vec!["🖐 ".red().bold(), message.into()].into(), "".into()];
|
2025-05-28 14:03:19 -07:00
|
|
|
|
HistoryCell::ErrorEvent {
|
|
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
2025-05-08 21:46:06 -07:00
|
|
|
|
}
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implementation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
|
2025-08-07 00:01:38 -07:00
|
|
|
|
/// Render a user‑friendly plan update styled like a checkbox todo list.
///
/// Produces a header with a 10-cell progress bar and a `completed/total`
/// counter, an optional dimmed "note" section from the model's explanation,
/// and one checkbox line per plan step.
pub(crate) fn new_plan_update(update: UpdatePlanArgs) -> Self {
    let UpdatePlanArgs { explanation, plan } = update;

    let mut lines: Vec<Line<'static>> = Vec::new();
    // Header with progress summary
    let total = plan.len();
    let completed = plan
        .iter()
        .filter(|p| matches!(p.status, StepStatus::Completed))
        .count();

    // Progress bar width in cells; fill is rounded to the nearest cell by
    // adding total/2 before the integer division.
    let width: usize = 10;
    let filled = if total > 0 {
        (completed * width + total / 2) / total
    } else {
        0
    };
    let empty = width.saturating_sub(filled);

    let mut header: Vec<Span> = Vec::new();
    header.push(Span::raw("📋"));
    header.push(Span::styled(
        " Update plan",
        Style::default().add_modifier(Modifier::BOLD).magenta(),
    ));
    header.push(Span::raw(" ["));
    // Filled portion in green, remainder dimmed; either may be zero-width.
    if filled > 0 {
        header.push(Span::styled(
            "█".repeat(filled),
            Style::default().fg(Color::Green),
        ));
    }
    if empty > 0 {
        header.push(Span::styled(
            "░".repeat(empty),
            Style::default().add_modifier(Modifier::DIM),
        ));
    }
    header.push(Span::raw("] "));
    header.push(Span::raw(format!("{completed}/{total}")));
    lines.push(Line::from(header));

    // Optional explanation/note from the model; whitespace-only text is
    // treated the same as no explanation at all.
    if let Some(expl) = explanation.and_then(|s| {
        let t = s.trim().to_string();
        if t.is_empty() { None } else { Some(t) }
    }) {
        lines.push(Line::from("note".dim().italic()));
        for l in expl.lines() {
            lines.push(Line::from(l.to_string()).dim());
        }
    }

    // Steps styled as checkbox items
    if plan.is_empty() {
        lines.push(Line::from("(no steps provided)".dim().italic()));
    } else {
        for (idx, PlanItemArg { step, status }) in plan.into_iter().enumerate() {
            // Each status maps to (checkbox glyph, styled step text):
            // completed = green check + struck-through dim text,
            // in-progress = empty box + bold blue text,
            // pending = empty box + dim text.
            let (box_span, text_span) = match status {
                StepStatus::Completed => (
                    Span::styled("✔", Style::default().fg(Color::Green)),
                    Span::styled(
                        step,
                        Style::default().add_modifier(Modifier::CROSSED_OUT | Modifier::DIM),
                    ),
                ),
                StepStatus::InProgress => (
                    Span::raw("□"),
                    Span::styled(
                        step,
                        Style::default()
                            .fg(Color::Blue)
                            .add_modifier(Modifier::BOLD),
                    ),
                ),
                StepStatus::Pending => (
                    Span::raw("□"),
                    Span::styled(step, Style::default().add_modifier(Modifier::DIM)),
                ),
            };
            // First step gets the elbow connector; later steps get plain
            // padding. NOTE(review): the padding width here should visually
            // align with " └ " — confirm against rendered output.
            let prefix = if idx == 0 {
                Span::raw(" └ ")
            } else {
                Span::raw(" ")
            };
            lines.push(Line::from(vec![
                prefix,
                box_span,
                Span::raw(" "),
                text_span,
            ]));
        }
    }

    lines.push(Line::from(""));

    HistoryCell::PlanUpdate {
        view: TextBlock::new(lines),
    }
}
|
|
|
|
|
|
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
/// Create a new `PendingPatch` cell that lists the file‑level summary of
/// a proposed patch. The summary lines should already be formatted (e.g.
/// "A path/to/file.rs").
pub(crate) fn new_patch_event(
    event_type: PatchEventType,
    changes: HashMap<PathBuf, FileChange>,
) -> Self {
    let title = match &event_type {
        PatchEventType::ApprovalRequest => "proposed patch",
        PatchEventType::ApplyBegin {
            auto_approved: true,
        } => "✏️ Applying patch",
        PatchEventType::ApplyBegin {
            auto_approved: false,
        } => {
            // Non-auto-approved apply: emit only the title and a blank line,
            // skipping the diff summary entirely. Presumably the summary was
            // already shown with the earlier ApprovalRequest cell — TODO
            // confirm against the caller.
            let lines: Vec<Line<'static>> = vec![
                Line::from("✏️ Applying patch".magenta().bold()),
                Line::from(""),
            ];
            return Self::PendingPatch {
                view: TextBlock::new(lines),
            };
        }
    };

    // For the remaining cases, render the per-file diff summary under the
    // chosen title.
    let mut lines: Vec<Line<'static>> = create_diff_summary(title, &changes, event_type);

    lines.push(Line::from(""));

    HistoryCell::PendingPatch {
        view: TextBlock::new(lines),
    }
}
|
2025-08-05 22:44:27 -07:00
|
|
|
|
|
2025-08-06 22:25:41 -07:00
|
|
|
|
pub(crate) fn new_patch_apply_failure(stderr: String) -> Self {
|
2025-08-06 12:03:45 -07:00
|
|
|
|
let mut lines: Vec<Line<'static>> = Vec::new();
|
|
|
|
|
|
|
2025-08-06 22:25:41 -07:00
|
|
|
|
// Failure title
|
|
|
|
|
|
lines.push(Line::from("✘ Failed to apply patch".magenta().bold()));
|
2025-08-05 22:44:27 -07:00
|
|
|
|
|
2025-08-06 22:25:41 -07:00
|
|
|
|
if !stderr.trim().is_empty() {
|
2025-08-11 11:26:15 -07:00
|
|
|
|
lines.extend(output_lines(
|
|
|
|
|
|
Some(&CommandOutput {
|
|
|
|
|
|
exit_code: 1,
|
|
|
|
|
|
stdout: String::new(),
|
|
|
|
|
|
stderr,
|
|
|
|
|
|
}),
|
|
|
|
|
|
true,
|
|
|
|
|
|
true,
|
|
|
|
|
|
));
|
2025-08-06 12:03:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-05 22:44:27 -07:00
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
|
2025-08-06 12:03:45 -07:00
|
|
|
|
HistoryCell::PatchApplyResult {
|
2025-08-05 22:44:27 -07:00
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
2025-08-12 17:37:28 -07:00
|
|
|
|
|
|
|
|
|
|
pub(crate) fn new_patch_apply_success(stdout: String) -> Self {
|
|
|
|
|
|
let mut lines: Vec<Line<'static>> = Vec::new();
|
|
|
|
|
|
|
|
|
|
|
|
// Success title
|
|
|
|
|
|
lines.push(Line::from("✓ Applied patch".magenta().bold()));
|
|
|
|
|
|
|
|
|
|
|
|
if !stdout.trim().is_empty() {
|
|
|
|
|
|
let mut iter = stdout.lines();
|
|
|
|
|
|
for (i, raw) in iter.by_ref().take(TOOL_CALL_MAX_LINES).enumerate() {
|
2025-08-13 19:14:03 -04:00
|
|
|
|
let prefix = if i == 0 { " └ " } else { " " };
|
2025-08-12 17:37:28 -07:00
|
|
|
|
let s = format!("{prefix}{raw}");
|
|
|
|
|
|
lines.push(ansi_escape_line(&s).dim());
|
|
|
|
|
|
}
|
|
|
|
|
|
let remaining = iter.count();
|
|
|
|
|
|
if remaining > 0 {
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
lines.push(Line::from(format!("... +{remaining} lines")).dim());
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
|
|
|
|
|
|
HistoryCell::PatchApplyResult {
|
|
|
|
|
|
view: TextBlock::new(lines),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
2025-05-28 14:03:19 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-06 12:03:45 -07:00
|
|
|
|
impl WidgetRef for &HistoryCell {
|
|
|
|
|
|
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
|
2025-08-07 18:38:39 -07:00
|
|
|
|
Paragraph::new(Text::from(self.plain_lines()))
|
|
|
|
|
|
.wrap(Wrap { trim: false })
|
|
|
|
|
|
.render(area, buf);
|
2025-08-06 12:03:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-08-11 11:26:15 -07:00
|
|
|
|
fn output_lines(
|
|
|
|
|
|
output: Option<&CommandOutput>,
|
|
|
|
|
|
only_err: bool,
|
|
|
|
|
|
include_angle_pipe: bool,
|
|
|
|
|
|
) -> Vec<Line<'static>> {
|
|
|
|
|
|
let CommandOutput {
|
|
|
|
|
|
exit_code,
|
|
|
|
|
|
stdout,
|
|
|
|
|
|
stderr,
|
|
|
|
|
|
} = match output {
|
|
|
|
|
|
Some(output) if only_err && output.exit_code == 0 => return vec![],
|
|
|
|
|
|
Some(output) => output,
|
|
|
|
|
|
None => return vec![],
|
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
let src = if *exit_code == 0 { stdout } else { stderr };
|
|
|
|
|
|
let lines: Vec<&str> = src.lines().collect();
|
|
|
|
|
|
let total = lines.len();
|
|
|
|
|
|
let limit = TOOL_CALL_MAX_LINES;
|
|
|
|
|
|
|
|
|
|
|
|
let mut out = Vec::new();
|
|
|
|
|
|
|
|
|
|
|
|
let head_end = total.min(limit);
|
|
|
|
|
|
for (i, raw) in lines[..head_end].iter().enumerate() {
|
|
|
|
|
|
let mut line = ansi_escape_line(raw);
|
|
|
|
|
|
let prefix = if i == 0 && include_angle_pipe {
|
2025-08-13 19:14:03 -04:00
|
|
|
|
" └ "
|
2025-08-11 11:26:15 -07:00
|
|
|
|
} else {
|
|
|
|
|
|
" "
|
|
|
|
|
|
};
|
|
|
|
|
|
line.spans.insert(0, prefix.into());
|
|
|
|
|
|
line.spans.iter_mut().for_each(|span| {
|
|
|
|
|
|
span.style = span.style.add_modifier(Modifier::DIM);
|
|
|
|
|
|
});
|
|
|
|
|
|
out.push(line);
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// If we will ellipsize less than the limit, just show it.
|
|
|
|
|
|
let show_ellipsis = total > 2 * limit;
|
|
|
|
|
|
if show_ellipsis {
|
|
|
|
|
|
let omitted = total - 2 * limit;
|
|
|
|
|
|
out.push(Line::from(format!("… +{omitted} lines")));
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
let tail_start = if show_ellipsis {
|
|
|
|
|
|
total - limit
|
|
|
|
|
|
} else {
|
|
|
|
|
|
head_end
|
|
|
|
|
|
};
|
|
|
|
|
|
for raw in lines[tail_start..].iter() {
|
|
|
|
|
|
let mut line = ansi_escape_line(raw);
|
|
|
|
|
|
line.spans.insert(0, " ".into());
|
|
|
|
|
|
line.spans.iter_mut().for_each(|span| {
|
2025-08-06 22:25:41 -07:00
|
|
|
|
span.style = span.style.add_modifier(Modifier::DIM);
|
|
|
|
|
|
});
|
|
|
|
|
|
out.push(line);
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
out
|
feat: initial import of Rust implementation of Codex CLI in codex-rs/ (#629)
As stated in `codex-rs/README.md`:
Today, Codex CLI is written in TypeScript and requires Node.js 22+ to
run it. For a number of users, this runtime requirement inhibits
adoption: they would be better served by a standalone executable. As
maintainers, we want Codex to run efficiently in a wide range of
environments with minimal overhead. We also want to take advantage of
operating system-specific APIs to provide better sandboxing, where
possible.
To that end, we are moving forward with a Rust implementation of Codex
CLI contained in this folder, which has the following benefits:
- The CLI compiles to small, standalone, platform-specific binaries.
- Can make direct, native calls to
[seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and
[landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in
order to support sandboxing on Linux.
- No runtime garbage collection, resulting in lower memory consumption
and better, more predictable performance.
Currently, the Rust implementation is materially behind the TypeScript
implementation in functionality, so continue to use the TypeScript
implmentation for the time being. We will publish native executables via
GitHub Releases as soon as we feel the Rust version is usable.
2025-04-24 13:31:40 -07:00
|
|
|
|
}
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
|
2025-07-30 10:05:40 -07:00
|
|
|
|
fn format_mcp_invocation<'a>(invocation: McpInvocation) -> Line<'a> {
|
|
|
|
|
|
let args_str = invocation
|
|
|
|
|
|
.arguments
|
|
|
|
|
|
.as_ref()
|
|
|
|
|
|
.map(|v| {
|
|
|
|
|
|
// Use compact form to keep things short but readable.
|
|
|
|
|
|
serde_json::to_string(v).unwrap_or_else(|_| v.to_string())
|
|
|
|
|
|
})
|
|
|
|
|
|
.unwrap_or_default();
|
|
|
|
|
|
|
|
|
|
|
|
let invocation_spans = vec![
|
|
|
|
|
|
Span::styled(invocation.server.clone(), Style::default().fg(Color::Blue)),
|
|
|
|
|
|
Span::raw("."),
|
|
|
|
|
|
Span::styled(invocation.tool.clone(), Style::default().fg(Color::Blue)),
|
|
|
|
|
|
Span::raw("("),
|
2025-08-13 15:50:50 -07:00
|
|
|
|
Span::styled(args_str, Style::default().add_modifier(Modifier::DIM)),
|
2025-07-30 10:05:40 -07:00
|
|
|
|
Span::raw(")"),
|
|
|
|
|
|
];
|
|
|
|
|
|
Line::from(invocation_spans)
|
fix: introduce ResponseInputItem::McpToolCallOutput variant (#1151)
The output of an MCP server tool call can be one of several types, but
to date, we treated all outputs as text by showing the serialized JSON
as the "tool output" in Codex:
https://github.com/openai/codex/blob/25a9949c49194d5a64de54a11bcc5b4724ac9bd5/codex-rs/mcp-types/src/lib.rs#L96-L101
This PR adds support for the `ImageContent` variant so we can now
display an image output from an MCP tool call.
In making this change, we introduce a new
`ResponseInputItem::McpToolCallOutput` variant so that we can work with
the `mcp_types::CallToolResult` directly when the function call is made
to an MCP server.
Though arguably the more significant change is the introduction of
`HistoryCell::CompletedMcpToolCallWithImageOutput`, which is a cell that
uses `ratatui_image` to render an image into the terminal. To support
this, we introduce `ImageRenderCache`, cache a
`ratatui_image::picker::Picker`, and `ensure_image_cache()` to cache the
appropriate scaled image data and dimensions based on the current
terminal size.
To test, I created a minimal `package.json`:
```json
{
"name": "kitty-mcp",
"version": "1.0.0",
"type": "module",
"description": "MCP that returns image of kitty",
"main": "index.js",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0"
}
}
```
with the following `index.js` to define the MCP server:
```js
#!/usr/bin/env node
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
const IMAGE_URI = "image://Ada.png";
const server = new McpServer({
name: "Demo",
version: "1.0.0",
});
server.tool(
"get-cat-image",
"If you need a cat image, this tool will provide one.",
async () => ({
content: [
{ type: "image", data: await getAdaPngBase64(), mimeType: "image/png" },
],
})
);
server.resource("Ada the Cat", IMAGE_URI, async (uri) => {
const base64Image = await getAdaPngBase64();
return {
contents: [
{
uri: uri.href,
mimeType: "image/png",
blob: base64Image,
},
],
};
});
async function getAdaPngBase64() {
const __dirname = new URL(".", import.meta.url).pathname;
// From https://github.com/benjajaja/ratatui-image/blob/9705ce2c59ec669abbce2924cbfd1f5ae22c9860/assets/Ada.png
const filePath = join(__dirname, "Ada.png");
const imageData = await readFile(filePath);
const base64Image = imageData.toString("base64");
return base64Image;
}
const transport = new StdioServerTransport();
await server.connect(transport);
```
With the local changes from this PR, I added the following to my
`config.toml`:
```toml
[mcp_servers.kitty]
command = "node"
args = ["/Users/mbolin/code/kitty-mcp/index.js"]
```
Running the TUI from source:
```
cargo run --bin codex -- --model o3 'I need a picture of a cat'
```
I get:
<img width="732" alt="image"
src="https://github.com/user-attachments/assets/bf80b721-9ca0-4d81-aec7-77d6899e2869"
/>
Now, that said, I have only tested in iTerm and there is definitely some
funny business with getting an accurate character-to-pixel ratio
(sometimes the `CompletedMcpToolCallWithImageOutput` thinks it needs 10
rows to render instead of 4), so there is still work to be done here.
2025-05-28 19:03:17 -07:00
|
|
|
|
}
|
2025-08-11 11:26:15 -07:00
|
|
|
|
|
|
|
|
|
|
fn shlex_join_safe(command: &[String]) -> String {
|
|
|
|
|
|
match shlex::try_join(command.iter().map(|s| s.as_str())) {
|
|
|
|
|
|
Ok(cmd) => cmd,
|
|
|
|
|
|
Err(_) => command.join(" "),
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
2025-08-11 16:11:46 -07:00
|
|
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;

    // A multi-line command argument (here, a printf payload containing an
    // embedded "\n") must be rendered as separate display lines, each
    // starting at the left edge with the expected gutter prefix rather than
    // continuing mid-line.
    #[test]
    fn parsed_command_with_newlines_starts_each_line_at_origin() {
        let parsed = vec![ParsedCommand::Unknown {
            cmd: vec!["printf".into(), "foo\nbar".into()],
        }];
        let lines = HistoryCell::exec_command_lines(&[], &parsed, None);
        // Header line plus at least one line per embedded newline.
        assert!(lines.len() >= 3);
        // First content line carries the angle-pipe gutter...
        assert_eq!(lines[1].spans[0].content, " └ ");
        // ...and continuation lines are aligned with a plain-space prefix.
        assert_eq!(lines[2].spans[0].content, " ");
    }
}
|