Initial commit

Signed-off-by: Ilan Bigio <ilan@openai.com>

commit 59a180ddec
Ilan Bigio
2025-04-16 12:56:08 -04:00
163 changed files with 30587 additions and 0 deletions

File diff suppressed because it is too large.


@@ -0,0 +1,644 @@
import fs from "fs";
import path from "path";
// -----------------------------------------------------------------------------
// Types & Models
// -----------------------------------------------------------------------------
export enum ActionType {
ADD = "add",
DELETE = "delete",
UPDATE = "update",
}
export interface FileChange {
type: ActionType;
old_content?: string | null;
new_content?: string | null;
move_path?: string | null;
}
export interface Commit {
changes: Record<string, FileChange>;
}
export function assemble_changes(
orig: Record<string, string | null>,
updatedFiles: Record<string, string | null>,
): Commit {
const commit: Commit = { changes: {} };
for (const [p, newContent] of Object.entries(updatedFiles)) {
const oldContent = orig[p];
if (oldContent === newContent) {
continue;
}
if (oldContent !== undefined && newContent !== undefined) {
commit.changes[p] = {
type: ActionType.UPDATE,
old_content: oldContent,
new_content: newContent,
};
} else if (newContent !== undefined) {
commit.changes[p] = {
type: ActionType.ADD,
new_content: newContent,
};
} else if (oldContent !== undefined) {
commit.changes[p] = {
type: ActionType.DELETE,
old_content: oldContent,
};
} else {
throw new Error("Unexpected state in assemble_changes");
}
}
return commit;
}
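// Illustrative usage (inputs assumed for the example): an edited file maps to
// an UPDATE entry and a brand-new path maps to an ADD entry.
//
//   const commit = assemble_changes(
//     { "a.txt": "hello" },
//     { "a.txt": "hello world", "b.txt": "new file" },
//   );
//   // commit.changes["a.txt"].type === ActionType.UPDATE
//   // commit.changes["b.txt"].type === ActionType.ADD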
// -----------------------------------------------------------------------------
// Patch-related structures
// -----------------------------------------------------------------------------
export interface Chunk {
orig_index: number; // line index of the first line in the original file
del_lines: Array<string>;
ins_lines: Array<string>;
}
export interface PatchAction {
type: ActionType;
new_file?: string | null;
chunks: Array<Chunk>;
move_path?: string | null;
}
export interface Patch {
actions: Record<string, PatchAction>;
}
export class DiffError extends Error {}
// -----------------------------------------------------------------------------
// Parser (patch text -> Patch)
// -----------------------------------------------------------------------------
class Parser {
current_files: Record<string, string>;
lines: Array<string>;
index = 0;
patch: Patch = { actions: {} };
fuzz = 0;
constructor(currentFiles: Record<string, string>, lines: Array<string>) {
this.current_files = currentFiles;
this.lines = lines;
}
private is_done(prefixes?: Array<string>): boolean {
if (this.index >= this.lines.length) {
return true;
}
if (
prefixes &&
prefixes.some((p) => this.lines[this.index]!.startsWith(p))
) {
return true;
}
return false;
}
private startswith(prefix: string | Array<string>): boolean {
const prefixes = Array.isArray(prefix) ? prefix : [prefix];
return prefixes.some((p) => this.lines[this.index]!.startsWith(p));
}
private read_str(prefix = "", returnEverything = false): string {
if (this.index >= this.lines.length) {
throw new DiffError(`Index: ${this.index} >= ${this.lines.length}`);
}
if (this.lines[this.index]!.startsWith(prefix)) {
const text = returnEverything
? this.lines[this.index]
: this.lines[this.index]!.slice(prefix.length);
this.index += 1;
return text ?? "";
}
return "";
}
parse(): void {
while (!this.is_done(["*** End Patch"])) {
let path = this.read_str("*** Update File: ");
if (path) {
if (this.patch.actions[path]) {
throw new DiffError(`Update File Error: Duplicate Path: ${path}`);
}
const moveTo = this.read_str("*** Move to: ");
if (!(path in this.current_files)) {
throw new DiffError(`Update File Error: Missing File: ${path}`);
}
const text = this.current_files[path];
const action = this.parse_update_file(text ?? "");
action.move_path = moveTo || undefined;
this.patch.actions[path] = action;
continue;
}
path = this.read_str("*** Delete File: ");
if (path) {
if (this.patch.actions[path]) {
throw new DiffError(`Delete File Error: Duplicate Path: ${path}`);
}
if (!(path in this.current_files)) {
throw new DiffError(`Delete File Error: Missing File: ${path}`);
}
this.patch.actions[path] = { type: ActionType.DELETE, chunks: [] };
continue;
}
path = this.read_str("*** Add File: ");
if (path) {
if (this.patch.actions[path]) {
throw new DiffError(`Add File Error: Duplicate Path: ${path}`);
}
if (path in this.current_files) {
throw new DiffError(`Add File Error: File already exists: ${path}`);
}
this.patch.actions[path] = this.parse_add_file();
continue;
}
throw new DiffError(`Unknown Line: ${this.lines[this.index]}`);
}
if (!this.startswith("*** End Patch")) {
throw new DiffError("Missing End Patch");
}
this.index += 1;
}
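// For reference, a minimal envelope this parser accepts looks like the
// following (file path and hunk content are illustrative):
//
//   *** Begin Patch
//   *** Update File: src/app.ts
//   @@ function main()
//   -  console.log("old");
//   +  console.log("new");
//   *** End Patch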
private parse_update_file(text: string): PatchAction {
const action: PatchAction = { type: ActionType.UPDATE, chunks: [] };
const fileLines = text.split("\n");
let index = 0;
while (
!this.is_done([
"*** End Patch",
"*** Update File:",
"*** Delete File:",
"*** Add File:",
"*** End of File",
])
) {
const defStr = this.read_str("@@ ");
let sectionStr = "";
if (!defStr && this.lines[this.index] === "@@") {
sectionStr = this.lines[this.index]!;
this.index += 1;
}
if (!(defStr || sectionStr || index === 0)) {
throw new DiffError(`Invalid Line:\n${this.lines[this.index]}`);
}
if (defStr.trim()) {
let found = false;
if (!fileLines.slice(0, index).some((s) => s === defStr)) {
for (let i = index; i < fileLines.length; i++) {
if (fileLines[i] === defStr) {
index = i + 1;
found = true;
break;
}
}
}
if (
!found &&
!fileLines.slice(0, index).some((s) => s.trim() === defStr.trim())
) {
for (let i = index; i < fileLines.length; i++) {
if (fileLines[i]!.trim() === defStr.trim()) {
index = i + 1;
this.fuzz += 1;
found = true;
break;
}
}
}
}
const [nextChunkContext, chunks, endPatchIndex, eof] = peek_next_section(
this.lines,
this.index,
);
const [newIndex, fuzz] = find_context(
fileLines,
nextChunkContext,
index,
eof,
);
if (newIndex === -1) {
const ctxText = nextChunkContext.join("\n");
if (eof) {
throw new DiffError(`Invalid EOF Context ${index}:\n${ctxText}`);
} else {
throw new DiffError(`Invalid Context ${index}:\n${ctxText}`);
}
}
this.fuzz += fuzz;
for (const ch of chunks) {
ch.orig_index += newIndex;
action.chunks.push(ch);
}
index = newIndex + nextChunkContext.length;
this.index = endPatchIndex;
}
return action;
}
private parse_add_file(): PatchAction {
const lines: Array<string> = [];
while (
!this.is_done([
"*** End Patch",
"*** Update File:",
"*** Delete File:",
"*** Add File:",
])
) {
const s = this.read_str();
if (!s.startsWith("+")) {
throw new DiffError(`Invalid Add File Line: ${s}`);
}
lines.push(s.slice(1));
}
return {
type: ActionType.ADD,
new_file: lines.join("\n"),
chunks: [],
};
}
}
function find_context_core(
lines: Array<string>,
context: Array<string>,
start: number,
): [number, number] {
if (context.length === 0) {
return [start, 0];
}
for (let i = start; i < lines.length; i++) {
if (lines.slice(i, i + context.length).join("\n") === context.join("\n")) {
return [i, 0];
}
}
for (let i = start; i < lines.length; i++) {
if (
lines
.slice(i, i + context.length)
.map((s) => s.trimEnd())
.join("\n") === context.map((s) => s.trimEnd()).join("\n")
) {
return [i, 1];
}
}
for (let i = start; i < lines.length; i++) {
if (
lines
.slice(i, i + context.length)
.map((s) => s.trim())
.join("\n") === context.map((s) => s.trim()).join("\n")
) {
return [i, 100];
}
}
return [-1, 0];
}
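// find_context() (below) scores each match by how much normalisation was
// needed: 0 for an exact match, 1 when trailing whitespace had to be ignored,
// 100 when all surrounding whitespace had to be ignored, and +10000 when an
// end-of-file context only matched away from the end. The total is surfaced
// as `fuzz` so callers can gauge how loosely the patch applied.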
function find_context(
lines: Array<string>,
context: Array<string>,
start: number,
eof: boolean,
): [number, number] {
if (eof) {
let [newIndex, fuzz] = find_context_core(
lines,
context,
lines.length - context.length,
);
if (newIndex !== -1) {
return [newIndex, fuzz];
}
[newIndex, fuzz] = find_context_core(lines, context, start);
return [newIndex, fuzz + 10000];
}
return find_context_core(lines, context, start);
}
function peek_next_section(
lines: Array<string>,
initialIndex: number,
): [Array<string>, Array<Chunk>, number, boolean] {
let index = initialIndex;
const old: Array<string> = [];
let delLines: Array<string> = [];
let insLines: Array<string> = [];
const chunks: Array<Chunk> = [];
let mode: "keep" | "add" | "delete" = "keep";
while (index < lines.length) {
const s = lines[index]!;
if (
s.startsWith("@@") ||
s.startsWith("*** End Patch") ||
s.startsWith("*** Update File:") ||
s.startsWith("*** Delete File:") ||
s.startsWith("*** Add File:") ||
s.startsWith("*** End of File")
) {
break;
}
if (s === "***") {
break;
}
if (s.startsWith("***")) {
throw new DiffError(`Invalid Line: ${s}`);
}
index += 1;
const lastMode: "keep" | "add" | "delete" = mode;
let line = s;
if (line[0] === "+") {
mode = "add";
} else if (line[0] === "-") {
mode = "delete";
} else if (line[0] === " ") {
mode = "keep";
} else {
// Tolerate invalid lines where the leading whitespace is missing. This is necessary as
// the model sometimes doesn't fully adhere to the spec and returns lines without leading
// whitespace for context lines.
mode = "keep";
line = " " + line;
// TODO: Re-enable strict mode.
// throw new DiffError(`Invalid Line: ${line}`)
}
line = line.slice(1);
if (mode === "keep" && lastMode !== mode) {
if (insLines.length || delLines.length) {
chunks.push({
orig_index: old.length - delLines.length,
del_lines: delLines,
ins_lines: insLines,
});
}
delLines = [];
insLines = [];
}
if (mode === "delete") {
delLines.push(line);
old.push(line);
} else if (mode === "add") {
insLines.push(line);
} else {
old.push(line);
}
}
if (insLines.length || delLines.length) {
chunks.push({
orig_index: old.length - delLines.length,
del_lines: delLines,
ins_lines: insLines,
});
}
if (index < lines.length && lines[index] === "*** End of File") {
index += 1;
return [old, chunks, index, true];
}
return [old, chunks, index, false];
}
// -----------------------------------------------------------------------------
// High-level helpers
// -----------------------------------------------------------------------------
export function text_to_patch(
text: string,
orig: Record<string, string>,
): [Patch, number] {
const lines = text.trim().split("\n");
if (
lines.length < 2 ||
!(lines[0] ?? "").startsWith("*** Begin Patch") ||
lines[lines.length - 1] !== "*** End Patch"
) {
throw new DiffError("Invalid patch text");
}
const parser = new Parser(orig, lines);
parser.index = 1;
parser.parse();
return [parser.patch, parser.fuzz];
}
export function identify_files_needed(text: string): Array<string> {
const lines = text.trim().split("\n");
const result = new Set<string>();
for (const line of lines) {
if (line.startsWith("*** Update File: ")) {
result.add(line.slice("*** Update File: ".length));
}
if (line.startsWith("*** Delete File: ")) {
result.add(line.slice("*** Delete File: ".length));
}
}
return [...result];
}
export function identify_files_added(text: string): Array<string> {
const lines = text.trim().split("\n");
const result = new Set<string>();
for (const line of lines) {
if (line.startsWith("*** Add File: ")) {
result.add(line.slice("*** Add File: ".length));
}
}
return [...result];
}
function _get_updated_file(
text: string,
action: PatchAction,
path: string,
): string {
if (action.type !== ActionType.UPDATE) {
throw new Error("Expected UPDATE action");
}
const origLines = text.split("\n");
const destLines: Array<string> = [];
let origIndex = 0;
for (const chunk of action.chunks) {
if (chunk.orig_index > origLines.length) {
throw new DiffError(
`${path}: chunk.orig_index ${chunk.orig_index} > len(lines) ${origLines.length}`,
);
}
if (origIndex > chunk.orig_index) {
throw new DiffError(
`${path}: orig_index ${origIndex} > chunk.orig_index ${chunk.orig_index}`,
);
}
destLines.push(...origLines.slice(origIndex, chunk.orig_index));
const delta = chunk.orig_index - origIndex;
origIndex += delta;
// inserted lines
if (chunk.ins_lines.length) {
for (const l of chunk.ins_lines) {
destLines.push(l);
}
}
origIndex += chunk.del_lines.length;
}
destLines.push(...origLines.slice(origIndex));
return destLines.join("\n");
}
export function patch_to_commit(
patch: Patch,
orig: Record<string, string>,
): Commit {
const commit: Commit = { changes: {} };
for (const [pathKey, action] of Object.entries(patch.actions)) {
if (action.type === ActionType.DELETE) {
commit.changes[pathKey] = {
type: ActionType.DELETE,
old_content: orig[pathKey],
};
} else if (action.type === ActionType.ADD) {
commit.changes[pathKey] = {
type: ActionType.ADD,
new_content: action.new_file ?? "",
};
} else if (action.type === ActionType.UPDATE) {
const newContent = _get_updated_file(orig[pathKey]!, action, pathKey);
commit.changes[pathKey] = {
type: ActionType.UPDATE,
old_content: orig[pathKey],
new_content: newContent,
move_path: action.move_path ?? undefined,
};
}
}
return commit;
}
// -----------------------------------------------------------------------------
// Filesystem helpers for Node environment
// -----------------------------------------------------------------------------
export function load_files(
paths: Array<string>,
openFn: (p: string) => string,
): Record<string, string> {
const orig: Record<string, string> = {};
for (const p of paths) {
try {
orig[p] = openFn(p);
} catch {
// Convert any file read error into a DiffError so that callers
// consistently receive DiffError for patch-related failures.
throw new DiffError(`File not found: ${p}`);
}
}
return orig;
}
export function apply_commit(
commit: Commit,
writeFn: (p: string, c: string) => void,
removeFn: (p: string) => void,
): void {
for (const [p, change] of Object.entries(commit.changes)) {
if (change.type === ActionType.DELETE) {
removeFn(p);
} else if (change.type === ActionType.ADD) {
writeFn(p, change.new_content ?? "");
} else if (change.type === ActionType.UPDATE) {
if (change.move_path) {
writeFn(change.move_path, change.new_content ?? "");
removeFn(p);
} else {
writeFn(p, change.new_content ?? "");
}
}
}
}
export function process_patch(
text: string,
openFn: (p: string) => string,
writeFn: (p: string, c: string) => void,
removeFn: (p: string) => void,
): string {
if (!text.startsWith("*** Begin Patch")) {
throw new DiffError("Patch must start with *** Begin Patch");
}
const paths = identify_files_needed(text);
const orig = load_files(paths, openFn);
const [patch, _fuzz] = text_to_patch(text, orig);
const commit = patch_to_commit(patch, orig);
apply_commit(commit, writeFn, removeFn);
return "Done!";
}
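// Illustrative sketch: process_patch is file-system agnostic, so it can be
// driven entirely in memory by passing custom callbacks (paths and contents
// below are assumptions for the example):
//
//   const files: Record<string, string> = { "a.txt": "old\n" };
//   process_patch(
//     [
//       "*** Begin Patch",
//       "*** Update File: a.txt",
//       "@@",
//       "-old",
//       "+new",
//       "*** End Patch",
//     ].join("\n"),
//     (p) => files[p]!,
//     (p, c) => (files[p] = c),
//     (p) => delete files[p],
//   );
//   // files["a.txt"] is now "new\n"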
// -----------------------------------------------------------------------------
// Default filesystem implementations
// -----------------------------------------------------------------------------
function open_file(p: string): string {
return fs.readFileSync(p, "utf8");
}
function write_file(p: string, content: string): void {
if (path.isAbsolute(p)) {
throw new DiffError("We do not support absolute paths.");
}
const parent = path.dirname(p);
if (parent !== ".") {
fs.mkdirSync(parent, { recursive: true });
}
fs.writeFileSync(p, content, "utf8");
}
function remove_file(p: string): void {
fs.unlinkSync(p);
}
// -----------------------------------------------------------------------------
// CLI mode. Not exported, executed only if run directly.
// -----------------------------------------------------------------------------
if (import.meta.url === `file://${process.argv[1]}`) {
let patchText = "";
process.stdin.setEncoding("utf8");
process.stdin.on("data", (chunk) => (patchText += chunk));
process.stdin.on("end", () => {
if (!patchText) {
// eslint-disable-next-line no-console
console.error("Please pass patch text through stdin");
process.exit(1);
}
try {
const result = process_patch(
patchText,
open_file,
write_file,
remove_file,
);
// eslint-disable-next-line no-console
console.log(result);
} catch (err: unknown) {
// eslint-disable-next-line no-console
console.error(err instanceof Error ? err.message : String(err));
process.exit(1);
}
});
}


@@ -0,0 +1,67 @@
import type { ExecInput, ExecResult } from "./sandbox/interface.js";
import type { SpawnOptions } from "child_process";
import { process_patch } from "./apply-patch.js";
import { SandboxType } from "./sandbox/interface.js";
import { execWithSeatbelt } from "./sandbox/macos-seatbelt.js";
import { exec as rawExec } from "./sandbox/raw-exec.js";
import { formatCommandForDisplay } from "@lib/format-command.js";
import fs from "fs";
import os from "os";
const DEFAULT_TIMEOUT_MS = 10_000; // 10 seconds
/**
* This function should never return a rejected promise: errors should be
* mapped to a non-zero exit code and the error message should be in stderr.
*/
export function exec(
{ cmd, workdir, timeoutInMillis }: ExecInput,
sandbox: SandboxType,
abortSignal?: AbortSignal,
): Promise<ExecResult> {
// This is a temporary measure to understand what the common base commands
// are until we start persisting and uploading rollouts.
const execForSandbox =
sandbox === SandboxType.MACOS_SEATBELT ? execWithSeatbelt : rawExec;
const opts: SpawnOptions = {
timeout: timeoutInMillis || DEFAULT_TIMEOUT_MS,
...(workdir ? { cwd: workdir } : {}),
};
const writableRoots = [process.cwd(), os.tmpdir()];
return execForSandbox(cmd, opts, writableRoots, abortSignal);
}
export function execApplyPatch(patchText: string): ExecResult {
// This is a temporary measure to understand what the common base commands
// are until we start persisting and uploading rollouts.
try {
const result = process_patch(
patchText,
(p) => fs.readFileSync(p, "utf8"),
(p, c) => fs.writeFileSync(p, c, "utf8"),
(p) => fs.unlinkSync(p),
);
return {
stdout: result,
stderr: "",
exitCode: 0,
};
} catch (error: unknown) {
// @ts-expect-error error might not be an object or have a message property.
const stderr = String(error.message ?? error);
return {
stdout: "",
stderr: stderr,
exitCode: 1,
};
}
}
export function getBaseCmd(cmd: Array<string>): string {
const formattedCommand = formatCommandForDisplay(cmd);
return formattedCommand.split(" ")[0] || cmd[0] || "<unknown>";
}


@@ -0,0 +1,315 @@
import type { CommandConfirmation } from "./agent-loop.js";
import type { AppConfig } from "../config.js";
import type { ExecInput } from "./sandbox/interface.js";
import type { ApplyPatchCommand, ApprovalPolicy } from "@lib/approvals.js";
import type { ResponseInputItem } from "openai/resources/responses/responses.mjs";
import { exec, execApplyPatch } from "./exec.js";
import { isLoggingEnabled, log } from "./log.js";
import { ReviewDecision } from "./review.js";
import { FullAutoErrorMode } from "../auto-approval-mode.js";
import { SandboxType } from "./sandbox/interface.js";
import { canAutoApprove } from "@lib/approvals.js";
import { formatCommandForDisplay } from "@lib/format-command.js";
import { access } from "fs/promises";
// ---------------------------------------------------------------------------
// Session-level cache of commands that the user has chosen to always approve.
//
// The values are derived via `deriveCommandKey()` which intentionally ignores
// volatile arguments (for example the patch text passed to `apply_patch`).
// Storing *generalised* keys means that once a user selects "always approve"
// for a given class of command we will genuinely stop prompting them for
// subsequent, equivalent invocations during the same CLI session.
// ---------------------------------------------------------------------------
const alwaysApprovedCommands = new Set<string>();
// ---------------------------------------------------------------------------
// Helper: Given the argv-style representation of a command, return a stable
// string key that can be used for equality checks.
//
// The key space purposefully abstracts away parts of the command line that
// are expected to change between invocations while still retaining enough
// information to differentiate *meaningfully distinct* operations. See the
// extensive inline documentation for details.
// ---------------------------------------------------------------------------
function deriveCommandKey(cmd: Array<string>): string {
// pull off only the bits you care about
const [
maybeShell,
maybeFlag,
coreInvocation,
/* …ignore the rest… */
] = cmd;
if (coreInvocation?.startsWith("apply_patch")) {
return "apply_patch";
}
if (maybeShell === "bash" && maybeFlag === "-lc") {
// If the command was invoked through `bash -lc "<script>"` we extract the
// base program name from the script string.
const script = coreInvocation ?? "";
return script.split(/\s+/)[0] || "bash";
}
// For every other command we fall back to using only the program name (the
// first argv element). This guarantees we always return a *string* even if
// `coreInvocation` is undefined.
if (coreInvocation) {
return coreInvocation.split(/\s+/)[0]!;
}
return JSON.stringify(cmd);
}
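// Illustrative examples of derived keys:
//   ["bash", "-lc", "apply_patch <<'EOF' ..."]  -> "apply_patch"
//   ["bash", "-lc", "pnpm install"]             -> "pnpm"
//   ["git", "status"]                           -> '["git","status"]' (JSON fallback)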
type HandleExecCommandResult = {
outputText: string;
metadata: Record<string, unknown>;
additionalItems?: Array<ResponseInputItem>;
};
export async function handleExecCommand(
args: ExecInput,
config: AppConfig,
policy: ApprovalPolicy,
getCommandConfirmation: (
command: Array<string>,
applyPatch: ApplyPatchCommand | undefined,
) => Promise<CommandConfirmation>,
abortSignal?: AbortSignal,
): Promise<HandleExecCommandResult> {
const { cmd: command } = args;
const key = deriveCommandKey(command);
// 1) If the user has already said "always approve", skip the policy check
// and do not sandbox.
if (alwaysApprovedCommands.has(key)) {
return execCommand(
args,
/* applyPatch */ undefined,
/* runInSandbox */ false,
abortSignal,
).then(convertSummaryToResult);
}
// 2) Otherwise fall back to the normal policy
// `canAutoApprove` now requires the list of writable roots that the command
// is allowed to modify. For the CLI we conservatively pass the current
// working directory so that edits are constrained to the project root. If
// the caller wishes to broaden or restrict the set it can be made
// configurable in the future.
const safety = canAutoApprove(command, policy, [process.cwd()]);
let runInSandbox: boolean;
switch (safety.type) {
case "ask-user": {
const review = await askUserPermission(
args,
safety.applyPatch,
getCommandConfirmation,
);
if (review != null) {
return review;
}
runInSandbox = false;
break;
}
case "auto-approve": {
runInSandbox = safety.runInSandbox;
break;
}
case "reject": {
return {
outputText: "aborted",
metadata: {
error: "command rejected",
reason: "Command rejected by auto-approval system.",
},
};
}
}
const { applyPatch } = safety;
const summary = await execCommand(
args,
applyPatch,
runInSandbox,
abortSignal,
);
// If the operation was aborted in the meantime, propagate the cancellation
// upward by returning an empty (noop) result so that the agent loop will
// exit cleanly without emitting spurious output.
if (abortSignal?.aborted) {
return {
outputText: "",
metadata: {},
};
}
if (
summary.exitCode !== 0 &&
runInSandbox &&
// Default: If the user has configured to ignore and continue,
// skip re-running the command.
//
// Otherwise, if they selected "ask-user", then we should ask the user
// for permission to re-run the command outside of the sandbox.
config.fullAutoErrorMode &&
config.fullAutoErrorMode === FullAutoErrorMode.ASK_USER
) {
const review = await askUserPermission(
args,
safety.applyPatch,
getCommandConfirmation,
);
if (review != null) {
return review;
} else {
// The user has approved the command, so we will run it outside of the
// sandbox.
const summary = await execCommand(args, applyPatch, false, abortSignal);
return convertSummaryToResult(summary);
}
} else {
return convertSummaryToResult(summary);
}
}
function convertSummaryToResult(
summary: ExecCommandSummary,
): HandleExecCommandResult {
const { stdout, stderr, exitCode, durationMs } = summary;
return {
outputText: stdout || stderr,
metadata: {
exit_code: exitCode,
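// Convert ms to seconds rounded to one decimal place (e.g. 1234ms -> 1.2).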
duration_seconds: Math.round(durationMs / 100) / 10,
},
};
}
type ExecCommandSummary = {
stdout: string;
stderr: string;
exitCode: number;
durationMs: number;
};
async function execCommand(
execInput: ExecInput,
applyPatchCommand: ApplyPatchCommand | undefined,
runInSandbox: boolean,
abortSignal?: AbortSignal,
): Promise<ExecCommandSummary> {
if (isLoggingEnabled()) {
if (applyPatchCommand != null) {
log("EXEC running apply_patch command");
} else {
const { cmd, workdir, timeoutInMillis } = execInput;
// Seconds are a bit easier to read in log messages and most timeouts
// are specified as multiples of 1000, anyway.
const timeout =
timeoutInMillis != null
? Math.round(timeoutInMillis / 1000).toString()
: "undefined";
log(
`EXEC running \`${formatCommandForDisplay(
cmd,
)}\` in workdir=${workdir} with timeout=${timeout}s`,
);
}
}
// Note execApplyPatch() and exec() are coded defensively and should not
// throw. Any internal errors should be mapped to a non-zero value for the
// exitCode field.
const start = Date.now();
const execResult =
applyPatchCommand != null
? execApplyPatch(applyPatchCommand.patch)
: await exec(execInput, await getSandbox(runInSandbox), abortSignal);
const duration = Date.now() - start;
const { stdout, stderr, exitCode } = execResult;
if (isLoggingEnabled()) {
log(
`EXEC exit=${exitCode} time=${duration}ms:\n\tSTDOUT: ${stdout}\n\tSTDERR: ${stderr}`,
);
}
return {
stdout,
stderr,
exitCode,
durationMs: duration,
};
}
const isInContainer = async (): Promise<boolean> => {
try {
await access("/proc/1/cgroup");
return true;
} catch {
return false;
}
};
async function getSandbox(runInSandbox: boolean): Promise<SandboxType> {
if (runInSandbox) {
if (process.platform === "darwin") {
return SandboxType.MACOS_SEATBELT;
} else if (await isInContainer()) {
return SandboxType.NONE;
}
throw new Error("Sandbox was mandated, but no sandbox is available!");
} else {
return SandboxType.NONE;
}
}
/**
* If return value is non-null, then the command was rejected by the user.
*/
async function askUserPermission(
args: ExecInput,
applyPatchCommand: ApplyPatchCommand | undefined,
getCommandConfirmation: (
command: Array<string>,
applyPatch: ApplyPatchCommand | undefined,
) => Promise<CommandConfirmation>,
): Promise<HandleExecCommandResult | null> {
const { review: decision, customDenyMessage } = await getCommandConfirmation(
args.cmd,
applyPatchCommand,
);
if (decision === ReviewDecision.ALWAYS) {
// Persist this command so we won't ask again during this session.
const key = deriveCommandKey(args.cmd);
alwaysApprovedCommands.add(key);
}
// Any decision other than an affirmative (YES / ALWAYS) aborts execution.
if (decision !== ReviewDecision.YES && decision !== ReviewDecision.ALWAYS) {
const note =
decision === ReviewDecision.NO_CONTINUE
? customDenyMessage?.trim() || "No, don't do that — keep going though."
: "No, don't do that — stop for now.";
return {
outputText: "aborted",
metadata: {},
additionalItems: [
{
type: "message",
role: "user",
content: [{ type: "input_text", text: note }],
},
],
};
} else {
return null;
}
}


@@ -0,0 +1,129 @@
import * as fsSync from "fs";
import * as fs from "fs/promises";
import * as os from "os";
import * as path from "path";
interface Logger {
/** Checking this can be used to avoid constructing a large log message. */
isLoggingEnabled(): boolean;
log(message: string): void;
}
class AsyncLogger implements Logger {
private queue: Array<string> = [];
private isWriting: boolean = false;
constructor(private filePath: string) {
this.filePath = filePath;
}
isLoggingEnabled(): boolean {
return true;
}
log(message: string): void {
const entry = `[${now()}] ${message}\n`;
this.queue.push(entry);
this.maybeWrite();
}
private async maybeWrite(): Promise<void> {
if (this.isWriting || this.queue.length === 0) {
return;
}
this.isWriting = true;
const messages = this.queue.join("");
this.queue = [];
try {
await fs.appendFile(this.filePath, messages);
} finally {
this.isWriting = false;
}
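// Drain anything that was enqueued while the previous batch was written.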
this.maybeWrite();
}
}
class EmptyLogger implements Logger {
isLoggingEnabled(): boolean {
return false;
}
log(_message: string): void {
// No-op
}
}
function now() {
const date = new Date();
const year = date.getFullYear();
const month = String(date.getMonth() + 1).padStart(2, "0");
const day = String(date.getDate()).padStart(2, "0");
const hours = String(date.getHours()).padStart(2, "0");
const minutes = String(date.getMinutes()).padStart(2, "0");
const seconds = String(date.getSeconds()).padStart(2, "0");
return `${year}-${month}-${day}T${hours}:${minutes}:${seconds}`;
}
let logger: Logger;
/**
* Creates a .log file for this session, but also symlinks codex-cli-latest.log
* to the current log file so you can reliably run:
*
* - Mac/Windows: `tail -F "$TMPDIR/oai-codex/codex-cli-latest.log"`
* - Linux: `tail -F ~/.local/oai-codex/codex-cli-latest.log`
*/
export function initLogger(): Logger {
if (logger) {
return logger;
} else if (!process.env["DEBUG"]) {
logger = new EmptyLogger();
return logger;
}
const isMac = process.platform === "darwin";
const isWin = process.platform === "win32";
// On Mac and Windows, os.tmpdir() returns a user-specific folder, so prefer
// it there. On Linux, use ~/.local/oai-codex so logs are not world-readable.
const logDir =
isMac || isWin
? path.join(os.tmpdir(), "oai-codex")
: path.join(os.homedir(), ".local", "oai-codex");
fsSync.mkdirSync(logDir, { recursive: true });
const logFile = path.join(logDir, `codex-cli-${now()}.log`);
// Write the empty string so the file exists and can be tail'd.
fsSync.writeFileSync(logFile, "");
// Symlink to codex-cli-latest.log on UNIX because Windows is funny about
// symlinks.
if (!isWin) {
const latestLink = path.join(logDir, "codex-cli-latest.log");
try {
fsSync.symlinkSync(logFile, latestLink, "file");
} catch (err: unknown) {
const error = err as NodeJS.ErrnoException;
if (error.code === "EEXIST") {
fsSync.unlinkSync(latestLink);
fsSync.symlinkSync(logFile, latestLink, "file");
} else {
throw err;
}
}
}
logger = new AsyncLogger(logFile);
return logger;
}
export function log(message: string): void {
(logger ?? initLogger()).log(message);
}
export function isLoggingEnabled(): boolean {
return (logger ?? initLogger()).isLoggingEnabled();
}


@@ -0,0 +1,112 @@
export type ApplyPatchCreateFileOp = {
type: "create";
path: string;
content: string;
};
export type ApplyPatchDeleteFileOp = {
type: "delete";
path: string;
};
export type ApplyPatchUpdateFileOp = {
type: "update";
path: string;
update: string;
added: number;
deleted: number;
};
export type ApplyPatchOp =
| ApplyPatchCreateFileOp
| ApplyPatchDeleteFileOp
| ApplyPatchUpdateFileOp;
const PATCH_PREFIX = "*** Begin Patch\n";
const PATCH_SUFFIX = "\n*** End Patch";
const ADD_FILE_PREFIX = "*** Add File: ";
const DELETE_FILE_PREFIX = "*** Delete File: ";
const UPDATE_FILE_PREFIX = "*** Update File: ";
const END_OF_FILE_PREFIX = "*** End of File";
const HUNK_ADD_LINE_PREFIX = "+";
/**
* @returns null when the patch is invalid
*/
export function parseApplyPatch(patch: string): Array<ApplyPatchOp> | null {
if (!patch.startsWith(PATCH_PREFIX)) {
// Patch must begin with '*** Begin Patch'
return null;
} else if (!patch.endsWith(PATCH_SUFFIX)) {
// Patch must end with '*** End Patch'
return null;
}
const patchBody = patch.slice(
PATCH_PREFIX.length,
patch.length - PATCH_SUFFIX.length,
);
const lines = patchBody.split("\n");
const ops: Array<ApplyPatchOp> = [];
for (const line of lines) {
if (line.startsWith(END_OF_FILE_PREFIX)) {
continue;
} else if (line.startsWith(ADD_FILE_PREFIX)) {
ops.push({
type: "create",
path: line.slice(ADD_FILE_PREFIX.length).trim(),
content: "",
});
continue;
} else if (line.startsWith(DELETE_FILE_PREFIX)) {
ops.push({
type: "delete",
path: line.slice(DELETE_FILE_PREFIX.length).trim(),
});
continue;
} else if (line.startsWith(UPDATE_FILE_PREFIX)) {
ops.push({
type: "update",
path: line.slice(UPDATE_FILE_PREFIX.length).trim(),
update: "",
added: 0,
deleted: 0,
});
continue;
}
const lastOp = ops[ops.length - 1];
if (lastOp?.type === "create") {
lastOp.content = appendLine(
lastOp.content,
line.slice(HUNK_ADD_LINE_PREFIX.length),
);
continue;
}
if (lastOp?.type !== "update") {
// Expected update op but got ${lastOp?.type} for line ${line}
return null;
}
if (line.startsWith(HUNK_ADD_LINE_PREFIX)) {
lastOp.added += 1;
} else if (line.startsWith("-")) {
lastOp.deleted += 1;
}
lastOp.update += lastOp.update ? "\n" + line : line;
}
return ops;
}
function appendLine(content: string, line: string) {
if (!content.length) {
return line;
}
return [content, line].join("\n");
}
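// Illustrative example (file name assumed): each `*** Add/Delete/Update File:`
// marker opens a new op, and subsequent hunk lines accumulate into it.
//
//   parseApplyPatch(
//     "*** Begin Patch\n*** Add File: notes.txt\n+hello\n*** End Patch",
//   );
//   // -> [{ type: "create", path: "notes.txt", content: "hello" }]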


@@ -0,0 +1,18 @@
import type { SafeCommandReason } from "@lib/approvals";
export type CommandReviewDetails = {
cmd: Array<string>;
cmdReadableText: string;
autoApproval: SafeCommandReason | null;
};
export enum ReviewDecision {
YES = "yes",
NO_CONTINUE = "no-continue",
NO_EXIT = "no-exit",
/**
* User has approved this command and wants to automatically approve any
* future identical instances for the remainder of the session.
*/
ALWAYS = "always",
}


@@ -0,0 +1,30 @@
export enum SandboxType {
NONE = "none",
MACOS_SEATBELT = "macos.seatbelt",
LINUX_LANDLOCK = "linux.landlock",
}
export type ExecInput = {
cmd: Array<string>;
workdir: string | undefined;
timeoutInMillis: number | undefined;
};
/**
* Result of executing a command. Caller is responsible for checking `code` to
* determine whether the command was successful.
*/
export type ExecResult = {
stdout: string;
stderr: string;
exitCode: number;
};
/**
* Value to use with the `metadata` field of a `ResponseItem` whose type is
* `function_call_output`.
*/
export type ExecOutputMetadata = {
exit_code: number;
duration_seconds: number;
};


@@ -0,0 +1,141 @@
import type { ExecResult } from "./interface.js";
import type { SpawnOptions } from "child_process";
import { exec } from "./raw-exec.js";
import { log } from "../log.js";
import { CONFIG_DIR } from "src/utils/config.js";
function getCommonRoots() {
return [
CONFIG_DIR,
// Without this root, it'll cause:
// pyenv: cannot rehash: $HOME/.pyenv/shims isn't writable
`${process.env["HOME"]}/.pyenv`,
];
}
export function execWithSeatbelt(
cmd: Array<string>,
opts: SpawnOptions,
writableRoots: Array<string>,
abortSignal?: AbortSignal,
): Promise<ExecResult> {
let scopedWritePolicy: string;
let policyTemplateParams: Array<string>;
if (writableRoots.length > 0) {
// Add the common roots (e.g. `~/.codex`) to the writable list. We only do
// this when at least one writable root was supplied, i.e. not in
// read-only mode.
writableRoots.push(...getCommonRoots());
const { policies, params } = writableRoots
.map((root, index) => ({
policy: `(subpath (param "WRITABLE_ROOT_${index}"))`,
param: `-DWRITABLE_ROOT_${index}=${root}`,
}))
.reduce(
(
acc: { policies: Array<string>; params: Array<string> },
{ policy, param },
) => {
acc.policies.push(policy);
acc.params.push(param);
return acc;
},
{ policies: [], params: [] },
);
scopedWritePolicy = `\n(allow file-write*\n${policies.join(" ")}\n)`;
policyTemplateParams = params;
} else {
scopedWritePolicy = "";
policyTemplateParams = [];
}
const fullPolicy = READ_ONLY_SEATBELT_POLICY + scopedWritePolicy;
log(
`Running seatbelt with policy: ${fullPolicy} and ${
policyTemplateParams.length
} template params: ${policyTemplateParams.join(", ")}`,
);
const fullCommand = [
"sandbox-exec",
"-p",
fullPolicy,
...policyTemplateParams,
"--",
...cmd,
];
return exec(fullCommand, opts, writableRoots, abortSignal);
}
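// Illustrative result: for writableRoots = ["/repo"] (plus the common roots
// appended above), the scoped policy fragment looks like
//
//   (allow file-write*
//     (subpath (param "WRITABLE_ROOT_0")) (subpath (param "WRITABLE_ROOT_1")) ...
//   )
//
// and each root is bound via a matching -DWRITABLE_ROOT_n=<path> argument
// passed to sandbox-exec.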
const READ_ONLY_SEATBELT_POLICY = `
(version 1)
; inspired by Chrome's sandbox policy:
; https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/common.sb;l=273-319;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd
; start with closed-by-default
(deny default)
; allow read-only file operations
(allow file-read*)
; child processes inherit the policy of their parent
(allow process-exec)
(allow process-fork)
(allow signal (target self))
(allow file-write-data
(require-all
(path "/dev/null")
(vnode-type CHARACTER-DEVICE)))
; sysctls permitted.
(allow sysctl-read
(sysctl-name "hw.activecpu")
(sysctl-name "hw.busfrequency_compat")
(sysctl-name "hw.byteorder")
(sysctl-name "hw.cacheconfig")
(sysctl-name "hw.cachelinesize_compat")
(sysctl-name "hw.cpufamily")
(sysctl-name "hw.cpufrequency_compat")
(sysctl-name "hw.cputype")
(sysctl-name "hw.l1dcachesize_compat")
(sysctl-name "hw.l1icachesize_compat")
(sysctl-name "hw.l2cachesize_compat")
(sysctl-name "hw.l3cachesize_compat")
(sysctl-name "hw.logicalcpu_max")
(sysctl-name "hw.machine")
(sysctl-name "hw.ncpu")
(sysctl-name "hw.nperflevels")
(sysctl-name "hw.optional.arm.FEAT_BF16")
(sysctl-name "hw.optional.arm.FEAT_DotProd")
(sysctl-name "hw.optional.arm.FEAT_FCMA")
(sysctl-name "hw.optional.arm.FEAT_FHM")
(sysctl-name "hw.optional.arm.FEAT_FP16")
(sysctl-name "hw.optional.arm.FEAT_I8MM")
(sysctl-name "hw.optional.arm.FEAT_JSCVT")
(sysctl-name "hw.optional.arm.FEAT_LSE")
(sysctl-name "hw.optional.arm.FEAT_RDM")
(sysctl-name "hw.optional.arm.FEAT_SHA512")
(sysctl-name "hw.optional.armv8_2_sha512")
(sysctl-name "hw.memsize")
(sysctl-name "hw.pagesize")
(sysctl-name "hw.packages")
(sysctl-name "hw.pagesize_compat")
(sysctl-name "hw.physicalcpu_max")
(sysctl-name "hw.tbfrequency_compat")
(sysctl-name "hw.vectorunit")
(sysctl-name "kern.hostname")
(sysctl-name "kern.maxfilesperproc")
(sysctl-name "kern.osproductversion")
(sysctl-name "kern.osrelease")
(sysctl-name "kern.ostype")
(sysctl-name "kern.osvariant_status")
(sysctl-name "kern.osversion")
(sysctl-name "kern.secure_kernel")
(sysctl-name "kern.usrstack64")
(sysctl-name "kern.version")
(sysctl-name "sysctl.proc_cputype")
(sysctl-name-prefix "hw.perflevel")
)`.trim();


@@ -0,0 +1,199 @@
import type { ExecResult } from "./interface";
import type {
ChildProcess,
SpawnOptions,
SpawnOptionsWithStdioTuple,
StdioNull,
StdioPipe,
} from "child_process";
import { log, isLoggingEnabled } from "../log.js";
import { spawn } from "child_process";
import * as os from "os";
const MAX_BUFFER = 1024 * 100; // 100 KB
/**
* This function should never return a rejected promise: errors should be
* mapped to a non-zero exit code and the error message should be in stderr.
*/
export function exec(
command: Array<string>,
options: SpawnOptions,
_writableRoots: Array<string>,
abortSignal?: AbortSignal,
): Promise<ExecResult> {
const prog = command[0];
if (typeof prog !== "string") {
return Promise.resolve({
stdout: "",
stderr: "command[0] is not a string",
exitCode: 1,
});
}
// We use spawn() instead of exec() or execFile() so that we can set the
// stdio options to "ignore" for stdin. Ripgrep has a heuristic where it
// may try to read from stdin as explained here:
//
// https://github.com/BurntSushi/ripgrep/blob/e2362d4d5185d02fa857bf381e7bd52e66fafc73/crates/core/flags/hiargs.rs#L1101-L1103
//
// This can be a problem because if you save the following to a file and
// run it with `node`, it will hang forever:
//
// ```
// const {execFile} = require('child_process');
//
// execFile('rg', ['foo'], (error, stdout, stderr) => {
// if (error) {
// console.error(`error: ${error}\n\nstderr: ${stderr}`);
// } else {
// console.log(`stdout: ${stdout}`);
// }
// });
// ```
//
// Even if you pass `{stdio: ["ignore", "pipe", "pipe"] }` to execFile(), the
// hang still happens as the `stdio` is seemingly ignored. Using spawn()
// works around this issue.
const fullOptions: SpawnOptionsWithStdioTuple<
StdioNull,
StdioPipe,
StdioPipe
> = {
...options,
// Inherit any caller-supplied stdio flags but force stdin to "ignore" so
// the child never attempts to read from us (see lengthy comment above).
stdio: ["ignore", "pipe", "pipe"],
// Launch the child in its *own* process group so that we can later send a
// single signal to the entire group; this reliably terminates not only
// the immediate child but also any grandchildren it might have spawned
// (think `bash -c "sleep 999"`).
detached: true,
};
const child: ChildProcess = spawn(prog, command.slice(1), fullOptions);
// If an AbortSignal is provided, ensure the spawned process is terminated
// when the signal is triggered so that cancellations propagate down to any
// long-running child processes. We default to SIGTERM to give the process a
// chance to clean up, falling back to SIGKILL if it does not exit in a
// timely fashion.
if (abortSignal) {
const abortHandler = () => {
if (isLoggingEnabled()) {
log(`raw-exec: abort signal received; killing child ${child.pid}`);
}
const killTarget = (signal: NodeJS.Signals) => {
if (!child.pid) {
return;
}
try {
try {
// Send to the *process group* so grandchildren are included.
process.kill(-child.pid, signal);
} catch {
// Fallback: kill only the immediate child (may leave orphans on
// exotic kernels that lack process-group semantics, but better
// than nothing).
try {
child.kill(signal);
} catch {
/* ignore */
}
}
} catch {
/* already gone */
}
};
// First try graceful termination.
killTarget("SIGTERM");
// Escalate to SIGKILL if the group refuses to die.
setTimeout(() => {
if (!child.killed) {
killTarget("SIGKILL");
}
}, 2000).unref();
};
if (abortSignal.aborted) {
abortHandler();
} else {
abortSignal.addEventListener("abort", abortHandler, { once: true });
}
}
if (!child.pid) {
return Promise.resolve({
stdout: "",
stderr: `likely failed because ${prog} could not be found`,
exitCode: 1,
});
}
const stdoutChunks: Array<Buffer> = [];
const stderrChunks: Array<Buffer> = [];
let numStdoutBytes = 0;
let numStderrBytes = 0;
let hitMaxStdout = false;
let hitMaxStderr = false;
return new Promise<ExecResult>((resolve) => {
child.stdout?.on("data", (data: Buffer) => {
if (!hitMaxStdout) {
numStdoutBytes += data.length;
if (numStdoutBytes <= MAX_BUFFER) {
stdoutChunks.push(data);
} else {
hitMaxStdout = true;
}
}
});
child.stderr?.on("data", (data: Buffer) => {
if (!hitMaxStderr) {
numStderrBytes += data.length;
if (numStderrBytes <= MAX_BUFFER) {
stderrChunks.push(data);
} else {
hitMaxStderr = true;
}
}
});
child.on("exit", (code, signal) => {
const stdout = Buffer.concat(stdoutChunks).toString("utf8");
const stderr = Buffer.concat(stderrChunks).toString("utf8");
// Map (code, signal) to an exit code. We expect exactly one of the two
// values to be non-null, but we code defensively to handle the case where
// both are null.
let exitCode: number;
if (code != null) {
exitCode = code;
} else if (signal != null && signal in os.constants.signals) {
const signalNum =
os.constants.signals[signal as keyof typeof os.constants.signals];
exitCode = 128 + signalNum;
} else {
exitCode = 1;
}
if (isLoggingEnabled()) {
log(
`raw-exec: child ${child.pid} exited code=${exitCode} signal=${signal}`,
);
}
resolve({
stdout,
stderr,
exitCode,
});
});
child.on("error", (err) => {
resolve({
stdout: "",
stderr: String(err),
exitCode: 1,
});
});
});
}


@@ -0,0 +1,51 @@
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
/**
* Roughly estimate the number of language-model tokens represented by a list
* of OpenAI `ResponseItem`s.
*
* A full tokenizer would be more accurate, but would add a heavyweight
* dependency for only marginal benefit. Empirically, assuming ~4 characters
* per token offers a good enough signal for displaying context-window usage
* to the user.
*
* The algorithm counts characters from the different content types we may
* encounter and then converts that char count to tokens by dividing by four
* and rounding up.
*/
export function approximateTokensUsed(items: Array<ResponseItem>): number {
let charCount = 0;
for (const item of items) {
switch (item.type) {
case "message": {
for (const c of item.content) {
if (c.type === "input_text" || c.type === "output_text") {
charCount += c.text.length;
} else if (c.type === "refusal") {
charCount += c.refusal.length;
} else if (c.type === "input_file") {
charCount += c.filename?.length ?? 0;
}
// images and other content types are ignored (0 chars)
}
break;
}
case "function_call": {
charCount += (item.name?.length || 0) + (item.arguments?.length || 0);
break;
}
case "function_call_output": {
charCount += item.output.length;
break;
}
default:
break;
}
}
return Math.ceil(charCount / 4);
}
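// Illustrative: ~4000 characters of message text, function-call arguments and
// outputs is reported as roughly 1000 tokens.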


@@ -0,0 +1,9 @@
// This tiny shim exists solely so that development tooling such as `ts-node`
// (which executes the *source* files directly) can resolve the existing
// `./auto-approval-mode.js` import specifier used throughout the codebase.
//
// In the emitted JavaScript (built via `tsc --module nodenext`) the compiler
// rewrites the path to point at the generated `.js` file automatically, so
// having this shim in the source tree is completely transparent for
// production builds.
export { AutoApprovalMode, FullAutoErrorMode } from "./auto-approval-mode.ts";


@@ -0,0 +1,10 @@
export enum AutoApprovalMode {
SUGGEST = "suggest",
AUTO_EDIT = "auto-edit",
FULL_AUTO = "full-auto",
}
export enum FullAutoErrorMode {
ASK_USER = "ask-user",
IGNORE_AND_CONTINUE = "ignore-and-continue",
}


@@ -0,0 +1,31 @@
import { execSync } from "child_process";
/**
* Returns true if the given directory is part of a Git repository.
*
* This uses the canonical Git command `git rev-parse --is-inside-work-tree`
* which exits with status 0 when executed anywhere inside a working tree
* (including the repo root) and exits with a non-zero status otherwise. We
* intentionally ignore stdout/stderr and only rely on the exit code so that
* this works consistently across Git versions and configurations.
*
* The function is fully synchronous because it is typically used during CLI
* startup (e.g. to decide whether to enable certain Git-specific features) and
* a synchronous check keeps such call sites simple. The command is extremely
* fast (~1ms) so blocking the event loop briefly is acceptable.
*/
export function checkInGit(workdir: string): boolean {
try {
// "git rev-parse --is-inside-work-tree" prints either "true" or "false" to
// stdout. We don't care about the output — only the exit status — so we
// discard stdio for maximum performance and to avoid leaking noise if the
// caller happens to inherit stdio.
execSync("git rev-parse --is-inside-work-tree", {
cwd: workdir,
stdio: "ignore",
});
return true;
} catch {
return false;
}
}


@@ -0,0 +1,356 @@
// NOTE: We intentionally point the TypeScript import at the source file
// (`./auto-approval-mode.ts`) instead of the emitted `.js` bundle. This makes
// the module resolvable when the project is executed via `ts-node`, which
// resolves *source* paths rather than built artefacts. During a production
// build the TypeScript compiler will automatically rewrite the path to
// `./auto-approval-mode.js`, so the change is completely transparent for the
// compiled `dist/` output used by the published CLI.
import type { FullAutoErrorMode } from "./auto-approval-mode.js";
import { log, isLoggingEnabled } from "./agent/log.js";
import { AutoApprovalMode } from "./auto-approval-mode.js";
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs";
import { load as loadYaml, dump as dumpYaml } from "js-yaml";
import { homedir } from "os";
import { dirname, join, extname, resolve as resolvePath } from "path";
export const DEFAULT_AGENTIC_MODEL = "o4-mini";
export const DEFAULT_FULL_CONTEXT_MODEL = "gpt-4.1";
export const DEFAULT_APPROVAL_MODE = AutoApprovalMode.SUGGEST;
export const DEFAULT_INSTRUCTIONS = "";
export const CONFIG_DIR = join(homedir(), ".codex");
export const CONFIG_JSON_FILEPATH = join(CONFIG_DIR, "config.json");
export const CONFIG_YAML_FILEPATH = join(CONFIG_DIR, "config.yaml");
export const CONFIG_YML_FILEPATH = join(CONFIG_DIR, "config.yml");
// Keep the original constant name for backward compatibility, but point it at
// the default JSON path. Code that relies on this constant will continue to
// work unchanged.
export const CONFIG_FILEPATH = CONFIG_JSON_FILEPATH;
export const INSTRUCTIONS_FILEPATH = join(CONFIG_DIR, "instructions.md");
export const OPENAI_TIMEOUT_MS =
parseInt(process.env["OPENAI_TIMEOUT_MS"] || "0", 10) || undefined;
export const OPENAI_BASE_URL = process.env["OPENAI_BASE_URL"] || "";
export let OPENAI_API_KEY = process.env["OPENAI_API_KEY"] || "";
export function setApiKey(apiKey: string): void {
OPENAI_API_KEY = apiKey;
}
// Formatting (quiet mode only).
export const PRETTY_PRINT = Boolean(process.env["PRETTY_PRINT"] || "");
// Represents config as persisted in config.json.
export type StoredConfig = {
model?: string;
approvalMode?: AutoApprovalMode;
fullAutoErrorMode?: FullAutoErrorMode;
memory?: MemoryConfig;
};
// Minimal config written on first run. An *empty* model string ensures that
// we always fall back to the current default model on load, so updates to the
// default keep propagating to existing users until they explicitly set a model.
export const EMPTY_STORED_CONFIG: StoredConfig = { model: "" };
// Pre-stringified JSON variant so we don't stringify repeatedly.
const EMPTY_CONFIG_JSON = JSON.stringify(EMPTY_STORED_CONFIG, null, 2) + "\n";
export type MemoryConfig = {
enabled: boolean;
};
// Represents full runtime config, including loaded instructions
export type AppConfig = {
apiKey?: string;
model: string;
instructions: string;
fullAutoErrorMode?: FullAutoErrorMode;
memory?: MemoryConfig;
};
// ---------------------------------------------------------------------------
// Project doc support (codex.md)
// ---------------------------------------------------------------------------
export const PROJECT_DOC_MAX_BYTES = 32 * 1024; // 32 kB
const PROJECT_DOC_FILENAMES = ["codex.md", ".codex.md", "CODEX.md"];
export function discoverProjectDocPath(startDir: string): string | null {
const cwd = resolvePath(startDir);
// 1) Look in the explicit CWD first:
for (const name of PROJECT_DOC_FILENAMES) {
const direct = join(cwd, name);
if (existsSync(direct)) {
return direct;
}
}
// 2) Fallback: walk up to the Git root and look there
let dir = cwd;
// eslint-disable-next-line no-constant-condition
while (true) {
const gitPath = join(dir, ".git");
if (existsSync(gitPath)) {
// Once we hit the Git root, search its top-level for the doc
for (const name of PROJECT_DOC_FILENAMES) {
const candidate = join(dir, name);
if (existsSync(candidate)) {
return candidate;
}
}
// If Git root but no doc, stop looking
return null;
}
const parent = dirname(dir);
if (parent === dir) {
// Reached filesystem root without finding Git
return null;
}
dir = parent;
}
}
/**
* Load the project documentation markdown (codex.md) if present. If the file
* exceeds {@link PROJECT_DOC_MAX_BYTES} it will be truncated and a warning is
* logged.
*
* @param cwd The current working directory of the caller
* @param explicitPath If provided, skips discovery and loads the given path
*/
export function loadProjectDoc(cwd: string, explicitPath?: string): string {
let filepath: string | null = null;
if (explicitPath) {
filepath = resolvePath(cwd, explicitPath);
if (!existsSync(filepath)) {
// eslint-disable-next-line no-console
console.warn(`codex: project doc not found at ${filepath}`);
filepath = null;
}
} else {
filepath = discoverProjectDocPath(cwd);
}
if (!filepath) {
return "";
}
try {
const buf = readFileSync(filepath);
if (buf.byteLength > PROJECT_DOC_MAX_BYTES) {
// eslint-disable-next-line no-console
console.warn(
`codex: project doc '${filepath}' exceeds ${PROJECT_DOC_MAX_BYTES} bytes; truncating.`,
);
}
return buf.slice(0, PROJECT_DOC_MAX_BYTES).toString("utf-8");
} catch {
return "";
}
}
// (Receives params for testing)
export type LoadConfigOptions = {
/** Working directory used for project doc discovery */
cwd?: string;
/** Disable inclusion of the project doc */
disableProjectDoc?: boolean;
/** Explicit path to project doc (overrides discovery) */
projectDocPath?: string;
/** Whether we are in full-context mode. */
isFullContext?: boolean;
};
export const loadConfig = (
configPath: string | undefined = CONFIG_FILEPATH,
instructionsPath: string | undefined = INSTRUCTIONS_FILEPATH,
options: LoadConfigOptions = {},
): AppConfig => {
// Determine the actual path to load. If the provided path doesn't exist and
// the caller passed the default JSON path, automatically fall back to YAML
// variants.
let actualConfigPath = configPath;
if (!existsSync(actualConfigPath)) {
if (configPath === CONFIG_FILEPATH) {
if (existsSync(CONFIG_YAML_FILEPATH)) {
actualConfigPath = CONFIG_YAML_FILEPATH;
} else if (existsSync(CONFIG_YML_FILEPATH)) {
actualConfigPath = CONFIG_YML_FILEPATH;
}
}
}
let storedConfig: StoredConfig = {};
if (existsSync(actualConfigPath)) {
const raw = readFileSync(actualConfigPath, "utf-8");
const ext = extname(actualConfigPath).toLowerCase();
try {
if (ext === ".yaml" || ext === ".yml") {
storedConfig = loadYaml(raw) as unknown as StoredConfig;
} else {
storedConfig = JSON.parse(raw);
}
} catch {
// If parsing fails, fall back to empty config to avoid crashing.
storedConfig = {};
}
}
const instructionsFilePathResolved =
instructionsPath ?? INSTRUCTIONS_FILEPATH;
const userInstructions = existsSync(instructionsFilePathResolved)
? readFileSync(instructionsFilePathResolved, "utf-8")
: DEFAULT_INSTRUCTIONS;
// Project doc -----------------------------------------------------------
const shouldLoadProjectDoc =
!options.disableProjectDoc &&
process.env["CODEX_DISABLE_PROJECT_DOC"] !== "1";
let projectDoc = "";
let projectDocPath: string | null = null;
if (shouldLoadProjectDoc) {
const cwd = options.cwd ?? process.cwd();
projectDoc = loadProjectDoc(cwd, options.projectDocPath);
projectDocPath = options.projectDocPath
? resolvePath(cwd, options.projectDocPath)
: discoverProjectDocPath(cwd);
if (projectDocPath) {
if (isLoggingEnabled()) {
log(
`[codex] Loaded project doc from ${projectDocPath} (${projectDoc.length} bytes)`,
);
}
} else {
if (isLoggingEnabled()) {
log(`[codex] No project doc found in ${cwd}`);
}
}
}
const combinedInstructions = [userInstructions, projectDoc]
.filter((s) => s && s.trim() !== "")
.join("\n\n--- project-doc ---\n\n");
// Treat an empty string ("" or whitespace) as absence so we can fall back to
// the current default model.
const storedModel =
storedConfig.model && storedConfig.model.trim() !== ""
? storedConfig.model.trim()
: undefined;
const config: AppConfig = {
model:
storedModel ??
(options.isFullContext
? DEFAULT_FULL_CONTEXT_MODEL
: DEFAULT_AGENTIC_MODEL),
instructions: combinedInstructions,
};
// -----------------------------------------------------------------------
// First-run bootstrap: if the configuration file (and/or its containing
// directory) didn't exist we create them now so that users end up with a
// materialised ~/.codex/config.json file on first execution. This mirrors
// what `saveConfig()` would do but without requiring callers to remember to
// invoke it separately.
//
// We intentionally perform this *after* we have computed the final
// `config` object so that we can just persist the resolved defaults. The
// write operations are guarded by `existsSync` checks so that subsequent
// runs that already have a config will remain read-only here.
// -----------------------------------------------------------------------
try {
if (!existsSync(actualConfigPath)) {
// Ensure the directory exists first.
const dir = dirname(actualConfigPath);
if (!existsSync(dir)) {
mkdirSync(dir, { recursive: true });
}
// Persist a minimal config; we include the `model` key but leave it as
// an empty string so that `loadConfig()` treats it as "unset" and falls
// back to whatever the default model is at runtime. This prevents
// pinning users to an old default after upgrading Codex.
const ext = extname(actualConfigPath).toLowerCase();
if (ext === ".yaml" || ext === ".yml") {
writeFileSync(actualConfigPath, dumpYaml(EMPTY_STORED_CONFIG), "utf-8");
} else {
writeFileSync(actualConfigPath, EMPTY_CONFIG_JSON, "utf-8");
}
}
// Always ensure the instructions file exists so users can edit it.
if (!existsSync(instructionsFilePathResolved)) {
const instrDir = dirname(instructionsFilePathResolved);
if (!existsSync(instrDir)) {
mkdirSync(instrDir, { recursive: true });
}
writeFileSync(instructionsFilePathResolved, userInstructions, "utf-8");
}
} catch {
// Silently ignore any errors; failure to persist the defaults shouldn't
// block the CLI from starting. A future explicit `codex config` command
// or `saveConfig()` call can handle (re)writing later.
}
// Only include the "memory" key if it was explicitly set by the user. This
// preserves backward compatibility with older config files (and our test
// fixtures) that don't include a "memory" section.
if (storedConfig.memory !== undefined) {
config.memory = storedConfig.memory;
}
if (storedConfig.fullAutoErrorMode) {
config.fullAutoErrorMode = storedConfig.fullAutoErrorMode;
}
return config;
};
export const saveConfig = (
config: AppConfig,
configPath = CONFIG_FILEPATH,
instructionsPath = INSTRUCTIONS_FILEPATH,
): void => {
// If the caller passed the default JSON path *and* a YAML config already
// exists on disk, save back to that YAML file instead to preserve the
// user's chosen format.
let targetPath = configPath;
if (
configPath === CONFIG_FILEPATH &&
!existsSync(configPath) &&
(existsSync(CONFIG_YAML_FILEPATH) || existsSync(CONFIG_YML_FILEPATH))
) {
targetPath = existsSync(CONFIG_YAML_FILEPATH)
? CONFIG_YAML_FILEPATH
: CONFIG_YML_FILEPATH;
}
const dir = dirname(targetPath);
if (!existsSync(dir)) {
mkdirSync(dir, { recursive: true });
}
const ext = extname(targetPath).toLowerCase();
if (ext === ".yaml" || ext === ".yml") {
writeFileSync(targetPath, dumpYaml({ model: config.model }), "utf-8");
} else {
writeFileSync(
targetPath,
JSON.stringify({ model: config.model }, null, 2),
"utf-8",
);
}
writeFileSync(instructionsPath, config.instructions, "utf-8");
};
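// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not exercised by the CLI): how the
// load/save round trip behaves. The "o4-mini" value is just an example model.
// ---------------------------------------------------------------------------
function exampleConfigRoundTrip(): void {
const config = loadConfig();
// saveConfig writes JSON or YAML depending on what already exists on disk,
// and also (re)writes the instructions file alongside it.
saveConfig({ ...config, model: "o4-mini" });
}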

View File

@@ -0,0 +1,31 @@
import type { ResponseInputItem } from "openai/resources/responses/responses";
import { fileTypeFromBuffer } from "file-type";
import fs from "fs/promises";
export async function createInputItem(
text: string,
images: Array<string>,
): Promise<ResponseInputItem.Message> {
const inputItem: ResponseInputItem.Message = {
role: "user",
content: [{ type: "input_text", text }],
type: "message",
};
for (const filePath of images) {
/* eslint-disable no-await-in-loop */
const binary = await fs.readFile(filePath);
const kind = await fileTypeFromBuffer(binary);
/* eslint-enable no-await-in-loop */
const encoded = binary.toString("base64");
const mime = kind?.mime ?? "application/octet-stream";
inputItem.content.push({
type: "input_image",
detail: "auto",
image_url: `data:${mime};base64,${encoded}`,
});
}
return inputItem;
}
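// ---------------------------------------------------------------------------
// Usage sketch (illustrative): attaching a screenshot to a prompt. The path
// below is hypothetical; any format `file-type` recognises gets its real MIME
// type, and everything else falls back to application/octet-stream.
// ---------------------------------------------------------------------------
async function exampleCreateInputItem(): Promise<ResponseInputItem.Message> {
return createInputItem("What does this error mean?", [
"/tmp/screenshot.png", // hypothetical image path
]);
}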

View File

@@ -0,0 +1,91 @@
import { OPENAI_API_KEY } from "./config";
import OpenAI from "openai";
export const RECOMMENDED_MODELS: Array<string> = ["o4-mini", "o3"];
/**
* Background model loader / cache.
*
* We start fetching the list of available models from OpenAI once the CLI
* enters interactive mode. The request is made exactly once during the
* lifetime of the process and the results are cached for subsequent calls.
*/
let modelsPromise: Promise<Array<string>> | null = null;
async function fetchModels(): Promise<Array<string>> {
// If the user has not configured an API key, we cannot hit the network.
if (!OPENAI_API_KEY) {
return ["o4-mini"];
}
try {
const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
const list = await openai.models.list();
const models: Array<string> = [];
for await (const model of list as AsyncIterable<{ id?: string }>) {
if (model && typeof model.id === "string") {
models.push(model.id);
}
}
return models.sort();
} catch {
return [];
}
}
export function preloadModels(): void {
if (!modelsPromise) {
// Fire-and-forget; callers that truly need the list should `await`
// `getAvailableModels()` instead.
void getAvailableModels();
}
}
export async function getAvailableModels(): Promise<Array<string>> {
if (!modelsPromise) {
modelsPromise = fetchModels();
}
return modelsPromise;
}
/**
* Verify that the provided model identifier is present in the set returned by
* {@link getAvailableModels}. The list of models is fetched from the OpenAI
* `/models` endpoint the first time it is required and then cached in-process.
*/
export async function isModelSupportedForResponses(
model: string | undefined | null,
): Promise<boolean> {
if (
typeof model !== "string" ||
model.trim() === "" ||
RECOMMENDED_MODELS.includes(model)
) {
return true;
}
const MODEL_LIST_TIMEOUT_MS = 2_000;
try {
const models = await Promise.race<Array<string>>([
getAvailableModels(),
new Promise<Array<string>>((resolve) =>
setTimeout(() => resolve([]), MODEL_LIST_TIMEOUT_MS),
),
]);
// If the timeout fired we get an empty list → treat as supported to avoid
// false negatives.
if (models.length === 0) {
return true;
}
return models.includes(model.trim());
} catch {
// Network or library failure → don't block startup.
return true;
}
}
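// ---------------------------------------------------------------------------
// Usage sketch (illustrative): warm the cache at startup, then gate a
// user-supplied model before starting a session. "gpt-nonexistent" is a
// hypothetical identifier used only to exercise the negative path.
// ---------------------------------------------------------------------------
async function exampleModelGate(): Promise<boolean> {
preloadModels(); // fire-and-forget; resolves in the background
return isModelSupportedForResponses("gpt-nonexistent");
}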

View File

@@ -0,0 +1,240 @@
import type { CommandReviewDetails } from "./agent/review.js";
import type {
ExecInput,
ExecOutputMetadata,
} from "./agent/sandbox/interface.js";
import type { SafeCommandReason } from "@lib/approvals.js";
import type { ResponseFunctionToolCall } from "openai/resources/responses/responses.mjs";
import { log } from "node:console";
import process from "process";
// The console utility import is intentionally explicit to keep bundlers from
// including the entire `console` module when only the `log` function is
// required.
// Allowed shell operators that we consider "safe" as they do not introduce
// side-effects on their own (unlike redirections). Parentheses and braces for
// grouping are excluded for simplicity.
const SAFE_SHELL_OPERATORS: ReadonlySet<string> = new Set([
"&&",
"||",
"|",
";",
]);
// Lazily resolve heavy dependencies at runtime so that test environments
// (which might not have the @lib alias configured) do not fail at import
// time. If the modules cannot be loaded we fall back to permissive stub
// implementations so that basic functionality like unit-testing small UI
// helpers continues to work without the full codex-lib dependency tree.
let isSafeCommand: (cmd: Array<string>) => SafeCommandReason | null = () =>
null;
let shellQuoteParse:
| ((cmd: string, env?: Record<string, string | undefined>) => Array<unknown>)
| undefined;
let formatCommandForDisplay: (cmd: Array<string>) => string = (cmd) =>
cmd.join(" ");
async function loadLibs(): Promise<void> {
try {
const approvals = await import("@lib/approvals.js");
if (typeof approvals.isSafeCommand === "function") {
isSafeCommand = approvals.isSafeCommand;
}
} catch {
// ignore; keep the stub
}
try {
const fmt = await import("@lib/format-command.js");
if (typeof fmt.formatCommandForDisplay === "function") {
formatCommandForDisplay = fmt.formatCommandForDisplay;
}
} catch {
// ignore; keep the stub
}
try {
const sq = await import("shell-quote");
if (typeof sq.parse === "function") {
shellQuoteParse = sq.parse as typeof shellQuoteParse;
}
} catch {
// ignore; keep the stub
}
}
// Trigger the dynamic import in the background; callers that need the real
// implementation should await the returned promise (the parser currently does
// not require this for correctness during tests).
void loadLibs();
export function parseToolCallOutput(toolCallOutput: string): {
output: string;
metadata: ExecOutputMetadata;
} {
try {
const { output, metadata } = JSON.parse(toolCallOutput);
return {
output,
metadata,
};
} catch {
return {
output: `Failed to parse JSON result`,
metadata: {
exit_code: 1,
duration_seconds: 0,
},
};
}
}
export function parseToolCall(
toolCall: ResponseFunctionToolCall,
): CommandReviewDetails | undefined {
const toolCallArgs = parseToolCallArguments(toolCall.arguments);
if (toolCallArgs == null) {
return undefined;
}
const { cmd } = toolCallArgs;
const cmdReadableText = formatCommandForDisplay(cmd);
const autoApproval = computeAutoApproval(cmd);
return {
cmd,
cmdReadableText,
autoApproval,
};
}
/**
* If toolCallArguments is a string of JSON that can be parsed into an object
* with a "cmd" or "command" property that is an `Array<string>`, then returns
* that array. Otherwise, returns undefined.
*/
export function parseToolCallArguments(
toolCallArguments: string,
): ExecInput | undefined {
let json: unknown;
try {
json = JSON.parse(toolCallArguments);
} catch {
log(`Failed to parse toolCall.arguments: ${toolCallArguments}`);
return undefined;
}
if (typeof json !== "object" || json == null) {
return undefined;
}
const { cmd, command } = json as Record<string, unknown>;
const commandArray = toStringArray(cmd) ?? toStringArray(command);
if (commandArray == null) {
return undefined;
}
// @ts-expect-error timeout and workdir may not exist on json.
const { timeout, workdir } = json;
return {
cmd: commandArray,
workdir: typeof workdir === "string" ? workdir : undefined,
timeoutInMillis: typeof timeout === "number" ? timeout : undefined,
};
}
function toStringArray(obj: unknown): Array<string> | undefined {
if (Array.isArray(obj) && obj.every((item) => typeof item === "string")) {
const arrayOfStrings: Array<string> = obj;
return arrayOfStrings;
} else {
return undefined;
}
}
// ---------------- safe-command helpers ----------------
/**
* Attempts to determine whether `cmd` is composed exclusively of safe
* subcommands combined using only operators from the SAFE_SHELL_OPERATORS
* allowlist. Returns the `SafeCommandReason` (taken from the first subcommand)
* if the whole expression is safe; otherwise returns `null`.
*/
function computeAutoApproval(cmd: Array<string>): SafeCommandReason | null {
// Fast path: a simple command with no shell processing.
const direct = isSafeCommand(cmd);
if (direct != null) {
return direct;
}
// For expressions like ["bash", "-lc", "ls && pwd"], break down the inner
// string and verify each segment.
if (
cmd.length === 3 &&
cmd[0] === "bash" &&
cmd[1] === "-lc" &&
typeof cmd[2] === "string" &&
shellQuoteParse
) {
const parsed = shellQuoteParse(cmd[2], process.env ?? {});
if (parsed.length === 0) {
return null;
}
let current: Array<string> = [];
let first: SafeCommandReason | null = null;
const flush = (): boolean => {
if (current.length === 0) {
return true;
}
const safe = isSafeCommand(current);
if (safe == null) {
return false;
}
if (!first) {
first = safe;
}
current = [];
return true;
};
for (const part of parsed) {
if (typeof part === "string") {
// Simple word/argument token.
if (part === "(" || part === ")" || part === "{" || part === "}") {
// We treat explicit grouping tokens as unsafe because their
// semantics depend on the shell evaluation environment.
return null;
}
current.push(part);
} else if (part && typeof part === "object") {
const opToken = part as { op?: string };
if (typeof opToken.op === "string") {
if (!flush()) {
return null;
}
if (!SAFE_SHELL_OPERATORS.has(opToken.op)) {
return null;
}
} else {
// Unknown object token kind (e.g. redirection); treat as unsafe.
return null;
}
} else {
// Token types such as numbers / booleans are unexpected; treat as unsafe.
return null;
}
}
if (!flush()) {
return null;
}
return first;
}
return null;
}
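// ---------------------------------------------------------------------------
// Usage sketch (illustrative): a function_call arguments payload as a model
// might emit it. Both the "cmd" and "command" spellings are accepted; the
// values below are hypothetical.
// ---------------------------------------------------------------------------
function exampleParseArguments(): ExecInput | undefined {
// => { cmd: ["ls", "-la"], workdir: "/tmp", timeoutInMillis: 5000 }
return parseToolCallArguments(
JSON.stringify({ cmd: ["ls", "-la"], timeout: 5_000, workdir: "/tmp" }),
);
}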

View File

@@ -0,0 +1,53 @@
export const CLI_VERSION = "0.1.04152057"; // Must be in sync with package.json.
export const ORIGIN = "codex_cli_ts";
export type TerminalChatSession = {
/** Globally unique session identifier */
id: string;
/** The OpenAI username associated with this session */
user: string;
/** Version identifier of the Codex CLI that produced the session */
version: string;
/** The model used for the conversation */
model: string;
/** ISO timestamp noting when the session was persisted */
timestamp: string;
/** Optional custom instructions that were active for the run */
instructions: string;
};
let sessionId = "";
/**
* Update the globally tracked session identifier.
* Passing an empty string clears the current session.
*/
export function setSessionId(id: string): void {
sessionId = id;
}
/**
* Retrieve the currently active session identifier, or an empty string when
* no session is active.
*/
export function getSessionId(): string {
return sessionId;
}
let currentModel = "";
/**
* Record the model that is currently being used for the conversation.
* Setting an empty string clears the record so the next agent run can update it.
*/
export function setCurrentModel(model: string): void {
currentModel = model;
}
/**
* Return the model that was last supplied to {@link setCurrentModel}.
* If no model has been recorded yet, an empty string is returned.
*/
export function getCurrentModel(): string {
return currentModel;
}
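// ---------------------------------------------------------------------------
// Usage sketch (illustrative): typical lifecycle of the session-scoped state.
// The UUID and model name are hypothetical placeholder values.
// ---------------------------------------------------------------------------
function exampleSessionLifecycle(): void {
setSessionId("00000000-0000-0000-0000-000000000000");
setCurrentModel("o4-mini");
void getSessionId(); // -> the UUID above
setSessionId(""); // clears the session again
}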

View File

@@ -0,0 +1,27 @@
import path from "path";
export function shortenPath(p: string, maxLength = 40): string {
const home = process.env["HOME"];
// Replace home directory with '~' if applicable.
const displayPath =
home !== undefined && p.startsWith(home) ? p.replace(home, "~") : p;
if (displayPath.length <= maxLength) {
return displayPath;
}
const parts = displayPath.split(path.sep);
let result = "";
for (let i = parts.length - 1; i >= 0; i--) {
const candidate = path.join("~", "...", ...parts.slice(i));
if (candidate.length <= maxLength) {
result = candidate;
} else {
break;
}
}
return result || displayPath.slice(-maxLength);
}
export function shortCwd(maxLength = 40): string {
return shortenPath(process.cwd(), maxLength);
}
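// ---------------------------------------------------------------------------
// Usage sketch (illustrative): with HOME=/home/alice (hypothetical), a path
// one character over the 40-char budget collapses to the longest suffix that
// still fits, prefixed with "~/...".
// ---------------------------------------------------------------------------
function exampleShortenPath(): string {
// "/home/alice/work/codex/src/utils/agent/platform.ts"
//   -> "~/.../codex/src/utils/agent/platform.ts" (39 chars)
return shortenPath("/home/alice/work/codex/src/utils/agent/platform.ts", 40);
}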

View File

@@ -0,0 +1,190 @@
import type { EditedFiles, FileOperation } from "./file_ops";
import { createTwoFilesPatch } from "diff";
/**************************************
* ANSI color codes for output styling
**************************************/
const RED = "\u001b[31m";
const GREEN = "\u001b[32m";
const CYAN = "\u001b[36m";
const YELLOW = "\u001b[33m";
const RESET = "\u001b[0m";
/******************************************************
* Generate a unified diff of two file contents
* akin to generate_file_diff(original, updated)
******************************************************/
export function generateFileDiff(
originalContent: string,
updatedContent: string,
filePath: string,
): string {
return createTwoFilesPatch(
`${filePath} (original)`,
`${filePath} (modified)`,
originalContent,
updatedContent,
undefined,
undefined,
{ context: 5 },
);
}
/******************************************************
* Apply colorization to a unified diff
* akin to generate_colored_diff(diff_content)
******************************************************/
export function generateColoredDiff(diffContent: string): string {
const lines = diffContent.split(/\r?\n/);
const coloredLines: Array<string> = [];
for (const line of lines) {
if (line.startsWith("+++") || line.startsWith("---")) {
// keep these lines uncolored, preserving the original style
coloredLines.push(line);
} else if (line.startsWith("+")) {
// color lines that begin with + but not +++
coloredLines.push(`${GREEN}${line}${RESET}`);
} else if (line.startsWith("-")) {
// color lines that begin with - but not ---
coloredLines.push(`${RED}${line}${RESET}`);
} else if (line.startsWith("@@")) {
// hunk header
coloredLines.push(`${CYAN}${line}${RESET}`);
} else {
coloredLines.push(line);
}
}
return coloredLines.join("\n");
}
/******************************************************
* Count lines added and removed in a unified diff.
* akin to generate_diff_stats(diff_content).
******************************************************/
export function generateDiffStats(diffContent: string): [number, number] {
let linesAdded = 0;
let linesRemoved = 0;
const lines = diffContent.split(/\r?\n/);
for (const line of lines) {
if (line.startsWith("+") && !line.startsWith("+++")) {
linesAdded += 1;
} else if (line.startsWith("-") && !line.startsWith("---")) {
linesRemoved += 1;
}
}
return [linesAdded, linesRemoved];
}
/************************************************
* Helper for generating a short header block
************************************************/
function generateDiffHeader(fileOp: FileOperation): string {
const TTY_WIDTH = 80;
const separatorLine = "=".repeat(TTY_WIDTH) + "\n";
const subSeparatorLine = "-".repeat(TTY_WIDTH) + "\n";
const headerLine = `Changes for: ${fileOp.path}`;
return separatorLine + headerLine + "\n" + subSeparatorLine;
}
/****************************************************************
* Summarize diffs for each file operation that has differences.
* akin to generate_diff_summary(edited_files, original_files)
****************************************************************/
export function generateDiffSummary(
editedFiles: EditedFiles,
originalFileContents: Record<string, string>,
): [string, Array<FileOperation>] {
let combinedDiffs = "";
const opsToApply: Array<FileOperation> = [];
for (const fileOp of editedFiles.ops) {
const diffHeader = generateDiffHeader(fileOp);
if (fileOp.delete) {
// file will be deleted
combinedDiffs += diffHeader + "File will be deleted.\n\n";
opsToApply.push(fileOp);
continue;
} else if (fileOp.move_to) {
combinedDiffs +=
diffHeader + `File will be moved to: ${fileOp.move_to}\n\n`;
opsToApply.push(fileOp);
continue;
}
// otherwise it's an update
const originalContent = originalFileContents[fileOp.path] ?? "";
const updatedContent = fileOp.updated_full_content ?? "";
if (originalContent === updatedContent) {
// no changes => skip
continue;
}
const diffOutput = generateFileDiff(
originalContent,
updatedContent,
fileOp.path,
);
if (diffOutput.trim()) {
const coloredDiff = generateColoredDiff(diffOutput);
combinedDiffs += diffHeader + coloredDiff + "\n";
opsToApply.push(fileOp);
}
}
return [combinedDiffs, opsToApply];
}
/****************************************************************
* Generate a user-friendly summary of the pending file ops.
* akin to generate_edit_summary(ops_to_apply, original_files)
****************************************************************/
export function generateEditSummary(
opsToApply: Array<FileOperation>,
originalFileContents: Record<string, string>,
): string {
if (!opsToApply || opsToApply.length === 0) {
return "No changes detected.";
}
const summaryLines: Array<string> = [];
for (const fileOp of opsToApply) {
if (fileOp.delete) {
// red for deleted
summaryLines.push(`${RED} Deleted: ${fileOp.path}${RESET}`);
} else if (fileOp.move_to) {
// yellow for moved
summaryLines.push(
`${YELLOW} Moved: ${fileOp.path} -> ${fileOp.move_to}${RESET}`,
);
} else {
const originalContent = originalFileContents[fileOp.path];
const updatedContent = fileOp.updated_full_content ?? "";
if (originalContent === undefined) {
// newly created file
const linesAdded = updatedContent.split(/\r?\n/).length;
summaryLines.push(
`${GREEN} Created: ${fileOp.path} (+${linesAdded} lines)${RESET}`,
);
} else {
const diffOutput = generateFileDiff(
originalContent,
updatedContent,
fileOp.path,
);
const [added, removed] = generateDiffStats(diffOutput);
summaryLines.push(
` Modified: ${fileOp.path} (${GREEN}+${added}${RESET}/${RED}-${removed}${RESET})`,
);
}
}
}
return summaryLines.join("\n");
}
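// ---------------------------------------------------------------------------
// Usage sketch (illustrative): diff two in-memory versions of a file and
// summarise the change counts. The file name is purely a label in the header.
// ---------------------------------------------------------------------------
function exampleDiffStats(): [number, number] {
const diff = generateFileDiff("a\nb\n", "a\nc\n", "example.txt");
void generateColoredDiff(diff); // ANSI-highlighted rendering for the TTY
return generateDiffStats(diff); // => [1, 1]
}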

View File

@@ -0,0 +1,64 @@
/** Represents file contents with a path and its full text. */
export interface FileContent {
path: string;
content: string;
}
/**
* Represents the context for a task, including:
* - A prompt (the user's request)
* - A list of input paths being considered editable
* - A directory structure overview
* - A collection of file contents
*/
export interface TaskContext {
prompt: string;
input_paths: Array<string>;
input_paths_structure: string;
files: Array<FileContent>;
}
/**
* Renders a string version of the TaskContext, including a note about important output requirements,
* summary of the directory structure (unless omitted), and an XML-like listing of the file contents.
*
* The user is instructed to produce only changes for files strictly under the specified paths
* and provide full file contents in any modifications.
*/
export function renderTaskContext(taskContext: TaskContext): string {
const inputPathsJoined = taskContext.input_paths.join(", ");
return `
Complete the following task: ${taskContext.prompt}
# IMPORTANT OUTPUT REQUIREMENTS
- UNDER NO CIRCUMSTANCES PRODUCE PARTIAL OR TRUNCATED FILE CONTENT. You MUST provide the FULL AND FINAL content for every file modified.
- ALWAYS INCLUDE THE COMPLETE UPDATED VERSION OF THE FILE, do not omit or only partially include lines.
- ONLY produce changes for files located strictly under ${inputPathsJoined}.
- ALWAYS produce absolute paths in the output.
- Do not delete or change code UNRELATED to the task.
# **Directory structure**
${taskContext.input_paths_structure}
# Files
${renderFilesToXml(taskContext.files)}
`;
}
/**
* Converts the provided list of FileContent objects into a custom XML-like format.
*
* For each file, we embed the content in a CDATA section.
*/
function renderFilesToXml(files: Array<FileContent>): string {
let xmlContent = "<files>";
for (const fc of files) {
xmlContent += `
<file>
<path>${fc.path}</path>
<content><![CDATA[${fc.content}]]></content>
</file>`;
}
xmlContent += "\n</files>";
return xmlContent;
}
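// ---------------------------------------------------------------------------
// Usage sketch (illustrative): assembling a minimal TaskContext. The paths,
// prompt, and file content are hypothetical placeholders.
// ---------------------------------------------------------------------------
function exampleRenderTaskContext(): string {
const ctx: TaskContext = {
prompt: "Rename the exported helper",
input_paths: ["/repo/src"],
input_paths_structure: "/repo/src\n└── helper.ts",
files: [{ path: "/repo/src/helper.ts", content: "export const x = 1;\n" }],
};
return renderTaskContext(ctx);
}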

View File

@@ -0,0 +1,409 @@
/* eslint-disable no-await-in-loop */
import * as fsSync from "fs";
import fs from "fs/promises";
import path from "path";
/** Represents file contents with absolute path. */
export interface FileContent {
path: string;
content: string;
}
/** A simple LRU cache entry structure. */
interface CacheEntry {
/** Last modification time of the file (epoch ms). */
mtime: number;
/** Size of the file in bytes. */
size: number;
/** Entire file content. */
content: string;
}
/**
* A minimal LRU-based file cache to store file contents keyed by absolute path.
* We store (mtime, size, content). If a file's mtime or size changes, we consider
* the cache invalid and re-read.
*/
class LRUFileCache {
private maxSize: number;
private cache: Map<string, CacheEntry>;
constructor(maxSize: number) {
this.maxSize = maxSize;
this.cache = new Map();
}
/**
* Retrieves the cached entry for the given path, if it exists.
* If found, we re-insert it in the map to mark it as recently used.
*/
get(key: string): CacheEntry | undefined {
const entry = this.cache.get(key);
if (entry) {
// Re-insert to maintain recency
this.cache.delete(key);
this.cache.set(key, entry);
}
return entry;
}
/**
* Insert or update an entry in the cache.
*/
set(key: string, entry: CacheEntry): void {
// if key already in map, delete it so that insertion below sets recency.
if (this.cache.has(key)) {
this.cache.delete(key);
}
this.cache.set(key, entry);
// If over capacity, evict the least recently used entry.
if (this.cache.size > this.maxSize) {
const firstKey = this.cache.keys().next();
if (!firstKey.done) {
this.cache.delete(firstKey.value);
}
}
}
/**
* Remove an entry from the cache.
*/
delete(key: string): void {
this.cache.delete(key);
}
/**
* Returns all keys in the cache (for pruning old files, etc.).
*/
keys(): IterableIterator<string> {
return this.cache.keys();
}
}
// Environment-based defaults
const MAX_CACHE_ENTRIES = parseInt(
process.env["TENX_FILE_CACHE_MAX_ENTRIES"] || "1000",
10,
);
// Global LRU file cache instance.
const FILE_CONTENTS_CACHE = new LRUFileCache(MAX_CACHE_ENTRIES);
// Default list of glob patterns to ignore if the user doesn't provide a custom ignore file.
const DEFAULT_IGNORE_PATTERNS = `
# Binaries and large media
*.woff
*.exe
*.dll
*.bin
*.dat
*.pdf
*.png
*.jpg
*.jpeg
*.gif
*.bmp
*.tiff
*.ico
*.zip
*.tar
*.gz
*.rar
*.7z
*.mp3
*.mp4
*.avi
*.mov
*.wmv
# Build and distribution
build/*
dist/*
# Logs and temporary files
*.log
*.tmp
*.swp
*.swo
*.bak
*.old
# Python artifacts
*.egg-info/*
__pycache__/*
*.pyc
*.pyo
*.pyd
.pytest_cache/*
.ruff_cache/*
venv/*
.venv/*
env/*
# Rust artifacts
target/*
Cargo.lock
# Node.js artifacts
*.tsbuildinfo
node_modules/*
package-lock.json
# Environment files
.env/*
# Git
.git/*
# OS specific files
.DS_Store
Thumbs.db
# Hidden files
.*/*
.*
`;
function _read_default_patterns_file(filePath?: string): string {
if (!filePath) {
return DEFAULT_IGNORE_PATTERNS;
}
return fsSync.readFileSync(filePath, "utf-8");
}
/** Loads ignore patterns from a file (or a default list) and returns a list of RegExp patterns. */
export function loadIgnorePatterns(filePath?: string): Array<RegExp> {
try {
const raw = _read_default_patterns_file(filePath);
const lines = raw.split(/\r?\n/);
const cleaned = lines
.map((l: string) => l.trim())
.filter((l: string) => l && !l.startsWith("#"));
// Convert each glob-style pattern to a RegExp that also matches the pattern
// under any leading directory prefix ('.*/').
const regs = cleaned.map((pattern: string) => {
const escaped = pattern
.replace(/[.+^${}()|[\]\\]/g, "\\$&")
.replace(/\*/g, ".*")
.replace(/\?/g, ".");
const finalRe = `^(?:(?:(?:.*/)?)(?:${escaped}))$`;
return new RegExp(finalRe, "i");
});
return regs;
} catch {
return [];
}
}
/** Checks if a given path is ignored by any of the compiled patterns. */
export function shouldIgnorePath(
p: string,
compiledPatterns: Array<RegExp>,
): boolean {
const normalized = path.resolve(p);
for (const regex of compiledPatterns) {
if (regex.test(normalized)) {
return true;
}
}
return false;
}
/**
* Recursively builds an ASCII representation of a directory structure, given a list
* of file paths.
*/
export function makeAsciiDirectoryStructure(
rootPath: string,
filePaths: Array<string>,
): string {
const root = path.resolve(rootPath);
// We'll store a nested object. Directories => sub-tree or null if it's a file.
interface DirTree {
[key: string]: DirTree | null;
}
const tree: DirTree = {};
for (const file of filePaths) {
const resolved = path.resolve(file);
let relPath: string;
try {
const rp = path.relative(root, resolved);
// If it's outside of root, skip.
if (rp.startsWith("..")) {
continue;
}
relPath = rp;
} catch {
continue;
}
const parts = relPath.split(path.sep);
let current: DirTree = tree;
for (let i = 0; i < parts.length; i++) {
const part = parts[i];
if (!part) {
continue;
}
if (i === parts.length - 1) {
// file
current[part] = null;
} else {
if (!current[part]) {
current[part] = {};
}
current = current[part] as DirTree;
}
}
}
const lines: Array<string> = [root];
function recurse(node: DirTree, prefix: string): void {
const entries = Object.keys(node).sort((a, b) => {
// Directories first, then files
const aIsDir = node[a] != null;
const bIsDir = node[b] != null;
if (aIsDir && !bIsDir) {
return -1;
}
if (!aIsDir && bIsDir) {
return 1;
}
return a.localeCompare(b);
});
for (let i = 0; i < entries.length; i++) {
const entry = entries[i];
if (!entry) {
continue;
}
const isLast = i === entries.length - 1;
const connector = isLast ? "└──" : "├──";
const isDir = node[entry] != null;
lines.push(`${prefix}${connector} ${entry}`);
if (isDir) {
const newPrefix = prefix + (isLast ? " " : "│ ");
recurse(node[entry] as DirTree, newPrefix);
}
}
}
recurse(tree, "");
return lines.join("\n");
}
/**
* Recursively collects all files under rootPath that are not ignored, skipping symlinks.
* Then for each file, we check if it's in the LRU cache. If not or changed, we read it.
* Returns an array of FileContent.
*
* After collecting, we remove from the cache any file that was not seen during the walk.
*/
export async function getFileContents(
rootPath: string,
compiledPatterns: Array<RegExp>,
): Promise<Array<FileContent>> {
const root = path.resolve(rootPath);
const candidateFiles: Array<string> = [];
// Directories left to walk (pop() makes this a depth-first traversal).
const queue: Array<string> = [root];
while (queue.length > 0) {
const currentDir = queue.pop()!;
let dirents: Array<fsSync.Dirent> = [];
try {
dirents = await fs.readdir(currentDir, { withFileTypes: true });
} catch {
continue;
}
for (const dirent of dirents) {
try {
const resolved = path.resolve(currentDir, dirent.name);
// skip symlinks
const lstat = await fs.lstat(resolved);
if (lstat.isSymbolicLink()) {
continue;
}
if (dirent.isDirectory()) {
// check if ignored
if (!shouldIgnorePath(resolved, compiledPatterns)) {
queue.push(resolved);
}
} else if (dirent.isFile()) {
// check if ignored
if (!shouldIgnorePath(resolved, compiledPatterns)) {
candidateFiles.push(resolved);
}
}
} catch {
// skip
}
}
}
// Stat each candidate file and serve unchanged files straight from the cache.
const results: Array<FileContent> = [];
// We'll keep track of which files we actually see.
const seenPaths = new Set<string>();
await Promise.all(
candidateFiles.map(async (filePath) => {
seenPaths.add(filePath);
let st: fsSync.Stats | null = null;
try {
st = await fs.stat(filePath);
} catch {
return;
}
if (!st) {
return;
}
const cEntry = FILE_CONTENTS_CACHE.get(filePath);
if (
cEntry &&
Math.abs(cEntry.mtime - st.mtime.getTime()) < 1 &&
cEntry.size === st.size
) {
// same mtime, same size => use cache
results.push({ path: filePath, content: cEntry.content });
} else {
// read file
try {
const buf = await fs.readFile(filePath);
const content = buf.toString("utf-8");
// store in cache
FILE_CONTENTS_CACHE.set(filePath, {
mtime: st.mtime.getTime(),
size: st.size,
content,
});
results.push({ path: filePath, content });
} catch {
// skip
}
}
}),
);
// Now remove from cache any file that wasn't encountered.
const currentKeys = [...FILE_CONTENTS_CACHE.keys()];
for (const key of currentKeys) {
if (!seenPaths.has(key)) {
FILE_CONTENTS_CACHE.delete(key);
}
}
// sort results by path
results.sort((a, b) => a.path.localeCompare(b.path));
return results;
}
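// ---------------------------------------------------------------------------
// Usage sketch (illustrative): snapshot the current project with the built-in
// ignore list, then render it as an ASCII tree. A second call reuses cached
// contents for any file whose mtime and size are unchanged.
// ---------------------------------------------------------------------------
async function exampleSnapshot(): Promise<string> {
const patterns = loadIgnorePatterns(); // no file argument => defaults
const files = await getFileContents(process.cwd(), patterns);
return makeAsciiDirectoryStructure(
process.cwd(),
files.map((f) => f.path),
);
}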

View File

@@ -0,0 +1,208 @@
/* eslint-disable no-console */
import type { FileContent } from "./context_files.js";
import path from "path";
/**
* Builds file-size and total-size maps for the provided files, keyed by absolute path.
*
* @param root - The root directory (absolute path) to treat as the top level; the upward walk stops here.
* @param files - An array of FileContent objects, each with a path and content.
* @returns A tuple [fileSizeMap, totalSizeMap] where:
* - fileSizeMap[path] = size (in characters) of the file
* - totalSizeMap[path] = cumulative size (in characters) for path (file or directory)
*/
export function computeSizeMap(
root: string,
files: Array<FileContent>,
): [Record<string, number>, Record<string, number>] {
const rootAbs = path.resolve(root);
const fileSizeMap: Record<string, number> = {};
const totalSizeMap: Record<string, number> = {};
for (const fc of files) {
const pAbs = path.resolve(fc.path);
const length = fc.content.length;
// Record size in fileSizeMap
fileSizeMap[pAbs] = length;
// Ascend from pAbs up to root, adding size along the way.
let current = pAbs;
// eslint-disable-next-line no-constant-condition
while (true) {
totalSizeMap[current] = (totalSizeMap[current] ?? 0) + length;
if (current === rootAbs) {
break;
}
const parent = path.dirname(current);
// If we've reached the top or gone outside root, break.
if (parent === current) {
// e.g. we're at "/" in a *nix system or some root in Windows.
break;
}
// If we have gone above the root (meaning the parent no longer starts with rootAbs), break.
if (!parent.startsWith(rootAbs) && parent !== rootAbs) {
break;
}
current = parent;
}
}
return [fileSizeMap, totalSizeMap];
}
/**
* Builds a mapping of directories to their immediate children. The keys and values
* are absolute paths. For each path in totalSizeMap (except the root itself), we find
* its parent (if also in totalSizeMap) and add the path to the children of that parent.
*
* @param root - The root directory (absolute path).
* @param totalSizeMap - A map from path -> cumulative size.
* @returns A record that maps directory paths to arrays of child paths.
*/
export function buildChildrenMap(
root: string,
totalSizeMap: Record<string, number>,
): Record<string, Array<string>> {
const rootAbs = path.resolve(root);
const childrenMap: Record<string, Array<string>> = {};
// Initialize all potential keys so that each path has an entry.
for (const p of Object.keys(totalSizeMap)) {
if (!childrenMap[p]) {
childrenMap[p] = [];
}
}
for (const p of Object.keys(totalSizeMap)) {
if (p === rootAbs) {
continue;
}
const parent = path.dirname(p);
// If the parent is also tracked in totalSizeMap, we record p as a child.
if (totalSizeMap[parent] !== undefined && parent !== p) {
if (!childrenMap[parent]) {
childrenMap[parent] = [];
}
childrenMap[parent].push(p);
}
}
// Sort the children.
for (const val of Object.values(childrenMap)) {
val.sort((a, b) => {
return a.localeCompare(b);
});
}
return childrenMap;
}
/**
* Recursively prints a directory/file tree, showing size usage.
*
* @param current - The current absolute path (directory or file) to print.
* @param childrenMap - A mapping from directory paths to an array of their child paths.
* @param fileSizeMap - A map from file path to file size (characters).
* @param totalSizeMap - A map from path to total cumulative size.
* @param prefix - The current prefix used for ASCII indentation.
* @param isLast - Whether the current path is the last child in its parent.
* @param contextLimit - The context limit for reference.
*/
export function printSizeTree(
current: string,
childrenMap: Record<string, Array<string>>,
fileSizeMap: Record<string, number>,
totalSizeMap: Record<string, number>,
prefix: string,
isLast: boolean,
contextLimit: number,
): void {
const connector = isLast ? "└──" : "├──";
const label = path.basename(current) || current;
const totalSz = totalSizeMap[current] ?? 0;
const percentageOfLimit =
contextLimit > 0 ? (totalSz / contextLimit) * 100 : 0;
if (fileSizeMap[current] !== undefined) {
// It's a file
const fileSz = fileSizeMap[current];
console.log(
`${prefix}${connector} ${label} [file: ${fileSz} chars, cumulative: ${totalSz} chars, ${percentageOfLimit.toFixed(
2,
)}% of limit]`,
);
} else {
// It's a directory
console.log(
`${prefix}${connector} ${label} [dir: ${totalSz} chars, ${percentageOfLimit.toFixed(
2,
)}% of limit]`,
);
}
const newPrefix = prefix + (isLast ? " " : "│ ");
const children = childrenMap[current] || [];
for (let i = 0; i < children.length; i++) {
const child = children[i];
const childIsLast = i === children.length - 1;
printSizeTree(
child!,
childrenMap,
fileSizeMap,
totalSizeMap,
newPrefix,
childIsLast,
contextLimit,
);
}
}
/**
* Prints a size breakdown for the entire directory (and subpaths), listing cumulative percentages.
*
* @param directory - The directory path (absolute or relative) for which to print the breakdown.
* @param files - The array of FileContent representing the files under that directory.
* @param contextLimit - The maximum context character limit.
*/
export function printDirectorySizeBreakdown(
directory: string,
files: Array<FileContent>,
contextLimit = 300_000,
): void {
const rootAbs = path.resolve(directory);
const [fileSizeMap, totalSizeMap] = computeSizeMap(rootAbs, files);
const childrenMap = buildChildrenMap(rootAbs, totalSizeMap);
console.log("\nContext size breakdown by directory and file:");
const rootTotal = totalSizeMap[rootAbs] ?? 0;
const rootPct =
contextLimit > 0 ? ((rootTotal / contextLimit) * 100).toFixed(2) : "0";
const rootLabel = path.basename(rootAbs) || rootAbs;
console.log(`${rootLabel} [dir: ${rootTotal} chars, ${rootPct}% of limit]`);
const rootChildren = childrenMap[rootAbs] || [];
rootChildren.sort((a, b) => a.localeCompare(b));
for (let i = 0; i < rootChildren.length; i++) {
const child = rootChildren[i];
const childIsLast = i === rootChildren.length - 1;
printSizeTree(
child!,
childrenMap,
fileSizeMap,
totalSizeMap,
"",
childIsLast,
contextLimit,
);
}
}
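// ---------------------------------------------------------------------------
// Usage sketch (illustrative): print how much of a 300k-character context
// budget a (hypothetical) two-file project consumes.
// ---------------------------------------------------------------------------
function exampleSizeBreakdown(): void {
printDirectorySizeBreakdown("/repo", [
{ path: "/repo/src/a.ts", content: "x".repeat(1_000) },
{ path: "/repo/README.md", content: "y".repeat(500) },
]);
}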

View File

@@ -0,0 +1,47 @@
import { z } from "zod";
/**
* Represents a file operation, including modifications, deletes, and moves.
*/
export const FileOperationSchema = z.object({
/**
* Absolute path to the file.
*/
path: z.string(),
/**
* FULL CONTENT of the file after modification. Provides the FULL AND FINAL content of
* the file after modification WITHOUT OMITTING OR TRUNCATING ANY PART OF THE FILE.
*
* Mutually exclusive with 'delete' and 'move_to'.
*/
updated_full_content: z.string().nullable().optional(),
/**
* Set to true if the file is to be deleted.
*
* Mutually exclusive with 'updated_full_content' and 'move_to'.
*/
delete: z.boolean().nullable().optional(),
/**
* New path of the file if it is to be moved.
*
* Mutually exclusive with 'updated_full_content' and 'delete'.
*/
move_to: z.string().nullable().optional(),
});
export type FileOperation = z.infer<typeof FileOperationSchema>;
/**
* Container for one or more FileOperation objects.
*/
export const EditedFilesSchema = z.object({
/**
* A list of file operations that are applied in order.
*/
ops: z.array(FileOperationSchema),
});
export type EditedFiles = z.infer<typeof EditedFilesSchema>;
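// ---------------------------------------------------------------------------
// Usage sketch (illustrative): validating a model-produced payload. Exactly
// one of the three mutually exclusive fields should be set per operation;
// `parse` throws a ZodError if the shape does not match.
// ---------------------------------------------------------------------------
function exampleParseEditedFiles(raw: unknown): EditedFiles {
return EditedFilesSchema.parse(raw);
}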

View File

@@ -0,0 +1,61 @@
/* eslint-disable no-console */
import type { ResponseItem } from "openai/resources/responses/responses";
import { loadConfig } from "../config";
import crypto from "crypto";
import fs from "fs/promises";
import os from "os";
import path from "path";
const SESSIONS_ROOT = path.join(os.homedir(), ".codex", "sessions");
async function saveRolloutToHomeSessions(
items: Array<ResponseItem>,
): Promise<void> {
await fs.mkdir(SESSIONS_ROOT, { recursive: true });
const sessionId = crypto.randomUUID();
const timestamp = new Date().toISOString();
const ts = timestamp.replace(/[:.]/g, "-").slice(0, 10);
const filename = `rollout-${ts}-${sessionId}.json`;
const filePath = path.join(SESSIONS_ROOT, filename);
const config = loadConfig();
try {
await fs.writeFile(
filePath,
JSON.stringify(
{
session: {
timestamp,
id: sessionId,
instructions: config.instructions,
},
items,
},
null,
2,
),
"utf8",
);
} catch (error) {
console.error(`Failed to save rollout to ${filePath}: `, error);
}
}
let debounceTimer: NodeJS.Timeout | null = null;
let pendingItems: Array<ResponseItem> | null = null;
export function saveRollout(items: Array<ResponseItem>): void {
pendingItems = items;
if (debounceTimer) {
clearTimeout(debounceTimer);
}
debounceTimer = setTimeout(() => {
if (pendingItems) {
saveRolloutToHomeSessions(pendingItems).catch(() => {});
pendingItems = null;
}
debounceTimer = null;
}, 2000);
}
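// ---------------------------------------------------------------------------
// Usage sketch (illustrative): it is safe to call saveRollout on every
// streamed item; only the last snapshot in each two-second window is written
// to ~/.codex/sessions.
// ---------------------------------------------------------------------------
function exampleSaveRollout(items: Array<ResponseItem>): void {
saveRollout(items); // cheap: just re-arms the debounce timer
}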

View File

@@ -0,0 +1,82 @@
import type { Instance } from "ink";
import type React from "react";
let inkRenderer: Instance | null = null;
// Track whether the cleanup routine has already executed so repeat calls are
// silently ignored. This can happen when different exit paths (e.g. the raw
// Ctrl-C handler and the process "exit" event) both attempt to tidy up.
let didRunOnExit = false;
export function setInkRenderer(renderer: Instance): void {
inkRenderer = renderer;
if (process.env["CODEX_FPS_DEBUG"]) {
let last = Date.now();
const logFrame = () => {
const now = Date.now();
// eslint-disable-next-line no-console
console.error(`[fps] frame in ${now - last}ms`);
last = now;
};
// Monkey-patch the public rerender/clear methods so we know when Ink
// flushes a new frame. React's internal renders eventually call
// `rerender()` so this gives us a good approximation without poking into
// private APIs.
const origRerender = renderer.rerender.bind(renderer);
renderer.rerender = (node: React.ReactNode) => {
logFrame();
return origRerender(node);
};
const origClear = renderer.clear.bind(renderer);
renderer.clear = () => {
logFrame();
return origClear();
};
}
}
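// Usage sketch (illustrative): launching the CLI with the flag below makes
// each Ink frame log its delta to stderr, e.g.
//
//   CODEX_FPS_DEBUG=1 codex
//   [fps] frame in 16ms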
export function clearTerminal(): void {
if (process.env["CODEX_QUIET_MODE"] === "1") {
return;
}
// When using the alternate screen the content never scrolls, so we rarely
// need a full clear. Still expose the behaviour when explicitly requested
// (e.g. via Ctrl-L) but avoid unnecessary clears on every render to minimise
// flicker.
if (inkRenderer) {
inkRenderer.clear();
}
}
export function onExit(): void {
// Ensure the cleanup logic only runs once even if multiple exit signals
// (e.g. Ctrl-C data handler *and* the process "exit" event) invoke this
// function. Re-running the sequence is mostly harmless but can lead to
// duplicate log messages and increases the risk of confusing side-effects
// should future cleanup steps become non-idempotent.
if (didRunOnExit) {
return;
}
didRunOnExit = true;
// First make sure Ink is properly unmounted so it can restore any terminal
// state it modified (e.g. raw mode on stdin). Failing to do so leaves the
// terminal in raw mode after the Node process has exited, which looks like
// a “frozen” shell: no input is echoed and Ctrl-C/Z no longer work. This
// regression was introduced when we switched from `inkRenderer.unmount()`
// to letting `process.exit` terminate the program a few commits ago. By
// explicitly unmounting here we ensure Ink performs its cleanup logic
// *before* we restore the primary screen buffer.
if (inkRenderer) {
try {
inkRenderer.unmount();
} catch {
/* best-effort: continue even if Ink throws */
}
}
}