chore: consolidate model utils and drive-by cleanups (#476)
Signed-off-by: Thibault Sottiaux <tibo@openai.com>
parent dc276999a9
commit 3c4f1fea9b
@@ -136,8 +136,8 @@ export function canAutoApprove(
   // bashCmd could be a mix of strings and operators, e.g.:
   // "ls || (true && pwd)" => [ 'ls', { op: '||' }, '(', 'true', { op: '&&' }, 'pwd', ')' ]
   // We try to ensure that *every* command segment is deemed safe and that
-  // all operators belong to an allow‑list. If so, the entire expression is
-  // considered auto‑approvable.
+  // all operators belong to an allow-list. If so, the entire expression is
+  // considered auto-approvable.

   const shellSafe = isEntireShellExpressionSafe(bashCmd);
   if (shellSafe != null) {
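For readers unfamiliar with `shell-quote`, here is a minimal sketch of the segment walk these comments describe; `splitOnOperators` is an illustrative name, not the helper the file actually defines:

    import { parse } from "shell-quote";

    // parse("ls || (true && pwd)") yields the mix shown above:
    // [ 'ls', { op: '||' }, '(', 'true', { op: '&&' }, 'pwd', ')' ]
    function splitOnOperators(
      entries: ReturnType<typeof parse>,
    ): Array<Array<string>> {
      const segments: Array<Array<string>> = [[]];
      for (const entry of entries) {
        if (typeof entry === "object" && entry != null && "op" in entry) {
          segments.push([]); // an operator closes the current command segment
        } else if (typeof entry === "string") {
          segments[segments.length - 1]!.push(entry);
        }
      }
      return segments.filter((segment) => segment.length > 0);
    }

Every resulting segment must be deemed safe, and every `{ op }` entry must be on the allow-list, for the whole expression to be auto-approvable.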
@@ -333,7 +333,7 @@ export function isSafeCommand(
       };
     case "true":
       return {
-        reason: "No‑op (true)",
+        reason: "No-op (true)",
         group: "Utility",
       };
     case "echo":
@@ -442,10 +442,10 @@ function isValidSedNArg(arg: string | undefined): boolean {

 // ---------------- Helper utilities for complex shell expressions -----------------

-// A conservative allow‑list of bash operators that do not, on their own, cause
+// A conservative allow-list of bash operators that do not, on their own, cause
 // side effects. Redirections (>, >>, <, etc.) and command substitution `$()`
 // are intentionally excluded. Parentheses used for grouping are treated as
-// strings by `shell‑quote`, so we do not add them here. Reference:
+// strings by `shell-quote`, so we do not add them here. Reference:
 // https://github.com/substack/node-shell-quote#parsecmd-opts
 const SAFE_SHELL_OPERATORS: ReadonlySet<string> = new Set([
   "&&", // logical AND
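Grounded in the comments above, a two-line sanity check of the allow-list's intent:

    SAFE_SHELL_OPERATORS.has("&&"); // true, listed above
    SAFE_SHELL_OPERATORS.has(">");  // false, redirections are intentionally excluded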
@@ -471,7 +471,7 @@ function isEntireShellExpressionSafe(
   }

   try {
-    // Collect command segments delimited by operators. `shell‑quote` represents
+    // Collect command segments delimited by operators. `shell-quote` represents
     // subshell grouping parentheses as literal strings "(" and ")"; treat them
     // as unsafe to keep the logic simple (since subshells could introduce
     // unexpected scope changes).
@@ -539,7 +539,7 @@ function isParseEntryWithOp(
   return (
     typeof entry === "object" &&
     entry != null &&
-    // Using the safe `in` operator keeps the check property‑safe even when
+    // Using the safe `in` operator keeps the check property-safe even when
     // `entry` is a `string`.
     "op" in entry &&
     typeof (entry as { op?: unknown }).op === "string"
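A quick check of what the guard accepts; plain strings, including grouping parens, are not operator entries:

    isParseEntryWithOp({ op: "&&" }); // true
    isParseEntryWithOp("(");          // false, parens arrive as plain strings
    isParseEntryWithOp("ls");         // false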
@@ -136,7 +136,7 @@ const cli = meow(
     },
     noProjectDoc: {
       type: "boolean",
-      description: "Disable automatic inclusion of project‑level codex.md",
+      description: "Disable automatic inclusion of project-level codex.md",
     },
     projectDoc: {
       type: "string",
@@ -202,19 +202,20 @@ complete -c codex -a '(_fish_complete_path)' -d 'file path'`,
   console.log(script);
   process.exit(0);
 }
-// Show help if requested
+// For --help, show help and exit.
 if (cli.flags.help) {
   cli.showHelp();
 }

-// Handle config flag: open instructions file in editor and exit
+// For --config, open custom instructions file in editor and exit.
 if (cli.flags.config) {
-  // Ensure configuration and instructions file exist
   try {
-    loadConfig();
+    loadConfig(); // Ensures the file is created if it doesn't already exist.
   } catch {
     // ignore errors
   }

   const filePath = INSTRUCTIONS_FILEPATH;
   const editor =
     process.env["EDITOR"] || (process.platform === "win32" ? "notepad" : "vi");
@@ -237,13 +238,13 @@ let config = loadConfig(undefined, undefined, {
 const prompt = cli.input[0];
 const model = cli.flags.model ?? config.model;
 const imagePaths = cli.flags.image;
-const provider = cli.flags.provider ?? config.provider;
+const provider = cli.flags.provider ?? config.provider ?? "openai";
 const apiKey = getApiKey(provider);

 if (!apiKey) {
   // eslint-disable-next-line no-console
   console.error(
-    `\n${chalk.red("Missing OpenAI API key.")}\n\n` +
+    `\n${chalk.red(`Missing ${provider} API key.`)}\n\n` +
       `Set the environment variable ${chalk.bold("OPENAI_API_KEY")} ` +
       `and re-run this command.\n` +
       `You can create a key here: ${chalk.bold(
@@ -262,13 +263,11 @@ config = {
   provider,
 };

-// Check for updates after loading config
-// This is important because we write state file in the config dir
+// Check for updates after loading config. This is important because we write state file in
+// the config dir.
 await checkForUpdates().catch();
-// ---------------------------------------------------------------------------
-// --flex-mode validation (only allowed for o3 and o4-mini)
-// ---------------------------------------------------------------------------

+// For --flex-mode, validate and exit if incorrect.
 if (cli.flags.flexMode) {
   const allowedFlexModels = new Set(["o3", "o4-mini"]);
   if (!allowedFlexModels.has(config.model)) {
@@ -282,13 +281,13 @@ if (cli.flags.flexMode) {
 }

 if (
-  !(await isModelSupportedForResponses(config.model)) &&
+  !(await isModelSupportedForResponses(provider, config.model)) &&
   (!provider || provider.toLowerCase() === "openai")
 ) {
   // eslint-disable-next-line no-console
   console.error(
     `The model "${config.model}" does not appear in the list of models ` +
-      `available to your account. Double‑check the spelling (use\n` +
+      `available to your account. Double-check the spelling (use\n` +
       `  openai models list\n` +
       `to see the full list) or choose another model with the --model flag.`,
   );
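The support check now takes the provider as its first argument; a minimal call, mirroring the updated tests at the bottom of this commit:

    const supported = await isModelSupportedForResponses("openai", "o4-mini");
    // Resolves true even when the /models request fails or times out (see the
    // Promise.race in model-utils.ts below).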
@@ -297,6 +296,7 @@ if (

 let rollout: AppRollout | undefined;

+// For --view, optionally load an existing rollout from disk, display it and exit.
 if (cli.flags.view) {
   const viewPath = cli.flags.view;
   const absolutePath = path.isAbsolute(viewPath)
@@ -312,7 +312,7 @@ if (cli.flags.view) {
   }
 }

-// If we are running in --fullcontext mode, do that and exit.
+// For --fullcontext, run the separate cli entrypoint and exit.
 if (fullContextMode) {
   await runSinglePass({
     originalPrompt: prompt,
@@ -328,11 +328,8 @@ const additionalWritableRoots: ReadonlyArray<string> = (
   cli.flags.writableRoot ?? []
 ).map((p) => path.resolve(p));

-// If we are running in --quiet mode, do that and exit.
-const quietMode = Boolean(cli.flags.quiet);
-const fullStdout = Boolean(cli.flags.fullStdout);
-
-if (quietMode) {
+// For --quiet, run the cli without user interactions and exit.
+if (cli.flags.quiet) {
   process.env["CODEX_QUIET_MODE"] = "1";
   if (!prompt || prompt.trim() === "") {
     // eslint-disable-next-line no-console
@@ -389,7 +386,7 @@ const instance = render(
     imagePaths={imagePaths}
     approvalPolicy={approvalPolicy}
     additionalWritableRoots={additionalWritableRoots}
-    fullStdout={fullStdout}
+    fullStdout={Boolean(cli.flags.fullStdout)}
   />,
   {
     patchConsole: process.env["DEBUG"] ? false : true,
@@ -501,13 +498,13 @@ process.on("SIGQUIT", exit);
 process.on("SIGTERM", exit);

 // ---------------------------------------------------------------------------
-// Fallback for Ctrl‑C when stdin is in raw‑mode
+// Fallback for Ctrl-C when stdin is in raw-mode
 // ---------------------------------------------------------------------------

 if (process.stdin.isTTY) {
   // Ensure we do not leave the terminal in raw mode if the user presses
-  // Ctrl‑C while some other component has focus and Ink is intercepting
-  // input. Node does *not* emit a SIGINT in raw‑mode, so we listen for the
+  // Ctrl-C while some other component has focus and Ink is intercepting
+  // input. Node does *not* emit a SIGINT in raw-mode, so we listen for the
   // corresponding byte (0x03) ourselves and trigger a graceful shutdown.
   const onRawData = (data: Buffer | string): void => {
     const str = Buffer.isBuffer(data) ? data.toString("utf8") : data;
@@ -518,6 +515,6 @@ if (process.stdin.isTTY) {
   process.stdin.on("data", onRawData);
 }

-// Ensure terminal clean‑up always runs, even when other code calls
+// Ensure terminal clean-up always runs, even when other code calls
 // `process.exit()` directly.
 process.once("exit", onExit);
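In isolation, the raw-mode fallback these hunks describe looks roughly like this; it is a sketch, and re-raising SIGINT is an assumption about how the handler hands off to the normal exit path:

    process.stdin.on("data", (data: Buffer | string) => {
      const str = Buffer.isBuffer(data) ? data.toString("utf8") : data;
      if (str.includes("\u0003")) {
        // 0x03 is the byte a terminal sends for Ctrl-C; raw mode suppresses SIGINT.
        process.kill(process.pid, "SIGINT"); // assumed hand-off, not the actual code
      }
    });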
@@ -14,7 +14,7 @@ import React, { useRef, useState } from "react";
  * The real `process.stdin` object exposed by Node.js inherits these methods
  * from `Socket`, but the lightweight stub used in tests only extends
  * `EventEmitter`. Ink calls the two methods when enabling/disabling raw
- * mode, so make them harmless no‑ops when they're absent to avoid runtime
+ * mode, so make them harmless no-ops when they're absent to avoid runtime
  * failures during unit tests.
  * ----------------------------------------------------------------------- */

@@ -1,113 +0,0 @@
-import type { ResponseItem } from "openai/resources/responses/responses.mjs";
-
-import { approximateTokensUsed } from "../../utils/approximate-tokens-used.js";
-
-/**
- * Type‑guard that narrows a {@link ResponseItem} to one that represents a
- * user‑authored message. The OpenAI SDK represents both input *and* output
- * messages with a discriminated union where:
- *   • `type` is the string literal "message" and
- *   • `role` is one of "user" | "assistant" | "system" | "developer".
- *
- * For the purposes of de‑duplication we only care about *user* messages so we
- * detect those here in a single, reusable helper.
- */
-function isUserMessage(
-  item: ResponseItem,
-): item is ResponseItem & { type: "message"; role: "user"; content: unknown } {
-  return item.type === "message" && (item as { role?: string }).role === "user";
-}
-
-/**
- * Returns the maximum context length (in tokens) for a given model.
- * These numbers are best‑effort guesses and provide a basis for UI percentages.
- */
-export function maxTokensForModel(model: string): number {
-  const lower = model.toLowerCase();
-  if (lower.includes("32k")) {
-    return 32000;
-  }
-  if (lower.includes("16k")) {
-    return 16000;
-  }
-  if (lower.includes("8k")) {
-    return 8000;
-  }
-  if (lower.includes("4k")) {
-    return 4000;
-  }
-  // Default to 128k for newer long‑context models
-  return 128000;
-}
-
-/**
- * Calculates the percentage of tokens remaining in context for a model.
- */
-export function calculateContextPercentRemaining(
-  items: Array<ResponseItem>,
-  model: string,
-): number {
-  const used = approximateTokensUsed(items);
-  const max = maxTokensForModel(model);
-  const remaining = Math.max(0, max - used);
-  return (remaining / max) * 100;
-}
-
-/**
- * Deduplicate the stream of {@link ResponseItem}s before they are persisted in
- * component state.
- *
- * Historically we used the (optional) {@code id} field returned by the
- * OpenAI streaming API as the primary key: the first occurrence of any given
- * {@code id} “won” and subsequent duplicates were dropped. In practice this
- * proved brittle because locally‑generated user messages don’t include an
- * {@code id}. The result was that if a user quickly pressed <Enter> twice the
- * exact same message would appear twice in the transcript.
- *
- * The new rules are therefore:
- *   1. If a {@link ResponseItem} has an {@code id} keep only the *first*
- *      occurrence of that {@code id} (this retains the previous behaviour for
- *      assistant / tool messages).
- *   2. Additionally, collapse *consecutive* user messages with identical
- *      content. Two messages are considered identical when their serialized
- *      {@code content} array matches exactly. We purposefully restrict this
- *      to **adjacent** duplicates so that legitimately repeated questions at
- *      a later point in the conversation are still shown.
- */
-export function uniqueById(items: Array<ResponseItem>): Array<ResponseItem> {
-  const seenIds = new Set<string>();
-  const deduped: Array<ResponseItem> = [];
-
-  for (const item of items) {
-    // ──────────────────────────────────────────────────────────────────
-    // Rule #1 – de‑duplicate by id when present
-    // ──────────────────────────────────────────────────────────────────
-    if (typeof item.id === "string" && item.id.length > 0) {
-      if (seenIds.has(item.id)) {
-        continue; // skip duplicates
-      }
-      seenIds.add(item.id);
-    }
-
-    // ──────────────────────────────────────────────────────────────────
-    // Rule #2 – collapse consecutive identical user messages
-    // ──────────────────────────────────────────────────────────────────
-    if (isUserMessage(item) && deduped.length > 0) {
-      const prev = deduped[deduped.length - 1]!;
-
-      if (
-        isUserMessage(prev) &&
-        // Note: the `content` field is an array of message parts. Performing
-        // a deep compare is over‑kill here; serialising to JSON is sufficient
-        // (and fast for the tiny payloads involved).
-        JSON.stringify(prev.content) === JSON.stringify(item.content)
-      ) {
-        continue; // skip duplicate user message
-      }
-    }
-
-    deduped.push(item);
-  }
-
-  return deduped;
-}
@@ -6,10 +6,6 @@ import type { ResponseItem } from "openai/resources/responses/responses.mjs";

 import TerminalChatInput from "./terminal-chat-input.js";
 import { TerminalChatToolCallCommand } from "./terminal-chat-tool-call-command.js";
-import {
-  calculateContextPercentRemaining,
-  uniqueById,
-} from "./terminal-chat-utils.js";
 import TerminalMessageHistory from "./terminal-message-history.js";
 import { formatCommandForDisplay } from "../../format-command.js";
 import { useConfirmation } from "../../hooks/use-confirmation.js";
@@ -22,7 +18,11 @@ import { extractAppliedPatches as _extractAppliedPatches } from "../../utils/ext
 import { getGitDiff } from "../../utils/get-diff.js";
 import { createInputItem } from "../../utils/input-utils.js";
 import { log } from "../../utils/logger/log.js";
-import { getAvailableModels } from "../../utils/model-utils.js";
+import {
+  getAvailableModels,
+  calculateContextPercentRemaining,
+  uniqueById,
+} from "../../utils/model-utils.js";
 import { CLI_VERSION } from "../../utils/session.js";
 import { shortCwd } from "../../utils/short-path.js";
 import { saveRollout } from "../../utils/storage/save-rollout.js";
@@ -106,11 +106,8 @@ async function generateCommandExplanation(
   } catch (error) {
     log(`Error generating command explanation: ${error}`);
-
-    // Improved error handling with more specific error information
     let errorMessage = "Unable to generate explanation due to an error.";

     if (error instanceof Error) {
-      // Include specific error message for better debugging
       errorMessage = `Unable to generate explanation: ${error.message}`;

       // If it's an API error, check for more specific information
@@ -141,18 +138,17 @@ export default function TerminalChat({
   additionalWritableRoots,
   fullStdout,
 }: Props): React.ReactElement {
-  // Desktop notification setting
   const notify = config.notify;
   const [model, setModel] = useState<string>(config.model);
   const [provider, setProvider] = useState<string>(config.provider || "openai");
   const [lastResponseId, setLastResponseId] = useState<string | null>(null);
   const [items, setItems] = useState<Array<ResponseItem>>([]);
   const [loading, setLoading] = useState<boolean>(false);
-  // Allow switching approval modes at runtime via an overlay.
   const [approvalPolicy, setApprovalPolicy] = useState<ApprovalPolicy>(
     initialApprovalPolicy,
   );
   const [thinkingSeconds, setThinkingSeconds] = useState(0);

   const handleCompact = async () => {
     setLoading(true);
     try {
@@ -185,6 +181,7 @@ export default function TerminalChat({
       setLoading(false);
     }
   };
+
   const {
     requestConfirmation,
     confirmationPrompt,
@@ -215,13 +212,13 @@ export default function TerminalChat({
   // DEBUG: log every render w/ key bits of state
   // ────────────────────────────────────────────────────────────────
   log(
-    `render – agent? ${Boolean(agentRef.current)} loading=${loading} items=${
+    `render - agent? ${Boolean(agentRef.current)} loading=${loading} items=${
       items.length
     }`,
   );

   useEffect(() => {
-    // Skip recreating the agent if awaiting a decision on a pending confirmation
+    // Skip recreating the agent if awaiting a decision on a pending confirmation.
     if (confirmationPrompt != null) {
       log("skip AgentLoop recreation due to pending confirmationPrompt");
       return;
@@ -234,7 +231,7 @@ export default function TerminalChat({
     )} approvalPolicy=${approvalPolicy}`,
     );

-    // Tear down any existing loop before creating a new one
+    // Tear down any existing loop before creating a new one.
     agentRef.current?.terminate();

     const sessionId = crypto.randomUUID();
@@ -267,11 +264,9 @@ export default function TerminalChat({
           <TerminalChatToolCallCommand commandForDisplay={commandForDisplay} />,
         );

-        // If the user wants an explanation, generate one and ask again
+        // If the user wants an explanation, generate one and ask again.
         if (review === ReviewDecision.EXPLAIN) {
           log(`Generating explanation for command: ${commandForDisplay}`);
-
-          // Generate an explanation using the same model
           const explanation = await generateCommandExplanation(
             command,
             model,
@@ -279,7 +274,7 @@ export default function TerminalChat({
           );
           log(`Generated explanation: ${explanation}`);

-          // Ask for confirmation again, but with the explanation
+          // Ask for confirmation again, but with the explanation.
           const confirmResult = await requestConfirmation(
             <TerminalChatToolCallCommand
               commandForDisplay={commandForDisplay}
@@ -287,11 +282,11 @@ export default function TerminalChat({
             />,
           );

-          // Update the decision based on the second confirmation
+          // Update the decision based on the second confirmation.
           review = confirmResult.decision;
           customDenyMessage = confirmResult.customDenyMessage;

-          // Return the final decision with the explanation
+          // Return the final decision with the explanation.
           return { review, customDenyMessage, applyPatch, explanation };
         }

@@ -299,7 +294,7 @@ export default function TerminalChat({
       },
     });

-    // force a render so JSX below can "see" the freshly created agent
+    // Force a render so JSX below can "see" the freshly created agent.
     forceUpdate();

     log(`AgentLoop created: ${inspect(agentRef.current, { depth: 1 })}`);
@@ -320,7 +315,7 @@ export default function TerminalChat({
     additionalWritableRoots,
   ]);

-  // whenever loading starts/stops, reset or start a timer — but pause the
+  // Whenever loading starts/stops, reset or start a timer — but pause the
   // timer while a confirmation overlay is displayed so we don't trigger a
   // re‑render every second during apply_patch reviews.
   useEffect(() => {
@@ -345,14 +340,15 @@ export default function TerminalChat({
     };
   }, [loading, confirmationPrompt]);

-  // Notify desktop with a preview when an assistant response arrives
+  // Notify desktop with a preview when an assistant response arrives.
   const prevLoadingRef = useRef<boolean>(false);
   useEffect(() => {
-    // Only notify when notifications are enabled
+    // Only notify when notifications are enabled.
     if (!notify) {
       prevLoadingRef.current = loading;
       return;
     }

     if (
       prevLoadingRef.current &&
       !loading &&
@@ -389,7 +385,7 @@ export default function TerminalChat({
     prevLoadingRef.current = loading;
   }, [notify, loading, confirmationPrompt, items, PWD]);

-  // Let's also track whenever the ref becomes available
+  // Let's also track whenever the ref becomes available.
   const agent = agentRef.current;
   useEffect(() => {
     log(`agentRef.current is now ${Boolean(agent)}`);
@@ -412,7 +408,7 @@ export default function TerminalChat({
       const inputItems = [
         await createInputItem(initialPrompt || "", initialImagePaths || []),
       ];
-      // Clear them to prevent subsequent runs
+      // Clear them to prevent subsequent runs.
       setInitialPrompt("");
       setInitialImagePaths([]);
       agent?.run(inputItems);
@@ -447,7 +443,7 @@ export default function TerminalChat({
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);

-  // Just render every item in order, no grouping/collapse
+  // Just render every item in order, no grouping/collapse.
   const lastMessageBatch = items.map((item) => ({ item }));
   const groupCounts: Record<string, number> = {};
   const userMsgCount = items.filter(
@@ -626,10 +622,10 @@ export default function TerminalChat({
               agent?.cancel();
               setLoading(false);

-              // Select default model for the new provider
+              // Select default model for the new provider.
               const defaultModel = model;

-              // Save provider to config
+              // Save provider to config.
               const updatedConfig = {
                 ...config,
                 provider: newProvider,
@@ -669,13 +665,12 @@ export default function TerminalChat({
           <ApprovalModeOverlay
             currentMode={approvalPolicy}
             onSelect={(newMode) => {
-              // update approval policy without cancelling an in-progress session
+              // Update approval policy without cancelling an in-progress session.
               if (newMode === approvalPolicy) {
                 return;
               }
-              // update state
               setApprovalPolicy(newMode as ApprovalPolicy);
-              // update existing AgentLoop instance
               if (agentRef.current) {
                 (
                   agentRef.current as unknown as {
@@ -34,9 +34,9 @@ const TerminalHeader: React.FC<TerminalHeaderProps> = ({
     {terminalRows < 10 ? (
       // Compact header for small terminal windows
       <Text>
-        ● Codex v{version} – {PWD} – {model} ({provider}) –{" "}
+        ● Codex v{version} - {PWD} - {model} ({provider}) -{" "}
         <Text color={colorsByPolicy[approvalPolicy]}>{approvalPolicy}</Text>
-        {flexModeEnabled ? " – flex-mode" : ""}
+        {flexModeEnabled ? " - flex-mode" : ""}
       </Text>
     ) : (
       <>
@@ -399,8 +399,8 @@ export function SinglePassApp({
   });

   const openai = new OpenAI({
-    apiKey: getApiKey(config.provider),
-    baseURL: getBaseUrl(config.provider),
+    apiKey: getApiKey(config.provider ?? "openai"),
+    baseURL: getBaseUrl(config.provider ?? "openai"),
     timeout: OPENAI_TIMEOUT_MS,
   });
   const chatResp = await openai.beta.chat.completions.parse({
@@ -1,4 +1,3 @@
-// use-confirmation.ts
 import type { ReviewDecision } from "../utils/agent/review";
 import type React from "react";

@@ -423,7 +423,7 @@ export default class TextBuffer {
   /** Delete the word to the *left* of the caret, mirroring common
    * Ctrl/Alt+Backspace behaviour in editors & terminals. Both the adjacent
    * whitespace *and* the word characters immediately preceding the caret are
-   * removed. If the caret is already at column‑0 this becomes a no‑op. */
+   * removed. If the caret is already at column‑0 this becomes a no-op. */
   deleteWordLeft(): void {
     dbg("deleteWordLeft", { beforeCursor: this.getCursor() });

@@ -710,7 +710,7 @@ export default class TextBuffer {
   }

   endSelection(): void {
-    // no‑op for now, kept for API symmetry
+    // no-op for now, kept for API symmetry
     // we rely on anchor + current cursor to compute selection
   }

@@ -744,7 +744,7 @@ export class AgentLoop {
       for await (const event of stream as AsyncIterable<ResponseEvent>) {
         log(`AgentLoop.run(): response event ${event.type}`);

-        // process and surface each item (no‑op until we can depend on streaming events)
+        // process and surface each item (no-op until we can depend on streaming events)
         if (event.type === "response.output_item.done") {
           const item = event.item;
           // 1) if it's a reasoning item, annotate it
@@ -936,7 +936,7 @@ export class AgentLoop {
           ],
         });
       } catch {
-        /* no‑op – emitting the error message is best‑effort */
+        /* no-op – emitting the error message is best‑effort */
       }
       this.onLoading(false);
       return;
@@ -144,7 +144,7 @@ export async function handleExecCommand(
     abortSignal,
   );
   // If the operation was aborted in the meantime, propagate the cancellation
-  // upward by returning an empty (no‑op) result so that the agent loop will
+  // upward by returning an empty (no-op) result so that the agent loop will
   // exit cleanly without emitting spurious output.
   if (abortSignal?.aborted) {
     return {
@@ -41,7 +41,7 @@ export function setApiKey(apiKey: string): void {
   OPENAI_API_KEY = apiKey;
 }

-export function getBaseUrl(provider: string = "openai"): string | undefined {
+export function getBaseUrl(provider: string): string | undefined {
   const providerInfo = providers[provider.toLowerCase()];
   if (providerInfo) {
     return providerInfo.baseURL;
@@ -49,7 +49,7 @@ export function getBaseUrl(provider: string = "openai"): string | undefined {
   return undefined;
 }

-export function getApiKey(provider: string = "openai"): string | undefined {
+export function getApiKey(provider: string): string | undefined {
   const providerInfo = providers[provider.toLowerCase()];
   if (providerInfo) {
     if (providerInfo.name === "Ollama") {
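With the `= "openai"` defaults removed, call sites must supply the provider explicitly, as the SinglePassApp hunk above already shows:

    getApiKey(config.provider ?? "openai");
    getBaseUrl(config.provider ?? "openai");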
@@ -1,3 +1,6 @@
+import type { ResponseItem } from "openai/resources/responses/responses.mjs";
+
+import { approximateTokensUsed } from "./approximate-tokens-used.js";
 import { getBaseUrl, getApiKey } from "./config";
 import OpenAI from "openai";

@@ -11,9 +14,8 @@ export const RECOMMENDED_MODELS: Array<string> = ["o4-mini", "o3"];
  * enters interactive mode. The request is made exactly once during the
  * lifetime of the process and the results are cached for subsequent calls.
  */
-
 async function fetchModels(provider: string): Promise<Array<string>> {
-  // If the user has not configured an API key we cannot hit the network.
+  // If the user has not configured an API key we cannot retrieve the models.
   if (!getApiKey(provider)) {
     throw new Error("No API key configured for provider: " + provider);
   }
@@ -26,7 +28,7 @@ async function fetchModels(provider: string): Promise<Array<string>> {
   for await (const model of list as AsyncIterable<{ id?: string }>) {
     if (model && typeof model.id === "string") {
       let modelStr = model.id;
-      // fix for gemini
+      // Fix for gemini.
       if (modelStr.startsWith("models/")) {
         modelStr = modelStr.replace("models/", "");
       }
@@ -40,6 +42,7 @@ async function fetchModels(provider: string): Promise<Array<string>> {
   }
 }

+/** Returns the list of models available for the provided key / credentials. */
 export async function getAvailableModels(
   provider: string,
 ): Promise<Array<string>> {
@@ -47,11 +50,11 @@ export async function getAvailableModels(
 }

 /**
- * Verify that the provided model identifier is present in the set returned by
- * {@link getAvailableModels}. The list of models is fetched from the OpenAI
- * `/models` endpoint the first time it is required and then cached in‑process.
+ * Verifies that the provided model identifier is present in the set returned by
+ * {@link getAvailableModels}.
  */
 export async function isModelSupportedForResponses(
+  provider: string,
   model: string | undefined | null,
 ): Promise<boolean> {
   if (
@@ -64,7 +67,7 @@ export async function isModelSupportedForResponses(

   try {
     const models = await Promise.race<Array<string>>([
-      getAvailableModels("openai"),
+      getAvailableModels(provider),
       new Promise<Array<string>>((resolve) =>
         setTimeout(() => resolve([]), MODEL_LIST_TIMEOUT_MS),
       ),
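The race above is a timeout guard: whichever promise settles first wins, so a hung /models request degrades to an empty list instead of blocking startup. The same pattern in isolation (the names and the 2-second budget are illustrative, not from the codebase):

    const result = await Promise.race<Array<string>>([
      someSlowListCall(),                     // hypothetical network call
      new Promise<Array<string>>((resolve) =>
        setTimeout(() => resolve([]), 2_000), // illustrative budget
      ),
    ]);
    // result is [] if the call has not settled within the budget.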
@@ -82,3 +85,110 @@ export async function isModelSupportedForResponses(
     return true;
   }
 }
+
+/** Returns the maximum context length (in tokens) for a given model. */
+function maxTokensForModel(model: string): number {
+  // TODO: These numbers are best‑effort guesses and provide a basis for UI percentages. They
+  // should be provider & model specific instead of being wild guesses.
+
+  const lower = model.toLowerCase();
+  if (lower.includes("32k")) {
+    return 32000;
+  }
+  if (lower.includes("16k")) {
+    return 16000;
+  }
+  if (lower.includes("8k")) {
+    return 8000;
+  }
+  if (lower.includes("4k")) {
+    return 4000;
+  }
+  return 128000; // Default to 128k for any other model.
+}
+
+/** Calculates the percentage of tokens remaining in context for a model. */
+export function calculateContextPercentRemaining(
+  items: Array<ResponseItem>,
+  model: string,
+): number {
+  const used = approximateTokensUsed(items);
+  const max = maxTokensForModel(model);
+  const remaining = Math.max(0, max - used);
+  return (remaining / max) * 100;
+}
+
+/**
+ * Type‑guard that narrows a {@link ResponseItem} to one that represents a
+ * user‑authored message. The OpenAI SDK represents both input *and* output
+ * messages with a discriminated union where:
+ *   • `type` is the string literal "message" and
+ *   • `role` is one of "user" | "assistant" | "system" | "developer".
+ *
+ * For the purposes of de‑duplication we only care about *user* messages so we
+ * detect those here in a single, reusable helper.
+ */
+function isUserMessage(
+  item: ResponseItem,
+): item is ResponseItem & { type: "message"; role: "user"; content: unknown } {
+  return item.type === "message" && (item as { role?: string }).role === "user";
+}
+
+/**
+ * Deduplicate the stream of {@link ResponseItem}s before they are persisted in
+ * component state.
+ *
+ * Historically we used the (optional) {@code id} field returned by the
+ * OpenAI streaming API as the primary key: the first occurrence of any given
+ * {@code id} “won” and subsequent duplicates were dropped. In practice this
+ * proved brittle because locally‑generated user messages don’t include an
+ * {@code id}. The result was that if a user quickly pressed <Enter> twice the
+ * exact same message would appear twice in the transcript.
+ *
+ * The new rules are therefore:
+ *   1. If a {@link ResponseItem} has an {@code id} keep only the *first*
+ *      occurrence of that {@code id} (this retains the previous behaviour for
+ *      assistant / tool messages).
+ *   2. Additionally, collapse *consecutive* user messages with identical
+ *      content. Two messages are considered identical when their serialized
+ *      {@code content} array matches exactly. We purposefully restrict this
+ *      to **adjacent** duplicates so that legitimately repeated questions at
+ *      a later point in the conversation are still shown.
+ */
+export function uniqueById(items: Array<ResponseItem>): Array<ResponseItem> {
+  const seenIds = new Set<string>();
+  const deduped: Array<ResponseItem> = [];
+
+  for (const item of items) {
+    // ──────────────────────────────────────────────────────────────────
+    // Rule #1 – de‑duplicate by id when present
+    // ──────────────────────────────────────────────────────────────────
+    if (typeof item.id === "string" && item.id.length > 0) {
+      if (seenIds.has(item.id)) {
+        continue; // skip duplicates
+      }
+      seenIds.add(item.id);
+    }
+
+    // ──────────────────────────────────────────────────────────────────
+    // Rule #2 – collapse consecutive identical user messages
+    // ──────────────────────────────────────────────────────────────────
+    if (isUserMessage(item) && deduped.length > 0) {
+      const prev = deduped[deduped.length - 1]!;
+
+      if (
+        isUserMessage(prev) &&
+        // Note: the `content` field is an array of message parts. Performing
+        // a deep compare is over‑kill here; serialising to JSON is sufficient
+        // (and fast for the tiny payloads involved).
+        JSON.stringify(prev.content) === JSON.stringify(item.content)
+      ) {
+        continue; // skip duplicate user message
+      }
+    }
+
+    deduped.push(item);
+  }
+
+  return deduped;
+}
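A hedged illustration of the two deduplication rules; the message literals are hypothetical and cast loosely only to keep the example short:

    const hello = {
      type: "message",
      role: "user",
      content: [{ type: "input_text", text: "hello" }],
    } as unknown as ResponseItem;
    const reply = {
      ...(hello as object),
      content: [{ type: "input_text", text: "something else" }],
    } as unknown as ResponseItem;

    uniqueById([hello, hello]).length;        // 1, adjacent duplicates collapse
    uniqueById([hello, reply, hello]).length; // 3, non-adjacent repeats are kept

For calculateContextPercentRemaining, a model name containing "32k" maps to a 32000-token budget, so 8000 used tokens leaves (32000 - 8000) / 32000 * 100 = 75% remaining.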
codex-cli/src/utils/terminal-chat-utils.ts (deleted)
@@ -50,7 +50,7 @@ vi.mock("openai", () => {

 // The AgentLoop pulls these helpers in order to decide whether a command can
 // be auto‑approved. None of that matters for this test, so we stub the module
-// with minimal no‑op implementations.
+// with minimal no-op implementations.
 vi.mock("../src/approvals.js", () => {
   return {
     __esModule: true,
@@ -79,7 +79,7 @@ describe("canAutoApprove()", () => {
   test("true command is considered safe", () => {
     expect(check(["true"])).toEqual({
       type: "auto-approve",
-      reason: "No‑op (true)",
+      reason: "No-op (true)",
       group: "Utility",
       runInSandbox: false,
     });
@@ -26,7 +26,7 @@ vi.mock("fs", async () => {
       memfs[path] = data;
     },
     mkdirSync: () => {
-      // no‑op in in‑memory store
+      // no-op in in‑memory store
     },
     rmSync: (path: string) => {
       // recursively delete any key under this prefix
@@ -44,7 +44,7 @@ describe("model-utils – offline resilience", () => {
       "../src/utils/model-utils.js"
     );

-    const supported = await isModelSupportedForResponses("o4-mini");
+    const supported = await isModelSupportedForResponses("openai", "o4-mini");
     expect(supported).toBe(true);
   });

@@ -63,8 +63,11 @@ describe("model-utils – offline resilience", () => {
       "../src/utils/model-utils.js"
     );

-    // Should resolve true despite the network failure
-    const supported = await isModelSupportedForResponses("some-model");
+    // Should resolve true despite the network failure.
+    const supported = await isModelSupportedForResponses(
+      "openai",
+      "some-model",
+    );
     expect(supported).toBe(true);
   });
 });
@@ -57,7 +57,7 @@ async function type(
   await flush();
 }

-/** Build a set of no‑op callbacks so <TerminalChatInput> renders with minimal
+/** Build a set of no-op callbacks so <TerminalChatInput> renders with minimal
  * scaffolding.
  */
 function stubProps(): any {
@@ -127,7 +127,7 @@ describe("TextBuffer – basic editing parity with Rust suite", () => {
     expect(buf.getCursor()).toEqual([0, 2]); // after 'b'
   });

-  it("is a no‑op at the very beginning of the buffer", () => {
+  it("is a no-op at the very beginning of the buffer", () => {
     const buf = new TextBuffer("ab");
     buf.backspace(); // caret starts at (0,0)

@@ -26,7 +26,7 @@ vi.mock("../src/components/select-input/select-input.js", () => {

 // Ink's <TextInput> toggles raw‑mode which calls .ref() / .unref() on stdin.
 // The test environment's mock streams don't implement those methods, so we
-// polyfill them to no‑ops on the prototype *before* the component tree mounts.
+// polyfill them to no-ops on the prototype *before* the component tree mounts.
 import { EventEmitter } from "node:events";
 if (!(EventEmitter.prototype as any).ref) {
   (EventEmitter.prototype as any).ref = () => {};