I had Codex read #182 and draft a PR to fix it. This is its suggested approach. I've tested it and it works. It removes the purple `thinking for 386s` type lines entirely, and replaces them with a single yellow `thinking for #s` line and the following prompt:

```
thinking for 31s
╭────────────────────────────────────────╮
│( ● ) Thinking..
╰────────────────────────────────────────╯
```

I've been using it that way via `npm run dev`, and prefer it.

## What

Empty "reasoning" updates were showing up as blank lines in the terminal chat history. We now short-circuit and return `null` whenever `message.summary` is empty, so those no-ops are suppressed.

## How

- In `TerminalChatResponseReasoning`, return early if `message.summary` is falsy or empty.
- In `TerminalMessageHistory`, drop any reasoning items whose `summary.length === 0`.
- Swapped out the loose `any` cast for a safer `unknown`-based cast.
- Rolled back the temporary Vitest script hacks that were causing stack overflows.

## Why

Cluttering the chat with empty lines was confusing; this change ensures only real reasoning text is rendered.

Reference: openai/codex#182

---------

Co-authored-by: Thibault Sottiaux <tibo@openai.com>
80 lines
2.5 KiB
TypeScript
80 lines
2.5 KiB
TypeScript
import type { TerminalHeaderProps } from "./terminal-header.js";
|
||
import type { GroupedResponseItem } from "./use-message-grouping.js";
|
||
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
|
||
|
||
import TerminalChatResponseItem from "./terminal-chat-response-item.js";
|
||
import TerminalHeader from "./terminal-header.js";
|
||
import { Box, Static, Text } from "ink";
|
||
import React, { useMemo } from "react";
|
||
|
||
// A batch entry can either be a standalone response item or a grouped set of
// items (e.g. auto‑approved tool‑call batches) that should be rendered
// together. Both fields are optional, so an entry may carry only a group and
// no `item` — consumers must not assume `item` is present.
type BatchEntry = { item?: ResponseItem; group?: GroupedResponseItem };
|
||
// Props for the scroll-back message-history pane of the terminal chat UI.
// Note: the visible MessageHistory component below only reads `batch`,
// `headerProps`, `loading`, `thinkingSeconds`, and `fullStdout`; the other
// fields are accepted for callers but not used here.
type MessageHistoryProps = {
  // Ordered entries to render; each wraps a standalone item or a group.
  batch: Array<BatchEntry>;
  // Per-group item counts keyed by group id. (Not read by this component.)
  groupCounts: Record<string, number>;
  // Flat list of response items. (Not read by this component.)
  items: Array<ResponseItem>;
  // Count of user messages so far. (Not read by this component.)
  userMsgCount: number;
  // Pending confirmation UI node, if any. (Not read by this component.)
  confirmationPrompt: React.ReactNode;
  // When true, the yellow "thinking for Ns" indicator is shown.
  loading: boolean;
  // Elapsed seconds displayed in the thinking indicator.
  thinkingSeconds: number;
  // Forwarded verbatim to the <TerminalHeader> rendered at the top.
  headerProps: TerminalHeaderProps;
  // Passed through to each <TerminalChatResponseItem>.
  fullStdout: boolean;
};
|
||
|
||
// Renders the scroll-back chat history: a static header followed by every
// response item, plus a transient yellow "thinking for Ns" line while the
// model is working. Empty reasoning updates are suppressed entirely.
const MessageHistory: React.FC<MessageHistoryProps> = ({
  batch,
  headerProps,
  loading,
  thinkingSeconds,
  fullStdout,
}) => {
  // Flatten batch entries to response items. BatchEntry.item is optional
  // (group-only entries exist), so drop entries without an item instead of
  // forcing them through with a non-null assertion — otherwise `undefined`
  // could leak into the list and break rendering/keying below.
  const messages = useMemo(
    () => batch.flatMap(({ item }) => (item != null ? [item] : [])),
    [batch],
  );

  return (
    <Box flexDirection="column">
      {loading && (
        <Box marginTop={1}>
          <Text color="yellow">{`thinking for ${thinkingSeconds}s`}</Text>
        </Box>
      )}
      <Static items={["header", ...messages]}>
        {(item, index) => {
          if (item === "header") {
            return <TerminalHeader key="header" {...headerProps} />;
          }

          // After the guard above, item is a ResponseItem.
          const message = item as ResponseItem;

          // Suppress empty reasoning updates (items whose summary array is
          // empty) so they don't render as blank lines in the history.
          // Narrowed with `in` + Array.isArray instead of a double cast.
          if (
            "summary" in message &&
            Array.isArray(message.summary) &&
            message.summary.length === 0
          ) {
            return null;
          }

          return (
            <Box
              key={`${message.id}-${index}`}
              flexDirection="column"
              // User messages sit flush left with no gap; everything else is
              // indented four columns and separated by a blank line.
              marginLeft={
                message.type === "message" && message.role === "user" ? 0 : 4
              }
              marginTop={
                message.type === "message" && message.role === "user" ? 0 : 1
              }
            >
              <TerminalChatResponseItem
                item={message}
                fullStdout={fullStdout}
              />
            </Box>
          );
        }}
      </Static>
    </Box>
  );
};
|
||
|
||
// Memoized so the (potentially long) history is not re-rendered when a parent
// update leaves these props shallowly equal.
export default React.memo(MessageHistory);
|