feat: add /compact (#289)

Added the ability to compact the conversation context into a summary via a new
/compact command. Not sure if I should switch the model over to gpt-4.1 for
longer context or if keeping the current model is fine. Also, I'm not sure if
setting the compacted summary to a system message is best practice; would love
feedback 😄

Mentioned in this issue: https://github.com/openai/codex/issues/230
Author:    Thomas
Committer: GitHub
Date:      2025-04-18 15:48:30 +10:00
Commit:    9a948836bf (parent 35d47a5ab4)

5 changed files with 135 additions and 1 deletion
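
Taken together, the diffs below wire up one pipeline: the input component dispatches /compact, the chat component swaps the transcript for a generated summary, the help overlay documents the command, and a new compact-summary utility produces the summary itself. A minimal standalone sketch of that pipeline (a sketch rather than the committed code; the import path is assumed from the diffs and the React state plumbing is elided):

import type { ResponseItem } from "openai/resources/responses/responses.mjs";
import { generateCompactSummary } from "./utils/compact-summary.js";

// Sketch of the /compact flow: summarize the transcript, then replace it
// with a single assistant message so later turns start from a short recap.
async function compact(
  items: Array<ResponseItem>,
  model: string,
): Promise<Array<ResponseItem>> {
  const summary = await generateCompactSummary(items, model);
  return [
    {
      id: `compact-${Date.now()}`,
      type: "message",
      role: "assistant",
      content: [{ type: "output_text", text: summary }],
    } as ResponseItem,
  ];
}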

View File

@@ -42,6 +42,7 @@ export default function TerminalChatInput({
openModelOverlay,
openApprovalOverlay,
openHelpOverlay,
onCompact,
interruptAgent,
active,
}: {
@@ -61,6 +62,7 @@ export default function TerminalChatInput({
openModelOverlay: () => void;
openApprovalOverlay: () => void;
openHelpOverlay: () => void;
onCompact: () => void;
interruptAgent: () => void;
active: boolean;
}): React.ReactElement {
@@ -166,6 +168,12 @@ export default function TerminalChatInput({
return;
}
if (inputValue === "/compact") {
setInput("");
onCompact();
return;
}
if (inputValue.startsWith("/model")) {
setInput("");
openModelOverlay();
@@ -295,6 +303,7 @@ export default function TerminalChatInput({
openModelOverlay,
openHelpOverlay,
history, // Add history to the dependency array
onCompact,
],
);
@@ -366,7 +375,8 @@ export default function TerminalChatInput({
<>
{" — "}
<Text color="red">
-{Math.round(contextLeftPercent)}% context left
+{Math.round(contextLeftPercent)}% context left send
+"/compact" to condense context
</Text>
</>
)}

View File

@@ -17,6 +17,7 @@ import { useTerminalSize } from "../../hooks/use-terminal-size.js";
import { AgentLoop } from "../../utils/agent/agent-loop.js";
import { isLoggingEnabled, log } from "../../utils/agent/log.js";
import { ReviewDecision } from "../../utils/agent/review.js";
import { generateCompactSummary } from "../../utils/compact-summary.js";
import { OPENAI_BASE_URL } from "../../utils/config.js";
import { createInputItem } from "../../utils/input-utils.js";
import { getAvailableModels } from "../../utils/model-utils.js";
@@ -138,6 +139,34 @@ export default function TerminalChat({
initialApprovalPolicy,
);
const [thinkingSeconds, setThinkingSeconds] = useState(0);
const handleCompact = async () => {
setLoading(true);
try {
const summary = await generateCompactSummary(items, model);
setItems([
{
id: `compact-${Date.now()}`,
type: "message",
role: "assistant",
content: [{ type: "output_text", text: summary }],
} as ResponseItem,
]);
} catch (err) {
setItems((prev) => [
...prev,
{
id: `compact-error-${Date.now()}`,
type: "message",
role: "system",
content: [
{ type: "input_text", text: `Failed to compact context: ${err}` },
],
} as ResponseItem,
]);
} finally {
setLoading(false);
}
};
const {
requestConfirmation,
confirmationPrompt,
@@ -453,6 +482,7 @@ export default function TerminalChat({
openModelOverlay={() => setOverlayMode("model")}
openApprovalOverlay={() => setOverlayMode("approval")}
openHelpOverlay={() => setOverlayMode("help")}
onCompact={handleCompact}
active={overlayMode === "none"}
interruptAgent={() => {
if (!agent) {

View File

@@ -52,6 +52,9 @@ export default function HelpOverlay({
<Text>
<Text color="cyan">/clearhistory</Text> clear command history
</Text>
<Text>
<Text color="cyan">/compact</Text> condense context into a summary
</Text>
<Box marginTop={1}>
<Text bold dimColor>

View File

@@ -0,0 +1,60 @@
import type { ResponseItem } from "openai/resources/responses/responses.mjs";
import { OPENAI_BASE_URL } from "./config.js";
import OpenAI from "openai";
/**
* Generate a condensed summary of the conversation items.
* @param items The list of conversation items to summarize
* @param model The model to use for generating the summary
* @returns A concise structured summary string
*/
export async function generateCompactSummary(
items: Array<ResponseItem>,
model: string,
): Promise<string> {
const oai = new OpenAI({
apiKey: process.env["OPENAI_API_KEY"],
baseURL: OPENAI_BASE_URL,
});
const conversationText = items
.filter(
(
item,
): item is ResponseItem & { content: Array<unknown>; role: string } =>
item.type === "message" &&
(item.role === "user" || item.role === "assistant") &&
Array.isArray(item.content),
)
.map((item) => {
const text = item.content
.filter(
(part): part is { text: string } =>
typeof part === "object" &&
part != null &&
"text" in part &&
typeof (part as { text: unknown }).text === "string",
)
.map((part) => part.text)
.join("");
return `${item.role}: ${text}`;
})
.join("\n");
const response = await oai.chat.completions.create({
model,
messages: [
{
role: "assistant",
content:
"You are an expert coding assistant. Your goal is to generate a concise, structured summary of the conversation below that captures all essential information needed to continue development after context replacement. Include tasks performed, code areas modified or reviewed, key decisions or assumptions, test results or errors, and outstanding tasks or next steps.",
},
{
role: "user",
content: `Here is the conversation so far:\n${conversationText}\n\nPlease summarize this conversation, covering:\n1. Tasks performed and outcomes\n2. Code files, modules, or functions modified or examined\n3. Important decisions or assumptions made\n4. Errors encountered and test or build results\n5. Remaining tasks, open questions, or next steps\nProvide the summary in a clear, concise format.`,
},
],
});
return response.choices[0]?.message.content ?? "Unable to generate summary.";
}
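
For reference, here is one way the new helper could be exercised on its own; the transcript below is made up, and the model id "o4-mini" is just a placeholder (the commit leaves the model choice to the caller):

import type { ResponseItem } from "openai/resources/responses/responses.mjs";
import { generateCompactSummary } from "./compact-summary.js";

// A made-up two-turn transcript, shaped like the messages the filter above accepts.
const items = [
  {
    id: "msg_1",
    type: "message",
    role: "user",
    content: [{ type: "input_text", text: "Refactor the parser and add tests." }],
  },
  {
    id: "msg_2",
    type: "message",
    role: "assistant",
    content: [
      { type: "output_text", text: "Split parse() into lex() and build(); tests pass." },
    ],
  },
] as Array<ResponseItem>;

// Requires OPENAI_API_KEY in the environment; "o4-mini" is a placeholder model id.
const summary = await generateCompactSummary(items, "o4-mini");
console.log(summary);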