llmx/codex-cli/tests/agent-rate-limit-error.test.ts
Luci a9ecb2efce chore: upgrade prettier to v3 (#644) (2025-04-25 07:21:50 -07:00)
## Description

This PR addresses the following improvements:

**Unify Prettier Version**: Currently, the Prettier versions used in
`/package.json` and `/codex-cli/package.json` are different. In this PR,
we're updating both to use Prettier v3.

- Prettier v3 introduces improved formatting support for JavaScript and
TypeScript (e.g. the scenario shown in the image below, which is more
aligned with standard TypeScript indentation).

<img width="1126" alt="image"
src="https://github.com/user-attachments/assets/6e237eb8-4553-4574-b336-ed9561c55370"
/>

**Add Prettier Auto-Formatting in lint-staged**: We've added a step that
automatically runs `prettier --write` on JavaScript and TypeScript files as
part of the lint-staged process, before the ESLint checks (see the
configuration sketch after this list).

- This will help ensure that all committed code is properly formatted
according to the project's Prettier configuration.
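
For illustration only, here is a minimal sketch of what the unified
`package.json` entries could look like after this change; the exact version
range, file globs, and ESLint command are assumptions rather than the
repository's actual configuration:

```json
{
  "devDependencies": {
    "prettier": "^3.0.0"
  },
  "lint-staged": {
    "*.{js,jsx,ts,tsx}": [
      "prettier --write",
      "eslint"
    ]
  }
}
```

Because lint-staged runs the listed commands in order, Prettier rewrites the
staged files before ESLint inspects them, which is what keeps formatting-only
issues from failing the lint step.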
import { describe, it, expect, vi } from "vitest";

// ---------------------------------------------------------------------------
// Mock helpers
// ---------------------------------------------------------------------------

// Keep a reference so test cases can programmatically change the behaviour of
// the fake OpenAI client.
const openAiState: { createSpy?: ReturnType<typeof vi.fn> } = {};

/**
 * Mock the "openai" package so we can simulate rate-limit errors without
 * making real network calls. The AgentLoop only relies on `responses.create`,
 * so we expose a minimal stub.
 */
vi.mock("openai", () => {
class FakeOpenAI {
public responses = {
// Will be replaced pertest via `openAiState.createSpy`.
create: (...args: Array<any>) => openAiState.createSpy!(...args),
};
}
// The real SDK exports this constructor include it for typings even
// though it is not used in this spec.
class APIConnectionTimeoutError extends Error {}
return {
__esModule: true,
default: FakeOpenAI,
APIConnectionTimeoutError,
};
});
// Stub helpers that the agent indirectly imports so it does not attempt any
// filesystem access or real approvals logic during the test.
vi.mock("../src/approvals.js", () => ({
  __esModule: true,
  alwaysApprovedCommands: new Set<string>(),
  canAutoApprove: () => ({ type: "auto-approve", runInSandbox: false }) as any,
  isSafeCommand: () => null,
}));

vi.mock("../src/format-command.js", () => ({
  __esModule: true,
  formatCommandForDisplay: (c: Array<string>) => c.join(" "),
}));

// Silence agent-loop debug logging so test output stays clean.
vi.mock("../src/utils/agent/log.js", () => ({
  __esModule: true,
  log: () => {},
  isLoggingEnabled: () => false,
}));

import { AgentLoop } from "../src/utils/agent/agent-loop.js";
describe("AgentLoop ratelimit handling", () => {
it("retries up to the maximum and then surfaces a system message", async () => {
// Enable fake timers for this test only we restore real timers at the end
// so other tests are unaffected.
vi.useFakeTimers();
try {
// Construct a dummy ratelimit error that matches the implementation's
// detection logic (`status === 429`).
const rateLimitErr: any = new Error("Rate limit exceeded");
rateLimitErr.status = 429;
// Always throw the ratelimit error to force the loop to exhaust all
// retries (5 attempts in total).
openAiState.createSpy = vi.fn(async () => {
throw rateLimitErr;
});
const received: Array<any> = [];
const agent = new AgentLoop({
model: "any",
instructions: "",
approvalPolicy: { mode: "auto" } as any,
additionalWritableRoots: [],
onItem: (i) => received.push(i),
onLoading: () => {},
getCommandConfirmation: async () => ({ review: "yes" }) as any,
onLastResponseId: () => {},
});
const userMsg = [
{
type: "message",
role: "user",
content: [{ type: "input_text", text: "hello" }],
},
];
// Start the run but don't await yet so we can advance fake timers while it
// is in progress.
const runPromise = agent.run(userMsg as any);
// The agent waits 15 000 ms between retries (ratelimit backoff) and does
// this four times (after attempts 14). Fastforward a bit more to cover
// any additional small `setTimeout` calls inside the implementation.
await vi.advanceTimersByTimeAsync(61_000); // 4 * 15s + 1s safety margin
// Ensure the promise settles without throwing.
await expect(runPromise).resolves.not.toThrow();
// Flush the 10 ms staging delay used when emitting items.
await vi.advanceTimersByTimeAsync(20);
// The OpenAI client should have been called the maximum number of retry
// attempts (5).
expect(openAiState.createSpy).toHaveBeenCalledTimes(5);
// Finally, verify that the user sees a helpful system message.
const sysMsg = received.find(
(i) =>
i.role === "system" &&
typeof i.content?.[0]?.text === "string" &&
i.content[0].text.includes("Rate limit reached"),
);
expect(sysMsg).toBeTruthy();
} finally {
// Ensure global timer state is restored for subsequent tests.
vi.useRealTimers();
}
});
});
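
For readers skimming the spec, here is a minimal, hypothetical sketch of the
retry behaviour the test above assumes `AgentLoop` provides: a `status === 429`
error is retried up to 5 attempts with a 15-second pause between them, after
which a "Rate limit reached" system message is emitted instead of the run
rejecting. The helper name `callWithRateLimitRetry` and its signature are
invented for illustration and do not come from the actual implementation.

```ts
// Hypothetical sketch only: not the real AgentLoop code.
const MAX_ATTEMPTS = 5; // matches the 5 calls the spec expects
const RATE_LIMIT_BACKOFF_MS = 15_000; // matches the 15 s waits the spec advances past

async function callWithRateLimitRetry<T>(
  request: () => Promise<T>,
  emitSystemMessage: (text: string) => void,
): Promise<T | undefined> {
  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    try {
      return await request();
    } catch (err) {
      const isRateLimit = (err as { status?: number })?.status === 429;
      if (!isRateLimit) {
        throw err; // non-429 failures propagate unchanged
      }
      if (attempt === MAX_ATTEMPTS) {
        // Out of retries: surface a friendly item instead of rejecting the run.
        emitSystemMessage("Rate limit reached. Please try again later.");
        return undefined;
      }
      // Wait before the next attempt; the spec fast-forwards fake timers past this.
      await new Promise((resolve) => setTimeout(resolve, RATE_LIMIT_BACKOFF_MS));
    }
  }
  return undefined;
}
```

Under these assumptions `agent.run()` resolves rather than rejects, which is
exactly what the `resolves.not.toThrow()` assertion checks.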