bug: non-openai mode - fix for gemini content: null, fix 429 to throw before stream (#563)

Gemini's API is finicky: it returns a 400 with no error body when you pass
content: null.
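
For illustration, a minimal sketch of the kind of guard this implies (the helper name is hypothetical; the real fix lives in the chat-completions adapter):

```typescript
interface ChatMessage {
  role: string;
  content: string | null;
}

// Hypothetical helper: Gemini's OpenAI-compatible endpoint rejects a
// message whose `content` is null with an empty 400, so coerce null to
// an empty string before the request is built.
function sanitizeMessages(messages: Array<ChatMessage>): Array<ChatMessage> {
  return messages.map((m) => (m.content == null ? { ...m, content: "" } : m));
}
```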
Also fixed the rate-limiting issue by throwing the 429 outside of the
iterator, before the stream is returned.
I think there's a separate issue with the second isRateLimit check in
agent-loop: turnInput has already been cleared by that point, so the retry
goes out without the last message.
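
The mechanics, roughly: an async generator's body does not run until the first next() call, so a 429 raised inside it bypasses any try/catch around the call that created the stream. A sketch of the throw-before-stream shape (function name hypothetical, SDK types real):

```typescript
import OpenAI from "openai";

// Await the request up front so a rate-limit error (429) rejects this
// promise directly, before any iterator exists; only then hand back a
// generator that consumes the stream.
async function createResponseStream(
  client: OpenAI,
  params: OpenAI.Chat.ChatCompletionCreateParamsStreaming,
): Promise<AsyncGenerator<OpenAI.Chat.ChatCompletionChunk>> {
  const stream = await client.chat.completions.create(params);

  return (async function* () {
    for await (const chunk of stream) {
      yield chunk;
    }
  })();
}
```

Callers that await this inside a try/catch see the 429 right away, while their turn input is still intact.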
Daniel Nakov, 2025-04-22 20:37:48 -04:00 (committed by GitHub)
parent 20b6ef0de8
commit 23f0887df3
2 changed files with 273 additions and 319 deletions


@@ -294,7 +294,7 @@ describe("responsesCreateViaChatCompletions", () => {
       expect(callArgs.messages).toEqual([
         { role: "user", content: "Hello world" },
       ]);
-      expect(callArgs.stream).toBeUndefined();
+      expect(callArgs.stream).toBe(false);
     }
     // Verify result format
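
That assertion flip reflects the non-streaming path now sending `stream: false` explicitly instead of leaving the flag unset; roughly as below (the surrounding setup is illustrative, only the `stream: false` matches the diff):

```typescript
import OpenAI from "openai";

const openaiClient = new OpenAI({ apiKey: process.env["OPENAI_API_KEY"] });

// Request a single non-streaming completion with `stream: false` spelled
// out, rather than relying on the backend's default when it is omitted.
const completion = await openaiClient.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hello world" }],
  stream: false,
});
```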
@@ -736,33 +736,6 @@ describe("responsesCreateViaChatCompletions", () => {
     }
   });
-  it("should handle errors gracefully", async () => {
-    // Setup mock to throw an error
-    openAiState.createSpy = vi
-      .fn()
-      .mockRejectedValue(new Error("API connection error"));
-    const openaiClient = new (await import("openai")).default({
-      apiKey: "test-key",
-    }) as unknown as OpenAI;
-    const inputMessage = createTestInput({
-      model: "gpt-4o",
-      userMessage: "Test message",
-      stream: false,
-    });
-    // Expect the function to throw an error
-    await expect(
-      responsesModule.responsesCreateViaChatCompletions(
-        openaiClient,
-        inputMessage as unknown as ResponseCreateParamsNonStreaming & {
-          stream?: false | undefined;
-        },
-      ),
-    ).rejects.toThrow("Failed to process chat completion");
-  });
   it("handles streaming with tool calls", async () => {
     // Mock a streaming response with tool calls
     const mockStream = createToolCallsStream();
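
The deleted test pinned the old behavior of wrapping failures in "Failed to process chat completion". If errors now propagate unwrapped, which the throw-before-stream change suggests but the diff does not confirm, a replacement might look like this sketch, reusing the file's existing helpers (openAiState, createTestInput, responsesModule):

```typescript
import { expect, it, vi } from "vitest";

it("propagates the underlying API error", async () => {
  // Same setup as the deleted test; only the expected message changes,
  // since the rejection should now reach the caller as-is.
  openAiState.createSpy = vi
    .fn()
    .mockRejectedValue(new Error("API connection error"));
  const openaiClient = new (await import("openai")).default({
    apiKey: "test-key",
  }) as unknown as OpenAI;
  const inputMessage = createTestInput({
    model: "gpt-4o",
    userMessage: "Test message",
    stream: false,
  });

  await expect(
    responsesModule.responsesCreateViaChatCompletions(
      openaiClient,
      inputMessage as unknown as ResponseCreateParamsNonStreaming & {
        stream?: false | undefined;
      },
    ),
  ).rejects.toThrow("API connection error");
});
```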