feat: support multiple providers via Responses-Completion transformation (#247)

https://github.com/user-attachments/assets/9ecb51be-fa65-4e99-8512-abb898dda569

Implemented as a transformation between the Responses API and the Completions
API, so that existing providers that implement the Completions API are
supported while minimizing the changes needed in the codex repo.
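
For readers skimming the diff: the approach translates a Responses-style request into a Chat Completions request on the way out, and maps the streamed completion back into Responses-style events on the way in. The sketch below illustrates only the request-side mapping under simplified types; the helper name `responsesInputToChatMessages` and the type shapes are illustrative, not the code added in this PR.

```ts
// Illustrative sketch only: simplified shapes, not the actual codex types.
type ResponsesInputItem =
  | { role: "user" | "assistant" | "system"; content: string }
  | { type: "function_call_output"; call_id: string; output: string };

type ChatMessage =
  | { role: "user" | "assistant" | "system"; content: string }
  | { role: "tool"; tool_call_id: string; content: string };

// Map Responses-style "input" items onto Chat Completions "messages".
function responsesInputToChatMessages(
  input: Array<ResponsesInputItem>,
): Array<ChatMessage> {
  return input.map((item): ChatMessage => {
    if ("type" in item) {
      // Tool results become "tool" messages in the Completions API.
      return { role: "tool", tool_call_id: item.call_id, content: item.output };
    }
    return { role: item.role, content: item.content };
  });
}
```

The reverse direction (completion chunks back into Responses events) follows the same idea and is omitted here.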

---------

Co-authored-by: Thibault Sottiaux <tibo@openai.com>
Co-authored-by: Fouad Matin <169186268+fouad-openai@users.noreply.github.com>
Co-authored-by: Fouad Matin <fouad@openai.com>
Author: Daniel Nakov
Date: 2025-04-20 23:59:34 -04:00
Committed by: GitHub
Parent: 693bd59ecc
Commit: eafbc75612
11 changed files with 1870 additions and 83 deletions


@@ -1,4 +1,4 @@
-import { OPENAI_API_KEY } from "./config";
+import { getBaseUrl, getApiKey } from "./config";
 import OpenAI from "openai";

 const MODEL_LIST_TIMEOUT_MS = 2_000; // 2 seconds
@@ -12,44 +12,38 @@ export const RECOMMENDED_MODELS: Array<string> = ["o4-mini", "o3"];
  * lifetime of the process and the results are cached for subsequent calls.
  */
-let modelsPromise: Promise<Array<string>> | null = null;
-async function fetchModels(): Promise<Array<string>> {
+async function fetchModels(provider: string): Promise<Array<string>> {
   // If the user has not configured an API key we cannot hit the network.
-  if (!OPENAI_API_KEY) {
-    return RECOMMENDED_MODELS;
+  if (!getApiKey(provider)) {
+    throw new Error("No API key configured for provider: " + provider);
   }

+  const baseURL = getBaseUrl(provider);
+
   try {
-    const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
+    const openai = new OpenAI({ apiKey: getApiKey(provider), baseURL });
     const list = await openai.models.list();
     const models: Array<string> = [];
     for await (const model of list as AsyncIterable<{ id?: string }>) {
       if (model && typeof model.id === "string") {
-        models.push(model.id);
+        let modelStr = model.id;
+        // fix for gemini
+        if (modelStr.startsWith("models/")) {
+          modelStr = modelStr.replace("models/", "");
+        }
+        models.push(modelStr);
       }
     }

     return models.sort();
-  } catch {
+  } catch (error) {
     return [];
   }
 }

-export function preloadModels(): void {
-  if (!modelsPromise) {
-    // Fire-and-forget: callers that truly need the list should `await`
-    // `getAvailableModels()` instead.
-    void getAvailableModels();
-  }
-}
-
-export async function getAvailableModels(): Promise<Array<string>> {
-  if (!modelsPromise) {
-    modelsPromise = fetchModels();
-  }
-  return modelsPromise;
+export async function getAvailableModels(
+  provider: string,
+): Promise<Array<string>> {
+  return fetchModels(provider.toLowerCase());
 }

 /**
@@ -70,7 +64,7 @@ export async function isModelSupportedForResponses(
   try {
     const models = await Promise.race<Array<string>>([
-      getAvailableModels(),
+      getAvailableModels("openai"),
       new Promise<Array<string>>((resolve) =>
         setTimeout(() => resolve([]), MODEL_LIST_TIMEOUT_MS),
       ),
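
The diff above relies on `getBaseUrl` and `getApiKey` from `./config` to resolve per-provider settings, but their implementation is outside this excerpt. A hypothetical version showing the contract the model-utils code expects might look like the following; the provider table, env variable names, and base URLs are assumptions for illustration only.

```ts
// Hypothetical sketch of the per-provider config helpers assumed above.
// The real helpers live in ./config; the entries here are illustrative.
const PROVIDER_DEFAULTS: Record<string, { baseURL: string; envKey: string }> = {
  openai: { baseURL: "https://api.openai.com/v1", envKey: "OPENAI_API_KEY" },
  gemini: {
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai",
    envKey: "GEMINI_API_KEY",
  },
};

export function getBaseUrl(provider: string): string | undefined {
  // A user-supplied override (e.g. a provider-specific base URL env var)
  // could take precedence here.
  return PROVIDER_DEFAULTS[provider.toLowerCase()]?.baseURL;
}

export function getApiKey(provider: string): string | undefined {
  const entry = PROVIDER_DEFAULTS[provider.toLowerCase()];
  return entry ? process.env[entry.envKey] : undefined;
}
```

With helpers along these lines, `getAvailableModels("gemini")` would list models through an OpenAI-compatible endpoint, which is also why the loop above strips the `models/` prefix that Gemini puts on its model IDs.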