feat: add openai model info configuration (#551)

In reference to [Issue 548](https://github.com/openai/codex/issues/548), part 1.
Author: chunterb
Date: 2025-04-22 16:31:25 -05:00
Committed by: GitHub
Parent: 12bc2dcc4e
Commit: 750d97e8ad
4 changed files with 301 additions and 3 deletions


@@ -2,6 +2,7 @@ import type { ResponseItem } from "openai/resources/responses/responses.mjs";
 import { approximateTokensUsed } from "./approximate-tokens-used.js";
 import { getBaseUrl, getApiKey } from "./config";
+import { type SupportedModelId, openAiModelInfo } from "./model-info.js";
 import OpenAI from "openai";
 const MODEL_LIST_TIMEOUT_MS = 2_000; // 2 seconds
@@ -89,10 +90,12 @@ export async function isModelSupportedForResponses(
 }
 /** Returns the maximum context length (in tokens) for a given model. */
-function maxTokensForModel(model: string): number {
+// TODO: These numbers are best-effort guesses and provide a basis for UI percentages. They
+// should be provider & model specific instead of being wild guesses.
+export function maxTokensForModel(model: string): number {
+  if (model in openAiModelInfo) {
+    return openAiModelInfo[model as SupportedModelId].maxContextLength;
+  }
+  // fallback to heuristics for models not in the registry
   const lower = model.toLowerCase();
   if (lower.includes("32k")) {
     return 32000;
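
The hunk above imports `openAiModelInfo` and `SupportedModelId` from the new `model-info.ts`, but that file is not shown here. Below is a minimal sketch of what such a registry could look like, assuming a keyed object whose entries carry a `maxContextLength` field (the only property the diff actually reads); the model IDs, the `label` field, and the numbers are illustrative, not taken from the PR.

```ts
// Hypothetical sketch of model-info.ts; only maxContextLength is read by
// maxTokensForModel in the diff above. Model IDs and values are illustrative.
export const openAiModelInfo = {
  "gpt-4o": { label: "GPT-4o", maxContextLength: 128000 },
  "gpt-4o-mini": { label: "GPT-4o mini", maxContextLength: 128000 },
  "gpt-3.5-turbo": { label: "GPT-3.5 Turbo", maxContextLength: 16385 },
} as const;

// Union of the registry's keys, e.g. "gpt-4o" | "gpt-4o-mini" | "gpt-3.5-turbo".
export type SupportedModelId = keyof typeof openAiModelInfo;
```

With a registry shaped like this, a call such as `maxTokensForModel("gpt-4o")` returns the table value directly instead of falling through to the substring heuristics that follow in the function.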