feat: add openai model info configuration (#551)
This addresses part 1 of [Issue 548](https://github.com/openai/codex/issues/548).
This commit is contained in:
@@ -2,6 +2,7 @@ import type { ResponseItem } from "openai/resources/responses/responses.mjs";
|
||||
|
||||
import { approximateTokensUsed } from "./approximate-tokens-used.js";
|
||||
import { getBaseUrl, getApiKey } from "./config";
|
||||
import { type SupportedModelId, openAiModelInfo } from "./model-info.js";
|
||||
import OpenAI from "openai";
|
||||
|
||||
const MODEL_LIST_TIMEOUT_MS = 2_000; // 2 seconds
|
||||
@@ -89,10 +90,12 @@ export async function isModelSupportedForResponses(
|
||||
}
|
||||
|
||||
/** Returns the maximum context length (in tokens) for a given model. */
|
||||
function maxTokensForModel(model: string): number {
|
||||
// TODO: These numbers are best‑effort guesses and provide a basis for UI percentages. They
|
||||
// should be provider & model specific instead of being wild guesses.
|
||||
export function maxTokensForModel(model: string): number {
|
||||
if (model in openAiModelInfo) {
|
||||
return openAiModelInfo[model as SupportedModelId].maxContextLength;
|
||||
}
|
||||
|
||||
// fallback to heuristics for models not in the registry
|
||||
const lower = model.toLowerCase();
|
||||
if (lower.includes("32k")) {
|
||||
return 32000;
|
||||
|
||||
Reference in New Issue
Block a user