From db4aa6f916dfa576150265f4dda9ecfb63ddad01 Mon Sep 17 00:00:00 2001
From: jif-oai
Date: Wed, 24 Sep 2025 16:31:27 +0100
Subject: [PATCH] nit: 350k tokens (#4156)

350k tokens for gpt-5-codex auto-compaction and update comments for better description
---
 codex-rs/core/src/openai_model_info.rs | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/codex-rs/core/src/openai_model_info.rs b/codex-rs/core/src/openai_model_info.rs
index b413ccc7..d1d2305a 100644
--- a/codex-rs/core/src/openai_model_info.rs
+++ b/codex-rs/core/src/openai_model_info.rs
@@ -7,13 +7,14 @@ use crate::model_family::ModelFamily;
 /// Though this would help present more accurate pricing information in the UI.
 #[derive(Debug)]
 pub(crate) struct ModelInfo {
-    /// Size of the context window in tokens.
+    /// Size of the context window in tokens. This is the maximum size of the input context.
     pub(crate) context_window: u64,
 
     /// Maximum number of output tokens that can be generated for the model.
     pub(crate) max_output_tokens: u64,
 
-    /// Token threshold where we should automatically compact conversation history.
+    /// Token threshold where we should automatically compact conversation history. This considers
+    /// input tokens + output tokens of this turn.
     pub(crate) auto_compact_token_limit: Option<u64>,
 }
 
@@ -64,7 +65,7 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
         _ if slug.starts_with("gpt-5-codex") => Some(ModelInfo {
             context_window: 272_000,
             max_output_tokens: 128_000,
-            auto_compact_token_limit: Some(250_000),
+            auto_compact_token_limit: Some(350_000),
         }),
         _ if slug.starts_with("gpt-5") => Some(ModelInfo::new(272_000, 128_000)),
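
Note on the updated doc comment: because the threshold is compared against the
input tokens plus the output tokens of the current turn, it is reasonable for
the 350_000 limit to exceed the 272_000-token context window. Below is a
minimal sketch of how such a check might look; the `TokenUsage` struct and
`should_auto_compact` function are hypothetical names for illustration, not
the actual codex-rs call sites (which this patch does not show).

    /// Hypothetical per-turn token accounting, for illustration only.
    struct TokenUsage {
        input_tokens: u64,
        output_tokens: u64,
    }

    /// Returns true when the turn's combined input + output token count
    /// reaches the model's auto-compaction threshold, mirroring the
    /// semantics described in the updated doc comment.
    fn should_auto_compact(usage: &TokenUsage, auto_compact_token_limit: Option<u64>) -> bool {
        match auto_compact_token_limit {
            Some(limit) => usage.input_tokens + usage.output_tokens >= limit,
            // Models without a configured limit never auto-compact.
            None => false,
        }
    }

    fn main() {
        let usage = TokenUsage { input_tokens: 300_000, output_tokens: 60_000 };
        // With the new gpt-5-codex limit of 350_000, this turn would
        // trigger compaction (300_000 + 60_000 >= 350_000).
        assert!(should_auto_compact(&usage, Some(350_000)));
    }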