From 8a424fcfa356c04eb855115ba641bb0c4c2cd0e2 Mon Sep 17 00:00:00 2001 From: Michael Bolin Date: Thu, 10 Jul 2025 14:30:33 -0700 Subject: [PATCH] feat: add new config option: model_supports_reasoning_summaries (#1524) As noted in the updated docs, this makes it so that you can set: ```toml model_supports_reasoning_summaries = true ``` as a way of overriding the existing heuristic for when to set the `reasoning` field on a sampling request: https://github.com/openai/codex/blob/341c091c5b09dc706ab5c7d629516e6ef5aaf902/codex-rs/core/src/client_common.rs#L152-L166 --- codex-rs/config.md | 8 ++++++++ codex-rs/core/src/client.rs | 9 +++++++-- codex-rs/core/src/client_common.rs | 28 +++++++++++++++++----------- codex-rs/core/src/codex.rs | 2 +- codex-rs/core/src/config.rs | 14 ++++++++++++++ codex-rs/exec/src/event_processor.rs | 2 +- codex-rs/tui/src/history_cell.rs | 2 +- 7 files changed, 49 insertions(+), 16 deletions(-) diff --git a/codex-rs/config.md b/codex-rs/config.md index eeb9a266..438b7e76 100644 --- a/codex-rs/config.md +++ b/codex-rs/config.md @@ -206,6 +206,14 @@ To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in you model_reasoning_summary = "none" # disable reasoning summaries ``` +## model_supports_reasoning_summaries + +By default, `reasoning` is only set on requests to OpenAI models that are known to support them. To force `reasoning` to be set on requests to the current model, set the following in `config.toml`: + +```toml +model_supports_reasoning_summaries = true +``` + ## sandbox_mode Codex executes model-generated shell commands inside an OS-level sandbox. 
diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs index 9dcb7289..4eccd7fa 100644 --- a/codex-rs/core/src/client.rs +++ b/codex-rs/core/src/client.rs @@ -23,6 +23,7 @@ use crate::client_common::ResponseEvent; use crate::client_common::ResponseStream; use crate::client_common::ResponsesApiRequest; use crate::client_common::create_reasoning_param_for_request; +use crate::config::Config; use crate::config_types::ReasoningEffort as ReasoningEffortConfig; use crate::config_types::ReasoningSummary as ReasoningSummaryConfig; use crate::error::CodexErr; @@ -36,9 +37,11 @@ use crate::models::ResponseItem; use crate::openai_tools::create_tools_json_for_responses_api; use crate::protocol::TokenUsage; use crate::util::backoff; +use std::sync::Arc; #[derive(Clone)] pub struct ModelClient { + config: Arc, model: String, client: reqwest::Client, provider: ModelProviderInfo, @@ -48,12 +51,14 @@ pub struct ModelClient { impl ModelClient { pub fn new( - model: impl ToString, + config: Arc, provider: ModelProviderInfo, effort: ReasoningEffortConfig, summary: ReasoningSummaryConfig, ) -> Self { + let model = config.model.clone(); Self { + config, model: model.to_string(), client: reqwest::Client::new(), provider, @@ -108,7 +113,7 @@ impl ModelClient { let full_instructions = prompt.get_full_instructions(&self.model); let tools_json = create_tools_json_for_responses_api(prompt, &self.model)?; - let reasoning = create_reasoning_param_for_request(&self.model, self.effort, self.summary); + let reasoning = create_reasoning_param_for_request(&self.config, self.effort, self.summary); let payload = ResponsesApiRequest { model: &self.model, instructions: &full_instructions, diff --git a/codex-rs/core/src/client_common.rs b/codex-rs/core/src/client_common.rs index 97d74baf..f9a816a7 100644 --- a/codex-rs/core/src/client_common.rs +++ b/codex-rs/core/src/client_common.rs @@ -131,15 +131,16 @@ pub(crate) struct ResponsesApiRequest<'a> { pub(crate) stream: bool, } +use 
crate::config::Config; + pub(crate) fn create_reasoning_param_for_request( - model: &str, + config: &Config, effort: ReasoningEffortConfig, summary: ReasoningSummaryConfig, ) -> Option { - let effort: Option = effort.into(); - let effort = effort?; - - if model_supports_reasoning_summaries(model) { + if model_supports_reasoning_summaries(config) { + let effort: Option = effort.into(); + let effort = effort?; Some(Reasoning { effort, summary: summary.into(), @@ -149,19 +150,24 @@ pub(crate) fn create_reasoning_param_for_request( } } -pub fn model_supports_reasoning_summaries(model: &str) -> bool { - // Currently, we hardcode this rule to decide whether enable reasoning. +pub fn model_supports_reasoning_summaries(config: &Config) -> bool { + // Currently, we hardcode this rule to decide whether to enable reasoning. // We expect reasoning to apply only to OpenAI models, but we do not want // users to have to mess with their config to disable reasoning for models // that do not support it, such as `gpt-4.1`. // // Though if a user is using Codex with non-OpenAI models that, say, happen - // to start with "o", then they can set `model_reasoning_effort = "none` in + // to start with "o", then they can set `model_reasoning_effort = "none"` in // config.toml to disable reasoning. // - // Ultimately, this should also be configurable in config.toml, but we - // need to have defaults that "just work." Perhaps we could have a - // "reasoning models pattern" as part of ModelProviderInfo? + // Conversely, if a user has a non-OpenAI provider that supports reasoning, + // they can set the top-level `model_supports_reasoning_summaries = true` + // config option to enable reasoning. 
+ if config.model_supports_reasoning_summaries { + return true; + } + + let model = &config.model; model.starts_with("o") || model.starts_with("codex") } diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs index 708db889..52c37c51 100644 --- a/codex-rs/core/src/codex.rs +++ b/codex-rs/core/src/codex.rs @@ -586,7 +586,7 @@ async fn submission_loop( } let client = ModelClient::new( - model.clone(), + config.clone(), provider.clone(), model_reasoning_effort, model_reasoning_summary, diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs index db4f9ffe..d2f21922 100644 --- a/codex-rs/core/src/config.rs +++ b/codex-rs/core/src/config.rs @@ -130,6 +130,10 @@ pub struct Config { /// If not "none", the value to use for `reasoning.summary` when making a /// request using the Responses API. pub model_reasoning_summary: ReasoningSummary, + + /// When set to `true`, overrides the default heuristic and forces + /// `model_supports_reasoning_summaries()` to return `true`. + pub model_supports_reasoning_summaries: bool, } impl Config { @@ -308,6 +312,9 @@ pub struct ConfigToml { pub model_reasoning_effort: Option, pub model_reasoning_summary: Option, + + /// Override to force-enable reasoning summaries for the configured model. 
+ pub model_supports_reasoning_summaries: Option, } impl ConfigToml { @@ -472,6 +479,10 @@ impl Config { .model_reasoning_summary .or(cfg.model_reasoning_summary) .unwrap_or_default(), + + model_supports_reasoning_summaries: cfg + .model_supports_reasoning_summaries + .unwrap_or(false), }; Ok(config) } @@ -776,6 +787,7 @@ disable_response_storage = true hide_agent_reasoning: false, model_reasoning_effort: ReasoningEffort::High, model_reasoning_summary: ReasoningSummary::Detailed, + model_supports_reasoning_summaries: false, }, o3_profile_config ); @@ -820,6 +832,7 @@ disable_response_storage = true hide_agent_reasoning: false, model_reasoning_effort: ReasoningEffort::default(), model_reasoning_summary: ReasoningSummary::default(), + model_supports_reasoning_summaries: false, }; assert_eq!(expected_gpt3_profile_config, gpt3_profile_config); @@ -879,6 +892,7 @@ disable_response_storage = true hide_agent_reasoning: false, model_reasoning_effort: ReasoningEffort::default(), model_reasoning_summary: ReasoningSummary::default(), + model_supports_reasoning_summaries: false, }; assert_eq!(expected_zdr_profile_config, zdr_profile_config); diff --git a/codex-rs/exec/src/event_processor.rs b/codex-rs/exec/src/event_processor.rs index 4c7120cd..540e0142 100644 --- a/codex-rs/exec/src/event_processor.rs +++ b/codex-rs/exec/src/event_processor.rs @@ -139,7 +139,7 @@ impl EventProcessor { ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)), ]; if config.model_provider.wire_api == WireApi::Responses - && model_supports_reasoning_summaries(&config.model) + && model_supports_reasoning_summaries(config) { entries.push(( "reasoning effort", diff --git a/codex-rs/tui/src/history_cell.rs b/codex-rs/tui/src/history_cell.rs index 18740f11..0bfbc414 100644 --- a/codex-rs/tui/src/history_cell.rs +++ b/codex-rs/tui/src/history_cell.rs @@ -159,7 +159,7 @@ impl HistoryCell { ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)), ]; if config.model_provider.wire_api == 
WireApi::Responses - && model_supports_reasoning_summaries(&config.model) + && model_supports_reasoning_summaries(config) { entries.push(( "reasoning effort",