feat: add new config option: model_supports_reasoning_summaries (#1524)

As noted in the updated docs, this makes it so that you can set:

```toml
model_supports_reasoning_summaries = true
```

as a way of overriding the existing heuristic for when to set the
`reasoning` field on a sampling request:

341c091c5b/codex-rs/core/src/client_common.rs (L152-L166)
Author: Michael Bolin
Date: 2025-07-10 14:30:33 -07:00
Committer: GitHub
Commit: 8a424fcfa3 (parent: 341c091c5b)

7 changed files with 49 additions and 16 deletions

View File

````diff
@@ -206,6 +206,14 @@ To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in you
 model_reasoning_summary = "none" # disable reasoning summaries
 ```
 
+## model_supports_reasoning_summaries
+
+By default, `reasoning` is only set on requests to OpenAI models that are known to support it. To force `reasoning` to be set on requests to the current model, set the following in `config.toml`:
+
+```toml
+model_supports_reasoning_summaries = true
+```
+
 ## sandbox_mode
 
 Codex executes model-generated shell commands inside an OS-level sandbox.
````
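The docs addition above covers the option itself. The motivating case called out in the code comments later in this commit is a non-OpenAI provider whose models support reasoning but do not match the built-in name heuristic. A fuller `config.toml` for that case might look like the sketch below; the `acme` provider and its values are hypothetical, and the `model_providers` table keys are assumed to follow the shape documented elsewhere in the same config docs.

```toml
model = "acme-reasoner-1"   # hypothetical model name
model_provider = "acme"     # hypothetical provider id
# Force `reasoning` on requests even though the model name does not start
# with "o" or "codex".
model_supports_reasoning_summaries = true

[model_providers.acme]
name = "Acme"
base_url = "https://api.acme.example/v1"
# Reasoning applies to the Responses API, so the provider must speak it.
wire_api = "responses"
```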

View File

```diff
@@ -23,6 +23,7 @@ use crate::client_common::ResponseEvent;
 use crate::client_common::ResponseStream;
 use crate::client_common::ResponsesApiRequest;
 use crate::client_common::create_reasoning_param_for_request;
+use crate::config::Config;
 use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
 use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use crate::error::CodexErr;
@@ -36,9 +37,11 @@ use crate::models::ResponseItem;
 use crate::openai_tools::create_tools_json_for_responses_api;
 use crate::protocol::TokenUsage;
 use crate::util::backoff;
+use std::sync::Arc;
 
 #[derive(Clone)]
 pub struct ModelClient {
+    config: Arc<Config>,
     model: String,
     client: reqwest::Client,
     provider: ModelProviderInfo,
@@ -48,12 +51,14 @@ pub struct ModelClient {
 impl ModelClient {
     pub fn new(
-        model: impl ToString,
+        config: Arc<Config>,
         provider: ModelProviderInfo,
         effort: ReasoningEffortConfig,
         summary: ReasoningSummaryConfig,
     ) -> Self {
+        let model = config.model.clone();
         Self {
+            config,
             model: model.to_string(),
             client: reqwest::Client::new(),
             provider,
@@ -108,7 +113,7 @@ impl ModelClient {
         let full_instructions = prompt.get_full_instructions(&self.model);
         let tools_json = create_tools_json_for_responses_api(prompt, &self.model)?;
-        let reasoning = create_reasoning_param_for_request(&self.model, self.effort, self.summary);
+        let reasoning = create_reasoning_param_for_request(&self.config, self.effort, self.summary);
         let payload = ResponsesApiRequest {
             model: &self.model,
             instructions: &full_instructions,
```
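To see why the constructor now takes `Arc<Config>`, note that `ModelClient` derives `Clone`. The sketch below is self-contained with a stand-in `Config` (the real struct has many more fields): cloning the client just bumps a reference count instead of copying the parsed config.

```rust
use std::sync::Arc;

// Stand-in for the crate's `Config`; only the field this sketch needs.
struct Config {
    model: String,
}

#[derive(Clone)]
struct ModelClient {
    config: Arc<Config>,
    model: String,
}

impl ModelClient {
    // Mirrors the new constructor shape: the model name is read from config.
    fn new(config: Arc<Config>) -> Self {
        let model = config.model.clone();
        Self { config, model }
    }
}

fn main() {
    let config = Arc::new(Config {
        model: "o3".to_string(),
    });
    let client = ModelClient::new(config.clone());
    let clone = client.clone(); // bumps the Arc refcount; no deep copy of Config
    assert_eq!(clone.config.model, clone.model);
    assert_eq!(Arc::strong_count(&config), 3); // `config` plus two clients
}
```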

View File

```diff
@@ -131,15 +131,16 @@ pub(crate) struct ResponsesApiRequest<'a> {
     pub(crate) stream: bool,
 }
 
+use crate::config::Config;
+
 pub(crate) fn create_reasoning_param_for_request(
-    model: &str,
+    config: &Config,
     effort: ReasoningEffortConfig,
     summary: ReasoningSummaryConfig,
 ) -> Option<Reasoning> {
-    let effort: Option<OpenAiReasoningEffort> = effort.into();
-    let effort = effort?;
-    if model_supports_reasoning_summaries(model) {
+    if model_supports_reasoning_summaries(config) {
+        let effort: Option<OpenAiReasoningEffort> = effort.into();
+        let effort = effort?;
         Some(Reasoning {
             effort,
             summary: summary.into(),
@@ -149,19 +150,24 @@ pub(crate) fn create_reasoning_param_for_request(
     }
 }
 
-pub fn model_supports_reasoning_summaries(model: &str) -> bool {
-    // Currently, we hardcode this rule to decide whether enable reasoning.
+pub fn model_supports_reasoning_summaries(config: &Config) -> bool {
+    // Currently, we hardcode this rule to decide whether to enable reasoning.
     // We expect reasoning to apply only to OpenAI models, but we do not want
     // users to have to mess with their config to disable reasoning for models
     // that do not support it, such as `gpt-4.1`.
     //
     // Though if a user is using Codex with non-OpenAI models that, say, happen
-    // to start with "o", then they can set `model_reasoning_effort = "none` in
+    // to start with "o", then they can set `model_reasoning_effort = "none"` in
     // config.toml to disable reasoning.
     //
     // Ultimately, this should also be configurable in config.toml, but we
     // need to have defaults that "just work." Perhaps we could have a
     // "reasoning models pattern" as part of ModelProviderInfo?
+    //
+    // Conversely, if a user has a non-OpenAI provider that supports reasoning,
+    // they can set the top-level `model_supports_reasoning_summaries = true`
+    // config option to enable reasoning.
+    if config.model_supports_reasoning_summaries {
+        return true;
+    }
+    let model = &config.model;
     model.starts_with("o") || model.starts_with("codex")
 }
```
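Stitched together from the hunks above, the post-change heuristic reads as follows. The `Config` here is a stand-in with just the two fields the function touches, so the sketch compiles on its own, but the function body matches the diff: the explicit override from `config.toml` is consulted before the model-name prefix check.

```rust
// Stand-in for the crate's `Config`; the real struct has many more fields.
struct Config {
    model: String,
    model_supports_reasoning_summaries: bool,
}

pub fn model_supports_reasoning_summaries(config: &Config) -> bool {
    // The explicit config.toml override wins unconditionally.
    if config.model_supports_reasoning_summaries {
        return true;
    }
    // Otherwise fall back to the hardcoded prefix heuristic.
    let model = &config.model;
    model.starts_with("o") || model.starts_with("codex")
}

fn main() {
    let forced = Config {
        model: "acme-reasoner-1".to_string(), // hypothetical non-OpenAI model
        model_supports_reasoning_summaries: true,
    };
    assert!(model_supports_reasoning_summaries(&forced)); // override wins

    let heuristic = Config {
        model: "gpt-4.1".to_string(),
        model_supports_reasoning_summaries: false,
    };
    assert!(!model_supports_reasoning_summaries(&heuristic)); // no "o"/"codex" prefix
}
```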

View File

```diff
@@ -586,7 +586,7 @@ async fn submission_loop(
             }
 
             let client = ModelClient::new(
-                model.clone(),
+                config.clone(),
                 provider.clone(),
                 model_reasoning_effort,
                 model_reasoning_summary,
```

View File

```diff
@@ -130,6 +130,10 @@ pub struct Config {
     /// If not "none", the value to use for `reasoning.summary` when making a
     /// request using the Responses API.
     pub model_reasoning_summary: ReasoningSummary,
+
+    /// When set to `true`, overrides the default heuristic and forces
+    /// `model_supports_reasoning_summaries()` to return `true`.
+    pub model_supports_reasoning_summaries: bool,
 }
 
 impl Config {
@@ -308,6 +312,9 @@ pub struct ConfigToml {
     pub model_reasoning_effort: Option<ReasoningEffort>,
     pub model_reasoning_summary: Option<ReasoningSummary>,
+
+    /// Override to force-enable reasoning summaries for the configured model.
+    pub model_supports_reasoning_summaries: Option<bool>,
 }
 
 impl ConfigToml {
@@ -472,6 +479,10 @@ impl Config {
                 .model_reasoning_summary
                 .or(cfg.model_reasoning_summary)
                 .unwrap_or_default(),
+            model_supports_reasoning_summaries: cfg
+                .model_supports_reasoning_summaries
+                .unwrap_or(false),
         };
 
         Ok(config)
     }
```
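The `unwrap_or(false)` above is the entire defaulting story: the key is optional in `ConfigToml`, and an absent key means `false`. A runnable sketch of that round trip, using a minimal stand-in struct (the real `ConfigToml` has many more fields; this assumes the `serde` and `toml` crates as dependencies):

```rust
use serde::Deserialize;

// Minimal stand-in mirroring the relevant part of `ConfigToml`.
#[derive(Deserialize)]
struct ConfigToml {
    model_supports_reasoning_summaries: Option<bool>,
}

fn main() {
    let set: ConfigToml =
        toml::from_str("model_supports_reasoning_summaries = true").unwrap();
    let unset: ConfigToml = toml::from_str("").unwrap();

    // Same defaulting as the derivation above: an absent key means `false`.
    assert!(set.model_supports_reasoning_summaries.unwrap_or(false));
    assert!(!unset.model_supports_reasoning_summaries.unwrap_or(false));
}
```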
```diff
@@ -776,6 +787,7 @@ disable_response_storage = true
                 hide_agent_reasoning: false,
                 model_reasoning_effort: ReasoningEffort::High,
                 model_reasoning_summary: ReasoningSummary::Detailed,
+                model_supports_reasoning_summaries: false,
             },
             o3_profile_config
         );
@@ -820,6 +832,7 @@ disable_response_storage = true
             hide_agent_reasoning: false,
             model_reasoning_effort: ReasoningEffort::default(),
             model_reasoning_summary: ReasoningSummary::default(),
+            model_supports_reasoning_summaries: false,
         };
 
         assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -879,6 +892,7 @@ disable_response_storage = true
             hide_agent_reasoning: false,
             model_reasoning_effort: ReasoningEffort::default(),
             model_reasoning_summary: ReasoningSummary::default(),
+            model_supports_reasoning_summaries: false,
         };
 
         assert_eq!(expected_zdr_profile_config, zdr_profile_config);
```

View File

```diff
@@ -139,7 +139,7 @@ impl EventProcessor {
         ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
     ];
     if config.model_provider.wire_api == WireApi::Responses
-        && model_supports_reasoning_summaries(&config.model)
+        && model_supports_reasoning_summaries(config)
     {
         entries.push((
             "reasoning effort",
```

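The hunk above and the TUI hunk below apply the same two-part gate before listing reasoning settings: the provider must use the Responses wire API, and the (now config-aware) heuristic must return true. A self-contained sketch of that gate, with stand-in types and the heuristic stubbed (its real body is shown earlier):

```rust
#[derive(PartialEq)]
enum WireApi {
    Responses,
    Chat,
}

// Stand-ins for the crate types; only what the gate needs.
struct ModelProviderInfo {
    wire_api: WireApi,
}

struct Config {
    model_provider: ModelProviderInfo,
    model_supports_reasoning_summaries: bool,
}

// Stub: the real heuristic (prefix check plus override) is shown earlier.
fn model_supports_reasoning_summaries(config: &Config) -> bool {
    config.model_supports_reasoning_summaries
}

// The gate shared by the event processor and the TUI history cell.
fn show_reasoning_entries(config: &Config) -> bool {
    config.model_provider.wire_api == WireApi::Responses
        && model_supports_reasoning_summaries(config)
}

fn main() {
    let config = Config {
        model_provider: ModelProviderInfo {
            wire_api: WireApi::Chat,
        },
        model_supports_reasoning_summaries: true,
    };
    // Even with the override on, a chat-API provider shows no reasoning entries.
    assert!(!show_reasoning_entries(&config));
}
```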
View File

```diff
@@ -159,7 +159,7 @@ impl HistoryCell {
         ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
     ];
     if config.model_provider.wire_api == WireApi::Responses
-        && model_supports_reasoning_summaries(&config.model)
+        && model_supports_reasoning_summaries(config)
     {
         entries.push((
             "reasoning effort",
```