feat: reasoning effort as optional (#3527)

Allow the reasoning effort to be optional: `model_reasoning_effort` becomes `Option<ReasoningEffort>`, and `None` defers to the model's built-in default instead of forcing a value.
jif-oai
2025-09-12 12:06:33 -07:00
committed by GitHub
parent abdcb40f4c
commit c6fd056aa6
17 changed files with 95 additions and 61 deletions

View File

@@ -17,7 +17,10 @@ pub fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, String)>
     {
         entries.push((
            "reasoning effort",
-            config.model_reasoning_effort.to_string(),
+            config
+                .model_reasoning_effort
+                .map(|effort| effort.to_string())
+                .unwrap_or_else(|| "none".to_string()),
        ));
        entries.push((
            "reasoning summaries",

View File

@@ -12,7 +12,7 @@ pub struct ModelPreset {
     /// Model slug (e.g., "gpt-5").
     pub model: &'static str,
     /// Reasoning effort to apply for this preset.
-    pub effort: ReasoningEffort,
+    pub effort: Option<ReasoningEffort>,
 }

 /// Built-in list of model presets that pair a model with a reasoning effort.
@@ -26,35 +26,35 @@ pub fn builtin_model_presets() -> &'static [ModelPreset] {
             label: "gpt-5 minimal",
             description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
             model: "gpt-5",
-            effort: ReasoningEffort::Minimal,
+            effort: Some(ReasoningEffort::Minimal),
         },
         ModelPreset {
             id: "gpt-5-low",
             label: "gpt-5 low",
             description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
             model: "gpt-5",
-            effort: ReasoningEffort::Low,
+            effort: Some(ReasoningEffort::Low),
         },
         ModelPreset {
             id: "gpt-5-medium",
             label: "gpt-5 medium",
             description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
             model: "gpt-5",
-            effort: ReasoningEffort::Medium,
+            effort: Some(ReasoningEffort::Medium),
         },
         ModelPreset {
             id: "gpt-5-high",
             label: "gpt-5 high",
             description: "— maximizes reasoning depth for complex or ambiguous problems",
             model: "gpt-5",
-            effort: ReasoningEffort::High,
+            effort: Some(ReasoningEffort::High),
         },
         ModelPreset {
             id: "gpt-5-high-new",
             label: "gpt-5 high new",
             description: "— our latest release tuned to rely on the model's built-in reasoning defaults",
             model: "gpt-5-high-new",
-            effort: ReasoningEffort::Medium,
+            effort: None,
         },
     ];
     PRESETS

View File

@@ -72,7 +72,7 @@ pub struct ModelClient {
     client: reqwest::Client,
     provider: ModelProviderInfo,
     conversation_id: ConversationId,
-    effort: ReasoningEffortConfig,
+    effort: Option<ReasoningEffortConfig>,
     summary: ReasoningSummaryConfig,
 }
@@ -81,7 +81,7 @@ impl ModelClient {
         config: Arc<Config>,
         auth_manager: Option<Arc<AuthManager>>,
         provider: ModelProviderInfo,
-        effort: ReasoningEffortConfig,
+        effort: Option<ReasoningEffortConfig>,
         summary: ReasoningSummaryConfig,
         conversation_id: ConversationId,
     ) -> Self {
@@ -356,7 +356,7 @@ impl ModelClient {
     }

     /// Returns the current reasoning effort setting.
-    pub fn get_reasoning_effort(&self) -> ReasoningEffortConfig {
+    pub fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig> {
         self.effort
     }

View File

@@ -136,14 +136,14 @@ pub(crate) struct ResponsesApiRequest<'a> {
 pub(crate) fn create_reasoning_param_for_request(
     model_family: &ModelFamily,
-    effort: ReasoningEffortConfig,
+    effort: Option<ReasoningEffortConfig>,
     summary: ReasoningSummaryConfig,
 ) -> Option<Reasoning> {
-    if model_family.supports_reasoning_summaries {
-        Some(Reasoning { effort, summary })
-    } else {
-        None
+    if !model_family.supports_reasoning_summaries {
+        return None;
     }
+    effort.map(|effort| Reasoning { effort, summary })
 }

 pub(crate) fn create_text_param_for_request(
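The rewritten helper has two independent reasons to return `None`: the model family does not support reasoning summaries, or no effort is configured. A self-contained sketch of that truth table, with stand-in types in place of the crate's `ModelFamily`, `Reasoning`, and config enums:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum ReasoningEffort { Low, Medium, High }

#[derive(Clone, Copy, Debug, PartialEq)]
enum ReasoningSummary { Auto }

#[derive(Debug, PartialEq)]
struct Reasoning { effort: ReasoningEffort, summary: ReasoningSummary }

// Mirrors the new control flow: bail out early for unsupported families,
// then let Option::map build the param only when an effort is present.
fn create_reasoning_param(
    supports_reasoning_summaries: bool,
    effort: Option<ReasoningEffort>,
    summary: ReasoningSummary,
) -> Option<Reasoning> {
    if !supports_reasoning_summaries {
        return None;
    }
    effort.map(|effort| Reasoning { effort, summary })
}

fn main() {
    // Unsupported family: always None, even with an effort set.
    assert_eq!(
        create_reasoning_param(false, Some(ReasoningEffort::High), ReasoningSummary::Auto),
        None
    );
    // Supported family but no effort: the reasoning block is omitted.
    assert_eq!(create_reasoning_param(true, None, ReasoningSummary::Auto), None);
    // Supported family with an effort: the param is emitted.
    assert_eq!(
        create_reasoning_param(true, Some(ReasoningEffort::Low), ReasoningSummary::Auto),
        Some(Reasoning { effort: ReasoningEffort::Low, summary: ReasoningSummary::Auto })
    );
}
```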

View File

@@ -323,7 +323,7 @@ struct ConfigureSession {
     /// If not specified, server will use its default model.
     model: String,
-    model_reasoning_effort: ReasoningEffortConfig,
+    model_reasoning_effort: Option<ReasoningEffortConfig>,
     model_reasoning_summary: ReasoningSummaryConfig,

     /// Model instructions that are appended to the base instructions.

View File

@@ -140,7 +140,7 @@ pub struct Config {
     /// Value to use for `reasoning.effort` when making a request using the
     /// Responses API.
-    pub model_reasoning_effort: ReasoningEffort,
+    pub model_reasoning_effort: Option<ReasoningEffort>,

     /// If not "none", the value to use for `reasoning.summary` when making a
     /// request using the Responses API.
@@ -423,14 +423,24 @@ pub async fn persist_model_selection(
     if let Some(profile_name) = active_profile {
         let profile_table = ensure_profile_table(&mut doc, profile_name)?;
         profile_table["model"] = toml_edit::value(model);
-        if let Some(effort) = effort {
-            profile_table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
+        match effort {
+            Some(effort) => {
+                profile_table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
+            }
+            None => {
+                profile_table.remove("model_reasoning_effort");
+            }
         }
     } else {
         let table = doc.as_table_mut();
         table["model"] = toml_edit::value(model);
-        if let Some(effort) = effort {
-            table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
+        match effort {
+            Some(effort) => {
+                table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
+            }
+            None => {
+                table.remove("model_reasoning_effort");
+            }
         }
     }
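Note the asymmetry in `persist_model_selection`: `Some(effort)` writes the key, while `None` now deletes it, so a cleared effort does not resurface on the next load. A rough sketch of the same write-or-remove logic, assuming a recent toml_edit where the mutable document type is `DocumentMut` (stand-alone example, not the crate's actual function):

```rust
use toml_edit::DocumentMut;

fn main() {
    let mut doc: DocumentMut = "model = \"o3\"\nmodel_reasoning_effort = \"high\"\n"
        .parse()
        .expect("valid TOML");

    // The user cleared the effort, so the key is removed rather than written.
    let effort: Option<&str> = None;
    match effort {
        Some(effort) => {
            doc["model_reasoning_effort"] = toml_edit::value(effort);
        }
        None => {
            doc.as_table_mut().remove("model_reasoning_effort");
        }
    }

    assert_eq!(doc.to_string(), "model = \"o3\"\n");
}
```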
@@ -913,8 +923,7 @@ impl Config {
                 .unwrap_or(false),
             model_reasoning_effort: config_profile
                 .model_reasoning_effort
-                .or(cfg.model_reasoning_effort)
-                .unwrap_or_default(),
+                .or(cfg.model_reasoning_effort),
             model_reasoning_summary: config_profile
                 .model_reasoning_summary
                 .or(cfg.model_reasoning_summary)
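Dropping the trailing `.unwrap_or_default()` is the load-bearing change here: the profile value still wins over the base config, but when neither layer sets an effort the result now stays `None` instead of being coerced to `ReasoningEffort::default()`. In miniature:

```rust
fn main() {
    // Profile value wins over the base config, as before.
    let profile: Option<&str> = Some("low");
    let base: Option<&str> = Some("high");
    assert_eq!(profile.or(base), Some("low"));

    // New behavior: if neither layer sets an effort, None propagates instead
    // of being forced to a default, and the request omits reasoning entirely.
    let profile: Option<&str> = None;
    let base: Option<&str> = None;
    assert_eq!(profile.or(base), None);
}
```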
@@ -1438,7 +1447,7 @@ model_verbosity = "high"
             codex_linux_sandbox_exe: None,
             hide_agent_reasoning: false,
             show_raw_agent_reasoning: false,
-            model_reasoning_effort: ReasoningEffort::High,
+            model_reasoning_effort: Some(ReasoningEffort::High),
             model_reasoning_summary: ReasoningSummary::Detailed,
             model_verbosity: None,
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -1494,7 +1503,7 @@ model_verbosity = "high"
             codex_linux_sandbox_exe: None,
             hide_agent_reasoning: false,
             show_raw_agent_reasoning: false,
-            model_reasoning_effort: ReasoningEffort::default(),
+            model_reasoning_effort: None,
             model_reasoning_summary: ReasoningSummary::default(),
             model_verbosity: None,
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -1565,7 +1574,7 @@ model_verbosity = "high"
             codex_linux_sandbox_exe: None,
             hide_agent_reasoning: false,
             show_raw_agent_reasoning: false,
-            model_reasoning_effort: ReasoningEffort::default(),
+            model_reasoning_effort: None,
             model_reasoning_summary: ReasoningSummary::default(),
             model_verbosity: None,
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
@@ -1622,7 +1631,7 @@ model_verbosity = "high"
             codex_linux_sandbox_exe: None,
             hide_agent_reasoning: false,
             show_raw_agent_reasoning: false,
-            model_reasoning_effort: ReasoningEffort::High,
+            model_reasoning_effort: Some(ReasoningEffort::High),
             model_reasoning_summary: ReasoningSummary::Detailed,
             model_verbosity: Some(Verbosity::High),
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),

View File

@@ -441,6 +441,7 @@ async fn chatgpt_auth_sends_correct_request() {
     // Init session
     let codex_home = TempDir::new().unwrap();
     let mut config = load_default_config_for_test(&codex_home);
+    let include_reasoning = config.model_reasoning_effort.is_some();
     config.model_provider = model_provider;
     let conversation_manager = ConversationManager::with_auth(create_dummy_codex_auth());
     let NewConversation {
@@ -482,10 +483,18 @@ async fn chatgpt_auth_sends_correct_request() {
     );
     assert_eq!(request_chatgpt_account_id.to_str().unwrap(), "account_id");
     assert!(request_body["stream"].as_bool().unwrap());
-    assert_eq!(
-        request_body["include"][0].as_str().unwrap(),
-        "reasoning.encrypted_content"
-    );
+    if include_reasoning {
+        assert_eq!(
+            request_body["include"][0].as_str().unwrap(),
+            "reasoning.encrypted_content"
+        );
+    } else {
+        assert!(
+            request_body["include"]
+                .as_array()
+                .is_none_or(|items| items.is_empty())
+        );
+    }
 }

 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
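The fallback branch leans on `Option::is_none_or` (stable in Rust since 1.82), which treats a missing `include` array and an empty one the same way. Its semantics in isolation:

```rust
fn main() {
    let missing: Option<Vec<&str>> = None;
    let empty: Option<Vec<&str>> = Some(vec![]);
    let populated: Option<Vec<&str>> = Some(vec!["reasoning.encrypted_content"]);

    // is_none_or: true for None, otherwise applies the predicate.
    assert!(missing.is_none_or(|items| items.is_empty()));
    assert!(empty.is_none_or(|items| items.is_empty()));
    assert!(!populated.is_none_or(|items| items.is_empty()));
}
```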

View File

@@ -36,7 +36,7 @@ async fn override_turn_context_does_not_persist_when_config_exists() {
             approval_policy: None,
             sandbox_policy: None,
             model: Some("o3".to_string()),
-            effort: Some(ReasoningEffort::High),
+            effort: Some(Some(ReasoningEffort::High)),
             summary: None,
         })
         .await
@@ -76,7 +76,7 @@ async fn override_turn_context_does_not_create_config_file() {
             approval_policy: None,
             sandbox_policy: None,
             model: Some("o3".to_string()),
-            effort: Some(ReasoningEffort::Medium),
+            effort: Some(Some(ReasoningEffort::Medium)),
             summary: None,
         })
         .await

View File

@@ -387,7 +387,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
                 exclude_slash_tmp: true,
             }),
             model: Some("o3".to_string()),
-            effort: Some(ReasoningEffort::High),
+            effort: Some(Some(ReasoningEffort::High)),
             summary: Some(ReasoningSummary::Detailed),
         })
         .await
@@ -519,7 +519,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
                 exclude_slash_tmp: true,
             },
             model: "o3".to_string(),
-            effort: ReasoningEffort::High,
+            effort: Some(ReasoningEffort::High),
             summary: ReasoningSummary::Detailed,
         })
         .await

View File

@@ -280,7 +280,7 @@ mod tests {
             msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
                 session_id: conversation_id,
                 model: "gpt-4o".to_string(),
-                reasoning_effort: ReasoningEffort::default(),
+                reasoning_effort: Some(ReasoningEffort::default()),
                 history_log_id: 1,
                 history_entry_count: 1000,
                 initial_messages: None,
@@ -314,7 +314,7 @@
         let session_configured_event = SessionConfiguredEvent {
             session_id: conversation_id,
             model: "gpt-4o".to_string(),
-            reasoning_effort: ReasoningEffort::default(),
+            reasoning_effort: Some(ReasoningEffort::default()),
             history_log_id: 1,
             history_entry_count: 1000,
             initial_messages: None,

View File

@@ -320,7 +320,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() {
             approval_policy: AskForApproval::Never,
             sandbox_policy: SandboxPolicy::new_read_only_policy(),
             model: "mock-model".to_string(),
-            effort: ReasoningEffort::Medium,
+            effort: Some(ReasoningEffort::Medium),
             summary: ReasoningSummary::Auto,
         })
         .await

View File

@@ -223,7 +223,8 @@ pub struct NewConversationResponse {
     pub conversation_id: ConversationId,
     pub model: String,
     /// Note this could be ignored by the model.
-    pub reasoning_effort: ReasoningEffort,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub reasoning_effort: Option<ReasoningEffort>,
     pub rollout_path: PathBuf,
 }
@@ -526,7 +527,8 @@ pub struct SendUserTurnParams {
     pub approval_policy: AskForApproval,
     pub sandbox_policy: SandboxPolicy,
     pub model: String,
-    pub effort: ReasoningEffort,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub effort: Option<ReasoningEffort>,
     pub summary: ReasoningSummary,
 }

View File

@@ -81,7 +81,8 @@ pub enum Op {
         model: String,
         /// Will only be honored if the model is configured to use reasoning.
-        effort: ReasoningEffortConfig,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        effort: Option<ReasoningEffortConfig>,
         /// Will only be honored if the model is configured to use reasoning.
         summary: ReasoningSummaryConfig,
@@ -111,8 +112,11 @@ pub enum Op {
         model: Option<String>,
         /// Updated reasoning effort (honored only for reasoning-capable models).
+        ///
+        /// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear
+        /// the effort, or `None` to leave the existing value unchanged.
         #[serde(skip_serializing_if = "Option::is_none")]
-        effort: Option<ReasoningEffortConfig>,
+        effort: Option<Option<ReasoningEffortConfig>>,
         /// Updated reasoning summary preference (honored only for reasoning-capable models).
         #[serde(skip_serializing_if = "Option::is_none")]
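The nested `Option<Option<_>>` combined with `skip_serializing_if` gives the override three distinct wire states. A sketch of the round-trip, assuming serde/serde_json and with `String` standing in for `ReasoningEffortConfig`:

```rust
use serde::Serialize;

#[derive(Serialize)]
struct OverrideTurnContext {
    #[serde(skip_serializing_if = "Option::is_none")]
    effort: Option<Option<String>>,
}

fn main() {
    // None: the field is skipped entirely -> leave the current effort unchanged.
    let keep = OverrideTurnContext { effort: None };
    assert_eq!(serde_json::to_string(&keep).unwrap(), "{}");

    // Some(None): serialized as null -> clear the effort.
    let clear = OverrideTurnContext { effort: Some(None) };
    assert_eq!(serde_json::to_string(&clear).unwrap(), r#"{"effort":null}"#);

    // Some(Some(_)): serialized as a value -> set a specific effort.
    let set = OverrideTurnContext {
        effort: Some(Some("high".to_string())),
    };
    assert_eq!(serde_json::to_string(&set).unwrap(), r#"{"effort":"high"}"#);
}
```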
@@ -913,7 +917,8 @@ pub struct TurnContextItem {
     pub approval_policy: AskForApproval,
     pub sandbox_policy: SandboxPolicy,
     pub model: String,
-    pub effort: ReasoningEffortConfig,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub effort: Option<ReasoningEffortConfig>,
     pub summary: ReasoningSummaryConfig,
 }
@@ -1082,7 +1087,8 @@ pub struct SessionConfiguredEvent {
     pub model: String,
     /// The effort the model is putting into reasoning about the user's request.
-    pub reasoning_effort: ReasoningEffortConfig,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub reasoning_effort: Option<ReasoningEffortConfig>,
     /// Identifier of the history log file (inode on Unix, 0 otherwise).
     pub history_log_id: u64,
@@ -1172,7 +1178,7 @@ mod tests {
             msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
                 session_id: conversation_id,
                 model: "codex-mini-latest".to_string(),
-                reasoning_effort: ReasoningEffortConfig::default(),
+                reasoning_effort: Some(ReasoningEffortConfig::default()),
                 history_log_id: 0,
                 history_entry_count: 0,
                 initial_messages: None,

View File

@@ -337,7 +337,7 @@ impl App {
         }
     }

-    fn on_update_reasoning_effort(&mut self, effort: ReasoningEffortConfig) {
+    fn on_update_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
         let changed = self.config.model_reasoning_effort != effort;
         self.chat_widget.set_reasoning_effort(effort);
         self.config.model_reasoning_effort = effort;
@@ -372,17 +372,18 @@ impl App {
         let model = self.config.model.clone();
         let effort = self.config.model_reasoning_effort;
+        let effort_label = effort
+            .map(|effort| effort.to_string())
+            .unwrap_or_else(|| "none".to_string());
         let codex_home = self.config.codex_home.clone();
         match scope {
             SaveScope::Profile(profile) => {
-                match persist_model_selection(&codex_home, Some(profile), &model, Some(effort))
-                    .await
-                {
+                match persist_model_selection(&codex_home, Some(profile), &model, effort).await {
                     Ok(()) => {
                         self.model_saved_to_profile = true;
                         self.chat_widget.add_info_message(format!(
-                            "Saved model {model} ({effort}) for profile `{profile}`. Press Ctrl+S again to make this your global default."
+                            "Saved model {model} ({effort_label}) for profile `{profile}`. Press Ctrl+S again to make this your global default."
                         ));
                     }
                     Err(err) => {
@@ -397,11 +398,11 @@ impl App {
                 }
             }
             SaveScope::Global => {
-                match persist_model_selection(&codex_home, None, &model, Some(effort)).await {
+                match persist_model_selection(&codex_home, None, &model, effort).await {
                     Ok(()) => {
                         self.model_saved_to_global = true;
                         self.chat_widget.add_info_message(format!(
-                            "Saved model {model} ({effort}) as your global default."
+                            "Saved model {model} ({effort_label}) as your global default."
                         ));
                     }
                     Err(err) => {
@@ -537,19 +538,19 @@ mod tests {
        let mut app = make_test_app();
        app.model_saved_to_profile = true;
        app.model_saved_to_global = true;
-        app.config.model_reasoning_effort = ReasoningEffortConfig::Medium;
+        app.config.model_reasoning_effort = Some(ReasoningEffortConfig::Medium);
        app.chat_widget
-            .set_reasoning_effort(ReasoningEffortConfig::Medium);
-        app.on_update_reasoning_effort(ReasoningEffortConfig::High);
+            .set_reasoning_effort(Some(ReasoningEffortConfig::Medium));
+        app.on_update_reasoning_effort(Some(ReasoningEffortConfig::High));

        assert_eq!(
            app.config.model_reasoning_effort,
-            ReasoningEffortConfig::High
+            Some(ReasoningEffortConfig::High)
        );
        assert_eq!(
            app.chat_widget.config_ref().model_reasoning_effort,
-            ReasoningEffortConfig::High
+            Some(ReasoningEffortConfig::High)
        );
        assert!(!app.model_saved_to_profile);
        assert!(!app.model_saved_to_global);

View File

@@ -46,7 +46,7 @@ pub(crate) enum AppEvent {
     CommitTick,

     /// Update the current reasoning effort in the running app and widget.
-    UpdateReasoningEffort(ReasoningEffort),
+    UpdateReasoningEffort(Option<ReasoningEffort>),

     /// Update the current model slug in the running app and widget.
     UpdateModel(String),

View File

@@ -1191,9 +1191,13 @@ impl ChatWidget {
                 tracing::info!(
                     "New model: {}, New effort: {}, Current model: {}, Current effort: {}",
                     model_slug.clone(),
-                    effort,
+                    effort
+                        .map(|effort| effort.to_string())
+                        .unwrap_or_else(|| "none".to_string()),
                     current_model,
                     current_effort
+                        .map(|effort| effort.to_string())
+                        .unwrap_or_else(|| "none".to_string())
                 );
             })];
             items.push(SelectionItem {
@@ -1264,7 +1268,7 @@
     }

     /// Set the reasoning effort in the widget's config copy.
-    pub(crate) fn set_reasoning_effort(&mut self, effort: ReasoningEffortConfig) {
+    pub(crate) fn set_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
         self.config.model_reasoning_effort = effort;
     }

View File

@@ -138,7 +138,7 @@ fn resumed_initial_messages_render_history() {
     let configured = codex_core::protocol::SessionConfiguredEvent {
         session_id: conversation_id,
         model: "test-model".to_string(),
-        reasoning_effort: ReasoningEffortConfig::default(),
+        reasoning_effort: Some(ReasoningEffortConfig::default()),
         history_log_id: 0,
         history_entry_count: 0,
         initial_messages: Some(vec![