consolidate reasoning enums into one (#2428)

We have three separate enums each for reasoning summaries and reasoning effort,
all with the same values. They can be consolidated into one shared definition.
This commit is contained in:
Ahmed Ibrahim
2025-08-18 11:50:17 -07:00
committed by GitHub
parent a4f76bd75a
commit c9963b52e9
7 changed files with 16 additions and 122 deletions

View File

@@ -29,8 +29,6 @@ use crate::client_common::ResponseStream;
use crate::client_common::ResponsesApiRequest; use crate::client_common::ResponsesApiRequest;
use crate::client_common::create_reasoning_param_for_request; use crate::client_common::create_reasoning_param_for_request;
use crate::config::Config; use crate::config::Config;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr; use crate::error::CodexErr;
use crate::error::Result; use crate::error::Result;
use crate::error::UsageLimitReachedError; use crate::error::UsageLimitReachedError;
@@ -42,6 +40,8 @@ use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::TokenUsage; use crate::protocol::TokenUsage;
use crate::user_agent::get_codex_user_agent; use crate::user_agent::get_codex_user_agent;
use crate::util::backoff; use crate::util::backoff;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use std::sync::Arc; use std::sync::Arc;
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]

View File

@@ -1,5 +1,3 @@
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::Result; use crate::error::Result;
use crate::model_family::ModelFamily; use crate::model_family::ModelFamily;
use crate::models::ContentItem; use crate::models::ContentItem;
@@ -7,6 +5,8 @@ use crate::models::ResponseItem;
use crate::openai_tools::OpenAiTool; use crate::openai_tools::OpenAiTool;
use crate::protocol::TokenUsage; use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS; use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use futures::Stream; use futures::Stream;
use serde::Serialize; use serde::Serialize;
use std::borrow::Cow; use std::borrow::Cow;
@@ -85,55 +85,8 @@ pub enum ResponseEvent {
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
pub(crate) struct Reasoning { pub(crate) struct Reasoning {
pub(crate) effort: OpenAiReasoningEffort, pub(crate) effort: ReasoningEffortConfig,
#[serde(skip_serializing_if = "Option::is_none")] pub(crate) summary: ReasoningSummaryConfig,
pub(crate) summary: Option<OpenAiReasoningSummary>,
}
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningEffort {
Minimal,
Low,
#[default]
Medium,
High,
}
impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
fn from(effort: ReasoningEffortConfig) -> Self {
match effort {
ReasoningEffortConfig::Minimal => Some(OpenAiReasoningEffort::Minimal),
ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
ReasoningEffortConfig::None => None,
}
}
}
/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
}
impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
fn from(summary: ReasoningSummaryConfig) -> Self {
match summary {
ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
ReasoningSummaryConfig::None => None,
}
}
} }
/// Request object that is serialized as JSON and POST'ed when using the /// Request object that is serialized as JSON and POST'ed when using the
@@ -164,12 +117,7 @@ pub(crate) fn create_reasoning_param_for_request(
summary: ReasoningSummaryConfig, summary: ReasoningSummaryConfig,
) -> Option<Reasoning> { ) -> Option<Reasoning> {
if model_family.supports_reasoning_summaries { if model_family.supports_reasoning_summaries {
let effort: Option<OpenAiReasoningEffort> = effort.into(); Some(Reasoning { effort, summary })
let effort = effort?;
Some(Reasoning {
effort,
summary: summary.into(),
})
} else { } else {
None None
} }

View File

@@ -39,8 +39,6 @@ use crate::client::ModelClient;
use crate::client_common::Prompt; use crate::client_common::Prompt;
use crate::client_common::ResponseEvent; use crate::client_common::ResponseEvent;
use crate::config::Config; use crate::config::Config;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::config_types::ShellEnvironmentPolicy; use crate::config_types::ShellEnvironmentPolicy;
use crate::conversation_history::ConversationHistory; use crate::conversation_history::ConversationHistory;
use crate::environment_context::EnvironmentContext; use crate::environment_context::EnvironmentContext;
@@ -107,6 +105,8 @@ use crate::shell;
use crate::turn_diff_tracker::TurnDiffTracker; use crate::turn_diff_tracker::TurnDiffTracker;
use crate::user_notification::UserNotification; use crate::user_notification::UserNotification;
use crate::util::backoff; use crate::util::backoff;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
// A convenience extension trait for acquiring mutex locks where poisoning is // A convenience extension trait for acquiring mutex locks where poisoning is
// unrecoverable and should abort the program. This avoids scattered `.unwrap()` // unrecoverable and should abort the program. This avoids scattered `.unwrap()`
@@ -1035,8 +1035,8 @@ async fn submission_loop(
Arc::new(per_turn_config), Arc::new(per_turn_config),
None, None,
provider, provider,
effort.into(), effort,
summary.into(), summary,
sess.session_id, sess.session_id,
); );

View File

@@ -1,8 +1,6 @@
use crate::config_profile::ConfigProfile; use crate::config_profile::ConfigProfile;
use crate::config_types::History; use crate::config_types::History;
use crate::config_types::McpServerConfig; use crate::config_types::McpServerConfig;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::config_types::SandboxWorkspaceWrite; use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy; use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml; use crate::config_types::ShellEnvironmentPolicyToml;
@@ -15,6 +13,8 @@ use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info; use crate::openai_model_info::get_model_info;
use crate::protocol::AskForApproval; use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy; use crate::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode; use codex_protocol::config_types::SandboxMode;
use dirs::home_dir; use dirs::home_dir;
use serde::Deserialize; use serde::Deserialize;

View File

@@ -1,9 +1,9 @@
use serde::Deserialize; use serde::Deserialize;
use std::path::PathBuf; use std::path::PathBuf;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::protocol::AskForApproval; use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
/// Collection of common configuration options that a user can define as a unit /// Collection of common configuration options that a user can define as a unit
/// in `config.toml`. /// in `config.toml`.

View File

@@ -5,11 +5,9 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::path::PathBuf; use std::path::PathBuf;
use strum_macros::Display;
use wildmatch::WildMatchPattern; use wildmatch::WildMatchPattern;
use serde::Deserialize; use serde::Deserialize;
use serde::Serialize;
#[derive(Deserialize, Debug, Clone, PartialEq)] #[derive(Deserialize, Debug, Clone, PartialEq)]
pub struct McpServerConfig { pub struct McpServerConfig {
@@ -185,56 +183,3 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
} }
} }
} }
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
Minimal,
Low,
#[default]
Medium,
High,
/// Option to disable reasoning.
None,
}
/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
/// Option to disable reasoning summaries.
None,
}
// Conversions from protocol enums to core config enums used where protocol
// values are supplied by clients and core needs its internal representations.
impl From<codex_protocol::config_types::ReasoningEffort> for ReasoningEffort {
fn from(v: codex_protocol::config_types::ReasoningEffort) -> Self {
match v {
codex_protocol::config_types::ReasoningEffort::Low => ReasoningEffort::Low,
codex_protocol::config_types::ReasoningEffort::Medium => ReasoningEffort::Medium,
codex_protocol::config_types::ReasoningEffort::High => ReasoningEffort::High,
codex_protocol::config_types::ReasoningEffort::None => ReasoningEffort::None,
}
}
}
impl From<codex_protocol::config_types::ReasoningSummary> for ReasoningSummary {
fn from(v: codex_protocol::config_types::ReasoningSummary) -> Self {
match v {
codex_protocol::config_types::ReasoningSummary::Auto => ReasoningSummary::Auto,
codex_protocol::config_types::ReasoningSummary::Concise => ReasoningSummary::Concise,
codex_protocol::config_types::ReasoningSummary::Detailed => ReasoningSummary::Detailed,
codex_protocol::config_types::ReasoningSummary::None => ReasoningSummary::None,
}
}
}

View File

@@ -7,6 +7,7 @@ use strum_macros::Display;
#[serde(rename_all = "lowercase")] #[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")] #[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort { pub enum ReasoningEffort {
Minimal,
Low, Low,
#[default] #[default]
Medium, Medium,