chore: Clean up verbosity config (#3056)
## Summary

It appears that #2108 hit a merge conflict with #2355; I failed to notice the path difference when re-reviewing the former. This PR rectifies that and consolidates the `Verbosity` config type into the protocol package, in line with our philosophy of specifying types in one place.

## Testing

- [x] Adds config test for `model_verbosity`
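For orientation, here is a minimal sketch (not part of this change) of what the consolidation means for downstream code: `Verbosity` is now imported from `codex_protocol::config_types` instead of the core crate's `config_types`, and `model_verbosity` remains optional in the config, so behavior is unchanged when the key is absent. The helper function below is purely hypothetical.

```rust
// After this PR, the canonical Verbosity type lives in the protocol crate.
use codex_protocol::config_types::Verbosity;

// Hypothetical helper (not in the diff): resolve the effective verbosity from
// an optional `model_verbosity` config value, falling back to the enum's
// #[default] variant (Medium, per the definition moved in this PR).
fn effective_verbosity(configured: Option<Verbosity>) -> Verbosity {
    configured.unwrap_or_default()
}
```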
```diff
@@ -1,4 +1,3 @@
-use crate::config_types::Verbosity as VerbosityConfig;
 use crate::error::Result;
 use crate::model_family::ModelFamily;
 use crate::openai_tools::OpenAiTool;
@@ -6,6 +5,7 @@ use crate::protocol::TokenUsage;
 use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
 use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
 use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
+use codex_protocol::config_types::Verbosity as VerbosityConfig;
 use codex_protocol::models::ContentItem;
 use codex_protocol::models::ResponseItem;
 use futures::Stream;
```
```diff
@@ -6,7 +6,6 @@ use crate::config_types::ShellEnvironmentPolicy;
 use crate::config_types::ShellEnvironmentPolicyToml;
 use crate::config_types::Tui;
 use crate::config_types::UriBasedFileOpener;
-use crate::config_types::Verbosity;
 use crate::git_info::resolve_root_git_project_for_trust;
 use crate::model_family::ModelFamily;
 use crate::model_family::find_family_for_model;
@@ -18,6 +17,7 @@ use crate::protocol::SandboxPolicy;
 use codex_protocol::config_types::ReasoningEffort;
 use codex_protocol::config_types::ReasoningSummary;
 use codex_protocol::config_types::SandboxMode;
+use codex_protocol::config_types::Verbosity;
 use codex_protocol::mcp_protocol::AuthMode;
 use dirs::home_dir;
 use serde::Deserialize;
@@ -1067,6 +1067,14 @@ model = "o3"
 model_provider = "openai"
 approval_policy = "on-failure"
 disable_response_storage = true
+
+[profiles.gpt5]
+model = "gpt-5"
+model_provider = "openai"
+approval_policy = "on-failure"
+model_reasoning_effort = "high"
+model_reasoning_summary = "detailed"
+model_verbosity = "high"
 "#;
 
         let cfg: ConfigToml = toml::from_str(toml).expect("TOML deserialization should succeed");
@@ -1325,6 +1333,66 @@ disable_response_storage = true
         Ok(())
     }
 
+    #[test]
+    fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> {
+        let fixture = create_test_fixture()?;
+
+        let gpt5_profile_overrides = ConfigOverrides {
+            config_profile: Some("gpt5".to_string()),
+            cwd: Some(fixture.cwd()),
+            ..Default::default()
+        };
+        let gpt5_profile_config = Config::load_from_base_config_with_overrides(
+            fixture.cfg.clone(),
+            gpt5_profile_overrides,
+            fixture.codex_home(),
+        )?;
+        let expected_gpt5_profile_config = Config {
+            model: "gpt-5".to_string(),
+            model_family: find_family_for_model("gpt-5").expect("known model slug"),
+            model_context_window: Some(400_000),
+            model_max_output_tokens: Some(128_000),
+            model_provider_id: "openai".to_string(),
+            model_provider: fixture.openai_provider.clone(),
+            approval_policy: AskForApproval::OnFailure,
+            sandbox_policy: SandboxPolicy::new_read_only_policy(),
+            shell_environment_policy: ShellEnvironmentPolicy::default(),
+            disable_response_storage: false,
+            user_instructions: None,
+            notify: None,
+            cwd: fixture.cwd(),
+            mcp_servers: HashMap::new(),
+            model_providers: fixture.model_provider_map.clone(),
+            project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
+            codex_home: fixture.codex_home(),
+            history: History::default(),
+            file_opener: UriBasedFileOpener::VsCode,
+            tui: Tui::default(),
+            codex_linux_sandbox_exe: None,
+            hide_agent_reasoning: false,
+            show_raw_agent_reasoning: false,
+            model_reasoning_effort: ReasoningEffort::High,
+            model_reasoning_summary: ReasoningSummary::Detailed,
+            model_verbosity: Some(Verbosity::High),
+            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
+            experimental_resume: None,
+            base_instructions: None,
+            include_plan_tool: false,
+            include_apply_patch_tool: false,
+            tools_web_search_request: false,
+            responses_originator_header: "codex_cli_rs".to_string(),
+            preferred_auth_method: AuthMode::ChatGPT,
+            use_experimental_streamable_shell_tool: false,
+            include_view_image_tool: true,
+            disable_paste_burst: false,
+            use_experimental_reasoning_summary: false,
+        };
+
+        assert_eq!(expected_gpt5_profile_config, gpt5_profile_config);
+
+        Ok(())
+    }
+
     #[test]
     fn test_set_project_trusted_writes_explicit_tables() -> anyhow::Result<()> {
         let codex_home = TempDir::new().unwrap();
```
```diff
@@ -1,10 +1,10 @@
 use serde::Deserialize;
 use std::path::PathBuf;
 
-use crate::config_types::Verbosity;
 use crate::protocol::AskForApproval;
 use codex_protocol::config_types::ReasoningEffort;
 use codex_protocol::config_types::ReasoningSummary;
+use codex_protocol::config_types::Verbosity;
 
 /// Collection of common configuration options that a user can define as a unit
 /// in `config.toml`.
```
```diff
@@ -8,8 +8,6 @@ use std::path::PathBuf;
 use wildmatch::WildMatchPattern;
 
 use serde::Deserialize;
-use serde::Serialize;
-use strum_macros::Display;
 
 #[derive(Deserialize, Debug, Clone, PartialEq)]
 pub struct McpServerConfig {
@@ -185,43 +183,3 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
         }
     }
 }
-
-/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
-#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
-#[serde(rename_all = "lowercase")]
-#[strum(serialize_all = "lowercase")]
-pub enum ReasoningEffort {
-    Low,
-    #[default]
-    Medium,
-    High,
-    /// Option to disable reasoning.
-    None,
-}
-
-/// A summary of the reasoning performed by the model. This can be useful for
-/// debugging and understanding the model's reasoning process.
-/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
-#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
-#[serde(rename_all = "lowercase")]
-#[strum(serialize_all = "lowercase")]
-pub enum ReasoningSummary {
-    #[default]
-    Auto,
-    Concise,
-    Detailed,
-    /// Option to disable reasoning summaries.
-    None,
-}
-
-/// Controls output length/detail on GPT-5 models via the Responses API.
-/// Serialized with lowercase values to match the OpenAI API.
-#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
-#[serde(rename_all = "lowercase")]
-#[strum(serialize_all = "lowercase")]
-pub enum Verbosity {
-    Low,
-    #[default]
-    Medium,
-    High,
-}
```
```diff
@@ -35,6 +35,18 @@ pub enum ReasoningSummary {
     None,
 }
 
+/// Controls output length/detail on GPT-5 models via the Responses API.
+/// Serialized with lowercase values to match the OpenAI API.
+#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
+#[serde(rename_all = "lowercase")]
+#[strum(serialize_all = "lowercase")]
+pub enum Verbosity {
+    Low,
+    #[default]
+    Medium,
+    High,
+}
+
 #[derive(Deserialize, Debug, Clone, Copy, PartialEq, Default, Serialize, Display, TS)]
 #[serde(rename_all = "kebab-case")]
 #[strum(serialize_all = "kebab-case")]
```