[app-server] model/list API (#5382)

Adds a paginated `model/list` API that returns the list of models supported by Codex.
Owen Lin
2025-10-21 11:15:17 -07:00
committed by GitHub
parent da82153a8d
commit 26f314904a
13 changed files with 489 additions and 131 deletions
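As a quick orientation before the diff, here is a minimal sketch of how a client might drain the paginated catalog with the new protocol types. The `JsonRpcClient` type and its `request` method are hypothetical stand-ins for whatever JSON-RPC transport the caller already has (the integration tests in this commit drive the server through `McpProcess` instead); everything else uses the types added below.

use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::Model;

// `JsonRpcClient` is a hypothetical transport with an async
// `request(method, params) -> anyhow::Result<serde_json::Value>` method.
async fn fetch_all_models(client: &JsonRpcClient) -> anyhow::Result<Vec<Model>> {
    let mut models: Vec<Model> = Vec::new();
    let mut cursor: Option<String> = None;
    loop {
        let params = ListModelsParams {
            page_size: Some(10),
            cursor: cursor.clone(),
        };
        let raw = client
            .request("model/list", serde_json::to_value(&params)?)
            .await?;
        let page: ListModelsResponse = serde_json::from_value(raw)?;
        models.extend(page.items);
        // A missing `nextCursor` means the catalog has been exhausted.
        match page.next_cursor {
            Some(next) => cursor = Some(next),
            None => break,
        }
    }
    Ok(models)
}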

View File

@@ -106,6 +106,13 @@ client_request_definitions! {
params: ListConversationsParams,
response: ListConversationsResponse,
},
#[serde(rename = "model/list")]
#[ts(rename = "model/list")]
/// List available Codex models along with display metadata.
ListModels {
params: ListModelsParams,
response: ListModelsResponse,
},
/// Resume a recorded Codex conversation from a rollout file.
ResumeConversation {
params: ResumeConversationParams,
@@ -304,6 +311,47 @@ pub struct ListConversationsResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListModelsParams {
/// Optional page size; defaults to a reasonable server-side value.
#[serde(skip_serializing_if = "Option::is_none")]
pub page_size: Option<usize>,
/// Opaque pagination cursor returned by a previous call.
#[serde(skip_serializing_if = "Option::is_none")]
pub cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct Model {
pub id: String,
pub model: String,
pub display_name: String,
pub description: String,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
/// Only one model should be marked as default.
pub is_default: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ReasoningEffortOption {
pub reasoning_effort: ReasoningEffort,
pub description: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ListModelsResponse {
pub items: Vec<Model>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If `None`, there are no more items to return.
#[serde(skip_serializing_if = "Option::is_none")]
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationParams {
@@ -994,4 +1042,21 @@ mod tests {
);
Ok(())
}
#[test]
fn serialize_list_models() -> Result<()> {
let request = ClientRequest::ListModels {
request_id: RequestId::Integer(2),
params: ListModelsParams::default(),
};
assert_eq!(
json!({
"method": "model/list",
"id": 2,
"params": {}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
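// A hypothetical companion test (not part of this commit) sketching how the
// optional params serialize: `page_size` becomes camelCase `pageSize` on the
// wire, and the cursor round-trips as an opaque string.
#[test]
fn serialize_list_models_with_params() -> Result<()> {
    let request = ClientRequest::ListModels {
        request_id: RequestId::Integer(3),
        params: ListModelsParams {
            page_size: Some(1),
            cursor: Some("1".to_string()),
        },
    };
    assert_eq!(
        json!({
            "method": "model/list",
            "id": 3,
            "params": {
                "pageSize": 1,
                "cursor": "1"
            }
        }),
        serde_json::to_value(&request)?,
    );
    Ok(())
}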
}

View File

@@ -1,6 +1,7 @@
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::fuzzy_file_search::run_fuzzy_file_search;
use crate::models::supported_models;
use crate::outgoing_message::OutgoingMessageSender;
use crate::outgoing_message::OutgoingNotification;
use codex_app_server_protocol::AddConversationListenerParams;
@@ -29,6 +30,8 @@ use codex_app_server_protocol::InterruptConversationResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListConversationsResponse;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LoginApiKeyResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
@@ -111,7 +114,6 @@ use uuid::Uuid;
// Duration before a ChatGPT login attempt is abandoned.
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
struct ActiveLogin {
shutdown_handle: ShutdownHandle,
login_id: Uuid,
@@ -172,6 +174,9 @@ impl CodexMessageProcessor {
ClientRequest::ListConversations { request_id, params } => {
self.handle_list_conversations(request_id, params).await;
}
ClientRequest::ListModels { request_id, params } => {
self.list_models(request_id, params).await;
}
ClientRequest::ResumeConversation { request_id, params } => {
self.handle_resume_conversation(request_id, params).await;
}
@@ -831,6 +836,58 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}
async fn list_models(&self, request_id: RequestId, params: ListModelsParams) {
let ListModelsParams { page_size, cursor } = params;
let models = supported_models();
let total = models.len();
if total == 0 {
let response = ListModelsResponse {
items: Vec::new(),
next_cursor: None,
};
self.outgoing.send_response(request_id, response).await;
return;
}
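// A missing page size means "return the whole catalog"; explicit values are
// clamped into 1..=total so a zero or oversized request still yields a valid slice.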
let effective_page_size = page_size.unwrap_or(total).max(1).min(total);
let start = match cursor {
Some(cursor) => match cursor.parse::<usize>() {
Ok(idx) => idx,
Err(_) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid cursor: {cursor}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
},
None => 0,
};
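// `start` is the index of the first item to return; a cursor equal to `total`
// yields an empty final page, while anything past it is rejected below.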
if start > total {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("cursor {start} exceeds total models {total}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
let end = start.saturating_add(effective_page_size).min(total);
let items = models[start..end].to_vec();
let next_cursor = if end < total {
Some(end.to_string())
} else {
None
};
let response = ListModelsResponse { items, next_cursor };
self.outgoing.send_response(request_id, response).await;
}
async fn handle_resume_conversation(
&self,
request_id: RequestId,

View File

@@ -27,6 +27,7 @@ mod codex_message_processor;
mod error_code;
mod fuzzy_file_search;
mod message_processor;
mod models;
mod outgoing_message;
/// Size of the bounded channels used to communicate between tasks. The value

View File

@@ -0,0 +1,38 @@
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
pub fn supported_models() -> Vec<Model> {
builtin_model_presets(None)
.into_iter()
.map(model_from_preset)
.collect()
}
fn model_from_preset(preset: ModelPreset) -> Model {
Model {
id: preset.id.to_string(),
model: preset.model.to_string(),
display_name: preset.display_name.to_string(),
description: preset.description.to_string(),
supported_reasoning_efforts: reasoning_efforts_from_preset(
preset.supported_reasoning_efforts,
),
default_reasoning_effort: preset.default_reasoning_effort,
is_default: preset.is_default,
}
}
fn reasoning_efforts_from_preset(
efforts: &'static [ReasoningEffortPreset],
) -> Vec<ReasoningEffortOption> {
efforts
.iter()
.map(|preset| ReasoningEffortOption {
reasoning_effort: preset.effort,
description: preset.description.to_string(),
})
.collect()
}

View File

@@ -21,6 +21,7 @@ use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RemoveConversationListenerParams;
@@ -264,6 +265,15 @@ impl McpProcess {
self.send_request("listConversations", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
params: ListModelsParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("model/list", params).await
}
/// Send a `resumeConversation` JSON-RPC request.
pub async fn send_resume_conversation_request(
&mut self,

View File

@@ -7,6 +7,7 @@ mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod model_list;
mod rate_limits;
mod send_message;
mod set_default_model;

View File

@@ -0,0 +1,183 @@
use std::time::Duration;
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListModelsParams;
use codex_app_server_protocol::ListModelsResponse;
use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
use codex_protocol::config_types::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ListModelsParams {
page_size: Some(100),
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ListModelsResponse { items, next_cursor } = to_response::<ListModelsResponse>(response)?;
let expected_models = vec![
Model {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for coding tasks with many tools.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
},
Model {
id: "gpt-5".to_string(),
model: "gpt-5".to_string(),
display_name: "gpt-5".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Minimal,
description: "Fastest responses with little reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward \
queries and short explanations"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for \
general-purpose tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
];
assert_eq!(items, expected_models);
assert!(next_cursor.is_none());
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_pagination_works() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let ListModelsResponse {
items: first_items,
next_cursor: first_cursor,
} = to_response::<ListModelsResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5-codex");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
.send_list_models_request(ListModelsParams {
page_size: Some(1),
cursor: Some(next_cursor.clone()),
})
.await?;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let ListModelsResponse {
items: second_items,
next_cursor: second_cursor,
} = to_response::<ListModelsResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5");
assert!(second_cursor.is_none());
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_models_rejects_invalid_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ListModelsParams {
page_size: None,
cursor: Some("invalid".to_string()),
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.id, RequestId::Integer(request_id));
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(error.error.message, "invalid cursor: invalid");
Ok(())
}

View File

@@ -1,73 +1,96 @@
use codex_app_server_protocol::AuthMode;
use codex_core::protocol_config_types::ReasoningEffort;
/// A simple preset pairing a model slug with a reasoning effort.
/// A reasoning effort option that can be surfaced for a model.
#[derive(Debug, Clone, Copy)]
pub struct ReasoningEffortPreset {
/// Effort level that the model supports.
pub effort: ReasoningEffort,
/// Short human description shown next to the effort in UIs.
pub description: &'static str,
}
/// Metadata describing a Codex-supported model.
#[derive(Debug, Clone, Copy)]
pub struct ModelPreset {
/// Stable identifier for the preset.
pub id: &'static str,
/// Display label shown in UIs.
pub label: &'static str,
/// Short human description shown next to the label in UIs.
pub description: &'static str,
/// Model slug (e.g., "gpt-5").
pub model: &'static str,
/// Reasoning effort to apply for this preset.
pub effort: Option<ReasoningEffort>,
/// Display name shown in UIs.
pub display_name: &'static str,
/// Short human description shown in UIs.
pub description: &'static str,
/// Reasoning effort applied when none is explicitly chosen.
pub default_reasoning_effort: ReasoningEffort,
/// Supported reasoning effort options.
pub supported_reasoning_efforts: &'static [ReasoningEffortPreset],
/// Whether this is the default model for new users.
pub is_default: bool,
}
const PRESETS: &[ModelPreset] = &[
ModelPreset {
id: "gpt-5-codex-low",
label: "gpt-5-codex low",
description: "Fastest responses with limited reasoning",
id: "gpt-5-codex",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::Low),
display_name: "gpt-5-codex",
description: "Optimized for coding tasks with many tools.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: true,
},
ModelPreset {
id: "gpt-5-codex-medium",
label: "gpt-5-codex medium",
description: "Dynamically adjusts reasoning based on the task",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::Medium),
},
ModelPreset {
id: "gpt-5-codex-high",
label: "gpt-5-codex high",
description: "Maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5-codex",
effort: Some(ReasoningEffort::High),
},
ModelPreset {
id: "gpt-5-minimal",
label: "gpt-5 minimal",
description: "Fastest responses with little reasoning",
id: "gpt-5",
model: "gpt-5",
effort: Some(ReasoningEffort::Minimal),
},
ModelPreset {
id: "gpt-5-low",
label: "gpt-5 low",
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
model: "gpt-5",
effort: Some(ReasoningEffort::Low),
},
ModelPreset {
id: "gpt-5-medium",
label: "gpt-5 medium",
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
model: "gpt-5",
effort: Some(ReasoningEffort::Medium),
},
ModelPreset {
id: "gpt-5-high",
label: "gpt-5 high",
description: "Maximizes reasoning depth for complex or ambiguous problems",
model: "gpt-5",
effort: Some(ReasoningEffort::High),
display_name: "gpt-5",
description: "Broad world knowledge with strong general reasoning.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Minimal,
description: "Fastest responses with little reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: false,
},
];
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.to_vec()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn only_one_default_model_is_configured() {
let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
assert_eq!(default_models, 1);
}
}

View File

@@ -19,6 +19,7 @@ At a glance:
  - `listConversations`, `resumeConversation`, `archiveConversation`
- Configuration and info
  - `getUserSavedConfig`, `setDefaultModel`, `getUserAgent`, `userInfo`
  - `model/list` → enumerate available models and reasoning options
- Auth
  - `loginApiKey`, `loginChatGpt`, `cancelLoginChatGpt`, `logoutChatGpt`, `getAuthStatus`
- Utilities
@@ -73,6 +74,24 @@ Interrupt a running turn: `interruptConversation`.
List/resume/archive: `listConversations`, `resumeConversation`, `archiveConversation`.
## Models
Fetch the catalog of models available in the current Codex build with `model/list`. The request accepts optional pagination inputs (an example exchange is sketched after the field list):
- `pageSize`: number of models to return (defaults to a server-selected value)
- `cursor`: opaque string from the previous response's `nextCursor`
Each response yields:
- `items`: ordered list of models. Each model includes:
  - `id`, `model`, `displayName`, `description`
  - `supportedReasoningEfforts`: array of objects with:
    - `reasoningEffort`: one of `minimal|low|medium|high`
    - `description`: human-friendly label for the effort
  - `defaultReasoningEffort`: suggested effort for the UI
  - `isDefault`: whether the model is recommended for most users
- `nextCursor`: pass into the next request to continue paging (optional)
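The sketch below shows one illustrative request and the matching `result` payload, written with `serde_json::json!` so the camelCase field names stay exact; the actual catalog contents depend on the Codex build, and additional reasoning-effort entries are elided for brevity.

use serde_json::json;

fn main() {
    // Ask for the first page, one model at a time.
    let request = json!({
        "method": "model/list",
        "id": 7,
        "params": { "pageSize": 1 }
    });

    // Illustrative `result` payload: one model plus a cursor for the next page.
    let result = json!({
        "items": [{
            "id": "gpt-5-codex",
            "model": "gpt-5-codex",
            "displayName": "gpt-5-codex",
            "description": "Optimized for coding tasks with many tools.",
            "supportedReasoningEfforts": [{
                "reasoningEffort": "medium",
                "description": "Dynamically adjusts reasoning based on the task"
            }],
            "defaultReasoningEffort": "medium",
            "isDefault": true
        }],
        "nextCursor": "1"
    });

    println!("{request:#}\n{result:#}");
}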
## Event stream
While a conversation runs, the server sends notifications:

View File

@@ -354,8 +354,8 @@ impl App {
self.config.model_family = family;
}
}
AppEvent::OpenReasoningPopup { model, presets } => {
self.chat_widget.open_reasoning_popup(model, presets);
AppEvent::OpenReasoningPopup { model } => {
self.chat_widget.open_reasoning_popup(model);
}
AppEvent::OpenFullAccessConfirmation { preset } => {
self.chat_widget.open_full_access_confirmation(preset);

View File

@@ -64,8 +64,7 @@ pub(crate) enum AppEvent {
/// Open the reasoning selection popup after picking a model.
OpenReasoningPopup {
model: String,
presets: Vec<ModelPreset>,
model: ModelPreset,
},
/// Open the confirmation prompt before enabling full access mode.

View File

@@ -302,16 +302,6 @@ fn create_initial_user_message(text: String, image_paths: Vec<PathBuf>) -> Optio
}
impl ChatWidget {
fn model_description_for(slug: &str) -> Option<&'static str> {
if slug.starts_with("gpt-5-codex") {
Some("Optimized for coding tasks with many tools.")
} else if slug.starts_with("gpt-5") {
Some("Broad world knowledge with strong general reasoning.")
} else {
None
}
}
fn flush_answer_stream_with_separator(&mut self) {
if let Some(mut controller) = self.stream_controller.take()
&& let Some(cell) = controller.finalize()
@@ -1661,39 +1651,22 @@ impl ChatWidget {
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let presets: Vec<ModelPreset> = builtin_model_presets(auth_mode);
let mut grouped: Vec<(&str, Vec<ModelPreset>)> = Vec::new();
for preset in presets.into_iter() {
if let Some((_, entries)) = grouped.iter_mut().find(|(model, _)| *model == preset.model)
{
entries.push(preset);
} else {
grouped.push((preset.model, vec![preset]));
}
}
let mut items: Vec<SelectionItem> = Vec::new();
for (model_slug, entries) in grouped.into_iter() {
let name = model_slug.to_string();
let description = Self::model_description_for(model_slug)
.map(std::string::ToString::to_string)
.or_else(|| {
entries
.iter()
.find(|preset| !preset.description.is_empty())
.map(|preset| preset.description.to_string())
})
.or_else(|| entries.first().map(|preset| preset.description.to_string()));
let is_current = model_slug == current_model;
let model_slug_string = model_slug.to_string();
let presets_for_model = entries.clone();
for preset in presets.into_iter() {
let description = if preset.description.is_empty() {
None
} else {
Some(preset.description.to_string())
};
let is_current = preset.model == current_model;
let preset_for_action = preset;
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::OpenReasoningPopup {
model: model_slug_string.clone(),
presets: presets_for_model.clone(),
model: preset_for_action,
});
})];
items.push(SelectionItem {
name,
name: preset.display_name.to_string(),
description,
is_current,
actions,
@@ -1712,28 +1685,22 @@ impl ChatWidget {
}
/// Open a popup to choose the reasoning effort (stage 2) for the given model.
pub(crate) fn open_reasoning_popup(&mut self, model_slug: String, presets: Vec<ModelPreset>) {
let default_effort = ReasoningEffortConfig::default();
pub(crate) fn open_reasoning_popup(&mut self, preset: ModelPreset) {
let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;
let supported = preset.supported_reasoning_efforts;
let has_none_choice = presets.iter().any(|preset| preset.effort.is_none());
struct EffortChoice {
stored: Option<ReasoningEffortConfig>,
display: ReasoningEffortConfig,
}
let mut choices: Vec<EffortChoice> = Vec::new();
for effort in ReasoningEffortConfig::iter() {
if presets.iter().any(|preset| preset.effort == Some(effort)) {
if supported.iter().any(|option| option.effort == effort) {
choices.push(EffortChoice {
stored: Some(effort),
display: effort,
});
}
if has_none_choice && default_effort == effort {
choices.push(EffortChoice {
stored: None,
display: effort,
});
}
}
if choices.is_empty() {
choices.push(EffortChoice {
@@ -1742,21 +1709,16 @@ impl ChatWidget {
});
}
let default_choice: Option<ReasoningEffortConfig> = if has_none_choice {
None
} else if choices
let default_choice: Option<ReasoningEffortConfig> = choices
.iter()
.any(|choice| choice.stored == Some(default_effort))
{
Some(default_effort)
} else {
choices
.iter()
.find_map(|choice| choice.stored)
.or(Some(default_effort))
};
.then_some(Some(default_effort))
.flatten()
.or_else(|| choices.iter().find_map(|choice| choice.stored))
.or(Some(default_effort));
let is_current_model = self.config.model == model_slug;
let model_slug = preset.model.to_string();
let is_current_model = self.config.model == preset.model;
let highlight_choice = if is_current_model {
self.config.model_reasoning_effort
} else {
@@ -1773,19 +1735,19 @@ impl ChatWidget {
effort_label.push_str(" (default)");
}
let description = presets
.iter()
.find(|preset| preset.effort == choice.stored && !preset.description.is_empty())
.map(|preset| preset.description.to_string())
.or_else(|| {
presets
let description = choice
.stored
.and_then(|effort| {
supported
.iter()
.find(|preset| preset.effort == choice.stored)
.map(|preset| preset.description.to_string())
});
.find(|option| option.effort == effort)
.map(|option| option.description.to_string())
})
.filter(|text| !text.is_empty());
let warning = "⚠ High reasoning effort can quickly consume Plus plan rate limits.";
let show_warning = model_slug == "gpt-5-codex" && effort == ReasoningEffortConfig::High;
let show_warning =
preset.model == "gpt-5-codex" && effort == ReasoningEffortConfig::High;
let selected_description = show_warning.then(|| {
description
.as_ref()

View File

@@ -1122,11 +1122,11 @@ fn model_reasoning_selection_popup_snapshot() {
chat.config.model = "gpt-5-codex".to_string();
chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High);
let presets = builtin_model_presets(None)
let preset = builtin_model_presets(None)
.into_iter()
.filter(|preset| preset.model == "gpt-5-codex")
.collect::<Vec<_>>();
chat.open_reasoning_popup("gpt-5-codex".to_string(), presets);
.find(|preset| preset.model == "gpt-5-codex")
.expect("gpt-5-codex preset");
chat.open_reasoning_popup(preset);
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("model_reasoning_selection_popup", popup);
@@ -1141,9 +1141,9 @@ fn reasoning_popup_escape_returns_to_model_popup() {
let presets = builtin_model_presets(None)
.into_iter()
.filter(|preset| preset.model == "gpt-5-codex")
.collect::<Vec<_>>();
chat.open_reasoning_popup("gpt-5-codex".to_string(), presets);
.find(|preset| preset.model == "gpt-5-codex")
.expect("gpt-5-codex preset");
chat.open_reasoning_popup(presets);
let before_escape = render_bottom_popup(&chat, 80);
assert!(before_escape.contains("Select Reasoning Level"));