[app-server] model/list API (#5382)
Adds a paginated `model/list` API that returns the list of models supported by Codex.
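A minimal sketch of the wire format, assembled from the serialization test and the response types in this diff (the JSON-RPC response envelope is assumed; camelCase field names follow the `#[serde(rename_all = "camelCase")]` attributes, and `ReasoningEffort` values serialize as `minimal|low|medium|high`):

```jsonc
// Request: both params are optional, so "params": {} returns everything in one page.
{ "method": "model/list", "id": 2, "params": {} }

// Response result (abridged to one entry): the shape of ListModelsResponse.
{
  "items": [
    {
      "id": "gpt-5-codex",
      "model": "gpt-5-codex",
      "displayName": "gpt-5-codex",
      "description": "Optimized for coding tasks with many tools.",
      "supportedReasoningEfforts": [
        { "reasoningEffort": "low", "description": "Fastest responses with limited reasoning" }
        // ... medium and high follow
      ],
      "defaultReasoningEffort": "medium",
      "isDefault": true
    }
    // ... gpt-5 follows
  ]
  // "nextCursor" is omitted when there are no more items.
}
```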
@@ -106,6 +106,13 @@ client_request_definitions! {
         params: ListConversationsParams,
         response: ListConversationsResponse,
     },
+    #[serde(rename = "model/list")]
+    #[ts(rename = "model/list")]
+    /// List available Codex models along with display metadata.
+    ListModels {
+        params: ListModelsParams,
+        response: ListModelsResponse,
+    },
     /// Resume a recorded Codex conversation from a rollout file.
     ResumeConversation {
         params: ResumeConversationParams,

@@ -304,6 +311,47 @@ pub struct ListConversationsResponse {
     pub next_cursor: Option<String>,
 }
 
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct ListModelsParams {
+    /// Optional page size; defaults to a reasonable server-side value.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub page_size: Option<usize>,
+    /// Opaque pagination cursor returned by a previous call.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub cursor: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct Model {
+    pub id: String,
+    pub model: String,
+    pub display_name: String,
+    pub description: String,
+    pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
+    pub default_reasoning_effort: ReasoningEffort,
+    // Only one model should be marked as default.
+    pub is_default: bool,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct ReasoningEffortOption {
+    pub reasoning_effort: ReasoningEffort,
+    pub description: String,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+pub struct ListModelsResponse {
+    pub items: Vec<Model>,
+    /// Opaque cursor to pass to the next call to continue after the last item.
+    /// If `None`, there are no more items to return.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_cursor: Option<String>,
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 pub struct ResumeConversationParams {

@@ -994,4 +1042,21 @@ mod tests {
         );
         Ok(())
     }
+
+    #[test]
+    fn serialize_list_models() -> Result<()> {
+        let request = ClientRequest::ListModels {
+            request_id: RequestId::Integer(2),
+            params: ListModelsParams::default(),
+        };
+        assert_eq!(
+            json!({
+                "method": "model/list",
+                "id": 2,
+                "params": {}
+            }),
+            serde_json::to_value(&request)?,
+        );
+        Ok(())
+    }
 }

@@ -1,6 +1,7 @@
 use crate::error_code::INTERNAL_ERROR_CODE;
 use crate::error_code::INVALID_REQUEST_ERROR_CODE;
 use crate::fuzzy_file_search::run_fuzzy_file_search;
+use crate::models::supported_models;
 use crate::outgoing_message::OutgoingMessageSender;
 use crate::outgoing_message::OutgoingNotification;
 use codex_app_server_protocol::AddConversationListenerParams;

@@ -29,6 +30,8 @@ use codex_app_server_protocol::InterruptConversationResponse;
 use codex_app_server_protocol::JSONRPCErrorError;
 use codex_app_server_protocol::ListConversationsParams;
 use codex_app_server_protocol::ListConversationsResponse;
+use codex_app_server_protocol::ListModelsParams;
+use codex_app_server_protocol::ListModelsResponse;
 use codex_app_server_protocol::LoginApiKeyParams;
 use codex_app_server_protocol::LoginApiKeyResponse;
 use codex_app_server_protocol::LoginChatGptCompleteNotification;

@@ -111,7 +114,6 @@ use uuid::Uuid;
 
 // Duration before a ChatGPT login attempt is abandoned.
 const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
-
 struct ActiveLogin {
     shutdown_handle: ShutdownHandle,
     login_id: Uuid,

@@ -172,6 +174,9 @@ impl CodexMessageProcessor {
             ClientRequest::ListConversations { request_id, params } => {
                 self.handle_list_conversations(request_id, params).await;
             }
+            ClientRequest::ListModels { request_id, params } => {
+                self.list_models(request_id, params).await;
+            }
             ClientRequest::ResumeConversation { request_id, params } => {
                 self.handle_resume_conversation(request_id, params).await;
             }

@@ -831,6 +836,58 @@ impl CodexMessageProcessor {
         self.outgoing.send_response(request_id, response).await;
     }
 
+    async fn list_models(&self, request_id: RequestId, params: ListModelsParams) {
+        let ListModelsParams { page_size, cursor } = params;
+        let models = supported_models();
+        let total = models.len();
+
+        if total == 0 {
+            let response = ListModelsResponse {
+                items: Vec::new(),
+                next_cursor: None,
+            };
+            self.outgoing.send_response(request_id, response).await;
+            return;
+        }
+
+        let effective_page_size = page_size.unwrap_or(total).max(1).min(total);
+        let start = match cursor {
+            Some(cursor) => match cursor.parse::<usize>() {
+                Ok(idx) => idx,
+                Err(_) => {
+                    let error = JSONRPCErrorError {
+                        code: INVALID_REQUEST_ERROR_CODE,
+                        message: format!("invalid cursor: {cursor}"),
+                        data: None,
+                    };
+                    self.outgoing.send_error(request_id, error).await;
+                    return;
+                }
+            },
+            None => 0,
+        };
+
+        if start > total {
+            let error = JSONRPCErrorError {
+                code: INVALID_REQUEST_ERROR_CODE,
+                message: format!("cursor {start} exceeds total models {total}"),
+                data: None,
+            };
+            self.outgoing.send_error(request_id, error).await;
+            return;
+        }
+
+        let end = start.saturating_add(effective_page_size).min(total);
+        let items = models[start..end].to_vec();
+        let next_cursor = if end < total {
+            Some(end.to_string())
+        } else {
+            None
+        };
+        let response = ListModelsResponse { items, next_cursor };
+        self.outgoing.send_response(request_id, response).await;
+    }
+
     async fn handle_resume_conversation(
         &self,
         request_id: RequestId,

@@ -27,6 +27,7 @@ mod codex_message_processor;
 mod error_code;
 mod fuzzy_file_search;
 mod message_processor;
+mod models;
 mod outgoing_message;
 
 /// Size of the bounded channels used to communicate between tasks. The value

codex-rs/app-server/src/models.rs (new file, 38 lines)
@@ -0,0 +1,38 @@
+use codex_app_server_protocol::Model;
+use codex_app_server_protocol::ReasoningEffortOption;
+use codex_common::model_presets::ModelPreset;
+use codex_common::model_presets::ReasoningEffortPreset;
+use codex_common::model_presets::builtin_model_presets;
+
+pub fn supported_models() -> Vec<Model> {
+    builtin_model_presets(None)
+        .into_iter()
+        .map(model_from_preset)
+        .collect()
+}
+
+fn model_from_preset(preset: ModelPreset) -> Model {
+    Model {
+        id: preset.id.to_string(),
+        model: preset.model.to_string(),
+        display_name: preset.display_name.to_string(),
+        description: preset.description.to_string(),
+        supported_reasoning_efforts: reasoning_efforts_from_preset(
+            preset.supported_reasoning_efforts,
+        ),
+        default_reasoning_effort: preset.default_reasoning_effort,
+        is_default: preset.is_default,
+    }
+}
+
+fn reasoning_efforts_from_preset(
+    efforts: &'static [ReasoningEffortPreset],
+) -> Vec<ReasoningEffortOption> {
+    efforts
+        .iter()
+        .map(|preset| ReasoningEffortOption {
+            reasoning_effort: preset.effort,
+            description: preset.description.to_string(),
+        })
+        .collect()
+}

@@ -21,6 +21,7 @@ use codex_app_server_protocol::GetAuthStatusParams;
 use codex_app_server_protocol::InitializeParams;
 use codex_app_server_protocol::InterruptConversationParams;
 use codex_app_server_protocol::ListConversationsParams;
+use codex_app_server_protocol::ListModelsParams;
 use codex_app_server_protocol::LoginApiKeyParams;
 use codex_app_server_protocol::NewConversationParams;
 use codex_app_server_protocol::RemoveConversationListenerParams;

@@ -264,6 +265,15 @@ impl McpProcess {
         self.send_request("listConversations", params).await
     }
 
+    /// Send a `model/list` JSON-RPC request.
+    pub async fn send_list_models_request(
+        &mut self,
+        params: ListModelsParams,
+    ) -> anyhow::Result<i64> {
+        let params = Some(serde_json::to_value(params)?);
+        self.send_request("model/list", params).await
+    }
+
     /// Send a `resumeConversation` JSON-RPC request.
     pub async fn send_resume_conversation_request(
         &mut self,

@@ -7,6 +7,7 @@ mod fuzzy_file_search;
 mod interrupt;
 mod list_resume;
 mod login;
+mod model_list;
 mod rate_limits;
 mod send_message;
 mod set_default_model;

codex-rs/app-server/tests/suite/model_list.rs (new file, 183 lines)
@@ -0,0 +1,183 @@
+use std::time::Duration;
+
+use anyhow::Result;
+use anyhow::anyhow;
+use app_test_support::McpProcess;
+use app_test_support::to_response;
+use codex_app_server_protocol::JSONRPCError;
+use codex_app_server_protocol::JSONRPCResponse;
+use codex_app_server_protocol::ListModelsParams;
+use codex_app_server_protocol::ListModelsResponse;
+use codex_app_server_protocol::Model;
+use codex_app_server_protocol::ReasoningEffortOption;
+use codex_app_server_protocol::RequestId;
+use codex_protocol::config_types::ReasoningEffort;
+use pretty_assertions::assert_eq;
+use tempfile::TempDir;
+use tokio::time::timeout;
+
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
+const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
+    let codex_home = TempDir::new()?;
+    let mut mcp = McpProcess::new(codex_home.path()).await?;
+
+    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
+
+    let request_id = mcp
+        .send_list_models_request(ListModelsParams {
+            page_size: Some(100),
+            cursor: None,
+        })
+        .await?;
+
+    let response: JSONRPCResponse = timeout(
+        DEFAULT_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
+    )
+    .await??;
+
+    let ListModelsResponse { items, next_cursor } = to_response::<ListModelsResponse>(response)?;
+
+    let expected_models = vec![
+        Model {
+            id: "gpt-5-codex".to_string(),
+            model: "gpt-5-codex".to_string(),
+            display_name: "gpt-5-codex".to_string(),
+            description: "Optimized for coding tasks with many tools.".to_string(),
+            supported_reasoning_efforts: vec![
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::Low,
+                    description: "Fastest responses with limited reasoning".to_string(),
+                },
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::Medium,
+                    description: "Dynamically adjusts reasoning based on the task".to_string(),
+                },
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::High,
+                    description: "Maximizes reasoning depth for complex or ambiguous problems"
+                        .to_string(),
+                },
+            ],
+            default_reasoning_effort: ReasoningEffort::Medium,
+            is_default: true,
+        },
+        Model {
+            id: "gpt-5".to_string(),
+            model: "gpt-5".to_string(),
+            display_name: "gpt-5".to_string(),
+            description: "Broad world knowledge with strong general reasoning.".to_string(),
+            supported_reasoning_efforts: vec![
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::Minimal,
+                    description: "Fastest responses with little reasoning".to_string(),
+                },
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::Low,
+                    description: "Balances speed with some reasoning; useful for straightforward \
+                                  queries and short explanations"
+                        .to_string(),
+                },
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::Medium,
+                    description: "Provides a solid balance of reasoning depth and latency for \
+                                  general-purpose tasks"
+                        .to_string(),
+                },
+                ReasoningEffortOption {
+                    reasoning_effort: ReasoningEffort::High,
+                    description: "Maximizes reasoning depth for complex or ambiguous problems"
+                        .to_string(),
+                },
+            ],
+            default_reasoning_effort: ReasoningEffort::Medium,
+            is_default: false,
+        },
+    ];
+
+    assert_eq!(items, expected_models);
+    assert!(next_cursor.is_none());
+    Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn list_models_pagination_works() -> Result<()> {
+    let codex_home = TempDir::new()?;
+    let mut mcp = McpProcess::new(codex_home.path()).await?;
+
+    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
+
+    let first_request = mcp
+        .send_list_models_request(ListModelsParams {
+            page_size: Some(1),
+            cursor: None,
+        })
+        .await?;
+
+    let first_response: JSONRPCResponse = timeout(
+        DEFAULT_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
+    )
+    .await??;
+
+    let ListModelsResponse {
+        items: first_items,
+        next_cursor: first_cursor,
+    } = to_response::<ListModelsResponse>(first_response)?;
+
+    assert_eq!(first_items.len(), 1);
+    assert_eq!(first_items[0].id, "gpt-5-codex");
+    let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
+
+    let second_request = mcp
+        .send_list_models_request(ListModelsParams {
+            page_size: Some(1),
+            cursor: Some(next_cursor.clone()),
+        })
+        .await?;
+
+    let second_response: JSONRPCResponse = timeout(
+        DEFAULT_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
+    )
+    .await??;
+
+    let ListModelsResponse {
+        items: second_items,
+        next_cursor: second_cursor,
+    } = to_response::<ListModelsResponse>(second_response)?;
+
+    assert_eq!(second_items.len(), 1);
+    assert_eq!(second_items[0].id, "gpt-5");
+    assert!(second_cursor.is_none());
+    Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn list_models_rejects_invalid_cursor() -> Result<()> {
+    let codex_home = TempDir::new()?;
+    let mut mcp = McpProcess::new(codex_home.path()).await?;
+
+    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
+
+    let request_id = mcp
+        .send_list_models_request(ListModelsParams {
+            page_size: None,
+            cursor: Some("invalid".to_string()),
+        })
+        .await?;
+
+    let error: JSONRPCError = timeout(
+        DEFAULT_TIMEOUT,
+        mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
+    )
+    .await??;
+
+    assert_eq!(error.id, RequestId::Integer(request_id));
+    assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
+    assert_eq!(error.error.message, "invalid cursor: invalid");
+    Ok(())
+}

@@ -1,73 +1,96 @@
 use codex_app_server_protocol::AuthMode;
 use codex_core::protocol_config_types::ReasoningEffort;
 
-/// A simple preset pairing a model slug with a reasoning effort.
+/// A reasoning effort option that can be surfaced for a model.
+#[derive(Debug, Clone, Copy)]
+pub struct ReasoningEffortPreset {
+    /// Effort level that the model supports.
+    pub effort: ReasoningEffort,
+    /// Short human description shown next to the effort in UIs.
+    pub description: &'static str,
+}
+
+/// Metadata describing a Codex-supported model.
 #[derive(Debug, Clone, Copy)]
 pub struct ModelPreset {
     /// Stable identifier for the preset.
     pub id: &'static str,
-    /// Display label shown in UIs.
-    pub label: &'static str,
-    /// Short human description shown next to the label in UIs.
-    pub description: &'static str,
     /// Model slug (e.g., "gpt-5").
     pub model: &'static str,
-    /// Reasoning effort to apply for this preset.
-    pub effort: Option<ReasoningEffort>,
+    /// Display name shown in UIs.
+    pub display_name: &'static str,
+    /// Short human description shown in UIs.
+    pub description: &'static str,
+    /// Reasoning effort applied when none is explicitly chosen.
+    pub default_reasoning_effort: ReasoningEffort,
+    /// Supported reasoning effort options.
+    pub supported_reasoning_efforts: &'static [ReasoningEffortPreset],
+    /// Whether this is the default model for new users.
+    pub is_default: bool,
 }
 
 const PRESETS: &[ModelPreset] = &[
     ModelPreset {
-        id: "gpt-5-codex-low",
-        label: "gpt-5-codex low",
-        description: "Fastest responses with limited reasoning",
+        id: "gpt-5-codex",
         model: "gpt-5-codex",
-        effort: Some(ReasoningEffort::Low),
+        display_name: "gpt-5-codex",
+        description: "Optimized for coding tasks with many tools.",
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: &[
+            ReasoningEffortPreset {
+                effort: ReasoningEffort::Low,
+                description: "Fastest responses with limited reasoning",
+            },
+            ReasoningEffortPreset {
+                effort: ReasoningEffort::Medium,
+                description: "Dynamically adjusts reasoning based on the task",
+            },
+            ReasoningEffortPreset {
+                effort: ReasoningEffort::High,
+                description: "Maximizes reasoning depth for complex or ambiguous problems",
+            },
+        ],
+        is_default: true,
     },
     ModelPreset {
-        id: "gpt-5-codex-medium",
-        label: "gpt-5-codex medium",
-        description: "Dynamically adjusts reasoning based on the task",
-        model: "gpt-5-codex",
-        effort: Some(ReasoningEffort::Medium),
-    },
-    ModelPreset {
-        id: "gpt-5-codex-high",
-        label: "gpt-5-codex high",
-        description: "Maximizes reasoning depth for complex or ambiguous problems",
-        model: "gpt-5-codex",
-        effort: Some(ReasoningEffort::High),
-    },
-    ModelPreset {
-        id: "gpt-5-minimal",
-        label: "gpt-5 minimal",
-        description: "Fastest responses with little reasoning",
+        id: "gpt-5",
         model: "gpt-5",
-        effort: Some(ReasoningEffort::Minimal),
-    },
-    ModelPreset {
-        id: "gpt-5-low",
-        label: "gpt-5 low",
-        description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
-        model: "gpt-5",
-        effort: Some(ReasoningEffort::Low),
-    },
-    ModelPreset {
-        id: "gpt-5-medium",
-        label: "gpt-5 medium",
-        description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
-        model: "gpt-5",
-        effort: Some(ReasoningEffort::Medium),
-    },
-    ModelPreset {
-        id: "gpt-5-high",
-        label: "gpt-5 high",
-        description: "Maximizes reasoning depth for complex or ambiguous problems",
-        model: "gpt-5",
-        effort: Some(ReasoningEffort::High),
+        display_name: "gpt-5",
+        description: "Broad world knowledge with strong general reasoning.",
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: &[
+            ReasoningEffortPreset {
+                effort: ReasoningEffort::Minimal,
+                description: "Fastest responses with little reasoning",
+            },
+            ReasoningEffortPreset {
+                effort: ReasoningEffort::Low,
+                description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
+            },
+            ReasoningEffortPreset {
+                effort: ReasoningEffort::Medium,
+                description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
+            },
+            ReasoningEffortPreset {
+                effort: ReasoningEffort::High,
+                description: "Maximizes reasoning depth for complex or ambiguous problems",
+            },
+        ],
+        is_default: false,
     },
 ];
 
 pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
     PRESETS.to_vec()
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn only_one_default_model_is_configured() {
+        let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
+        assert!(default_models == 1);
+    }
+}

@@ -19,6 +19,7 @@ At a glance:
   - `listConversations`, `resumeConversation`, `archiveConversation`
 - Configuration and info
   - `getUserSavedConfig`, `setDefaultModel`, `getUserAgent`, `userInfo`
+  - `model/list` → enumerate available models and reasoning options
 - Auth
   - `loginApiKey`, `loginChatGpt`, `cancelLoginChatGpt`, `logoutChatGpt`, `getAuthStatus`
 - Utilities

@@ -73,6 +74,24 @@ Interrupt a running turn: `interruptConversation`.
 
 List/resume/archive: `listConversations`, `resumeConversation`, `archiveConversation`.
 
+## Models
+
+Fetch the catalog of models available in the current Codex build with `model/list`. The request accepts optional pagination inputs:
+
+- `pageSize` – number of models to return (defaults to a server-selected value)
+- `cursor` – opaque string from the previous response’s `nextCursor`
+
+Each response yields:
+
+- `items` – ordered list of models. A model includes:
+  - `id`, `model`, `displayName`, `description`
+  - `supportedReasoningEfforts` – array of objects with:
+    - `reasoningEffort` – one of `minimal|low|medium|high`
+    - `description` – human-friendly label for the effort
+  - `defaultReasoningEffort` – suggested effort for the UI
+  - `isDefault` – whether the model is recommended for most users
+- `nextCursor` – pass into the next request to continue paging (optional)
+
 ## Event stream
 
 While a conversation runs, the server sends notifications:

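To make the paging contract concrete, here is a hypothetical two-page walk through the current two-model catalog (abridged). The handler in this PR happens to encode the next start index as the cursor string, but clients should treat `nextCursor` as opaque:

```jsonc
// Page 1: request a single model.
{ "method": "model/list", "id": 1, "params": { "pageSize": 1 } }
// Result: the first item plus a cursor for the next page.
{ "items": [{ "id": "gpt-5-codex" /* ... */ }], "nextCursor": "1" }

// Page 2: resume from the cursor.
{ "method": "model/list", "id": 2, "params": { "pageSize": 1, "cursor": "1" } }
// Result: the last item; the missing nextCursor signals the end of the listing.
{ "items": [{ "id": "gpt-5" /* ... */ }] }
```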
@@ -354,8 +354,8 @@ impl App {
                     self.config.model_family = family;
                 }
             }
-            AppEvent::OpenReasoningPopup { model, presets } => {
-                self.chat_widget.open_reasoning_popup(model, presets);
+            AppEvent::OpenReasoningPopup { model } => {
+                self.chat_widget.open_reasoning_popup(model);
             }
             AppEvent::OpenFullAccessConfirmation { preset } => {
                 self.chat_widget.open_full_access_confirmation(preset);

@@ -64,8 +64,7 @@ pub(crate) enum AppEvent {
 
     /// Open the reasoning selection popup after picking a model.
     OpenReasoningPopup {
-        model: String,
-        presets: Vec<ModelPreset>,
+        model: ModelPreset,
     },
 
     /// Open the confirmation prompt before enabling full access mode.

@@ -302,16 +302,6 @@ fn create_initial_user_message(text: String, image_paths: Vec<PathBuf>) -> Optio
 }
 
 impl ChatWidget {
-    fn model_description_for(slug: &str) -> Option<&'static str> {
-        if slug.starts_with("gpt-5-codex") {
-            Some("Optimized for coding tasks with many tools.")
-        } else if slug.starts_with("gpt-5") {
-            Some("Broad world knowledge with strong general reasoning.")
-        } else {
-            None
-        }
-    }
-
     fn flush_answer_stream_with_separator(&mut self) {
         if let Some(mut controller) = self.stream_controller.take()
             && let Some(cell) = controller.finalize()

@@ -1661,39 +1651,22 @@ impl ChatWidget {
         let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
         let presets: Vec<ModelPreset> = builtin_model_presets(auth_mode);
 
-        let mut grouped: Vec<(&str, Vec<ModelPreset>)> = Vec::new();
-        for preset in presets.into_iter() {
-            if let Some((_, entries)) = grouped.iter_mut().find(|(model, _)| *model == preset.model)
-            {
-                entries.push(preset);
-            } else {
-                grouped.push((preset.model, vec![preset]));
-            }
-        }
-
         let mut items: Vec<SelectionItem> = Vec::new();
-        for (model_slug, entries) in grouped.into_iter() {
-            let name = model_slug.to_string();
-            let description = Self::model_description_for(model_slug)
-                .map(std::string::ToString::to_string)
-                .or_else(|| {
-                    entries
-                        .iter()
-                        .find(|preset| !preset.description.is_empty())
-                        .map(|preset| preset.description.to_string())
-                })
-                .or_else(|| entries.first().map(|preset| preset.description.to_string()));
-            let is_current = model_slug == current_model;
-            let model_slug_string = model_slug.to_string();
-            let presets_for_model = entries.clone();
+        for preset in presets.into_iter() {
+            let description = if preset.description.is_empty() {
+                None
+            } else {
+                Some(preset.description.to_string())
+            };
+            let is_current = preset.model == current_model;
+            let preset_for_action = preset;
             let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
                 tx.send(AppEvent::OpenReasoningPopup {
-                    model: model_slug_string.clone(),
-                    presets: presets_for_model.clone(),
+                    model: preset_for_action,
                 });
             })];
             items.push(SelectionItem {
-                name,
+                name: preset.display_name.to_string(),
                 description,
                 is_current,
                 actions,

@@ -1712,28 +1685,22 @@ impl ChatWidget {
     }
 
     /// Open a popup to choose the reasoning effort (stage 2) for the given model.
-    pub(crate) fn open_reasoning_popup(&mut self, model_slug: String, presets: Vec<ModelPreset>) {
-        let default_effort = ReasoningEffortConfig::default();
-
-        let has_none_choice = presets.iter().any(|preset| preset.effort.is_none());
+    pub(crate) fn open_reasoning_popup(&mut self, preset: ModelPreset) {
+        let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;
+        let supported = preset.supported_reasoning_efforts;
+
         struct EffortChoice {
             stored: Option<ReasoningEffortConfig>,
             display: ReasoningEffortConfig,
         }
         let mut choices: Vec<EffortChoice> = Vec::new();
        for effort in ReasoningEffortConfig::iter() {
-            if presets.iter().any(|preset| preset.effort == Some(effort)) {
+            if supported.iter().any(|option| option.effort == effort) {
                 choices.push(EffortChoice {
                     stored: Some(effort),
                     display: effort,
                 });
             }
-            if has_none_choice && default_effort == effort {
-                choices.push(EffortChoice {
-                    stored: None,
-                    display: effort,
-                });
-            }
         }
         if choices.is_empty() {
             choices.push(EffortChoice {

@@ -1742,21 +1709,16 @@ impl ChatWidget {
             });
         }
 
-        let default_choice: Option<ReasoningEffortConfig> = if has_none_choice {
-            None
-        } else if choices
-            .iter()
-            .any(|choice| choice.stored == Some(default_effort))
-        {
-            Some(default_effort)
-        } else {
-            choices
-                .iter()
-                .find_map(|choice| choice.stored)
-                .or(Some(default_effort))
-        };
+        let default_choice: Option<ReasoningEffortConfig> = choices
+            .iter()
+            .any(|choice| choice.stored == Some(default_effort))
+            .then_some(Some(default_effort))
+            .flatten()
+            .or_else(|| choices.iter().find_map(|choice| choice.stored))
+            .or(Some(default_effort));
 
-        let is_current_model = self.config.model == model_slug;
+        let model_slug = preset.model.to_string();
+        let is_current_model = self.config.model == preset.model;
         let highlight_choice = if is_current_model {
             self.config.model_reasoning_effort
         } else {

@@ -1773,19 +1735,19 @@ impl ChatWidget {
                 effort_label.push_str(" (default)");
             }
 
-            let description = presets
-                .iter()
-                .find(|preset| preset.effort == choice.stored && !preset.description.is_empty())
-                .map(|preset| preset.description.to_string())
-                .or_else(|| {
-                    presets
-                        .iter()
-                        .find(|preset| preset.effort == choice.stored)
-                        .map(|preset| preset.description.to_string())
-                });
+            let description = choice
+                .stored
+                .and_then(|effort| {
+                    supported
+                        .iter()
+                        .find(|option| option.effort == effort)
+                        .map(|option| option.description.to_string())
+                })
+                .filter(|text| !text.is_empty());
 
             let warning = "⚠ High reasoning effort can quickly consume Plus plan rate limits.";
-            let show_warning = model_slug == "gpt-5-codex" && effort == ReasoningEffortConfig::High;
+            let show_warning =
+                preset.model == "gpt-5-codex" && effort == ReasoningEffortConfig::High;
             let selected_description = show_warning.then(|| {
                 description
                     .as_ref()

@@ -1122,11 +1122,11 @@ fn model_reasoning_selection_popup_snapshot() {
     chat.config.model = "gpt-5-codex".to_string();
     chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High);
 
-    let presets = builtin_model_presets(None)
+    let preset = builtin_model_presets(None)
         .into_iter()
-        .filter(|preset| preset.model == "gpt-5-codex")
-        .collect::<Vec<_>>();
-    chat.open_reasoning_popup("gpt-5-codex".to_string(), presets);
+        .find(|preset| preset.model == "gpt-5-codex")
+        .expect("gpt-5-codex preset");
+    chat.open_reasoning_popup(preset);
 
     let popup = render_bottom_popup(&chat, 80);
     assert_snapshot!("model_reasoning_selection_popup", popup);

@@ -1141,9 +1141,9 @@ fn reasoning_popup_escape_returns_to_model_popup() {
 
     let presets = builtin_model_presets(None)
         .into_iter()
-        .filter(|preset| preset.model == "gpt-5-codex")
-        .collect::<Vec<_>>();
-    chat.open_reasoning_popup("gpt-5-codex".to_string(), presets);
+        .find(|preset| preset.model == "gpt-5-codex")
+        .expect("gpt-5-codex preset");
+    chat.open_reasoning_popup(presets);
 
     let before_escape = render_bottom_popup(&chat, 80);
     assert!(before_escape.contains("Select Reasoning Level"));