Add the following fields to Thread:
```
pub preview: String,
pub model_provider: String,
pub created_at: i64,
```
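For orientation, the updated struct might look roughly like the sketch below. Only the three new fields come from this PR; the derives, the existing `id` field, and any serde attributes shown here are assumptions, not the actual definition in codex_app_server_protocol.

```rust
use serde::{Deserialize, Serialize};

// Sketch only: the real Thread lives in codex_app_server_protocol and its
// derives, attributes, and existing fields may differ.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Thread {
    /// Existing field; the test below asserts it is non-empty.
    pub id: String,
    /// New: short preview text, empty for a freshly started thread.
    pub preview: String,
    /// New: provider key from config.toml, e.g. "mock_provider".
    pub model_provider: String,
    /// New: creation time as a UNIX timestamp (the test only asserts it is positive).
    pub created_at: i64,
}
```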
Will probably need another PR once this lands:
https://github.com/openai/codex/pull/6337
```rust
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
    // Provide a mock server and config so model wiring is valid.
    let server = create_mock_chat_completions_server(vec![]).await;

    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    // Start server and initialize.
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Start a v2 thread with an explicit model override.
    let req_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("gpt-5".to_string()),
            ..Default::default()
        })
        .await?;

    // Expect a proper JSON-RPC response with a thread id.
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
    )
    .await??;
    let ThreadStartResponse { thread } = to_response::<ThreadStartResponse>(resp)?;
    assert!(!thread.id.is_empty(), "thread id should not be empty");
    assert!(
        thread.preview.is_empty(),
        "new threads should start with an empty preview"
    );
    assert_eq!(thread.model_provider, "mock_provider");
    assert!(
        thread.created_at > 0,
        "created_at should be a positive UNIX timestamp"
    );

    // A corresponding thread/started notification should arrive.
    let notif: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("thread/started"),
    )
    .await??;
    let started: ThreadStartedNotification =
        serde_json::from_value(notif.params.expect("params must be present"))?;
    assert_eq!(started.thread, thread);

    Ok(())
}

// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
        ),
    )
}
```
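For what the assertions imply on the wire: the `thread/started` params should carry the full thread object. Below is a rough sketch of that payload built with serde_json purely for illustration; the field casing and exact envelope are assumptions (they depend on the protocol's serde configuration), and all values are placeholders.

```rust
use serde_json::json;

fn main() {
    // Illustrative shape of the `params` the test deserializes into
    // ThreadStartedNotification; field casing is assumed to follow the
    // Rust field names, and all values are placeholders.
    let params = json!({
        "thread": {
            "id": "thr_123",                   // placeholder id
            "preview": "",                      // empty for a brand-new thread
            "model_provider": "mock_provider",  // matches the provider key in config.toml
            "created_at": 1_700_000_000_i64     // some positive UNIX timestamp
        }
    });
    println!("{params:#}");
}
```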