This is a substantial PR that adds support for the chat completions API, which in turn makes it possible to use non-OpenAI model providers (just like in the TypeScript CLI):

* It moves a number of structs from `client.rs` to `client_common.rs` so they can be shared.
* It introduces support for the chat completions API in `chat_completions.rs`.
* It updates `ModelProviderInfo` so that `env_key` is `Option<String>` instead of `String` (needed for providers such as Ollama that require no API key) and adds a `wire_api` field (a rough sketch of the updated types follows this list).
* It updates `client.rs` to choose between `stream_responses()` and `stream_chat_completions()` based on the `wire_api` of the `ModelProviderInfo` (also sketched below).
* It updates the `exec` and TUI CLIs to no longer fail if the `OPENAI_API_KEY` environment variable is not set.
* It updates the TUI so that `EventMsg::Error` is displayed more prominently when it occurs, particularly now that it is important to alert users to the `CodexErr::EnvVar` variant.
* `CodexErr::EnvVar` was updated to include an optional `instructions` field so we can preserve the behavior where we direct users to https://platform.openai.com if `OPENAI_API_KEY` is not set (sketched below).
* It cleans up the "welcome message" in the TUI to ensure the model provider is displayed.
* It updates the docs in `codex-rs/README.md`.

To exercise the chat completions API with OpenAI models, I added the following to my `config.toml`:

```toml
model = "gpt-4o"
model_provider = "openai-chat-completions"

[model_providers.openai-chat-completions]
name = "OpenAI using Chat Completions"
base_url = "https://api.openai.com/v1"
env_key = "OPENAI_API_KEY"
wire_api = "chat"
```

To test a non-OpenAI provider, I installed Ollama with Mistral locally on my Mac because ChatGPT said that would be a good match for my hardware:

```shell
brew install ollama
ollama serve
ollama pull mistral
```

Then I added the following to my `~/.codex/config.toml`:

```toml
model = "mistral"
model_provider = "ollama"
```

Note this code could certainly use more test coverage, but I want to get it in so folks can start playing with it. For reference, I believe https://github.com/openai/codex/pull/247 was roughly the comparable PR on the TypeScript side.
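As a rough sketch of the updated provider types: the field names below are taken from the test included with this PR, but the derives, visibility, and doc comments are assumptions for illustration only.

```rust
/// Which wire protocol a provider speaks. `"chat"` is the value used in the
/// config example above; `Responses` is presumably the default.
pub enum WireApi {
    Responses,
    Chat,
}

pub struct ModelProviderInfo {
    pub name: String,
    pub base_url: String,
    /// Now optional, so providers such as Ollama that need no API key can
    /// omit it entirely.
    pub env_key: Option<String>,
    /// Extra guidance to show the user when `env_key` is set but the
    /// environment variable is missing.
    pub env_key_instructions: Option<String>,
    /// Selects between the Responses API and the chat completions API.
    pub wire_api: WireApi,
}
```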
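The dispatch in `client.rs` can be pictured roughly as follows. `stream_responses` and `stream_chat_completions` are the method names given in the description above; the outer `stream` method, `Prompt`, and `ResponseStream` are assumptions made for the sake of a self-contained sketch.

```rust
// Sketch only: the real code lives in `client.rs`; only the two stream
// method names are taken from the PR description.
impl ModelClient {
    async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream, CodexErr> {
        match self.provider.wire_api {
            // Existing path: the OpenAI Responses API.
            WireApi::Responses => self.stream_responses(prompt).await,
            // New path: the chat completions API, usable by non-OpenAI providers.
            WireApi::Chat => self.stream_chat_completions(prompt).await,
        }
    }
}
```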
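The updated error variant can be pictured roughly like this; the `instructions` field is described above, but the exact field names and shape are guesses.

```rust
// Assumed shape of the updated variant; only the optional `instructions`
// field is confirmed by the PR description.
pub enum CodexErr {
    EnvVar {
        /// Name of the missing environment variable, e.g. "OPENAI_API_KEY".
        var: String,
        /// Optional guidance shown to the user, e.g. pointing them to
        /// https://platform.openai.com to create an API key.
        instructions: Option<String>,
    },
    // ... other variants elided
}
```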
The accompanying Rust test file (115 lines, 3.7 KiB):
```rust
//! Verifies that the agent retries when the SSE stream terminates before
//! delivering a `response.completed` event.

use std::time::Duration;

use codex_core::Codex;
use codex_core::ModelProviderInfo;
use codex_core::config::Config;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use tokio::time::timeout;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::Request;
use wiremock::Respond;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

fn sse_incomplete() -> String {
    // Only a single line; missing the completed event.
    "event: response.output_item.done\n\n".to_string()
}

fn sse_completed(id: &str) -> String {
    format!(
        "event: response.completed\n\
         data: {{\"type\":\"response.completed\",\"response\":{{\"id\":\"{}\",\"output\":[]}}}}\n\n\n",
        id
    )
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn retries_on_early_close() {
    #![allow(clippy::unwrap_used)]

    let server = MockServer::start().await;

    struct SeqResponder;
    impl Respond for SeqResponder {
        fn respond(&self, _: &Request) -> ResponseTemplate {
            use std::sync::atomic::AtomicUsize;
            use std::sync::atomic::Ordering;
            static CALLS: AtomicUsize = AtomicUsize::new(0);
            let n = CALLS.fetch_add(1, Ordering::SeqCst);
            if n == 0 {
                ResponseTemplate::new(200)
                    .insert_header("content-type", "text/event-stream")
                    .set_body_raw(sse_incomplete(), "text/event-stream")
            } else {
                ResponseTemplate::new(200)
                    .insert_header("content-type", "text/event-stream")
                    .set_body_raw(sse_completed("resp_ok"), "text/event-stream")
            }
        }
    }

    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .respond_with(SeqResponder {})
        .expect(2)
        .mount(&server)
        .await;

    // Environment
    //
    // As of Rust 2024 `std::env::set_var` has been made `unsafe` because
    // mutating the process environment is inherently racy when other threads
    // are running. We therefore have to wrap every call in an explicit
    // `unsafe` block. These are limited to the test-setup section so the
    // scope is very small and clearly delineated.

    unsafe {
        std::env::set_var("OPENAI_REQUEST_MAX_RETRIES", "0");
        std::env::set_var("OPENAI_STREAM_MAX_RETRIES", "1");
        std::env::set_var("OPENAI_STREAM_IDLE_TIMEOUT_MS", "2000");
    }

    let model_provider = ModelProviderInfo {
        name: "openai".into(),
        base_url: format!("{}/v1", server.uri()),
        // Environment variable that should exist in the test environment.
        // ModelClient will return an error if the environment variable for the
        // provider is not set.
        env_key: Some("PATH".into()),
        env_key_instructions: None,
        wire_api: codex_core::WireApi::Responses,
    };

    let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
    let mut config = Config::load_default_config_for_test();
    config.model_provider = model_provider;
    let (codex, _init_id) = Codex::spawn(config, ctrl_c).await.unwrap();

    codex
        .submit(Op::UserInput {
            items: vec![InputItem::Text {
                text: "hello".into(),
            }],
        })
        .await
        .unwrap();

    // Wait until TaskComplete (should succeed after retry).
    loop {
        let ev = timeout(Duration::from_secs(10), codex.next_event())
            .await
            .unwrap()
            .unwrap();
        if matches!(ev.msg, codex_core::protocol::EventMsg::TaskComplete) {
            break;
        }
    }
}
```