diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs index 0f7f51c2..b9ea6b13 100644 --- a/codex-rs/core/src/client.rs +++ b/codex-rs/core/src/client.rs @@ -207,7 +207,14 @@ impl ModelClient { } } - let req_builder = self.provider.apply_http_headers(req_builder); + req_builder = self.provider.apply_http_headers(req_builder); + + let originator = self + .config + .internal_originator + .as_deref() + .unwrap_or("codex_cli_rs"); + req_builder = req_builder.header("originator", originator); let res = req_builder.send().await; if let Ok(resp) = &res { diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs index a65ec096..1b8da894 100644 --- a/codex-rs/core/src/config.rs +++ b/codex-rs/core/src/config.rs @@ -146,6 +146,9 @@ pub struct Config { /// Include an experimental plan tool that the model can use to update its current plan and status of each step. pub include_plan_tool: bool, + + /// The value for the `originator` header included with Responses API requests. + pub internal_originator: Option<String>, } impl Config { @@ -336,6 +339,9 @@ pub struct ConfigToml { /// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS. pub experimental_instructions_file: Option<PathBuf>, + + /// The value for the `originator` header included with Responses API requests. 
+ pub internal_originator: Option<String>, } impl ConfigToml { @@ -529,6 +535,7 @@ impl Config { experimental_resume, include_plan_tool: include_plan_tool.unwrap_or(false), + internal_originator: cfg.internal_originator, }; Ok(config) } @@ -887,6 +894,7 @@ disable_response_storage = true experimental_resume: None, base_instructions: None, include_plan_tool: false, + internal_originator: None, }, o3_profile_config ); @@ -936,6 +944,7 @@ disable_response_storage = true experimental_resume: None, base_instructions: None, include_plan_tool: false, + internal_originator: None, }; assert_eq!(expected_gpt3_profile_config, gpt3_profile_config); @@ -1000,6 +1009,7 @@ disable_response_storage = true experimental_resume: None, base_instructions: None, include_plan_tool: false, + internal_originator: None, }; assert_eq!(expected_zdr_profile_config, zdr_profile_config); diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs index 4640f53a..29366377 100644 --- a/codex-rs/core/src/model_provider_info.rs +++ b/codex-rs/core/src/model_provider_info.rs @@ -12,10 +12,6 @@ use std::env::VarError; use std::time::Duration; use crate::error::EnvVarError; - -/// Value for the `OpenAI-Originator` header that is sent with requests to -/// OpenAI. 
-const OPENAI_ORIGINATOR_HEADER: &str = "codex_cli_rs"; const DEFAULT_STREAM_IDLE_TIMEOUT_MS: u64 = 300_000; const DEFAULT_STREAM_MAX_RETRIES: u64 = 10; const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4; @@ -229,15 +225,9 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> { wire_api: WireApi::Responses, query_params: None, http_headers: Some( - [ - ( - "originator".to_string(), - OPENAI_ORIGINATOR_HEADER.to_string(), - ), - ("version".to_string(), env!("CARGO_PKG_VERSION").to_string()), - ] - .into_iter() - .collect(), + [("version".to_string(), env!("CARGO_PKG_VERSION").to_string())] + .into_iter() + .collect(), ), env_http_headers: Some( [ diff --git a/codex-rs/core/tests/client.rs b/codex-rs/core/tests/client.rs index bfa29657..a22a9438 100644 --- a/codex-rs/core/tests/client.rs +++ b/codex-rs/core/tests/client.rs @@ -95,8 +95,8 @@ async fn includes_session_id_and_model_headers_in_request() { // get request from the server let request = &server.received_requests().await.unwrap()[0]; let request_session_id = request.headers.get("session_id").unwrap(); - let request_originator = request.headers.get("originator").unwrap(); let request_authorization = request.headers.get("authorization").unwrap(); + let request_originator = request.headers.get("originator").unwrap(); assert!(current_session_id.is_some()); assert_eq!( @@ -170,6 +170,59 @@ async fn includes_base_instructions_override_in_request() { ); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn originator_config_override_is_used() { + #![allow(clippy::unwrap_used)] + + // Mock server + let server = MockServer::start().await; + + let first = ResponseTemplate::new(200) + .insert_header("content-type", "text/event-stream") + .set_body_raw(sse_completed("resp1"), "text/event-stream"); + + Mock::given(method("POST")) + .and(path("/v1/responses")) + .respond_with(first) + .expect(1) + .mount(&server) + .await; + + let model_provider = ModelProviderInfo { + base_url: Some(format!("{}/v1", server.uri())), + 
..built_in_model_providers()["openai"].clone() }; + + let codex_home = TempDir::new().unwrap(); + let mut config = load_default_config_for_test(&codex_home); + config.model_provider = model_provider; + config.internal_originator = Some("my_override".to_string()); + + let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new()); + let CodexSpawnOk { codex, .. } = Codex::spawn( + config, + Some(CodexAuth::from_api_key("Test API Key".to_string())), + ctrl_c.clone(), + ) + .await + .unwrap(); + + codex + .submit(Op::UserInput { + items: vec![InputItem::Text { + text: "hello".into(), + }], + }) + .await + .unwrap(); + + wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; + + let request = &server.received_requests().await.unwrap()[0]; + let request_originator = request.headers.get("originator").unwrap(); + assert_eq!(request_originator.to_str().unwrap(), "my_override"); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn chatgpt_auth_sends_correct_request() { #![allow(clippy::unwrap_used)] @@ -235,8 +288,8 @@ async fn chatgpt_auth_sends_correct_request() { // get request from the server let request = &server.received_requests().await.unwrap()[0]; let request_session_id = request.headers.get("session_id").unwrap(); - let request_originator = request.headers.get("originator").unwrap(); let request_authorization = request.headers.get("authorization").unwrap(); + let request_originator = request.headers.get("originator").unwrap(); let request_chatgpt_account_id = request.headers.get("chatgpt-account-id").unwrap(); let request_body = request.body_json::<serde_json::Value>().unwrap();