From 97f995a749a2cd8b4edf68d59edf6d18f4363825 Mon Sep 17 00:00:00 2001 From: Ahmed Ibrahim Date: Mon, 18 Aug 2025 20:22:48 -0700 Subject: [PATCH] Show login options when not signed in with ChatGPT (#2440) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Motivation: we have users who use their API key even though they want to use their ChatGPT account. We want to give them the chance to always log in with their account. This PR displays login options when the user is not signed in with ChatGPT. Even if you have set an OpenAI API key as an environment variable, you will still be prompted to log in with ChatGPT. We’ve also added a new config option, `preferred_auth_method` (`"chatgpt"` by default); setting it to `"apikey"` ensures you are never asked to log in with ChatGPT and Codex always defaults to using your API key. https://github.com/user-attachments/assets/b61ebfa9-3c5e-4ab7-bf94-395c23a0e0af After ChatGPT sign in: https://github.com/user-attachments/assets/d58b366b-c46a-428f-a22f-2ac230f991c0 --- README.md | 30 +++ codex-rs/chatgpt/src/chatgpt_token.rs | 3 +- codex-rs/cli/src/login.rs | 2 +- codex-rs/core/src/config.rs | 11 + codex-rs/core/src/conversation_manager.rs | 2 +- codex-rs/core/tests/client.rs | 204 ++++++++++++++++++ codex-rs/login/src/lib.rs | 42 +++- codex-rs/login/src/token_data.rs | 8 +- codex-rs/tui/src/app.rs | 87 +++++++- codex-rs/tui/src/lib.rs | 19 +- codex-rs/tui/src/onboarding/auth.rs | 54 ++++- .../tui/src/onboarding/onboarding_screen.rs | 11 +- 12 files changed, 440 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index 596362a3..f7428dc3 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ - [Authenticate locally and copy your credentials to the "headless" machine](#authenticate-locally-and-copy-your-credentials-to-the-headless-machine) - [Connecting through VPS or remote](#connecting-through-vps-or-remote) - [Usage-based billing alternative: Use an OpenAI API key](#usage-based-billing-alternative-use-an-openai-api-key) 
+ - [Forcing a specific auth method (advanced)](#forcing-a-specific-auth-method-advanced) - [Choosing Codex's level of autonomy](#choosing-codexs-level-of-autonomy) - [**1. Read/write**](#1-readwrite) - [**2. Read-only**](#2-read-only) @@ -165,6 +166,35 @@ Notes: - This command only sets the key for your current terminal session, which we recommend. To set it for all future sessions, you can also add the `export` line to your shell's configuration file (e.g., `~/.zshrc`). - If you have signed in with ChatGPT, Codex will default to using your ChatGPT credits. If you wish to use your API key, use the `/logout` command to clear your ChatGPT authentication. +#### Forcing a specific auth method (advanced) + +You can explicitly choose which authentication Codex should prefer when both are available. + +- To always use your API key (even when ChatGPT auth exists), set: + +```toml +# ~/.codex/config.toml +preferred_auth_method = "apikey" +``` + +Or override ad-hoc via CLI: + +```bash +codex --config preferred_auth_method="apikey" +``` + +- To prefer ChatGPT auth (default), set: + +```toml +# ~/.codex/config.toml +preferred_auth_method = "chatgpt" +``` + +Notes: + +- When `preferred_auth_method = "apikey"` and an API key is available, the login screen is skipped. +- When `preferred_auth_method = "chatgpt"` (default), Codex prefers ChatGPT auth if present; if only an API key is present, it will use the API key. Certain account types may also require API-key mode. + ### Choosing Codex's level of autonomy We always recommend running Codex in its default sandbox that gives you strong guardrails around what the agent can do. The default sandbox prevents it from editing files outside its workspace, or from accessing the network. 
diff --git a/codex-rs/chatgpt/src/chatgpt_token.rs b/codex-rs/chatgpt/src/chatgpt_token.rs index c674afbc..f003c439 100644 --- a/codex-rs/chatgpt/src/chatgpt_token.rs +++ b/codex-rs/chatgpt/src/chatgpt_token.rs @@ -1,3 +1,4 @@ +use codex_login::AuthMode; use codex_login::CodexAuth; use std::path::Path; use std::sync::LazyLock; @@ -19,7 +20,7 @@ pub fn set_chatgpt_token_data(value: TokenData) { /// Initialize the ChatGPT token from auth.json file pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> { - let auth = CodexAuth::from_codex_home(codex_home)?; + let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?; if let Some(auth) = auth { let token_data = auth.get_token_data().await?; set_chatgpt_token_data(token_data); diff --git a/codex-rs/cli/src/login.rs b/codex-rs/cli/src/login.rs index 36bbf220..72eb7b4f 100644 --- a/codex-rs/cli/src/login.rs +++ b/codex-rs/cli/src/login.rs @@ -60,7 +60,7 @@ pub async fn run_login_with_api_key( pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! 
{ let config = load_config_or_exit(cli_config_overrides); - match CodexAuth::from_codex_home(&config.codex_home) { + match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) { Ok(Some(auth)) => match auth.mode { AuthMode::ApiKey => match auth.get_token().await { Ok(api_key) => { diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs index 9c0c3c1c..f20912b2 100644 --- a/codex-rs/core/src/config.rs +++ b/codex-rs/core/src/config.rs @@ -13,6 +13,7 @@ use crate::model_provider_info::built_in_model_providers; use crate::openai_model_info::get_model_info; use crate::protocol::AskForApproval; use crate::protocol::SandboxPolicy; +use codex_login::AuthMode; use codex_protocol::config_types::ReasoningEffort; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::config_types::SandboxMode; @@ -163,6 +164,9 @@ pub struct Config { /// The value for the `originator` header included with Responses API requests. pub internal_originator: Option, + + /// If set to `true`, the API key will be signed with the `originator` header. + pub preferred_auth_method: AuthMode, } impl Config { @@ -409,6 +413,9 @@ pub struct ConfigToml { pub internal_originator: Option, pub projects: Option>, + + /// If set to `true`, the API key will be signed with the `originator` header. 
+ pub preferred_auth_method: Option, } #[derive(Deserialize, Debug, Clone, PartialEq, Eq)] @@ -672,6 +679,7 @@ impl Config { include_plan_tool: include_plan_tool.unwrap_or(false), include_apply_patch_tool: include_apply_patch_tool_val, internal_originator: cfg.internal_originator, + preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT), }; Ok(config) } @@ -1036,6 +1044,7 @@ disable_response_storage = true include_plan_tool: false, include_apply_patch_tool: false, internal_originator: None, + preferred_auth_method: AuthMode::ChatGPT, }, o3_profile_config ); @@ -1088,6 +1097,7 @@ disable_response_storage = true include_plan_tool: false, include_apply_patch_tool: false, internal_originator: None, + preferred_auth_method: AuthMode::ChatGPT, }; assert_eq!(expected_gpt3_profile_config, gpt3_profile_config); @@ -1155,6 +1165,7 @@ disable_response_storage = true include_plan_tool: false, include_apply_patch_tool: false, internal_originator: None, + preferred_auth_method: AuthMode::ChatGPT, }; assert_eq!(expected_zdr_profile_config, zdr_profile_config); diff --git a/codex-rs/core/src/conversation_manager.rs b/codex-rs/core/src/conversation_manager.rs index 48ccdddf..2dc69be4 100644 --- a/codex-rs/core/src/conversation_manager.rs +++ b/codex-rs/core/src/conversation_manager.rs @@ -40,7 +40,7 @@ impl Default for ConversationManager { impl ConversationManager { pub async fn new_conversation(&self, config: Config) -> CodexResult { - let auth = CodexAuth::from_codex_home(&config.codex_home)?; + let auth = CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method)?; self.new_conversation_with_auth(config, auth).await } diff --git a/codex-rs/core/tests/client.rs b/codex-rs/core/tests/client.rs index 2f7977ee..075c496a 100644 --- a/codex-rs/core/tests/client.rs +++ b/codex-rs/core/tests/client.rs @@ -7,6 +7,7 @@ use codex_core::protocol::EventMsg; use codex_core::protocol::InputItem; use codex_core::protocol::Op; use 
codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR; +use codex_login::AuthMode; use codex_login::CodexAuth; use core_test_support::load_default_config_for_test; use core_test_support::load_sse_fixture_with_id; @@ -54,6 +55,59 @@ fn assert_message_ends_with(request_body: &serde_json::Value, text: &str) { ); } +/// Writes an `auth.json` into the provided `codex_home` with the specified parameters. +/// Returns the fake JWT string written to `tokens.id_token`. +#[expect(clippy::unwrap_used)] +fn write_auth_json( + codex_home: &TempDir, + openai_api_key: Option<&str>, + chatgpt_plan_type: &str, + access_token: &str, + account_id: Option<&str>, +) -> String { + use base64::Engine as _; + use serde_json::json; + + let header = json!({ "alg": "none", "typ": "JWT" }); + let payload = json!({ + "email": "user@example.com", + "https://api.openai.com/auth": { + "chatgpt_plan_type": chatgpt_plan_type, + "chatgpt_account_id": account_id.unwrap_or("acc-123") + } + }); + + let b64 = |b: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(b); + let header_b64 = b64(&serde_json::to_vec(&header).unwrap()); + let payload_b64 = b64(&serde_json::to_vec(&payload).unwrap()); + let signature_b64 = b64(b"sig"); + let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}"); + + let mut tokens = json!({ + "id_token": fake_jwt, + "access_token": access_token, + "refresh_token": "refresh-test", + }); + if let Some(acc) = account_id { + tokens["account_id"] = json!(acc); + } + + let auth_json = json!({ + "OPENAI_API_KEY": openai_api_key, + "tokens": tokens, + // RFC3339 datetime; value doesn't matter for these tests + "last_refresh": "2025-08-06T20:41:36.232376Z", + }); + + std::fs::write( + codex_home.path().join("auth.json"), + serde_json::to_string_pretty(&auth_json).unwrap(), + ) + .unwrap(); + + fake_jwt +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_session_id_and_model_headers_in_request() { if 
std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() { @@ -311,6 +365,156 @@ async fn chatgpt_auth_sends_correct_request() { ); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn prefers_chatgpt_token_when_config_prefers_chatgpt() { + if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() { + println!( + "Skipping test because it cannot execute when network is disabled in a Codex sandbox." + ); + return; + } + + // Mock server + let server = MockServer::start().await; + + let first = ResponseTemplate::new(200) + .insert_header("content-type", "text/event-stream") + .set_body_raw(sse_completed("resp1"), "text/event-stream"); + + // Expect ChatGPT base path and correct headers + Mock::given(method("POST")) + .and(path("/v1/responses")) + .and(header_regex("Authorization", r"Bearer Access-123")) + .and(header_regex("chatgpt-account-id", r"acc-123")) + .respond_with(first) + .expect(1) + .mount(&server) + .await; + + let model_provider = ModelProviderInfo { + base_url: Some(format!("{}/v1", server.uri())), + ..built_in_model_providers()["openai"].clone() + }; + + // Init session + let codex_home = TempDir::new().unwrap(); + // Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT. + let _jwt = write_auth_json( + &codex_home, + Some("sk-test-key"), + "pro", + "Access-123", + Some("acc-123"), + ); + + let mut config = load_default_config_for_test(&codex_home); + config.model_provider = model_provider; + config.preferred_auth_method = AuthMode::ChatGPT; + + let conversation_manager = ConversationManager::default(); + let NewConversation { + conversation: codex, + .. 
+ } = conversation_manager + .new_conversation(config) + .await + .expect("create new conversation"); + + codex + .submit(Op::UserInput { + items: vec![InputItem::Text { + text: "hello".into(), + }], + }) + .await + .unwrap(); + + wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; + + // verify request body flags + let request = &server.received_requests().await.unwrap()[0]; + let request_body = request.body_json::().unwrap(); + assert!( + !request_body["store"].as_bool().unwrap(), + "store should be false for ChatGPT auth" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() { + if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() { + println!( + "Skipping test because it cannot execute when network is disabled in a Codex sandbox." + ); + return; + } + + // Mock server + let server = MockServer::start().await; + + let first = ResponseTemplate::new(200) + .insert_header("content-type", "text/event-stream") + .set_body_raw(sse_completed("resp1"), "text/event-stream"); + + // Expect API key header, no ChatGPT account header required. + Mock::given(method("POST")) + .and(path("/v1/responses")) + .and(header_regex("Authorization", r"Bearer sk-test-key")) + .respond_with(first) + .expect(1) + .mount(&server) + .await; + + let model_provider = ModelProviderInfo { + base_url: Some(format!("{}/v1", server.uri())), + ..built_in_model_providers()["openai"].clone() + }; + + // Init session + let codex_home = TempDir::new().unwrap(); + // Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT, + // but config will force API key preference. 
+ let _jwt = write_auth_json( + &codex_home, + Some("sk-test-key"), + "pro", + "Access-123", + Some("acc-123"), + ); + + let mut config = load_default_config_for_test(&codex_home); + config.model_provider = model_provider; + config.preferred_auth_method = AuthMode::ApiKey; + + let conversation_manager = ConversationManager::default(); + let NewConversation { + conversation: codex, + .. + } = conversation_manager + .new_conversation(config) + .await + .expect("create new conversation"); + + codex + .submit(Op::UserInput { + items: vec![InputItem::Text { + text: "hello".into(), + }], + }) + .await + .unwrap(); + + wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; + + // verify request body flags + let request = &server.received_requests().await.unwrap()[0]; + let request_body = request.body_json::().unwrap(); + assert!( + request_body["store"].as_bool().unwrap(), + "store should be true for API key auth" + ); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_user_instructions_message_in_request() { let server = MockServer::start().await; diff --git a/codex-rs/login/src/lib.rs b/codex-rs/login/src/lib.rs index 80fc0e82..327daa7e 100644 --- a/codex-rs/login/src/lib.rs +++ b/codex-rs/login/src/lib.rs @@ -30,7 +30,8 @@ mod token_data; pub const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann"; pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY"; -#[derive(Clone, Debug, PartialEq, Copy)] +#[derive(Clone, Debug, PartialEq, Copy, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] pub enum AuthMode { ApiKey, ChatGPT, @@ -63,8 +64,11 @@ impl CodexAuth { /// Loads the available auth information from the auth.json or /// OPENAI_API_KEY environment variable. 
- pub fn from_codex_home(codex_home: &Path) -> std::io::Result> { - load_auth(codex_home, true) + pub fn from_codex_home( + codex_home: &Path, + preferred_auth_method: AuthMode, + ) -> std::io::Result> { + load_auth(codex_home, true, preferred_auth_method) } pub async fn get_token_data(&self) -> Result { @@ -165,7 +169,11 @@ impl CodexAuth { } } -fn load_auth(codex_home: &Path, include_env_var: bool) -> std::io::Result> { +fn load_auth( + codex_home: &Path, + include_env_var: bool, + preferred_auth_method: AuthMode, +) -> std::io::Result> { // First, check to see if there is a valid auth.json file. If not, we fall // back to AuthMode::ApiKey using the OPENAI_API_KEY environment variable // (if it is set). @@ -201,7 +209,7 @@ fn load_auth(codex_home: &Path, include_env_var: bool) -> std::io::Result