From 515b6331bd1fcaf8dea55645c5e866ad7bc14b16 Mon Sep 17 00:00:00 2001 From: Michael Bolin Date: Wed, 4 Jun 2025 08:44:17 -0700 Subject: [PATCH] feat: add support for login with ChatGPT (#1212) This does not implement the full Login with ChatGPT experience, but it should unblock people. **What works** * The `codex` multitool now has a `login` subcommand, so you can run `codex login`, which should write `CODEX_HOME/auth.json` if you complete the flow successfully. The TUI will now read the `OPENAI_API_KEY` from `auth.json`. * The TUI should refresh the token if it has expired and the necessary information is in `auth.json`. * There is a `LoginScreen` in the TUI that tells you to run `codex login` if both (1) your model provider expects to use `OPENAI_API_KEY` as its env var, and (2) `OPENAI_API_KEY` is not set. **What does not work** * The `LoginScreen` does not support the login flow from within the TUI. Instead, it tells you to quit, run `codex login`, and then run `codex` again. * `codex exec` does not read from `auth.json` yet, nor does it direct the user to go through the login flow if `OPENAI_API_KEY` cannot be found. * The `maybeRedeemCredits()` function from `get-api-key.tsx` has not been ported from TypeScript to `login_with_chatgpt.py` yet: https://github.com/openai/codex/blob/a67a67f3258fc21e147b6786a143fe3e15e6d5ba/codex-cli/src/utils/get-api-key.tsx#L84-L89 **Implementation** Currently, the OAuth flow requires running a local webserver on `127.0.0.1:1455`. It seemed wasteful to incur the additional binary cost of a webserver dependency in the Rust CLI just to support login, so instead we implement this logic in Python, as Python has an `http.server` module in its standard library. Specifically, we bundle the contents of a single Python file as a string in the Rust CLI and then use it to spawn a subprocess as `python3 -c {{SOURCE_FOR_PYTHON_SERVER}}`. As such, the most significant files in this PR are: ``` codex-rs/login/src/login_with_chatgpt.py codex-rs/login/src/lib.rs ``` Now that the CLI may load `OPENAI_API_KEY` from the environment _or_ `CODEX_HOME/auth.json`, we need a new abstraction for reading/writing this variable, so we introduce: ``` codex-rs/core/src/openai_api_key.rs ``` Note that `std::env::set_var()` is [rightfully] `unsafe` in Rust 2024, so we use a `LazyLock<RwLock<Option<String>>>` to store `OPENAI_API_KEY` so that it can be read and updated in a thread-safe manner. Ultimately, it should be possible to go through the entire login flow from within the TUI. For now, this PR introduces a placeholder `LoginScreen`, and the new `codex login` subcommand should be a viable workaround until that UI is ready.
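For reference, the heart of that new module is just a process-wide value guarded by a lock, seeded once from the environment. This is a condensed sketch of what `codex-rs/core/src/openai_api_key.rs` adds (the full version appears in the diff below):

```
use std::env;
use std::sync::LazyLock;
use std::sync::RwLock;

pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";

// Seeded from the environment exactly once; `codex login` later replaces the
// value via set_openai_api_key() instead of mutating the process environment.
static OPENAI_API_KEY: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| {
    RwLock::new(env::var(OPENAI_API_KEY_ENV_VAR).ok().filter(|s| !s.is_empty()))
});

pub fn get_openai_api_key() -> Option<String> {
    OPENAI_API_KEY.read().expect("lock poisoned").clone()
}

pub fn set_openai_api_key(value: String) {
    if !value.is_empty() {
        *OPENAI_API_KEY.write().expect("lock poisoned") = Some(value);
    }
}
```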
**Testing** Because the login flow is currently implemented in a standalone Python file, you can test it without building any Rust code as follows: ``` rm -rf /tmp/codex_home && mkdir /tmp/codex_home CODEX_HOME=/tmp/codex_home python3 codex-rs/login/src/login_with_chatgpt.py ``` For reference: * the original TypeScript implementation was introduced in https://github.com/openai/codex/pull/963 * support for redeeming credits was later added in https://github.com/openai/codex/pull/974 --- codex-cli/src/utils/get-api-key.tsx | 2 + codex-rs/Cargo.lock | 14 + codex-rs/Cargo.toml | 1 + codex-rs/cli/Cargo.toml | 1 + codex-rs/cli/src/lib.rs | 1 + codex-rs/cli/src/login.rs | 35 ++ codex-rs/cli/src/main.rs | 13 +- codex-rs/core/Cargo.toml | 1 + codex-rs/core/src/lib.rs | 1 + codex-rs/core/src/model_provider_info.rs | 34 +- codex-rs/core/src/openai_api_key.rs | 24 + codex-rs/login/Cargo.toml | 20 + codex-rs/login/src/lib.rs | 168 ++++++ codex-rs/login/src/login_with_chatgpt.py | 624 +++++++++++++++++++++++ codex-rs/tui/Cargo.toml | 1 + codex-rs/tui/src/app.rs | 34 +- codex-rs/tui/src/lib.rs | 56 +- codex-rs/tui/src/login_screen.rs | 46 ++ 18 files changed, 1051 insertions(+), 25 deletions(-) create mode 100644 codex-rs/cli/src/login.rs create mode 100644 codex-rs/core/src/openai_api_key.rs create mode 100644 codex-rs/login/Cargo.toml create mode 100644 codex-rs/login/src/lib.rs create mode 100644 codex-rs/login/src/login_with_chatgpt.py create mode 100644 codex-rs/tui/src/login_screen.rs diff --git a/codex-cli/src/utils/get-api-key.tsx b/codex-cli/src/utils/get-api-key.tsx index 4817e396..520f92ef 100644 --- a/codex-cli/src/utils/get-api-key.tsx +++ b/codex-cli/src/utils/get-api-key.tsx @@ -382,6 +382,8 @@ async function handleCallback( const exchanged = (await exchangeRes.json()) as { access_token: string; + // NOTE(mbolin): I did not see the "key" property set in practice. Note + // this property is not read by the code. 
key: string; }; diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index ae574ba3..66b4fa3e 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -585,6 +585,7 @@ dependencies = [ "codex-core", "codex-exec", "codex-linux-sandbox", + "codex-login", "codex-mcp-server", "codex-tui", "serde_json", @@ -613,6 +614,7 @@ dependencies = [ "base64 0.21.7", "bytes", "codex-apply-patch", + "codex-login", "codex-mcp-client", "dirs", "env-flags", @@ -704,6 +706,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "codex-login" +version = "0.0.0" +dependencies = [ + "chrono", + "reqwest", + "serde", + "serde_json", + "tokio", +] + [[package]] name = "codex-mcp-client" version = "0.0.0" @@ -747,6 +760,7 @@ dependencies = [ "codex-common", "codex-core", "codex-linux-sandbox", + "codex-login", "color-eyre", "crossterm", "image", diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml index 5af55f45..6991a622 100644 --- a/codex-rs/Cargo.toml +++ b/codex-rs/Cargo.toml @@ -9,6 +9,7 @@ members = [ "exec", "execpolicy", "linux-sandbox", + "login", "mcp-client", "mcp-server", "mcp-types", diff --git a/codex-rs/cli/Cargo.toml b/codex-rs/cli/Cargo.toml index a1474d8e..78fd08a7 100644 --- a/codex-rs/cli/Cargo.toml +++ b/codex-rs/cli/Cargo.toml @@ -20,6 +20,7 @@ clap = { version = "4", features = ["derive"] } codex-core = { path = "../core" } codex-common = { path = "../common", features = ["cli"] } codex-exec = { path = "../exec" } +codex-login = { path = "../login" } codex-linux-sandbox = { path = "../linux-sandbox" } codex-mcp-server = { path = "../mcp-server" } codex-tui = { path = "../tui" } diff --git a/codex-rs/cli/src/lib.rs b/codex-rs/cli/src/lib.rs index 0730a919..fa78d18a 100644 --- a/codex-rs/cli/src/lib.rs +++ b/codex-rs/cli/src/lib.rs @@ -1,5 +1,6 @@ pub mod debug_sandbox; mod exit_status; +pub mod login; pub mod proto; use clap::Parser; diff --git a/codex-rs/cli/src/login.rs b/codex-rs/cli/src/login.rs new file mode 100644 index 00000000..af3fb667 --- /dev/null +++ b/codex-rs/cli/src/login.rs @@ -0,0 +1,35 @@ +use codex_common::CliConfigOverrides; +use codex_core::config::Config; +use codex_core::config::ConfigOverrides; +use codex_login::login_with_chatgpt; + +pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! { + let cli_overrides = match cli_config_overrides.parse_overrides() { + Ok(v) => v, + Err(e) => { + eprintln!("Error parsing -c overrides: {e}"); + std::process::exit(1); + } + }; + + let config_overrides = ConfigOverrides::default(); + let config = match Config::load_with_cli_overrides(cli_overrides, config_overrides) { + Ok(config) => config, + Err(e) => { + eprintln!("Error loading configuration: {e}"); + std::process::exit(1); + } + }; + + let capture_output = false; + match login_with_chatgpt(&config.codex_home, capture_output).await { + Ok(_) => { + eprintln!("Successfully logged in"); + std::process::exit(0); + } + Err(e) => { + eprintln!("Error logging in: {e}"); + std::process::exit(1); + } + } +} diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index 1c362d2a..0e9ba018 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -1,6 +1,7 @@ use clap::Parser; use codex_cli::LandlockCommand; use codex_cli::SeatbeltCommand; +use codex_cli::login::run_login_with_chatgpt; use codex_cli::proto; use codex_common::CliConfigOverrides; use codex_exec::Cli as ExecCli; @@ -36,6 +37,9 @@ enum Subcommand { #[clap(visible_alias = "e")] Exec(ExecCli), + /// Login with ChatGPT. 
+ Login(LoginCommand), + /// Experimental: run Codex as an MCP server. Mcp, @@ -63,7 +67,10 @@ enum DebugCommand { } #[derive(Debug, Parser)] -struct ReplProto {} +struct LoginCommand { + #[clap(skip)] + config_overrides: CliConfigOverrides, +} fn main() -> anyhow::Result<()> { codex_linux_sandbox::run_with_sandbox(|codex_linux_sandbox_exe| async move { @@ -88,6 +95,10 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<() Some(Subcommand::Mcp) => { codex_mcp_server::run_main(codex_linux_sandbox_exe).await?; } + Some(Subcommand::Login(mut login_cli)) => { + prepend_config_flags(&mut login_cli.config_overrides, cli.config_overrides); + run_login_with_chatgpt(login_cli.config_overrides).await; + } Some(Subcommand::Proto(mut proto_cli)) => { prepend_config_flags(&mut proto_cli.config_overrides, cli.config_overrides); proto::run_main(proto_cli).await?; diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml index 4739ef31..38f84461 100644 --- a/codex-rs/core/Cargo.toml +++ b/codex-rs/core/Cargo.toml @@ -16,6 +16,7 @@ async-channel = "2.3.1" base64 = "0.21" bytes = "1.10.1" codex-apply-patch = { path = "../apply-patch" } +codex-login = { path = "../login" } codex-mcp-client = { path = "../mcp-client" } dirs = "6" env-flags = "0.1.1" diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs index 1dcf67bd..16cf1905 100644 --- a/codex-rs/core/src/lib.rs +++ b/codex-rs/core/src/lib.rs @@ -27,6 +27,7 @@ mod model_provider_info; pub use model_provider_info::ModelProviderInfo; pub use model_provider_info::WireApi; mod models; +pub mod openai_api_key; mod openai_tools; mod project_doc; pub mod protocol; diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs index 186e28d3..44b406c9 100644 --- a/codex-rs/core/src/model_provider_info.rs +++ b/codex-rs/core/src/model_provider_info.rs @@ -11,6 +11,7 @@ use std::collections::HashMap; use std::env::VarError; use crate::error::EnvVarError; +use crate::openai_api_key::get_openai_api_key; /// Wire protocol that the provider speaks. Most third-party services only /// implement the classic OpenAI Chat Completions JSON schema, whereas OpenAI @@ -52,20 +53,27 @@ impl ModelProviderInfo { /// cannot be found, returns an error.
pub fn api_key(&self) -> crate::error::Result<Option<String>> { match &self.env_key { - Some(env_key) => std::env::var(env_key) - .and_then(|v| { - if v.trim().is_empty() { - Err(VarError::NotPresent) - } else { - Ok(Some(v)) - } - }) - .map_err(|_| { - crate::error::CodexErr::EnvVar(EnvVarError { - var: env_key.clone(), - instructions: self.env_key_instructions.clone(), + Some(env_key) => { + let env_value = if env_key == crate::openai_api_key::OPENAI_API_KEY_ENV_VAR { + get_openai_api_key().map_or_else(|| Err(VarError::NotPresent), Ok) + } else { + std::env::var(env_key) + }; + env_value + .and_then(|v| { + if v.trim().is_empty() { + Err(VarError::NotPresent) + } else { + Ok(Some(v)) + } }) - }), + .map_err(|_| { + crate::error::CodexErr::EnvVar(EnvVarError { + var: env_key.clone(), + instructions: self.env_key_instructions.clone(), + }) + }) + } None => Ok(None), } } diff --git a/codex-rs/core/src/openai_api_key.rs b/codex-rs/core/src/openai_api_key.rs new file mode 100644 index 00000000..728914c0 --- /dev/null +++ b/codex-rs/core/src/openai_api_key.rs @@ -0,0 +1,24 @@ +use std::env; +use std::sync::LazyLock; +use std::sync::RwLock; + +pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY"; + +static OPENAI_API_KEY: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| { + let val = env::var(OPENAI_API_KEY_ENV_VAR) + .ok() + .and_then(|s| if s.is_empty() { None } else { Some(s) }); + RwLock::new(val) +}); + +pub fn get_openai_api_key() -> Option<String> { + #![allow(clippy::unwrap_used)] + OPENAI_API_KEY.read().unwrap().clone() +} + +pub fn set_openai_api_key(value: String) { + #![allow(clippy::unwrap_used)] + if !value.is_empty() { + *OPENAI_API_KEY.write().unwrap() = Some(value); + } +} diff --git a/codex-rs/login/Cargo.toml b/codex-rs/login/Cargo.toml new file mode 100644 index 00000000..e6eba6fd --- /dev/null +++ b/codex-rs/login/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "codex-login" +version = { workspace = true } +edition = "2024" + +[lints] +workspace = true + +[dependencies] +chrono = { version = "0.4", features = ["serde"] } +reqwest = { version = "0.12", features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +tokio = { version = "1", features = [ + "io-std", + "macros", + "process", + "rt-multi-thread", + "signal", +] } diff --git a/codex-rs/login/src/lib.rs b/codex-rs/login/src/lib.rs new file mode 100644 index 00000000..34c88f58 --- /dev/null +++ b/codex-rs/login/src/lib.rs @@ -0,0 +1,168 @@ +use chrono::DateTime; +use chrono::Utc; +use serde::Deserialize; +use serde::Serialize; +use std::fs::OpenOptions; +use std::io::Read; +use std::io::Write; +#[cfg(unix)] +use std::os::unix::fs::OpenOptionsExt; +use std::path::Path; +use std::process::Stdio; +use tokio::process::Command; + +const SOURCE_FOR_PYTHON_SERVER: &str = include_str!("./login_with_chatgpt.py"); + +const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann"; + +/// Run `python3 -c {{SOURCE_FOR_PYTHON_SERVER}}` with the CODEX_HOME +/// environment variable set to the provided `codex_home` path. If the +/// subprocess exits 0, read the OPENAI_API_KEY property out of +/// CODEX_HOME/auth.json and return Ok(OPENAI_API_KEY). Otherwise, return Err +/// with any information from the subprocess. +/// +/// If `capture_output` is true, the subprocess's output will be captured and +/// recorded in memory. Otherwise, the subprocess's output will be sent to the +/// current process's stdout/stderr.
+pub async fn login_with_chatgpt( + codex_home: &Path, + capture_output: bool, +) -> std::io::Result<String> { + let child = Command::new("python3") + .arg("-c") + .arg(SOURCE_FOR_PYTHON_SERVER) + .env("CODEX_HOME", codex_home) + .stdin(Stdio::null()) + .stdout(if capture_output { + Stdio::piped() + } else { + Stdio::inherit() + }) + .stderr(if capture_output { + Stdio::piped() + } else { + Stdio::inherit() + }) + .spawn()?; + + let output = child.wait_with_output().await?; + if output.status.success() { + try_read_openai_api_key(codex_home).await + } else { + let stderr = String::from_utf8_lossy(&output.stderr); + Err(std::io::Error::other(format!( + "login_with_chatgpt subprocess failed: {stderr}" + ))) + } +} + +/// Attempt to read the `OPENAI_API_KEY` from the `auth.json` file in the given +/// `CODEX_HOME` directory, refreshing it, if necessary. +pub async fn try_read_openai_api_key(codex_home: &Path) -> std::io::Result<String> { + let auth_path = codex_home.join("auth.json"); + let mut file = std::fs::File::open(&auth_path)?; + let mut contents = String::new(); + file.read_to_string(&mut contents)?; + let auth_dot_json: AuthDotJson = serde_json::from_str(&contents)?; + + if is_expired(&auth_dot_json) { + let refresh_response = try_refresh_token(&auth_dot_json).await?; + let mut auth_dot_json = auth_dot_json; + auth_dot_json.tokens.id_token = refresh_response.id_token; + if let Some(refresh_token) = refresh_response.refresh_token { + auth_dot_json.tokens.refresh_token = refresh_token; + } + auth_dot_json.last_refresh = Utc::now(); + + let mut options = OpenOptions::new(); + options.write(true).create(true).truncate(true); + #[cfg(unix)] + { + options.mode(0o600); + } + + let json_data = serde_json::to_string(&auth_dot_json)?; + { + let mut file = options.open(&auth_path)?; + file.write_all(json_data.as_bytes())?; + file.flush()?; + } + + Ok(auth_dot_json.openai_api_key) + } else { + Ok(auth_dot_json.openai_api_key) + } +} + +fn is_expired(auth_dot_json: &AuthDotJson) -> bool { + let last_refresh = auth_dot_json.last_refresh; + last_refresh < Utc::now() - chrono::Duration::days(28) +} + +async fn try_refresh_token(auth_dot_json: &AuthDotJson) -> std::io::Result<RefreshResponse> { + let refresh_request = RefreshRequest { + client_id: CLIENT_ID, + grant_type: "refresh_token", + refresh_token: auth_dot_json.tokens.refresh_token.clone(), + scope: "openid profile email", + }; + + let client = reqwest::Client::new(); + let response = client + .post("https://auth.openai.com/oauth/token") + .header("Content-Type", "application/json") + .json(&refresh_request) + .send() + .await + .map_err(std::io::Error::other)?; + + if response.status().is_success() { + let refresh_response = response + .json::<RefreshResponse>() + .await + .map_err(std::io::Error::other)?; + Ok(refresh_response) + } else { + Err(std::io::Error::other(format!( + "Failed to refresh token: {}", + response.status() + ))) + } +} + +#[derive(Serialize)] +struct RefreshRequest { + client_id: &'static str, + grant_type: &'static str, + refresh_token: String, + scope: &'static str, +} + +#[derive(Deserialize)] +struct RefreshResponse { + id_token: String, + refresh_token: Option<String>, +} + +/// Expected structure for $CODEX_HOME/auth.json. +#[derive(Deserialize, Serialize)] +struct AuthDotJson { + #[serde(rename = "OPENAI_API_KEY")] + openai_api_key: String, + + tokens: TokenData, + + last_refresh: DateTime<Utc>, +} + +#[derive(Deserialize, Serialize)] +struct TokenData { + /// This is a JWT. + id_token: String, + + /// This is a JWT.
+ #[allow(dead_code)] + access_token: String, + + refresh_token: String, +} diff --git a/codex-rs/login/src/login_with_chatgpt.py b/codex-rs/login/src/login_with_chatgpt.py new file mode 100644 index 00000000..c1d47864 --- /dev/null +++ b/codex-rs/login/src/login_with_chatgpt.py @@ -0,0 +1,624 @@ +"""Script that spawns a local webserver for retrieving an OpenAI API key. + +- Listens on 127.0.0.1:1455 +- Opens the OAuth authorization URL in the browser, which redirects back to + http://localhost:1455/auth/callback +- If the user successfully navigates the auth flow, + $CODEX_HOME/auth.json will be written with the API key. +- User will be redirected to http://localhost:1455/success upon success. + +The script should exit with a non-zero code if the user fails to navigate the +auth flow. +""" + +from __future__ import annotations + +import argparse +import base64 +import datetime +import errno +import hashlib +import http.server +import json +import os +import secrets +import sys +import threading +import urllib.parse +import urllib.request +import webbrowser +from dataclasses import dataclass + +# Required port for OAuth client. +REQUIRED_PORT = 1455 +URL_BASE = f"http://localhost:{REQUIRED_PORT}" +DEFAULT_ISSUER = "https://auth.openai.com" +DEFAULT_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann" + +EXIT_CODE_WHEN_ADDRESS_ALREADY_IN_USE = 13 + + +@dataclass +class TokenData: + id_token: str + access_token: str + refresh_token: str + + +@dataclass +class AuthBundle: + """Aggregates authentication data produced after successful OAuth flow.""" + + api_key: str + token_data: TokenData + last_refresh: str + + +def main() -> None: + parser = argparse.ArgumentParser(description="Retrieve API key via local HTTP flow") + parser.add_argument( + "--no-browser", + action="store_true", + help="Do not automatically open the browser", + ) + parser.add_argument("--verbose", action="store_true", help="Enable request logging") + args = parser.parse_args() + + codex_home = os.environ.get("CODEX_HOME") + if not codex_home: + eprint("ERROR: CODEX_HOME environment variable is not set") + sys.exit(1) + + # Spawn server. + try: + httpd = _ApiKeyHTTPServer( + ("127.0.0.1", REQUIRED_PORT), + _ApiKeyHTTPHandler, + codex_home=codex_home, + verbose=args.verbose, + ) + except OSError as e: + eprint(f"ERROR: {e}") + if e.errno == errno.EADDRINUSE: + # Caller might want to handle this case specially. + sys.exit(EXIT_CODE_WHEN_ADDRESS_ALREADY_IN_USE) + else: + sys.exit(1) + + auth_url = httpd.auth_url() + + with httpd: + eprint(f"Starting local login server on {URL_BASE}") + if not args.no_browser: + try: + webbrowser.open(auth_url, new=1, autoraise=True) + except Exception as e: + eprint(f"Failed to open browser: {e}") + + eprint( + f"If your browser did not open, navigate to this URL to authenticate:\n\n{auth_url}" + ) + + # Run the server in the main thread until `shutdown()` is called by the + # request handler. + try: + httpd.serve_forever() + except KeyboardInterrupt: + eprint("\nKeyboard interrupt received, exiting.") + + # Server has been shut down by the request handler. Exit with the code + # it set (0 on success, non-zero on failure). + sys.exit(httpd.exit_code) + + +class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler): + """A minimal request handler that captures an *api key* from query/post.""" + + # We store the result in the server instance itself.
+ server: "_ApiKeyHTTPServer" # type: ignore[override] - helpful annotation + + def do_GET(self) -> None: # noqa: N802 – required by BaseHTTPRequestHandler + path = urllib.parse.urlparse(self.path).path + + if path == "/success": + # Serve confirmation page then gracefully shut down the server so + # the main thread can exit with the previously captured exit code. + self._send_html(LOGIN_SUCCESS_HTML) + + # Ensure the data is flushed to the client before we stop. + try: + self.wfile.flush() + except Exception as e: + eprint(f"Failed to flush response: {e}") + + self.request_shutdown() + elif path == "/auth/callback": + query = urllib.parse.urlparse(self.path).query + params = urllib.parse.parse_qs(query) + + # Validate state ------------------------------------------------- + if params.get("state", [None])[0] != self.server.state: + self.send_error(400, "State parameter mismatch") + return + + # Standard OAuth flow ----------------------------------------- + code = params.get("code", [None])[0] + if not code: + self.send_error(400, "Missing authorization code") + return + + try: + auth_bundle, success_url = self._exchange_code_for_api_key(code) + except Exception as exc: # noqa: BLE001 – propagate to client + self.send_error(500, f"Token exchange failed: {exc}") + return + + # Persist API key along with additional token metadata. + if _write_auth_file( + auth=auth_bundle, + codex_home=self.server.codex_home, + ): + self.server.exit_code = 0 + self._send_redirect(success_url) + else: + self.send_error(500, "Unable to persist auth file") + else: + self.send_error(404, "Endpoint not supported") + + def do_POST(self) -> None: # noqa: N802 – required by BaseHTTPRequestHandler + self.send_error(404, "Endpoint not supported") + + def send_error(self, code, message=None, explain=None) -> None: + """Send an error response and stop the server. + + We avoid calling `sys.exit()` directly from the request-handling thread + so that the response has a chance to be written to the socket. Instead + we shut the server down; the main thread will then exit with the + appropriate status code. + """ + super().send_error(code, message, explain) + try: + self.wfile.flush() + except Exception as e: + eprint(f"Failed to flush response: {e}") + + self.request_shutdown() + + def _send_redirect(self, url: str) -> None: + self.send_response(302) + self.send_header("Location", url) + self.end_headers() + + def _send_html(self, body: str) -> None: + encoded = body.encode() + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.send_header("Content-Length", str(len(encoded))) + self.end_headers() + self.wfile.write(encoded) + + # Silence logging for cleanliness unless --verbose flag is used. + def log_message(self, fmt: str, *args): # type: ignore[override] + if getattr(self.server, "verbose", False): # type: ignore[attr-defined] + super().log_message(fmt, *args) + + def _exchange_code_for_api_key(self, code: str) -> tuple[AuthBundle, str]: + """Perform token + token-exchange to obtain an OpenAI API key. + + Returns (AuthBundle, success_url). + """ + + token_endpoint = f"{self.server.issuer}/oauth/token" + + # 1. 
Authorization-code -> (id_token, access_token, refresh_token) + data = urllib.parse.urlencode( + { + "grant_type": "authorization_code", + "code": code, + "redirect_uri": self.server.redirect_uri, + "client_id": self.server.client_id, + "code_verifier": self.server.pkce.code_verifier, + } + ).encode() + + token_data: TokenData + + with urllib.request.urlopen( + urllib.request.Request( + token_endpoint, + data=data, + method="POST", + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + ) as resp: + payload = json.loads(resp.read().decode()) + token_data = TokenData( + id_token=payload["id_token"], + access_token=payload["access_token"], + refresh_token=payload["refresh_token"], + ) + + id_token_parts = token_data.id_token.split(".") + if len(id_token_parts) != 3: + raise ValueError("Invalid ID token") + access_token_parts = token_data.access_token.split(".") + if len(access_token_parts) != 3: + raise ValueError("Invalid access token") + + id_token_claims = json.loads( + base64.urlsafe_b64decode(id_token_parts[1] + "==").decode("utf-8") + ) + access_token_claims = json.loads( + base64.urlsafe_b64decode(access_token_parts[1] + "==").decode("utf-8") + ) + + token_claims = id_token_claims.get("https://api.openai.com/auth", {}) + access_claims = access_token_claims.get("https://api.openai.com/auth", {}) + + org_id = token_claims.get("organization_id") + if not org_id: + raise ValueError("Missing organization in id_token claims") + + project_id = token_claims.get("project_id") + if not project_id: + raise ValueError("Missing project in id_token claims") + + random_id = secrets.token_hex(6) + + # 2. Token exchange to obtain API key + today = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d") + exchange_data = urllib.parse.urlencode( + { + "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange", + "client_id": self.server.client_id, + "requested_token": "openai-api-key", + "subject_token": token_data.id_token, + "subject_token_type": "urn:ietf:params:oauth:token-type:id_token", + "name": f"Codex CLI [auto-generated] ({today}) [{random_id}]", + } + ).encode() + + exchanged_access_token: str + with urllib.request.urlopen( + urllib.request.Request( + token_endpoint, + data=exchange_data, + method="POST", + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + ) as resp: + exchange_payload = json.loads(resp.read().decode()) + exchanged_access_token = exchange_payload["access_token"] + + # Determine whether the organization still requires additional + # setup (e.g., adding a payment method) based on the ID-token + # claim provided by the auth service. + completed_onboarding = token_claims.get("completed_platform_onboarding") is True + chatgpt_plan_type = access_claims.get("chatgpt_plan_type") + is_org_owner = token_claims.get("is_org_owner") is True + needs_setup = not completed_onboarding and is_org_owner + + # Build the success URL on the same host/port as the callback and + # include the required query parameters for the front-end page. + success_url_query = { + "id_token": token_data.id_token, + "needs_setup": "true" if needs_setup else "false", + "org_id": org_id, + "project_id": project_id, + "plan_type": chatgpt_plan_type, + "platform_url": ( + "https://platform.openai.com" + if self.server.issuer == "https://auth.openai.com" + else "https://platform.api.openai.org" + ), + } + success_url = f"{URL_BASE}/success?{urllib.parse.urlencode(success_url_query)}" + + # TODO(mbolin): Port maybeRedeemCredits() to Python and call it here.
+ + # Persist refresh_token/id_token for future use (redeem credits etc.) + last_refresh_str = ( + datetime.datetime.now(datetime.timezone.utc) + .isoformat() + .replace("+00:00", "Z") + ) + + auth_bundle = AuthBundle( + api_key=exchanged_access_token, + token_data=token_data, + last_refresh=last_refresh_str, + ) + + return (auth_bundle, success_url) + + def request_shutdown(self) -> None: + # shutdown() must be invoked from another thread to avoid + # deadlocking the serve_forever() loop, which is running in this + # same thread. A short-lived helper thread does the trick. + threading.Thread(target=self.server.shutdown, daemon=True).start() + + +def _write_auth_file(*, auth: AuthBundle, codex_home: str) -> bool: + """Persist *api_key* to $CODEX_HOME/auth.json. + + Returns True on success, False otherwise. Any error is printed to + *stderr* so that the Rust layer can surface the problem. + """ + if not os.path.isdir(codex_home): + try: + os.makedirs(codex_home, exist_ok=True) + except Exception as exc: # pragma: no cover – unlikely + eprint(f"ERROR: unable to create CODEX_HOME directory: {exc}") + return False + + auth_path = os.path.join(codex_home, "auth.json") + auth_json_contents = { + "OPENAI_API_KEY": auth.api_key, + "tokens": { + "id_token": auth.token_data.id_token, + "access_token": auth.token_data.access_token, + "refresh_token": auth.token_data.refresh_token, + }, + "last_refresh": auth.last_refresh, + } + try: + with open(auth_path, "w", encoding="utf-8") as fp: + if hasattr(os, "fchmod"): # POSIX-safe + os.fchmod(fp.fileno(), 0o600) + json.dump(auth_json_contents, fp, indent=2) + except Exception as exc: # pragma: no cover – permissions/filesystem + eprint(f"ERROR: unable to write auth file: {exc}") + return False + + return True + + +@dataclass +class PkceCodes: + code_verifier: str + code_challenge: str + + +class _ApiKeyHTTPServer(http.server.HTTPServer): + """HTTPServer with shutdown helper & self-contained OAuth configuration.""" + + def __init__( + self, + server_address: tuple[str, int], + request_handler_class: type[http.server.BaseHTTPRequestHandler], + *, + codex_home: str, + verbose: bool = False, + ) -> None: + super().__init__(server_address, request_handler_class, bind_and_activate=True) + + self.exit_code = 1 + self.codex_home = codex_home + self.verbose: bool = verbose + + self.issuer: str = DEFAULT_ISSUER + self.client_id: str = DEFAULT_CLIENT_ID + port = server_address[1] + self.redirect_uri: str = f"http://localhost:{port}/auth/callback" + self.pkce: PkceCodes = _generate_pkce() + self.state: str = secrets.token_hex(32) + + def auth_url(self) -> str: + """Return fully-formed OpenID authorization URL.""" + params = { + "response_type": "code", + "client_id": self.client_id, + "redirect_uri": self.redirect_uri, + "scope": "openid profile email offline_access", + "code_challenge": self.pkce.code_challenge, + "code_challenge_method": "S256", + "id_token_add_organizations": "true", + "state": self.state, + } + return f"{self.issuer}/oauth/authorize?" + urllib.parse.urlencode(params) + + +def _generate_pkce() -> PkceCodes: + """Generate PKCE *code_verifier* and *code_challenge* (S256).""" + code_verifier = secrets.token_hex(64) + digest = hashlib.sha256(code_verifier.encode()).digest() + code_challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode() + return PkceCodes(code_verifier, code_challenge) + + +def eprint(*args, **kwargs) -> None: + print(*args, file=sys.stderr, **kwargs) + + +LOGIN_SUCCESS_HTML = """<!DOCTYPE html> +<html lang="en"> +<head> +<meta charset="utf-8" /> +<title>Sign into Codex CLI</title> +</head> +<body> +<h1>Signed in to Codex CLI</h1>
+</body> +</html> +""" + +# Unconditionally call `main()` instead of gating it behind +# `if __name__ == "__main__"` because this script is either: +# +# - invoked as a string passed to `python3 -c` +# - run via `python3 login_with_chatgpt.py` for testing as part of local +# development +main() diff --git a/codex-rs/tui/Cargo.toml b/codex-rs/tui/Cargo.toml index ffc107e8..2d7840e6 100644 --- a/codex-rs/tui/Cargo.toml +++ b/codex-rs/tui/Cargo.toml @@ -22,6 +22,7 @@ codex-ansi-escape = { path = "../ansi-escape" } codex-core = { path = "../core" } codex-common = { path = "../common", features = ["cli", "elapsed"] } codex-linux-sandbox = { path = "../linux-sandbox" } +codex-login = { path = "../login" } color-eyre = "0.6.3" crossterm = { version = "0.28.1", features = ["bracketed-paste"] } image = { version = "^0.25.6", default-features = false, features = ["jpeg"] } diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index 7d518c23..8f35a350 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -3,11 +3,11 @@ use crate::app_event_sender::AppEventSender; use crate::chatwidget::ChatWidget; use crate::git_warning_screen::GitWarningOutcome; use crate::git_warning_screen::GitWarningScreen; +use crate::login_screen::LoginScreen; use crate::mouse_capture::MouseCapture; use crate::scroll_event_helper::ScrollEventHelper; use crate::slash_command::SlashCommand; use crate::tui; -// used by ChatWidgetArgs use codex_core::config::Config; use codex_core::protocol::Event; use codex_core::protocol::Op; @@ -29,6 +29,8 @@ enum AppState<'a> { /// `AppState`. widget: Box<ChatWidget<'a>>, }, + /// The login screen for the OpenAI provider. + Login { screen: LoginScreen }, /// The start-up warning that recommends running codex inside a Git repo. GitWarning { screen: GitWarningScreen }, } @@ -56,6 +58,7 @@ impl<'a> App<'a> { pub(crate) fn new( config: Config, initial_prompt: Option<String>, + show_login_screen: bool, show_git_warning: bool, initial_images: Vec<PathBuf>, ) -> Self { @@ -113,7 +116,18 @@ impl<'a> App<'a> { }); } - let (app_state, chat_args) = if show_git_warning { + let (app_state, chat_args) = if show_login_screen { + ( + AppState::Login { + screen: LoginScreen::new(app_event_tx.clone(), config.codex_home.clone()), + }, + Some(ChatWidgetArgs { + config, + initial_prompt, + initial_images, + }), + ) + } else if show_git_warning { ( AppState::GitWarning { screen: GitWarningScreen::new(), @@ -175,7 +189,7 @@ impl<'a> App<'a> { AppState::Chat { widget } => { widget.submit_op(Op::Interrupt); } - AppState::GitWarning { .. } => { + AppState::Login { .. } | AppState::GitWarning { .. } => { // No-op. } } @@ -203,16 +217,16 @@ impl<'a> App<'a> { } AppEvent::CodexOp(op) => match &mut self.app_state { AppState::Chat { widget } => widget.submit_op(op), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. } => {} }, AppEvent::LatestLog(line) => match &mut self.app_state { AppState::Chat { widget } => widget.update_latest_log(line), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. } => {} }, AppEvent::DispatchCommand(command) => match command { SlashCommand::Clear => match &mut self.app_state { AppState::Chat { widget } => widget.clear_conversation_history(), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { ..
} => {} }, SlashCommand::ToggleMouseMode => { if let Err(e) = mouse_capture.toggle() { @@ -235,6 +249,9 @@ impl<'a> App<'a> { AppState::Chat { widget } => { terminal.draw(|frame| frame.render_widget_ref(&**widget, frame.area()))?; } + AppState::Login { screen } => { + terminal.draw(|frame| frame.render_widget_ref(&*screen, frame.area()))?; + } AppState::GitWarning { screen } => { terminal.draw(|frame| frame.render_widget_ref(&*screen, frame.area()))?; } @@ -249,6 +266,7 @@ impl<'a> App<'a> { AppState::Chat { widget } => { widget.handle_key_event(key_event); } + AppState::Login { screen } => screen.handle_key_event(key_event), AppState::GitWarning { screen } => match screen.handle_key_event(key_event) { GitWarningOutcome::Continue => { // User accepted – switch to chat view. @@ -279,14 +297,14 @@ impl<'a> App<'a> { fn dispatch_scroll_event(&mut self, scroll_delta: i32) { match &mut self.app_state { AppState::Chat { widget } => widget.handle_scroll_delta(scroll_delta), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. } => {} } } fn dispatch_codex_event(&mut self, event: Event) { match &mut self.app_state { AppState::Chat { widget } => widget.handle_codex_event(event), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. } => {} } } } diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index 535168e3..5f3e2d69 100644 --- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -5,9 +5,13 @@ use app::App; use codex_core::config::Config; use codex_core::config::ConfigOverrides; +use codex_core::openai_api_key::OPENAI_API_KEY_ENV_VAR; +use codex_core::openai_api_key::get_openai_api_key; +use codex_core::openai_api_key::set_openai_api_key; use codex_core::protocol::AskForApproval; use codex_core::protocol::SandboxPolicy; use codex_core::util::is_inside_git_repo; +use codex_login::try_read_openai_api_key; use log_layer::TuiLogLayer; use std::fs::OpenOptions; use std::path::PathBuf; @@ -28,6 +32,7 @@ mod exec_command; mod git_warning_screen; mod history_cell; mod log_layer; +mod login_screen; mod markdown; mod mouse_capture; mod scroll_event_helper; @@ -124,13 +129,15 @@ pub fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> std::io:: .with(tui_layer) .try_init(); + let show_login_screen = should_show_login_screen(&config); + // Determine whether we need to display the "not a git repo" warning // modal. The flag is shown when the current working directory is *not* // inside a Git repository **and** the user did *not* pass the // `--allow-no-git-exec` flag.
let show_git_warning = !cli.skip_git_repo_check && !is_inside_git_repo(&config); - try_run_ratatui_app(cli, config, show_git_warning, log_rx); + try_run_ratatui_app(cli, config, show_login_screen, show_git_warning, log_rx); Ok(()) } @@ -141,10 +148,11 @@ pub fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> std::io:: fn try_run_ratatui_app( cli: Cli, config: Config, + show_login_screen: bool, show_git_warning: bool, log_rx: tokio::sync::mpsc::UnboundedReceiver<String>, ) { - if let Err(report) = run_ratatui_app(cli, config, show_git_warning, log_rx) { + if let Err(report) = run_ratatui_app(cli, config, show_login_screen, show_git_warning, log_rx) { eprintln!("Error: {report:?}"); } } @@ -152,6 +160,7 @@ fn try_run_ratatui_app( fn run_ratatui_app( cli: Cli, config: Config, + show_login_screen: bool, show_git_warning: bool, mut log_rx: tokio::sync::mpsc::UnboundedReceiver<String>, ) -> color_eyre::Result<()> { @@ -167,7 +176,13 @@ fn run_ratatui_app( terminal.clear()?; let Cli { prompt, images, .. } = cli; - let mut app = App::new(config.clone(), prompt, show_git_warning, images); + let mut app = App::new( + config.clone(), + prompt, + show_login_screen, + show_git_warning, + images, + ); // Bridge log receiver into the AppEvent channel so latest log lines update the UI. { @@ -197,3 +212,38 @@ fn restore() { ); } } + +#[allow(clippy::unwrap_used)] +fn should_show_login_screen(config: &Config) -> bool { + if is_in_need_of_openai_api_key(config) { + // Reading the OpenAI API key is an async operation because it may need + // to refresh the token. Block on it. + let codex_home = config.codex_home.clone(); + let (tx, rx) = tokio::sync::oneshot::channel(); + tokio::spawn(async move { + match try_read_openai_api_key(&codex_home).await { + Ok(openai_api_key) => { + set_openai_api_key(openai_api_key); + tx.send(false).unwrap(); + } + Err(_) => { + tx.send(true).unwrap(); + } + } + }); + // TODO(mbolin): Impose some sort of timeout. + tokio::task::block_in_place(|| rx.blocking_recv()).unwrap() + } else { + false + } +} + +fn is_in_need_of_openai_api_key(config: &Config) -> bool { + let is_using_openai_key = config + .model_provider + .env_key + .as_ref() + .map(|s| s == OPENAI_API_KEY_ENV_VAR) + .unwrap_or(false); + is_using_openai_key && get_openai_api_key().is_none() +} diff --git a/codex-rs/tui/src/login_screen.rs b/codex-rs/tui/src/login_screen.rs new file mode 100644 index 00000000..1bd11c19 --- /dev/null +++ b/codex-rs/tui/src/login_screen.rs @@ -0,0 +1,46 @@ +use std::path::PathBuf; + +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use ratatui::buffer::Buffer; +use ratatui::layout::Rect; +use ratatui::widgets::Paragraph; +use ratatui::widgets::Widget as _; +use ratatui::widgets::WidgetRef; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; + +pub(crate) struct LoginScreen { + app_event_tx: AppEventSender, + + /// Use this with login_with_chatgpt() in login/src/lib.rs and, if + /// successful, update the in-memory config via + /// codex_core::openai_api_key::set_openai_api_key().
+ #[allow(dead_code)] + codex_home: PathBuf, +} + +impl LoginScreen { + pub(crate) fn new(app_event_tx: AppEventSender, codex_home: PathBuf) -> Self { + Self { + app_event_tx, + codex_home, + } + } + + pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { + if let KeyCode::Char('q') = key_event.code { + self.app_event_tx.send(AppEvent::ExitRequest); + } + } +} + +impl WidgetRef for &LoginScreen { + fn render_ref(&self, area: Rect, buf: &mut Buffer) { + let text = Paragraph::new( + "Login using `codex login` and then run this command again. 'q' to quit.", + ); + text.render(area, buf); + } +}