Add support for a separate chatgpt auth endpoint (#1712)
Adds a `CodexAuth` type that encapsulates the available auth modes and the logic for refreshing tokens. Changes the `Responses` API client to send requests to different endpoints depending on the auth mode, and updates login_with_chatgpt to support an API-less mode that skips the key exchange.
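The diff below only shows call sites of `CodexAuth` (`auth.mode`, `auth.get_token().await`, `load_auth(&config.codex_home)`), not its definition. A minimal sketch of the shape those call sites imply — the field set and error type here are assumptions, not the actual `codex_login` API:

```rust
// Hypothetical sketch inferred from usage in this diff; the real
// `codex_login` types may differ.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum AuthMode {
    ApiKey,
    ChatGPT,
}

#[derive(Clone)]
pub struct CodexAuth {
    pub mode: AuthMode,
    // Token and refresh state omitted; the diff only calls `get_token()`.
}

impl CodexAuth {
    /// Returns a bearer token for the selected mode, refreshing it first if
    /// necessary (the refresh logic lives in `codex_login`).
    pub async fn get_token(&self) -> std::io::Result<String> {
        unimplemented!()
    }
}
```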
@@ -3,6 +3,8 @@ use std::path::Path;
 use std::time::Duration;
 
 use bytes::Bytes;
+use codex_login::AuthMode;
+use codex_login::CodexAuth;
 use eventsource_stream::Eventsource;
 use futures::prelude::*;
 use reqwest::StatusCode;
@@ -28,6 +30,7 @@ use crate::config::Config;
 use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
 use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use crate::error::CodexErr;
+use crate::error::EnvVarError;
 use crate::error::Result;
 use crate::flags::CODEX_RS_SSE_FIXTURE;
 use crate::model_provider_info::ModelProviderInfo;
@@ -41,6 +44,7 @@ use std::sync::Arc;
 #[derive(Clone)]
 pub struct ModelClient {
     config: Arc<Config>,
+    auth: Option<CodexAuth>,
     client: reqwest::Client,
     provider: ModelProviderInfo,
     session_id: Uuid,
@@ -51,6 +55,7 @@ pub struct ModelClient {
 impl ModelClient {
     pub fn new(
         config: Arc<Config>,
+        auth: Option<CodexAuth>,
         provider: ModelProviderInfo,
         effort: ReasoningEffortConfig,
         summary: ReasoningSummaryConfig,
@@ -58,6 +63,7 @@ impl ModelClient {
     ) -> Self {
         Self {
             config,
+            auth,
            client: reqwest::Client::new(),
            provider,
            session_id,
@@ -115,6 +121,25 @@ impl ModelClient {
             return stream_from_fixture(path, self.provider.clone()).await;
         }
 
+        let auth = self.auth.as_ref().ok_or_else(|| {
+            CodexErr::EnvVar(EnvVarError {
+                var: "OPENAI_API_KEY".to_string(),
+                instructions: Some("Create an API key (https://platform.openai.com) and export it as an environment variable.".to_string()),
+            })
+        })?;
+
+        let store = prompt.store && auth.mode != AuthMode::ChatGPT;
+
+        let base_url = match self.provider.base_url.clone() {
+            Some(url) => url,
+            None => match auth.mode {
+                AuthMode::ChatGPT => "https://chatgpt.com/backend-api/codex".to_string(),
+                AuthMode::ApiKey => "https://api.openai.com/v1".to_string(),
+            },
+        };
+
+        let token = auth.get_token().await?;
+
         let full_instructions = prompt.get_full_instructions(&self.config.model);
         let tools_json = create_tools_json_for_responses_api(
             prompt,
@@ -125,7 +150,7 @@ impl ModelClient {
 
         // Request encrypted COT if we are not storing responses,
         // otherwise reasoning items will be referenced by ID
-        let include = if !prompt.store && reasoning.is_some() {
+        let include: Vec<String> = if !store && reasoning.is_some() {
             vec!["reasoning.encrypted_content".to_string()]
         } else {
             vec![]
@@ -139,8 +164,7 @@ impl ModelClient {
             tool_choice: "auto",
             parallel_tool_calls: false,
             reasoning,
-            store: prompt.store,
-            // TODO: make this configurable
+            store,
             stream: true,
             include,
         };
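Taken together, the two hunks above implement a single rule: ChatGPT-backed auth never stores responses, and when responses are not stored the encrypted chain-of-thought must be requested explicitly (otherwise reasoning items are referenced by ID). A standalone sketch of that rule, with simplified names rather than the actual code:

```rust
// Illustrative reduction of the `store`/`include` logic above.
#[derive(PartialEq)]
enum AuthMode {
    ApiKey,
    ChatGPT,
}

/// ChatGPT auth forces `store = false`; un-stored responses must request
/// `reasoning.encrypted_content` so reasoning comes back inline instead of
/// being referenced by ID.
fn include_fields(prompt_store: bool, mode: &AuthMode, has_reasoning: bool) -> Vec<String> {
    let store = prompt_store && *mode != AuthMode::ChatGPT;
    if !store && has_reasoning {
        vec!["reasoning.encrypted_content".to_string()]
    } else {
        Vec::new()
    }
}
```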
@@ -153,17 +177,21 @@ impl ModelClient {
 
         let mut attempt = 0;
         let max_retries = self.provider.request_max_retries();
 
         loop {
             attempt += 1;
 
             let req_builder = self
-                .provider
-                .create_request_builder(&self.client)?
+                .client
+                .post(format!("{base_url}/responses"))
                 .header("OpenAI-Beta", "responses=experimental")
                 .header("session_id", self.session_id.to_string())
+                .bearer_auth(&token)
                 .header(reqwest::header::ACCEPT, "text/event-stream")
                 .json(&payload);
 
+            let req_builder = self.provider.apply_http_headers(req_builder);
+
             let res = req_builder.send().await;
             if let Ok(resp) = &res {
                 trace!(
@@ -572,7 +600,7 @@ mod tests {
 
         let provider = ModelProviderInfo {
             name: "test".to_string(),
-            base_url: "https://test.com".to_string(),
+            base_url: Some("https://test.com".to_string()),
             env_key: Some("TEST_API_KEY".to_string()),
             env_key_instructions: None,
             wire_api: WireApi::Responses,
@@ -582,6 +610,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            requires_auth: false,
         };
 
         let events = collect_events(
@@ -631,7 +660,7 @@ mod tests {
         let sse1 = format!("event: response.output_item.done\ndata: {item1}\n\n");
         let provider = ModelProviderInfo {
             name: "test".to_string(),
-            base_url: "https://test.com".to_string(),
+            base_url: Some("https://test.com".to_string()),
             env_key: Some("TEST_API_KEY".to_string()),
             env_key_instructions: None,
             wire_api: WireApi::Responses,
@@ -641,6 +670,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            requires_auth: false,
         };
 
         let events = collect_events(&[sse1.as_bytes()], provider).await;
@@ -733,7 +763,7 @@ mod tests {
 
         let provider = ModelProviderInfo {
             name: "test".to_string(),
-            base_url: "https://test.com".to_string(),
+            base_url: Some("https://test.com".to_string()),
             env_key: Some("TEST_API_KEY".to_string()),
             env_key_instructions: None,
             wire_api: WireApi::Responses,
@@ -743,6 +773,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            requires_auth: false,
         };
 
         let out = run_sse(evs, provider).await;
@@ -15,6 +15,7 @@ use async_channel::Sender;
 use codex_apply_patch::ApplyPatchAction;
 use codex_apply_patch::MaybeApplyPatchVerified;
 use codex_apply_patch::maybe_parse_apply_patch_verified;
+use codex_login::CodexAuth;
 use futures::prelude::*;
 use mcp_types::CallToolResult;
 use serde::Serialize;
@@ -103,7 +104,11 @@ pub struct CodexSpawnOk {
 
 impl Codex {
     /// Spawn a new [`Codex`] and initialize the session.
-    pub async fn spawn(config: Config, ctrl_c: Arc<Notify>) -> CodexResult<CodexSpawnOk> {
+    pub async fn spawn(
+        config: Config,
+        auth: Option<CodexAuth>,
+        ctrl_c: Arc<Notify>,
+    ) -> CodexResult<CodexSpawnOk> {
         // experimental resume path (undocumented)
         let resume_path = config.experimental_resume.clone();
         info!("resume_path: {resume_path:?}");
@@ -132,7 +137,7 @@ impl Codex {
         // Generate a unique ID for the lifetime of this Codex session.
         let session_id = Uuid::new_v4();
         tokio::spawn(submission_loop(
-            session_id, config, rx_sub, tx_event, ctrl_c,
+            session_id, config, auth, rx_sub, tx_event, ctrl_c,
         ));
         let codex = Codex {
             next_id: AtomicU64::new(0),
@@ -525,6 +530,7 @@ impl AgentTask {
 async fn submission_loop(
     mut session_id: Uuid,
     config: Arc<Config>,
+    auth: Option<CodexAuth>,
     rx_sub: Receiver<Submission>,
     tx_event: Sender<Event>,
     ctrl_c: Arc<Notify>,
@@ -636,6 +642,7 @@ async fn submission_loop(
 
             let client = ModelClient::new(
                 config.clone(),
+                auth.clone(),
                 provider.clone(),
                 model_reasoning_effort,
                 model_reasoning_summary,
@@ -6,6 +6,7 @@ use crate::config::Config;
 use crate::protocol::Event;
 use crate::protocol::EventMsg;
 use crate::util::notify_on_sigint;
+use codex_login::load_auth;
 use tokio::sync::Notify;
 use uuid::Uuid;
 
@@ -25,11 +26,12 @@ pub struct CodexConversation {
 /// that callers can surface the information to the UI.
 pub async fn init_codex(config: Config) -> anyhow::Result<CodexConversation> {
     let ctrl_c = notify_on_sigint();
+    let auth = load_auth(&config.codex_home)?;
     let CodexSpawnOk {
         codex,
         init_id,
         session_id,
-    } = Codex::spawn(config, ctrl_c.clone()).await?;
+    } = Codex::spawn(config, auth, ctrl_c.clone()).await?;
 
     // The first event must be `SessionInitialized`. Validate and forward it to
     // the caller so that they can display it in the conversation history.
@@ -526,6 +526,7 @@ impl Config {
                 .chatgpt_base_url
                 .or(cfg.chatgpt_base_url)
                 .unwrap_or("https://chatgpt.com/backend-api/".to_string()),
 
             experimental_resume,
             include_plan_tool: include_plan_tool.unwrap_or(false),
         };
@@ -794,7 +795,7 @@ disable_response_storage = true
 
         let openai_chat_completions_provider = ModelProviderInfo {
             name: "OpenAI using Chat Completions".to_string(),
-            base_url: "https://api.openai.com/v1".to_string(),
+            base_url: Some("https://api.openai.com/v1".to_string()),
             env_key: Some("OPENAI_API_KEY".to_string()),
             wire_api: crate::WireApi::Chat,
             env_key_instructions: None,
@@ -804,6 +805,7 @@ disable_response_storage = true
             request_max_retries: Some(4),
             stream_max_retries: Some(10),
             stream_idle_timeout_ms: Some(300_000),
+            requires_auth: false,
         };
         let model_provider_map = {
             let mut model_provider_map = built_in_model_providers();
@@ -30,8 +30,8 @@ mod message_history;
 mod model_provider_info;
 pub use model_provider_info::ModelProviderInfo;
 pub use model_provider_info::WireApi;
+pub use model_provider_info::built_in_model_providers;
 mod models;
-pub mod openai_api_key;
 mod openai_model_info;
 mod openai_tools;
 pub mod plan_tool;
@@ -12,7 +12,6 @@ use std::env::VarError;
 use std::time::Duration;
 
 use crate::error::EnvVarError;
-use crate::openai_api_key::get_openai_api_key;
 
 /// Value for the `OpenAI-Originator` header that is sent with requests to
 /// OpenAI.
@@ -30,7 +29,7 @@ const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4;
 #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
 pub enum WireApi {
-    /// The experimental "Responses" API exposed by OpenAI at `/v1/responses`.
+    /// The Responses API exposed by OpenAI at `/v1/responses`.
     Responses,
 
     /// Regular Chat Completions compatible with `/v1/chat/completions`.
@@ -44,7 +43,7 @@ pub struct ModelProviderInfo {
     /// Friendly display name.
     pub name: String,
     /// Base URL for the provider's OpenAI-compatible API.
-    pub base_url: String,
+    pub base_url: Option<String>,
     /// Environment variable that stores the user's API key for this provider.
     pub env_key: Option<String>,
 
@@ -78,6 +77,10 @@ pub struct ModelProviderInfo {
     /// Idle timeout (in milliseconds) to wait for activity on a streaming response before treating
     /// the connection as lost.
     pub stream_idle_timeout_ms: Option<u64>,
+
+    /// Whether this provider requires some form of standard authentication (API key, ChatGPT token).
+    #[serde(default)]
+    pub requires_auth: bool,
 }
 
 impl ModelProviderInfo {
@@ -93,11 +96,11 @@ impl ModelProviderInfo {
         &'a self,
         client: &'a reqwest::Client,
     ) -> crate::error::Result<reqwest::RequestBuilder> {
-        let api_key = self.api_key()?;
-
         let url = self.get_full_url();
 
         let mut builder = client.post(url);
+
+        let api_key = self.api_key()?;
         if let Some(key) = api_key {
             builder = builder.bearer_auth(key);
         }
@@ -117,9 +120,15 @@ impl ModelProviderInfo {
                 .join("&");
             format!("?{full_params}")
         });
-        let base_url = &self.base_url;
+        let base_url = self
+            .base_url
+            .clone()
+            .unwrap_or("https://api.openai.com/v1".to_string());
+
         match self.wire_api {
-            WireApi::Responses => format!("{base_url}/responses{query_string}"),
+            WireApi::Responses => {
+                format!("{base_url}/responses{query_string}")
+            }
             WireApi::Chat => format!("{base_url}/chat/completions{query_string}"),
         }
     }
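For clarity, `get_full_url` now defaults to the OpenAI base when `base_url` is unset. A behavioral sketch (simplified: query params omitted, names local to the example):

```rust
// Simplified model of the URL construction above.
enum WireApi {
    Responses,
    Chat,
}

fn full_url(base_url: Option<&str>, wire_api: WireApi) -> String {
    let base = base_url.unwrap_or("https://api.openai.com/v1");
    match wire_api {
        WireApi::Responses => format!("{base}/responses"),
        WireApi::Chat => format!("{base}/chat/completions"),
    }
}

fn main() {
    // With no base_url configured, the OpenAI default applies:
    assert_eq!(
        full_url(None, WireApi::Responses),
        "https://api.openai.com/v1/responses"
    );
    // A configured provider keeps its own base:
    assert_eq!(
        full_url(Some("http://localhost:11434/v1"), WireApi::Chat),
        "http://localhost:11434/v1/chat/completions"
    );
}
```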
@@ -127,7 +136,10 @@ impl ModelProviderInfo {
     /// Apply provider-specific HTTP headers (both static and environment-based)
     /// onto an existing `reqwest::RequestBuilder` and return the updated
     /// builder.
-    fn apply_http_headers(&self, mut builder: reqwest::RequestBuilder) -> reqwest::RequestBuilder {
+    pub fn apply_http_headers(
+        &self,
+        mut builder: reqwest::RequestBuilder,
+    ) -> reqwest::RequestBuilder {
         if let Some(extra) = &self.http_headers {
             for (k, v) in extra {
                 builder = builder.header(k, v);
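The visibility change matters because the Responses path in client.rs now builds its own `POST` (to hit the auth-dependent endpoint) instead of going through `create_request_builder`, yet it still needs the provider's headers. A hypothetical caller sketch — the `codex_core` crate path is an assumption:

```rust
use codex_core::ModelProviderInfo;

/// Build a POST to an arbitrary URL while still applying the provider's
/// static and environment-based headers.
fn post_with_provider_headers(
    client: &reqwest::Client,
    provider: &ModelProviderInfo,
    url: &str,
) -> reqwest::RequestBuilder {
    provider.apply_http_headers(client.post(url))
}
```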
@@ -152,11 +164,7 @@ impl ModelProviderInfo {
     fn api_key(&self) -> crate::error::Result<Option<String>> {
         match &self.env_key {
             Some(env_key) => {
-                let env_value = if env_key == crate::openai_api_key::OPENAI_API_KEY_ENV_VAR {
-                    get_openai_api_key().map_or_else(|| Err(VarError::NotPresent), Ok)
-                } else {
-                    std::env::var(env_key)
-                };
+                let env_value = std::env::var(env_key);
                 env_value
                     .and_then(|v| {
                         if v.trim().is_empty() {
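With the `get_openai_api_key` indirection gone (the module itself is deleted at the end of this diff), key lookup reduces to a plain environment read. A simplified model of the remaining behavior — the error type here stands in for the crate's `EnvVarError`:

```rust
/// Sketch of `api_key` after this change: a configured `env_key` must
/// resolve to a non-blank value; blank or missing values are an error the
/// caller can surface with instructions. No key configured is not an error.
fn api_key(env_key: Option<&str>) -> Result<Option<String>, String> {
    match env_key {
        Some(key) => std::env::var(key)
            .ok()
            .filter(|v| !v.trim().is_empty())
            .map(Some)
            .ok_or_else(|| format!("{key} is not set or is empty")),
        None => Ok(None),
    }
}
```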
@@ -204,47 +212,51 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
     // providers are bundled with Codex CLI, so we only include the OpenAI
     // provider by default. Users are encouraged to add to `model_providers`
     // in config.toml to add their own providers.
-    [
-        (
-            "openai",
-            P {
-                name: "OpenAI".into(),
-                // Allow users to override the default OpenAI endpoint by
-                // exporting `OPENAI_BASE_URL`. This is useful when pointing
-                // Codex at a proxy, mock server, or Azure-style deployment
-                // without requiring a full TOML override for the built-in
-                // OpenAI provider.
-                base_url: std::env::var("OPENAI_BASE_URL")
-                    .ok()
-                    .filter(|v| !v.trim().is_empty())
-                    .unwrap_or_else(|| "https://api.openai.com/v1".to_string()),
-                env_key: Some("OPENAI_API_KEY".into()),
-                env_key_instructions: Some("Create an API key (https://platform.openai.com) and export it as an environment variable.".into()),
-                wire_api: WireApi::Responses,
-                query_params: None,
-                http_headers: Some(
-                    [
-                        ("originator".to_string(), OPENAI_ORIGINATOR_HEADER.to_string()),
-                        ("version".to_string(), env!("CARGO_PKG_VERSION").to_string()),
-                    ]
-                    .into_iter()
-                    .collect(),
-                ),
-                env_http_headers: Some(
-                    [
-                        ("OpenAI-Organization".to_string(), "OPENAI_ORGANIZATION".to_string()),
-                        ("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
-                    ]
-                    .into_iter()
-                    .collect(),
-                ),
-                // Use global defaults for retry/timeout unless overridden in config.toml.
-                request_max_retries: None,
-                stream_max_retries: None,
-                stream_idle_timeout_ms: None,
-            },
-        ),
-    ]
+    [(
+        "openai",
+        P {
+            name: "OpenAI".into(),
+            // Allow users to override the default OpenAI endpoint by
+            // exporting `OPENAI_BASE_URL`. This is useful when pointing
+            // Codex at a proxy, mock server, or Azure-style deployment
+            // without requiring a full TOML override for the built-in
+            // OpenAI provider.
+            base_url: std::env::var("OPENAI_BASE_URL")
+                .ok()
+                .filter(|v| !v.trim().is_empty()),
+            env_key: None,
+            env_key_instructions: None,
+            wire_api: WireApi::Responses,
+            query_params: None,
+            http_headers: Some(
+                [
+                    (
+                        "originator".to_string(),
+                        OPENAI_ORIGINATOR_HEADER.to_string(),
+                    ),
+                    ("version".to_string(), env!("CARGO_PKG_VERSION").to_string()),
+                ]
+                .into_iter()
+                .collect(),
+            ),
+            env_http_headers: Some(
+                [
+                    (
+                        "OpenAI-Organization".to_string(),
+                        "OPENAI_ORGANIZATION".to_string(),
+                    ),
+                    ("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
+                ]
+                .into_iter()
+                .collect(),
+            ),
+            // Use global defaults for retry/timeout unless overridden in config.toml.
+            request_max_retries: None,
+            stream_max_retries: None,
+            stream_idle_timeout_ms: None,
+            requires_auth: true,
+        },
+    )]
     .into_iter()
     .map(|(k, v)| (k.to_string(), v))
     .collect()
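Note the built-in provider's changed posture: `env_key` is now `None` and `base_url` stays unset unless `OPENAI_BASE_URL` is exported, deferring both credential and endpoint choice to `CodexAuth`. The override filter in isolation:

```rust
/// Sketch of the `OPENAI_BASE_URL` handling above: the override is taken
/// only when the variable is set and non-blank; otherwise `base_url`
/// remains `None` and the auth mode in client.rs picks the endpoint.
fn base_url_override() -> Option<String> {
    std::env::var("OPENAI_BASE_URL")
        .ok()
        .filter(|v| !v.trim().is_empty())
}
```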
@@ -264,7 +276,7 @@ base_url = "http://localhost:11434/v1"
 "#;
         let expected_provider = ModelProviderInfo {
             name: "Ollama".into(),
-            base_url: "http://localhost:11434/v1".into(),
+            base_url: Some("http://localhost:11434/v1".into()),
             env_key: None,
             env_key_instructions: None,
             wire_api: WireApi::Chat,
@@ -274,6 +286,7 @@ base_url = "http://localhost:11434/v1"
             request_max_retries: None,
             stream_max_retries: None,
             stream_idle_timeout_ms: None,
+            requires_auth: false,
         };
 
         let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -290,7 +303,7 @@ query_params = { api-version = "2025-04-01-preview" }
 "#;
         let expected_provider = ModelProviderInfo {
             name: "Azure".into(),
-            base_url: "https://xxxxx.openai.azure.com/openai".into(),
+            base_url: Some("https://xxxxx.openai.azure.com/openai".into()),
             env_key: Some("AZURE_OPENAI_API_KEY".into()),
             env_key_instructions: None,
             wire_api: WireApi::Chat,
@@ -302,6 +315,7 @@ query_params = { api-version = "2025-04-01-preview" }
             request_max_retries: None,
             stream_max_retries: None,
             stream_idle_timeout_ms: None,
+            requires_auth: false,
         };
 
         let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -319,7 +333,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
 "#;
         let expected_provider = ModelProviderInfo {
             name: "Example".into(),
-            base_url: "https://example.com".into(),
+            base_url: Some("https://example.com".into()),
             env_key: Some("API_KEY".into()),
             env_key_instructions: None,
             wire_api: WireApi::Chat,
@@ -333,6 +347,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
             request_max_retries: None,
             stream_max_retries: None,
             stream_idle_timeout_ms: None,
+            requires_auth: false,
         };
 
         let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -1,24 +0,0 @@
-use std::env;
-use std::sync::LazyLock;
-use std::sync::RwLock;
-
-pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
-
-static OPENAI_API_KEY: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| {
-    let val = env::var(OPENAI_API_KEY_ENV_VAR)
-        .ok()
-        .and_then(|s| if s.is_empty() { None } else { Some(s) });
-    RwLock::new(val)
-});
-
-pub fn get_openai_api_key() -> Option<String> {
-    #![allow(clippy::unwrap_used)]
-    OPENAI_API_KEY.read().unwrap().clone()
-}
-
-pub fn set_openai_api_key(value: String) {
-    #![allow(clippy::unwrap_used)]
-    if !value.is_empty() {
-        *OPENAI_API_KEY.write().unwrap() = Some(value);
-    }
-}