Complete comprehensive Codex → LLMX branding update

Fixed all remaining user-facing "Codex" references across the entire codebase:
- Updated all UI strings and error messages
- Fixed GitHub issue templates and workflows
- Updated MCP server tool descriptions and error messages
- Fixed all test messages and comments
- Updated documentation comments
- Changed auth keyring service name to "LLMX Auth"

Reduced user-facing occurrences from 201 to zero; only code identifiers (struct/type names) still contain "Codex".
Changes span 78 files across Rust, Python, YAML, and JSON.

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Sebastian Krüger
2025-11-11 16:01:04 +01:00
parent cf72f42a77
commit 831e6fa6b5
78 changed files with 242 additions and 242 deletions

View File

@@ -122,7 +122,7 @@ impl AuthStorageBackend for FileAuthStorage {
}
}
const KEYRING_SERVICE: &str = "Codex Auth";
const KEYRING_SERVICE: &str = "LLMX Auth";
// turns codex_home path into a stable, short key string
fn compute_store_key(codex_home: &Path) -> std::io::Result<String> {

View File

@@ -421,7 +421,7 @@ pub fn apply_blocking(
std::fs::create_dir_all(codex_home).with_context(|| {
format!(
"failed to create Codex home directory at {}",
"failed to create LLMX home directory at {}",
codex_home.display()
)
})?;

View File

@@ -240,17 +240,17 @@ fn sanitize_user_agent(candidate: String, fallback: &str) -> String {
.collect();
if !sanitized.is_empty() && HeaderValue::from_str(sanitized.as_str()).is_ok() {
tracing::warn!(
"Sanitized Codex user agent because provided suffix contained invalid header characters"
"Sanitized LLMX user agent because provided suffix contained invalid header characters"
);
sanitized
} else if HeaderValue::from_str(fallback).is_ok() {
tracing::warn!(
"Falling back to base Codex user agent because provided suffix could not be sanitized"
"Falling back to base LLMX user agent because provided suffix could not be sanitized"
);
fallback.to_string()
} else {
tracing::warn!(
"Falling back to default Codex originator because base user agent string is invalid"
"Falling back to default LLMX originator because base user agent string is invalid"
);
originator().value.clone()
}
@@ -331,7 +331,7 @@ mod tests {
.expect("originator header missing");
assert_eq!(originator_header.to_str().unwrap(), "codex_cli_rs");
// User-Agent matches the computed Codex UA for that originator
// User-Agent matches the computed LLMX UA for that originator
let expected_ua = get_codex_user_agent();
let ua_header = headers
.get("user-agent")

View File

@@ -72,7 +72,7 @@ pub enum CodexErr {
Stream(String, Option<Duration>),
#[error(
"Codex ran out of room in the model's context window. Start a new conversation or clear earlier history before retrying."
"LLMX ran out of room in the model's context window. Start a new conversation or clear earlier history before retrying."
)]
ContextWindowExceeded,
@@ -113,7 +113,7 @@ pub enum CodexErr {
QuotaExceeded,
#[error(
"To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
"To use LLMX with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
)]
UsageNotIncluded,
@@ -321,7 +321,7 @@ impl std::fmt::Display for UsageLimitReachedError {
)
}
Some(PlanType::Known(KnownPlan::Free)) => {
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
"You've hit your usage limit. Upgrade to Plus to continue using LLMX (https://openai.com/chatgpt/pricing)."
.to_string()
}
Some(PlanType::Known(KnownPlan::Pro)) => format!(
@@ -596,7 +596,7 @@ mod tests {
};
assert_eq!(
err.to_string(),
"You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
"You've hit your usage limit. Upgrade to Plus to continue using LLMX (https://openai.com/chatgpt/pricing)."
);
}

View File

@@ -219,7 +219,7 @@ mod tests {
id: Some("msg-1".to_string()),
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "Hello from Codex".to_string(),
text: "Hello from LLMX".to_string(),
}],
};
@@ -230,7 +230,7 @@ mod tests {
let Some(AgentMessageContent::Text { text }) = message.content.first() else {
panic!("expected agent message text content");
};
assert_eq!(text, "Hello from Codex");
assert_eq!(text, "Hello from LLMX");
}
other => panic!("expected TurnItem::AgentMessage, got {other:?}"),
}

View File

@@ -183,7 +183,7 @@ impl Codex {
session_source,
};
// Generate a unique ID for the lifetime of this Codex session.
// Generate a unique ID for the lifetime of this LLMX session.
let session_source_clone = session_configuration.session_source.clone();
let session = Session::new(
session_configuration,
@@ -1583,7 +1583,7 @@ mod handlers {
pub async fn shutdown(sess: &Arc<Session>, sub_id: String) -> bool {
sess.abort_all_tasks(TurnAbortReason::Interrupted).await;
info!("Shutting down Codex instance");
info!("Shutting down LLMX instance");
// Gracefully flush and shutdown rollout recorder on session end so tests
// that inspect the rollout file do not race with the background writer.

View File

@@ -182,9 +182,9 @@ impl McpConnectionManager {
client_info: Implementation {
name: "codex-mcp-client".to_owned(),
version: env!("CARGO_PKG_VERSION").to_owned(),
title: Some("Codex".into()),
// This field is used by Codex when it is an MCP
// server: it should not be used when Codex is
title: Some("LLMX".into()),
// This field is used by LLMX when it is an MCP
// server: it should not be used when LLMX is
// an MCP client.
user_agent: None,
},

View File

@@ -1,7 +1,7 @@
//! Registry of model providers supported by Codex.
//! Registry of model providers supported by LLMX.
//!
//! Providers can be defined in two places:
//! 1. Built-in defaults compiled into the binary so Codex works out-of-the-box.
//! 1. Built-in defaults compiled into the binary so LLMX works out-of-the-box.
//! 2. User-defined entries inside `~/.llmx/config.toml` under the `model_providers`
//! key. These override or extend the defaults at runtime.
@@ -299,7 +299,7 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
name: "OpenAI".into(),
// Allow users to override the default OpenAI endpoint by
// exporting `OPENAI_BASE_URL`. This is useful when pointing
// Codex at a proxy, mock server, or Azure-style deployment
// LLMX at a proxy, mock server, or Azure-style deployment
// without requiring a full TOML override for the built-in
// OpenAI provider.
base_url: std::env::var("OPENAI_BASE_URL")

View File

@@ -8,7 +8,7 @@ pub(crate) fn is_persisted_response_item(item: &RolloutItem) -> bool {
match item {
RolloutItem::ResponseItem(item) => should_persist_response_item(item),
RolloutItem::EventMsg(ev) => should_persist_event_msg(ev),
// Persist Codex executive markers so we can analyze flows (e.g., compaction, API turns).
// Persist LLMX executive markers so we can analyze flows (e.g., compaction, API turns).
RolloutItem::Compacted(_) | RolloutItem::TurnContext(_) | RolloutItem::SessionMeta(_) => {
true
}

View File

@@ -1,4 +1,4 @@
//! Persist Codex session rollouts (.jsonl) so sessions can be replayed or inspected later.
//! Persist LLMX session rollouts (.jsonl) so sessions can be replayed or inspected later.
use std::fs::File;
use std::fs::{self};

View File

@@ -60,7 +60,7 @@ pub(crate) async fn spawn_child_async(
cmd.env(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR, "1");
}
// If this Codex process dies (including being killed via SIGKILL), we want
// If this LLMX process dies (including being killed via SIGKILL), we want
// any child processes that were spawned as part of a `"shell"` tool call
// to also be terminated.
@@ -83,9 +83,9 @@ pub(crate) async fn spawn_child_async(
}
// Though if there was a race condition and this pre_exec() block is
// run _after_ the parent (i.e., the Codex process) has already
// run _after_ the parent (i.e., the LLMX process) has already
// exited, then parent will be the closest configured "subreaper"
// ancestor process, or PID 1 (init). If the Codex process has exited
// ancestor process, or PID 1 (init). If the LLMX process has exited
// already, so should the child process.
if libc::getppid() != parent_pid {
libc::raise(libc::SIGTERM);

View File

@@ -360,7 +360,7 @@ fn create_test_sync_tool() -> ToolSpec {
ToolSpec::Function(ResponsesApiTool {
name: "test_sync_tool".to_string(),
description: "Internal synchronization helper used by Codex integration tests.".to_string(),
description: "Internal synchronization helper used by LLMX integration tests.".to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,

View File

@@ -193,7 +193,7 @@ fn first_assistant(messages: &[Value]) -> &Value {
async fn omits_reasoning_when_none_present() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -210,7 +210,7 @@ async fn omits_reasoning_when_none_present() {
async fn attaches_reasoning_to_previous_assistant() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -232,7 +232,7 @@ async fn attaches_reasoning_to_previous_assistant() {
async fn attaches_reasoning_to_function_call_anchor() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -259,7 +259,7 @@ async fn attaches_reasoning_to_function_call_anchor() {
async fn attaches_reasoning_to_local_shell_call() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -284,7 +284,7 @@ async fn attaches_reasoning_to_local_shell_call() {
async fn drops_reasoning_when_last_role_is_user() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -303,7 +303,7 @@ async fn drops_reasoning_when_last_role_is_user() {
async fn ignores_reasoning_before_last_user() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -323,7 +323,7 @@ async fn ignores_reasoning_before_last_user() {
async fn skips_empty_reasoning_segments() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -344,7 +344,7 @@ async fn skips_empty_reasoning_segments() {
async fn suppresses_duplicate_assistant_messages() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}

View File

@@ -159,7 +159,7 @@ fn assert_reasoning(item: &ResponseItem, expected: &str) {
async fn streams_text_without_reasoning() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -195,7 +195,7 @@ async fn streams_text_without_reasoning() {
async fn streams_reasoning_from_string_delta() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -246,7 +246,7 @@ async fn streams_reasoning_from_string_delta() {
async fn streams_reasoning_from_object_delta() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -303,7 +303,7 @@ async fn streams_reasoning_from_object_delta() {
async fn streams_reasoning_from_final_message() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -335,7 +335,7 @@ async fn streams_reasoning_from_final_message() {
async fn streams_reasoning_before_tool_call() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -385,7 +385,7 @@ async fn streams_reasoning_before_tool_call() {
async fn chat_sse_emits_failed_on_parse_error() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -422,7 +422,7 @@ async fn chat_sse_emits_failed_on_parse_error() {
async fn chat_sse_done_chunk_emits_event() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -445,7 +445,7 @@ async fn chat_sse_done_chunk_emits_event() {
async fn chat_sse_emits_error_on_invalid_utf8() {
if network_disabled() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}

View File

@@ -346,7 +346,7 @@ macro_rules! skip_if_no_network {
() => {{
if ::std::env::var($crate::sandbox_network_env_var()).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return;
}
@@ -354,7 +354,7 @@ macro_rules! skip_if_no_network {
($return_value:expr $(,)?) => {{
if ::std::env::var($crate::sandbox_network_env_var()).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
);
return $return_value;
}

View File

@@ -126,7 +126,7 @@ async fn exec_cli_applies_experimental_instructions_file() {
let custom_path_str = custom_path.to_string_lossy().replace('\\', "/");
// Build a provider override that points at the mock server and instructs
// Codex to use the Responses API with the dummy env var.
// LLMX to use the Responses API with the dummy env var.
let provider_override = format!(
"model_providers.mock={{ name = \"mock\", base_url = \"{}/v1\", env_key = \"PATH\", wire_api = \"responses\" }}",
server.uri()

View File

@@ -240,7 +240,7 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
responses::mount_sse_once_match(&server, path("/v1/responses"), sse_completed("resp1"))
.await;
// Configure Codex to resume from our file
// Configure LLMX to resume from our file
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
@@ -1325,7 +1325,7 @@ fn create_dummy_codex_auth() -> CodexAuth {
/// We assert that the `input` sent on each turn contains the expected conversation history
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn history_dedupes_streamed_and_final_messages_across_turns() {
// Skip under Codex sandbox network restrictions (mirrors other tests).
// Skip under LLMX sandbox network restrictions (mirrors other tests).
skip_if_no_network!();
// Mock server that will receive three sequential requests and return the same SSE stream

View File

@@ -122,7 +122,7 @@ async fn summarize_context_three_requests_and_instructions() {
};
let third_request_mock = mount_sse_once_match(&server, third_matcher, sse3).await;
// Build config pointing to the mock server and spawn Codex.
// Build config pointing to the mock server and spawn LLMX.
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
@@ -257,7 +257,7 @@ async fn summarize_context_three_requests_and_instructions() {
"third request should not include the summarize trigger"
);
// Shut down Codex to flush rollout entries before inspecting the file.
// Shut down LLMX to flush rollout entries before inspecting the file.
codex.submit(Op::Shutdown).await.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;

View File

@@ -4,7 +4,7 @@
//!
//! Each test sets up a mocked SSE conversation and drives the conversation through
//! a specific sequence of operations. After every operation we capture the
//! request payload that Codex would send to the model and assert that the
//! request payload that LLMX would send to the model and assert that the
//! model-visible history matches the expected sequence of messages.
use super::compact::COMPACT_WARNING_MESSAGE;

View File

@@ -44,7 +44,7 @@ async fn fork_conversation_twice_drops_to_first_message() {
.mount(&server)
.await;
// Configure Codex to use the mock server.
// Configure LLMX to use the mock server.
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()

View File

@@ -66,7 +66,7 @@ async fn quota_exceeded_emits_single_error_event() -> Result<()> {
}
}
assert_eq!(error_events, 1, "expected exactly one Codex:Error event");
assert_eq!(error_events, 1, "expected exactly one LLMX:Error event");
Ok(())
}

View File

@@ -40,7 +40,7 @@ use wiremock::matchers::path;
/// in that order when the model returns a structured review JSON payload.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_op_emits_lifecycle_and_review_output() {
// Skip under Codex sandbox network restrictions.
// Skip under LLMX sandbox network restrictions.
skip_if_no_network!();
// Start mock Responses API server. Return a single assistant message whose

View File

@@ -68,12 +68,12 @@ fn init_git_repo(path: &Path) -> Result<()> {
// CI variance (default-branch hints, line ending differences, etc.).
git(path, &["init", "--initial-branch=main"])?;
git(path, &["config", "core.autocrlf", "false"])?;
git(path, &["config", "user.name", "Codex Tests"])?;
git(path, &["config", "user.name", "LLMX Tests"])?;
git(path, &["config", "user.email", "codex-tests@example.com"])?;
// Create README.txt
let readme_path = path.join("README.txt");
fs::write(&readme_path, "Test repository initialized by Codex.\n")?;
fs::write(&readme_path, "Test repository initialized by LLMX.\n")?;
// Stage and commit
git(path, &["add", "README.txt"])?;

View File

@@ -382,7 +382,7 @@ async fn view_image_tool_placeholder_for_non_image_files() -> anyhow::Result<()>
content.iter().find_map(|span| {
if span.get("type").and_then(Value::as_str) == Some("input_text") {
let text = span.get("text").and_then(Value::as_str)?;
if text.contains("Codex could not read the local image at")
if text.contains("LLMX could not read the local image at")
&& text.contains("unsupported MIME type `application/json`")
{
return Some(text.to_string());