1 Commit

Author: dependabot[bot]
SHA1: e7582abc39
Message: chore(deps): bump lru from 0.12.5 to 0.16.2 in /llmx-rs
Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.12.5 to 0.16.2.
- [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jeromefroe/lru-rs/compare/0.12.5...0.16.2)

---
updated-dependencies:
- dependency-name: lru
  dependency-version: 0.16.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2025-11-14 18:56:21 +00:00
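
A quick smoke test of the call sites is worthwhile here: 0.12 to 0.16 spans several 0.x releases, which Cargo treats as potentially breaking even though dependabot labels the update semver-minor. A minimal usage sketch, assuming the `new`/`put`/`get` signatures from 0.12 carry over to 0.16 (the changelog linked above is the authority):

```rust
// Basic LruCache usage; these calls exist in lru 0.12.x and are assumed to be
// unchanged in 0.16.x - verify against the linked changelog.
use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    // The constructor takes a non-zero capacity.
    let mut cache: LruCache<String, u64> = LruCache::new(NonZeroUsize::new(2).unwrap());
    cache.put("a".to_string(), 1);
    cache.put("b".to_string(), 2);
    cache.put("c".to_string(), 3); // evicts "a", the least recently used entry
    assert!(cache.get("a").is_none());
    assert_eq!(cache.get("b"), Some(&2));
}
```
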
25 changed files with 713 additions and 943 deletions

llmx-rs/Cargo.lock (generated): 1398 changed lines

File diff suppressed because it is too large.

View File

@@ -43,7 +43,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.1.8"
version = "0.1.4"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024
@@ -135,7 +135,7 @@ landlock = "0.4.1"
lazy_static = "1"
libc = "0.2.175"
log = "0.4"
lru = "0.12.5"
lru = "0.16.2"
maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"

View File

@@ -138,7 +138,7 @@ impl McpProcess {
client_info: ClientInfo {
name: "llmx-app-server-tests".to_string(),
title: None,
version: "0.1.7".to_string(),
version: "0.1.4".to_string(),
},
})?);
let req_id = self.send_request("initialize", params).await?;
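
For context, these params travel inside a plain JSON-RPC 2.0 `initialize` call. A shape-only sketch; only `clientInfo` comes from the hunk above, and the envelope plus any capability fields this crate actually sends are assumptions:

```rust
fn main() {
    // Hedged sketch of the MCP "initialize" request the test issues.
    let request = serde_json::json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "initialize",
        "params": {
            "clientInfo": {
                "name": "llmx-app-server-tests",
                "title": null,
                "version": "0.1.4"
            }
        }
    });
    println!("{request}");
}
```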

View File

@@ -26,7 +26,7 @@ async fn get_user_agent_returns_current_llmx_user_agent() -> Result<()> {
let os_info = os_info::get();
let user_agent = format!(
"llmx_cli_rs/0.1.7 ({} {}; {}) {} (llmx-app-server-tests; 0.1.7)",
"llmx_cli_rs/0.1.4 ({} {}; {}) {} (llmx-app-server-tests; 0.1.4)",
os_info.os_type(),
os_info.version(),
os_info.architecture().unwrap_or("unknown"),

View File

@@ -31,7 +31,6 @@ use thiserror::Error;
const BEGIN_PATCH_MARKER: &str = "*** Begin Patch";
const END_PATCH_MARKER: &str = "*** End Patch";
const ADD_FILE_MARKER: &str = "*** Add File: ";
const CREATE_FILE_MARKER: &str = "*** Create File: "; // Alias for Add File
const DELETE_FILE_MARKER: &str = "*** Delete File: ";
const UPDATE_FILE_MARKER: &str = "*** Update File: ";
const MOVE_TO_MARKER: &str = "*** Move to: ";
@@ -246,8 +245,8 @@ fn check_start_and_end_lines_strict(
fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), ParseError> {
// Be tolerant of case mismatches and extra padding around marker strings.
let first_line = lines[0].trim();
if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER).or_else(|| first_line.strip_prefix(CREATE_FILE_MARKER)) {
// Add File (also accepts Create File as alias)
if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER) {
// Add File
let mut contents = String::new();
let mut parsed_lines = 1;
for add_line in &lines[1..] {
@@ -332,7 +331,7 @@ fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), P
Err(InvalidHunkError {
message: format!(
"'{first_line}' is not a valid hunk header. Valid hunk headers: '*** Add File: {{path}}', '*** Create File: {{path}}', '*** Delete File: {{path}}', '*** Update File: {{path}}'"
"'{first_line}' is not a valid hunk header. Valid hunk headers: '*** Add File: {{path}}', '*** Delete File: {{path}}', '*** Update File: {{path}}'"
),
line_number,
})
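
The removed constant and the widened `strip_prefix` call were the whole alias: with them, `*** Create File:` headers parsed exactly like `*** Add File:`. A self-contained sketch of that pre-change behavior (a standalone helper, not the repo's code):

```rust
// Alias handling as it existed before this change; the hunk above removes the
// CREATE_FILE_MARKER fallback, so afterwards only the canonical marker parses.
const ADD_FILE_MARKER: &str = "*** Add File: ";
const CREATE_FILE_MARKER: &str = "*** Create File: ";

fn add_file_path(first_line: &str) -> Option<&str> {
    first_line
        .strip_prefix(ADD_FILE_MARKER)
        // Fallback removed by this commit.
        .or_else(|| first_line.strip_prefix(CREATE_FILE_MARKER))
}

fn main() {
    assert_eq!(add_file_path("*** Add File: src/main.rs"), Some("src/main.rs"));
    // Accepted with the alias; rejected once this commit lands.
    assert_eq!(add_file_path("*** Create File: src/main.rs"), Some("src/main.rs"));
}
```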

View File

@@ -56,12 +56,7 @@ pub(crate) async fn stream_chat_completions(
let mut messages = Vec::<serde_json::Value>::new();
let full_instructions = prompt.get_full_instructions(model_family);
// Add cache_control to system instructions for Anthropic prompt caching
messages.push(json!({
"role": "system",
"content": full_instructions,
"cache_control": {"type": "ephemeral"}
}));
messages.push(json!({"role": "system", "content": full_instructions}));
let input = prompt.get_formatted_input();
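
The removed block tagged the system message with Anthropic's ephemeral `cache_control` marker so the provider could cache the instructions prefix; the kept line sends a plain system message. The two shapes, exactly as the hunk shows them, as a runnable sketch:

```rust
fn main() {
    let full_instructions = "...system instructions...";

    // Shape sent before this change (Anthropic prompt-caching hint attached):
    let cached = serde_json::json!({
        "role": "system",
        "content": full_instructions,
        "cache_control": {"type": "ephemeral"}
    });

    // Shape sent after this change:
    let plain = serde_json::json!({"role": "system", "content": full_instructions});

    println!("{cached}\n{plain}");
}
```
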
@@ -166,65 +161,10 @@ pub(crate) async fn stream_chat_completions(
// aggregated assistant message was recorded alongside an earlier partial).
let mut last_assistant_text: Option<String> = None;
// Build a map of which call_ids have outputs
// We'll use this to ensure we never send a FunctionCall without its corresponding output
let mut call_ids_with_outputs: std::collections::HashSet<String> = std::collections::HashSet::new();
// First pass: collect all call_ids that have outputs
for item in input.iter() {
if let ResponseItem::FunctionCallOutput { call_id, .. } = item {
call_ids_with_outputs.insert(call_id.clone());
}
}
debug!("=== Chat Completions Request Debug ===");
debug!("Input items count: {}", input.len());
debug!("Call IDs with outputs: {:?}", call_ids_with_outputs);
// Second pass: find the first FunctionCall that doesn't have an output
let mut cutoff_at_idx: Option<usize> = None;
for (idx, item) in input.iter().enumerate() {
if let ResponseItem::FunctionCall { call_id, name, .. } = item {
if !call_ids_with_outputs.contains(call_id) {
debug!("Found unanswered function call '{}' (call_id: {}) at index {}", name, call_id, idx);
cutoff_at_idx = Some(idx);
break;
}
}
}
if let Some(cutoff) = cutoff_at_idx {
debug!("Cutting off at index {} to avoid orphaned tool calls", cutoff);
} else {
debug!("No unanswered function calls found, processing all items");
}
// Track whether the MOST RECENT FunctionCall with each call_id was skipped
// This allows the same call_id to be retried - we only skip outputs for the specific skipped calls
let mut call_id_skip_state: std::collections::HashMap<String, bool> = std::collections::HashMap::new();
// Track call_ids of skipped function calls so we can also skip their outputs
let mut skipped_call_ids: std::collections::HashSet<String> = std::collections::HashSet::new();
for (idx, item) in input.iter().enumerate() {
// Stop processing if we've reached an unanswered function call
if let Some(cutoff) = cutoff_at_idx {
if idx >= cutoff {
debug!("Stopping at index {} due to unanswered function call", idx);
break;
}
}
debug!("Processing item {} of type: {}", idx, match item {
ResponseItem::Message { role, .. } => format!("Message(role={})", role),
ResponseItem::FunctionCall { name, call_id, .. } => format!("FunctionCall(name={}, call_id={})", name, call_id),
ResponseItem::FunctionCallOutput { call_id, .. } => format!("FunctionCallOutput(call_id={})", call_id),
ResponseItem::LocalShellCall { .. } => "LocalShellCall".to_string(),
ResponseItem::CustomToolCall { .. } => "CustomToolCall".to_string(),
ResponseItem::CustomToolCallOutput { .. } => "CustomToolCallOutput".to_string(),
ResponseItem::Reasoning { .. } => "Reasoning".to_string(),
ResponseItem::WebSearchCall { .. } => "WebSearchCall".to_string(),
ResponseItem::GhostSnapshot { .. } => "GhostSnapshot".to_string(),
ResponseItem::Other => "Other".to_string(),
});
match item {
ResponseItem::Message { role, content, .. } => {
// Build content either as a plain string (typical for assistant text)
@@ -294,14 +234,11 @@ pub(crate) async fn stream_chat_completions(
// If invalid, skip this function call to avoid API errors
if serde_json::from_str::<serde_json::Value>(arguments).is_err() {
debug!("Skipping malformed function call with invalid JSON arguments: {}", arguments);
// Mark this call_id's most recent state as skipped
call_id_skip_state.insert(call_id.clone(), true);
// Track this call_id so we can also skip its corresponding output
skipped_call_ids.insert(call_id.clone());
continue;
}
// Mark this call_id's most recent state as NOT skipped (valid call)
call_id_skip_state.insert(call_id.clone(), false);
let mut msg = json!({
"role": "assistant",
"content": null,
@@ -346,9 +283,9 @@ pub(crate) async fn stream_chat_completions(
messages.push(msg);
}
ResponseItem::FunctionCallOutput { call_id, output } => {
// Skip outputs only if the MOST RECENT FunctionCall with this call_id was skipped
if call_id_skip_state.get(call_id) == Some(&true) {
debug!("Skipping function call output for most recent skipped call_id: {}", call_id);
// Skip outputs for function calls that were skipped due to malformed arguments
if skipped_call_ids.contains(call_id) {
debug!("Skipping function call output for skipped call_id: {}", call_id);
continue;
}
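
The rule kept by these hunks is narrower than the removed cutoff logic: only function calls with unparseable `arguments` are dropped, and their later outputs are dropped with them, so the request never contains an orphaned tool result. A self-contained sketch of that rule (only `serde_json` required; the data is made up):

```rust
// Skip function calls whose arguments are not valid JSON, and skip the
// corresponding outputs too - mirroring the skipped_call_ids handling above.
use std::collections::HashSet;

fn main() {
    let calls = vec![("call_1", r#"{"path": "a.txt"}"#), ("call_2", "{not json")];
    let mut skipped_call_ids: HashSet<&str> = HashSet::new();

    for (call_id, arguments) in calls {
        if serde_json::from_str::<serde_json::Value>(arguments).is_err() {
            skipped_call_ids.insert(call_id);
            continue; // drop the malformed call entirely
        }
        // ...push the assistant message carrying the tool_call here...
    }

    let outputs = vec![("call_1", "ok"), ("call_2", "never sent")];
    for (call_id, _output) in outputs {
        if skipped_call_ids.contains(call_id) {
            continue; // its call was dropped, so drop the output as well
        }
        // ...push the tool-role message carrying the output here...
    }

    println!("skipped: {skipped_call_ids:?}");
}
```
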
@@ -417,39 +354,14 @@ pub(crate) async fn stream_chat_completions(
}
}
debug!("Built {} messages for API request", messages.len());
// Add cache_control to conversation history for Anthropic prompt caching
// Add it to a message that's at least 3 messages before the end (stable history)
// This caches the earlier conversation while keeping recent turns uncached
if messages.len() > 4 {
let cache_idx = messages.len().saturating_sub(4);
if let Some(msg) = messages.get_mut(cache_idx) {
if let Some(obj) = msg.as_object_mut() {
obj.insert("cache_control".to_string(), json!({"type": "ephemeral"}));
debug!("Added cache_control to message at index {} (conversation history)", cache_idx);
}
}
}
debug!("=== End Chat Completions Request Debug ===");
let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
let mut payload = json!({
let payload = json!({
"model": model_family.slug,
"messages": messages,
"stream": true,
"tools": tools_json,
});
// Add max_tokens - required by Anthropic Messages API
// Use provider config value or default to 20480 (5 * 4096, Claude Sonnet 4.5 supports up to 64K)
let max_tokens = provider.max_tokens.unwrap_or(20480);
if let Some(obj) = payload.as_object_mut() {
obj.insert("max_tokens".to_string(), json!(max_tokens));
}
debug!("Using max_tokens: {}", max_tokens);
debug!(
"POST to {}: {}",
provider.get_full_url(&None),
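
With the trailing insertion gone, the request body is just the four fields built here; the removed branch additionally injected a top-level `max_tokens` (the provider's configured value or a 20480 default). A sketch of the resulting shape (model slug and tool list illustrative):

```rust
fn main() {
    let messages = serde_json::json!([{"role": "system", "content": "..."}]);
    let tools_json = serde_json::json!([]); // tool entries elided

    // Chat Completions payload shape after this change - no "max_tokens" key.
    let payload = serde_json::json!({
        "model": "example-model-slug", // illustrative; the code uses model_family.slug
        "messages": messages,
        "stream": true,
        "tools": tools_json,
    });
    println!("{payload}");
}
```
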
@@ -610,9 +522,7 @@ async fn process_chat_sse<S>(
) where
S: Stream<Item = Result<Bytes>> + Unpin,
{
debug!("process_chat_sse started, idle_timeout={:?}", idle_timeout);
let mut stream = stream.eventsource();
debug!("SSE stream initialized, waiting for first event");
// State to accumulate a function call across streaming chunks.
// OpenAI may split the `arguments` string over multiple `delta` events
@@ -647,14 +557,7 @@ async fn process_chat_sse<S>(
return;
}
Ok(None) => {
// Stream closed gracefully - emit any pending items first, then Completed
debug!("Stream closed gracefully (Ok(None)), emitting pending items");
if let Some(item) = assistant_item.take() {
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
if let Some(item) = reasoning_item.take() {
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
// Stream closed gracefully - emit Completed with dummy id.
let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
@@ -850,7 +753,6 @@ async fn process_chat_sse<S>(
// Emit end-of-turn when finish_reason signals completion.
if let Some(finish_reason) = choice.get("finish_reason").and_then(|v| v.as_str()) {
debug!("Received finish_reason: {}", finish_reason);
match finish_reason {
"tool_calls" if fn_call_state.active => {
// First, flush the terminal raw reasoning so UIs can finalize
@@ -869,46 +771,27 @@ async fn process_chat_sse<S>(
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
"stop" | "length" => {
// Regular turn without tool-call, or hit max_tokens limit.
debug!("Processing finish_reason={}, assistant_item.is_some()={}, reasoning_item.is_some()={}",
finish_reason, assistant_item.is_some(), reasoning_item.is_some());
// Emit the final assistant message as a single OutputItemDone so non-delta consumers see the result.
"stop" => {
// Regular turn without tool-call. Emit the final assistant message
// as a single OutputItemDone so non-delta consumers see the result.
if let Some(item) = assistant_item.take() {
debug!("Emitting assistant_item: {:?}", item);
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
} else {
debug!("No assistant_item to emit");
}
// Also emit a terminal Reasoning item so UIs can finalize raw reasoning.
if let Some(item) = reasoning_item.take() {
debug!("Emitting reasoning_item");
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
} else {
debug!("No reasoning_item to emit");
}
}
_ => {
// Unknown finish_reason - still emit pending items to avoid hanging
debug!("Unknown finish_reason: {}, emitting pending items", finish_reason);
if let Some(item) = assistant_item.take() {
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
if let Some(item) = reasoning_item.take() {
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
}
_ => {}
}
// Emit Completed regardless of reason so the agent can advance.
debug!("Sending Completed event after finish_reason={}", finish_reason);
let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
token_usage: token_usage.clone(),
}))
.await;
debug!("Completed event sent, returning from SSE processor");
// Prepare for potential next turn (should not happen in same stream).
// fn_call_state = FunctionCallState::default();
@@ -917,22 +800,6 @@ async fn process_chat_sse<S>(
}
}
}
// Stream ended without finish_reason - this can happen when the stream closes abruptly
debug!("Stream ended without finish_reason, emitting final items and Completed event");
if let Some(item) = assistant_item.take() {
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
if let Some(item) = reasoning_item.take() {
let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
}
// Send Completed event so llmx knows the turn is done
let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
token_usage: token_usage.clone(),
}))
.await;
}
/// Optional client-side aggregation helper
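
For reference, the `finish_reason` this simplified match inspects arrives in ordinary Chat Completions streaming chunks. A trimmed sketch of the wire format the handler consumes (real chunks also carry id, model, and created fields):

```text
data: {"object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}

data: {"object":"chat.completion.chunk","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}

data: [DONE]
```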

View File

@@ -1123,7 +1123,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -1188,7 +1187,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -1226,7 +1224,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -1266,7 +1263,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -1302,7 +1298,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -1338,7 +1333,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -1443,7 +1437,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
max_tokens: None,
requires_openai_auth: false,
};

View File

@@ -2809,7 +2809,6 @@ model_verbosity = "high"
request_max_retries: Some(4),
stream_max_retries: Some(10),
stream_idle_timeout_ms: Some(300_000),
max_tokens: None,
requires_openai_auth: false,
};
let model_provider_map = {

View File

@@ -54,7 +54,7 @@ fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
Some(UserMessageItem::new(&content))
}
fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> Option<AgentMessageItem> {
fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> AgentMessageItem {
let mut content: Vec<AgentMessageContent> = Vec::new();
for content_item in message.iter() {
match content_item {
@@ -69,23 +69,18 @@ fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> Option<A
}
}
}
// If the message has no content, return None to signal turn completion
// This happens when the API ends a turn with an empty assistant message (e.g., after tool calls)
if content.is_empty() {
return None;
}
let id = id.cloned().unwrap_or_else(|| Uuid::new_v4().to_string());
Some(AgentMessageItem { id, content })
AgentMessageItem { id, content }
}
pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> {
match item {
ResponseItem::Message { role, content, id } => match role.as_str() {
"user" => parse_user_message(content).map(TurnItem::UserMessage),
"assistant" => parse_agent_message(id.as_ref(), content)
.map(TurnItem::AgentMessage),
"assistant" => Some(TurnItem::AgentMessage(parse_agent_message(
id.as_ref(),
content,
))),
"system" => None,
_ => None,
},

View File

@@ -87,10 +87,6 @@ pub struct ModelProviderInfo {
/// the connection as lost.
pub stream_idle_timeout_ms: Option<u64>,
/// Maximum number of tokens to generate in the response. If not specified, defaults to 8192.
/// This is required by some providers (e.g., Anthropic via LiteLLM).
pub max_tokens: Option<i64>,
/// Does this provider require an OpenAI API Key or ChatGPT login token? If true,
/// user is presented with login screen on first run, and login preference and token/key
/// are stored in auth.json. If false (which is the default), login screen is skipped,
@@ -294,7 +290,6 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
},
),
@@ -335,7 +330,6 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: true,
},
),
@@ -381,7 +375,6 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
}
}
@@ -422,7 +415,6 @@ base_url = "http://localhost:11434/v1"
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
};
@@ -453,7 +445,6 @@ query_params = { api-version = "2025-04-01-preview" }
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
};
@@ -487,7 +478,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
};
@@ -511,7 +501,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
}
}
@@ -545,7 +534,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
};
assert!(named_provider.is_azure_responses_endpoint());
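
With the field gone, a `max_tokens` key in a provider definition no longer corresponds to anything on `ModelProviderInfo`. A hedged sketch of a provider entry after this change, using only field names visible in the struct and fixtures above; the `[model_providers.*]` table layout is an assumption about this project's config format:

```toml
# Hypothetical provider entry; note there is no max_tokens key anymore.
[model_providers.example]
base_url = "http://localhost:11434/v1"
request_max_retries = 4
stream_max_retries = 10
stream_idle_timeout_ms = 300000
```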

View File

@@ -693,7 +693,7 @@ pub(crate) fn create_tools_json_for_chat_completions_api(
// We start with the JSON for the Responses API and then rewrite it to match
// the chat completions tool call format.
let responses_api_tools_json = create_tools_json_for_responses_api(tools)?;
let mut tools_json = responses_api_tools_json
let tools_json = responses_api_tools_json
.into_iter()
.filter_map(|mut tool| {
if tool.get("type") != Some(&serde_json::Value::String("function".to_string())) {
@@ -712,14 +712,6 @@ pub(crate) fn create_tools_json_for_chat_completions_api(
}
})
.collect::<Vec<serde_json::Value>>();
// Add cache_control to the last tool to enable Anthropic prompt caching
if let Some(last_tool) = tools_json.last_mut() {
if let Some(obj) = last_tool.as_object_mut() {
obj.insert("cache_control".to_string(), json!({"type": "ephemeral"}));
}
}
Ok(tools_json)
}
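
With the cache-tagging loop removed, each rewritten entry is just the standard Chat Completions tool wrapper. A shape sketch (the tool name and parameter schema are illustrative, not taken from this crate):

```rust
fn main() {
    // One rewritten tool entry; the removed code additionally tagged the last
    // entry with "cache_control": {"type": "ephemeral"} for Anthropic caching.
    let tool = serde_json::json!({
        "type": "function",
        "function": {
            "name": "shell",                      // illustrative
            "description": "Run a shell command", // illustrative
            "parameters": {
                "type": "object",
                "properties": { "command": { "type": "string" } },
                "required": ["command"]
            }
        }
    });
    println!("{tool}");
}
```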

View File

@@ -58,7 +58,6 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
max_tokens: None,
requires_openai_auth: false,
};

View File

@@ -58,7 +58,6 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
max_tokens: None,
requires_openai_auth: false,
};

View File

@@ -47,7 +47,6 @@ async fn responses_stream_includes_subagent_header_on_review() {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -136,7 +135,6 @@ async fn responses_stream_includes_subagent_header_on_other() {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
max_tokens: None,
requires_openai_auth: false,
};

View File

@@ -712,7 +712,6 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
max_tokens: None,
requires_openai_auth: false,
};
@@ -1196,7 +1195,6 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
};
@@ -1274,7 +1272,6 @@ async fn env_var_overrides_loaded_auth() {
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
max_tokens: None,
requires_openai_auth: false,
};

View File

@@ -72,7 +72,6 @@ async fn continue_after_stream_error() {
request_max_retries: Some(1),
stream_max_retries: Some(1),
stream_idle_timeout_ms: Some(2_000),
max_tokens: None,
requires_openai_auth: false,
};

View File

@@ -80,7 +80,6 @@ async fn retries_on_early_close() {
request_max_retries: Some(0),
stream_max_retries: Some(1),
stream_idle_timeout_ms: Some(2000),
max_tokens: None,
requires_openai_auth: false,
};

View File

@@ -144,7 +144,7 @@ impl McpProcess {
let initialized = self.read_jsonrpc_message().await?;
let os_info = os_info::get();
let user_agent = format!(
"llmx_cli_rs/0.1.7 ({} {}; {}) {} (elicitation test; 0.0.0)",
"llmx_cli_rs/0.1.4 ({} {}; {}) {} (elicitation test; 0.0.0)",
os_info.os_type(),
os_info.version(),
os_info.architecture().unwrap_or("unknown"),
@@ -163,7 +163,7 @@ impl McpProcess {
"serverInfo": {
"name": "llmx-mcp-server",
"title": "LLMX",
"version": "0.1.7",
"version": "0.1.4",
"user_agent": user_agent
},
"protocolVersion": mcp_types::MCP_SCHEMA_VERSION

View File

@@ -5,7 +5,7 @@ expression: sanitized
/status
╭───────────────────────────────────────────────────────────────────────────╮
│ >_ LLMX (v0.1.7) │
│ >_ LLMX (v0.1.4) │
│ │
│ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
│ information on rate limits and credits │

View File

@@ -5,7 +5,7 @@ expression: sanitized
/status
╭─────────────────────────────────────────────────────────────────╮
│ >_ LLMX (v0.1.7) │
│ >_ LLMX (v0.1.4) │
│ │
│ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
│ information on rate limits and credits │

View File

@@ -5,7 +5,7 @@ expression: sanitized
/status
╭──────────────────────────────────────────────────────────────╮
│ >_ LLMX (v0.1.7) │
│ >_ LLMX (v0.1.4) │
│ │
│ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
│ information on rate limits and credits │

View File

@@ -5,7 +5,7 @@ expression: sanitized
/status
╭──────────────────────────────────────────────────────────────╮
│ >_ LLMX (v0.1.7) │
│ >_ LLMX (v0.1.4) │
│ │
│ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
│ information on rate limits and credits │

View File

@@ -5,7 +5,7 @@ expression: sanitized
/status
╭───────────────────────────────────────────────────────────────────╮
│ >_ LLMX (v0.1.7) │
│ >_ LLMX (v0.1.4) │
│ │
│ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
│ information on rate limits and credits │

View File

@@ -5,7 +5,7 @@ expression: sanitized
/status
╭────────────────────────────────────────────╮
│ >_ LLMX (v0.1.7) │
│ >_ LLMX (v0.1.4) │
│ │
│ Visit https://chatgpt.com/llmx/settings/ │
│ usage for up-to-date │

File diff suppressed because one or more lines are too long