Compare commits: rust-v0.1....main (16 commits)
| SHA1 |
|---|
| 66e0649b01 |
| 84bc98a66b |
| 3bc152029e |
| ffbd2e38ec |
| 0841ba05a8 |
| 44dc7a3bed |
| a3ced1f246 |
| 401b0b3935 |
| 7237627ac7 |
| 75dda1c285 |
| 8f79e89db2 |
| c0775ad8a3 |
| ee75cfaa7f |
| 085d8c9343 |
| 462b219d3f |
| 63de226119 |
llmx-rs/Cargo.lock (generated, 1475 lines changed): file diff suppressed because it is too large.
@@ -43,7 +43,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.1.5"
+version = "0.1.9"
 # Track the edition for all workspace crates in one place. Individual
 # crates can still override this value, but keeping it here means new
 # crates created with `cargo new -w ...` automatically inherit the 2024

@@ -191,7 +191,7 @@ tokio-util = "0.7.16"
 toml = "0.9.5"
 toml_edit = "0.23.4"
 tonic = "0.13.1"
-tracing = "0.1.51"
+tracing = "0.1.41"
 tracing-appender = "0.2.3"
 tracing-subscriber = "0.3.20"
 tracing-test = "0.2.5"
@@ -138,7 +138,7 @@ impl McpProcess {
             client_info: ClientInfo {
                 name: "llmx-app-server-tests".to_string(),
                 title: None,
-                version: "0.1.5".to_string(),
+                version: "0.1.7".to_string(),
             },
         })?);
         let req_id = self.send_request("initialize", params).await?;
@@ -26,7 +26,7 @@ async fn get_user_agent_returns_current_llmx_user_agent() -> Result<()> {
 
     let os_info = os_info::get();
     let user_agent = format!(
-        "llmx_cli_rs/0.1.5 ({} {}; {}) {} (llmx-app-server-tests; 0.1.5)",
+        "llmx_cli_rs/0.1.7 ({} {}; {}) {} (llmx-app-server-tests; 0.1.7)",
         os_info.os_type(),
         os_info.version(),
         os_info.architecture().unwrap_or("unknown"),
@@ -31,6 +31,7 @@ use thiserror::Error;
 const BEGIN_PATCH_MARKER: &str = "*** Begin Patch";
 const END_PATCH_MARKER: &str = "*** End Patch";
 const ADD_FILE_MARKER: &str = "*** Add File: ";
+const CREATE_FILE_MARKER: &str = "*** Create File: "; // Alias for Add File
 const DELETE_FILE_MARKER: &str = "*** Delete File: ";
 const UPDATE_FILE_MARKER: &str = "*** Update File: ";
 const MOVE_TO_MARKER: &str = "*** Move to: ";

@@ -245,8 +246,8 @@ fn check_start_and_end_lines_strict(
 fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), ParseError> {
     // Be tolerant of case mismatches and extra padding around marker strings.
     let first_line = lines[0].trim();
-    if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER) {
-        // Add File
+    if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER).or_else(|| first_line.strip_prefix(CREATE_FILE_MARKER)) {
+        // Add File (also accepts Create File as alias)
         let mut contents = String::new();
         let mut parsed_lines = 1;
         for add_line in &lines[1..] {

@@ -331,7 +332,7 @@ fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), P
 
     Err(InvalidHunkError {
         message: format!(
-            "'{first_line}' is not a valid hunk header. Valid hunk headers: '*** Add File: {{path}}', '*** Delete File: {{path}}', '*** Update File: {{path}}'"
+            "'{first_line}' is not a valid hunk header. Valid hunk headers: '*** Add File: {{path}}', '*** Create File: {{path}}', '*** Delete File: {{path}}', '*** Update File: {{path}}'"
        ),
         line_number,
     })
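Taken together, the three hunks above make `*** Create File:` a pure alias for `*** Add File:`. A standalone sketch of just the alias matching (marker constants copied from the diff; the `main` harness is illustrative only):

```rust
const ADD_FILE_MARKER: &str = "*** Add File: ";
const CREATE_FILE_MARKER: &str = "*** Create File: "; // Alias for Add File

/// Returns the target path when the line is an Add File (or Create File) header.
fn add_file_path(first_line: &str) -> Option<&str> {
    first_line
        .strip_prefix(ADD_FILE_MARKER)
        .or_else(|| first_line.strip_prefix(CREATE_FILE_MARKER))
}

fn main() {
    assert_eq!(add_file_path("*** Add File: a.txt"), Some("a.txt"));
    assert_eq!(add_file_path("*** Create File: a.txt"), Some("a.txt"));
    assert_eq!(add_file_path("*** Delete File: a.txt"), None);
}
```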
@@ -45,6 +45,7 @@ pub(crate) async fn stream_chat_completions(
     provider: &ModelProviderInfo,
     otel_event_manager: &OtelEventManager,
     session_source: &SessionSource,
+    model_max_output_tokens: Option<i64>,
 ) -> Result<ResponseStream> {
     if prompt.output_schema.is_some() {
         return Err(LlmxErr::UnsupportedOperation(
@@ -56,7 +57,12 @@ pub(crate) async fn stream_chat_completions(
     let mut messages = Vec::<serde_json::Value>::new();
 
     let full_instructions = prompt.get_full_instructions(model_family);
-    messages.push(json!({"role": "system", "content": full_instructions}));
+    // Add cache_control to system instructions for Anthropic prompt caching
+    messages.push(json!({
+        "role": "system",
+        "content": full_instructions,
+        "cache_control": {"type": "ephemeral"}
+    }));
 
     let input = prompt.get_formatted_input();
 
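The only change to the system message is the extra `cache_control` key. A minimal sketch of the resulting payload shape, assuming `serde_json` (the instruction text is a stand-in value):

```rust
use serde_json::json;

fn main() {
    // Stand-in for prompt.get_full_instructions(model_family).
    let full_instructions = "You are LLMX, a coding agent.";

    // Same shape the hunk produces: the stable system prompt is tagged
    // ephemeral so Anthropic-compatible backends can reuse its prompt cache.
    let system_msg = json!({
        "role": "system",
        "content": full_instructions,
        "cache_control": {"type": "ephemeral"}
    });

    println!("{}", serde_json::to_string_pretty(&system_msg).unwrap());
}
```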
@@ -413,6 +419,20 @@ pub(crate) async fn stream_chat_completions(
     }
 
     debug!("Built {} messages for API request", messages.len());
 
+    // Add cache_control to conversation history for Anthropic prompt caching
+    // Add it to a message that's at least 3 messages before the end (stable history)
+    // This caches the earlier conversation while keeping recent turns uncached
+    if messages.len() > 4 {
+        let cache_idx = messages.len().saturating_sub(4);
+        if let Some(msg) = messages.get_mut(cache_idx) {
+            if let Some(obj) = msg.as_object_mut() {
+                obj.insert("cache_control".to_string(), json!({"type": "ephemeral"}));
+                debug!("Added cache_control to message at index {} (conversation history)", cache_idx);
+            }
+        }
+    }
+
     debug!("=== End Chat Completions Request Debug ===");
 
     let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
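The placement rule is easiest to see in isolation: the marker goes four slots before the end, so earlier history is cacheable while the most recent turns stay uncached. A sketch assuming `serde_json`; message contents are placeholders:

```rust
use serde_json::{Value, json};

// Mirrors the hunk's rule: only mark a cache point once there is enough
// stable history (more than 4 messages).
fn mark_history_cache_point(messages: &mut [Value]) {
    if messages.len() > 4 {
        let cache_idx = messages.len().saturating_sub(4);
        if let Some(obj) = messages[cache_idx].as_object_mut() {
            obj.insert("cache_control".to_string(), json!({"type": "ephemeral"}));
        }
    }
}

fn main() {
    let mut messages: Vec<Value> = (0..7)
        .map(|i| json!({"role": "user", "content": format!("turn {i}")}))
        .collect();
    mark_history_cache_point(&mut messages);
    // len 7 -> index 3 carries the marker; the last three turns do not.
    assert!(messages[3].get("cache_control").is_some());
    assert!(messages[6].get("cache_control").is_none());
}
```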
@@ -424,10 +444,14 @@ pub(crate) async fn stream_chat_completions(
     });
 
     // Add max_tokens - required by Anthropic Messages API
-    // Use a sensible default of 8192 if not configured
+    // Priority: config model_max_output_tokens > provider max_tokens > default 20480
+    let max_tokens = model_max_output_tokens
+        .or(provider.max_tokens)
+        .unwrap_or(20480);
     if let Some(obj) = payload.as_object_mut() {
-        obj.insert("max_tokens".to_string(), json!(8192));
+        obj.insert("max_tokens".to_string(), json!(max_tokens));
     }
+    debug!("Using max_tokens: {}", max_tokens);
 
     debug!(
         "POST to {}: {}",
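Extracted from the hunk, the resolution order reads as a pure function: per-model config wins, then the provider-level setting, then the 20480 fallback. A sketch mirroring the names in the diff:

```rust
// Fallback chain for the Anthropic-required max_tokens field.
fn resolve_max_tokens(
    model_max_output_tokens: Option<i64>,
    provider_max_tokens: Option<i64>,
) -> i64 {
    model_max_output_tokens.or(provider_max_tokens).unwrap_or(20480)
}

fn main() {
    assert_eq!(resolve_max_tokens(Some(4096), Some(8192)), 4096); // config wins
    assert_eq!(resolve_max_tokens(None, Some(8192)), 8192);       // provider next
    assert_eq!(resolve_max_tokens(None, None), 20480);            // default last
}
```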
@@ -589,7 +613,9 @@ async fn process_chat_sse<S>(
 ) where
     S: Stream<Item = Result<Bytes>> + Unpin,
 {
+    debug!("process_chat_sse started, idle_timeout={:?}", idle_timeout);
     let mut stream = stream.eventsource();
+    debug!("SSE stream initialized, waiting for first event");
 
     // State to accumulate a function call across streaming chunks.
     // OpenAI may split the `arguments` string over multiple `delta` events
@@ -624,7 +650,14 @@ async fn process_chat_sse<S>(
                 return;
             }
             Ok(None) => {
-                // Stream closed gracefully – emit Completed with dummy id.
+                // Stream closed gracefully – emit any pending items first, then Completed
+                debug!("Stream closed gracefully (Ok(None)), emitting pending items");
+                if let Some(item) = assistant_item.take() {
+                    let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                }
+                if let Some(item) = reasoning_item.take() {
+                    let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                }
                 let _ = tx_event
                     .send(Ok(ResponseEvent::Completed {
                         response_id: String::new(),
@@ -820,6 +853,7 @@ async fn process_chat_sse<S>(
 
         // Emit end-of-turn when finish_reason signals completion.
         if let Some(finish_reason) = choice.get("finish_reason").and_then(|v| v.as_str()) {
+            debug!("Received finish_reason: {}", finish_reason);
             match finish_reason {
                 "tool_calls" if fn_call_state.active => {
                     // First, flush the terminal raw reasoning so UIs can finalize
@@ -838,27 +872,46 @@ async fn process_chat_sse<S>(
 
                     let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
                 }
-                "stop" => {
-                    // Regular turn without tool-call. Emit the final assistant message
-                    // as a single OutputItemDone so non-delta consumers see the result.
+                "stop" | "length" => {
+                    // Regular turn without tool-call, or hit max_tokens limit.
+                    debug!("Processing finish_reason={}, assistant_item.is_some()={}, reasoning_item.is_some()={}",
+                        finish_reason, assistant_item.is_some(), reasoning_item.is_some());
+                    // Emit the final assistant message as a single OutputItemDone so non-delta consumers see the result.
+                    if let Some(item) = assistant_item.take() {
+                        debug!("Emitting assistant_item: {:?}", item);
+                        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                    } else {
+                        debug!("No assistant_item to emit");
+                    }
+                    // Also emit a terminal Reasoning item so UIs can finalize raw reasoning.
+                    if let Some(item) = reasoning_item.take() {
+                        debug!("Emitting reasoning_item");
+                        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                    } else {
+                        debug!("No reasoning_item to emit");
+                    }
+                }
+                _ => {
+                    // Unknown finish_reason - still emit pending items to avoid hanging
+                    debug!("Unknown finish_reason: {}, emitting pending items", finish_reason);
                     if let Some(item) = assistant_item.take() {
                         let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
                     }
-                    // Also emit a terminal Reasoning item so UIs can finalize raw reasoning.
                     if let Some(item) = reasoning_item.take() {
                         let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
                     }
                 }
-                _ => {}
             }
 
             // Emit Completed regardless of reason so the agent can advance.
+            debug!("Sending Completed event after finish_reason={}", finish_reason);
             let _ = tx_event
                 .send(Ok(ResponseEvent::Completed {
                     response_id: String::new(),
                     token_usage: token_usage.clone(),
                 }))
                 .await;
+            debug!("Completed event sent, returning from SSE processor");
 
             // Prepare for potential next turn (should not happen in same stream).
             // fn_call_state = FunctionCallState::default();
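Apart from the channel plumbing, the new match boils down to: `stop` and `length` share an arm, unknown reasons still flush pending items, and `Completed` always follows. A simplified model, where a `Vec<String>` stands in for the `tx_event` channel and `String` for the crate's response items:

```rust
fn on_finish_reason(
    finish_reason: &str,
    assistant_item: &mut Option<String>,
    reasoning_item: &mut Option<String>,
    events: &mut Vec<String>,
) {
    match finish_reason {
        "tool_calls" => {
            // Flush of the accumulated function call, elided in this sketch.
        }
        "stop" | "length" => {
            // Regular end of turn, or the max_tokens limit was hit.
            if let Some(item) = assistant_item.take() {
                events.push(item);
            }
            if let Some(item) = reasoning_item.take() {
                events.push(item);
            }
        }
        other => {
            // Unknown finish_reason: still emit pending items to avoid hanging.
            eprintln!("unknown finish_reason: {other}");
            if let Some(item) = assistant_item.take() {
                events.push(item);
            }
            if let Some(item) = reasoning_item.take() {
                events.push(item);
            }
        }
    }
    // Completed is emitted regardless of the reason, so the agent can advance.
    events.push("Completed".to_string());
}

fn main() {
    let mut assistant = Some("assistant message".to_string());
    let mut reasoning = None;
    let mut events = Vec::new();
    on_finish_reason("length", &mut assistant, &mut reasoning, &mut events);
    assert_eq!(events, ["assistant message", "Completed"]);
}
```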
@@ -867,6 +920,22 @@ async fn process_chat_sse<S>(
             }
         }
     }
 
+    // Stream ended without finish_reason - this can happen when the stream closes abruptly
+    debug!("Stream ended without finish_reason, emitting final items and Completed event");
+    if let Some(item) = assistant_item.take() {
+        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+    }
+    if let Some(item) = reasoning_item.take() {
+        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+    }
+    // Send Completed event so llmx knows the turn is done
+    let _ = tx_event
+        .send(Ok(ResponseEvent::Completed {
+            response_id: String::new(),
+            token_usage: token_usage.clone(),
+        }))
+        .await;
 }
 
 /// Optional client-side aggregation helper
@@ -152,6 +152,7 @@ impl ModelClient {
             &self.provider,
             &self.otel_event_manager,
             &self.session_source,
+            self.config.model_max_output_tokens,
         )
         .await?;
 
@@ -1123,6 +1124,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
 
@@ -1187,6 +1189,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
 
@@ -1224,6 +1227,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
 
@@ -1263,6 +1267,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
 
@@ -1298,6 +1303,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
 
@@ -1333,6 +1339,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
 
@@ -1437,6 +1444,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -973,6 +973,8 @@ impl Config {
 
         let mut model_providers = built_in_model_providers();
         // Merge user-defined providers into the built-in list.
+        // Note: This uses or_insert() so built-in providers take precedence.
+        // For custom max_tokens, use model_max_output_tokens config instead.
         for (key, provider) in cfg.model_providers.into_iter() {
             model_providers.entry(key).or_insert(provider);
         }
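The comment added above hinges on `or_insert` semantics: `entry().or_insert()` only writes when the key is absent, so a user-defined provider cannot shadow a built-in one of the same name. A self-contained illustration (the provider names here are hypothetical):

```rust
use std::collections::HashMap;

fn main() {
    let mut providers: HashMap<String, &str> =
        HashMap::from([("openai".to_string(), "built-in")]);

    let user_defined = [
        ("openai".to_string(), "user-defined"),
        ("my-litellm".to_string(), "user-defined"),
    ];
    for (key, provider) in user_defined {
        providers.entry(key).or_insert(provider);
    }

    assert_eq!(providers["openai"], "built-in");         // built-in kept
    assert_eq!(providers["my-litellm"], "user-defined"); // new key inserted
}
```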
@@ -2809,6 +2811,7 @@ model_verbosity = "high"
             request_max_retries: Some(4),
             stream_max_retries: Some(10),
             stream_idle_timeout_ms: Some(300_000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
         let model_provider_map = {
@@ -54,7 +54,7 @@ fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
     Some(UserMessageItem::new(&content))
 }
 
-fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> AgentMessageItem {
+fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> Option<AgentMessageItem> {
     let mut content: Vec<AgentMessageContent> = Vec::new();
     for content_item in message.iter() {
         match content_item {

@@ -69,18 +69,23 @@ fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> AgentMes
             }
         }
     }
 
+    // If the message has no content, return None to signal turn completion
+    // This happens when the API ends a turn with an empty assistant message (e.g., after tool calls)
+    if content.is_empty() {
+        return None;
+    }
+
     let id = id.cloned().unwrap_or_else(|| Uuid::new_v4().to_string());
-    AgentMessageItem { id, content }
+    Some(AgentMessageItem { id, content })
 }
 
 pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> {
     match item {
         ResponseItem::Message { role, content, id } => match role.as_str() {
             "user" => parse_user_message(content).map(TurnItem::UserMessage),
-            "assistant" => Some(TurnItem::AgentMessage(parse_agent_message(
-                id.as_ref(),
-                content,
-            ))),
+            "assistant" => parse_agent_message(id.as_ref(), content)
+                .map(TurnItem::AgentMessage),
             "system" => None,
             _ => None,
         },
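The new contract: an assistant message with empty content parses to `None`, and the caller maps it straight through. A reduced sketch of that contract (`String` stands in for the crate's `ContentItem`/`AgentMessageContent`, and the id fallback replaces `Uuid::new_v4()`):

```rust
#[derive(Debug, PartialEq)]
struct AgentMessageItem {
    id: String,
    content: Vec<String>,
}

fn parse_agent_message(id: Option<&String>, message: &[String]) -> Option<AgentMessageItem> {
    let content: Vec<String> = message.to_vec();
    // An empty assistant message (e.g., after tool calls) signals turn
    // completion rather than a renderable item.
    if content.is_empty() {
        return None;
    }
    let id = id.cloned().unwrap_or_else(|| "generated-id".to_string());
    Some(AgentMessageItem { id, content })
}

fn main() {
    assert_eq!(parse_agent_message(None, &[]), None);
    let item = parse_agent_message(None, &["hello".to_string()]).unwrap();
    assert_eq!(item.content, ["hello"]);
}
```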
@@ -87,6 +87,10 @@ pub struct ModelProviderInfo {
     /// the connection as lost.
     pub stream_idle_timeout_ms: Option<u64>,
 
+    /// Maximum number of tokens to generate in the response. If not specified, defaults to 20480.
+    /// This is required by some providers (e.g., Anthropic via LiteLLM).
+    pub max_tokens: Option<i64>,
+
     /// Does this provider require an OpenAI API Key or ChatGPT login token? If true,
     /// user is presented with login screen on first run, and login preference and token/key
     /// are stored in auth.json. If false (which is the default), login screen is skipped,
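Because the new field is an `Option`, existing provider entries deserialize unchanged. A sketch assuming the `serde` (with derive) and `toml` crates, using a reduced stand-in for `ModelProviderInfo`:

```rust
use serde::Deserialize;

// Reduced stand-in: the real ModelProviderInfo has many more fields.
#[derive(Deserialize, Debug)]
struct ProviderCfg {
    base_url: Option<String>,
    max_tokens: Option<i64>,
}

fn main() {
    let with_limit: ProviderCfg = toml::from_str(
        "base_url = \"http://localhost:11434/v1\"\nmax_tokens = 8192",
    )
    .unwrap();
    let without_limit: ProviderCfg =
        toml::from_str("base_url = \"http://localhost:11434/v1\"").unwrap();

    assert_eq!(with_limit.max_tokens, Some(8192));
    assert_eq!(without_limit.max_tokens, None); // old configs still parse
}
```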
@@ -290,6 +294,7 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
             request_max_retries: None,
             stream_max_retries: None,
             stream_idle_timeout_ms: None,
+            max_tokens: None,
             requires_openai_auth: false,
         },
     ),

@@ -330,6 +335,7 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
             request_max_retries: None,
             stream_max_retries: None,
             stream_idle_timeout_ms: None,
+            max_tokens: None,
             requires_openai_auth: true,
         },
     ),

@@ -375,6 +381,7 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     }
 }

@@ -415,6 +422,7 @@ base_url = "http://localhost:11434/v1"
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -445,6 +453,7 @@ query_params = { api-version = "2025-04-01-preview" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -478,6 +487,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -501,6 +511,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     }
 }

@@ -534,6 +545,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
     assert!(named_provider.is_azure_responses_endpoint());
@@ -58,6 +58,7 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -58,6 +58,7 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -47,6 +47,7 @@ async fn responses_stream_includes_subagent_header_on_review() {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -135,6 +136,7 @@ async fn responses_stream_includes_subagent_header_on_other() {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -712,6 +712,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -1195,6 +1196,7 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -1272,6 +1274,7 @@ async fn env_var_overrides_loaded_auth() {
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -72,6 +72,7 @@ async fn continue_after_stream_error() {
         request_max_retries: Some(1),
         stream_max_retries: Some(1),
         stream_idle_timeout_ms: Some(2_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -80,6 +80,7 @@ async fn retries_on_early_close() {
         request_max_retries: Some(0),
         stream_max_retries: Some(1),
         stream_idle_timeout_ms: Some(2000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -144,7 +144,7 @@ impl McpProcess {
         let initialized = self.read_jsonrpc_message().await?;
         let os_info = os_info::get();
         let user_agent = format!(
-            "llmx_cli_rs/0.1.5 ({} {}; {}) {} (elicitation test; 0.0.0)",
+            "llmx_cli_rs/0.1.7 ({} {}; {}) {} (elicitation test; 0.0.0)",
             os_info.os_type(),
             os_info.version(),
             os_info.architecture().unwrap_or("unknown"),

@@ -163,7 +163,7 @@ impl McpProcess {
             "serverInfo": {
                 "name": "llmx-mcp-server",
                 "title": "LLMX",
-                "version": "0.1.5",
+                "version": "0.1.7",
                 "user_agent": user_agent
             },
             "protocolVersion": mcp_types::MCP_SCHEMA_VERSION
@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭───────────────────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.5) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │

@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭─────────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.5) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │

@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭──────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.5) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │

@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭──────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.5) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │

@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭───────────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.5) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │

@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.5) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/ │
 │ usage for up-to-date │
llmx-rs/tui/tests/fixtures/binary-size-log.jsonl (vendored, 10 lines changed): file diff suppressed because one or more lines are too long.
test_system_message.json (new file, 198 lines): file diff suppressed because one or more lines are too long.