From 401b0b39353e3a08945efc6d155c1ff2071af214 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Mon, 17 Nov 2025 18:15:24 +0100
Subject: [PATCH] fix: Handle empty assistant messages as turn completion
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- When the API returns an empty assistant message (content: []), treat it as turn completion signal
- This fixes the "working" hang that occurs after tool calls when the API stream ends with an empty message
- Updated parse_agent_message to return None for empty content
- Fixes issue where llmx would hang indefinitely waiting for content that never comes

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
---
 llmx-rs/core/src/event_mapping.rs       | 17 +++++++++++------
 llmx-rs/core/src/model_provider_info.rs |  5 +++++
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/llmx-rs/core/src/event_mapping.rs b/llmx-rs/core/src/event_mapping.rs
index eabe0608..042075d9 100644
--- a/llmx-rs/core/src/event_mapping.rs
+++ b/llmx-rs/core/src/event_mapping.rs
@@ -54,7 +54,7 @@ fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
     Some(UserMessageItem::new(&content))
 }
 
-fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> AgentMessageItem {
+fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> Option<AgentMessageItem> {
     let mut content: Vec = Vec::new();
     for content_item in message.iter() {
         match content_item {
@@ -69,18 +69,23 @@ fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> AgentMes
             }
         }
     }
+
+    // If the message has no content, return None to signal turn completion
+    // This happens when the API ends a turn with an empty assistant message (e.g., after tool calls)
+    if content.is_empty() {
+        return None;
+    }
+
     let id = id.cloned().unwrap_or_else(|| Uuid::new_v4().to_string());
-    AgentMessageItem { id, content }
+    Some(AgentMessageItem { id, content })
 }
 
 pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> {
     match item {
         ResponseItem::Message { role, content, id } => match role.as_str() {
             "user" => parse_user_message(content).map(TurnItem::UserMessage),
-            "assistant" => Some(TurnItem::AgentMessage(parse_agent_message(
-                id.as_ref(),
-                content,
-            ))),
+            "assistant" => parse_agent_message(id.as_ref(), content)
+                .map(TurnItem::AgentMessage),
             "system" => None,
             _ => None,
         },
diff --git a/llmx-rs/core/src/model_provider_info.rs b/llmx-rs/core/src/model_provider_info.rs
index a1db5242..58da2580 100644
--- a/llmx-rs/core/src/model_provider_info.rs
+++ b/llmx-rs/core/src/model_provider_info.rs
@@ -422,6 +422,7 @@ base_url = "http://localhost:11434/v1"
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -452,6 +453,7 @@ query_params = { api-version = "2025-04-01-preview" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -485,6 +487,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
 
@@ -508,6 +511,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     }
 }
@@ -541,6 +545,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
     assert!(named_provider.is_azure_responses_endpoint());