fix: for now, limit the number of deltas sent back to the UI (#2776)

This is a stopgap solution, but today we are seeing the client get
flooded with events. Since we already truncate the output we send to the
model, it feels reasonable to limit how many deltas we send to the
client.
Author: Michael Bolin
Date: 2025-08-27 10:23:25 -07:00 (committed by GitHub)
Parent: 0cec0770e2
Commit: ffe585387b


@@ -40,6 +40,10 @@ const EXIT_CODE_SIGNAL_BASE: i32 = 128; // conventional shell: 128 + signal
 const READ_CHUNK_SIZE: usize = 8192; // bytes per read
 const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB
+/// Limit the number of ExecCommandOutputDelta events emitted per exec call.
+/// Aggregation still collects full output; only the live event stream is capped.
+pub(crate) const MAX_EXEC_OUTPUT_DELTAS_PER_CALL: usize = 10_000;
 #[derive(Debug, Clone)]
 pub struct ExecParams {
     pub command: Vec<String>,
@@ -344,6 +348,7 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
 ) -> io::Result<StreamOutput<Vec<u8>>> {
     let mut buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
     let mut tmp = [0u8; READ_CHUNK_SIZE];
+    let mut emitted_deltas: usize = 0;
     // No caps: append all bytes
@@ -353,7 +358,9 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
             break;
         }
-        if let Some(stream) = &stream {
+        if let Some(stream) = &stream
+            && emitted_deltas < MAX_EXEC_OUTPUT_DELTAS_PER_CALL
+        {
             let chunk = tmp[..n].to_vec();
             let msg = EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent {
                 call_id: stream.call_id.clone(),
@@ -370,6 +377,7 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
             };
             #[allow(clippy::let_unit_value)]
             let _ = stream.tx_event.send(event).await;
+            emitted_deltas += 1;
         }
         if let Some(tx) = &aggregate_tx {
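
Abstracted from the hunks above, here is a minimal standalone sketch of the same pattern, with the async reader, event types, and channels replaced by std equivalents (OutputDelta, read_with_capped_deltas, MAX_DELTAS, and the mpsc channel are illustrative stand-ins, not the actual codex types): every byte still lands in the aggregate buffer, while at most a fixed number of chunks are forwarded as live deltas.

```rust
use std::io::Read;
use std::sync::mpsc::Sender;

// Assumed values mirroring the constants in the diff above.
const MAX_DELTAS: usize = 10_000; // cap on live delta events per call
const CHUNK: usize = 8192; // bytes per read

/// Hypothetical live-stream event carrying one chunk of output.
struct OutputDelta(Vec<u8>);

/// Read `reader` to EOF, forwarding at most MAX_DELTAS chunks to the live
/// channel while always collecting the full output in the returned buffer.
fn read_with_capped_deltas<R: Read>(
    mut reader: R,
    live: Option<&Sender<OutputDelta>>,
) -> std::io::Result<Vec<u8>> {
    let mut aggregate = Vec::new();
    let mut tmp = [0u8; CHUNK];
    let mut emitted = 0usize;

    loop {
        let n = reader.read(&mut tmp)?;
        if n == 0 {
            break; // EOF
        }
        // Only the live event stream is capped; deltas past the limit are dropped.
        if let Some(tx) = live {
            if emitted < MAX_DELTAS {
                // A disconnected receiver should not fail the read, so ignore errors.
                let _ = tx.send(OutputDelta(tmp[..n].to_vec()));
                emitted += 1;
            }
        }
        // Aggregation always sees every byte.
        aggregate.extend_from_slice(&tmp[..n]);
    }
    Ok(aggregate)
}

fn main() -> std::io::Result<()> {
    let (tx, rx) = std::sync::mpsc::channel();
    let data = vec![b'x'; 3 * CHUNK + 10];
    let full = read_with_capped_deltas(&data[..], Some(&tx))?;
    drop(tx); // close the channel so the iterator below terminates

    let deltas: Vec<OutputDelta> = rx.iter().collect();
    println!(
        "deltas emitted: {}, bytes streamed: {}, bytes aggregated: {}",
        deltas.len(),
        deltas.iter().map(|d| d.0.len()).sum::<usize>(),
        full.len()
    );
    Ok(())
}
```

Counting events rather than bytes keeps the check cheap and independent of chunk size, and because the aggregate buffer is untouched by the cap, anything dropped from the live stream is still present in the collected output, which is truncated separately before being sent to the model.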