Centralize truncation in conversation history (#5652)

Move the truncation logic into conversation history so it applies to any tool
output. This helps us avoid edge cases when truncating tool calls and MCP
calls.
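
As a rough illustration of the centralized approach (a minimal sketch, not the
actual implementation; the type names, method names, and byte limit below are
hypothetical), tool and MCP output is truncated once at the point where it is
recorded into the conversation history, instead of inside each tool-call
formatter:

const MAX_TOOL_OUTPUT_BYTES: usize = 10 * 1024; // illustrative limit only

struct ConversationHistory {
    items: Vec<String>,
}

impl ConversationHistory {
    /// Record any tool or MCP output; truncation is applied here for every caller.
    fn record_tool_output(&mut self, output: String) {
        self.items.push(Self::truncate(output));
    }

    fn truncate(mut output: String) -> String {
        if output.len() > MAX_TOOL_OUTPUT_BYTES {
            // Back up to a char boundary so String::truncate does not panic.
            let mut cut = MAX_TOOL_OUTPUT_BYTES;
            while !output.is_char_boundary(cut) {
                cut -= 1;
            }
            let dropped = output.len() - cut;
            output.truncate(cut);
            output.push_str(&format!("\n[... output truncated, {dropped} bytes omitted]"));
        }
        output
    }
}

With truncation owned by the history, call sites such as ToolEmitter can pass
the raw message through (as the hunks below do with message.clone()), and one
size policy covers exec, tool, and MCP output without per-call-site edge cases.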
Authored by Ahmed Ibrahim on 2025-10-27 14:05:35 -07:00, committed by GitHub
parent 0fc295d958
commit 7226365397
6 changed files with 588 additions and 365 deletions


@@ -19,7 +19,6 @@ use std::path::Path;
 use std::path::PathBuf;
 use std::time::Duration;
-use super::format_exec_output;
 use super::format_exec_output_str;
 #[derive(Clone, Copy)]
@@ -146,7 +145,7 @@ impl ToolEmitter {
                 (*message).to_string(),
                 -1,
                 Duration::ZERO,
-                format_exec_output(&message),
+                message.clone(),
             )
             .await;
         }
@@ -241,7 +240,7 @@ impl ToolEmitter {
                 (*message).to_string(),
                 -1,
                 Duration::ZERO,
-                format_exec_output(&message),
+                message.clone(),
             )
             .await;
         }
@@ -277,7 +276,7 @@ impl ToolEmitter {
             }
             Err(ToolError::Codex(err)) => {
                 let message = format!("execution error: {err:?}");
-                let response = super::format_exec_output(&message);
+                let response = message.clone();
                 event = ToolEventStage::Failure(ToolEventFailure::Message(message));
                 Err(FunctionCallError::RespondToModel(response))
             }
@@ -289,9 +288,9 @@ impl ToolEmitter {
                 } else {
                     msg
                 };
-                let response = super::format_exec_output(&normalized);
-                event = ToolEventStage::Failure(ToolEventFailure::Message(normalized));
-                Err(FunctionCallError::RespondToModel(response))
+                let response = &normalized;
+                event = ToolEventStage::Failure(ToolEventFailure::Message(normalized.clone()));
+                Err(FunctionCallError::RespondToModel(response.clone()))
             }
         };
         self.emit(ctx, event).await;