Remove last turn reasoning filtering (#5986)

Author: Ahmed Ibrahim
Date: 2025-10-30 16:20:32 -07:00
Committed by: GitHub
Parent: 11e5327770
Commit: a3d3719481

2 changed files with 0 additions and 131 deletions

View File

@@ -73,7 +73,6 @@ impl ConversationHistory {
    pub(crate) fn get_history_for_prompt(&mut self) -> Vec<ResponseItem> {
        let mut history = self.get_history();
        Self::remove_ghost_snapshots(&mut history);
        Self::remove_reasoning_before_last_turn(&mut history);
        history
    }
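
With the filter call removed, the prompt path only strips ghost snapshots before returning the recorded history. A minimal sketch of the method as it reads after this change (taken directly from the hunk above; the surrounding impl block is omitted):

```rust
    /// History sent with the next prompt: everything recorded so far minus
    /// ghost snapshots. Reasoning items are no longer filtered out here.
    pub(crate) fn get_history_for_prompt(&mut self) -> Vec<ResponseItem> {
        let mut history = self.get_history();
        Self::remove_ghost_snapshots(&mut history);
        history
    }
```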
@@ -125,25 +124,6 @@ impl ConversationHistory {
        items.retain(|item| !matches!(item, ResponseItem::GhostSnapshot { .. }));
    }

    fn remove_reasoning_before_last_turn(items: &mut Vec<ResponseItem>) {
        // Responses API drops reasoning items before the last user message.
        // Sending them is harmless but can lead to validation errors when switching between API organizations.
        // https://cookbook.openai.com/examples/responses_api/reasoning_items#caching
        let Some(last_user_index) = items
            .iter()
            // Use last user message as the turn boundary.
            .rposition(|item| matches!(item, ResponseItem::Message { role, .. } if role == "user"))
        else {
            return;
        };
        let mut index = 0usize;
        items.retain(|item| {
            let keep = index >= last_user_index || !matches!(item, ResponseItem::Reasoning { .. });
            index += 1;
            keep
        });
    }

    fn ensure_call_outputs_present(&mut self) {
        // Collect synthetic outputs to insert immediately after their calls.
        // Store the insertion position (index of call) alongside the item so
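
For reference, the removed helper's core trick is a `retain` driven by an external counter: `rposition` finds the last user message, and every `Reasoning` item before that boundary is dropped. A self-contained sketch of the same technique on a simplified stand-in type (the names below are illustrative, not the real `ResponseItem`):

```rust
// Illustrative stand-in for ResponseItem; not the real protocol type.
#[derive(Debug, PartialEq)]
enum Item {
    User(&'static str),
    Assistant(&'static str),
    Reasoning(&'static str),
}

/// Drop Reasoning entries that occur before the last User entry, mirroring
/// the removed remove_reasoning_before_last_turn helper.
fn drop_reasoning_before_last_turn(items: &mut Vec<Item>) {
    let Some(last_user_index) = items.iter().rposition(|i| matches!(i, Item::User(_))) else {
        return;
    };
    // Vec::retain exposes no index, so track one alongside the closure.
    let mut index = 0usize;
    items.retain(|item| {
        let keep = index >= last_user_index || !matches!(item, Item::Reasoning(_));
        index += 1;
        keep
    });
}

fn main() {
    let mut items = vec![
        Item::User("initial"),
        Item::Reasoning("first"),
        Item::Assistant("ack"),
        Item::User("latest"),
        Item::Reasoning("second"),
    ];
    drop_reasoning_before_last_turn(&mut items);
    // Only the reasoning entry before the last user message is gone.
    assert_eq!(
        items,
        vec![
            Item::User("initial"),
            Item::Assistant("ack"),
            Item::User("latest"),
            Item::Reasoning("second"),
        ]
    );
}
```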
@@ -540,15 +520,6 @@ mod tests {
        }
    }

    fn reasoning(id: &str) -> ResponseItem {
        ResponseItem::Reasoning {
            id: id.to_string(),
            summary: Vec::new(),
            content: None,
            encrypted_content: None,
        }
    }

    fn create_history_with_items(items: Vec<ResponseItem>) -> ConversationHistory {
        let mut h = ConversationHistory::new();
        h.record_items(items.iter());
@@ -605,40 +576,6 @@ mod tests {
        );
    }

    #[test]
    fn get_history_drops_reasoning_before_last_user_message() {
        let mut history = ConversationHistory::new();
        let items = vec![
            user_msg("initial"),
            reasoning("first"),
            assistant_msg("ack"),
            user_msg("latest"),
            reasoning("second"),
            assistant_msg("ack"),
            reasoning("third"),
        ];
        history.record_items(items.iter());
        let filtered = history.get_history_for_prompt();
        assert_eq!(
            filtered,
            vec![
                user_msg("initial"),
                assistant_msg("ack"),
                user_msg("latest"),
                reasoning("second"),
                assistant_msg("ack"),
                reasoning("third"),
            ]
        );
        let reasoning_count = history
            .contents()
            .iter()
            .filter(|item| matches!(item, ResponseItem::Reasoning { .. }))
            .count();
        assert_eq!(reasoning_count, 3);
    }

    #[test]
    fn get_history_for_prompt_drops_ghost_commits() {
        let items = vec![ResponseItem::GhostSnapshot {

View File

@@ -18,10 +18,7 @@ use codex_core::shell::default_user_shell;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::responses;
use core_test_support::responses::mount_sse_once;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use std::collections::HashMap;
use tempfile::TempDir;
@@ -886,68 +883,3 @@ async fn send_user_turn_with_changes_sends_environment_context() {
    ]);
    assert_eq!(body2["input"], expected_input_2);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn cached_prompt_filters_reasoning_items_from_previous_turns() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));
    let server = responses::start_mock_server().await;
    let call_id = "shell-call";
    let shell_args = serde_json::json!({
        "command": ["/bin/echo", "tool output"],
        "timeout_ms": 1_000,
    });
    let initial_response = responses::sse(vec![
        responses::ev_response_created("resp-first"),
        responses::ev_reasoning_item("reason-1", &["Planning shell command"], &[]),
        responses::ev_function_call(
            call_id,
            "shell",
            &serde_json::to_string(&shell_args).expect("serialize shell args"),
        ),
        responses::ev_completed("resp-first"),
    ]);
    let follow_up_response = responses::sse(vec![
        responses::ev_response_created("resp-follow-up"),
        responses::ev_reasoning_item(
            "reason-2",
            &["Shell execution completed"],
            &["stdout: tool output"],
        ),
        responses::ev_assistant_message("assistant-1", "First turn reply"),
        responses::ev_completed("resp-follow-up"),
    ]);
    let second_turn_response = responses::sse(vec![
        responses::ev_response_created("resp-second"),
        responses::ev_assistant_message("assistant-2", "Second turn reply"),
        responses::ev_completed("resp-second"),
    ]);
    mount_sse_once(&server, initial_response).await;
    let second_request = mount_sse_once(&server, follow_up_response).await;
    let third_request = mount_sse_once(&server, second_turn_response).await;
    let mut builder = test_codex();
    let test = builder.build(&server).await?;
    test.submit_turn("hello 1").await?;
    test.submit_turn("hello 2").await?;
    let second_request_input = second_request.single_request();
    let reasoning_items = second_request_input.inputs_of_type("reasoning");
    assert_eq!(
        reasoning_items.len(),
        1,
        "expected first turn follow-up to include reasoning item"
    );
    let third_request_input = third_request.single_request();
    let cached_reasoning = third_request_input.inputs_of_type("reasoning");
    assert_eq!(
        cached_reasoning.len(),
        0,
        "expected cached prompt to filter out prior reasoning items"
    );
    Ok(())
}
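
If this end-to-end scenario were kept rather than deleted, the final assertion would flip: with the filter gone, reasoning items from earlier turns should travel with later requests. A hedged fragment adapting the deleted check above, reusing `third_request`, `single_request`, and `inputs_of_type` exactly as they appear in the removed test; whether the harness keeps these helpers after this commit is an assumption:

```rust
    // Hypothetical adaptation of the deleted assertion; helper availability is assumed.
    let third_request_input = third_request.single_request();
    let retained_reasoning = third_request_input.inputs_of_type("reasoning");
    // With remove_reasoning_before_last_turn gone, earlier reasoning items are
    // no longer stripped from the prompt before the request is sent.
    assert!(
        !retained_reasoning.is_empty(),
        "expected later requests to retain reasoning items from previous turns"
    );
```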