Send AGENTS.md as a separate user message (#1737)
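
Previously, the contents of AGENTS.md (held on the prompt as user_instructions) were concatenated into the instructions string sent to the model. After this change they travel as their own user message: the first item of the Responses API input array, or an extra user-role message right after the system message on the Chat Completions wire API. Below is a minimal sketch of the before/after request shape, not code from this commit; the placeholder strings are illustrative, and "input_text" assumes the serialized form of ContentItem::InputText.

use serde_json::json;

fn main() {
    // Before: user instructions were folded into `instructions`.
    let before = json!({
        "instructions": "<base instructions>\n<AGENTS.md contents>",
        "input": [
            {"role": "user", "content": [{"type": "input_text", "text": "hello"}]},
        ],
    });

    // After: `instructions` carries only the base (and tool) instructions;
    // the AGENTS.md contents become the first user message in `input`.
    let after = json!({
        "instructions": "<base instructions>",
        "input": [
            {"role": "user", "content": [{"type": "input_text", "text": "<AGENTS.md contents>"}]},
            {"role": "user", "content": [{"type": "input_text", "text": "hello"}]},
        ],
    });

    println!("{before:#}\n{after:#}");
}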

pakrym-oai
2025-07-30 13:56:24 -07:00
committed by GitHub
parent 2f5557056d
commit e0e245cc1c
4 changed files with 96 additions and 4 deletions

View File

@@ -40,6 +40,10 @@ pub(crate) async fn stream_chat_completions(
     let full_instructions = prompt.get_full_instructions(model);
     messages.push(json!({"role": "system", "content": full_instructions}));
 
+    if let Some(instr) = &prompt.user_instructions {
+        messages.push(json!({"role": "user", "content": instr}));
+    }
+
     for item in &prompt.input {
         match item {
             ResponseItem::Message { role, content, .. } => {
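
On the Chat Completions wire API, the hunk above pushes the user instructions as a user-role message immediately after the system message, so the conversation the model sees begins with them. A sketch of the resulting messages array, with assumed placeholder values rather than real output:

use serde_json::{json, Value};

// Sketch: the messages array produced when user_instructions is set
// and the user has sent one turn ("hello").
fn sketch_messages() -> Value {
    json!([
        {"role": "system", "content": "<full instructions>"},
        {"role": "user", "content": "<AGENTS.md contents>"},
        {"role": "user", "content": "hello"},
    ])
}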

View File

@@ -35,6 +35,7 @@ use crate::error::Result;
 use crate::flags::CODEX_RS_SSE_FIXTURE;
 use crate::model_provider_info::ModelProviderInfo;
 use crate::model_provider_info::WireApi;
+use crate::models::ContentItem;
 use crate::models::ResponseItem;
 use crate::openai_tools::create_tools_json_for_responses_api;
 use crate::protocol::TokenUsage;
@@ -156,10 +157,20 @@ impl ModelClient {
             vec![]
         };
 
+        let mut input_with_instructions = Vec::with_capacity(prompt.input.len() + 1);
+        if let Some(ui) = &prompt.user_instructions {
+            input_with_instructions.push(ResponseItem::Message {
+                id: None,
+                role: "user".to_string(),
+                content: vec![ContentItem::InputText { text: ui.clone() }],
+            });
+        }
+        input_with_instructions.extend(prompt.input.clone());
+
         let payload = ResponsesApiRequest {
             model: &self.config.model,
             instructions: &full_instructions,
-            input: &prompt.input,
+            input: &input_with_instructions,
             tools: &tools_json,
             tool_choice: "auto",
             parallel_tool_calls: false,
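
On the Responses API path, the hunk above builds input_with_instructions by reserving room for one extra element, pushing the optional instructions message first, and then appending the original input; the payload's input field then points at the combined vector. A standalone sketch of that prepend pattern with simplified types (String instead of ResponseItem; not the crate's real code):

fn with_preamble(preamble: Option<String>, input: Vec<String>) -> Vec<String> {
    // Reserve input.len() + 1 slots: at most one preamble item is prepended.
    let mut out = Vec::with_capacity(input.len() + 1);
    if let Some(p) = preamble {
        out.push(p); // the user instructions become the first item
    }
    out.extend(input);
    out
}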

View File

@@ -44,9 +44,6 @@ impl Prompt {
             .as_deref()
             .unwrap_or(BASE_INSTRUCTIONS);
         let mut sections: Vec<&str> = vec![base];
-        if let Some(ref user) = self.user_instructions {
-            sections.push(user);
-        }
         if model.starts_with("gpt-4.1") {
             sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
         }
@@ -188,3 +185,19 @@ impl Stream for ResponseStream
         self.rx_event.poll_recv(cx)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn get_full_instructions_no_user_content() {
+        let prompt = Prompt {
+            user_instructions: Some("custom instruction".to_string()),
+            ..Default::default()
+        };
+        let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");
+        let full = prompt.get_full_instructions("gpt-4.1");
+        assert_eq!(full, expected);
+    }
+}
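
With the user-instructions branch removed, get_full_instructions joins only the base instructions and, for gpt-4.1 models, the apply_patch tool docs; the new unit test pins this down by asserting that "custom instruction" never reaches the output. A sketch of the join implied by the test's expected value (assuming sections are joined with a single newline, as the format! string suggests):

fn full_instructions(base: &str, apply_patch_docs: Option<&str>) -> String {
    let mut sections = vec![base];
    if let Some(docs) = apply_patch_docs {
        sections.push(docs); // only for models that need the apply_patch docs
    }
    sections.join("\n")
}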

View File

@@ -257,6 +257,70 @@ async fn chatgpt_auth_sends_correct_request() {
     );
 }
 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn includes_user_instructions_message_in_request() {
+    #![allow(clippy::unwrap_used)]
+
+    let server = MockServer::start().await;
+
+    let first = ResponseTemplate::new(200)
+        .insert_header("content-type", "text/event-stream")
+        .set_body_raw(sse_completed("resp1"), "text/event-stream");
+
+    Mock::given(method("POST"))
+        .and(path("/v1/responses"))
+        .respond_with(first)
+        .expect(1)
+        .mount(&server)
+        .await;
+
+    let model_provider = ModelProviderInfo {
+        base_url: Some(format!("{}/v1", server.uri())),
+        ..built_in_model_providers()["openai"].clone()
+    };
+
+    let codex_home = TempDir::new().unwrap();
+    let mut config = load_default_config_for_test(&codex_home);
+    config.model_provider = model_provider;
+    config.user_instructions = Some("be nice".to_string());
+    let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
+    let CodexSpawnOk { codex, .. } = Codex::spawn(
+        config,
+        Some(CodexAuth::from_api_key("Test API Key".to_string())),
+        ctrl_c.clone(),
+    )
+    .await
+    .unwrap();
+
+    codex
+        .submit(Op::UserInput {
+            items: vec![InputItem::Text {
+                text: "hello".into(),
+            }],
+        })
+        .await
+        .unwrap();
+
+    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
+
+    let request = &server.received_requests().await.unwrap()[0];
+    let request_body = request.body_json::<serde_json::Value>().unwrap();
+
+    assert!(
+        !request_body["instructions"]
+            .as_str()
+            .unwrap()
+            .contains("be nice")
+    );
+    assert_eq!(request_body["input"][0]["role"], "user");
+    assert!(
+        request_body["input"][0]["content"][0]["text"]
+            .as_str()
+            .unwrap()
+            .starts_with("be nice")
+    );
+}
+
 fn auth_from_token(id_token: String) -> CodexAuth {
     CodexAuth::new(
         None,