[codex] add developer instructions (#5897)

We are already using developer instructions for code reviews; we need to pass
them through the CLI as well.
Author: Anton Panasenko
Date: 2025-10-30 11:18:31 -07:00
Committed by: GitHub
Parent: 4a55646a02
Commit: 9572cfc782
11 changed files with 192 additions and 4 deletions

@@ -112,6 +112,7 @@ use crate::tools::spec::ToolsConfig;
use crate::tools::spec::ToolsConfigParams;
use crate::turn_diff_tracker::TurnDiffTracker;
use crate::unified_exec::UnifiedExecSessionManager;
use crate::user_instructions::DeveloperInstructions;
use crate::user_instructions::UserInstructions;
use crate::user_notification::UserNotification;
use crate::util::backoff;
@@ -171,6 +172,7 @@ impl Codex {
model: config.model.clone(),
model_reasoning_effort: config.model_reasoning_effort,
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions,
base_instructions: config.base_instructions.clone(),
compact_prompt: config.compact_prompt.clone(),
@@ -265,6 +267,7 @@ pub(crate) struct TurnContext {
/// the model as well as sandbox policies are resolved against this path
/// instead of `std::env::current_dir()`.
pub(crate) cwd: PathBuf,
pub(crate) developer_instructions: Option<String>,
pub(crate) base_instructions: Option<String>,
pub(crate) compact_prompt: Option<String>,
pub(crate) user_instructions: Option<String>,
@@ -303,6 +306,9 @@ pub(crate) struct SessionConfiguration {
model_reasoning_effort: Option<ReasoningEffortConfig>,
model_reasoning_summary: ReasoningSummaryConfig,
/// Developer instructions that supplement the base instructions.
developer_instructions: Option<String>,
/// Model instructions that are appended to the base instructions.
user_instructions: Option<String>,
@@ -417,6 +423,7 @@ impl Session {
sub_id,
client,
cwd: session_configuration.cwd.clone(),
developer_instructions: session_configuration.developer_instructions.clone(),
base_instructions: session_configuration.base_instructions.clone(),
compact_prompt: session_configuration.compact_prompt.clone(),
user_instructions: session_configuration.user_instructions.clone(),
@@ -991,7 +998,10 @@ impl Session {
}
pub(crate) fn build_initial_context(&self, turn_context: &TurnContext) -> Vec<ResponseItem> {
-let mut items = Vec::<ResponseItem>::with_capacity(2);
+let mut items = Vec::<ResponseItem>::with_capacity(3);
if let Some(developer_instructions) = turn_context.developer_instructions.as_deref() {
items.push(DeveloperInstructions::new(developer_instructions.to_string()).into());
}
if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
items.push(UserInstructions::new(user_instructions.to_string()).into());
}
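
With this change the initial context can hold up to three items: an optional developer message first, then the wrapped user instructions, then the environment context (the new test further down pins down this ordering). A self-contained sketch of the push order, using hypothetical simplified types in place of the crate-private ResponseItem:

// Hypothetical simplified mirror of build_initial_context: developer
// instructions, when present, are pushed ahead of user instructions.
struct Turn {
    developer_instructions: Option<String>,
    user_instructions: Option<String>,
}

fn build_initial_context(turn: &Turn) -> Vec<(&'static str, String)> {
    let mut items = Vec::with_capacity(3);
    if let Some(dev) = turn.developer_instructions.as_deref() {
        // Developer text is sent raw under the "developer" role.
        items.push(("developer", dev.to_string()));
    }
    if let Some(user) = turn.user_instructions.as_deref() {
        // User instructions keep their XML-style wrapper, as the tests assert.
        items.push((
            "user",
            format!("<user_instructions>\n{user}\n</user_instructions>"),
        ));
    }
    items
}

fn main() {
    let turn = Turn {
        developer_instructions: Some("be useful".into()),
        user_instructions: Some("be nice".into()),
    };
    for (role, text) in build_initial_context(&turn) {
        println!("{role}: {text}");
    }
}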
@@ -1674,6 +1684,7 @@ async fn spawn_review_thread(
sub_id: sub_id.to_string(),
client,
tools_config,
developer_instructions: None,
user_instructions: None,
base_instructions: Some(base_instructions.clone()),
compact_prompt: parent_turn_context.compact_prompt.clone(),
@@ -2511,6 +2522,7 @@ mod tests {
model: config.model.clone(),
model_reasoning_effort: config.model_reasoning_effort,
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
base_instructions: config.base_instructions.clone(),
compact_prompt: config.compact_prompt.clone(),
@@ -2586,6 +2598,7 @@ mod tests {
model: config.model.clone(),
model_reasoning_effort: config.model_reasoning_effort,
model_reasoning_summary: config.model_reasoning_summary,
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
base_instructions: config.base_instructions.clone(),
compact_prompt: config.compact_prompt.clone(),

@@ -128,6 +128,9 @@ pub struct Config {
/// Base instructions override.
pub base_instructions: Option<String>,
/// Developer instructions override injected as a separate message.
pub developer_instructions: Option<String>,
/// Compact prompt override.
pub compact_prompt: Option<String>,
@@ -543,6 +546,11 @@ pub struct ConfigToml {
/// System instructions.
pub instructions: Option<String>,
/// Developer instructions inserted as a `developer` role message.
#[serde(default)]
pub developer_instructions: Option<String>,
/// Compact prompt used for history compaction.
pub compact_prompt: Option<String>,
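
Because the new ConfigToml field carries #[serde(default)], a config.toml without the key simply deserializes to None. A minimal sketch of that behavior with a hypothetical struct mirroring just this field (assumes the serde and toml crates):

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct MiniConfigToml {
    #[serde(default)]
    developer_instructions: Option<String>,
}

fn main() {
    let set: MiniConfigToml =
        toml::from_str(r#"developer_instructions = "be useful""#).unwrap();
    assert_eq!(set.developer_instructions.as_deref(), Some("be useful"));

    // A missing key deserializes to None.
    let unset: MiniConfigToml = toml::from_str("").unwrap();
    assert!(unset.developer_instructions.is_none());
}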
@@ -830,6 +838,7 @@ pub struct ConfigOverrides {
pub config_profile: Option<String>,
pub codex_linux_sandbox_exe: Option<PathBuf>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub compact_prompt: Option<String>,
pub include_apply_patch_tool: Option<bool>,
pub include_view_image_tool: Option<bool>,
@@ -861,6 +870,7 @@ impl Config {
config_profile: config_profile_key,
codex_linux_sandbox_exe,
base_instructions,
developer_instructions,
compact_prompt,
include_apply_patch_tool: include_apply_patch_tool_override,
include_view_image_tool: include_view_image_tool_override,
@@ -1060,6 +1070,7 @@ impl Config {
"experimental instructions file",
)?;
let base_instructions = base_instructions.or(file_base_instructions);
let developer_instructions = developer_instructions.or(cfg.developer_instructions);
let experimental_compact_prompt_path = config_profile
.experimental_compact_prompt_file
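
The resolution order matches base_instructions just above: an explicit override (for example, one supplied through ConfigOverrides by the CLI layer) takes precedence over the config.toml value. A tiny sketch of the Option::or chain, with hypothetical stand-in values:

fn main() {
    let from_overrides: Option<String> = Some("override wins".into());
    let from_config_file: Option<String> = Some("config.toml value".into());

    // Mirrors `developer_instructions.or(cfg.developer_instructions)`.
    let developer_instructions = from_overrides.or(from_config_file);
    assert_eq!(developer_instructions.as_deref(), Some("override wins"));
}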
@@ -1095,6 +1106,7 @@ impl Config {
notify: cfg.notify,
user_instructions,
base_instructions,
developer_instructions,
compact_prompt,
// The config.toml omits "_mode" because it's a config file. However, "_mode"
// is important in code to differentiate the mode from the store implementation.
@@ -2886,6 +2898,7 @@ model_verbosity = "high"
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
@@ -2958,6 +2971,7 @@ model_verbosity = "high"
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
@@ -3045,6 +3059,7 @@ model_verbosity = "high"
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,
@@ -3118,6 +3133,7 @@ model_verbosity = "high"
model_verbosity: Some(Verbosity::High),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,
compact_prompt: None,
forced_chatgpt_workspace_id: None,
forced_login_method: None,

@@ -40,3 +40,31 @@ impl From<UserInstructions> for ResponseItem {
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "developer_instructions", rename_all = "snake_case")]
pub(crate) struct DeveloperInstructions {
text: String,
}
impl DeveloperInstructions {
pub fn new<T: Into<String>>(text: T) -> Self {
Self { text: text.into() }
}
pub fn into_text(self) -> String {
self.text
}
}
impl From<DeveloperInstructions> for ResponseItem {
fn from(di: DeveloperInstructions) -> Self {
ResponseItem::Message {
id: None,
role: "developer".to_string(),
content: vec![ContentItem::InputText {
text: di.into_text(),
}],
}
}
}
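
For reference, a sketch of the JSON shape such a message takes in the request body. The role and content[0].text fields are confirmed by the test below; the "type" tags are an assumption about how ResponseItem serializes, not taken from this diff:

use serde_json::json;

fn main() {
    // Hypothetical wire shape after From<DeveloperInstructions> for ResponseItem.
    let message = json!({
        "type": "message",   // assumed tag, not shown in this diff
        "role": "developer", // confirmed by assert_message_role
        "content": [
            { "type": "input_text", "text": "be useful" } // text is sent raw
        ]
    });
    println!("{message}");
}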

@@ -58,6 +58,18 @@ fn assert_message_role(request_body: &serde_json::Value, role: &str) {
assert_eq!(request_body["role"].as_str().unwrap(), role);
}
#[expect(clippy::expect_used)]
fn assert_message_equals(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
.as_str()
.expect("invalid message content");
assert_eq!(
content, text,
"expected message content '{content}' to equal '{text}'"
);
}
#[expect(clippy::expect_used)]
fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) {
let content = request_body["content"][0]["text"]
@@ -608,6 +620,64 @@ async fn includes_user_instructions_message_in_request() {
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_developer_instructions_message_in_request() {
skip_if_no_network!();
let server = MockServer::start().await;
let resp_mock =
responses::mount_sse_once_match(&server, path("/v1/responses"), sse_completed("resp1"))
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.user_instructions = Some("be nice".to_string());
config.developer_instructions = Some("be useful".to_string());
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
codex
.submit(Op::UserInput {
items: vec![UserInput::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let request = resp_mock.single_request();
let request_body = request.body_json();
assert!(
!request_body["instructions"]
.as_str()
.unwrap()
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "developer");
assert_message_equals(&request_body["input"][0], "be useful");
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "<user_instructions>");
assert_message_ends_with(&request_body["input"][1], "</user_instructions>");
assert_message_role(&request_body["input"][2], "user");
assert_message_starts_with(&request_body["input"][2], "<environment_context>");
assert_message_ends_with(&request_body["input"][2], "</environment_context>");
}
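
Note the asymmetry the test pins down: the developer message carries the raw text ("be useful"), while user instructions and environment context keep their XML-style wrappers, and the user instructions do not leak into the top-level instructions field.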
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn azure_responses_request_includes_store_and_reasoning_ids() {
skip_if_no_network!();