feat: update default model to gpt-5-codex (#4076)

Changes:
- Default model and docs now use gpt-5-codex (see the sketch below).
- Disables the GPT-5 Codex NUX (new-user experience) by default.
- Keeps presets available for API key users.
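
For context, here is a minimal sketch of the kind of change the first bullet describes. The names (`DEFAULT_MODEL`, `Config`, `model_override`, `effective_model`) are hypothetical and are not the identifiers touched by this commit; the real change spans the 14 files diffed below.

```rust
// Hypothetical sketch only: illustrative names, not the actual code changed here.
pub const DEFAULT_MODEL: &str = "gpt-5-codex"; // previously "gpt-5"

pub struct Config {
    /// Explicit model chosen by the user (e.g. via config or a preset), if any.
    pub model_override: Option<String>,
}

impl Config {
    /// Falls back to the new default when no explicit model is configured,
    /// so API-key users can still pick a preset model themselves.
    pub fn effective_model(&self) -> &str {
        self.model_override.as_deref().unwrap_or(DEFAULT_MODEL)
    }
}
```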
Author: Thibault Sottiaux
Date: 2025-09-22 20:10:52 -07:00
Committed by: GitHub
Parent: c415827ac2
Commit: c93e77b68b
14 changed files with 136 additions and 52 deletions


@@ -822,7 +822,7 @@ async fn token_count_includes_rate_limits_snapshot() {
"reasoning_output_tokens": 0,
"total_tokens": 123
},
-// Default model is gpt-5 in tests → 272000 context window
+// Default model is gpt-5-codex in tests → 272000 context window
"model_context_window": 272000
},
"rate_limits": {


@@ -133,7 +133,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
.to_string();
let user_turn_1 = json!(
{
"model": "gpt-5",
"model": "gpt-5-codex",
"instructions": prompt,
"input": [
{
@@ -182,7 +182,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
});
let compact_1 = json!(
{
"model": "gpt-5",
"model": "gpt-5-codex",
"instructions": "You have exceeded the maximum number of tokens, please stop coding and instead write a short memento message for the next agent. Your note should:
- Summarize what you finished and what still needs work. If there was a recent update_plan call, repeat its steps verbatim.
- List outstanding TODOs with file paths / line numbers so they're easy to find.
@@ -255,7 +255,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
});
let user_turn_2_after_compact = json!(
{
"model": "gpt-5",
"model": "gpt-5-codex",
"instructions": prompt,
"input": [
{
@@ -320,7 +320,7 @@ SUMMARY_ONLY_CONTEXT"
});
let usert_turn_3_after_resume = json!(
{
"model": "gpt-5",
"model": "gpt-5-codex",
"instructions": prompt,
"input": [
{
@@ -405,7 +405,7 @@ SUMMARY_ONLY_CONTEXT"
});
let user_turn_3_after_fork = json!(
{
"model": "gpt-5",
"model": "gpt-5-codex",
"instructions": prompt,
"input": [
{


@@ -184,6 +184,7 @@ async fn prompt_tools_are_consistent_across_requests() {
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
+let expected_instructions = config.model_family.base_instructions.clone();
let codex = conversation_manager
.new_conversation(config)
.await
@@ -213,7 +214,6 @@ async fn prompt_tools_are_consistent_across_requests() {
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 2, "expected two POST requests");
-let expected_instructions: &str = include_str!("../../prompt.md");
// our internal implementation is responsible for keeping tools in sync
// with the OpenAI schema, so we just verify the tool presence here
let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch", "view_image"];