Never store requests (#3212)
When item ids are sent to the Responses API, it loads them from the database and ignores the provided values, which adds extra latency. Dropping the mode that stores requests also lets us simplify the code.

## Breaking change

The `disable_response_storage` configuration option is removed.
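For illustration only, assuming a standard `config.toml`: a config that previously set this option would contain a line like the sketch below, which should be deleted after upgrading since the key no longer exists.

```toml
# config.toml (sketch): this key is removed by this change
# and should be deleted from existing configs.
disable_response_storage = false
```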
@@ -994,7 +994,6 @@ fn derive_config_from_params(
 include_plan_tool,
 include_apply_patch_tool,
 include_view_image_tool: None,
-disable_response_storage: None,
 show_raw_agent_reasoning: None,
 tools_web_search_request: None,
 };
@@ -162,7 +162,6 @@ impl CodexToolCallParam {
 include_plan_tool,
 include_apply_patch_tool: None,
 include_view_image_tool: None,
-disable_response_storage: None,
 show_raw_agent_reasoning: None,
 tools_web_search_request: None,
 };
@@ -51,7 +51,6 @@ model_reasoning_effort = "high"
 model_reasoning_summary = "detailed"
 model_verbosity = "medium"
 model_provider = "openai"
-disable_response_storage = false
 chatgpt_base_url = "https://api.chatgpt.com"
 "#,
 )
@@ -111,7 +110,6 @@ async fn get_config_toml_parses_all_fields() {
 model_reasoning_summary: Some(ReasoningSummary::Detailed),
 model_verbosity: Some(Verbosity::Medium),
 model_provider: Some("openai".into()),
-disable_response_storage: Some(false),
 chatgpt_base_url: Some("https://api.chatgpt.com".into()),
 },
 )]),