feat: make reasoning effort/summaries configurable (#1199)
Prior to this PR, we always set `reasoning` when making a request using the Responses API (d7245cbbc9/codex-rs/core/src/client.rs, L108-L111). However, if you tried to use the Rust CLI with `--model gpt-4.1`, the request would fail with:

```shell
"Unsupported parameter: 'reasoning.effort' is not supported with this model."
```

We take a cue from the TypeScript CLI, which does a check on the model name (d7245cbbc9/codex-cli/src/utils/agent/agent-loop.ts, L786-L789). This PR does a similar check, and also adds support for the following config options:

```
model_reasoning_effort = "low" | "medium" | "high" | "none"
model_reasoning_summary = "auto" | "concise" | "detailed" | "none"
```

This way, if you have a model whose name happens to start with `"o"` (or `"codex"`?), you can set these to `"none"` to explicitly disable reasoning if necessary. (That said, it seems unlikely anyone would use the Responses API with non-OpenAI models, but we provide an escape hatch anyway.)

This PR also updates both the TUI and `codex exec` to show `reasoning effort` and `reasoning summaries` in the header.
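For illustration, a minimal sketch of how these options might be set in the Codex config file (the `~/.codex/config.toml` location and the model name below are assumptions, not taken from this PR); the `"none"` values show the escape hatch for a model whose name happens to match the prefix check but does not support reasoning:

```toml
# Hypothetical ~/.codex/config.toml entries; the values are the options
# documented in the PR description above.
model = "o-custom-model"

# Explicitly disable reasoning for a model that would otherwise be treated
# as reasoning-capable by the name-based check.
model_reasoning_effort = "none"
model_reasoning_summary = "none"
```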
```diff
@@ -12,6 +12,8 @@ use serde::Deserialize;
 use serde::Serialize;
 use uuid::Uuid;
 
+use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
+use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use crate::message_history::HistoryEntry;
 use crate::model_provider_info::ModelProviderInfo;
 
@@ -37,6 +39,10 @@ pub enum Op {
 
         /// If not specified, server will use its default model.
         model: String,
+
+        model_reasoning_effort: ReasoningEffortConfig,
+        model_reasoning_summary: ReasoningSummaryConfig,
+
         /// Model instructions
         instructions: Option<String>,
         /// When to escalate for approval for execution
```
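A minimal sketch of the name-based gating described above; the function and type names are hypothetical (not the actual codex-rs API), and the prefix check simply mirrors the heuristic borrowed from the TypeScript CLI plus the `"none"` override added by this PR:

```rust
/// Hypothetical stand-in for the configured `model_reasoning_effort` value.
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq)]
enum ReasoningEffort {
    Low,
    Medium,
    High,
    None,
}

/// Decide whether a `reasoning` object should be attached to a Responses API
/// request. An explicit `"none"` in the config always wins; otherwise only
/// models whose names suggest a reasoning-capable family get the parameter.
fn reasoning_param(model: &str, effort: ReasoningEffort) -> Option<ReasoningEffort> {
    if effort == ReasoningEffort::None {
        return None;
    }
    if model.starts_with('o') || model.starts_with("codex") {
        Some(effort)
    } else {
        None
    }
}

fn main() {
    // o-series model: reasoning effort is forwarded.
    assert!(reasoning_param("o3", ReasoningEffort::Medium).is_some());
    // gpt-4.1 does not accept `reasoning.effort`, so nothing is sent.
    assert!(reasoning_param("gpt-4.1", ReasoningEffort::Medium).is_none());
    // Explicit "none" disables reasoning even for an o-prefixed model.
    assert!(reasoning_param("o3", ReasoningEffort::None).is_none());
}
```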