From 42619734678b200d75c94a225df70ece74f4022b Mon Sep 17 00:00:00 2001
From: Daniel Nakov
Date: Wed, 23 Apr 2025 01:07:40 -0400
Subject: [PATCH] bug: non-openai mode - don't default temp and top_p (#572)

I haven't seen any actual errors due to this, but it's been bothering me
that I had it defaulted to 1. I think it's best to leave it undefined and
have each provider do their thing.
---
 codex-cli/src/utils/responses.ts | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/codex-cli/src/utils/responses.ts b/codex-cli/src/utils/responses.ts
index 99bbd7ee..6a763ebf 100644
--- a/codex-cli/src/utils/responses.ts
+++ b/codex-cli/src/utils/responses.ts
@@ -275,8 +275,8 @@ const createCompletion = (openai: OpenAI, input: ResponseCreateInput) => {
     messages: fullMessages,
     tools: chatTools,
     web_search_options: webSearchOptions,
-    temperature: input.temperature ?? 1.0,
-    top_p: input.top_p ?? 1.0,
+    temperature: input.temperature,
+    top_p: input.top_p,
     tool_choice: (input.tool_choice === "auto"
       ? "auto"
       : input.tool_choice) as OpenAI.Chat.Completions.ChatCompletionCreateParams["tool_choice"],
@@ -385,11 +385,11 @@ async function nonStreamResponses(
     parallel_tool_calls: input.parallel_tool_calls ?? false,
     previous_response_id: input.previous_response_id ?? null,
     reasoning: null,
-    temperature: input.temperature ?? 1.0,
+    temperature: input.temperature,
     text: { format: { type: "text" } },
     tool_choice: input.tool_choice ?? "auto",
     tools: input.tools ?? [],
-    top_p: input.top_p ?? 1.0,
+    top_p: input.top_p,
     truncation: input.truncation ?? "disabled",
     usage: chatResponse.usage
       ? {
@@ -471,11 +471,11 @@ async function* streamResponses(
     parallel_tool_calls: true,
     previous_response_id: input.previous_response_id ?? null,
     reasoning: null,
-    temperature: input.temperature ?? 1.0,
+    temperature: input.temperature,
     text: { format: { type: "text" } },
     tool_choice: input.tool_choice ?? "auto",
     tools: input.tools ?? [],
-    top_p: input.top_p ?? 1.0,
+    top_p: input.top_p,
     truncation: input.truncation ?? "disabled",
     usage: undefined,
     user: input.user ?? undefined,
@@ -652,11 +652,11 @@ async function* streamResponses(
     parallel_tool_calls: true,
     previous_response_id: input.previous_response_id ?? null,
     reasoning: null,
-    temperature: input.temperature ?? 1.0,
+    temperature: input.temperature,
     text: { format: { type: "text" } },
     tool_choice: input.tool_choice ?? "auto",
     tools: input.tools ?? [],
-    top_p: input.top_p ?? 1.0,
+    top_p: input.top_p,
     truncation: input.truncation ?? "disabled",
     usage: usage as ResponseOutput["usage"],
     user: input.user ?? undefined,
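
Note (illustration only, not part of the commit): dropping the `?? 1.0`
fallback is enough because an undefined field never reaches the wire,
assuming the client JSON-serializes the request body as the OpenAI SDK
does. A minimal TypeScript sketch of the mechanism, using a hypothetical
input where the user set neither knob:

    // Hypothetical input with neither sampling knob set.
    const input: { temperature?: number; top_p?: number } = {};

    // Old behavior: the fallback forced an explicit 1.0 into the body.
    const oldBody = {
      temperature: input.temperature ?? 1.0,
      top_p: input.top_p ?? 1.0,
    };

    // New behavior: the fields stay undefined, and JSON.stringify drops
    // undefined-valued properties, so they are omitted from the request
    // entirely and each provider applies its own default.
    const newBody = {
      temperature: input.temperature,
      top_p: input.top_p,
    };

    console.log(JSON.stringify(oldBody)); // {"temperature":1,"top_p":1}
    console.log(JSON.stringify(newBody)); // {}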