bug: non-openai mode - don't default temp and top_p (#572)
I haven't seen any actual errors due to this, but it's been bothering me that I had it defaulted to 1. I think it's best to leave these undefined and let each provider apply its own default.
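For context on why dropping the `?? 1.0` fallback is enough: a field left `undefined` is omitted from the serialized JSON request body entirely, so the provider never sees it and applies its own server-side default. A minimal sketch of that behavior (the `params` object here is illustrative, not the actual `createCompletion` code):

```ts
// Sketch: keys whose value is undefined are dropped by JSON.stringify,
// so the provider receives no temperature/top_p and uses its own default.
const params = {
  model: "some-model",
  temperature: undefined, // was `input.temperature ?? 1.0` before this fix
  top_p: undefined,       // was `input.top_p ?? 1.0` before this fix
};

console.log(JSON.stringify(params)); // {"model":"some-model"}
```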
@@ -275,8 +275,8 @@ const createCompletion = (openai: OpenAI, input: ResponseCreateInput) => {
     messages: fullMessages,
     tools: chatTools,
     web_search_options: webSearchOptions,
-    temperature: input.temperature ?? 1.0,
-    top_p: input.top_p ?? 1.0,
+    temperature: input.temperature,
+    top_p: input.top_p,
     tool_choice: (input.tool_choice === "auto"
       ? "auto"
       : input.tool_choice) as OpenAI.Chat.Completions.ChatCompletionCreateParams["tool_choice"],
@@ -385,11 +385,11 @@ async function nonStreamResponses(
     parallel_tool_calls: input.parallel_tool_calls ?? false,
     previous_response_id: input.previous_response_id ?? null,
     reasoning: null,
-    temperature: input.temperature ?? 1.0,
+    temperature: input.temperature,
     text: { format: { type: "text" } },
     tool_choice: input.tool_choice ?? "auto",
     tools: input.tools ?? [],
-    top_p: input.top_p ?? 1.0,
+    top_p: input.top_p,
     truncation: input.truncation ?? "disabled",
     usage: chatResponse.usage
       ? {
@@ -471,11 +471,11 @@ async function* streamResponses(
     parallel_tool_calls: true,
     previous_response_id: input.previous_response_id ?? null,
     reasoning: null,
-    temperature: input.temperature ?? 1.0,
+    temperature: input.temperature,
     text: { format: { type: "text" } },
     tool_choice: input.tool_choice ?? "auto",
     tools: input.tools ?? [],
-    top_p: input.top_p ?? 1.0,
+    top_p: input.top_p,
     truncation: input.truncation ?? "disabled",
     usage: undefined,
     user: input.user ?? undefined,
@@ -652,11 +652,11 @@ async function* streamResponses(
     parallel_tool_calls: true,
     previous_response_id: input.previous_response_id ?? null,
     reasoning: null,
-    temperature: input.temperature ?? 1.0,
+    temperature: input.temperature,
     text: { format: { type: "text" } },
     tool_choice: input.tool_choice ?? "auto",
     tools: input.tools ?? [],
-    top_p: input.top_p ?? 1.0,
+    top_p: input.top_p,
     truncation: input.truncation ?? "disabled",
     usage: usage as ResponseOutput["usage"],
     user: input.user ?? undefined,
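A quick way to sanity-check the new behavior, assuming a hypothetical `buildChatParams` helper that mirrors the object literals changed above (the name is illustrative; the real call sites are `createCompletion`, `nonStreamResponses`, and `streamResponses`):

```ts
// Hypothetical helper mirroring the changed object literals.
type SamplingInput = { temperature?: number; top_p?: number };

function buildChatParams(input: SamplingInput) {
  return {
    temperature: input.temperature, // no `?? 1.0` fallback anymore
    top_p: input.top_p,             // ditto
  };
}

// With nothing set, neither key survives serialization, so an
// OpenAI-compatible provider is free to apply its own defaults.
console.log(JSON.stringify(buildChatParams({})));             // {}
console.log(JSON.stringify(buildChatParams({ top_p: 0.9 }))); // {"top_p":0.9}
```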