chore: read model field off of Config instead of maintaining the parallel field (#1525)

https://github.com/openai/codex/pull/1524 introduced the new `config`
field on `ModelClient`, so this does the post-PR cleanup to remove the
now-unnecessary `model` field.
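
For context, a minimal before/after sketch of the change (a sketch only, not the crate's real definitions: field types are trimmed down, and `Config` is assumed to expose a public `model: String`, which the hunks below read as `self.config.model`):

use std::sync::Arc;

// Assumed minimal shape of Config for this sketch; the real struct has many more fields.
pub struct Config {
    pub model: String,
}

// Before: the client kept its own copy of the model name next to the shared Config.
pub struct ModelClientBefore {
    config: Arc<Config>,
    model: String, // duplicate of config.model
    // ... remaining fields elided
}

// After: the model name lives only on Config and is read where it is needed.
pub struct ModelClientAfter {
    config: Arc<Config>,
    // ... remaining fields elided
}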
Author: Michael Bolin (committed by GitHub)
Date:   2025-07-10 14:37:04 -07:00
Parent: 8a424fcfa3
Commit: 9e58076cf5

@@ -42,7 +42,6 @@ use std::sync::Arc;
 #[derive(Clone)]
 pub struct ModelClient {
     config: Arc<Config>,
-    model: String,
     client: reqwest::Client,
     provider: ModelProviderInfo,
     effort: ReasoningEffortConfig,
@@ -56,10 +55,8 @@ impl ModelClient {
         effort: ReasoningEffortConfig,
         summary: ReasoningSummaryConfig,
     ) -> Self {
-        let model = config.model.clone();
         Self {
             config,
-            model: model.to_string(),
             client: reqwest::Client::new(),
             provider,
             effort,
@@ -75,9 +72,13 @@ impl ModelClient {
             WireApi::Responses => self.stream_responses(prompt).await,
             WireApi::Chat => {
                 // Create the raw streaming connection first.
-                let response_stream =
-                    stream_chat_completions(prompt, &self.model, &self.client, &self.provider)
-                        .await?;
+                let response_stream = stream_chat_completions(
+                    prompt,
+                    &self.config.model,
+                    &self.client,
+                    &self.provider,
+                )
+                .await?;
 
                 // Wrap it with the aggregation adapter so callers see *only*
                 // the final assistant message per turn (matching the
@@ -111,11 +112,11 @@ impl ModelClient {
             return stream_from_fixture(path).await;
         }
 
-        let full_instructions = prompt.get_full_instructions(&self.model);
-        let tools_json = create_tools_json_for_responses_api(prompt, &self.model)?;
+        let full_instructions = prompt.get_full_instructions(&self.config.model);
+        let tools_json = create_tools_json_for_responses_api(prompt, &self.config.model)?;
         let reasoning = create_reasoning_param_for_request(&self.config, self.effort, self.summary);
         let payload = ResponsesApiRequest {
-            model: &self.model,
+            model: &self.config.model,
             instructions: &full_instructions,
             input: &prompt.input,
             tools: &tools_json,
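
As a standalone illustration of the resulting pattern (hypothetical names, not the codex API): every consumer now reads the model name through the shared `Config`, so there is no second copy to keep in sync.

use std::sync::Arc;

struct Config {
    model: String,
}

struct Client {
    config: Arc<Config>,
}

impl Client {
    // Read the model name off the shared Config instead of caching it.
    fn model(&self) -> &str {
        &self.config.model
    }
}

fn main() {
    let config = Arc::new(Config { model: "example-model".to_string() });
    let client = Client { config: Arc::clone(&config) };
    assert_eq!(client.model(), "example-model");
}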