use crate::config::types::ReasoningSummaryFormat;
use crate::tools::handlers::apply_patch::ApplyPatchToolType;

/// The `instructions` field in the payload sent to a model should always start
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");

const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../gpt_5_codex_prompt.md");

/// A model family is a group of models that share certain characteristics.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ModelFamily {
    /// The full model slug used to derive this model family, e.g.
    /// "gpt-4.1-2025-04-14".
    pub slug: String,

    /// The model family name, e.g. "gpt-4.1". Note this should be usable
    /// with [`crate::openai_model_info::get_model_info`].
    pub family: String,

    /// True if the model needs additional instructions on how to use the
    /// "virtual" `apply_patch` CLI.
    pub needs_special_apply_patch_instructions: bool,

    /// Whether the `reasoning` field can be set when making a request to this
    /// model family. Note it has `effort` and `summary` subfields (though
    /// `summary` is optional).
    pub supports_reasoning_summaries: bool,

    /// Whether the reasoning summary requires special handling.
    pub reasoning_summary_format: ReasoningSummaryFormat,

    /// Set to true when the model expects a tool named "local_shell" to be
    /// provided. Its contract must be understood natively by the model such
    /// that its description can be omitted.
    /// See https://platform.openai.com/docs/guides/tools-local-shell
    pub uses_local_shell_tool: bool,

    /// Whether this model supports parallel tool calls when using the
    /// Responses API.
    pub supports_parallel_tool_calls: bool,

    /// Present if the model performs better when `apply_patch` is provided as
    /// a tool call instead of just a bash command.
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,

    /// Instructions to use for querying the model.
    pub base_instructions: String,

    /// Names of beta tools that should be exposed to this model family.
    pub experimental_supported_tools: Vec<String>,

    /// Percentage of the context window considered usable for inputs, after
    /// reserving headroom for system prompts, tool overhead, and model output.
    /// This is applied when computing the effective context window seen by
    /// consumers.
    pub effective_context_window_percent: i64,

    /// Whether the model family supports setting the verbosity level when
    /// using the Responses API.
    pub support_verbosity: bool,
}

macro_rules! model_family {
    (
        $slug:expr, $family:expr
        $(, $key:ident : $value:expr )*
        $(,)?
    ) => {{
        // Defaults shared by every model family.
        let mut mf = ModelFamily {
            slug: $slug.to_string(),
            family: $family.to_string(),
            needs_special_apply_patch_instructions: false,
            supports_reasoning_summaries: false,
            reasoning_summary_format: ReasoningSummaryFormat::None,
            uses_local_shell_tool: false,
            supports_parallel_tool_calls: false,
            apply_patch_tool_type: None,
            base_instructions: BASE_INSTRUCTIONS.to_string(),
            experimental_supported_tools: Vec::new(),
            effective_context_window_percent: 95,
            support_verbosity: false,
        };
        // Apply the caller's field overrides.
        $(
            mf.$key = $value;
        )*
        Some(mf)
    }};
}
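// A minimal sketch of how `model_family!` behaves: only the listed keys are
// overridden and every other field keeps the defaults above. The slug and
// family names used here are illustrative assumptions, not real models.
#[cfg(test)]
mod model_family_macro_sketch {
    use super::*;

    #[test]
    fn overrides_only_the_listed_fields() {
        let mf = model_family!(
            "example-slug",   // hypothetical slug, for illustration only
            "example-family", // hypothetical family name
            supports_reasoning_summaries: true,
        )
        .expect("the macro always yields Some");

        assert!(mf.supports_reasoning_summaries);
        // Fields that were not listed keep their defaults.
        assert_eq!(mf.base_instructions, BASE_INSTRUCTIONS);
        assert_eq!(mf.effective_context_window_percent, 95);
        assert!(!mf.support_verbosity);
    }
}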
/// Returns a `ModelFamily` for the given model slug, or `None` if the slug
/// does not match any known model family.
pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
    if slug.starts_with("o3") {
        model_family!(
            slug, "o3",
            supports_reasoning_summaries: true,
            needs_special_apply_patch_instructions: true,
        )
    } else if slug.starts_with("o4-mini") {
        model_family!(
            slug, "o4-mini",
            supports_reasoning_summaries: true,
            needs_special_apply_patch_instructions: true,
        )
    } else if slug.starts_with("codex-mini-latest") {
        model_family!(
            slug, "codex-mini-latest",
            supports_reasoning_summaries: true,
            uses_local_shell_tool: true,
            needs_special_apply_patch_instructions: true,
        )
    } else if slug.starts_with("gpt-4.1") {
        model_family!(
            slug, "gpt-4.1",
            needs_special_apply_patch_instructions: true,
        )
    } else if slug.starts_with("gpt-oss") || slug.starts_with("openai/gpt-oss") {
        model_family!(slug, "gpt-oss", apply_patch_tool_type: Some(ApplyPatchToolType::Function))
    } else if slug.starts_with("gpt-4o") {
        model_family!(slug, "gpt-4o", needs_special_apply_patch_instructions: true)
    } else if slug.starts_with("gpt-3.5") {
        model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
    } else if slug.starts_with("test-gpt-5-codex") {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
            experimental_supported_tools: vec![
                "grep_files".to_string(),
                "list_dir".to_string(),
                "read_file".to_string(),
                "test_sync_tool".to_string(),
            ],
            supports_parallel_tool_calls: true,
            support_verbosity: true,
        )
    // Internal models.
    } else if slug.starts_with("codex-exp-") {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            experimental_supported_tools: vec![
                "grep_files".to_string(),
                "list_dir".to_string(),
                "read_file".to_string(),
            ],
            supports_parallel_tool_calls: true,
            support_verbosity: true,
        )
    // Production models.
    } else if slug.starts_with("gpt-5-codex") || slug.starts_with("codex-") {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            support_verbosity: true,
        )
    } else if slug.starts_with("gpt-5") {
        model_family!(
            slug, "gpt-5",
            supports_reasoning_summaries: true,
            needs_special_apply_patch_instructions: true,
            support_verbosity: true,
        )
    } else {
        None
    }
}

pub fn derive_default_model_family(model: &str) -> ModelFamily {
    ModelFamily {
        slug: model.to_string(),
        family: model.to_string(),
        needs_special_apply_patch_instructions: false,
        supports_reasoning_summaries: false,
        reasoning_summary_format: ReasoningSummaryFormat::None,
        uses_local_shell_tool: false,
        supports_parallel_tool_calls: false,
        apply_patch_tool_type: None,
        base_instructions: BASE_INSTRUCTIONS.to_string(),
        experimental_supported_tools: Vec::new(),
        effective_context_window_percent: 95,
        support_verbosity: false,
    }
}
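// A minimal sketch of how the resolver above can be exercised. The slugs used
// here are illustrative (one drawn from the prefixes matched above, one made
// up); this is not an exhaustive contract for every family.
#[cfg(test)]
mod family_resolution_sketch {
    use super::*;

    #[test]
    fn known_prefix_resolves_to_its_family() {
        let mf = find_family_for_model("gpt-4.1-2025-04-14").expect("known prefix");
        assert_eq!(mf.family, "gpt-4.1");
        assert!(mf.needs_special_apply_patch_instructions);
    }

    #[test]
    fn unknown_slug_returns_none_but_a_default_family_is_available() {
        // "unknown-model" is a hypothetical slug that matches no known prefix.
        assert!(find_family_for_model("unknown-model").is_none());

        // Callers can still fall back to a default family built from the slug.
        let fallback = derive_default_model_family("unknown-model");
        assert_eq!(fallback.family, "unknown-model");
        assert_eq!(fallback.base_instructions, BASE_INSTRUCTIONS);
    }
}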