https://github.com/openai/codex/pull/1835 has some messed-up history. This PR adds support for streaming chat completions, which is useful for Ollama. We should take a very skeptical look at the code it introduces.

---------

Co-authored-by: Ahmed Ibrahim <aibrahim@openai.com>
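For reviewers, a minimal sketch (an assumption for illustration, not code from this PR) of how one streamed Chat Completions chunk could map onto the `ResponseEvent` variants defined in the file below:

// Hypothetical helper, not from this PR: maps one Chat Completions SSE
// `data:` payload onto the ResponseEvent variants defined in this file.
use serde_json::Value;

fn chunk_to_event(sse_data: &str) -> Option<ResponseEvent> {
    // The Chat Completions stream terminates with a literal "[DONE]" payload.
    if sse_data.trim() == "[DONE]" {
        return None; // the caller would then emit ResponseEvent::Completed
    }
    let chunk: Value = serde_json::from_str(sse_data).ok()?;
    // Streaming chunks carry incremental text under choices[0].delta.content.
    chunk
        .get("choices")?
        .get(0)?
        .get("delta")?
        .get("content")
        .and_then(Value::as_str)
        .map(|text| ResponseEvent::OutputTextDelta(text.to_string()))
}

The `[DONE]` sentinel and the `choices[0].delta.content` path follow the documented Chat Completions streaming format.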
197 lines · 6.5 KiB · Rust
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::models::ResponseItem;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use futures::Stream;
use serde::Serialize;
use std::borrow::Cow;
use std::collections::HashMap;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use tokio::sync::mpsc;

/// The `instructions` field in the payload sent to a model should always start
/// with this content.
const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");

/// Wraps the user instructions message in tags for the model to parse more easily.
const USER_INSTRUCTIONS_START: &str = "<user_instructions>\n\n";
const USER_INSTRUCTIONS_END: &str = "\n\n</user_instructions>";

/// API request payload for a single model turn.
#[derive(Default, Debug, Clone)]
pub struct Prompt {
    /// Conversation context input items.
    pub input: Vec<ResponseItem>,
    /// Optional instructions from the user to append to the built-in agent
    /// instructions.
    pub user_instructions: Option<String>,
    /// Whether to store the response on the server side
    /// (`disable_response_storage = !store`).
    pub store: bool,

    /// Additional tools sourced from external MCP servers. Note that each key
    /// is the "fully qualified" tool name (i.e., prefixed with the server
    /// name), which should be reported to the model in place of `Tool::name`.
    pub extra_tools: HashMap<String, mcp_types::Tool>,

    /// Optional override for the built-in `BASE_INSTRUCTIONS`.
    pub base_instructions_override: Option<String>,
}

impl Prompt {
    pub(crate) fn get_full_instructions(&self, model: &ModelFamily) -> Cow<'_, str> {
        let base = self
            .base_instructions_override
            .as_deref()
            .unwrap_or(BASE_INSTRUCTIONS);
        let mut sections: Vec<&str> = vec![base];
        if model.needs_special_apply_patch_instructions {
            sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
        }
        Cow::Owned(sections.join("\n"))
    }

    pub(crate) fn get_formatted_user_instructions(&self) -> Option<String> {
        self.user_instructions
            .as_ref()
            .map(|ui| format!("{USER_INSTRUCTIONS_START}{ui}{USER_INSTRUCTIONS_END}"))
    }
}
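
// Illustrative example (not in the original file): with
// user_instructions = Some("be terse"), get_formatted_user_instructions()
// returns the wrapped form:
//
//     <user_instructions>
//
//     be terse
//
//     </user_instructions>
//
// get_full_instructions() joins the (possibly overridden) base prompt with
// APPLY_PATCH_TOOL_INSTRUCTIONS for model families that need them.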

#[derive(Debug)]
pub enum ResponseEvent {
    Created,
    OutputItemDone(ResponseItem),
    Completed {
        response_id: String,
        token_usage: Option<TokenUsage>,
    },
    OutputTextDelta(String),
    ReasoningSummaryDelta(String),
    ReasoningContentDelta(String),
}
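
// Assumed event ordering for a streamed turn (inferred from the variant
// names, not documented here): Created first, then any number of
// OutputTextDelta / ReasoningSummaryDelta / ReasoningContentDelta events,
// an OutputItemDone for each finished item, and a final Completed carrying
// the response id and optional token usage.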

#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
    pub(crate) effort: OpenAiReasoningEffort,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) summary: Option<OpenAiReasoningSummary>,
}

/// See <https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning>
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningEffort {
    Low,
    #[default]
    Medium,
    High,
}

impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
    fn from(effort: ReasoningEffortConfig) -> Self {
        match effort {
            ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
            ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
            ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
            ReasoningEffortConfig::None => None,
        }
    }
}

/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See <https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries>
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningSummary {
    #[default]
    Auto,
    Concise,
    Detailed,
}

impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
    fn from(summary: ReasoningSummaryConfig) -> Self {
        match summary {
            ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
            ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
            ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
            ReasoningSummaryConfig::None => None,
        }
    }
}

/// Request object that is serialized as JSON and POSTed when using the
/// Responses API.
#[derive(Debug, Serialize)]
pub(crate) struct ResponsesApiRequest<'a> {
    pub(crate) model: &'a str,
    pub(crate) instructions: &'a str,
    // TODO(mbolin): ResponseItem::Other should not be serialized. Currently,
    // we code defensively to avoid this case, but perhaps we should use a
    // separate enum for serialization.
    pub(crate) input: &'a Vec<ResponseItem>,
    pub(crate) tools: &'a [serde_json::Value],
    pub(crate) tool_choice: &'static str,
    pub(crate) parallel_tool_calls: bool,
    pub(crate) reasoning: Option<Reasoning>,
    /// `true` when using the Responses API.
    pub(crate) store: bool,
    pub(crate) stream: bool,
    pub(crate) include: Vec<String>,
}
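
// Sketch of the serialized payload with assumed illustrative values (the
// field names follow the struct above; the values are made up):
//
//     {
//       "model": "o3",
//       "instructions": "...",
//       "input": [],
//       "tools": [],
//       "tool_choice": "auto",
//       "parallel_tool_calls": false,
//       "reasoning": { "effort": "medium", "summary": "auto" },
//       "store": true,
//       "stream": true,
//       "include": []
//     }
//
// Note that `effort` and `summary` serialize lowercase via
// `#[serde(rename_all = "lowercase")]`, and `summary` is omitted when None.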

pub(crate) fn create_reasoning_param_for_request(
    model_family: &ModelFamily,
    effort: ReasoningEffortConfig,
    summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
    if model_family.supports_reasoning_summaries {
        let effort: Option<OpenAiReasoningEffort> = effort.into();
        let effort = effort?;
        Some(Reasoning {
            effort,
            summary: summary.into(),
        })
    } else {
        None
    }
}
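
// For example (hypothetical inputs): a model family with
// supports_reasoning_summaries == true and ReasoningEffortConfig::Medium
// yields Some(Reasoning { effort: Medium, summary: summary.into() }), while
// ReasoningEffortConfig::None short-circuits to None via the `?` operator,
// as does any family without reasoning-summary support.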

pub(crate) struct ResponseStream {
    pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
}

impl Stream for ResponseStream {
    type Item = Result<ResponseEvent>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.rx_event.poll_recv(cx)
    }
}
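
// Illustrative consumer, assuming futures::StreamExt is in scope (a sketch,
// not code from this file):
//
//     use futures::StreamExt;
//     while let Some(event) = stream.next().await {
//         match event? {
//             ResponseEvent::OutputTextDelta(delta) => print!("{delta}"),
//             ResponseEvent::Completed { .. } => break,
//             _ => {}
//         }
//     }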

#[cfg(test)]
mod tests {
    #![allow(clippy::expect_used)]
    use crate::model_family::find_family_for_model;

    use super::*;

    #[test]
    fn get_full_instructions_no_user_content() {
        let prompt = Prompt {
            user_instructions: Some("custom instruction".to_string()),
            ..Default::default()
        };
        let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");
        let model_family = find_family_for_model("gpt-4.1").expect("known model slug");
        let full = prompt.get_full_instructions(&model_family);
        assert_eq!(full, expected);
    }
}