This PR introduces support for `-c`/`--config` so users can override individual config values on the command line using `--config name=value`. Example:

```
codex --config model=o4-mini
```

Making it possible to set arbitrary config values on the command line results in a more flexible configuration scheme and makes it easier to provide single-line examples that can be copy-pasted from documentation.

Effectively, it means there are four levels of configuration for some values, in increasing order of precedence:

- Default value (e.g., `model` currently defaults to `o4-mini`).
- Value in `config.toml` (e.g., the user could override the default with `model = "o3"` in their `config.toml`).
- Specifying `-c` or `--config` to override `model` (e.g., the user can include `-c model=o3` in their list of args to Codex).
- If available, a config-specific flag, which takes precedence over `-c` (e.g., the user can specify `--model o3` in their list of args to Codex).

Now that anything that could be configured in `config.toml` can also be specified on the command line using `-c`, we no longer need a custom flag for every possible config option (which can clutter the output of `--help`). To that end, as part of this PR we drop support for the `--disable-response-storage` flag; users can now specify `-c disable_response_storage=true` to get the equivalent functionality.

Under the hood, this works by loading `config.toml` into a `toml::Value`. Then, for each `key=value`, we create a small synthetic TOML file containing `value` so that we can run the TOML parser to get the equivalent `toml::Value`. We then parse `key` to determine the point in the original `toml::Value` at which to do the insert/replace. Once all of the overrides from `-c` args have been applied, the `toml::Value` is deserialized into a `ConfigToml` and then the `ConfigOverrides` are applied, as before. A minimal sketch of this merge step is shown below.
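To make that merge step concrete, here is a minimal sketch using the `toml` crate. It is not the code from this PR; the helper names `apply_override` and `insert_at` and the `synthetic` key name are hypothetical, and it only illustrates the "synthetic TOML document, then walk the dotted key" idea described above.

```rust
use toml::value::{Table, Value};

/// Hypothetical helper: apply one `-c key=value` override to the parsed config.
fn apply_override(root: &mut Table, key: &str, raw_value: &str) -> Result<(), toml::de::Error> {
    // Wrap `value` in a tiny synthetic TOML document so the TOML parser
    // decides its type (string, bool, integer, nested table, ...).
    let doc: Value = toml::from_str(&format!("synthetic = {raw_value}"))?;
    let value = doc
        .get("synthetic")
        .cloned()
        .expect("synthetic document always contains this key");

    // Walk the dotted segments of `key` and insert/replace at that point.
    let segments: Vec<&str> = key.split('.').collect();
    insert_at(root, &segments, value);
    Ok(())
}

/// Descend through dotted key segments, creating intermediate tables as needed.
fn insert_at(table: &mut Table, segments: &[&str], value: Value) {
    match segments {
        [] => {}
        [last] => {
            // Final segment: insert or replace the value here.
            table.insert((*last).to_string(), value);
        }
        [head, rest @ ..] => {
            let child = table
                .entry((*head).to_string())
                .or_insert_with(|| Value::Table(Table::new()));
            if let Some(child_table) = child.as_table_mut() {
                insert_at(child_table, rest, value);
            }
        }
    }
}
```

With this sketch, `apply_override(&mut root, "disable_response_storage", "true")` sets the same field as the dropped `--disable-response-storage` flag. The sketch also omits details a real CLI would need, such as how to treat values that do not parse as TOML (e.g., a bare `o3` rather than `"o3"`).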
116 lines · 4.3 KiB · Rust
```rust
//! Prototype MCP server.
#![deny(clippy::print_stdout, clippy::print_stderr)]

use std::io::Result as IoResult;
use std::path::PathBuf;

use mcp_types::JSONRPCMessage;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use tracing::debug;
use tracing::error;
use tracing::info;

mod codex_tool_config;
mod codex_tool_runner;
mod json_to_toml;
mod message_processor;

use crate::message_processor::MessageProcessor;

/// Size of the bounded channels used to communicate between tasks. The value
/// is a balance between throughput and memory usage – 128 messages should be
/// plenty for an interactive CLI.
const CHANNEL_CAPACITY: usize = 128;

pub async fn run_main(codex_linux_sandbox_exe: Option<PathBuf>) -> IoResult<()> {
    // Install a simple subscriber so `tracing` output is visible. Users can
    // control the log level with `RUST_LOG`.
    tracing_subscriber::fmt()
        .with_writer(std::io::stderr)
        .init();

    // Set up channels.
    let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
    let (outgoing_tx, mut outgoing_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);

    // Task: read from stdin, push to `incoming_tx`.
    let stdin_reader_handle = tokio::spawn({
        let incoming_tx = incoming_tx.clone();
        async move {
            let stdin = io::stdin();
            let reader = BufReader::new(stdin);
            let mut lines = reader.lines();

            while let Some(line) = lines.next_line().await.unwrap_or_default() {
                match serde_json::from_str::<JSONRPCMessage>(&line) {
                    Ok(msg) => {
                        if incoming_tx.send(msg).await.is_err() {
                            // Receiver gone – nothing left to do.
                            break;
                        }
                    }
                    Err(e) => error!("Failed to deserialize JSONRPCMessage: {e}"),
                }
            }

            debug!("stdin reader finished (EOF)");
        }
    });

    // Task: process incoming messages.
    let processor_handle = tokio::spawn({
        let mut processor = MessageProcessor::new(outgoing_tx.clone(), codex_linux_sandbox_exe);
        async move {
            while let Some(msg) = incoming_rx.recv().await {
                match msg {
                    JSONRPCMessage::Request(r) => processor.process_request(r),
                    JSONRPCMessage::Response(r) => processor.process_response(r),
                    JSONRPCMessage::Notification(n) => processor.process_notification(n),
                    JSONRPCMessage::BatchRequest(b) => processor.process_batch_request(b),
                    JSONRPCMessage::Error(e) => processor.process_error(e),
                    JSONRPCMessage::BatchResponse(b) => processor.process_batch_response(b),
                }
            }

            info!("processor task exited (channel closed)");
        }
    });

    // Task: write outgoing messages to stdout.
    let stdout_writer_handle = tokio::spawn(async move {
        let mut stdout = io::stdout();
        while let Some(msg) = outgoing_rx.recv().await {
            match serde_json::to_string(&msg) {
                Ok(json) => {
                    if let Err(e) = stdout.write_all(json.as_bytes()).await {
                        error!("Failed to write to stdout: {e}");
                        break;
                    }
                    if let Err(e) = stdout.write_all(b"\n").await {
                        error!("Failed to write newline to stdout: {e}");
                        break;
                    }
                    if let Err(e) = stdout.flush().await {
                        error!("Failed to flush stdout: {e}");
                        break;
                    }
                }
                Err(e) => error!("Failed to serialize JSONRPCMessage: {e}"),
            }
        }

        info!("stdout writer exited (channel closed)");
    });

    // Wait for all tasks to finish. The typical exit path is the stdin reader
    // hitting EOF which, once it drops `incoming_tx`, propagates shutdown to
    // the processor and then to the stdout task.
    let _ = tokio::join!(stdin_reader_handle, processor_handle, stdout_writer_handle);

    Ok(())
}
```
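For context on the framing those tasks implement (one serialized `JSONRPCMessage` per line on stdin and stdout, flushed after each message), here is a rough sketch of how a parent process could drive the server over stdio. It is illustrative only: the `codex-mcp-server` binary name is an assumption, and the `initialize` payload is a placeholder rather than a complete MCP handshake.

```rust
use std::process::Stdio;

use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::process::Command;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Assumed binary name; spawn it with piped stdio.
    let mut child = Command::new("codex-mcp-server")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;

    // One JSON-RPC message per line on stdin (payload abbreviated/placeholder).
    let mut stdin = child.stdin.take().expect("piped stdin");
    stdin
        .write_all(br#"{"jsonrpc":"2.0","id":1,"method":"initialize","params":{}}"#)
        .await?;
    stdin.write_all(b"\n").await?;
    stdin.flush().await?;

    // ...and one JSON-RPC message per line back on stdout.
    let stdout = child.stdout.take().expect("piped stdout");
    let mut lines = BufReader::new(stdout).lines();
    if let Some(line) = lines.next_line().await? {
        println!("server replied: {line}");
    }
    Ok(())
}
```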