This adds support for easily running Codex backed by a local Ollama instance serving our new open-source models; see https://github.com/openai/gpt-oss for details. If you pass `--oss`, you'll be prompted to install/launch Ollama, and Codex will automatically download the 20b model and attempt to use it. We'll likely want to expand this later with options that smooth the experience for users who can't run the 20b or want to run the 120b.

Co-authored-by: Michael Bolin <mbolin@openai.com>
/// Identify whether a base_url points at an OpenAI-compatible root (".../v1").
pub(crate) fn is_openai_compatible_base_url(base_url: &str) -> bool {
    base_url.trim_end_matches('/').ends_with("/v1")
}

/// Convert a provider base_url into the native Ollama host root.
/// For example, "http://localhost:11434/v1" -> "http://localhost:11434".
pub fn base_url_to_host_root(base_url: &str) -> String {
    let trimmed = base_url.trim_end_matches('/');
    if trimmed.ends_with("/v1") {
        trimmed
            .trim_end_matches("/v1")
            .trim_end_matches('/')
            .to_string()
    } else {
        trimmed.to_string()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_base_url_to_host_root() {
        assert_eq!(
            base_url_to_host_root("http://localhost:11434/v1"),
            "http://localhost:11434"
        );
        assert_eq!(
            base_url_to_host_root("http://localhost:11434"),
            "http://localhost:11434"
        );
        assert_eq!(
            base_url_to_host_root("http://localhost:11434/"),
            "http://localhost:11434"
        );
    }
}
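For illustration, here is a minimal sketch of how the two helpers might be combined by an in-crate caller. `resolve_ollama_host` is a hypothetical name, not part of this change, and the native-API endpoints mentioned in the comments are assumptions about typical Ollama usage:

```rust
// Hypothetical in-crate caller (not part of this diff): resolve the URL to
// use for Ollama's native API from a configured provider base_url.
fn resolve_ollama_host(base_url: &str) -> String {
    if is_openai_compatible_base_url(base_url) {
        // ".../v1" is the OpenAI-compatible surface; strip it to reach the
        // host root that serves native endpoints such as /api/tags.
        base_url_to_host_root(base_url)
    } else {
        // Already a host root; just normalize the trailing slash.
        base_url.trim_end_matches('/').to_string()
    }
}
```

The split keeps URL classification (`is_openai_compatible_base_url`) separate from URL rewriting (`base_url_to_host_root`), so callers can branch on the URL shape without re-deriving the "/v1" suffix logic.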