llmx/codex-rs/exec/Cargo.toml
Commit 9285350842 (easong-openai): Introduce --oss flag to use gpt-oss models (#1848)
This adds support for easily running Codex against a local Ollama
instance serving our new open-source models. See
https://github.com/openai/gpt-oss for details.

If you pass `--oss`, you'll be prompted to install/launch Ollama, and
it will automatically download the 20b model and attempt to use it.

We'll likely want to expand this later with options that make the
experience smoother for users who can't run the 20b model or who want
to run the 120b.

Co-authored-by: Michael Bolin <mbolin@openai.com>
2025-08-05 11:31:11 -07:00
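
As a hedged illustration of the flag this commit introduces, here is a minimal sketch of how `--oss` might be declared with clap's derive API (which the Cargo.toml below already depends on). Only the flag name comes from the commit; the struct, the `prompt` argument, and the body are illustrative, not the crate's actual CLI definition.

```rust
// Minimal sketch, assuming a clap v4 derive-style CLI. Only the --oss
// flag name comes from the commit; everything else is illustrative.
use clap::Parser;

#[derive(Parser, Debug)]
#[command(name = "codex-exec")]
struct Cli {
    /// Run against a local open-source model served by Ollama instead
    /// of the hosted API.
    #[arg(long)]
    oss: bool,

    /// Prompt to execute non-interactively (hypothetical argument).
    prompt: Option<String>,
}

fn main() {
    let cli = Cli::parse();
    if cli.oss {
        // The real implementation defers to codex-ollama to ensure the
        // 20b model is available; this placeholder just reports the mode.
        println!("oss mode: using a local Ollama-served model");
    }
}
```

With clap's derive API, a bare `bool` field annotated `#[arg(long)]` becomes a presence flag that defaults to false, which matches the opt-in behavior the commit message describes.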

46 lines · 879 B · TOML

[package]
edition = "2024"
name = "codex-exec"
version = { workspace = true }

[[bin]]
name = "codex-exec"
path = "src/main.rs"

[lib]
name = "codex_exec"
path = "src/lib.rs"

[lints]
workspace = true

[dependencies]
anyhow = "1"
chrono = "0.4.40"
clap = { version = "4", features = ["derive"] }
codex-arg0 = { path = "../arg0" }
codex-common = { path = "../common", features = [
    "cli",
    "elapsed",
    "sandbox_summary",
] }
codex-core = { path = "../core" }
codex-ollama = { path = "../ollama" }
owo-colors = "4.2.0"
serde_json = "1"
shlex = "1.3.0"
tokio = { version = "1", features = [
    "io-std",
    "macros",
    "process",
    "rt-multi-thread",
    "signal",
] }
tracing = { version = "0.1.41", features = ["log"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }

[dev-dependencies]
assert_cmd = "2"
predicates = "3"
tempfile = "3.13.0"
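
For context on the tokio dependency above, here is a hedged sketch of how a binary could check whether a local Ollama instance is reachable before prompting the user to install or launch it, as the commit message describes. Port 11434 is Ollama's default. Note two assumptions: `TcpStream` requires tokio's `net` feature, which is not in the feature list above, and the function name is illustrative rather than the actual codex-ollama API.

```rust
// Hedged sketch: probe Ollama's default port (11434) to decide whether
// to prompt the user to install/launch it. Requires tokio's "net"
// feature in addition to those listed above; names are illustrative.
use tokio::net::TcpStream;

async fn ollama_is_reachable() -> bool {
    TcpStream::connect("127.0.0.1:11434").await.is_ok()
}

#[tokio::main]
async fn main() {
    if ollama_is_reachable().await {
        println!("Ollama detected; proceeding with the local model");
    } else {
        eprintln!("Ollama not reachable; install/launch it to use --oss");
    }
}
```

A plain TCP connect is a cheap liveness check; a real implementation would more likely issue an HTTP request to Ollama's local API to confirm the service (and the required model) is actually available.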