Compare commits
1 Commits
main
...
dependabot
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7dbd70529f |
@@ -1,18 +1,18 @@
|
|||||||
# Containerized Development
|
# Containerized Development
|
||||||
|
|
||||||
We provide the following options to facilitate LLMX development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
|
We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
|
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
LLMX_DOCKER_IMAGE_NAME=llmx-linux-dev
|
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
|
||||||
docker build --platform=linux/amd64 -t "$LLMX_DOCKER_IMAGE_NAME" ./.devcontainer
|
docker build --platform=linux/amd64 -t "$CODEX_DOCKER_IMAGE_NAME" ./.devcontainer
|
||||||
docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/llmx-rs/target-amd64 -v "$PWD":/workspace -w /workspace/llmx-rs "$LLMX_DOCKER_IMAGE_NAME"
|
docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64 -v "$PWD":/workspace -w /workspace/codex-rs "$CODEX_DOCKER_IMAGE_NAME"
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/llmx-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
|
Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
|
||||||
|
|
||||||
For arm64, specify `--platform=linux/amd64` instead for both `docker build` and `docker run`.
|
For arm64, specify `--platform=linux/amd64` instead for both `docker build` and `docker run`.
|
||||||
|
|
||||||
@@ -20,7 +20,7 @@ Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need
|
|||||||
|
|
||||||
## VS Code
|
## VS Code
|
||||||
|
|
||||||
VS Code recognizes the `devcontainer.json` file and gives you the option to develop LLMX in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
|
VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
|
||||||
|
|
||||||
From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
|
From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
{
|
{
|
||||||
"name": "LLMX",
|
"name": "Codex",
|
||||||
"build": {
|
"build": {
|
||||||
"dockerfile": "Dockerfile",
|
"dockerfile": "Dockerfile",
|
||||||
"context": "..",
|
"context": "..",
|
||||||
@@ -12,7 +12,7 @@
|
|||||||
|
|
||||||
"containerEnv": {
|
"containerEnv": {
|
||||||
"RUST_BACKTRACE": "1",
|
"RUST_BACKTRACE": "1",
|
||||||
"CARGO_TARGET_DIR": "${containerWorkspaceFolder}/llmx-rs/target-arm64"
|
"CARGO_TARGET_DIR": "${containerWorkspaceFolder}/codex-rs/target-arm64"
|
||||||
},
|
},
|
||||||
|
|
||||||
"remoteUser": "ubuntu",
|
"remoteUser": "ubuntu",
|
||||||
|
|||||||
10
.github/ISSUE_TEMPLATE/2-bug-report.yml
vendored
10
.github/ISSUE_TEMPLATE/2-bug-report.yml
vendored
@@ -7,19 +7,19 @@ body:
|
|||||||
- type: markdown
|
- type: markdown
|
||||||
attributes:
|
attributes:
|
||||||
value: |
|
value: |
|
||||||
Thank you for submitting a bug report! It helps make LLMX better for everyone.
|
Thank you for submitting a bug report! It helps make Codex better for everyone.
|
||||||
|
|
||||||
If you need help or support using LLMX, and are not reporting a bug, please post on [llmx/discussions](https://github.com/valknar/llmx/discussions), where you can ask questions or engage with others on ideas for how to improve llmx.
|
If you need help or support using Codex, and are not reporting a bug, please post on [codex/discussions](https://github.com/openai/codex/discussions), where you can ask questions or engage with others on ideas for how to improve codex.
|
||||||
|
|
||||||
Make sure you are running the [latest](https://npmjs.com/package/@llmx/llmx) version of LLMX CLI. The bug you are experiencing may already have been fixed.
|
Make sure you are running the [latest](https://npmjs.com/package/@openai/codex) version of Codex CLI. The bug you are experiencing may already have been fixed.
|
||||||
|
|
||||||
Please try to include as much information as possible.
|
Please try to include as much information as possible.
|
||||||
|
|
||||||
- type: input
|
- type: input
|
||||||
id: version
|
id: version
|
||||||
attributes:
|
attributes:
|
||||||
label: What version of LLMX is running?
|
label: What version of Codex is running?
|
||||||
description: Copy the output of `llmx --version`
|
description: Copy the output of `codex --version`
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/3-docs-issue.yml
vendored
2
.github/ISSUE_TEMPLATE/3-docs-issue.yml
vendored
@@ -5,7 +5,7 @@ body:
|
|||||||
- type: markdown
|
- type: markdown
|
||||||
attributes:
|
attributes:
|
||||||
value: |
|
value: |
|
||||||
Thank you for submitting a documentation request. It helps make LLMX better.
|
Thank you for submitting a documentation request. It helps make Codex better.
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: What is the type of issue?
|
label: What is the type of issue?
|
||||||
|
|||||||
6
.github/ISSUE_TEMPLATE/4-feature-request.yml
vendored
6
.github/ISSUE_TEMPLATE/4-feature-request.yml
vendored
@@ -1,16 +1,16 @@
|
|||||||
name: 🎁 Feature Request
|
name: 🎁 Feature Request
|
||||||
description: Propose a new feature for LLMX
|
description: Propose a new feature for Codex
|
||||||
labels:
|
labels:
|
||||||
- enhancement
|
- enhancement
|
||||||
body:
|
body:
|
||||||
- type: markdown
|
- type: markdown
|
||||||
attributes:
|
attributes:
|
||||||
value: |
|
value: |
|
||||||
Is LLMX missing a feature that you'd like to see? Feel free to propose it here.
|
Is Codex missing a feature that you'd like to see? Feel free to propose it here.
|
||||||
|
|
||||||
Before you submit a feature:
|
Before you submit a feature:
|
||||||
1. Search existing issues for similar features. If you find one, 👍 it rather than opening a new one.
|
1. Search existing issues for similar features. If you find one, 👍 it rather than opening a new one.
|
||||||
2. The LLMX team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/valknar/llmx#contributing) for more details.
|
2. The Codex team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/openai/codex#contributing) for more details.
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: feature
|
id: feature
|
||||||
|
|||||||
BIN
.github/codex-cli-login.png
vendored
Normal file
BIN
.github/codex-cli-login.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.9 MiB |
BIN
.github/codex-cli-permissions.png
vendored
Normal file
BIN
.github/codex-cli-permissions.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 408 KiB |
BIN
.github/codex-cli-splash.png
vendored
Normal file
BIN
.github/codex-cli-splash.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 3.1 MiB |
@@ -4,6 +4,6 @@ If a code change is required, create a new branch, commit the fix, and open a pu
|
|||||||
|
|
||||||
Here is the original GitHub issue that triggered this run:
|
Here is the original GitHub issue that triggered this run:
|
||||||
|
|
||||||
### {LLMX_ACTION_ISSUE_TITLE}
|
### {CODEX_ACTION_ISSUE_TITLE}
|
||||||
|
|
||||||
{LLMX_ACTION_ISSUE_BODY}
|
{CODEX_ACTION_ISSUE_BODY}
|
||||||
@@ -4,4 +4,4 @@ There should be a summary of the changes (1-2 sentences) and a few bullet points
|
|||||||
|
|
||||||
Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
|
Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
|
||||||
|
|
||||||
{LLMX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.
|
{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.
|
||||||
@@ -15,8 +15,8 @@ Things to look out for when doing the review:
|
|||||||
|
|
||||||
## Code Organization
|
## Code Organization
|
||||||
|
|
||||||
- Each create in the Cargo workspace in `llmx-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
|
- Each create in the Cargo workspace in `codex-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
|
||||||
- When possible, try to keep the `core` crate as small as possible. Non-core but shared logic is often a good candidate for `llmx-rs/common`.
|
- When possible, try to keep the `core` crate as small as possible. Non-core but shared logic is often a good candidate for `codex-rs/common`.
|
||||||
- Be wary of large files and offer suggestions for how to break things into more reasonably-sized files.
|
- Be wary of large files and offer suggestions for how to break things into more reasonably-sized files.
|
||||||
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analagous to the "inverted pyramid" structure that is favored in journalism.
|
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analagous to the "inverted pyramid" structure that is favored in journalism.
|
||||||
|
|
||||||
@@ -131,9 +131,9 @@ fn test_get_latest_messages() {
|
|||||||
|
|
||||||
## Pull Request Body
|
## Pull Request Body
|
||||||
|
|
||||||
- If the nature of the change seems to have a visual component (which is often the case for changes to `llmx-rs/tui`), recommend including a screenshot or video to demonstrate the change, if appropriate.
|
- If the nature of the change seems to have a visual component (which is often the case for changes to `codex-rs/tui`), recommend including a screenshot or video to demonstrate the change, if appropriate.
|
||||||
- References to existing GitHub issues and PRs are encouraged, where appropriate, though you likely do not have network access, so may not be able to help here.
|
- References to existing GitHub issues and PRs are encouraged, where appropriate, though you likely do not have network access, so may not be able to help here.
|
||||||
|
|
||||||
# PR Information
|
# PR Information
|
||||||
|
|
||||||
{LLMX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.
|
{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.
|
||||||
@@ -2,6 +2,6 @@ Troubleshoot whether the reported issue is valid.
|
|||||||
|
|
||||||
Provide a concise and respectful comment summarizing the findings.
|
Provide a concise and respectful comment summarizing the findings.
|
||||||
|
|
||||||
### {LLMX_ACTION_ISSUE_TITLE}
|
### {CODEX_ACTION_ISSUE_TITLE}
|
||||||
|
|
||||||
{LLMX_ACTION_ISSUE_BODY}
|
{CODEX_ACTION_ISSUE_BODY}
|
||||||
10
.github/dependabot.yaml
vendored
10
.github/dependabot.yaml
vendored
@@ -3,13 +3,13 @@
|
|||||||
version: 2
|
version: 2
|
||||||
updates:
|
updates:
|
||||||
- package-ecosystem: bun
|
- package-ecosystem: bun
|
||||||
directory: .github/actions/llmx
|
directory: .github/actions/codex
|
||||||
schedule:
|
schedule:
|
||||||
interval: weekly
|
interval: weekly
|
||||||
- package-ecosystem: cargo
|
- package-ecosystem: cargo
|
||||||
directories:
|
directories:
|
||||||
- llmx-rs
|
- codex-rs
|
||||||
- llmx-rs/*
|
- codex-rs/*
|
||||||
schedule:
|
schedule:
|
||||||
interval: weekly
|
interval: weekly
|
||||||
- package-ecosystem: devcontainers
|
- package-ecosystem: devcontainers
|
||||||
@@ -17,7 +17,7 @@ updates:
|
|||||||
schedule:
|
schedule:
|
||||||
interval: weekly
|
interval: weekly
|
||||||
- package-ecosystem: docker
|
- package-ecosystem: docker
|
||||||
directory: llmx-cli
|
directory: codex-cli
|
||||||
schedule:
|
schedule:
|
||||||
interval: weekly
|
interval: weekly
|
||||||
- package-ecosystem: github-actions
|
- package-ecosystem: github-actions
|
||||||
@@ -25,6 +25,6 @@ updates:
|
|||||||
schedule:
|
schedule:
|
||||||
interval: weekly
|
interval: weekly
|
||||||
- package-ecosystem: rust-toolchain
|
- package-ecosystem: rust-toolchain
|
||||||
directory: llmx-rs
|
directory: codex-rs
|
||||||
schedule:
|
schedule:
|
||||||
interval: weekly
|
interval: weekly
|
||||||
|
|||||||
54
.github/dotslash-config.json
vendored
54
.github/dotslash-config.json
vendored
@@ -1,30 +1,58 @@
|
|||||||
{
|
{
|
||||||
"outputs": {
|
"outputs": {
|
||||||
"llmx": {
|
"codex": {
|
||||||
"platforms": {
|
"platforms": {
|
||||||
"macos-aarch64": {
|
"macos-aarch64": {
|
||||||
"regex": "^llmx-aarch64-apple-darwin\\.zst$",
|
"regex": "^codex-aarch64-apple-darwin\\.zst$",
|
||||||
"path": "llmx"
|
"path": "codex"
|
||||||
},
|
},
|
||||||
"macos-x86_64": {
|
"macos-x86_64": {
|
||||||
"regex": "^llmx-x86_64-apple-darwin\\.zst$",
|
"regex": "^codex-x86_64-apple-darwin\\.zst$",
|
||||||
"path": "llmx"
|
"path": "codex"
|
||||||
},
|
},
|
||||||
"linux-x86_64": {
|
"linux-x86_64": {
|
||||||
"regex": "^llmx-x86_64-unknown-linux-musl\\.zst$",
|
"regex": "^codex-x86_64-unknown-linux-musl\\.zst$",
|
||||||
"path": "llmx"
|
"path": "codex"
|
||||||
},
|
},
|
||||||
"linux-aarch64": {
|
"linux-aarch64": {
|
||||||
"regex": "^llmx-aarch64-unknown-linux-musl\\.zst$",
|
"regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
|
||||||
"path": "llmx"
|
"path": "codex"
|
||||||
},
|
},
|
||||||
"windows-x86_64": {
|
"windows-x86_64": {
|
||||||
"regex": "^llmx-x86_64-pc-windows-msvc\\.exe\\.zst$",
|
"regex": "^codex-x86_64-pc-windows-msvc\\.exe\\.zst$",
|
||||||
"path": "llmx.exe"
|
"path": "codex.exe"
|
||||||
},
|
},
|
||||||
"windows-aarch64": {
|
"windows-aarch64": {
|
||||||
"regex": "^llmx-aarch64-pc-windows-msvc\\.exe\\.zst$",
|
"regex": "^codex-aarch64-pc-windows-msvc\\.exe\\.zst$",
|
||||||
"path": "llmx.exe"
|
"path": "codex.exe"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"codex-responses-api-proxy": {
|
||||||
|
"platforms": {
|
||||||
|
"macos-aarch64": {
|
||||||
|
"regex": "^codex-responses-api-proxy-aarch64-apple-darwin\\.zst$",
|
||||||
|
"path": "codex-responses-api-proxy"
|
||||||
|
},
|
||||||
|
"macos-x86_64": {
|
||||||
|
"regex": "^codex-responses-api-proxy-x86_64-apple-darwin\\.zst$",
|
||||||
|
"path": "codex-responses-api-proxy"
|
||||||
|
},
|
||||||
|
"linux-x86_64": {
|
||||||
|
"regex": "^codex-responses-api-proxy-x86_64-unknown-linux-musl\\.zst$",
|
||||||
|
"path": "codex-responses-api-proxy"
|
||||||
|
},
|
||||||
|
"linux-aarch64": {
|
||||||
|
"regex": "^codex-responses-api-proxy-aarch64-unknown-linux-musl\\.zst$",
|
||||||
|
"path": "codex-responses-api-proxy"
|
||||||
|
},
|
||||||
|
"windows-x86_64": {
|
||||||
|
"regex": "^codex-responses-api-proxy-x86_64-pc-windows-msvc\\.exe\\.zst$",
|
||||||
|
"path": "codex-responses-api-proxy.exe"
|
||||||
|
},
|
||||||
|
"windows-aarch64": {
|
||||||
|
"regex": "^codex-responses-api-proxy-aarch64-pc-windows-msvc\\.exe\\.zst$",
|
||||||
|
"path": "codex-responses-api-proxy.exe"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
2
.github/pull_request_template.md
vendored
2
.github/pull_request_template.md
vendored
@@ -1,7 +1,7 @@
|
|||||||
# External (non-OpenAI) Pull Request Requirements
|
# External (non-OpenAI) Pull Request Requirements
|
||||||
|
|
||||||
Before opening this Pull Request, please read the dedicated "Contributing" markdown file or your PR may be closed:
|
Before opening this Pull Request, please read the dedicated "Contributing" markdown file or your PR may be closed:
|
||||||
https://github.com/valknar/llmx/blob/main/docs/contributing.md
|
https://github.com/openai/codex/blob/main/docs/contributing.md
|
||||||
|
|
||||||
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
|
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
|
||||||
|
|
||||||
|
|||||||
18
.github/workflows/ci.yml
vendored
18
.github/workflows/ci.yml
vendored
@@ -36,19 +36,19 @@ jobs:
|
|||||||
GH_TOKEN: ${{ github.token }}
|
GH_TOKEN: ${{ github.token }}
|
||||||
run: |
|
run: |
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
LLMX_VERSION=0.1.0
|
CODEX_VERSION=0.40.0
|
||||||
OUTPUT_DIR="${RUNNER_TEMP}"
|
OUTPUT_DIR="${RUNNER_TEMP}"
|
||||||
python3 ./scripts/stage_npm_packages.py \
|
python3 ./scripts/stage_npm_packages.py \
|
||||||
--release-version "$LLMX_VERSION" \
|
--release-version "$CODEX_VERSION" \
|
||||||
--package llmx \
|
--package codex \
|
||||||
--output-dir "$OUTPUT_DIR"
|
--output-dir "$OUTPUT_DIR"
|
||||||
PACK_OUTPUT="${OUTPUT_DIR}/llmx-npm-${LLMX_VERSION}.tgz"
|
PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
|
||||||
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
|
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
- name: Upload staged npm package artifact
|
- name: Upload staged npm package artifact
|
||||||
uses: actions/upload-artifact@v5
|
uses: actions/upload-artifact@v5
|
||||||
with:
|
with:
|
||||||
name: llmx-npm-staging
|
name: codex-npm-staging
|
||||||
path: ${{ steps.stage_npm_package.outputs.pack_output }}
|
path: ${{ steps.stage_npm_package.outputs.pack_output }}
|
||||||
|
|
||||||
- name: Ensure root README.md contains only ASCII and certain Unicode code points
|
- name: Ensure root README.md contains only ASCII and certain Unicode code points
|
||||||
@@ -56,10 +56,10 @@ jobs:
|
|||||||
- name: Check root README ToC
|
- name: Check root README ToC
|
||||||
run: python3 scripts/readme_toc.py README.md
|
run: python3 scripts/readme_toc.py README.md
|
||||||
|
|
||||||
- name: Ensure llmx-cli/README.md contains only ASCII and certain Unicode code points
|
- name: Ensure codex-cli/README.md contains only ASCII and certain Unicode code points
|
||||||
run: ./scripts/asciicheck.py llmx-cli/README.md
|
run: ./scripts/asciicheck.py codex-cli/README.md
|
||||||
- name: Check llmx-cli/README ToC
|
- name: Check codex-cli/README ToC
|
||||||
run: python3 scripts/readme_toc.py llmx-cli/README.md
|
run: python3 scripts/readme_toc.py codex-cli/README.md
|
||||||
|
|
||||||
- name: Prettier (run `pnpm run format:fix` to fix)
|
- name: Prettier (run `pnpm run format:fix` to fix)
|
||||||
run: pnpm run format
|
run: pnpm run format
|
||||||
|
|||||||
2
.github/workflows/cla.yml
vendored
2
.github/workflows/cla.yml
vendored
@@ -40,7 +40,7 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
path-to-document: https://github.com/openai/llmx/blob/main/docs/CLA.md
|
path-to-document: https://github.com/openai/codex/blob/main/docs/CLA.md
|
||||||
path-to-signatures: signatures/cla.json
|
path-to-signatures: signatures/cla.json
|
||||||
branch: cla-signatures
|
branch: cla-signatures
|
||||||
allowlist: dependabot[bot]
|
allowlist: dependabot[bot]
|
||||||
|
|||||||
38
.github/workflows/issue-deduplicator.yml
vendored
38
.github/workflows/issue-deduplicator.yml
vendored
@@ -9,23 +9,23 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
gather-duplicates:
|
gather-duplicates:
|
||||||
name: Identify potential duplicates
|
name: Identify potential duplicates
|
||||||
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'llmx-deduplicate') }}
|
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
outputs:
|
outputs:
|
||||||
llmx_output: ${{ steps.llmx.outputs.final-message }}
|
codex_output: ${{ steps.codex.outputs.final-message }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
- name: Prepare LLMX inputs
|
- name: Prepare Codex inputs
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ github.token }}
|
GH_TOKEN: ${{ github.token }}
|
||||||
run: |
|
run: |
|
||||||
set -eo pipefail
|
set -eo pipefail
|
||||||
|
|
||||||
CURRENT_ISSUE_FILE=llmx-current-issue.json
|
CURRENT_ISSUE_FILE=codex-current-issue.json
|
||||||
EXISTING_ISSUES_FILE=llmx-existing-issues.json
|
EXISTING_ISSUES_FILE=codex-existing-issues.json
|
||||||
|
|
||||||
gh issue list --repo "${{ github.repository }}" \
|
gh issue list --repo "${{ github.repository }}" \
|
||||||
--json number,title,body,createdAt \
|
--json number,title,body,createdAt \
|
||||||
@@ -41,18 +41,18 @@ jobs:
|
|||||||
| jq '.' \
|
| jq '.' \
|
||||||
> "$CURRENT_ISSUE_FILE"
|
> "$CURRENT_ISSUE_FILE"
|
||||||
|
|
||||||
- id: llmx
|
- id: codex
|
||||||
uses: valknar/llmx-action@main
|
uses: openai/codex-action@main
|
||||||
with:
|
with:
|
||||||
openai-api-key: ${{ secrets.LLMX_OPENAI_API_KEY }}
|
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
|
||||||
allow-users: "*"
|
allow-users: "*"
|
||||||
model: gpt-5
|
model: gpt-5
|
||||||
prompt: |
|
prompt: |
|
||||||
You are an assistant that triages new GitHub issues by identifying potential duplicates.
|
You are an assistant that triages new GitHub issues by identifying potential duplicates.
|
||||||
|
|
||||||
You will receive the following JSON files located in the current working directory:
|
You will receive the following JSON files located in the current working directory:
|
||||||
- `llmx-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
|
- `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
|
||||||
- `llmx-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).
|
- `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).
|
||||||
|
|
||||||
Instructions:
|
Instructions:
|
||||||
- Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
|
- Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
|
||||||
@@ -89,16 +89,16 @@ jobs:
|
|||||||
- name: Comment on issue
|
- name: Comment on issue
|
||||||
uses: actions/github-script@v8
|
uses: actions/github-script@v8
|
||||||
env:
|
env:
|
||||||
LLMX_OUTPUT: ${{ needs.gather-duplicates.outputs.llmx_output }}
|
CODEX_OUTPUT: ${{ needs.gather-duplicates.outputs.codex_output }}
|
||||||
with:
|
with:
|
||||||
github-token: ${{ github.token }}
|
github-token: ${{ github.token }}
|
||||||
script: |
|
script: |
|
||||||
const raw = process.env.LLMX_OUTPUT ?? '';
|
const raw = process.env.CODEX_OUTPUT ?? '';
|
||||||
let parsed;
|
let parsed;
|
||||||
try {
|
try {
|
||||||
parsed = JSON.parse(raw);
|
parsed = JSON.parse(raw);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
core.info(`LLMX output was not valid JSON. Raw output: ${raw}`);
|
core.info(`Codex output was not valid JSON. Raw output: ${raw}`);
|
||||||
core.info(`Parse error: ${error.message}`);
|
core.info(`Parse error: ${error.message}`);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -112,7 +112,7 @@ jobs:
|
|||||||
const filteredIssues = issues.filter((value) => String(value) !== currentIssueNumber);
|
const filteredIssues = issues.filter((value) => String(value) !== currentIssueNumber);
|
||||||
|
|
||||||
if (filteredIssues.length === 0) {
|
if (filteredIssues.length === 0) {
|
||||||
core.info('LLMX reported no potential duplicates.');
|
core.info('Codex reported no potential duplicates.');
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -121,7 +121,7 @@ jobs:
|
|||||||
'',
|
'',
|
||||||
...filteredIssues.map((value) => `- #${String(value)}`),
|
...filteredIssues.map((value) => `- #${String(value)}`),
|
||||||
'',
|
'',
|
||||||
'*Powered by [LLMX Action](https://github.com/valknar/llmx-action)*'];
|
'*Powered by [Codex Action](https://github.com/openai/codex-action)*'];
|
||||||
|
|
||||||
await github.rest.issues.createComment({
|
await github.rest.issues.createComment({
|
||||||
owner: context.repo.owner,
|
owner: context.repo.owner,
|
||||||
@@ -130,11 +130,11 @@ jobs:
|
|||||||
body: lines.join("\n"),
|
body: lines.join("\n"),
|
||||||
});
|
});
|
||||||
|
|
||||||
- name: Remove llmx-deduplicate label
|
- name: Remove codex-deduplicate label
|
||||||
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'llmx-deduplicate' }}
|
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate' }}
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ github.token }}
|
GH_TOKEN: ${{ github.token }}
|
||||||
GH_REPO: ${{ github.repository }}
|
GH_REPO: ${{ github.repository }}
|
||||||
run: |
|
run: |
|
||||||
gh issue edit "${{ github.event.issue.number }}" --remove-label llmx-deduplicate || true
|
gh issue edit "${{ github.event.issue.number }}" --remove-label codex-deduplicate || true
|
||||||
echo "Attempted to remove label: llmx-deduplicate"
|
echo "Attempted to remove label: codex-deduplicate"
|
||||||
|
|||||||
44
.github/workflows/issue-labeler.yml
vendored
44
.github/workflows/issue-labeler.yml
vendored
@@ -9,19 +9,19 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
gather-labels:
|
gather-labels:
|
||||||
name: Generate label suggestions
|
name: Generate label suggestions
|
||||||
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'llmx-label') }}
|
if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
outputs:
|
outputs:
|
||||||
llmx_output: ${{ steps.llmx.outputs.final-message }}
|
codex_output: ${{ steps.codex.outputs.final-message }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
- id: llmx
|
- id: codex
|
||||||
uses: openai/llmx-action@main
|
uses: openai/codex-action@main
|
||||||
with:
|
with:
|
||||||
openai-api-key: ${{ secrets.LLMX_OPENAI_API_KEY }}
|
openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
|
||||||
allow-users: "*"
|
allow-users: "*"
|
||||||
prompt: |
|
prompt: |
|
||||||
You are an assistant that reviews GitHub issues for the repository.
|
You are an assistant that reviews GitHub issues for the repository.
|
||||||
@@ -30,26 +30,26 @@ jobs:
|
|||||||
Follow these rules:
|
Follow these rules:
|
||||||
|
|
||||||
- Add one (and only one) of the following three labels to distinguish the type of issue. Default to "bug" if unsure.
|
- Add one (and only one) of the following three labels to distinguish the type of issue. Default to "bug" if unsure.
|
||||||
1. bug — Reproducible defects in LLMX products (CLI, VS Code extension, web, auth).
|
1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
|
||||||
2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
|
2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
|
||||||
3. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
|
3. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
|
||||||
|
|
||||||
- If applicable, add one of the following labels to specify which sub-product or product surface the issue relates to.
|
- If applicable, add one of the following labels to specify which sub-product or product surface the issue relates to.
|
||||||
1. CLI — the LLMX command line interface.
|
1. CLI — the Codex command line interface.
|
||||||
2. extension — VS Code (or other IDE) extension-specific issues.
|
2. extension — VS Code (or other IDE) extension-specific issues.
|
||||||
3. llmx-web — Issues targeting the Llmx web UI/Cloud experience.
|
3. codex-web — Issues targeting the Codex web UI/Cloud experience.
|
||||||
4. github-action — Issues with the LLMX GitHub action.
|
4. github-action — Issues with the Codex GitHub action.
|
||||||
5. iOS — Issues with the LLMX iOS app.
|
5. iOS — Issues with the Codex iOS app.
|
||||||
|
|
||||||
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
|
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
|
||||||
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
|
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
|
||||||
2. mcp — Topics involving Model Context Protocol servers/clients.
|
2. mcp — Topics involving Model Context Protocol servers/clients.
|
||||||
3. mcp-server — Problems related to the llmx mcp-server command, where llmx runs as an MCP server.
|
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
|
||||||
4. azure — Problems or requests tied to Azure OpenAI deployments.
|
4. azure — Problems or requests tied to Azure OpenAI deployments.
|
||||||
5. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
|
5. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
|
||||||
6. code-review — Issues related to the code review feature or functionality.
|
6. code-review — Issues related to the code review feature or functionality.
|
||||||
7. auth - Problems related to authentication, login, or access tokens.
|
7. auth - Problems related to authentication, login, or access tokens.
|
||||||
8. llmx-exec - Problems related to the "llmx exec" command or functionality.
|
8. codex-exec - Problems related to the "codex exec" command or functionality.
|
||||||
9. context-management - Problems related to compaction, context windows, or available context reporting.
|
9. context-management - Problems related to compaction, context windows, or available context reporting.
|
||||||
10. custom-model - Problems that involve using custom model providers, local models, or OSS models.
|
10. custom-model - Problems that involve using custom model providers, local models, or OSS models.
|
||||||
11. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
|
11. rate-limits - Problems related to token limits, rate limits, or token usage reporting.
|
||||||
@@ -84,7 +84,7 @@ jobs:
|
|||||||
}
|
}
|
||||||
|
|
||||||
apply-labels:
|
apply-labels:
|
||||||
name: Apply labels from LLMX output
|
name: Apply labels from Codex output
|
||||||
needs: gather-labels
|
needs: gather-labels
|
||||||
if: ${{ needs.gather-labels.result != 'skipped' }}
|
if: ${{ needs.gather-labels.result != 'skipped' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -95,24 +95,24 @@ jobs:
|
|||||||
GH_TOKEN: ${{ github.token }}
|
GH_TOKEN: ${{ github.token }}
|
||||||
GH_REPO: ${{ github.repository }}
|
GH_REPO: ${{ github.repository }}
|
||||||
ISSUE_NUMBER: ${{ github.event.issue.number }}
|
ISSUE_NUMBER: ${{ github.event.issue.number }}
|
||||||
LLMX_OUTPUT: ${{ needs.gather-labels.outputs.llmx_output }}
|
CODEX_OUTPUT: ${{ needs.gather-labels.outputs.codex_output }}
|
||||||
steps:
|
steps:
|
||||||
- name: Apply labels
|
- name: Apply labels
|
||||||
run: |
|
run: |
|
||||||
json=${LLMX_OUTPUT//$'\r'/}
|
json=${CODEX_OUTPUT//$'\r'/}
|
||||||
if [ -z "$json" ]; then
|
if [ -z "$json" ]; then
|
||||||
echo "LLMX produced no output. Skipping label application."
|
echo "Codex produced no output. Skipping label application."
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ! printf '%s' "$json" | jq -e 'type == "object" and (.labels | type == "array")' >/dev/null 2>&1; then
|
if ! printf '%s' "$json" | jq -e 'type == "object" and (.labels | type == "array")' >/dev/null 2>&1; then
|
||||||
echo "LLMX output did not include a labels array. Raw output: $json"
|
echo "Codex output did not include a labels array. Raw output: $json"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
labels=$(printf '%s' "$json" | jq -r '.labels[] | tostring')
|
labels=$(printf '%s' "$json" | jq -r '.labels[] | tostring')
|
||||||
if [ -z "$labels" ]; then
|
if [ -z "$labels" ]; then
|
||||||
echo "LLMX returned an empty array. Nothing to do."
|
echo "Codex returned an empty array. Nothing to do."
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -123,8 +123,8 @@ jobs:
|
|||||||
|
|
||||||
"${cmd[@]}" || true
|
"${cmd[@]}" || true
|
||||||
|
|
||||||
- name: Remove llmx-label trigger
|
- name: Remove codex-label trigger
|
||||||
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'llmx-label' }}
|
if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-label' }}
|
||||||
run: |
|
run: |
|
||||||
gh issue edit "$ISSUE_NUMBER" --remove-label llmx-label || true
|
gh issue edit "$ISSUE_NUMBER" --remove-label codex-label || true
|
||||||
echo "Attempted to remove label: llmx-label"
|
echo "Attempted to remove label: codex-label"
|
||||||
|
|||||||
47
.github/workflows/rust-ci.yml
vendored
47
.github/workflows/rust-ci.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
|||||||
name: Detect changed areas
|
name: Detect changed areas
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
outputs:
|
outputs:
|
||||||
llmx: ${{ steps.detect.outputs.llmx }}
|
codex: ${{ steps.detect.outputs.codex }}
|
||||||
workflows: ${{ steps.detect.outputs.workflows }}
|
workflows: ${{ steps.detect.outputs.workflows }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
@@ -33,17 +33,17 @@ jobs:
|
|||||||
mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA"...HEAD)
|
mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA"...HEAD)
|
||||||
else
|
else
|
||||||
# On push / manual runs, default to running everything
|
# On push / manual runs, default to running everything
|
||||||
files=("llmx-rs/force" ".github/force")
|
files=("codex-rs/force" ".github/force")
|
||||||
fi
|
fi
|
||||||
|
|
||||||
llmx=false
|
codex=false
|
||||||
workflows=false
|
workflows=false
|
||||||
for f in "${files[@]}"; do
|
for f in "${files[@]}"; do
|
||||||
[[ $f == llmx-rs/* ]] && llmx=true
|
[[ $f == codex-rs/* ]] && codex=true
|
||||||
[[ $f == .github/* ]] && workflows=true
|
[[ $f == .github/* ]] && workflows=true
|
||||||
done
|
done
|
||||||
|
|
||||||
echo "llmx=$llmx" >> "$GITHUB_OUTPUT"
|
echo "codex=$codex" >> "$GITHUB_OUTPUT"
|
||||||
echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
|
echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
# --- CI that doesn't need specific targets ---------------------------------
|
# --- CI that doesn't need specific targets ---------------------------------
|
||||||
@@ -51,10 +51,10 @@ jobs:
|
|||||||
name: Format / etc
|
name: Format / etc
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
needs: changed
|
needs: changed
|
||||||
if: ${{ needs.changed.outputs.llmx == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: llmx-rs
|
working-directory: codex-rs
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- uses: dtolnay/rust-toolchain@1.90
|
- uses: dtolnay/rust-toolchain@1.90
|
||||||
@@ -69,14 +69,14 @@ jobs:
|
|||||||
name: cargo shear
|
name: cargo shear
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
needs: changed
|
needs: changed
|
||||||
if: ${{ needs.changed.outputs.llmx == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: llmx-rs
|
working-directory: codex-rs
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- uses: dtolnay/rust-toolchain@1.90
|
- uses: dtolnay/rust-toolchain@1.90
|
||||||
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
- uses: taiki-e/install-action@6cc14f7f2f4b3129aff07a8b071d2d4f2733465d # v2
|
||||||
with:
|
with:
|
||||||
tool: cargo-shear
|
tool: cargo-shear
|
||||||
version: 1.5.1
|
version: 1.5.1
|
||||||
@@ -90,10 +90,10 @@ jobs:
|
|||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
needs: changed
|
needs: changed
|
||||||
# Keep job-level if to avoid spinning up runners when not needed
|
# Keep job-level if to avoid spinning up runners when not needed
|
||||||
if: ${{ needs.changed.outputs.llmx == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: llmx-rs
|
working-directory: codex-rs
|
||||||
env:
|
env:
|
||||||
# Speed up repeated builds across CI runs by caching compiled objects.
|
# Speed up repeated builds across CI runs by caching compiled objects.
|
||||||
RUSTC_WRAPPER: sccache
|
RUSTC_WRAPPER: sccache
|
||||||
@@ -164,13 +164,13 @@ jobs:
|
|||||||
~/.cargo/registry/index/
|
~/.cargo/registry/index/
|
||||||
~/.cargo/registry/cache/
|
~/.cargo/registry/cache/
|
||||||
~/.cargo/git/db/
|
~/.cargo/git/db/
|
||||||
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('llmx-rs/rust-toolchain.toml') }}
|
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
|
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
|
||||||
|
|
||||||
# Install and restore sccache cache
|
# Install and restore sccache cache
|
||||||
- name: Install sccache
|
- name: Install sccache
|
||||||
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
uses: taiki-e/install-action@6cc14f7f2f4b3129aff07a8b071d2d4f2733465d # v2
|
||||||
with:
|
with:
|
||||||
tool: sccache
|
tool: sccache
|
||||||
version: 0.7.5
|
version: 0.7.5
|
||||||
@@ -228,7 +228,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Install cargo-chef
|
- name: Install cargo-chef
|
||||||
if: ${{ matrix.profile == 'release' }}
|
if: ${{ matrix.profile == 'release' }}
|
||||||
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
uses: taiki-e/install-action@6cc14f7f2f4b3129aff07a8b071d2d4f2733465d # v2
|
||||||
with:
|
with:
|
||||||
tool: cargo-chef
|
tool: cargo-chef
|
||||||
version: 0.1.71
|
version: 0.1.71
|
||||||
@@ -271,7 +271,7 @@ jobs:
|
|||||||
~/.cargo/registry/index/
|
~/.cargo/registry/index/
|
||||||
~/.cargo/registry/cache/
|
~/.cargo/registry/cache/
|
||||||
~/.cargo/git/db/
|
~/.cargo/git/db/
|
||||||
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('llmx-rs/rust-toolchain.toml') }}
|
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
|
||||||
|
|
||||||
- name: Save sccache cache (fallback)
|
- name: Save sccache cache (fallback)
|
||||||
if: always() && !cancelled() && env.SCCACHE_GHA_ENABLED != 'true'
|
if: always() && !cancelled() && env.SCCACHE_GHA_ENABLED != 'true'
|
||||||
@@ -321,10 +321,10 @@ jobs:
|
|||||||
runs-on: ${{ matrix.runner }}
|
runs-on: ${{ matrix.runner }}
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
needs: changed
|
needs: changed
|
||||||
if: ${{ needs.changed.outputs.llmx == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: llmx-rs
|
working-directory: codex-rs
|
||||||
env:
|
env:
|
||||||
RUSTC_WRAPPER: sccache
|
RUSTC_WRAPPER: sccache
|
||||||
CARGO_INCREMENTAL: "0"
|
CARGO_INCREMENTAL: "0"
|
||||||
@@ -365,12 +365,12 @@ jobs:
|
|||||||
~/.cargo/registry/index/
|
~/.cargo/registry/index/
|
||||||
~/.cargo/registry/cache/
|
~/.cargo/registry/cache/
|
||||||
~/.cargo/git/db/
|
~/.cargo/git/db/
|
||||||
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('llmx-rs/rust-toolchain.toml') }}
|
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
|
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
|
||||||
|
|
||||||
- name: Install sccache
|
- name: Install sccache
|
||||||
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
uses: taiki-e/install-action@6cc14f7f2f4b3129aff07a8b071d2d4f2733465d # v2
|
||||||
with:
|
with:
|
||||||
tool: sccache
|
tool: sccache
|
||||||
version: 0.7.5
|
version: 0.7.5
|
||||||
@@ -399,7 +399,7 @@ jobs:
|
|||||||
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-
|
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-
|
||||||
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
|
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
|
||||||
|
|
||||||
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
- uses: taiki-e/install-action@6cc14f7f2f4b3129aff07a8b071d2d4f2733465d # v2
|
||||||
with:
|
with:
|
||||||
tool: nextest
|
tool: nextest
|
||||||
version: 0.9.103
|
version: 0.9.103
|
||||||
@@ -410,7 +410,6 @@ jobs:
|
|||||||
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test
|
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test
|
||||||
env:
|
env:
|
||||||
RUST_BACKTRACE: 1
|
RUST_BACKTRACE: 1
|
||||||
LLMX_API_KEY: test
|
|
||||||
|
|
||||||
- name: Save cargo home cache
|
- name: Save cargo home cache
|
||||||
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
|
if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
|
||||||
@@ -422,7 +421,7 @@ jobs:
|
|||||||
~/.cargo/registry/index/
|
~/.cargo/registry/index/
|
||||||
~/.cargo/registry/cache/
|
~/.cargo/registry/cache/
|
||||||
~/.cargo/git/db/
|
~/.cargo/git/db/
|
||||||
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('llmx-rs/rust-toolchain.toml') }}
|
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
|
||||||
|
|
||||||
- name: Save sccache cache (fallback)
|
- name: Save sccache cache (fallback)
|
||||||
if: always() && !cancelled() && env.SCCACHE_GHA_ENABLED != 'true'
|
if: always() && !cancelled() && env.SCCACHE_GHA_ENABLED != 'true'
|
||||||
@@ -472,7 +471,7 @@ jobs:
|
|||||||
|
|
||||||
# If nothing relevant changed (PR touching only root README, etc.),
|
# If nothing relevant changed (PR touching only root README, etc.),
|
||||||
# declare success regardless of other jobs.
|
# declare success regardless of other jobs.
|
||||||
if [[ '${{ needs.changed.outputs.llmx }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
|
if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
|
||||||
echo 'No relevant changes -> CI not required.'
|
echo 'No relevant changes -> CI not required.'
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|||||||
136
.github/workflows/rust-release.yml
vendored
136
.github/workflows/rust-release.yml
vendored
@@ -1,4 +1,4 @@
|
|||||||
# Release workflow for llmx-rs.
|
# Release workflow for codex-rs.
|
||||||
# To release, follow a workflow like:
|
# To release, follow a workflow like:
|
||||||
# ```
|
# ```
|
||||||
# git tag -a rust-v0.1.0 -m "Release 0.1.0"
|
# git tag -a rust-v0.1.0 -m "Release 0.1.0"
|
||||||
@@ -35,7 +35,7 @@ jobs:
|
|||||||
|
|
||||||
# 2. Extract versions
|
# 2. Extract versions
|
||||||
tag_ver="${GITHUB_REF_NAME#rust-v}"
|
tag_ver="${GITHUB_REF_NAME#rust-v}"
|
||||||
cargo_ver="$(grep -m1 '^version' llmx-rs/Cargo.toml \
|
cargo_ver="$(grep -m1 '^version' codex-rs/Cargo.toml \
|
||||||
| sed -E 's/version *= *"([^"]+)".*/\1/')"
|
| sed -E 's/version *= *"([^"]+)".*/\1/')"
|
||||||
|
|
||||||
# 3. Compare
|
# 3. Compare
|
||||||
@@ -52,7 +52,7 @@ jobs:
|
|||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: llmx-rs
|
working-directory: codex-rs
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
@@ -88,7 +88,7 @@ jobs:
|
|||||||
~/.cargo/registry/index/
|
~/.cargo/registry/index/
|
||||||
~/.cargo/registry/cache/
|
~/.cargo/registry/cache/
|
||||||
~/.cargo/git/db/
|
~/.cargo/git/db/
|
||||||
${{ github.workspace }}/llmx-rs/target/
|
${{ github.workspace }}/codex-rs/target/
|
||||||
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
|
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
|
||||||
|
|
||||||
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
|
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
|
||||||
@@ -98,7 +98,7 @@ jobs:
|
|||||||
sudo apt-get install -y musl-tools pkg-config
|
sudo apt-get install -y musl-tools pkg-config
|
||||||
|
|
||||||
- name: Cargo build
|
- name: Cargo build
|
||||||
run: cargo build --target ${{ matrix.target }} --release --bin llmx --bin llmx-responses-api-proxy
|
run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy
|
||||||
|
|
||||||
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
|
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
|
||||||
name: Configure Apple code signing
|
name: Configure Apple code signing
|
||||||
@@ -111,21 +111,19 @@ jobs:
|
|||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
if [[ -z "${APPLE_CERTIFICATE:-}" ]]; then
|
if [[ -z "${APPLE_CERTIFICATE:-}" ]]; then
|
||||||
echo "⚠️ APPLE_CERTIFICATE not set - skipping macOS code signing"
|
echo "APPLE_CERTIFICATE is required for macOS signing"
|
||||||
echo "SKIP_MACOS_SIGNING=true" >> "$GITHUB_ENV"
|
exit 1
|
||||||
exit 0
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -z "${APPLE_CERTIFICATE_PASSWORD:-}" ]]; then
|
if [[ -z "${APPLE_CERTIFICATE_PASSWORD:-}" ]]; then
|
||||||
echo "⚠️ APPLE_CERTIFICATE_PASSWORD not set - skipping macOS code signing"
|
echo "APPLE_CERTIFICATE_PASSWORD is required for macOS signing"
|
||||||
echo "SKIP_MACOS_SIGNING=true" >> "$GITHUB_ENV"
|
exit 1
|
||||||
exit 0
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cert_path="${RUNNER_TEMP}/apple_signing_certificate.p12"
|
cert_path="${RUNNER_TEMP}/apple_signing_certificate.p12"
|
||||||
echo "$APPLE_CERTIFICATE" | base64 -d > "$cert_path"
|
echo "$APPLE_CERTIFICATE" | base64 -d > "$cert_path"
|
||||||
|
|
||||||
keychain_path="${RUNNER_TEMP}/llmx-signing.keychain-db"
|
keychain_path="${RUNNER_TEMP}/codex-signing.keychain-db"
|
||||||
security create-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
|
security create-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
|
||||||
security set-keychain-settings -lut 21600 "$keychain_path"
|
security set-keychain-settings -lut 21600 "$keychain_path"
|
||||||
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
|
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
|
||||||
@@ -187,15 +185,15 @@ jobs:
|
|||||||
echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
|
echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
|
||||||
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
|
echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
|
||||||
|
|
||||||
- if: ${{ matrix.runner == 'macos-15-xlarge' && env.SKIP_MACOS_SIGNING != 'true' }}
|
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
|
||||||
name: Sign macOS binaries
|
name: Sign macOS binaries
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
if [[ -z "${APPLE_CODESIGN_IDENTITY:-}" ]]; then
|
if [[ -z "${APPLE_CODESIGN_IDENTITY:-}" ]]; then
|
||||||
echo "⚠️ APPLE_CODESIGN_IDENTITY not set - skipping macOS signing"
|
echo "APPLE_CODESIGN_IDENTITY is required for macOS signing"
|
||||||
exit 0
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
keychain_args=()
|
keychain_args=()
|
||||||
@@ -203,12 +201,12 @@ jobs:
|
|||||||
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
|
keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
|
||||||
fi
|
fi
|
||||||
|
|
||||||
for binary in llmx llmx-responses-api-proxy; do
|
for binary in codex codex-responses-api-proxy; do
|
||||||
path="target/${{ matrix.target }}/release/${binary}"
|
path="target/${{ matrix.target }}/release/${binary}"
|
||||||
codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
|
codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
|
||||||
done
|
done
|
||||||
|
|
||||||
- if: ${{ matrix.runner == 'macos-15-xlarge' && env.SKIP_MACOS_SIGNING != 'true' }}
|
- if: ${{ matrix.runner == 'macos-15-xlarge' }}
|
||||||
name: Notarize macOS binaries
|
name: Notarize macOS binaries
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
env:
|
||||||
@@ -220,8 +218,8 @@ jobs:
|
|||||||
|
|
||||||
for var in APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
|
for var in APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
|
||||||
if [[ -z "${!var:-}" ]]; then
|
if [[ -z "${!var:-}" ]]; then
|
||||||
echo "⚠️ $var not set - skipping macOS notarization"
|
echo "$var is required for notarization"
|
||||||
exit 0
|
exit 1
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
@@ -268,8 +266,8 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
notarize_binary "llmx"
|
notarize_binary "codex"
|
||||||
notarize_binary "llmx-responses-api-proxy"
|
notarize_binary "codex-responses-api-proxy"
|
||||||
|
|
||||||
- name: Stage artifacts
|
- name: Stage artifacts
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -278,11 +276,11 @@ jobs:
|
|||||||
mkdir -p "$dest"
|
mkdir -p "$dest"
|
||||||
|
|
||||||
if [[ "${{ matrix.runner }}" == windows* ]]; then
|
if [[ "${{ matrix.runner }}" == windows* ]]; then
|
||||||
cp target/${{ matrix.target }}/release/llmx.exe "$dest/llmx-${{ matrix.target }}.exe"
|
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
|
||||||
cp target/${{ matrix.target }}/release/llmx-responses-api-proxy.exe "$dest/llmx-responses-api-proxy-${{ matrix.target }}.exe"
|
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
|
||||||
else
|
else
|
||||||
cp target/${{ matrix.target }}/release/llmx "$dest/llmx-${{ matrix.target }}"
|
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
|
||||||
cp target/${{ matrix.target }}/release/llmx-responses-api-proxy "$dest/llmx-responses-api-proxy-${{ matrix.target }}"
|
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- if: ${{ matrix.runner == 'windows-11-arm' }}
|
- if: ${{ matrix.runner == 'windows-11-arm' }}
|
||||||
@@ -309,9 +307,9 @@ jobs:
|
|||||||
# For compatibility with environments that lack the `zstd` tool we
|
# For compatibility with environments that lack the `zstd` tool we
|
||||||
# additionally create a `.tar.gz` for all platforms and `.zip` for
|
# additionally create a `.tar.gz` for all platforms and `.zip` for
|
||||||
# Windows alongside every single binary that we publish. The end result is:
|
# Windows alongside every single binary that we publish. The end result is:
|
||||||
# llmx-<target>.zst (existing)
|
# codex-<target>.zst (existing)
|
||||||
# llmx-<target>.tar.gz (new)
|
# codex-<target>.tar.gz (new)
|
||||||
# llmx-<target>.zip (only for Windows)
|
# codex-<target>.zip (only for Windows)
|
||||||
|
|
||||||
# 1. Produce a .tar.gz for every file in the directory *before* we
|
# 1. Produce a .tar.gz for every file in the directory *before* we
|
||||||
# run `zstd --rm`, because that flag deletes the original files.
|
# run `zstd --rm`, because that flag deletes the original files.
|
||||||
@@ -343,7 +341,7 @@ jobs:
|
|||||||
done
|
done
|
||||||
|
|
||||||
- name: Remove signing keychain
|
- name: Remove signing keychain
|
||||||
if: ${{ always() && matrix.runner == 'macos-15-xlarge' && env.SKIP_MACOS_SIGNING != 'true' }}
|
if: ${{ always() && matrix.runner == 'macos-15-xlarge' }}
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
env:
|
||||||
APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}
|
APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}
|
||||||
@@ -371,7 +369,7 @@ jobs:
|
|||||||
# Upload the per-binary .zst files as well as the new .tar.gz
|
# Upload the per-binary .zst files as well as the new .tar.gz
|
||||||
# equivalents we generated in the previous step.
|
# equivalents we generated in the previous step.
|
||||||
path: |
|
path: |
|
||||||
llmx-rs/dist/${{ matrix.target }}/*
|
codex-rs/dist/${{ matrix.target }}/*
|
||||||
|
|
||||||
release:
|
release:
|
||||||
needs: build
|
needs: build
|
||||||
@@ -445,19 +443,9 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
./scripts/stage_npm_packages.py \
|
./scripts/stage_npm_packages.py \
|
||||||
--release-version "${{ steps.release_name.outputs.name }}" \
|
--release-version "${{ steps.release_name.outputs.name }}" \
|
||||||
--package llmx
|
--package codex \
|
||||||
|
--package codex-responses-api-proxy \
|
||||||
# Delete any existing release to avoid conflicts with dotslash manifest file
|
--package codex-sdk
|
||||||
- name: Delete existing release if present
|
|
||||||
env:
|
|
||||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
run: |
|
|
||||||
if gh release view "${{ github.ref_name }}" --repo "${{ github.repository }}" >/dev/null 2>&1; then
|
|
||||||
echo "Deleting existing release ${{ github.ref_name }}"
|
|
||||||
gh release delete "${{ github.ref_name }}" --repo "${{ github.repository }}" --yes
|
|
||||||
else
|
|
||||||
echo "No existing release found for ${{ github.ref_name }}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Create GitHub Release
|
- name: Create GitHub Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
@@ -476,7 +464,9 @@ jobs:
|
|||||||
tag: ${{ github.ref_name }}
|
tag: ${{ github.ref_name }}
|
||||||
config: .github/dotslash-config.json
|
config: .github/dotslash-config.json
|
||||||
|
|
||||||
# Publish to npm using Trusted Publishers (OIDC)
|
# Publish to npm using OIDC authentication.
|
||||||
|
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
|
||||||
|
# npm docs: https://docs.npmjs.com/trusted-publishers
|
||||||
publish-npm:
|
publish-npm:
|
||||||
# Publish to npm for stable releases and alpha pre-releases with numeric suffixes.
|
# Publish to npm for stable releases and alpha pre-releases with numeric suffixes.
|
||||||
if: ${{ needs.release.outputs.should_publish_npm == 'true' }}
|
if: ${{ needs.release.outputs.should_publish_npm == 'true' }}
|
||||||
@@ -484,8 +474,8 @@ jobs:
|
|||||||
needs: release
|
needs: release
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
|
id-token: write # Required for OIDC
|
||||||
contents: read
|
contents: read
|
||||||
id-token: write # Required for OIDC authentication
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Setup Node.js
|
- name: Setup Node.js
|
||||||
@@ -493,8 +483,9 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
node-version: 22
|
node-version: 22
|
||||||
registry-url: "https://registry.npmjs.org"
|
registry-url: "https://registry.npmjs.org"
|
||||||
scope: "@valknarthing"
|
scope: "@openai"
|
||||||
|
|
||||||
|
# Trusted publishing requires npm CLI version 11.5.1 or later.
|
||||||
- name: Update npm
|
- name: Update npm
|
||||||
run: npm install -g npm@latest
|
run: npm install -g npm@latest
|
||||||
|
|
||||||
@@ -508,9 +499,18 @@ jobs:
|
|||||||
mkdir -p dist/npm
|
mkdir -p dist/npm
|
||||||
gh release download "$tag" \
|
gh release download "$tag" \
|
||||||
--repo "${GITHUB_REPOSITORY}" \
|
--repo "${GITHUB_REPOSITORY}" \
|
||||||
--pattern "llmx-npm-${version}.tgz" \
|
--pattern "codex-npm-${version}.tgz" \
|
||||||
|
--dir dist/npm
|
||||||
|
gh release download "$tag" \
|
||||||
|
--repo "${GITHUB_REPOSITORY}" \
|
||||||
|
--pattern "codex-responses-api-proxy-npm-${version}.tgz" \
|
||||||
|
--dir dist/npm
|
||||||
|
gh release download "$tag" \
|
||||||
|
--repo "${GITHUB_REPOSITORY}" \
|
||||||
|
--pattern "codex-sdk-npm-${version}.tgz" \
|
||||||
--dir dist/npm
|
--dir dist/npm
|
||||||
|
|
||||||
|
# No NODE_AUTH_TOKEN needed because we use OIDC.
|
||||||
- name: Publish to npm
|
- name: Publish to npm
|
||||||
env:
|
env:
|
||||||
VERSION: ${{ needs.release.outputs.version }}
|
VERSION: ${{ needs.release.outputs.version }}
|
||||||
@@ -523,28 +523,30 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
tarballs=(
|
tarballs=(
|
||||||
"llmx-npm-${VERSION}.tgz"
|
"codex-npm-${VERSION}.tgz"
|
||||||
|
"codex-responses-api-proxy-npm-${VERSION}.tgz"
|
||||||
|
"codex-sdk-npm-${VERSION}.tgz"
|
||||||
)
|
)
|
||||||
|
|
||||||
for tarball in "${tarballs[@]}"; do
|
for tarball in "${tarballs[@]}"; do
|
||||||
npm publish "${GITHUB_WORKSPACE}/dist/npm/${tarball}" --provenance --access public "${tag_args[@]}"
|
npm publish "${GITHUB_WORKSPACE}/dist/npm/${tarball}" "${tag_args[@]}"
|
||||||
done
|
done
|
||||||
|
|
||||||
# update-branch:
|
update-branch:
|
||||||
# name: Update latest-alpha-cli branch
|
name: Update latest-alpha-cli branch
|
||||||
# permissions:
|
permissions:
|
||||||
# contents: write
|
contents: write
|
||||||
# needs: release
|
needs: release
|
||||||
# runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
#
|
|
||||||
# steps:
|
steps:
|
||||||
# - name: Update latest-alpha-cli branch
|
- name: Update latest-alpha-cli branch
|
||||||
# env:
|
env:
|
||||||
# GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
# run: |
|
run: |
|
||||||
# set -euo pipefail
|
set -euo pipefail
|
||||||
# gh api \
|
gh api \
|
||||||
# repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
|
repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
|
||||||
# -X PATCH \
|
-X PATCH \
|
||||||
# -f sha="${GITHUB_SHA}" \
|
-f sha="${GITHUB_SHA}" \
|
||||||
# -F force=true
|
-F force=true
|
||||||
|
|||||||
8
.github/workflows/sdk.yml
vendored
8
.github/workflows/sdk.yml
vendored
@@ -26,9 +26,9 @@ jobs:
|
|||||||
|
|
||||||
- uses: dtolnay/rust-toolchain@1.90
|
- uses: dtolnay/rust-toolchain@1.90
|
||||||
|
|
||||||
- name: build llmx
|
- name: build codex
|
||||||
run: cargo build --bin llmx
|
run: cargo build --bin codex
|
||||||
working-directory: llmx-rs
|
working-directory: codex-rs
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: pnpm install --frozen-lockfile
|
run: pnpm install --frozen-lockfile
|
||||||
@@ -41,5 +41,3 @@ jobs:
|
|||||||
|
|
||||||
- name: Test SDK packages
|
- name: Test SDK packages
|
||||||
run: pnpm -r --filter ./sdk/typescript run test
|
run: pnpm -r --filter ./sdk/typescript run test
|
||||||
env:
|
|
||||||
LLMX_API_KEY: test
|
|
||||||
|
|||||||
6
.vscode/launch.json
vendored
6
.vscode/launch.json
vendored
@@ -6,15 +6,15 @@
|
|||||||
"request": "launch",
|
"request": "launch",
|
||||||
"name": "Cargo launch",
|
"name": "Cargo launch",
|
||||||
"cargo": {
|
"cargo": {
|
||||||
"cwd": "${workspaceFolder}/llmx-rs",
|
"cwd": "${workspaceFolder}/codex-rs",
|
||||||
"args": ["build", "--bin=llmx-tui"]
|
"args": ["build", "--bin=codex-tui"]
|
||||||
},
|
},
|
||||||
"args": []
|
"args": []
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"type": "lldb",
|
"type": "lldb",
|
||||||
"request": "attach",
|
"request": "attach",
|
||||||
"name": "Attach to running llmx CLI",
|
"name": "Attach to running codex CLI",
|
||||||
"pid": "${command:pickProcess}",
|
"pid": "${command:pickProcess}",
|
||||||
"sourceLanguages": ["rust"]
|
"sourceLanguages": ["rust"]
|
||||||
}
|
}
|
||||||
|
|||||||
4
.vscode/settings.json
vendored
4
.vscode/settings.json
vendored
@@ -3,7 +3,7 @@
|
|||||||
"rust-analyzer.check.command": "clippy",
|
"rust-analyzer.check.command": "clippy",
|
||||||
"rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
|
"rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
|
||||||
"rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
|
"rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
|
||||||
"rust-analyzer.cargo.targetDir": "${workspaceFolder}/llmx-rs/target/rust-analyzer",
|
"rust-analyzer.cargo.targetDir": "${workspaceFolder}/codex-rs/target/rust-analyzer",
|
||||||
"[rust]": {
|
"[rust]": {
|
||||||
"editor.defaultFormatter": "rust-lang.rust-analyzer",
|
"editor.defaultFormatter": "rust-lang.rust-analyzer",
|
||||||
"editor.formatOnSave": true,
|
"editor.formatOnSave": true,
|
||||||
@@ -12,7 +12,7 @@
|
|||||||
"editor.defaultFormatter": "tamasfe.even-better-toml",
|
"editor.defaultFormatter": "tamasfe.even-better-toml",
|
||||||
"editor.formatOnSave": true,
|
"editor.formatOnSave": true,
|
||||||
},
|
},
|
||||||
// Array order for options in ~/.llmx/config.toml such as `notify` and the
|
// Array order for options in ~/.codex/config.toml such as `notify` and the
|
||||||
// `args` for an MCP server is significant, so we disable reordering.
|
// `args` for an MCP server is significant, so we disable reordering.
|
||||||
"evenBetterToml.formatter.reorderArrays": false,
|
"evenBetterToml.formatter.reorderArrays": false,
|
||||||
"evenBetterToml.formatter.reorderKeys": true,
|
"evenBetterToml.formatter.reorderKeys": true,
|
||||||
|
|||||||
32
AGENTS.md
32
AGENTS.md
@@ -1,13 +1,13 @@
|
|||||||
# Rust/llmx-rs
|
# Rust/codex-rs
|
||||||
|
|
||||||
In the llmx-rs folder where the rust code lives:
|
In the codex-rs folder where the rust code lives:
|
||||||
|
|
||||||
- Crate names are prefixed with `llmx-`. For example, the `core` folder's crate is named `llmx-core`
|
- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`
|
||||||
- When using format! and you can inline variables into {}, always do that.
|
- When using format! and you can inline variables into {}, always do that.
|
||||||
- Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here.
|
- Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here.
|
||||||
- Never add or modify any code related to `LLMX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `LLMX_SANDBOX_ENV_VAR`.
|
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
|
||||||
- You operate in a sandbox where `LLMX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `LLMX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
|
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
|
||||||
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `LLMX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `LLMX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
|
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
|
||||||
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
|
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
|
||||||
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
|
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
|
||||||
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
|
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
|
||||||
@@ -15,15 +15,15 @@ In the llmx-rs folder where the rust code lives:
|
|||||||
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
|
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
|
||||||
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
|
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
|
||||||
|
|
||||||
Run `just fmt` (in `llmx-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `llmx-rs`, run `just fix -p <project>` (in `llmx-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace‑wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
|
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace‑wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
|
||||||
|
|
||||||
1. Run the test for the specific project that was changed. For example, if changes were made in `llmx-rs/tui`, run `cargo test -p llmx-tui`.
|
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
|
||||||
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
|
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
|
||||||
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
|
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
|
||||||
|
|
||||||
## TUI style conventions
|
## TUI style conventions
|
||||||
|
|
||||||
See `llmx-rs/tui/styles.md`.
|
See `codex-rs/tui/styles.md`.
|
||||||
|
|
||||||
## TUI code conventions
|
## TUI code conventions
|
||||||
|
|
||||||
@@ -57,16 +57,16 @@ See `llmx-rs/tui/styles.md`.
|
|||||||
|
|
||||||
### Snapshot tests
|
### Snapshot tests
|
||||||
|
|
||||||
This repo uses snapshot tests (via `insta`), especially in `llmx-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
|
This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
|
||||||
|
|
||||||
- Run tests to generate any updated snapshots:
|
- Run tests to generate any updated snapshots:
|
||||||
- `cargo test -p llmx-tui`
|
- `cargo test -p codex-tui`
|
||||||
- Check what’s pending:
|
- Check what’s pending:
|
||||||
- `cargo insta pending-snapshots -p llmx-tui`
|
- `cargo insta pending-snapshots -p codex-tui`
|
||||||
- Review changes by reading the generated `*.snap.new` files directly in the repo, or preview a specific file:
|
- Review changes by reading the generated `*.snap.new` files directly in the repo, or preview a specific file:
|
||||||
- `cargo insta show -p llmx-tui path/to/file.snap.new`
|
- `cargo insta show -p codex-tui path/to/file.snap.new`
|
||||||
- Only if you intend to accept all new snapshots in this crate, run:
|
- Only if you intend to accept all new snapshots in this crate, run:
|
||||||
- `cargo insta accept -p llmx-tui`
|
- `cargo insta accept -p codex-tui`
|
||||||
|
|
||||||
If you don’t have the tool:
|
If you don’t have the tool:
|
||||||
|
|
||||||
@@ -78,7 +78,7 @@ If you don’t have the tool:
|
|||||||
|
|
||||||
### Integration tests (core)
|
### Integration tests (core)
|
||||||
|
|
||||||
- Prefer the utilities in `core_test_support::responses` when writing end-to-end LLMX tests.
|
- Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.
|
||||||
|
|
||||||
- All `mount_sse*` helpers return a `ResponseMock`; hold onto it so you can assert against outbound `/responses` POST bodies.
|
- All `mount_sse*` helpers return a `ResponseMock`; hold onto it so you can assert against outbound `/responses` POST bodies.
|
||||||
- Use `ResponseMock::single_request()` when a test should only issue one POST, or `ResponseMock::requests()` to inspect every captured `ResponsesRequest`.
|
- Use `ResponseMock::single_request()` when a test should only issue one POST, or `ResponseMock::requests()` to inspect every captured `ResponsesRequest`.
|
||||||
@@ -95,7 +95,7 @@ If you don’t have the tool:
|
|||||||
responses::ev_completed("resp-1"),
|
responses::ev_completed("resp-1"),
|
||||||
])).await;
|
])).await;
|
||||||
|
|
||||||
llmx.submit(Op::UserTurn { ... }).await?;
|
codex.submit(Op::UserTurn { ... }).await?;
|
||||||
|
|
||||||
// Assert request body if needed.
|
// Assert request body if needed.
|
||||||
let request = mock.single_request();
|
let request = mock.single_request();
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
The changelog can be found on the [releases page](https://github.com/valknar/llmx/releases).
|
The changelog can be found on the [releases page](https://github.com/openai/codex/releases).
|
||||||
|
|||||||
@@ -1,83 +0,0 @@
|
|||||||
# LLMX with LiteLLM Configuration Guide
|
|
||||||
|
|
||||||
## Quick Start
|
|
||||||
|
|
||||||
### 1. Set Environment Variables
|
|
||||||
|
|
||||||
```bash
|
|
||||||
export LLMX_BASE_URL="https://llm.ai.pivoine.art/v1"
|
|
||||||
export LLMX_API_KEY="your-litellm-master-key"
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Create Configuration File
|
|
||||||
|
|
||||||
Create `~/.llmx/config.toml`:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
model_provider = "litellm"
|
|
||||||
model = "anthropic/claude-sonnet-4-20250514"
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Run LLMX
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Use default config
|
|
||||||
llmx "hello world"
|
|
||||||
|
|
||||||
# Override model
|
|
||||||
llmx -m "openai/gpt-4" "hello world"
|
|
||||||
|
|
||||||
# Override provider and model
|
|
||||||
llmx -c model_provider=litellm -m "anthropic/claude-sonnet-4-20250514" "hello"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Important Notes
|
|
||||||
|
|
||||||
### DO NOT use provider prefix in model name
|
|
||||||
|
|
||||||
❌ Wrong: `llmx -m "litellm:anthropic/claude-sonnet-4-20250514"`
|
|
||||||
✅ Correct: `llmx -c model_provider=litellm -m "anthropic/claude-sonnet-4-20250514"`
|
|
||||||
|
|
||||||
LLMX uses separate provider and model parameters, not a combined `provider:model` syntax.
|
|
||||||
|
|
||||||
### Provider Selection
|
|
||||||
|
|
||||||
The provider determines which API endpoint and format to use:
|
|
||||||
|
|
||||||
- `litellm` → Uses Chat Completions API (`/v1/chat/completions`)
|
|
||||||
- `openai` → Uses Responses API (`/v1/responses`) - NOT compatible with LiteLLM
|
|
||||||
|
|
||||||
### Model Names
|
|
||||||
|
|
||||||
LiteLLM uses `provider/model` format:
|
|
||||||
|
|
||||||
- `anthropic/claude-sonnet-4-20250514`
|
|
||||||
- `openai/gpt-4`
|
|
||||||
- `openai/gpt-4o`
|
|
||||||
|
|
||||||
Check your LiteLLM configuration for available models.
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Error: "prompt_cache_key: Extra inputs are not permitted"
|
|
||||||
|
|
||||||
**Cause**: Using wrong provider (defaults to OpenAI which uses Responses API)
|
|
||||||
**Fix**: Add `-c model_provider=litellm` or set `model_provider = "litellm"` in config
|
|
||||||
|
|
||||||
### Error: "Invalid model name passed in model=litellm:..."
|
|
||||||
|
|
||||||
**Cause**: Including provider prefix in model name
|
|
||||||
**Fix**: Remove the `litellm:` prefix, use just the model name
|
|
||||||
|
|
||||||
### Error: "Model provider `litellm` not found"
|
|
||||||
|
|
||||||
**Cause**: Using old binary without LiteLLM provider
|
|
||||||
**Fix**: Use the newly built binary at `llmx-rs/target/release/llmx`
|
|
||||||
|
|
||||||
## Binary Location
|
|
||||||
|
|
||||||
Latest binary with LiteLLM support:
|
|
||||||
|
|
||||||
```
|
|
||||||
/home/valknar/Projects/llmx/llmx/llmx-rs/target/release/llmx
|
|
||||||
```
|
|
||||||
16
PNPM.md
16
PNPM.md
@@ -33,21 +33,21 @@ corepack prepare pnpm@10.8.1 --activate
|
|||||||
|
|
||||||
### Workspace-specific commands
|
### Workspace-specific commands
|
||||||
|
|
||||||
| Action | Command |
|
| Action | Command |
|
||||||
| ------------------------------------------ | ------------------------------------- |
|
| ------------------------------------------ | ---------------------------------------- |
|
||||||
| Run a command in a specific package | `pnpm --filter @llmx/llmx run build` |
|
| Run a command in a specific package | `pnpm --filter @openai/codex run build` |
|
||||||
| Install a dependency in a specific package | `pnpm --filter @llmx/llmx add lodash` |
|
| Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` |
|
||||||
| Run a command in all packages | `pnpm -r run test` |
|
| Run a command in all packages | `pnpm -r run test` |
|
||||||
|
|
||||||
## Monorepo structure
|
## Monorepo structure
|
||||||
|
|
||||||
```
|
```
|
||||||
llmx/
|
codex/
|
||||||
├── pnpm-workspace.yaml # Workspace configuration
|
├── pnpm-workspace.yaml # Workspace configuration
|
||||||
├── .npmrc # pnpm configuration
|
├── .npmrc # pnpm configuration
|
||||||
├── package.json # Root dependencies and scripts
|
├── package.json # Root dependencies and scripts
|
||||||
├── llmx-cli/ # Main package
|
├── codex-cli/ # Main package
|
||||||
│ └── package.json # llmx-cli specific dependencies
|
│ └── package.json # codex-cli specific dependencies
|
||||||
└── docs/ # Documentation (future package)
|
└── docs/ # Documentation (future package)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
122
README.md
122
README.md
@@ -1,102 +1,106 @@
|
|||||||
<p align="center"><code>npm i -g @valknarthing/llmx</code></p>
|
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>
|
||||||
|
|
||||||
<p align="center"><strong>LLMX CLI</strong> is a coding agent powered by LiteLLM that runs locally on your computer.
|
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
|
||||||
</br>
|
</br>
|
||||||
</br>This project is a community fork with enhanced support for multiple LLM providers via LiteLLM.
|
</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
|
||||||
</br>Original project: <a href="https://github.com/openai/codex">github.com/openai/codex</a></p>
|
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
|
||||||
|
</p>
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
### Installing and running LLMX CLI
|
### Installing and running Codex CLI
|
||||||
|
|
||||||
Install globally with npm:
|
Install globally with your preferred package manager. If you use npm:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
npm install -g @valknarthing/llmx
|
npm install -g @openai/codex
|
||||||
```
|
```
|
||||||
|
|
||||||
Then simply run `llmx` to get started:
|
Alternatively, if you use Homebrew:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
llmx
|
brew install --cask codex
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Then simply run `codex` to get started:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
codex
|
||||||
|
```
|
||||||
|
|
||||||
|
If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>You can also go to the <a href="https://github.com/valknarthing/llmx/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
|
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
|
||||||
|
|
||||||
Each GitHub Release contains many executables, but in practice, you likely want one of these:
|
Each GitHub Release contains many executables, but in practice, you likely want one of these:
|
||||||
|
|
||||||
- macOS
|
- macOS
|
||||||
- Apple Silicon/arm64: `llmx-aarch64-apple-darwin.tar.gz`
|
- Apple Silicon/arm64: `codex-aarch64-apple-darwin.tar.gz`
|
||||||
- x86_64 (older Mac hardware): `llmx-x86_64-apple-darwin.tar.gz`
|
- x86_64 (older Mac hardware): `codex-x86_64-apple-darwin.tar.gz`
|
||||||
- Linux
|
- Linux
|
||||||
- x86_64: `llmx-x86_64-unknown-linux-musl.tar.gz`
|
- x86_64: `codex-x86_64-unknown-linux-musl.tar.gz`
|
||||||
- arm64: `llmx-aarch64-unknown-linux-musl.tar.gz`
|
- arm64: `codex-aarch64-unknown-linux-musl.tar.gz`
|
||||||
|
|
||||||
Each archive contains a single entry with the platform baked into the name (e.g., `llmx-x86_64-unknown-linux-musl`), so you likely want to rename it to `llmx` after extracting it.
|
Each archive contains a single entry with the platform baked into the name (e.g., `codex-x86_64-unknown-linux-musl`), so you likely want to rename it to `codex` after extracting it.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### Using LLMX with LiteLLM
|
### Using Codex with your ChatGPT plan
|
||||||
|
|
||||||
LLMX is powered by [LiteLLM](https://docs.litellm.ai/), which provides access to 100+ LLM providers including OpenAI, Anthropic, Google, Azure, AWS Bedrock, and more.
|
<p align="center">
|
||||||
|
<img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" />
|
||||||
|
</p>
|
||||||
|
|
||||||
**Quick Start with LiteLLM:**
|
Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).
|
||||||
|
|
||||||
```bash
|
You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243).
|
||||||
# Set your LiteLLM server URL (default: http://localhost:4000/v1)
|
|
||||||
export LLMX_BASE_URL="http://localhost:4000/v1"
|
|
||||||
export LLMX_API_KEY="your-api-key"
|
|
||||||
|
|
||||||
# Run LLMX
|
|
||||||
llmx "hello world"
|
|
||||||
```
|
|
||||||
|
|
||||||
**Configuration:** See [LITELLM-SETUP.md](https://github.com/valknarthing/llmx/blob/main/LITELLM-SETUP.md) for detailed setup instructions.
|
|
||||||
|
|
||||||
You can also use LLMX with ChatGPT or OpenAI API keys. For authentication options, see the [authentication docs](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md).
|
|
||||||
|
|
||||||
### Model Context Protocol (MCP)
|
### Model Context Protocol (MCP)
|
||||||
|
|
||||||
LLMX can access MCP servers. To configure them, refer to the [config docs](https://github.com/valknarthing/llmx/blob/main/docs/config.md#mcp_servers).
|
Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).
|
||||||
|
|
||||||
### Configuration
|
### Configuration
|
||||||
|
|
||||||
LLMX CLI supports a rich set of configuration options, with preferences stored in `~/.llmx/config.toml`. For full configuration options, see [Configuration](https://github.com/valknarthing/llmx/blob/main/docs/config.md).
|
Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Docs & FAQ
|
### Docs & FAQ
|
||||||
|
|
||||||
- [**Getting started**](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md)
|
- [**Getting started**](./docs/getting-started.md)
|
||||||
- [CLI usage](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#cli-usage)
|
- [CLI usage](./docs/getting-started.md#cli-usage)
|
||||||
- [Slash Commands](https://github.com/valknarthing/llmx/blob/main/docs/slash_commands.md)
|
- [Slash Commands](./docs/slash_commands.md)
|
||||||
- [Running with a prompt as input](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#running-with-a-prompt-as-input)
|
- [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
|
||||||
- [Example prompts](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#example-prompts)
|
- [Example prompts](./docs/getting-started.md#example-prompts)
|
||||||
- [Custom prompts](https://github.com/valknarthing/llmx/blob/main/docs/prompts.md)
|
- [Custom prompts](./docs/prompts.md)
|
||||||
- [Memory with AGENTS.md](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#memory-with-agentsmd)
|
- [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
|
||||||
- [**Configuration**](https://github.com/valknarthing/llmx/blob/main/docs/config.md)
|
- [**Configuration**](./docs/config.md)
|
||||||
- [Example config](https://github.com/valknarthing/llmx/blob/main/docs/example-config.md)
|
- [Example config](./docs/example-config.md)
|
||||||
- [**Sandbox & approvals**](https://github.com/valknarthing/llmx/blob/main/docs/sandbox.md)
|
- [**Sandbox & approvals**](./docs/sandbox.md)
|
||||||
- [**Authentication**](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md)
|
- [**Authentication**](./docs/authentication.md)
|
||||||
- [Auth methods](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md#forcing-a-specific-auth-method-advanced)
|
- [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
|
||||||
- [Login on a "Headless" machine](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md#connecting-on-a-headless-machine)
|
- [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
|
||||||
- **Automating LLMX**
|
- **Automating Codex**
|
||||||
- [GitHub Action](https://github.com/valknarthing/llmx-action)
|
- [GitHub Action](https://github.com/openai/codex-action)
|
||||||
- [TypeScript SDK](https://github.com/valknarthing/llmx/blob/main/sdk/typescript/README.md)
|
- [TypeScript SDK](./sdk/typescript/README.md)
|
||||||
- [Non-interactive mode (`llmx exec`)](https://github.com/valknarthing/llmx/blob/main/docs/exec.md)
|
- [Non-interactive mode (`codex exec`)](./docs/exec.md)
|
||||||
- [**Advanced**](https://github.com/valknarthing/llmx/blob/main/docs/advanced.md)
|
- [**Advanced**](./docs/advanced.md)
|
||||||
- [Tracing / verbose logging](https://github.com/valknarthing/llmx/blob/main/docs/advanced.md#tracing--verbose-logging)
|
- [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
|
||||||
- [Model Context Protocol (MCP)](https://github.com/valknarthing/llmx/blob/main/docs/advanced.md#model-context-protocol-mcp)
|
- [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
|
||||||
- [**Zero data retention (ZDR)**](https://github.com/valknarthing/llmx/blob/main/docs/zdr.md)
|
- [**Zero data retention (ZDR)**](./docs/zdr.md)
|
||||||
- [**Contributing**](https://github.com/valknarthing/llmx/blob/main/docs/contributing.md)
|
- [**Contributing**](./docs/contributing.md)
|
||||||
- [**Install & build**](https://github.com/valknarthing/llmx/blob/main/docs/install.md)
|
- [**Install & build**](./docs/install.md)
|
||||||
- [System Requirements](https://github.com/valknarthing/llmx/blob/main/docs/install.md#system-requirements)
|
- [System Requirements](./docs/install.md#system-requirements)
|
||||||
- [DotSlash](https://github.com/valknarthing/llmx/blob/main/docs/install.md#dotslash)
|
- [DotSlash](./docs/install.md#dotslash)
|
||||||
- [Build from source](https://github.com/valknarthing/llmx/blob/main/docs/install.md#build-from-source)
|
- [Build from source](./docs/install.md#build-from-source)
|
||||||
- [**FAQ**](https://github.com/valknarthing/llmx/blob/main/docs/faq.md)
|
- [**FAQ**](./docs/faq.md)
|
||||||
|
- [**Open source fund**](./docs/open-source-fund.md)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
header = """
|
header = """
|
||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
You can install any of these versions: `npm install -g @openai/llmx@<version>`
|
You can install any of these versions: `npm install -g @openai/codex@<version>`
|
||||||
"""
|
"""
|
||||||
|
|
||||||
body = """
|
body = """
|
||||||
|
|||||||
736
codex-cli/README.md
Normal file
736
codex-cli/README.md
Normal file
@@ -0,0 +1,736 @@
|
|||||||
|
<h1 align="center">OpenAI Codex CLI</h1>
|
||||||
|
<p align="center">Lightweight coding agent that runs in your terminal</p>
|
||||||
|
|
||||||
|
<p align="center"><code>npm i -g @openai/codex</code></p>
|
||||||
|
|
||||||
|
> [!IMPORTANT]
|
||||||
|
> This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Table of contents</strong></summary>
|
||||||
|
|
||||||
|
<!-- Begin ToC -->
|
||||||
|
|
||||||
|
- [Experimental technology disclaimer](#experimental-technology-disclaimer)
|
||||||
|
- [Quickstart](#quickstart)
|
||||||
|
- [Why Codex?](#why-codex)
|
||||||
|
- [Security model & permissions](#security-model--permissions)
|
||||||
|
- [Platform sandboxing details](#platform-sandboxing-details)
|
||||||
|
- [System requirements](#system-requirements)
|
||||||
|
- [CLI reference](#cli-reference)
|
||||||
|
- [Memory & project docs](#memory--project-docs)
|
||||||
|
- [Non-interactive / CI mode](#non-interactive--ci-mode)
|
||||||
|
- [Tracing / verbose logging](#tracing--verbose-logging)
|
||||||
|
- [Recipes](#recipes)
|
||||||
|
- [Installation](#installation)
|
||||||
|
- [Configuration guide](#configuration-guide)
|
||||||
|
- [Basic configuration parameters](#basic-configuration-parameters)
|
||||||
|
- [Custom AI provider configuration](#custom-ai-provider-configuration)
|
||||||
|
- [History configuration](#history-configuration)
|
||||||
|
- [Configuration examples](#configuration-examples)
|
||||||
|
- [Full configuration example](#full-configuration-example)
|
||||||
|
- [Custom instructions](#custom-instructions)
|
||||||
|
- [Environment variables setup](#environment-variables-setup)
|
||||||
|
- [FAQ](#faq)
|
||||||
|
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
|
||||||
|
- [Codex open source fund](#codex-open-source-fund)
|
||||||
|
- [Contributing](#contributing)
|
||||||
|
- [Development workflow](#development-workflow)
|
||||||
|
- [Git hooks with Husky](#git-hooks-with-husky)
|
||||||
|
- [Debugging](#debugging)
|
||||||
|
- [Writing high-impact code changes](#writing-high-impact-code-changes)
|
||||||
|
- [Opening a pull request](#opening-a-pull-request)
|
||||||
|
- [Review process](#review-process)
|
||||||
|
- [Community values](#community-values)
|
||||||
|
- [Getting help](#getting-help)
|
||||||
|
- [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
|
||||||
|
- [Quick fixes](#quick-fixes)
|
||||||
|
- [Releasing `codex`](#releasing-codex)
|
||||||
|
- [Alternative build options](#alternative-build-options)
|
||||||
|
- [Nix flake development](#nix-flake-development)
|
||||||
|
- [Security & responsible AI](#security--responsible-ai)
|
||||||
|
- [License](#license)
|
||||||
|
|
||||||
|
<!-- End ToC -->
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Experimental technology disclaimer
|
||||||
|
|
||||||
|
Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome:
|
||||||
|
|
||||||
|
- Bug reports
|
||||||
|
- Feature requests
|
||||||
|
- Pull requests
|
||||||
|
- Good vibes
|
||||||
|
|
||||||
|
Help us improve by filing issues or submitting PRs (see the section below for how to contribute)!
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
Install globally:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npm install -g @openai/codex
|
||||||
|
```
|
||||||
|
|
||||||
|
Next, set your OpenAI API key as an environment variable:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
export OPENAI_API_KEY="your-api-key-here"
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it only for the session. **Tip:** You can also place your API key into a `.env` file at the root of your project:
|
||||||
|
>
|
||||||
|
> ```env
|
||||||
|
> OPENAI_API_KEY=your-api-key-here
|
||||||
|
> ```
|
||||||
|
>
|
||||||
|
> The CLI will automatically load variables from `.env` (via `dotenv/config`).
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Use <code>--provider</code> to use other models</strong></summary>
|
||||||
|
|
||||||
|
> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
|
||||||
|
>
|
||||||
|
> - openai (default)
|
||||||
|
> - openrouter
|
||||||
|
> - azure
|
||||||
|
> - gemini
|
||||||
|
> - ollama
|
||||||
|
> - mistral
|
||||||
|
> - deepseek
|
||||||
|
> - xai
|
||||||
|
> - groq
|
||||||
|
> - arceeai
|
||||||
|
> - any other provider that is compatible with the OpenAI API
|
||||||
|
>
|
||||||
|
> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
|
||||||
|
>
|
||||||
|
> ```shell
|
||||||
|
> export <provider>_API_KEY="your-api-key-here"
|
||||||
|
> ```
|
||||||
|
>
|
||||||
|
> If you use a provider not listed above, you must also set the base URL for the provider:
|
||||||
|
>
|
||||||
|
> ```shell
|
||||||
|
> export <provider>_BASE_URL="https://your-provider-api-base-url"
|
||||||
|
> ```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
<br />
|
||||||
|
|
||||||
|
Run interactively:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
codex
|
||||||
|
```
|
||||||
|
|
||||||
|
Or, run with a prompt as input (and optionally in `Full Auto` mode):
|
||||||
|
|
||||||
|
```shell
|
||||||
|
codex "explain this codebase to me"
|
||||||
|
```
|
||||||
|
|
||||||
|
```shell
|
||||||
|
codex --approval-mode full-auto "create the fanciest todo-list app"
|
||||||
|
```
|
||||||
|
|
||||||
|
That's it - Codex will scaffold a file, run it inside a sandbox, install any
|
||||||
|
missing dependencies, and show you the live result. Approve the changes and
|
||||||
|
they'll be committed to your working directory.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Why Codex?
|
||||||
|
|
||||||
|
Codex CLI is built for developers who already **live in the terminal** and want
|
||||||
|
ChatGPT-level reasoning **plus** the power to actually run code, manipulate
|
||||||
|
files, and iterate - all under version control. In short, it's _chat-driven
|
||||||
|
development_ that understands and executes your repo.
|
||||||
|
|
||||||
|
- **Zero setup** - bring your OpenAI API key and it just works!
|
||||||
|
- **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed
|
||||||
|
- **Multimodal** - pass in screenshots or diagrams to implement features ✨
|
||||||
|
|
||||||
|
And it's **fully open-source** so you can see and contribute to how it develops!
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Security model & permissions
|
||||||
|
|
||||||
|
Codex lets you decide _how much autonomy_ the agent receives and auto-approval policy via the
|
||||||
|
`--approval-mode` flag (or the interactive onboarding prompt):
|
||||||
|
|
||||||
|
| Mode | What the agent may do without asking | Still requires approval |
|
||||||
|
| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
|
||||||
|
| **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) |
|
||||||
|
| **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands |
|
||||||
|
| **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - |
|
||||||
|
|
||||||
|
In **Full Auto** every command is run **network-disabled** and confined to the
|
||||||
|
current working directory (plus temporary files) for defense-in-depth. Codex
|
||||||
|
will also show a warning/confirmation if you start in **auto-edit** or
|
||||||
|
**full-auto** while the directory is _not_ tracked by Git, so you always have a
|
||||||
|
safety net.
|
||||||
|
|
||||||
|
Coming soon: you'll be able to whitelist specific commands to auto-execute with
|
||||||
|
the network enabled, once we're confident in additional safeguards.
|
||||||
|
|
||||||
|
### Platform sandboxing details
|
||||||
|
|
||||||
|
The hardening mechanism Codex uses depends on your OS:
|
||||||
|
|
||||||
|
- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
|
||||||
|
|
||||||
|
- Everything is placed in a read-only jail except for a small set of
|
||||||
|
writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
|
||||||
|
- Outbound network is _fully blocked_ by default - even if a child process
|
||||||
|
tries to `curl` somewhere it will fail.
|
||||||
|
|
||||||
|
- **Linux** - there is no sandboxing by default.
|
||||||
|
We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
|
||||||
|
container image** and mounts your repo _read/write_ at the same path. A
|
||||||
|
custom `iptables`/`ipset` firewall script denies all egress except the
|
||||||
|
OpenAI API. This gives you deterministic, reproducible runs without needing
|
||||||
|
root on the host. You can use the [`run_in_container.sh`](../codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## System requirements
|
||||||
|
|
||||||
|
| Requirement | Details |
|
||||||
|
| --------------------------- | --------------------------------------------------------------- |
|
||||||
|
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
|
||||||
|
| Node.js | **16 or newer** (Node 20 LTS recommended) |
|
||||||
|
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
|
||||||
|
| RAM | 4-GB minimum (8-GB recommended) |
|
||||||
|
|
||||||
|
> Never run `sudo npm install -g`; fix npm permissions instead.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## CLI reference
|
||||||
|
|
||||||
|
| Command | Purpose | Example |
|
||||||
|
| ------------------------------------ | ----------------------------------- | ------------------------------------ |
|
||||||
|
| `codex` | Interactive REPL | `codex` |
|
||||||
|
| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
|
||||||
|
| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
|
||||||
|
| `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` |
|
||||||
|
|
||||||
|
Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Memory & project docs
|
||||||
|
|
||||||
|
You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down:
|
||||||
|
|
||||||
|
1. `~/.codex/AGENTS.md` - personal global guidance
|
||||||
|
2. `AGENTS.md` at repo root - shared project notes
|
||||||
|
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
|
||||||
|
|
||||||
|
Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Non-interactive / CI mode
|
||||||
|
|
||||||
|
Run Codex head-less in pipelines. Example GitHub Action step:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: Update changelog via Codex
|
||||||
|
run: |
|
||||||
|
npm install -g @openai/codex
|
||||||
|
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
|
||||||
|
codex -a auto-edit --quiet "update CHANGELOG for next release"
|
||||||
|
```
|
||||||
|
|
||||||
|
Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
|
||||||
|
|
||||||
|
## Tracing / verbose logging
|
||||||
|
|
||||||
|
Setting the environment variable `DEBUG=true` prints full API request and response details:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
DEBUG=true codex
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Recipes
|
||||||
|
|
||||||
|
Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns.
|
||||||
|
|
||||||
|
| ✨ | What you type | What happens |
|
||||||
|
| --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
|
||||||
|
| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. |
|
||||||
|
| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. |
|
||||||
|
| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. |
|
||||||
|
| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. |
|
||||||
|
| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. |
|
||||||
|
| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. |
|
||||||
|
| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
<details open>
|
||||||
|
<summary><strong>From npm (Recommended)</strong></summary>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm install -g @openai/codex
|
||||||
|
# or
|
||||||
|
yarn global add @openai/codex
|
||||||
|
# or
|
||||||
|
bun install -g @openai/codex
|
||||||
|
# or
|
||||||
|
pnpm add -g @openai/codex
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Build from source</strong></summary>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone the repository and navigate to the CLI package
|
||||||
|
git clone https://github.com/openai/codex.git
|
||||||
|
cd codex/codex-cli
|
||||||
|
|
||||||
|
# Enable corepack
|
||||||
|
corepack enable
|
||||||
|
|
||||||
|
# Install dependencies and build
|
||||||
|
pnpm install
|
||||||
|
pnpm build
|
||||||
|
|
||||||
|
# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
|
||||||
|
./scripts/install_native_deps.sh
|
||||||
|
|
||||||
|
# Get the usage and the options
|
||||||
|
node ./dist/cli.js --help
|
||||||
|
|
||||||
|
# Run the locally-built CLI directly
|
||||||
|
node ./dist/cli.js
|
||||||
|
|
||||||
|
# Or link the command globally for convenience
|
||||||
|
pnpm link
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Configuration guide
|
||||||
|
|
||||||
|
Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
|
||||||
|
|
||||||
|
### Basic configuration parameters
|
||||||
|
|
||||||
|
| Parameter | Type | Default | Description | Available Options |
|
||||||
|
| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
|
||||||
|
| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
|
||||||
|
| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) |
|
||||||
|
| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) |
|
||||||
|
| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
|
||||||
|
|
||||||
|
### Custom AI provider configuration
|
||||||
|
|
||||||
|
In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:
|
||||||
|
|
||||||
|
| Parameter | Type | Description | Example |
|
||||||
|
| --------- | ------ | --------------------------------------- | ----------------------------- |
|
||||||
|
| `name` | string | Display name of the provider | `"OpenAI"` |
|
||||||
|
| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
|
||||||
|
| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |
|
||||||
|
|
||||||
|
### History configuration
|
||||||
|
|
||||||
|
In the `history` object, you can configure conversation history settings:
|
||||||
|
|
||||||
|
| Parameter | Type | Description | Example Value |
|
||||||
|
| ------------------- | ------- | ------------------------------------------------------ | ------------- |
|
||||||
|
| `maxSize` | number | Maximum number of history entries to save | `1000` |
|
||||||
|
| `saveHistory` | boolean | Whether to save history | `true` |
|
||||||
|
| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |
|
||||||
|
|
||||||
|
### Configuration examples
|
||||||
|
|
||||||
|
1. YAML format (save as `~/.codex/config.yaml`):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
model: o4-mini
|
||||||
|
approvalMode: suggest
|
||||||
|
fullAutoErrorMode: ask-user
|
||||||
|
notify: true
|
||||||
|
```
|
||||||
|
|
||||||
|
2. JSON format (save as `~/.codex/config.json`):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"model": "o4-mini",
|
||||||
|
"approvalMode": "suggest",
|
||||||
|
"fullAutoErrorMode": "ask-user",
|
||||||
|
"notify": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Full configuration example
|
||||||
|
|
||||||
|
Below is a comprehensive example of `config.json` with multiple custom providers:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"model": "o4-mini",
|
||||||
|
"provider": "openai",
|
||||||
|
"providers": {
|
||||||
|
"openai": {
|
||||||
|
"name": "OpenAI",
|
||||||
|
"baseURL": "https://api.openai.com/v1",
|
||||||
|
"envKey": "OPENAI_API_KEY"
|
||||||
|
},
|
||||||
|
"azure": {
|
||||||
|
"name": "AzureOpenAI",
|
||||||
|
"baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
|
||||||
|
"envKey": "AZURE_OPENAI_API_KEY"
|
||||||
|
},
|
||||||
|
"openrouter": {
|
||||||
|
"name": "OpenRouter",
|
||||||
|
"baseURL": "https://openrouter.ai/api/v1",
|
||||||
|
"envKey": "OPENROUTER_API_KEY"
|
||||||
|
},
|
||||||
|
"gemini": {
|
||||||
|
"name": "Gemini",
|
||||||
|
"baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
|
||||||
|
"envKey": "GEMINI_API_KEY"
|
||||||
|
},
|
||||||
|
"ollama": {
|
||||||
|
"name": "Ollama",
|
||||||
|
"baseURL": "http://localhost:11434/v1",
|
||||||
|
"envKey": "OLLAMA_API_KEY"
|
||||||
|
},
|
||||||
|
"mistral": {
|
||||||
|
"name": "Mistral",
|
||||||
|
"baseURL": "https://api.mistral.ai/v1",
|
||||||
|
"envKey": "MISTRAL_API_KEY"
|
||||||
|
},
|
||||||
|
"deepseek": {
|
||||||
|
"name": "DeepSeek",
|
||||||
|
"baseURL": "https://api.deepseek.com",
|
||||||
|
"envKey": "DEEPSEEK_API_KEY"
|
||||||
|
},
|
||||||
|
"xai": {
|
||||||
|
"name": "xAI",
|
||||||
|
"baseURL": "https://api.x.ai/v1",
|
||||||
|
"envKey": "XAI_API_KEY"
|
||||||
|
},
|
||||||
|
"groq": {
|
||||||
|
"name": "Groq",
|
||||||
|
"baseURL": "https://api.groq.com/openai/v1",
|
||||||
|
"envKey": "GROQ_API_KEY"
|
||||||
|
},
|
||||||
|
"arceeai": {
|
||||||
|
"name": "ArceeAI",
|
||||||
|
"baseURL": "https://conductor.arcee.ai/v1",
|
||||||
|
"envKey": "ARCEEAI_API_KEY"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"history": {
|
||||||
|
"maxSize": 1000,
|
||||||
|
"saveHistory": true,
|
||||||
|
"sensitivePatterns": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Custom instructions
|
||||||
|
|
||||||
|
You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
- Always respond with emojis
|
||||||
|
- Only use git commands when explicitly requested
|
||||||
|
```
|
||||||
|
|
||||||
|
### Environment variables setup
|
||||||
|
|
||||||
|
For each AI provider, you need to set the corresponding API key in your environment variables. For example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# OpenAI
|
||||||
|
export OPENAI_API_KEY="your-api-key-here"
|
||||||
|
|
||||||
|
# Azure OpenAI
|
||||||
|
export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
|
||||||
|
export AZURE_OPENAI_API_VERSION="2025-04-01-preview" # optional
|
||||||
|
|
||||||
|
# OpenRouter
|
||||||
|
export OPENROUTER_API_KEY="your-openrouter-key-here"
|
||||||
|
|
||||||
|
# Similarly for other providers
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## FAQ
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>OpenAI released a model called Codex in 2021 - is this related?</summary>
|
||||||
|
|
||||||
|
In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from the CLI tool.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Which models are supported?</summary>
|
||||||
|
|
||||||
|
Any model available with [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
<details>
|
||||||
|
<summary>Why does <code>o3</code> or <code>o4-mini</code> not work for me?</summary>
|
||||||
|
|
||||||
|
It's possible that your [API account needs to be verified](https://help.openai.com/en/articles/10910291-api-organization-verification) in order to start streaming responses and seeing chain of thought summaries from the API. If you're still running into issues, please let us know!
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>How do I stop Codex from editing my files?</summary>
|
||||||
|
|
||||||
|
Codex runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
<details>
|
||||||
|
<summary>Does it work on Windows?</summary>
|
||||||
|
|
||||||
|
Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex is regularly tested on macOS and Linux with Node 20+, and also supports Node 16.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Zero data retention (ZDR) usage
|
||||||
|
|
||||||
|
Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:
|
||||||
|
|
||||||
|
```
|
||||||
|
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
|
||||||
|
```
|
||||||
|
|
||||||
|
You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Codex open source fund
|
||||||
|
|
||||||
|
We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
|
||||||
|
|
||||||
|
- Grants are awarded up to **$25,000** API credits.
|
||||||
|
- Applications are reviewed **on a rolling basis**.
|
||||||
|
|
||||||
|
**Interested? [Apply here](https://openai.com/form/codex-open-source-fund/).**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
|
||||||
|
|
||||||
|
More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
|
||||||
|
|
||||||
|
### Development workflow
|
||||||
|
|
||||||
|
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
|
||||||
|
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
|
||||||
|
- Use `pnpm test:watch` during development for super-fast feedback.
|
||||||
|
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
|
||||||
|
- Before pushing, run the full test/type/lint suite:
|
||||||
|
|
||||||
|
### Git hooks with Husky
|
||||||
|
|
||||||
|
This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:
|
||||||
|
|
||||||
|
- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
|
||||||
|
- **Pre-push hook**: Runs tests and type checking before pushing to the remote
|
||||||
|
|
||||||
|
These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pnpm test && pnpm run lint && pnpm run typecheck
|
||||||
|
```
|
||||||
|
|
||||||
|
- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
|
||||||
|
|
||||||
|
```text
|
||||||
|
I have read the CLA Document and I hereby sign the CLA
|
||||||
|
```
|
||||||
|
|
||||||
|
The CLA-Assistant bot will turn the PR status green once all authors have signed.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Watch mode (tests rerun on change)
|
||||||
|
pnpm test:watch
|
||||||
|
|
||||||
|
# Type-check without emitting files
|
||||||
|
pnpm typecheck
|
||||||
|
|
||||||
|
# Automatically fix lint + prettier issues
|
||||||
|
pnpm lint:fix
|
||||||
|
pnpm format:fix
|
||||||
|
```
|
||||||
|
|
||||||
|
### Debugging
|
||||||
|
|
||||||
|
To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
|
||||||
|
|
||||||
|
- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
|
||||||
|
- Run the CLI with `node --inspect-brk ./dist/cli.js` The program then waits until a debugger is attached before proceeding. Options:
|
||||||
|
- In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
|
||||||
|
- Go to <chrome://inspect> in Chrome and find **localhost:9229** and click **trace**
|
||||||
|
|
||||||
|
### Writing high-impact code changes
|
||||||
|
|
||||||
|
1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written.
|
||||||
|
2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions.
|
||||||
|
3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects.
|
||||||
|
4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier.
|
||||||
|
|
||||||
|
### Opening a pull request
|
||||||
|
|
||||||
|
- Fill in the PR template (or include similar information) - **What? Why? How?**
|
||||||
|
- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
|
||||||
|
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
|
||||||
|
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
|
||||||
|
|
||||||
|
### Review process
|
||||||
|
|
||||||
|
1. One maintainer will be assigned as a primary reviewer.
|
||||||
|
2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
|
||||||
|
3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
|
||||||
|
|
||||||
|
### Community values
|
||||||
|
|
||||||
|
- **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/).
|
||||||
|
- **Assume good intent.** Written communication is hard - err on the side of generosity.
|
||||||
|
- **Teach & learn.** If you spot something confusing, open an issue or PR with improvements.
|
||||||
|
|
||||||
|
### Getting help
|
||||||
|
|
||||||
|
If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help.
|
||||||
|
|
||||||
|
Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket:
|
||||||
|
|
||||||
|
### Contributor license agreement (CLA)
|
||||||
|
|
||||||
|
All contributors **must** accept the CLA. The process is lightweight:
|
||||||
|
|
||||||
|
1. Open your pull request.
|
||||||
|
2. Paste the following comment (or reply `recheck` if you've signed before):
|
||||||
|
|
||||||
|
```text
|
||||||
|
I have read the CLA Document and I hereby sign the CLA
|
||||||
|
```
|
||||||
|
|
||||||
|
3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed.
|
||||||
|
|
||||||
|
No special Git commands, email attachments, or commit footers required.
|
||||||
|
|
||||||
|
#### Quick fixes
|
||||||
|
|
||||||
|
| Scenario | Command |
|
||||||
|
| ----------------- | ------------------------------------------------ |
|
||||||
|
| Amend last commit | `git commit --amend -s --no-edit && git push -f` |
|
||||||
|
|
||||||
|
The **DCO check** blocks merges until every commit in the PR carries the footer (with squash this is just the one).
|
||||||
|
|
||||||
|
### Releasing `codex`
|
||||||
|
|
||||||
|
To publish a new version of the CLI you first need to stage the npm package. A
|
||||||
|
helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the
|
||||||
|
`codex-cli` folder run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
|
||||||
|
pnpm stage-release
|
||||||
|
|
||||||
|
# Optionally specify the temp directory to reuse between runs.
|
||||||
|
RELEASE_DIR=$(mktemp -d)
|
||||||
|
pnpm stage-release --tmp "$RELEASE_DIR"
|
||||||
|
|
||||||
|
# "Fat" package that additionally bundles the native Rust CLI binaries for
|
||||||
|
# Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1.
|
||||||
|
pnpm stage-release --native
|
||||||
|
```
|
||||||
|
|
||||||
|
Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:
|
||||||
|
|
||||||
|
```
|
||||||
|
cd "$RELEASE_DIR"
|
||||||
|
npm publish
|
||||||
|
```
|
||||||
|
|
||||||
|
### Alternative build options
|
||||||
|
|
||||||
|
#### Nix flake development
|
||||||
|
|
||||||
|
Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
|
||||||
|
|
||||||
|
Enter a Nix development shell:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use either one of the commands according to which implementation you want to work with
|
||||||
|
nix develop .#codex-cli # For entering codex-cli specific shell
|
||||||
|
nix develop .#codex-rs # For entering codex-rs specific shell
|
||||||
|
```
|
||||||
|
|
||||||
|
This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
|
||||||
|
|
||||||
|
Build and run the CLI directly:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use either one of the commands according to which implementation you want to work with
|
||||||
|
nix build .#codex-cli # For building codex-cli
|
||||||
|
nix build .#codex-rs # For building codex-rs
|
||||||
|
./result/bin/codex --help
|
||||||
|
```
|
||||||
|
|
||||||
|
Run the CLI via the flake app:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use either one of the commands according to which implementation you want to work with
|
||||||
|
nix run .#codex-cli # For running codex-cli
|
||||||
|
nix run .#codex-rs # For running codex-rs
|
||||||
|
```
|
||||||
|
|
||||||
|
**Use direnv with flakes**
|
||||||
|
|
||||||
|
If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd codex-rs
|
||||||
|
echo "use flake ../flake.nix#codex-rs" >> .envrc && direnv allow
|
||||||
|
cd codex-cli
|
||||||
|
echo "use flake ../flake.nix#codex-cli" >> .envrc && direnv allow
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Security & responsible AI
|
||||||
|
|
||||||
|
Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env node
|
#!/usr/bin/env node
|
||||||
// Unified entry point for the LLMX CLI.
|
// Unified entry point for the Codex CLI.
|
||||||
|
|
||||||
import { spawn } from "node:child_process";
|
import { spawn } from "node:child_process";
|
||||||
import { existsSync } from "fs";
|
import { existsSync } from "fs";
|
||||||
@@ -61,8 +61,8 @@ if (!targetTriple) {
|
|||||||
|
|
||||||
const vendorRoot = path.join(__dirname, "..", "vendor");
|
const vendorRoot = path.join(__dirname, "..", "vendor");
|
||||||
const archRoot = path.join(vendorRoot, targetTriple);
|
const archRoot = path.join(vendorRoot, targetTriple);
|
||||||
const llmxBinaryName = process.platform === "win32" ? "llmx.exe" : "llmx";
|
const codexBinaryName = process.platform === "win32" ? "codex.exe" : "codex";
|
||||||
const binaryPath = path.join(archRoot, "llmx", llmxBinaryName);
|
const binaryPath = path.join(archRoot, "codex", codexBinaryName);
|
||||||
|
|
||||||
// Use an asynchronous spawn instead of spawnSync so that Node is able to
|
// Use an asynchronous spawn instead of spawnSync so that Node is able to
|
||||||
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
|
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
|
||||||
@@ -81,7 +81,7 @@ function getUpdatedPath(newDirs) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Use heuristics to detect the package manager that was used to install LLMX
|
* Use heuristics to detect the package manager that was used to install Codex
|
||||||
* in order to give the user a hint about how to update it.
|
* in order to give the user a hint about how to update it.
|
||||||
*/
|
*/
|
||||||
function detectPackageManager() {
|
function detectPackageManager() {
|
||||||
@@ -116,8 +116,8 @@ const updatedPath = getUpdatedPath(additionalDirs);
|
|||||||
const env = { ...process.env, PATH: updatedPath };
|
const env = { ...process.env, PATH: updatedPath };
|
||||||
const packageManagerEnvVar =
|
const packageManagerEnvVar =
|
||||||
detectPackageManager() === "bun"
|
detectPackageManager() === "bun"
|
||||||
? "LLMX_MANAGED_BY_BUN"
|
? "CODEX_MANAGED_BY_BUN"
|
||||||
: "LLMX_MANAGED_BY_NPM";
|
: "CODEX_MANAGED_BY_NPM";
|
||||||
env[packageManagerEnvVar] = "1";
|
env[packageManagerEnvVar] = "1";
|
||||||
|
|
||||||
const child = spawn(binaryPath, process.argv.slice(2), {
|
const child = spawn(binaryPath, process.argv.slice(2), {
|
||||||
@@ -1,15 +1,14 @@
|
|||||||
{
|
{
|
||||||
"name": "@llmx/llmx",
|
"name": "@openai/codex",
|
||||||
"version": "0.1.0",
|
"version": "0.0.0-dev",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "@llmx/llmx",
|
"name": "@openai/codex",
|
||||||
"version": "0.1.0",
|
"version": "0.0.0-dev",
|
||||||
"license": "Apache-2.0",
|
"license": "Apache-2.0",
|
||||||
"bin": {
|
"bin": {
|
||||||
"llmx": "bin/llmx.js"
|
"codex": "bin/codex.js"
|
||||||
},
|
},
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=16"
|
"node": ">=16"
|
||||||
21
codex-cli/package.json
Normal file
21
codex-cli/package.json
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
{
|
||||||
|
"name": "@openai/codex",
|
||||||
|
"version": "0.0.0-dev",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"bin": {
|
||||||
|
"codex": "bin/codex.js"
|
||||||
|
},
|
||||||
|
"type": "module",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=16"
|
||||||
|
},
|
||||||
|
"files": [
|
||||||
|
"bin",
|
||||||
|
"vendor"
|
||||||
|
],
|
||||||
|
"repository": {
|
||||||
|
"type": "git",
|
||||||
|
"url": "git+https://github.com/openai/codex.git",
|
||||||
|
"directory": "codex-cli"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -6,14 +6,14 @@ example, to stage the CLI, responses proxy, and SDK packages for version `0.6.0`
|
|||||||
```bash
|
```bash
|
||||||
./scripts/stage_npm_packages.py \
|
./scripts/stage_npm_packages.py \
|
||||||
--release-version 0.6.0 \
|
--release-version 0.6.0 \
|
||||||
--package llmx \
|
--package codex \
|
||||||
--package llmx-responses-api-proxy \
|
--package codex-responses-api-proxy \
|
||||||
--package llmx-sdk
|
--package codex-sdk
|
||||||
```
|
```
|
||||||
|
|
||||||
This downloads the native artifacts once, hydrates `vendor/` for each package, and writes
|
This downloads the native artifacts once, hydrates `vendor/` for each package, and writes
|
||||||
tarballs to `dist/npm/`.
|
tarballs to `dist/npm/`.
|
||||||
|
|
||||||
If you need to invoke `build_npm_package.py` directly, run
|
If you need to invoke `build_npm_package.py` directly, run
|
||||||
`llmx-cli/scripts/install_native_deps.py` first and pass `--vendor-src` pointing to the
|
`codex-cli/scripts/install_native_deps.py` first and pass `--vendor-src` pointing to the
|
||||||
directory that contains the populated `vendor/` tree.
|
directory that contains the populated `vendor/` tree.
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
"""Stage and optionally package the @valknar/llmx npm module."""
|
"""Stage and optionally package the @openai/codex npm module."""
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import json
|
import json
|
||||||
@@ -12,28 +12,28 @@ from pathlib import Path
|
|||||||
SCRIPT_DIR = Path(__file__).resolve().parent
|
SCRIPT_DIR = Path(__file__).resolve().parent
|
||||||
CODEX_CLI_ROOT = SCRIPT_DIR.parent
|
CODEX_CLI_ROOT = SCRIPT_DIR.parent
|
||||||
REPO_ROOT = CODEX_CLI_ROOT.parent
|
REPO_ROOT = CODEX_CLI_ROOT.parent
|
||||||
RESPONSES_API_PROXY_NPM_ROOT = REPO_ROOT / "llmx-rs" / "responses-api-proxy" / "npm"
|
RESPONSES_API_PROXY_NPM_ROOT = REPO_ROOT / "codex-rs" / "responses-api-proxy" / "npm"
|
||||||
CODEX_SDK_ROOT = REPO_ROOT / "sdk" / "typescript"
|
CODEX_SDK_ROOT = REPO_ROOT / "sdk" / "typescript"
|
||||||
|
|
||||||
PACKAGE_NATIVE_COMPONENTS: dict[str, list[str]] = {
|
PACKAGE_NATIVE_COMPONENTS: dict[str, list[str]] = {
|
||||||
"llmx": ["llmx", "rg"],
|
"codex": ["codex", "rg"],
|
||||||
"llmx-responses-api-proxy": ["llmx-responses-api-proxy"],
|
"codex-responses-api-proxy": ["codex-responses-api-proxy"],
|
||||||
"llmx-sdk": ["llmx"],
|
"codex-sdk": ["codex"],
|
||||||
}
|
}
|
||||||
COMPONENT_DEST_DIR: dict[str, str] = {
|
COMPONENT_DEST_DIR: dict[str, str] = {
|
||||||
"llmx": "llmx",
|
"codex": "codex",
|
||||||
"llmx-responses-api-proxy": "llmx-responses-api-proxy",
|
"codex-responses-api-proxy": "codex-responses-api-proxy",
|
||||||
"rg": "path",
|
"rg": "path",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def parse_args() -> argparse.Namespace:
|
def parse_args() -> argparse.Namespace:
|
||||||
parser = argparse.ArgumentParser(description="Build or stage the LLMX CLI npm package.")
|
parser = argparse.ArgumentParser(description="Build or stage the Codex CLI npm package.")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--package",
|
"--package",
|
||||||
choices=("llmx", "llmx-responses-api-proxy", "llmx-sdk"),
|
choices=("codex", "codex-responses-api-proxy", "codex-sdk"),
|
||||||
default="llmx",
|
default="codex",
|
||||||
help="Which npm package to stage (default: llmx).",
|
help="Which npm package to stage (default: codex).",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--version",
|
"--version",
|
||||||
@@ -107,18 +107,18 @@ def main() -> int:
|
|||||||
|
|
||||||
if release_version:
|
if release_version:
|
||||||
staging_dir_str = str(staging_dir)
|
staging_dir_str = str(staging_dir)
|
||||||
if package == "llmx":
|
if package == "codex":
|
||||||
print(
|
print(
|
||||||
f"Staged version {version} for release in {staging_dir_str}\n\n"
|
f"Staged version {version} for release in {staging_dir_str}\n\n"
|
||||||
"Verify the CLI:\n"
|
"Verify the CLI:\n"
|
||||||
f" node {staging_dir_str}/bin/llmx.js --version\n"
|
f" node {staging_dir_str}/bin/codex.js --version\n"
|
||||||
f" node {staging_dir_str}/bin/llmx.js --help\n\n"
|
f" node {staging_dir_str}/bin/codex.js --help\n\n"
|
||||||
)
|
)
|
||||||
elif package == "llmx-responses-api-proxy":
|
elif package == "codex-responses-api-proxy":
|
||||||
print(
|
print(
|
||||||
f"Staged version {version} for release in {staging_dir_str}\n\n"
|
f"Staged version {version} for release in {staging_dir_str}\n\n"
|
||||||
"Verify the responses API proxy:\n"
|
"Verify the responses API proxy:\n"
|
||||||
f" node {staging_dir_str}/bin/llmx-responses-api-proxy.js --help\n\n"
|
f" node {staging_dir_str}/bin/codex-responses-api-proxy.js --help\n\n"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
print(
|
print(
|
||||||
@@ -155,10 +155,10 @@ def prepare_staging_dir(staging_dir: Path | None) -> tuple[Path, bool]:
|
|||||||
|
|
||||||
|
|
||||||
def stage_sources(staging_dir: Path, version: str, package: str) -> None:
|
def stage_sources(staging_dir: Path, version: str, package: str) -> None:
|
||||||
if package == "llmx":
|
if package == "codex":
|
||||||
bin_dir = staging_dir / "bin"
|
bin_dir = staging_dir / "bin"
|
||||||
bin_dir.mkdir(parents=True, exist_ok=True)
|
bin_dir.mkdir(parents=True, exist_ok=True)
|
||||||
shutil.copy2(CODEX_CLI_ROOT / "bin" / "llmx.js", bin_dir / "llmx.js")
|
shutil.copy2(CODEX_CLI_ROOT / "bin" / "codex.js", bin_dir / "codex.js")
|
||||||
rg_manifest = CODEX_CLI_ROOT / "bin" / "rg"
|
rg_manifest = CODEX_CLI_ROOT / "bin" / "rg"
|
||||||
if rg_manifest.exists():
|
if rg_manifest.exists():
|
||||||
shutil.copy2(rg_manifest, bin_dir / "rg")
|
shutil.copy2(rg_manifest, bin_dir / "rg")
|
||||||
@@ -168,18 +168,18 @@ def stage_sources(staging_dir: Path, version: str, package: str) -> None:
|
|||||||
shutil.copy2(readme_src, staging_dir / "README.md")
|
shutil.copy2(readme_src, staging_dir / "README.md")
|
||||||
|
|
||||||
package_json_path = CODEX_CLI_ROOT / "package.json"
|
package_json_path = CODEX_CLI_ROOT / "package.json"
|
||||||
elif package == "llmx-responses-api-proxy":
|
elif package == "codex-responses-api-proxy":
|
||||||
bin_dir = staging_dir / "bin"
|
bin_dir = staging_dir / "bin"
|
||||||
bin_dir.mkdir(parents=True, exist_ok=True)
|
bin_dir.mkdir(parents=True, exist_ok=True)
|
||||||
launcher_src = RESPONSES_API_PROXY_NPM_ROOT / "bin" / "llmx-responses-api-proxy.js"
|
launcher_src = RESPONSES_API_PROXY_NPM_ROOT / "bin" / "codex-responses-api-proxy.js"
|
||||||
shutil.copy2(launcher_src, bin_dir / "llmx-responses-api-proxy.js")
|
shutil.copy2(launcher_src, bin_dir / "codex-responses-api-proxy.js")
|
||||||
|
|
||||||
readme_src = RESPONSES_API_PROXY_NPM_ROOT / "README.md"
|
readme_src = RESPONSES_API_PROXY_NPM_ROOT / "README.md"
|
||||||
if readme_src.exists():
|
if readme_src.exists():
|
||||||
shutil.copy2(readme_src, staging_dir / "README.md")
|
shutil.copy2(readme_src, staging_dir / "README.md")
|
||||||
|
|
||||||
package_json_path = RESPONSES_API_PROXY_NPM_ROOT / "package.json"
|
package_json_path = RESPONSES_API_PROXY_NPM_ROOT / "package.json"
|
||||||
elif package == "llmx-sdk":
|
elif package == "codex-sdk":
|
||||||
package_json_path = CODEX_SDK_ROOT / "package.json"
|
package_json_path = CODEX_SDK_ROOT / "package.json"
|
||||||
stage_codex_sdk_sources(staging_dir)
|
stage_codex_sdk_sources(staging_dir)
|
||||||
else:
|
else:
|
||||||
@@ -189,7 +189,7 @@ def stage_sources(staging_dir: Path, version: str, package: str) -> None:
|
|||||||
package_json = json.load(fh)
|
package_json = json.load(fh)
|
||||||
package_json["version"] = version
|
package_json["version"] = version
|
||||||
|
|
||||||
if package == "llmx-sdk":
|
if package == "codex-sdk":
|
||||||
scripts = package_json.get("scripts")
|
scripts = package_json.get("scripts")
|
||||||
if isinstance(scripts, dict):
|
if isinstance(scripts, dict):
|
||||||
scripts.pop("prepare", None)
|
scripts.pop("prepare", None)
|
||||||
@@ -260,10 +260,9 @@ def copy_native_binaries(vendor_src: Path, staging_dir: Path, components: list[s
|
|||||||
|
|
||||||
src_component_dir = target_dir / dest_dir_name
|
src_component_dir = target_dir / dest_dir_name
|
||||||
if not src_component_dir.exists():
|
if not src_component_dir.exists():
|
||||||
print(
|
raise RuntimeError(
|
||||||
f"⚠️ Skipping {target_dir.name}/{dest_dir_name}: component not found (build may have failed)"
|
f"Missing native component '{component}' in vendor source: {src_component_dir}"
|
||||||
)
|
)
|
||||||
continue
|
|
||||||
|
|
||||||
dest_component_dir = dest_target_dir / dest_dir_name
|
dest_component_dir = dest_target_dir / dest_dir_name
|
||||||
if dest_component_dir.exists():
|
if dest_component_dir.exists():
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
"""Install LLMX native binaries (Rust CLI plus ripgrep helpers)."""
|
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import json
|
import json
|
||||||
@@ -17,10 +17,10 @@ from urllib.parse import urlparse
|
|||||||
from urllib.request import urlopen
|
from urllib.request import urlopen
|
||||||
|
|
||||||
SCRIPT_DIR = Path(__file__).resolve().parent
|
SCRIPT_DIR = Path(__file__).resolve().parent
|
||||||
LLMX_CLI_ROOT = SCRIPT_DIR.parent
|
CODEX_CLI_ROOT = SCRIPT_DIR.parent
|
||||||
DEFAULT_WORKFLOW_URL = "https://github.com/valknar/llmx/actions/runs/17952349351" # rust-v0.40.0
|
DEFAULT_WORKFLOW_URL = "https://github.com/openai/codex/actions/runs/17952349351" # rust-v0.40.0
|
||||||
VENDOR_DIR_NAME = "vendor"
|
VENDOR_DIR_NAME = "vendor"
|
||||||
RG_MANIFEST = LLMX_CLI_ROOT / "bin" / "rg"
|
RG_MANIFEST = CODEX_CLI_ROOT / "bin" / "rg"
|
||||||
BINARY_TARGETS = (
|
BINARY_TARGETS = (
|
||||||
"x86_64-unknown-linux-musl",
|
"x86_64-unknown-linux-musl",
|
||||||
"aarch64-unknown-linux-musl",
|
"aarch64-unknown-linux-musl",
|
||||||
@@ -39,15 +39,15 @@ class BinaryComponent:
|
|||||||
|
|
||||||
|
|
||||||
BINARY_COMPONENTS = {
|
BINARY_COMPONENTS = {
|
||||||
"llmx": BinaryComponent(
|
"codex": BinaryComponent(
|
||||||
artifact_prefix="llmx",
|
artifact_prefix="codex",
|
||||||
dest_dir="llmx",
|
dest_dir="codex",
|
||||||
binary_basename="llmx",
|
binary_basename="codex",
|
||||||
),
|
),
|
||||||
"llmx-responses-api-proxy": BinaryComponent(
|
"codex-responses-api-proxy": BinaryComponent(
|
||||||
artifact_prefix="llmx-responses-api-proxy",
|
artifact_prefix="codex-responses-api-proxy",
|
||||||
dest_dir="llmx-responses-api-proxy",
|
dest_dir="codex-responses-api-proxy",
|
||||||
binary_basename="llmx-responses-api-proxy",
|
binary_basename="codex-responses-api-proxy",
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -64,7 +64,7 @@ DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]
|
|||||||
|
|
||||||
|
|
||||||
def parse_args() -> argparse.Namespace:
|
def parse_args() -> argparse.Namespace:
|
||||||
parser = argparse.ArgumentParser(description="Install native LLMX binaries.")
|
parser = argparse.ArgumentParser(description="Install native Codex binaries.")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--workflow-url",
|
"--workflow-url",
|
||||||
help=(
|
help=(
|
||||||
@@ -97,11 +97,11 @@ def parse_args() -> argparse.Namespace:
|
|||||||
def main() -> int:
|
def main() -> int:
|
||||||
args = parse_args()
|
args = parse_args()
|
||||||
|
|
||||||
codex_cli_root = (args.root or LLMX_CLI_ROOT).resolve()
|
codex_cli_root = (args.root or CODEX_CLI_ROOT).resolve()
|
||||||
vendor_dir = codex_cli_root / VENDOR_DIR_NAME
|
vendor_dir = codex_cli_root / VENDOR_DIR_NAME
|
||||||
vendor_dir.mkdir(parents=True, exist_ok=True)
|
vendor_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
components = args.components or ["llmx", "rg"]
|
components = args.components or ["codex", "rg"]
|
||||||
|
|
||||||
workflow_url = (args.workflow_url or DEFAULT_WORKFLOW_URL).strip()
|
workflow_url = (args.workflow_url or DEFAULT_WORKFLOW_URL).strip()
|
||||||
if not workflow_url:
|
if not workflow_url:
|
||||||
@@ -110,7 +110,7 @@ def main() -> int:
|
|||||||
workflow_id = workflow_url.rstrip("/").split("/")[-1]
|
workflow_id = workflow_url.rstrip("/").split("/")[-1]
|
||||||
print(f"Downloading native artifacts from workflow {workflow_id}...")
|
print(f"Downloading native artifacts from workflow {workflow_id}...")
|
||||||
|
|
||||||
with tempfile.TemporaryDirectory(prefix="llmx-native-artifacts-") as artifacts_dir_str:
|
with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
|
||||||
artifacts_dir = Path(artifacts_dir_str)
|
artifacts_dir = Path(artifacts_dir_str)
|
||||||
_download_artifacts(workflow_id, artifacts_dir)
|
_download_artifacts(workflow_id, artifacts_dir)
|
||||||
install_binary_components(
|
install_binary_components(
|
||||||
@@ -197,7 +197,7 @@ def _download_artifacts(workflow_id: str, dest_dir: Path) -> None:
|
|||||||
"--dir",
|
"--dir",
|
||||||
str(dest_dir),
|
str(dest_dir),
|
||||||
"--repo",
|
"--repo",
|
||||||
"valknarthing/llmx",
|
"openai/codex",
|
||||||
workflow_id,
|
workflow_id,
|
||||||
]
|
]
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
@@ -236,8 +236,7 @@ def install_binary_components(
|
|||||||
}
|
}
|
||||||
for future in as_completed(futures):
|
for future in as_completed(futures):
|
||||||
installed_path = future.result()
|
installed_path = future.result()
|
||||||
if installed_path is not None:
|
print(f" installed {installed_path}")
|
||||||
print(f" installed {installed_path}")
|
|
||||||
|
|
||||||
|
|
||||||
def _install_single_binary(
|
def _install_single_binary(
|
||||||
@@ -245,13 +244,12 @@ def _install_single_binary(
|
|||||||
vendor_dir: Path,
|
vendor_dir: Path,
|
||||||
target: str,
|
target: str,
|
||||||
component: BinaryComponent,
|
component: BinaryComponent,
|
||||||
) -> Path | None:
|
) -> Path:
|
||||||
artifact_subdir = artifacts_dir / target
|
artifact_subdir = artifacts_dir / target
|
||||||
archive_name = _archive_name_for_target(component.artifact_prefix, target)
|
archive_name = _archive_name_for_target(component.artifact_prefix, target)
|
||||||
archive_path = artifact_subdir / archive_name
|
archive_path = artifact_subdir / archive_name
|
||||||
if not archive_path.exists():
|
if not archive_path.exists():
|
||||||
print(f" ⚠️ Skipping {target}: artifact not found (build may have failed)")
|
raise FileNotFoundError(f"Expected artifact not found: {archive_path}")
|
||||||
return None
|
|
||||||
|
|
||||||
dest_dir = vendor_dir / target / component.dest_dir
|
dest_dir = vendor_dir / target / component.dest_dir
|
||||||
dest_dir.mkdir(parents=True, exist_ok=True)
|
dest_dir.mkdir(parents=True, exist_ok=True)
|
||||||
2893
llmx-rs/Cargo.lock → codex-rs/Cargo.lock
generated
2893
llmx-rs/Cargo.lock → codex-rs/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -8,7 +8,7 @@ members = [
|
|||||||
"apply-patch",
|
"apply-patch",
|
||||||
"arg0",
|
"arg0",
|
||||||
"feedback",
|
"feedback",
|
||||||
"llmx-backend-openapi-models",
|
"codex-backend-openapi-models",
|
||||||
"cloud-tasks",
|
"cloud-tasks",
|
||||||
"cloud-tasks-client",
|
"cloud-tasks-client",
|
||||||
"cli",
|
"cli",
|
||||||
@@ -19,7 +19,6 @@ members = [
|
|||||||
"keyring-store",
|
"keyring-store",
|
||||||
"file-search",
|
"file-search",
|
||||||
"linux-sandbox",
|
"linux-sandbox",
|
||||||
"windows-sandbox-rs",
|
|
||||||
"login",
|
"login",
|
||||||
"mcp-server",
|
"mcp-server",
|
||||||
"mcp-types",
|
"mcp-types",
|
||||||
@@ -43,7 +42,7 @@ members = [
|
|||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
version = "0.1.9"
|
version = "0.0.0"
|
||||||
# Track the edition for all workspace crates in one place. Individual
|
# Track the edition for all workspace crates in one place. Individual
|
||||||
# crates can still override this value, but keeping it here means new
|
# crates can still override this value, but keeping it here means new
|
||||||
# crates created with `cargo new -w ...` automatically inherit the 2024
|
# crates created with `cargo new -w ...` automatically inherit the 2024
|
||||||
@@ -53,40 +52,40 @@ edition = "2024"
|
|||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
# Internal
|
# Internal
|
||||||
app_test_support = { path = "app-server/tests/common" }
|
app_test_support = { path = "app-server/tests/common" }
|
||||||
llmx-ansi-escape = { path = "ansi-escape" }
|
codex-ansi-escape = { path = "ansi-escape" }
|
||||||
llmx-app-server = { path = "app-server" }
|
codex-app-server = { path = "app-server" }
|
||||||
llmx-app-server-protocol = { path = "app-server-protocol" }
|
codex-app-server-protocol = { path = "app-server-protocol" }
|
||||||
llmx-apply-patch = { path = "apply-patch" }
|
codex-apply-patch = { path = "apply-patch" }
|
||||||
llmx-arg0 = { path = "arg0" }
|
codex-arg0 = { path = "arg0" }
|
||||||
llmx-async-utils = { path = "async-utils" }
|
codex-async-utils = { path = "async-utils" }
|
||||||
llmx-backend-client = { path = "backend-client" }
|
codex-backend-client = { path = "backend-client" }
|
||||||
llmx-chatgpt = { path = "chatgpt" }
|
codex-chatgpt = { path = "chatgpt" }
|
||||||
llmx-common = { path = "common" }
|
codex-common = { path = "common" }
|
||||||
llmx-core = { path = "core" }
|
codex-core = { path = "core" }
|
||||||
llmx-exec = { path = "exec" }
|
codex-exec = { path = "exec" }
|
||||||
llmx-feedback = { path = "feedback" }
|
codex-feedback = { path = "feedback" }
|
||||||
llmx-file-search = { path = "file-search" }
|
codex-file-search = { path = "file-search" }
|
||||||
llmx-git = { path = "utils/git" }
|
codex-git = { path = "utils/git" }
|
||||||
llmx-keyring-store = { path = "keyring-store" }
|
codex-keyring-store = { path = "keyring-store" }
|
||||||
llmx-linux-sandbox = { path = "linux-sandbox" }
|
codex-linux-sandbox = { path = "linux-sandbox" }
|
||||||
llmx-login = { path = "login" }
|
codex-login = { path = "login" }
|
||||||
llmx-mcp-server = { path = "mcp-server" }
|
codex-mcp-server = { path = "mcp-server" }
|
||||||
llmx-ollama = { path = "ollama" }
|
codex-ollama = { path = "ollama" }
|
||||||
llmx-otel = { path = "otel" }
|
codex-otel = { path = "otel" }
|
||||||
llmx-process-hardening = { path = "process-hardening" }
|
codex-process-hardening = { path = "process-hardening" }
|
||||||
llmx-protocol = { path = "protocol" }
|
codex-protocol = { path = "protocol" }
|
||||||
llmx-responses-api-proxy = { path = "responses-api-proxy" }
|
codex-responses-api-proxy = { path = "responses-api-proxy" }
|
||||||
llmx-rmcp-client = { path = "rmcp-client" }
|
codex-rmcp-client = { path = "rmcp-client" }
|
||||||
llmx-stdio-to-uds = { path = "stdio-to-uds" }
|
codex-stdio-to-uds = { path = "stdio-to-uds" }
|
||||||
llmx-tui = { path = "tui" }
|
codex-tui = { path = "tui" }
|
||||||
llmx-utils-cache = { path = "utils/cache" }
|
codex-utils-cache = { path = "utils/cache" }
|
||||||
llmx-utils-image = { path = "utils/image" }
|
codex-utils-image = { path = "utils/image" }
|
||||||
llmx-utils-json-to-toml = { path = "utils/json-to-toml" }
|
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
|
||||||
llmx-utils-pty = { path = "utils/pty" }
|
codex-utils-pty = { path = "utils/pty" }
|
||||||
llmx-utils-readiness = { path = "utils/readiness" }
|
codex-utils-readiness = { path = "utils/readiness" }
|
||||||
llmx-utils-string = { path = "utils/string" }
|
codex-utils-string = { path = "utils/string" }
|
||||||
llmx-utils-tokenizer = { path = "utils/tokenizer" }
|
codex-utils-tokenizer = { path = "utils/tokenizer" }
|
||||||
llmx-windows-sandbox = { path = "windows-sandbox-rs" }
|
codex-windows-sandbox = { path = "windows-sandbox-rs" }
|
||||||
core_test_support = { path = "core/tests/common" }
|
core_test_support = { path = "core/tests/common" }
|
||||||
mcp-types = { path = "mcp-types" }
|
mcp-types = { path = "mcp-types" }
|
||||||
mcp_test_support = { path = "mcp-server/tests/common" }
|
mcp_test_support = { path = "mcp-server/tests/common" }
|
||||||
@@ -258,8 +257,8 @@ unwrap_used = "deny"
|
|||||||
ignored = [
|
ignored = [
|
||||||
"icu_provider",
|
"icu_provider",
|
||||||
"openssl-sys",
|
"openssl-sys",
|
||||||
"llmx-utils-readiness",
|
"codex-utils-readiness",
|
||||||
"llmx-utils-tokenizer",
|
"codex-utils-tokenizer",
|
||||||
]
|
]
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
@@ -268,7 +267,7 @@ lto = "fat"
|
|||||||
# remove everything to make the binary as small as possible.
|
# remove everything to make the binary as small as possible.
|
||||||
strip = "symbols"
|
strip = "symbols"
|
||||||
|
|
||||||
# See https://github.com/openai/llmx/issues/1411 for details.
|
# See https://github.com/openai/codex/issues/1411 for details.
|
||||||
codegen-units = 1
|
codegen-units = 1
|
||||||
|
|
||||||
[profile.ci-test]
|
[profile.ci-test]
|
||||||
98
codex-rs/README.md
Normal file
98
codex-rs/README.md
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
# Codex CLI (Rust Implementation)
|
||||||
|
|
||||||
|
We provide Codex CLI as a standalone, native executable to ensure a zero-dependency install.
|
||||||
|
|
||||||
|
## Installing Codex
|
||||||
|
|
||||||
|
Today, the easiest way to install Codex is via `npm`:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npm i -g @openai/codex
|
||||||
|
codex
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also install via Homebrew (`brew install --cask codex`) or download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases).
|
||||||
|
|
||||||
|
## Documentation quickstart
|
||||||
|
|
||||||
|
- First run with Codex? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management.
|
||||||
|
- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md).
|
||||||
|
|
||||||
|
## What's new in the Rust CLI
|
||||||
|
|
||||||
|
The Rust implementation is now the maintained Codex CLI and serves as the default experience. It includes a number of features that the legacy TypeScript CLI never supported.
|
||||||
|
|
||||||
|
### Config
|
||||||
|
|
||||||
|
Codex supports a rich set of configuration options. Note that the Rust CLI uses `config.toml` instead of `config.json`. See [`docs/config.md`](../docs/config.md) for details.
|
||||||
|
|
||||||
|
### Model Context Protocol Support
|
||||||
|
|
||||||
|
#### MCP client
|
||||||
|
|
||||||
|
Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.
|
||||||
|
|
||||||
|
#### MCP server (experimental)
|
||||||
|
|
||||||
|
Codex can be launched as an MCP _server_ by running `codex mcp-server`. This allows _other_ MCP clients to use Codex as a tool for another agent.
|
||||||
|
|
||||||
|
Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npx @modelcontextprotocol/inspector codex mcp-server
|
||||||
|
```
|
||||||
|
|
||||||
|
Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.toml`, and `codex mcp-server` to run the MCP server directly.
|
||||||
|
|
||||||
|
### Notifications
|
||||||
|
|
||||||
|
You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS.
|
||||||
|
|
||||||
|
### `codex exec` to run Codex programmatically/non-interactively
|
||||||
|
|
||||||
|
To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on.
|
||||||
|
|
||||||
|
### Experimenting with the Codex Sandbox
|
||||||
|
|
||||||
|
To test to see what happens when a command is run under the sandbox provided by Codex, we provide the following subcommands in Codex CLI:
|
||||||
|
|
||||||
|
```
|
||||||
|
# macOS
|
||||||
|
codex sandbox macos [--full-auto] [--log-denials] [COMMAND]...
|
||||||
|
|
||||||
|
# Linux
|
||||||
|
codex sandbox linux [--full-auto] [COMMAND]...
|
||||||
|
|
||||||
|
# Windows
|
||||||
|
codex sandbox windows [--full-auto] [COMMAND]...
|
||||||
|
|
||||||
|
# Legacy aliases
|
||||||
|
codex debug seatbelt [--full-auto] [--log-denials] [COMMAND]...
|
||||||
|
codex debug landlock [--full-auto] [COMMAND]...
|
||||||
|
```
|
||||||
|
|
||||||
|
### Selecting a sandbox policy via `--sandbox`
|
||||||
|
|
||||||
|
The Rust CLI exposes a dedicated `--sandbox` (`-s`) flag that lets you pick the sandbox policy **without** having to reach for the generic `-c/--config` option:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# Run Codex with the default, read-only sandbox
|
||||||
|
codex --sandbox read-only
|
||||||
|
|
||||||
|
# Allow the agent to write within the current workspace while still blocking network access
|
||||||
|
codex --sandbox workspace-write
|
||||||
|
|
||||||
|
# Danger! Disable sandboxing entirely (only do this if you are already running in a container or other isolated env)
|
||||||
|
codex --sandbox danger-full-access
|
||||||
|
```
|
||||||
|
|
||||||
|
The same setting can be persisted in `~/.codex/config.toml` via the top-level `sandbox_mode = "MODE"` key, e.g. `sandbox_mode = "workspace-write"`.
|
||||||
|
|
||||||
|
## Code Organization
|
||||||
|
|
||||||
|
This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates:
|
||||||
|
|
||||||
|
- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codex.
|
||||||
|
- [`exec/`](./exec) "headless" CLI for use in automation.
|
||||||
|
- [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/).
|
||||||
|
- [`cli/`](./cli) CLI multitool that provides the aforementioned CLIs via subcommands.
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
[package]
|
[package]
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
name = "llmx-ansi-escape"
|
name = "codex-ansi-escape"
|
||||||
version = { workspace = true }
|
version = { workspace = true }
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "llmx_ansi_escape"
|
name = "codex_ansi_escape"
|
||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
# oai-llmx-ansi-escape
|
# oai-codex-ansi-escape
|
||||||
|
|
||||||
Small helper functions that wrap functionality from
|
Small helper functions that wrap functionality from
|
||||||
<https://crates.io/crates/ansi-to-tui>:
|
<https://crates.io/crates/ansi-to-tui>:
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
[package]
|
[package]
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
name = "llmx-app-server-protocol"
|
name = "codex-app-server-protocol"
|
||||||
version = { workspace = true }
|
version = { workspace = true }
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "llmx_app_server_protocol"
|
name = "codex_app_server_protocol"
|
||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
@@ -13,7 +13,7 @@ workspace = true
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
clap = { workspace = true, features = ["derive"] }
|
clap = { workspace = true, features = ["derive"] }
|
||||||
llmx-protocol = { workspace = true }
|
codex-protocol = { workspace = true }
|
||||||
mcp-types = { workspace = true }
|
mcp-types = { workspace = true }
|
||||||
paste = { workspace = true }
|
paste = { workspace = true }
|
||||||
schemars = { workspace = true }
|
schemars = { workspace = true }
|
||||||
@@ -3,7 +3,9 @@ use clap::Parser;
|
|||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
#[derive(Parser, Debug)]
|
#[derive(Parser, Debug)]
|
||||||
#[command(about = "Generate TypeScript bindings and JSON Schemas for the LLMX app-server protocol")]
|
#[command(
|
||||||
|
about = "Generate TypeScript bindings and JSON Schemas for the Codex app-server protocol"
|
||||||
|
)]
|
||||||
struct Args {
|
struct Args {
|
||||||
/// Output directory where generated files will be written
|
/// Output directory where generated files will be written
|
||||||
#[arg(short = 'o', long = "out", value_name = "DIR")]
|
#[arg(short = 'o', long = "out", value_name = "DIR")]
|
||||||
@@ -16,5 +18,5 @@ struct Args {
|
|||||||
|
|
||||||
fn main() -> Result<()> {
|
fn main() -> Result<()> {
|
||||||
let args = Args::parse();
|
let args = Args::parse();
|
||||||
llmx_app_server_protocol::generate_types(&args.out_dir, args.prettier.as_deref())
|
codex_app_server_protocol::generate_types(&args.out_dir, args.prettier.as_deref())
|
||||||
}
|
}
|
||||||
@@ -13,10 +13,10 @@ use crate::export_server_responses;
|
|||||||
use anyhow::Context;
|
use anyhow::Context;
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use anyhow::anyhow;
|
use anyhow::anyhow;
|
||||||
use llmx_protocol::parse_command::ParsedCommand;
|
use codex_protocol::parse_command::ParsedCommand;
|
||||||
use llmx_protocol::protocol::EventMsg;
|
use codex_protocol::protocol::EventMsg;
|
||||||
use llmx_protocol::protocol::FileChange;
|
use codex_protocol::protocol::FileChange;
|
||||||
use llmx_protocol::protocol::SandboxPolicy;
|
use codex_protocol::protocol::SandboxPolicy;
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use schemars::schema_for;
|
use schemars::schema_for;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
@@ -138,7 +138,7 @@ pub fn generate_json(out_dir: &Path) -> Result<()> {
|
|||||||
|
|
||||||
let bundle = build_schema_bundle(schemas)?;
|
let bundle = build_schema_bundle(schemas)?;
|
||||||
write_pretty_json(
|
write_pretty_json(
|
||||||
out_dir.join("llmx_app_server_protocol.schemas.json"),
|
out_dir.join("codex_app_server_protocol.schemas.json"),
|
||||||
&bundle,
|
&bundle,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
@@ -223,7 +223,7 @@ fn build_schema_bundle(schemas: Vec<GeneratedSchema>) -> Result<Value> {
|
|||||||
);
|
);
|
||||||
root.insert(
|
root.insert(
|
||||||
"title".to_string(),
|
"title".to_string(),
|
||||||
Value::String("LlmxAppServerProtocol".into()),
|
Value::String("CodexAppServerProtocol".into()),
|
||||||
);
|
);
|
||||||
root.insert("type".to_string(), Value::String("object".into()));
|
root.insert("type".to_string(), Value::String("object".into()));
|
||||||
root.insert("definitions".to_string(), Value::Object(definitions));
|
root.insert("definitions".to_string(), Value::Object(definitions));
|
||||||
@@ -719,7 +719,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn generated_ts_has_no_optional_nullable_fields() -> Result<()> {
|
fn generated_ts_has_no_optional_nullable_fields() -> Result<()> {
|
||||||
// Assert that there are no types of the form "?: T | null" in the generated TS files.
|
// Assert that there are no types of the form "?: T | null" in the generated TS files.
|
||||||
let output_dir = std::env::temp_dir().join(format!("llmx_ts_types_{}", Uuid::now_v7()));
|
let output_dir = std::env::temp_dir().join(format!("codex_ts_types_{}", Uuid::now_v7()));
|
||||||
fs::create_dir(&output_dir)?;
|
fs::create_dir(&output_dir)?;
|
||||||
|
|
||||||
struct TempDirGuard(PathBuf);
|
struct TempDirGuard(PathBuf);
|
||||||
@@ -9,11 +9,11 @@ use crate::export::GeneratedSchema;
|
|||||||
use crate::export::write_json_schema;
|
use crate::export::write_json_schema;
|
||||||
use crate::protocol::v1;
|
use crate::protocol::v1;
|
||||||
use crate::protocol::v2;
|
use crate::protocol::v2;
|
||||||
use llmx_protocol::ConversationId;
|
use codex_protocol::ConversationId;
|
||||||
use llmx_protocol::parse_command::ParsedCommand;
|
use codex_protocol::parse_command::ParsedCommand;
|
||||||
use llmx_protocol::protocol::FileChange;
|
use codex_protocol::protocol::FileChange;
|
||||||
use llmx_protocol::protocol::ReviewDecision;
|
use codex_protocol::protocol::ReviewDecision;
|
||||||
use llmx_protocol::protocol::SandboxCommandAssessment;
|
use codex_protocol::protocol::SandboxCommandAssessment;
|
||||||
use paste::paste;
|
use paste::paste;
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
@@ -182,12 +182,12 @@ client_request_definitions! {
|
|||||||
params: v1::GetConversationSummaryParams,
|
params: v1::GetConversationSummaryParams,
|
||||||
response: v1::GetConversationSummaryResponse,
|
response: v1::GetConversationSummaryResponse,
|
||||||
},
|
},
|
||||||
/// List recorded Llmx conversations (rollouts) with optional pagination and search.
|
/// List recorded Codex conversations (rollouts) with optional pagination and search.
|
||||||
ListConversations {
|
ListConversations {
|
||||||
params: v1::ListConversationsParams,
|
params: v1::ListConversationsParams,
|
||||||
response: v1::ListConversationsResponse,
|
response: v1::ListConversationsResponse,
|
||||||
},
|
},
|
||||||
/// Resume a recorded Llmx conversation from a rollout file.
|
/// Resume a recorded Codex conversation from a rollout file.
|
||||||
ResumeConversation {
|
ResumeConversation {
|
||||||
params: v1::ResumeConversationParams,
|
params: v1::ResumeConversationParams,
|
||||||
response: v1::ResumeConversationResponse,
|
response: v1::ResumeConversationResponse,
|
||||||
@@ -436,8 +436,8 @@ server_request_definitions! {
|
|||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct ApplyPatchApprovalParams {
|
pub struct ApplyPatchApprovalParams {
|
||||||
pub conversation_id: ConversationId,
|
pub conversation_id: ConversationId,
|
||||||
/// Use to correlate this with [llmx_core::protocol::PatchApplyBeginEvent]
|
/// Use to correlate this with [codex_core::protocol::PatchApplyBeginEvent]
|
||||||
/// and [llmx_core::protocol::PatchApplyEndEvent].
|
/// and [codex_core::protocol::PatchApplyEndEvent].
|
||||||
pub call_id: String,
|
pub call_id: String,
|
||||||
pub file_changes: HashMap<PathBuf, FileChange>,
|
pub file_changes: HashMap<PathBuf, FileChange>,
|
||||||
/// Optional explanatory reason (e.g. request for extra write access).
|
/// Optional explanatory reason (e.g. request for extra write access).
|
||||||
@@ -451,8 +451,8 @@ pub struct ApplyPatchApprovalParams {
|
|||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct ExecCommandApprovalParams {
|
pub struct ExecCommandApprovalParams {
|
||||||
pub conversation_id: ConversationId,
|
pub conversation_id: ConversationId,
|
||||||
/// Use to correlate this with [llmx_core::protocol::ExecCommandBeginEvent]
|
/// Use to correlate this with [codex_core::protocol::ExecCommandBeginEvent]
|
||||||
/// and [llmx_core::protocol::ExecCommandEndEvent].
|
/// and [codex_core::protocol::ExecCommandEndEvent].
|
||||||
pub call_id: String,
|
pub call_id: String,
|
||||||
pub command: Vec<String>,
|
pub command: Vec<String>,
|
||||||
pub cwd: PathBuf,
|
pub cwd: PathBuf,
|
||||||
@@ -481,7 +481,7 @@ pub struct FuzzyFileSearchParams {
|
|||||||
pub cancellation_token: Option<String>,
|
pub cancellation_token: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Superset of [`llmx_file_search::FileMatch`]
|
/// Superset of [`codex_file_search::FileMatch`]
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||||
pub struct FuzzyFileSearchResult {
|
pub struct FuzzyFileSearchResult {
|
||||||
pub root: String,
|
pub root: String,
|
||||||
@@ -530,8 +530,8 @@ client_notification_definitions! {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use llmx_protocol::account::PlanType;
|
use codex_protocol::account::PlanType;
|
||||||
use llmx_protocol::protocol::AskForApproval;
|
use codex_protocol::protocol::AskForApproval;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
|
|
||||||
@@ -540,7 +540,7 @@ mod tests {
|
|||||||
let request = ClientRequest::NewConversation {
|
let request = ClientRequest::NewConversation {
|
||||||
request_id: RequestId::Integer(42),
|
request_id: RequestId::Integer(42),
|
||||||
params: v1::NewConversationParams {
|
params: v1::NewConversationParams {
|
||||||
model: Some("gpt-5-llmx".to_string()),
|
model: Some("gpt-5-codex".to_string()),
|
||||||
model_provider: None,
|
model_provider: None,
|
||||||
profile: None,
|
profile: None,
|
||||||
cwd: None,
|
cwd: None,
|
||||||
@@ -558,7 +558,7 @@ mod tests {
|
|||||||
"method": "newConversation",
|
"method": "newConversation",
|
||||||
"id": 42,
|
"id": 42,
|
||||||
"params": {
|
"params": {
|
||||||
"model": "gpt-5-llmx",
|
"model": "gpt-5-codex",
|
||||||
"modelProvider": null,
|
"modelProvider": null,
|
||||||
"profile": null,
|
"profile": null,
|
||||||
"cwd": null,
|
"cwd": null,
|
||||||
@@ -1,18 +1,18 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
use llmx_protocol::ConversationId;
|
use codex_protocol::ConversationId;
|
||||||
use llmx_protocol::config_types::ForcedLoginMethod;
|
use codex_protocol::config_types::ForcedLoginMethod;
|
||||||
use llmx_protocol::config_types::ReasoningEffort;
|
use codex_protocol::config_types::ReasoningEffort;
|
||||||
use llmx_protocol::config_types::ReasoningSummary;
|
use codex_protocol::config_types::ReasoningSummary;
|
||||||
use llmx_protocol::config_types::SandboxMode;
|
use codex_protocol::config_types::SandboxMode;
|
||||||
use llmx_protocol::config_types::Verbosity;
|
use codex_protocol::config_types::Verbosity;
|
||||||
use llmx_protocol::models::ResponseItem;
|
use codex_protocol::models::ResponseItem;
|
||||||
use llmx_protocol::protocol::AskForApproval;
|
use codex_protocol::protocol::AskForApproval;
|
||||||
use llmx_protocol::protocol::EventMsg;
|
use codex_protocol::protocol::EventMsg;
|
||||||
use llmx_protocol::protocol::SandboxPolicy;
|
use codex_protocol::protocol::SandboxPolicy;
|
||||||
use llmx_protocol::protocol::SessionSource;
|
use codex_protocol::protocol::SessionSource;
|
||||||
use llmx_protocol::protocol::TurnAbortReason;
|
use codex_protocol::protocol::TurnAbortReason;
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
@@ -2,13 +2,13 @@ use std::collections::HashMap;
|
|||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
use crate::protocol::common::AuthMode;
|
use crate::protocol::common::AuthMode;
|
||||||
use llmx_protocol::ConversationId;
|
use codex_protocol::ConversationId;
|
||||||
use llmx_protocol::account::PlanType;
|
use codex_protocol::account::PlanType;
|
||||||
use llmx_protocol::config_types::ReasoningEffort;
|
use codex_protocol::config_types::ReasoningEffort;
|
||||||
use llmx_protocol::config_types::ReasoningSummary;
|
use codex_protocol::config_types::ReasoningSummary;
|
||||||
use llmx_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
|
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
|
||||||
use llmx_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
|
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
|
||||||
use llmx_protocol::user_input::UserInput as CoreUserInput;
|
use codex_protocol::user_input::UserInput as CoreUserInput;
|
||||||
use mcp_types::ContentBlock as McpContentBlock;
|
use mcp_types::ContentBlock as McpContentBlock;
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
@@ -42,13 +42,13 @@ macro_rules! v2_enum_from_core {
|
|||||||
}
|
}
|
||||||
|
|
||||||
v2_enum_from_core!(
|
v2_enum_from_core!(
|
||||||
pub enum AskForApproval from llmx_protocol::protocol::AskForApproval {
|
pub enum AskForApproval from codex_protocol::protocol::AskForApproval {
|
||||||
UnlessTrusted, OnFailure, OnRequest, Never
|
UnlessTrusted, OnFailure, OnRequest, Never
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
v2_enum_from_core!(
|
v2_enum_from_core!(
|
||||||
pub enum SandboxMode from llmx_protocol::config_types::SandboxMode {
|
pub enum SandboxMode from codex_protocol::config_types::SandboxMode {
|
||||||
ReadOnly, WorkspaceWrite, DangerFullAccess
|
ReadOnly, WorkspaceWrite, DangerFullAccess
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
@@ -73,18 +73,18 @@ pub enum SandboxPolicy {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl SandboxPolicy {
|
impl SandboxPolicy {
|
||||||
pub fn to_core(&self) -> llmx_protocol::protocol::SandboxPolicy {
|
pub fn to_core(&self) -> codex_protocol::protocol::SandboxPolicy {
|
||||||
match self {
|
match self {
|
||||||
SandboxPolicy::DangerFullAccess => {
|
SandboxPolicy::DangerFullAccess => {
|
||||||
llmx_protocol::protocol::SandboxPolicy::DangerFullAccess
|
codex_protocol::protocol::SandboxPolicy::DangerFullAccess
|
||||||
}
|
}
|
||||||
SandboxPolicy::ReadOnly => llmx_protocol::protocol::SandboxPolicy::ReadOnly,
|
SandboxPolicy::ReadOnly => codex_protocol::protocol::SandboxPolicy::ReadOnly,
|
||||||
SandboxPolicy::WorkspaceWrite {
|
SandboxPolicy::WorkspaceWrite {
|
||||||
writable_roots,
|
writable_roots,
|
||||||
network_access,
|
network_access,
|
||||||
exclude_tmpdir_env_var,
|
exclude_tmpdir_env_var,
|
||||||
exclude_slash_tmp,
|
exclude_slash_tmp,
|
||||||
} => llmx_protocol::protocol::SandboxPolicy::WorkspaceWrite {
|
} => codex_protocol::protocol::SandboxPolicy::WorkspaceWrite {
|
||||||
writable_roots: writable_roots.clone(),
|
writable_roots: writable_roots.clone(),
|
||||||
network_access: *network_access,
|
network_access: *network_access,
|
||||||
exclude_tmpdir_env_var: *exclude_tmpdir_env_var,
|
exclude_tmpdir_env_var: *exclude_tmpdir_env_var,
|
||||||
@@ -94,14 +94,14 @@ impl SandboxPolicy {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<llmx_protocol::protocol::SandboxPolicy> for SandboxPolicy {
|
impl From<codex_protocol::protocol::SandboxPolicy> for SandboxPolicy {
|
||||||
fn from(value: llmx_protocol::protocol::SandboxPolicy) -> Self {
|
fn from(value: codex_protocol::protocol::SandboxPolicy) -> Self {
|
||||||
match value {
|
match value {
|
||||||
llmx_protocol::protocol::SandboxPolicy::DangerFullAccess => {
|
codex_protocol::protocol::SandboxPolicy::DangerFullAccess => {
|
||||||
SandboxPolicy::DangerFullAccess
|
SandboxPolicy::DangerFullAccess
|
||||||
}
|
}
|
||||||
llmx_protocol::protocol::SandboxPolicy::ReadOnly => SandboxPolicy::ReadOnly,
|
codex_protocol::protocol::SandboxPolicy::ReadOnly => SandboxPolicy::ReadOnly,
|
||||||
llmx_protocol::protocol::SandboxPolicy::WorkspaceWrite {
|
codex_protocol::protocol::SandboxPolicy::WorkspaceWrite {
|
||||||
writable_roots,
|
writable_roots,
|
||||||
network_access,
|
network_access,
|
||||||
exclude_tmpdir_env_var,
|
exclude_tmpdir_env_var,
|
||||||
@@ -1,14 +1,14 @@
|
|||||||
[package]
|
[package]
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
name = "llmx-app-server"
|
name = "codex-app-server"
|
||||||
version = { workspace = true }
|
version = { workspace = true }
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "llmx-app-server"
|
name = "codex-app-server"
|
||||||
path = "src/main.rs"
|
path = "src/main.rs"
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "llmx_app_server"
|
name = "codex_app_server"
|
||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
@@ -16,16 +16,16 @@ workspace = true
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
llmx-arg0 = { workspace = true }
|
codex-arg0 = { workspace = true }
|
||||||
llmx-common = { workspace = true, features = ["cli"] }
|
codex-common = { workspace = true, features = ["cli"] }
|
||||||
llmx-core = { workspace = true }
|
codex-core = { workspace = true }
|
||||||
llmx-backend-client = { workspace = true }
|
codex-backend-client = { workspace = true }
|
||||||
llmx-file-search = { workspace = true }
|
codex-file-search = { workspace = true }
|
||||||
llmx-login = { workspace = true }
|
codex-login = { workspace = true }
|
||||||
llmx-protocol = { workspace = true }
|
codex-protocol = { workspace = true }
|
||||||
llmx-app-server-protocol = { workspace = true }
|
codex-app-server-protocol = { workspace = true }
|
||||||
llmx-feedback = { workspace = true }
|
codex-feedback = { workspace = true }
|
||||||
llmx-utils-json-to-toml = { workspace = true }
|
codex-utils-json-to-toml = { workspace = true }
|
||||||
chrono = { workspace = true }
|
chrono = { workspace = true }
|
||||||
serde = { workspace = true, features = ["derive"] }
|
serde = { workspace = true, features = ["derive"] }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
@@ -1,18 +1,18 @@
|
|||||||
# llmx-app-server
|
# codex-app-server
|
||||||
|
|
||||||
`llmx app-server` is the interface LLMX uses to power rich interfaces such as the [LLMX VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of LLMX may find it valuable.
|
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.
|
||||||
|
|
||||||
## Protocol
|
## Protocol
|
||||||
|
|
||||||
Similar to [MCP](https://modelcontextprotocol.io/), `llmx app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` header is omitted.
|
Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` header is omitted.
|
||||||
|
|
||||||
## Message Schema
|
## Message Schema
|
||||||
|
|
||||||
Currently, you can dump a TypeScript version of the schema using `llmx app-server generate-ts`, or a JSON Schema bundle via `llmx app-server generate-json-schema`. Each output is specific to the version of LLMX you used to run the command, so the generated artifacts are guaranteed to match that version.
|
Currently, you can dump a TypeScript version of the schema using `codex app-server generate-ts`, or a JSON Schema bundle via `codex app-server generate-json-schema`. Each output is specific to the version of Codex you used to run the command, so the generated artifacts are guaranteed to match that version.
|
||||||
|
|
||||||
```
|
```
|
||||||
llmx app-server generate-ts --out DIR
|
codex app-server generate-ts --out DIR
|
||||||
llmx app-server generate-json-schema --out DIR
|
codex app-server generate-json-schema --out DIR
|
||||||
```
|
```
|
||||||
|
|
||||||
## Initialization
|
## Initialization
|
||||||
@@ -23,40 +23,40 @@ Example:
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{ "method": "initialize", "id": 0, "params": {
|
{ "method": "initialize", "id": 0, "params": {
|
||||||
"clientInfo": { "name": "llmx-vscode", "title": "LLMX VS Code Extension", "version": "0.1.0" }
|
"clientInfo": { "name": "codex-vscode", "title": "Codex VS Code Extension", "version": "0.1.0" }
|
||||||
} }
|
} }
|
||||||
{ "id": 0, "result": { "userAgent": "llmx-app-server/0.1.0 llmx-vscode/0.1.0" } }
|
{ "id": 0, "result": { "userAgent": "codex-app-server/0.1.0 codex-vscode/0.1.0" } }
|
||||||
{ "method": "initialized" }
|
{ "method": "initialized" }
|
||||||
```
|
```
|
||||||
|
|
||||||
## Core primitives
|
## Core primitives
|
||||||
|
|
||||||
We have 3 top level primitives:
|
We have 3 top level primitives:
|
||||||
- Thread - a conversation between the LLMX agent and a user. Each thread contains multiple turns.
|
- Thread - a conversation between the Codex agent and a user. Each thread contains multiple turns.
|
||||||
- Turn - one turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
|
- Turn - one turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
|
||||||
- Item - represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations.
|
- Item - represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations.
|
||||||
|
|
||||||
## Thread & turn endpoints
|
## Thread & turn endpoints
|
||||||
|
|
||||||
The JSON-RPC API exposes dedicated methods for managing LLMX conversations. Threads store long-lived conversation metadata, and turns store the per-message exchange (input → LLMX output, including streamed items). Use the thread APIs to create, list, or archive sessions, then drive the conversation with turn APIs and notifications.
|
The JSON-RPC API exposes dedicated methods for managing Codex conversations. Threads store long-lived conversation metadata, and turns store the per-message exchange (input → Codex output, including streamed items). Use the thread APIs to create, list, or archive sessions, then drive the conversation with turn APIs and notifications.
|
||||||
|
|
||||||
### Quick reference
|
### Quick reference
|
||||||
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
|
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
|
||||||
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
|
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
|
||||||
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
|
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
|
||||||
- `thread/archive` — move a thread’s rollout file into the archived directory; returns `{}` on success.
|
- `thread/archive` — move a thread’s rollout file into the archived directory; returns `{}` on success.
|
||||||
- `turn/start` — add user input to a thread and begin LLMX generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
|
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
|
||||||
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
|
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
|
||||||
|
|
||||||
### 1) Start or resume a thread
|
### 1) Start or resume a thread
|
||||||
|
|
||||||
Start a fresh thread when you need a new LLMX conversation.
|
Start a fresh thread when you need a new Codex conversation.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{ "method": "thread/start", "id": 10, "params": {
|
{ "method": "thread/start", "id": 10, "params": {
|
||||||
// Optionally set config settings. If not specified, will use the user's
|
// Optionally set config settings. If not specified, will use the user's
|
||||||
// current config settings.
|
// current config settings.
|
||||||
"model": "gpt-5-llmx",
|
"model": "gpt-5-codex",
|
||||||
"cwd": "/Users/me/project",
|
"cwd": "/Users/me/project",
|
||||||
"approvalPolicy": "never",
|
"approvalPolicy": "never",
|
||||||
"sandbox": "workspaceWrite",
|
"sandbox": "workspaceWrite",
|
||||||
@@ -117,7 +117,7 @@ An archived thread will not appear in future calls to `thread/list`.
|
|||||||
|
|
||||||
### 4) Start a turn (send user input)
|
### 4) Start a turn (send user input)
|
||||||
|
|
||||||
Turns attach user input (text or images) to a thread and trigger LLMX generation. The `input` field is a list of discriminated unions:
|
Turns attach user input (text or images) to a thread and trigger Codex generation. The `input` field is a list of discriminated unions:
|
||||||
|
|
||||||
- `{"type":"text","text":"Explain this diff"}`
|
- `{"type":"text","text":"Explain this diff"}`
|
||||||
- `{"type":"image","url":"https://…png"}`
|
- `{"type":"image","url":"https://…png"}`
|
||||||
@@ -137,7 +137,7 @@ You can optionally specify config overrides on the new turn. If specified, these
|
|||||||
"writableRoots": ["/Users/me/project"],
|
"writableRoots": ["/Users/me/project"],
|
||||||
"networkAccess": true
|
"networkAccess": true
|
||||||
},
|
},
|
||||||
"model": "gpt-5-llmx",
|
"model": "gpt-5-codex",
|
||||||
"effort": "medium",
|
"effort": "medium",
|
||||||
"summary": "concise"
|
"summary": "concise"
|
||||||
} }
|
} }
|
||||||
@@ -161,7 +161,7 @@ You can cancel a running Turn with `turn/interrupt`.
|
|||||||
{ "id": 31, "result": {} }
|
{ "id": 31, "result": {} }
|
||||||
```
|
```
|
||||||
|
|
||||||
The server requests cancellations for running subprocesses, then emits a `turn/completed` event with `status: "interrupted"`. Rely on the `turn/completed` to know when LLMX-side cleanup is done.
|
The server requests cancellations for running subprocesses, then emits a `turn/completed` event with `status: "interrupted"`. Rely on the `turn/completed` to know when Codex-side cleanup is done.
|
||||||
|
|
||||||
## Auth endpoints
|
## Auth endpoints
|
||||||
|
|
||||||
@@ -193,7 +193,7 @@ Response examples:
|
|||||||
|
|
||||||
Field notes:
|
Field notes:
|
||||||
- `refreshToken` (bool): set `true` to force a token refresh.
|
- `refreshToken` (bool): set `true` to force a token refresh.
|
||||||
- `requiresOpenaiAuth` reflects the active provider; when `false`, LLMX can run without OpenAI credentials.
|
- `requiresOpenaiAuth` reflects the active provider; when `false`, Codex can run without OpenAI credentials.
|
||||||
|
|
||||||
### 2) Log in with an API key
|
### 2) Log in with an API key
|
||||||
|
|
||||||
@@ -255,6 +255,6 @@ Field notes:
|
|||||||
|
|
||||||
### Dev notes
|
### Dev notes
|
||||||
|
|
||||||
- `llmx app-server generate-ts --out <dir>` emits v2 types under `v2/`.
|
- `codex app-server generate-ts --out <dir>` emits v2 types under `v2/`.
|
||||||
- `llmx app-server generate-json-schema --out <dir>` outputs `llmx_app_server_protocol.schemas.json`.
|
- `codex app-server generate-json-schema --out <dir>` outputs `codex_app_server_protocol.schemas.json`.
|
||||||
- See [“Authentication and authorization” in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.
|
- See [“Authentication and authorization” in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.
|
||||||
@@ -6,143 +6,143 @@ use crate::outgoing_message::OutgoingMessageSender;
|
|||||||
use crate::outgoing_message::OutgoingNotification;
|
use crate::outgoing_message::OutgoingNotification;
|
||||||
use chrono::DateTime;
|
use chrono::DateTime;
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use llmx_app_server_protocol::Account;
|
use codex_app_server_protocol::Account;
|
||||||
use llmx_app_server_protocol::AccountLoginCompletedNotification;
|
use codex_app_server_protocol::AccountLoginCompletedNotification;
|
||||||
use llmx_app_server_protocol::AccountRateLimitsUpdatedNotification;
|
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
|
||||||
use llmx_app_server_protocol::AccountUpdatedNotification;
|
use codex_app_server_protocol::AccountUpdatedNotification;
|
||||||
use llmx_app_server_protocol::AddConversationListenerParams;
|
use codex_app_server_protocol::AddConversationListenerParams;
|
||||||
use llmx_app_server_protocol::AddConversationSubscriptionResponse;
|
use codex_app_server_protocol::AddConversationSubscriptionResponse;
|
||||||
use llmx_app_server_protocol::ApplyPatchApprovalParams;
|
use codex_app_server_protocol::ApplyPatchApprovalParams;
|
||||||
use llmx_app_server_protocol::ApplyPatchApprovalResponse;
|
use codex_app_server_protocol::ApplyPatchApprovalResponse;
|
||||||
use llmx_app_server_protocol::ArchiveConversationParams;
|
use codex_app_server_protocol::ArchiveConversationParams;
|
||||||
use llmx_app_server_protocol::ArchiveConversationResponse;
|
use codex_app_server_protocol::ArchiveConversationResponse;
|
||||||
use llmx_app_server_protocol::AskForApproval;
|
use codex_app_server_protocol::AskForApproval;
|
||||||
use llmx_app_server_protocol::AuthMode;
|
use codex_app_server_protocol::AuthMode;
|
||||||
use llmx_app_server_protocol::AuthStatusChangeNotification;
|
use codex_app_server_protocol::AuthStatusChangeNotification;
|
||||||
use llmx_app_server_protocol::CancelLoginAccountParams;
|
use codex_app_server_protocol::CancelLoginAccountParams;
|
||||||
use llmx_app_server_protocol::CancelLoginAccountResponse;
|
use codex_app_server_protocol::CancelLoginAccountResponse;
|
||||||
use llmx_app_server_protocol::CancelLoginChatGptResponse;
|
use codex_app_server_protocol::CancelLoginChatGptResponse;
|
||||||
use llmx_app_server_protocol::ClientRequest;
|
use codex_app_server_protocol::ClientRequest;
|
||||||
use llmx_app_server_protocol::ConversationGitInfo;
|
use codex_app_server_protocol::ConversationGitInfo;
|
||||||
use llmx_app_server_protocol::ConversationSummary;
|
use codex_app_server_protocol::ConversationSummary;
|
||||||
use llmx_app_server_protocol::ExecCommandApprovalParams;
|
use codex_app_server_protocol::ExecCommandApprovalParams;
|
||||||
use llmx_app_server_protocol::ExecCommandApprovalResponse;
|
use codex_app_server_protocol::ExecCommandApprovalResponse;
|
||||||
use llmx_app_server_protocol::ExecOneOffCommandParams;
|
use codex_app_server_protocol::ExecOneOffCommandParams;
|
||||||
use llmx_app_server_protocol::ExecOneOffCommandResponse;
|
use codex_app_server_protocol::ExecOneOffCommandResponse;
|
||||||
use llmx_app_server_protocol::FeedbackUploadParams;
|
use codex_app_server_protocol::FeedbackUploadParams;
|
||||||
use llmx_app_server_protocol::FeedbackUploadResponse;
|
use codex_app_server_protocol::FeedbackUploadResponse;
|
||||||
use llmx_app_server_protocol::FuzzyFileSearchParams;
|
use codex_app_server_protocol::FuzzyFileSearchParams;
|
||||||
use llmx_app_server_protocol::FuzzyFileSearchResponse;
|
use codex_app_server_protocol::FuzzyFileSearchResponse;
|
||||||
use llmx_app_server_protocol::GetAccountParams;
|
use codex_app_server_protocol::GetAccountParams;
|
||||||
use llmx_app_server_protocol::GetAccountRateLimitsResponse;
|
use codex_app_server_protocol::GetAccountRateLimitsResponse;
|
||||||
use llmx_app_server_protocol::GetAccountResponse;
|
use codex_app_server_protocol::GetAccountResponse;
|
||||||
use llmx_app_server_protocol::GetAuthStatusParams;
|
use codex_app_server_protocol::GetAuthStatusParams;
|
||||||
use llmx_app_server_protocol::GetAuthStatusResponse;
|
use codex_app_server_protocol::GetAuthStatusResponse;
|
||||||
use llmx_app_server_protocol::GetConversationSummaryParams;
|
use codex_app_server_protocol::GetConversationSummaryParams;
|
||||||
use llmx_app_server_protocol::GetConversationSummaryResponse;
|
use codex_app_server_protocol::GetConversationSummaryResponse;
|
||||||
use llmx_app_server_protocol::GetUserAgentResponse;
|
use codex_app_server_protocol::GetUserAgentResponse;
|
||||||
use llmx_app_server_protocol::GetUserSavedConfigResponse;
|
use codex_app_server_protocol::GetUserSavedConfigResponse;
|
||||||
use llmx_app_server_protocol::GitDiffToRemoteResponse;
|
use codex_app_server_protocol::GitDiffToRemoteResponse;
|
||||||
use llmx_app_server_protocol::InputItem as WireInputItem;
|
use codex_app_server_protocol::InputItem as WireInputItem;
|
||||||
use llmx_app_server_protocol::InterruptConversationParams;
|
use codex_app_server_protocol::InterruptConversationParams;
|
||||||
use llmx_app_server_protocol::InterruptConversationResponse;
|
use codex_app_server_protocol::InterruptConversationResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCErrorError;
|
use codex_app_server_protocol::JSONRPCErrorError;
|
||||||
use llmx_app_server_protocol::ListConversationsParams;
|
use codex_app_server_protocol::ListConversationsParams;
|
||||||
use llmx_app_server_protocol::ListConversationsResponse;
|
use codex_app_server_protocol::ListConversationsResponse;
|
||||||
use llmx_app_server_protocol::LoginAccountParams;
|
use codex_app_server_protocol::LoginAccountParams;
|
||||||
use llmx_app_server_protocol::LoginApiKeyParams;
|
use codex_app_server_protocol::LoginApiKeyParams;
|
||||||
use llmx_app_server_protocol::LoginApiKeyResponse;
|
use codex_app_server_protocol::LoginApiKeyResponse;
|
||||||
use llmx_app_server_protocol::LoginChatGptCompleteNotification;
|
use codex_app_server_protocol::LoginChatGptCompleteNotification;
|
||||||
use llmx_app_server_protocol::LoginChatGptResponse;
|
use codex_app_server_protocol::LoginChatGptResponse;
|
||||||
use llmx_app_server_protocol::LogoutAccountResponse;
|
use codex_app_server_protocol::LogoutAccountResponse;
|
||||||
use llmx_app_server_protocol::LogoutChatGptResponse;
|
use codex_app_server_protocol::LogoutChatGptResponse;
|
||||||
use llmx_app_server_protocol::ModelListParams;
|
use codex_app_server_protocol::ModelListParams;
|
||||||
use llmx_app_server_protocol::ModelListResponse;
|
use codex_app_server_protocol::ModelListResponse;
|
||||||
use llmx_app_server_protocol::NewConversationParams;
|
use codex_app_server_protocol::NewConversationParams;
|
||||||
use llmx_app_server_protocol::NewConversationResponse;
|
use codex_app_server_protocol::NewConversationResponse;
|
||||||
use llmx_app_server_protocol::RemoveConversationListenerParams;
|
use codex_app_server_protocol::RemoveConversationListenerParams;
|
||||||
use llmx_app_server_protocol::RemoveConversationSubscriptionResponse;
|
use codex_app_server_protocol::RemoveConversationSubscriptionResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::Result as JsonRpcResult;
|
use codex_app_server_protocol::Result as JsonRpcResult;
|
||||||
use llmx_app_server_protocol::ResumeConversationParams;
|
use codex_app_server_protocol::ResumeConversationParams;
|
||||||
use llmx_app_server_protocol::ResumeConversationResponse;
|
use codex_app_server_protocol::ResumeConversationResponse;
|
||||||
use llmx_app_server_protocol::SandboxMode;
|
use codex_app_server_protocol::SandboxMode;
|
||||||
use llmx_app_server_protocol::SendUserMessageParams;
|
use codex_app_server_protocol::SendUserMessageParams;
|
||||||
use llmx_app_server_protocol::SendUserMessageResponse;
|
use codex_app_server_protocol::SendUserMessageResponse;
|
||||||
use llmx_app_server_protocol::SendUserTurnParams;
|
use codex_app_server_protocol::SendUserTurnParams;
|
||||||
use llmx_app_server_protocol::SendUserTurnResponse;
|
use codex_app_server_protocol::SendUserTurnResponse;
|
||||||
use llmx_app_server_protocol::ServerNotification;
|
use codex_app_server_protocol::ServerNotification;
|
||||||
use llmx_app_server_protocol::ServerRequestPayload;
|
use codex_app_server_protocol::ServerRequestPayload;
|
||||||
use llmx_app_server_protocol::SessionConfiguredNotification;
|
use codex_app_server_protocol::SessionConfiguredNotification;
|
||||||
use llmx_app_server_protocol::SetDefaultModelParams;
|
use codex_app_server_protocol::SetDefaultModelParams;
|
||||||
use llmx_app_server_protocol::SetDefaultModelResponse;
|
use codex_app_server_protocol::SetDefaultModelResponse;
|
||||||
use llmx_app_server_protocol::Thread;
|
use codex_app_server_protocol::Thread;
|
||||||
use llmx_app_server_protocol::ThreadArchiveParams;
|
use codex_app_server_protocol::ThreadArchiveParams;
|
||||||
use llmx_app_server_protocol::ThreadArchiveResponse;
|
use codex_app_server_protocol::ThreadArchiveResponse;
|
||||||
use llmx_app_server_protocol::ThreadItem;
|
use codex_app_server_protocol::ThreadItem;
|
||||||
use llmx_app_server_protocol::ThreadListParams;
|
use codex_app_server_protocol::ThreadListParams;
|
||||||
use llmx_app_server_protocol::ThreadListResponse;
|
use codex_app_server_protocol::ThreadListResponse;
|
||||||
use llmx_app_server_protocol::ThreadResumeParams;
|
use codex_app_server_protocol::ThreadResumeParams;
|
||||||
use llmx_app_server_protocol::ThreadResumeResponse;
|
use codex_app_server_protocol::ThreadResumeResponse;
|
||||||
use llmx_app_server_protocol::ThreadStartParams;
|
use codex_app_server_protocol::ThreadStartParams;
|
||||||
use llmx_app_server_protocol::ThreadStartResponse;
|
use codex_app_server_protocol::ThreadStartResponse;
|
||||||
use llmx_app_server_protocol::ThreadStartedNotification;
|
use codex_app_server_protocol::ThreadStartedNotification;
|
||||||
use llmx_app_server_protocol::Turn;
|
use codex_app_server_protocol::Turn;
|
||||||
use llmx_app_server_protocol::TurnInterruptParams;
|
use codex_app_server_protocol::TurnInterruptParams;
|
||||||
use llmx_app_server_protocol::TurnInterruptResponse;
|
use codex_app_server_protocol::TurnInterruptResponse;
|
||||||
use llmx_app_server_protocol::TurnStartParams;
|
use codex_app_server_protocol::TurnStartParams;
|
||||||
use llmx_app_server_protocol::TurnStartResponse;
|
use codex_app_server_protocol::TurnStartResponse;
|
||||||
use llmx_app_server_protocol::TurnStartedNotification;
|
use codex_app_server_protocol::TurnStartedNotification;
|
||||||
use llmx_app_server_protocol::TurnStatus;
|
use codex_app_server_protocol::TurnStatus;
|
||||||
use llmx_app_server_protocol::UserInfoResponse;
|
use codex_app_server_protocol::UserInfoResponse;
|
||||||
use llmx_app_server_protocol::UserInput as V2UserInput;
|
use codex_app_server_protocol::UserInput as V2UserInput;
|
||||||
use llmx_app_server_protocol::UserSavedConfig;
|
use codex_app_server_protocol::UserSavedConfig;
|
||||||
use llmx_backend_client::Client as BackendClient;
|
use codex_backend_client::Client as BackendClient;
|
||||||
use llmx_core::AuthManager;
|
use codex_core::AuthManager;
|
||||||
use llmx_core::ConversationManager;
|
use codex_core::CodexConversation;
|
||||||
use llmx_core::Cursor as RolloutCursor;
|
use codex_core::ConversationManager;
|
||||||
use llmx_core::INTERACTIVE_SESSION_SOURCES;
|
use codex_core::Cursor as RolloutCursor;
|
||||||
use llmx_core::InitialHistory;
|
use codex_core::INTERACTIVE_SESSION_SOURCES;
|
||||||
use llmx_core::LlmxConversation;
|
use codex_core::InitialHistory;
|
||||||
use llmx_core::NewConversation;
|
use codex_core::NewConversation;
|
||||||
use llmx_core::RolloutRecorder;
|
use codex_core::RolloutRecorder;
|
||||||
use llmx_core::SessionMeta;
|
use codex_core::SessionMeta;
|
||||||
use llmx_core::auth::CLIENT_ID;
|
use codex_core::auth::CLIENT_ID;
|
||||||
use llmx_core::auth::login_with_api_key;
|
use codex_core::auth::login_with_api_key;
|
||||||
use llmx_core::config::Config;
|
use codex_core::config::Config;
|
||||||
use llmx_core::config::ConfigOverrides;
|
use codex_core::config::ConfigOverrides;
|
||||||
use llmx_core::config::ConfigToml;
|
use codex_core::config::ConfigToml;
|
||||||
use llmx_core::config::edit::ConfigEditsBuilder;
|
use codex_core::config::edit::ConfigEditsBuilder;
|
||||||
use llmx_core::config_loader::load_config_as_toml;
|
use codex_core::config_loader::load_config_as_toml;
|
||||||
use llmx_core::default_client::get_llmx_user_agent;
|
use codex_core::default_client::get_codex_user_agent;
|
||||||
use llmx_core::exec::ExecParams;
|
use codex_core::exec::ExecParams;
|
||||||
use llmx_core::exec_env::create_env;
|
use codex_core::exec_env::create_env;
|
||||||
use llmx_core::find_conversation_path_by_id_str;
|
use codex_core::find_conversation_path_by_id_str;
|
||||||
use llmx_core::get_platform_sandbox;
|
use codex_core::get_platform_sandbox;
|
||||||
use llmx_core::git_info::git_diff_to_remote;
|
use codex_core::git_info::git_diff_to_remote;
|
||||||
use llmx_core::parse_cursor;
|
use codex_core::parse_cursor;
|
||||||
use llmx_core::protocol::ApplyPatchApprovalRequestEvent;
|
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
|
||||||
use llmx_core::protocol::Event;
|
use codex_core::protocol::Event;
|
||||||
use llmx_core::protocol::EventMsg;
|
use codex_core::protocol::EventMsg;
|
||||||
use llmx_core::protocol::ExecApprovalRequestEvent;
|
use codex_core::protocol::ExecApprovalRequestEvent;
|
||||||
use llmx_core::protocol::Op;
|
use codex_core::protocol::Op;
|
||||||
use llmx_core::protocol::ReviewDecision;
|
use codex_core::protocol::ReviewDecision;
|
||||||
use llmx_core::read_head_for_summary;
|
use codex_core::read_head_for_summary;
|
||||||
use llmx_feedback::LlmxFeedback;
|
use codex_feedback::CodexFeedback;
|
||||||
use llmx_login::ServerOptions as LoginServerOptions;
|
use codex_login::ServerOptions as LoginServerOptions;
|
||||||
use llmx_login::ShutdownHandle;
|
use codex_login::ShutdownHandle;
|
||||||
use llmx_login::run_login_server;
|
use codex_login::run_login_server;
|
||||||
use llmx_protocol::ConversationId;
|
use codex_protocol::ConversationId;
|
||||||
use llmx_protocol::config_types::ForcedLoginMethod;
|
use codex_protocol::config_types::ForcedLoginMethod;
|
||||||
use llmx_protocol::items::TurnItem;
|
use codex_protocol::items::TurnItem;
|
||||||
use llmx_protocol::models::ResponseItem;
|
use codex_protocol::models::ResponseItem;
|
||||||
use llmx_protocol::protocol::GitInfo;
|
use codex_protocol::protocol::GitInfo;
|
||||||
use llmx_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
|
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
|
||||||
use llmx_protocol::protocol::RolloutItem;
|
use codex_protocol::protocol::RolloutItem;
|
||||||
use llmx_protocol::protocol::SessionMetaLine;
|
use codex_protocol::protocol::SessionMetaLine;
|
||||||
use llmx_protocol::protocol::USER_MESSAGE_BEGIN;
|
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
|
||||||
use llmx_protocol::user_input::UserInput as CoreInputItem;
|
use codex_protocol::user_input::UserInput as CoreInputItem;
|
||||||
use llmx_utils_json_to_toml::json_to_toml;
|
use codex_utils_json_to_toml::json_to_toml;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::ffi::OsStr;
|
use std::ffi::OsStr;
|
||||||
use std::io::Error as IoError;
|
use std::io::Error as IoError;
|
||||||
@@ -176,19 +176,19 @@ impl ActiveLogin {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Handles JSON-RPC messages for Llmx conversations.
|
/// Handles JSON-RPC messages for Codex conversations.
|
||||||
pub(crate) struct LlmxMessageProcessor {
|
pub(crate) struct CodexMessageProcessor {
|
||||||
auth_manager: Arc<AuthManager>,
|
auth_manager: Arc<AuthManager>,
|
||||||
conversation_manager: Arc<ConversationManager>,
|
conversation_manager: Arc<ConversationManager>,
|
||||||
outgoing: Arc<OutgoingMessageSender>,
|
outgoing: Arc<OutgoingMessageSender>,
|
||||||
llmx_linux_sandbox_exe: Option<PathBuf>,
|
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||||
config: Arc<Config>,
|
config: Arc<Config>,
|
||||||
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
|
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
|
||||||
active_login: Arc<Mutex<Option<ActiveLogin>>>,
|
active_login: Arc<Mutex<Option<ActiveLogin>>>,
|
||||||
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
|
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
|
||||||
pending_interrupts: PendingInterrupts,
|
pending_interrupts: PendingInterrupts,
|
||||||
pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
|
pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
|
||||||
feedback: LlmxFeedback,
|
feedback: CodexFeedback,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug)]
|
#[derive(Clone, Copy, Debug)]
|
||||||
@@ -197,11 +197,11 @@ enum ApiVersion {
|
|||||||
V2,
|
V2,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LlmxMessageProcessor {
|
impl CodexMessageProcessor {
|
||||||
async fn conversation_from_thread_id(
|
async fn conversation_from_thread_id(
|
||||||
&self,
|
&self,
|
||||||
thread_id: &str,
|
thread_id: &str,
|
||||||
) -> Result<(ConversationId, Arc<LlmxConversation>), JSONRPCErrorError> {
|
) -> Result<(ConversationId, Arc<CodexConversation>), JSONRPCErrorError> {
|
||||||
// Resolve conversation id from v2 thread id string.
|
// Resolve conversation id from v2 thread id string.
|
||||||
let conversation_id =
|
let conversation_id =
|
||||||
ConversationId::from_string(thread_id).map_err(|err| JSONRPCErrorError {
|
ConversationId::from_string(thread_id).map_err(|err| JSONRPCErrorError {
|
||||||
@@ -226,15 +226,15 @@ impl LlmxMessageProcessor {
|
|||||||
auth_manager: Arc<AuthManager>,
|
auth_manager: Arc<AuthManager>,
|
||||||
conversation_manager: Arc<ConversationManager>,
|
conversation_manager: Arc<ConversationManager>,
|
||||||
outgoing: Arc<OutgoingMessageSender>,
|
outgoing: Arc<OutgoingMessageSender>,
|
||||||
llmx_linux_sandbox_exe: Option<PathBuf>,
|
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||||
config: Arc<Config>,
|
config: Arc<Config>,
|
||||||
feedback: LlmxFeedback,
|
feedback: CodexFeedback,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
auth_manager,
|
auth_manager,
|
||||||
conversation_manager,
|
conversation_manager,
|
||||||
outgoing,
|
outgoing,
|
||||||
llmx_linux_sandbox_exe,
|
codex_linux_sandbox_exe,
|
||||||
config,
|
config,
|
||||||
conversation_listeners: HashMap::new(),
|
conversation_listeners: HashMap::new(),
|
||||||
active_login: Arc::new(Mutex::new(None)),
|
active_login: Arc::new(Mutex::new(None)),
|
||||||
@@ -434,7 +434,7 @@ impl LlmxMessageProcessor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
match login_with_api_key(
|
match login_with_api_key(
|
||||||
&self.config.llmx_home,
|
&self.config.codex_home,
|
||||||
¶ms.api_key,
|
¶ms.api_key,
|
||||||
self.config.cli_auth_credentials_store_mode,
|
self.config.cli_auth_credentials_store_mode,
|
||||||
) {
|
) {
|
||||||
@@ -473,7 +473,7 @@ impl LlmxMessageProcessor {
|
|||||||
async fn login_api_key_v2(&mut self, request_id: RequestId, params: LoginApiKeyParams) {
|
async fn login_api_key_v2(&mut self, request_id: RequestId, params: LoginApiKeyParams) {
|
||||||
match self.login_api_key_common(¶ms).await {
|
match self.login_api_key_common(¶ms).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
let response = llmx_app_server_protocol::LoginAccountResponse::ApiKey {};
|
let response = codex_app_server_protocol::LoginAccountResponse::ApiKey {};
|
||||||
self.outgoing.send_response(request_id, response).await;
|
self.outgoing.send_response(request_id, response).await;
|
||||||
|
|
||||||
let payload_login_completed = AccountLoginCompletedNotification {
|
let payload_login_completed = AccountLoginCompletedNotification {
|
||||||
@@ -517,7 +517,7 @@ impl LlmxMessageProcessor {
|
|||||||
Ok(LoginServerOptions {
|
Ok(LoginServerOptions {
|
||||||
open_browser: false,
|
open_browser: false,
|
||||||
..LoginServerOptions::new(
|
..LoginServerOptions::new(
|
||||||
config.llmx_home.clone(),
|
config.codex_home.clone(),
|
||||||
CLIENT_ID.to_string(),
|
CLIENT_ID.to_string(),
|
||||||
config.forced_chatgpt_workspace_id.clone(),
|
config.forced_chatgpt_workspace_id.clone(),
|
||||||
config.cli_auth_credentials_store_mode,
|
config.cli_auth_credentials_store_mode,
|
||||||
@@ -688,7 +688,7 @@ impl LlmxMessageProcessor {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
let response = llmx_app_server_protocol::LoginAccountResponse::Chatgpt {
|
let response = codex_app_server_protocol::LoginAccountResponse::Chatgpt {
|
||||||
login_id: login_id.to_string(),
|
login_id: login_id.to_string(),
|
||||||
auth_url,
|
auth_url,
|
||||||
};
|
};
|
||||||
@@ -843,32 +843,39 @@ impl LlmxMessageProcessor {
|
|||||||
// then no auth step is required; otherwise, default to requiring auth.
|
// then no auth step is required; otherwise, default to requiring auth.
|
||||||
let requires_openai_auth = self.config.model_provider.requires_openai_auth;
|
let requires_openai_auth = self.config.model_provider.requires_openai_auth;
|
||||||
|
|
||||||
// Check if user is authenticated, regardless of whether auth is required
|
let response = if !requires_openai_auth {
|
||||||
let response = match self.auth_manager.auth() {
|
GetAuthStatusResponse {
|
||||||
Some(auth) => {
|
|
||||||
let auth_mode = auth.mode;
|
|
||||||
let (reported_auth_method, token_opt) = match auth.get_token().await {
|
|
||||||
Ok(token) if !token.is_empty() => {
|
|
||||||
let tok = if include_token { Some(token) } else { None };
|
|
||||||
(Some(auth_mode), tok)
|
|
||||||
}
|
|
||||||
Ok(_) => (None, None),
|
|
||||||
Err(err) => {
|
|
||||||
tracing::warn!("failed to get token for auth status: {err}");
|
|
||||||
(None, None)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
GetAuthStatusResponse {
|
|
||||||
auth_method: reported_auth_method,
|
|
||||||
auth_token: token_opt,
|
|
||||||
requires_openai_auth: Some(requires_openai_auth),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => GetAuthStatusResponse {
|
|
||||||
auth_method: None,
|
auth_method: None,
|
||||||
auth_token: None,
|
auth_token: None,
|
||||||
requires_openai_auth: Some(requires_openai_auth),
|
requires_openai_auth: Some(false),
|
||||||
},
|
}
|
||||||
|
} else {
|
||||||
|
match self.auth_manager.auth() {
|
||||||
|
Some(auth) => {
|
||||||
|
let auth_mode = auth.mode;
|
||||||
|
let (reported_auth_method, token_opt) = match auth.get_token().await {
|
||||||
|
Ok(token) if !token.is_empty() => {
|
||||||
|
let tok = if include_token { Some(token) } else { None };
|
||||||
|
(Some(auth_mode), tok)
|
||||||
|
}
|
||||||
|
Ok(_) => (None, None),
|
||||||
|
Err(err) => {
|
||||||
|
tracing::warn!("failed to get token for auth status: {err}");
|
||||||
|
(None, None)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
GetAuthStatusResponse {
|
||||||
|
auth_method: reported_auth_method,
|
||||||
|
auth_token: token_opt,
|
||||||
|
requires_openai_auth: Some(true),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => GetAuthStatusResponse {
|
||||||
|
auth_method: None,
|
||||||
|
auth_token: None,
|
||||||
|
requires_openai_auth: Some(true),
|
||||||
|
},
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
self.outgoing.send_response(request_id, response).await;
|
self.outgoing.send_response(request_id, response).await;
|
||||||
@@ -925,7 +932,7 @@ impl LlmxMessageProcessor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn get_user_agent(&self, request_id: RequestId) {
|
async fn get_user_agent(&self, request_id: RequestId) {
|
||||||
let user_agent = get_llmx_user_agent();
|
let user_agent = get_codex_user_agent();
|
||||||
let response = GetUserAgentResponse { user_agent };
|
let response = GetUserAgentResponse { user_agent };
|
||||||
self.outgoing.send_response(request_id, response).await;
|
self.outgoing.send_response(request_id, response).await;
|
||||||
}
|
}
|
||||||
@@ -948,7 +955,7 @@ impl LlmxMessageProcessor {
|
|||||||
let Some(auth) = self.auth_manager.auth() else {
|
let Some(auth) = self.auth_manager.auth() else {
|
||||||
return Err(JSONRPCErrorError {
|
return Err(JSONRPCErrorError {
|
||||||
code: INVALID_REQUEST_ERROR_CODE,
|
code: INVALID_REQUEST_ERROR_CODE,
|
||||||
message: "llmx account authentication required to read rate limits".to_string(),
|
message: "codex account authentication required to read rate limits".to_string(),
|
||||||
data: None,
|
data: None,
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
@@ -974,13 +981,13 @@ impl LlmxMessageProcessor {
|
|||||||
.await
|
.await
|
||||||
.map_err(|err| JSONRPCErrorError {
|
.map_err(|err| JSONRPCErrorError {
|
||||||
code: INTERNAL_ERROR_CODE,
|
code: INTERNAL_ERROR_CODE,
|
||||||
message: format!("failed to fetch llmx rate limits: {err}"),
|
message: format!("failed to fetch codex rate limits: {err}"),
|
||||||
data: None,
|
data: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_user_saved_config(&self, request_id: RequestId) {
|
async fn get_user_saved_config(&self, request_id: RequestId) {
|
||||||
let toml_value = match load_config_as_toml(&self.config.llmx_home).await {
|
let toml_value = match load_config_as_toml(&self.config.codex_home).await {
|
||||||
Ok(val) => val,
|
Ok(val) => val,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
let error = JSONRPCErrorError {
|
let error = JSONRPCErrorError {
|
||||||
@@ -1028,7 +1035,7 @@ impl LlmxMessageProcessor {
|
|||||||
reasoning_effort,
|
reasoning_effort,
|
||||||
} = params;
|
} = params;
|
||||||
|
|
||||||
match ConfigEditsBuilder::new(&self.config.llmx_home)
|
match ConfigEditsBuilder::new(&self.config.codex_home)
|
||||||
.with_profile(self.config.active_profile.as_deref())
|
.with_profile(self.config.active_profile.as_deref())
|
||||||
.set_model(model.as_deref(), reasoning_effort)
|
.set_model(model.as_deref(), reasoning_effort)
|
||||||
.apply()
|
.apply()
|
||||||
@@ -1080,24 +1087,24 @@ impl LlmxMessageProcessor {
|
|||||||
.unwrap_or_else(|| self.config.sandbox_policy.clone());
|
.unwrap_or_else(|| self.config.sandbox_policy.clone());
|
||||||
|
|
||||||
let sandbox_type = match &effective_policy {
|
let sandbox_type = match &effective_policy {
|
||||||
llmx_core::protocol::SandboxPolicy::DangerFullAccess => {
|
codex_core::protocol::SandboxPolicy::DangerFullAccess => {
|
||||||
llmx_core::exec::SandboxType::None
|
codex_core::exec::SandboxType::None
|
||||||
}
|
}
|
||||||
_ => get_platform_sandbox().unwrap_or(llmx_core::exec::SandboxType::None),
|
_ => get_platform_sandbox().unwrap_or(codex_core::exec::SandboxType::None),
|
||||||
};
|
};
|
||||||
tracing::debug!("Sandbox type: {sandbox_type:?}");
|
tracing::debug!("Sandbox type: {sandbox_type:?}");
|
||||||
let llmx_linux_sandbox_exe = self.config.llmx_linux_sandbox_exe.clone();
|
let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
|
||||||
let outgoing = self.outgoing.clone();
|
let outgoing = self.outgoing.clone();
|
||||||
let req_id = request_id;
|
let req_id = request_id;
|
||||||
let sandbox_cwd = self.config.cwd.clone();
|
let sandbox_cwd = self.config.cwd.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
match llmx_core::exec::process_exec_tool_call(
|
match codex_core::exec::process_exec_tool_call(
|
||||||
exec_params,
|
exec_params,
|
||||||
sandbox_type,
|
sandbox_type,
|
||||||
&effective_policy,
|
&effective_policy,
|
||||||
sandbox_cwd.as_path(),
|
sandbox_cwd.as_path(),
|
||||||
&llmx_linux_sandbox_exe,
|
&codex_linux_sandbox_exe,
|
||||||
None,
|
None,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -1144,7 +1151,7 @@ impl LlmxMessageProcessor {
|
|||||||
approval_policy,
|
approval_policy,
|
||||||
sandbox_mode,
|
sandbox_mode,
|
||||||
model_provider,
|
model_provider,
|
||||||
llmx_linux_sandbox_exe: self.llmx_linux_sandbox_exe.clone(),
|
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||||
base_instructions,
|
base_instructions,
|
||||||
developer_instructions,
|
developer_instructions,
|
||||||
compact_prompt,
|
compact_prompt,
|
||||||
@@ -1200,7 +1207,7 @@ impl LlmxMessageProcessor {
|
|||||||
approval_policy: params.approval_policy.map(AskForApproval::to_core),
|
approval_policy: params.approval_policy.map(AskForApproval::to_core),
|
||||||
sandbox_mode: params.sandbox.map(SandboxMode::to_core),
|
sandbox_mode: params.sandbox.map(SandboxMode::to_core),
|
||||||
model_provider: params.model_provider,
|
model_provider: params.model_provider,
|
||||||
llmx_linux_sandbox_exe: self.llmx_linux_sandbox_exe.clone(),
|
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||||
base_instructions: params.base_instructions,
|
base_instructions: params.base_instructions,
|
||||||
developer_instructions: params.developer_instructions,
|
developer_instructions: params.developer_instructions,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
@@ -1298,7 +1305,7 @@ impl LlmxMessageProcessor {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let rollout_path = match find_conversation_path_by_id_str(
|
let rollout_path = match find_conversation_path_by_id_str(
|
||||||
&self.config.llmx_home,
|
&self.config.codex_home,
|
||||||
&conversation_id.to_string(),
|
&conversation_id.to_string(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -1379,7 +1386,7 @@ impl LlmxMessageProcessor {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let path = match find_conversation_path_by_id_str(
|
let path = match find_conversation_path_by_id_str(
|
||||||
&self.config.llmx_home,
|
&self.config.codex_home,
|
||||||
&conversation_id.to_string(),
|
&conversation_id.to_string(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -1481,14 +1488,14 @@ impl LlmxMessageProcessor {
|
|||||||
let path = match params {
|
let path = match params {
|
||||||
GetConversationSummaryParams::RolloutPath { rollout_path } => {
|
GetConversationSummaryParams::RolloutPath { rollout_path } => {
|
||||||
if rollout_path.is_relative() {
|
if rollout_path.is_relative() {
|
||||||
self.config.llmx_home.join(&rollout_path)
|
self.config.codex_home.join(&rollout_path)
|
||||||
} else {
|
} else {
|
||||||
rollout_path
|
rollout_path
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
GetConversationSummaryParams::ConversationId { conversation_id } => {
|
GetConversationSummaryParams::ConversationId { conversation_id } => {
|
||||||
match llmx_core::find_conversation_path_by_id_str(
|
match codex_core::find_conversation_path_by_id_str(
|
||||||
&self.config.llmx_home,
|
&self.config.codex_home,
|
||||||
&conversation_id.to_string(),
|
&conversation_id.to_string(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -1566,11 +1573,20 @@ impl LlmxMessageProcessor {
|
|||||||
let cursor_obj: Option<RolloutCursor> = cursor.as_ref().and_then(|s| parse_cursor(s));
|
let cursor_obj: Option<RolloutCursor> = cursor.as_ref().and_then(|s| parse_cursor(s));
|
||||||
let cursor_ref = cursor_obj.as_ref();
|
let cursor_ref = cursor_obj.as_ref();
|
||||||
|
|
||||||
let model_provider_filter = model_providers.filter(|providers| !providers.is_empty());
|
let model_provider_filter = match model_providers {
|
||||||
|
Some(providers) => {
|
||||||
|
if providers.is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(providers)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => Some(vec![self.config.model_provider_id.clone()]),
|
||||||
|
};
|
||||||
let fallback_provider = self.config.model_provider_id.clone();
|
let fallback_provider = self.config.model_provider_id.clone();
|
||||||
|
|
||||||
let page = match RolloutRecorder::list_conversations(
|
let page = match RolloutRecorder::list_conversations(
|
||||||
&self.config.llmx_home,
|
&self.config.codex_home,
|
||||||
page_size,
|
page_size,
|
||||||
cursor_ref,
|
cursor_ref,
|
||||||
INTERACTIVE_SESSION_SOURCES,
|
INTERACTIVE_SESSION_SOURCES,
|
||||||
@@ -1708,7 +1724,7 @@ impl LlmxMessageProcessor {
|
|||||||
approval_policy,
|
approval_policy,
|
||||||
sandbox_mode,
|
sandbox_mode,
|
||||||
model_provider,
|
model_provider,
|
||||||
llmx_linux_sandbox_exe: self.llmx_linux_sandbox_exe.clone(),
|
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||||
base_instructions,
|
base_instructions,
|
||||||
developer_instructions,
|
developer_instructions,
|
||||||
compact_prompt,
|
compact_prompt,
|
||||||
@@ -1746,7 +1762,7 @@ impl LlmxMessageProcessor {
|
|||||||
}
|
}
|
||||||
} else if let Some(conversation_id) = conversation_id {
|
} else if let Some(conversation_id) = conversation_id {
|
||||||
match find_conversation_path_by_id_str(
|
match find_conversation_path_by_id_str(
|
||||||
&self.config.llmx_home,
|
&self.config.codex_home,
|
||||||
&conversation_id.to_string(),
|
&conversation_id.to_string(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -1895,7 +1911,7 @@ impl LlmxMessageProcessor {
|
|||||||
rollout_path: &Path,
|
rollout_path: &Path,
|
||||||
) -> Result<(), JSONRPCErrorError> {
|
) -> Result<(), JSONRPCErrorError> {
|
||||||
// Verify rollout_path is under sessions dir.
|
// Verify rollout_path is under sessions dir.
|
||||||
let rollout_folder = self.config.llmx_home.join(llmx_core::SESSIONS_SUBDIR);
|
let rollout_folder = self.config.codex_home.join(codex_core::SESSIONS_SUBDIR);
|
||||||
|
|
||||||
let canonical_sessions_dir = match tokio::fs::canonicalize(&rollout_folder).await {
|
let canonical_sessions_dir = match tokio::fs::canonicalize(&rollout_folder).await {
|
||||||
Ok(path) => path,
|
Ok(path) => path,
|
||||||
@@ -2011,8 +2027,8 @@ impl LlmxMessageProcessor {
|
|||||||
let result: std::io::Result<()> = async {
|
let result: std::io::Result<()> = async {
|
||||||
let archive_folder = self
|
let archive_folder = self
|
||||||
.config
|
.config
|
||||||
.llmx_home
|
.codex_home
|
||||||
.join(llmx_core::ARCHIVED_SESSIONS_SUBDIR);
|
.join(codex_core::ARCHIVED_SESSIONS_SUBDIR);
|
||||||
tokio::fs::create_dir_all(&archive_folder).await?;
|
tokio::fs::create_dir_all(&archive_folder).await?;
|
||||||
tokio::fs::rename(&canonical_rollout_path, &archive_folder.join(&file_name)).await?;
|
tokio::fs::rename(&canonical_rollout_path, &archive_folder.join(&file_name)).await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -2354,7 +2370,7 @@ impl LlmxMessageProcessor {
|
|||||||
// JSON-serializing the `Event` as-is, but these should
|
// JSON-serializing the `Event` as-is, but these should
|
||||||
// be migrated to be variants of `ServerNotification`
|
// be migrated to be variants of `ServerNotification`
|
||||||
// instead.
|
// instead.
|
||||||
let method = format!("llmx/event/{}", event.msg);
|
let method = format!("codex/event/{}", event.msg);
|
||||||
let mut params = match serde_json::to_value(event.clone()) {
|
let mut params = match serde_json::to_value(event.clone()) {
|
||||||
Ok(serde_json::Value::Object(map)) => map,
|
Ok(serde_json::Value::Object(map)) => map,
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
@@ -2529,7 +2545,7 @@ impl LlmxMessageProcessor {
|
|||||||
async fn apply_bespoke_event_handling(
|
async fn apply_bespoke_event_handling(
|
||||||
event: Event,
|
event: Event,
|
||||||
conversation_id: ConversationId,
|
conversation_id: ConversationId,
|
||||||
conversation: Arc<LlmxConversation>,
|
conversation: Arc<CodexConversation>,
|
||||||
outgoing: Arc<OutgoingMessageSender>,
|
outgoing: Arc<OutgoingMessageSender>,
|
||||||
pending_interrupts: PendingInterrupts,
|
pending_interrupts: PendingInterrupts,
|
||||||
) {
|
) {
|
||||||
@@ -2637,14 +2653,14 @@ async fn derive_config_from_params(
|
|||||||
async fn on_patch_approval_response(
|
async fn on_patch_approval_response(
|
||||||
event_id: String,
|
event_id: String,
|
||||||
receiver: oneshot::Receiver<JsonRpcResult>,
|
receiver: oneshot::Receiver<JsonRpcResult>,
|
||||||
llmx: Arc<LlmxConversation>,
|
codex: Arc<CodexConversation>,
|
||||||
) {
|
) {
|
||||||
let response = receiver.await;
|
let response = receiver.await;
|
||||||
let value = match response {
|
let value = match response {
|
||||||
Ok(value) => value,
|
Ok(value) => value,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
error!("request failed: {err:?}");
|
error!("request failed: {err:?}");
|
||||||
if let Err(submit_err) = llmx
|
if let Err(submit_err) = codex
|
||||||
.submit(Op::PatchApproval {
|
.submit(Op::PatchApproval {
|
||||||
id: event_id.clone(),
|
id: event_id.clone(),
|
||||||
decision: ReviewDecision::Denied,
|
decision: ReviewDecision::Denied,
|
||||||
@@ -2665,7 +2681,7 @@ async fn on_patch_approval_response(
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
if let Err(err) = llmx
|
if let Err(err) = codex
|
||||||
.submit(Op::PatchApproval {
|
.submit(Op::PatchApproval {
|
||||||
id: event_id,
|
id: event_id,
|
||||||
decision: response.decision,
|
decision: response.decision,
|
||||||
@@ -2679,7 +2695,7 @@ async fn on_patch_approval_response(
|
|||||||
async fn on_exec_approval_response(
|
async fn on_exec_approval_response(
|
||||||
event_id: String,
|
event_id: String,
|
||||||
receiver: oneshot::Receiver<JsonRpcResult>,
|
receiver: oneshot::Receiver<JsonRpcResult>,
|
||||||
conversation: Arc<LlmxConversation>,
|
conversation: Arc<CodexConversation>,
|
||||||
) {
|
) {
|
||||||
let response = receiver.await;
|
let response = receiver.await;
|
||||||
let value = match response {
|
let value = match response {
|
||||||
@@ -2690,7 +2706,7 @@ async fn on_exec_approval_response(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// Try to deserialize `value` and then make the appropriate call to `llmx`.
|
// Try to deserialize `value` and then make the appropriate call to `codex`.
|
||||||
let response =
|
let response =
|
||||||
serde_json::from_value::<ExecCommandApprovalResponse>(value).unwrap_or_else(|err| {
|
serde_json::from_value::<ExecCommandApprovalResponse>(value).unwrap_or_else(|err| {
|
||||||
error!("failed to deserialize ExecCommandApprovalResponse: {err}");
|
error!("failed to deserialize ExecCommandApprovalResponse: {err}");
|
||||||
@@ -2781,7 +2797,7 @@ fn extract_conversation_summary(
|
|||||||
let preview = head
|
let preview = head
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|value| serde_json::from_value::<ResponseItem>(value.clone()).ok())
|
.filter_map(|value| serde_json::from_value::<ResponseItem>(value.clone()).ok())
|
||||||
.find_map(|item| match llmx_core::parse_turn_item(&item) {
|
.find_map(|item| match codex_core::parse_turn_item(&item) {
|
||||||
Some(TurnItem::UserMessage(user)) => Some(user.message()),
|
Some(TurnItem::UserMessage(user)) => Some(user.message()),
|
||||||
_ => None,
|
_ => None,
|
||||||
})?;
|
})?;
|
||||||
@@ -2855,7 +2871,7 @@ fn summary_to_thread(summary: ConversationSummary) -> Thread {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use llmx_protocol::protocol::SessionSource;
|
use codex_protocol::protocol::SessionSource;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
@@ -2871,7 +2887,7 @@ mod tests {
|
|||||||
"id": conversation_id.to_string(),
|
"id": conversation_id.to_string(),
|
||||||
"timestamp": timestamp,
|
"timestamp": timestamp,
|
||||||
"cwd": "/",
|
"cwd": "/",
|
||||||
"originator": "llmx",
|
"originator": "codex",
|
||||||
"cli_version": "0.0.0",
|
"cli_version": "0.0.0",
|
||||||
"instructions": null,
|
"instructions": null,
|
||||||
"model_provider": "test-provider"
|
"model_provider": "test-provider"
|
||||||
@@ -2918,9 +2934,9 @@ mod tests {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn read_summary_from_rollout_returns_empty_preview_when_no_user_message() -> Result<()> {
|
async fn read_summary_from_rollout_returns_empty_preview_when_no_user_message() -> Result<()> {
|
||||||
use llmx_protocol::protocol::RolloutItem;
|
use codex_protocol::protocol::RolloutItem;
|
||||||
use llmx_protocol::protocol::RolloutLine;
|
use codex_protocol::protocol::RolloutLine;
|
||||||
use llmx_protocol::protocol::SessionMetaLine;
|
use codex_protocol::protocol::SessionMetaLine;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
|
|
||||||
let temp_dir = TempDir::new()?;
|
let temp_dir = TempDir::new()?;
|
||||||
@@ -5,8 +5,8 @@ use std::path::PathBuf;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
|
|
||||||
use llmx_app_server_protocol::FuzzyFileSearchResult;
|
use codex_app_server_protocol::FuzzyFileSearchResult;
|
||||||
use llmx_file_search as file_search;
|
use codex_file_search as file_search;
|
||||||
use tokio::task::JoinSet;
|
use tokio::task::JoinSet;
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
#![deny(clippy::print_stdout, clippy::print_stderr)]
|
#![deny(clippy::print_stdout, clippy::print_stderr)]
|
||||||
|
|
||||||
use llmx_common::CliConfigOverrides;
|
use codex_common::CliConfigOverrides;
|
||||||
use llmx_core::config::Config;
|
use codex_core::config::Config;
|
||||||
use llmx_core::config::ConfigOverrides;
|
use codex_core::config::ConfigOverrides;
|
||||||
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
|
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
|
||||||
use std::io::ErrorKind;
|
use std::io::ErrorKind;
|
||||||
use std::io::Result as IoResult;
|
use std::io::Result as IoResult;
|
||||||
@@ -11,8 +11,8 @@ use std::path::PathBuf;
|
|||||||
use crate::message_processor::MessageProcessor;
|
use crate::message_processor::MessageProcessor;
|
||||||
use crate::outgoing_message::OutgoingMessage;
|
use crate::outgoing_message::OutgoingMessage;
|
||||||
use crate::outgoing_message::OutgoingMessageSender;
|
use crate::outgoing_message::OutgoingMessageSender;
|
||||||
use llmx_app_server_protocol::JSONRPCMessage;
|
use codex_app_server_protocol::JSONRPCMessage;
|
||||||
use llmx_feedback::LlmxFeedback;
|
use codex_feedback::CodexFeedback;
|
||||||
use tokio::io::AsyncBufReadExt;
|
use tokio::io::AsyncBufReadExt;
|
||||||
use tokio::io::AsyncWriteExt;
|
use tokio::io::AsyncWriteExt;
|
||||||
use tokio::io::BufReader;
|
use tokio::io::BufReader;
|
||||||
@@ -28,9 +28,9 @@ use tracing_subscriber::filter::Targets;
|
|||||||
use tracing_subscriber::layer::SubscriberExt;
|
use tracing_subscriber::layer::SubscriberExt;
|
||||||
use tracing_subscriber::util::SubscriberInitExt;
|
use tracing_subscriber::util::SubscriberInitExt;
|
||||||
|
|
||||||
|
mod codex_message_processor;
|
||||||
mod error_code;
|
mod error_code;
|
||||||
mod fuzzy_file_search;
|
mod fuzzy_file_search;
|
||||||
mod llmx_message_processor;
|
|
||||||
mod message_processor;
|
mod message_processor;
|
||||||
mod models;
|
mod models;
|
||||||
mod outgoing_message;
|
mod outgoing_message;
|
||||||
@@ -41,7 +41,7 @@ mod outgoing_message;
|
|||||||
const CHANNEL_CAPACITY: usize = 128;
|
const CHANNEL_CAPACITY: usize = 128;
|
||||||
|
|
||||||
pub async fn run_main(
|
pub async fn run_main(
|
||||||
llmx_linux_sandbox_exe: Option<PathBuf>,
|
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||||
cli_config_overrides: CliConfigOverrides,
|
cli_config_overrides: CliConfigOverrides,
|
||||||
) -> IoResult<()> {
|
) -> IoResult<()> {
|
||||||
// Set up channels.
|
// Set up channels.
|
||||||
@@ -85,10 +85,10 @@ pub async fn run_main(
|
|||||||
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
|
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let feedback = LlmxFeedback::new();
|
let feedback = CodexFeedback::new();
|
||||||
|
|
||||||
let otel =
|
let otel =
|
||||||
llmx_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION")).map_err(|e| {
|
codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION")).map_err(|e| {
|
||||||
std::io::Error::new(
|
std::io::Error::new(
|
||||||
ErrorKind::InvalidData,
|
ErrorKind::InvalidData,
|
||||||
format!("error loading otel config: {e}"),
|
format!("error loading otel config: {e}"),
|
||||||
@@ -112,7 +112,7 @@ pub async fn run_main(
|
|||||||
.with(feedback_layer)
|
.with(feedback_layer)
|
||||||
.with(otel.as_ref().map(|provider| {
|
.with(otel.as_ref().map(|provider| {
|
||||||
OpenTelemetryTracingBridge::new(&provider.logger).with_filter(
|
OpenTelemetryTracingBridge::new(&provider.logger).with_filter(
|
||||||
tracing_subscriber::filter::filter_fn(llmx_core::otel_init::llmx_export_filter),
|
tracing_subscriber::filter::filter_fn(codex_core::otel_init::codex_export_filter),
|
||||||
)
|
)
|
||||||
}))
|
}))
|
||||||
.try_init();
|
.try_init();
|
||||||
@@ -122,7 +122,7 @@ pub async fn run_main(
|
|||||||
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
|
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
|
||||||
let mut processor = MessageProcessor::new(
|
let mut processor = MessageProcessor::new(
|
||||||
outgoing_message_sender,
|
outgoing_message_sender,
|
||||||
llmx_linux_sandbox_exe,
|
codex_linux_sandbox_exe,
|
||||||
std::sync::Arc::new(config),
|
std::sync::Arc::new(config),
|
||||||
feedback.clone(),
|
feedback.clone(),
|
||||||
);
|
);
|
||||||
10
codex-rs/app-server/src/main.rs
Normal file
10
codex-rs/app-server/src/main.rs
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
use codex_app_server::run_main;
|
||||||
|
use codex_arg0::arg0_dispatch_or_else;
|
||||||
|
use codex_common::CliConfigOverrides;
|
||||||
|
|
||||||
|
fn main() -> anyhow::Result<()> {
|
||||||
|
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
|
||||||
|
run_main(codex_linux_sandbox_exe, CliConfigOverrides::default()).await?;
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -1,29 +1,29 @@
|
|||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use crate::codex_message_processor::CodexMessageProcessor;
|
||||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||||
use crate::llmx_message_processor::LlmxMessageProcessor;
|
|
||||||
use crate::outgoing_message::OutgoingMessageSender;
|
use crate::outgoing_message::OutgoingMessageSender;
|
||||||
use llmx_app_server_protocol::ClientInfo;
|
use codex_app_server_protocol::ClientInfo;
|
||||||
use llmx_app_server_protocol::ClientRequest;
|
use codex_app_server_protocol::ClientRequest;
|
||||||
use llmx_app_server_protocol::InitializeResponse;
|
use codex_app_server_protocol::InitializeResponse;
|
||||||
|
|
||||||
use llmx_app_server_protocol::JSONRPCError;
|
use codex_app_server_protocol::JSONRPCError;
|
||||||
use llmx_app_server_protocol::JSONRPCErrorError;
|
use codex_app_server_protocol::JSONRPCErrorError;
|
||||||
use llmx_app_server_protocol::JSONRPCNotification;
|
use codex_app_server_protocol::JSONRPCNotification;
|
||||||
use llmx_app_server_protocol::JSONRPCRequest;
|
use codex_app_server_protocol::JSONRPCRequest;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_core::AuthManager;
|
use codex_core::AuthManager;
|
||||||
use llmx_core::ConversationManager;
|
use codex_core::ConversationManager;
|
||||||
use llmx_core::config::Config;
|
use codex_core::config::Config;
|
||||||
use llmx_core::default_client::USER_AGENT_SUFFIX;
|
use codex_core::default_client::USER_AGENT_SUFFIX;
|
||||||
use llmx_core::default_client::get_llmx_user_agent;
|
use codex_core::default_client::get_codex_user_agent;
|
||||||
use llmx_feedback::LlmxFeedback;
|
use codex_feedback::CodexFeedback;
|
||||||
use llmx_protocol::protocol::SessionSource;
|
use codex_protocol::protocol::SessionSource;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
pub(crate) struct MessageProcessor {
|
pub(crate) struct MessageProcessor {
|
||||||
outgoing: Arc<OutgoingMessageSender>,
|
outgoing: Arc<OutgoingMessageSender>,
|
||||||
llmx_message_processor: LlmxMessageProcessor,
|
codex_message_processor: CodexMessageProcessor,
|
||||||
initialized: bool,
|
initialized: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -32,13 +32,13 @@ impl MessageProcessor {
|
|||||||
/// `Sender` so handlers can enqueue messages to be written to stdout.
|
/// `Sender` so handlers can enqueue messages to be written to stdout.
|
||||||
pub(crate) fn new(
|
pub(crate) fn new(
|
||||||
outgoing: OutgoingMessageSender,
|
outgoing: OutgoingMessageSender,
|
||||||
llmx_linux_sandbox_exe: Option<PathBuf>,
|
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||||
config: Arc<Config>,
|
config: Arc<Config>,
|
||||||
feedback: LlmxFeedback,
|
feedback: CodexFeedback,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let outgoing = Arc::new(outgoing);
|
let outgoing = Arc::new(outgoing);
|
||||||
let auth_manager = AuthManager::shared(
|
let auth_manager = AuthManager::shared(
|
||||||
config.llmx_home.clone(),
|
config.codex_home.clone(),
|
||||||
false,
|
false,
|
||||||
config.cli_auth_credentials_store_mode,
|
config.cli_auth_credentials_store_mode,
|
||||||
);
|
);
|
||||||
@@ -46,18 +46,18 @@ impl MessageProcessor {
|
|||||||
auth_manager.clone(),
|
auth_manager.clone(),
|
||||||
SessionSource::VSCode,
|
SessionSource::VSCode,
|
||||||
));
|
));
|
||||||
let llmx_message_processor = LlmxMessageProcessor::new(
|
let codex_message_processor = CodexMessageProcessor::new(
|
||||||
auth_manager,
|
auth_manager,
|
||||||
conversation_manager,
|
conversation_manager,
|
||||||
outgoing.clone(),
|
outgoing.clone(),
|
||||||
llmx_linux_sandbox_exe,
|
codex_linux_sandbox_exe,
|
||||||
config,
|
config,
|
||||||
feedback,
|
feedback,
|
||||||
);
|
);
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
outgoing,
|
outgoing,
|
||||||
llmx_message_processor,
|
codex_message_processor,
|
||||||
initialized: false,
|
initialized: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -77,8 +77,8 @@ impl MessageProcessor {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let llmx_request = match serde_json::from_value::<ClientRequest>(request_json) {
|
let codex_request = match serde_json::from_value::<ClientRequest>(request_json) {
|
||||||
Ok(llmx_request) => llmx_request,
|
Ok(codex_request) => codex_request,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
let error = JSONRPCErrorError {
|
let error = JSONRPCErrorError {
|
||||||
code: INVALID_REQUEST_ERROR_CODE,
|
code: INVALID_REQUEST_ERROR_CODE,
|
||||||
@@ -90,8 +90,8 @@ impl MessageProcessor {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
match llmx_request {
|
match codex_request {
|
||||||
// Handle Initialize internally so LlmxMessageProcessor does not have to concern
|
// Handle Initialize internally so CodexMessageProcessor does not have to concern
|
||||||
// itself with the `initialized` bool.
|
// itself with the `initialized` bool.
|
||||||
ClientRequest::Initialize { request_id, params } => {
|
ClientRequest::Initialize { request_id, params } => {
|
||||||
if self.initialized {
|
if self.initialized {
|
||||||
@@ -113,7 +113,7 @@ impl MessageProcessor {
|
|||||||
*suffix = Some(user_agent_suffix);
|
*suffix = Some(user_agent_suffix);
|
||||||
}
|
}
|
||||||
|
|
||||||
let user_agent = get_llmx_user_agent();
|
let user_agent = get_codex_user_agent();
|
||||||
let response = InitializeResponse { user_agent };
|
let response = InitializeResponse { user_agent };
|
||||||
self.outgoing.send_response(request_id, response).await;
|
self.outgoing.send_response(request_id, response).await;
|
||||||
|
|
||||||
@@ -134,8 +134,8 @@ impl MessageProcessor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.llmx_message_processor
|
self.codex_message_processor
|
||||||
.process_request(llmx_request)
|
.process_request(codex_request)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1,9 +1,9 @@
|
|||||||
use llmx_app_server_protocol::AuthMode;
|
use codex_app_server_protocol::AuthMode;
|
||||||
use llmx_app_server_protocol::Model;
|
use codex_app_server_protocol::Model;
|
||||||
use llmx_app_server_protocol::ReasoningEffortOption;
|
use codex_app_server_protocol::ReasoningEffortOption;
|
||||||
use llmx_common::model_presets::ModelPreset;
|
use codex_common::model_presets::ModelPreset;
|
||||||
use llmx_common::model_presets::ReasoningEffortPreset;
|
use codex_common::model_presets::ReasoningEffortPreset;
|
||||||
use llmx_common::model_presets::builtin_model_presets;
|
use codex_common::model_presets::builtin_model_presets;
|
||||||
|
|
||||||
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
|
pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
|
||||||
builtin_model_presets(auth_mode)
|
builtin_model_presets(auth_mode)
|
||||||
@@ -2,12 +2,12 @@ use std::collections::HashMap;
|
|||||||
use std::sync::atomic::AtomicI64;
|
use std::sync::atomic::AtomicI64;
|
||||||
use std::sync::atomic::Ordering;
|
use std::sync::atomic::Ordering;
|
||||||
|
|
||||||
use llmx_app_server_protocol::JSONRPCErrorError;
|
use codex_app_server_protocol::JSONRPCErrorError;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::Result;
|
use codex_app_server_protocol::Result;
|
||||||
use llmx_app_server_protocol::ServerNotification;
|
use codex_app_server_protocol::ServerNotification;
|
||||||
use llmx_app_server_protocol::ServerRequest;
|
use codex_app_server_protocol::ServerRequest;
|
||||||
use llmx_app_server_protocol::ServerRequestPayload;
|
use codex_app_server_protocol::ServerRequestPayload;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::Mutex;
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
@@ -141,13 +141,13 @@ pub(crate) struct OutgoingError {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use llmx_app_server_protocol::AccountLoginCompletedNotification;
|
use codex_app_server_protocol::AccountLoginCompletedNotification;
|
||||||
use llmx_app_server_protocol::AccountRateLimitsUpdatedNotification;
|
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
|
||||||
use llmx_app_server_protocol::AccountUpdatedNotification;
|
use codex_app_server_protocol::AccountUpdatedNotification;
|
||||||
use llmx_app_server_protocol::AuthMode;
|
use codex_app_server_protocol::AuthMode;
|
||||||
use llmx_app_server_protocol::LoginChatGptCompleteNotification;
|
use codex_app_server_protocol::LoginChatGptCompleteNotification;
|
||||||
use llmx_app_server_protocol::RateLimitSnapshot;
|
use codex_app_server_protocol::RateLimitSnapshot;
|
||||||
use llmx_app_server_protocol::RateLimitWindow;
|
use codex_app_server_protocol::RateLimitWindow;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
@@ -11,9 +11,9 @@ anyhow = { workspace = true }
|
|||||||
assert_cmd = { workspace = true }
|
assert_cmd = { workspace = true }
|
||||||
base64 = { workspace = true }
|
base64 = { workspace = true }
|
||||||
chrono = { workspace = true }
|
chrono = { workspace = true }
|
||||||
llmx-app-server-protocol = { workspace = true }
|
codex-app-server-protocol = { workspace = true }
|
||||||
llmx-core = { workspace = true }
|
codex-core = { workspace = true }
|
||||||
llmx-protocol = { workspace = true }
|
codex-protocol = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
tokio = { workspace = true, features = [
|
tokio = { workspace = true, features = [
|
||||||
@@ -6,11 +6,11 @@ use base64::Engine;
|
|||||||
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
|
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
|
||||||
use chrono::DateTime;
|
use chrono::DateTime;
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use llmx_core::auth::AuthCredentialsStoreMode;
|
use codex_core::auth::AuthCredentialsStoreMode;
|
||||||
use llmx_core::auth::AuthDotJson;
|
use codex_core::auth::AuthDotJson;
|
||||||
use llmx_core::auth::save_auth;
|
use codex_core::auth::save_auth;
|
||||||
use llmx_core::token_data::TokenData;
|
use codex_core::token_data::TokenData;
|
||||||
use llmx_core::token_data::parse_id_token;
|
use codex_core::token_data::parse_id_token;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
|
|
||||||
/// Builder for writing a fake ChatGPT auth.json in tests.
|
/// Builder for writing a fake ChatGPT auth.json in tests.
|
||||||
@@ -110,7 +110,7 @@ pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn write_chatgpt_auth(
|
pub fn write_chatgpt_auth(
|
||||||
llmx_home: &Path,
|
codex_home: &Path,
|
||||||
fixture: ChatGptAuthFixture,
|
fixture: ChatGptAuthFixture,
|
||||||
cli_auth_credentials_store_mode: AuthCredentialsStoreMode,
|
cli_auth_credentials_store_mode: AuthCredentialsStoreMode,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
@@ -131,5 +131,5 @@ pub fn write_chatgpt_auth(
|
|||||||
last_refresh,
|
last_refresh,
|
||||||
};
|
};
|
||||||
|
|
||||||
save_auth(llmx_home, &auth, cli_auth_credentials_store_mode).context("write auth.json")
|
save_auth(codex_home, &auth, cli_auth_credentials_store_mode).context("write auth.json")
|
||||||
}
|
}
|
||||||
@@ -8,7 +8,7 @@ pub use auth_fixtures::ChatGptAuthFixture;
|
|||||||
pub use auth_fixtures::ChatGptIdTokenClaims;
|
pub use auth_fixtures::ChatGptIdTokenClaims;
|
||||||
pub use auth_fixtures::encode_id_token;
|
pub use auth_fixtures::encode_id_token;
|
||||||
pub use auth_fixtures::write_chatgpt_auth;
|
pub use auth_fixtures::write_chatgpt_auth;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
pub use mcp_process::McpProcess;
|
pub use mcp_process::McpProcess;
|
||||||
pub use mock_model_server::create_mock_chat_completions_server;
|
pub use mock_model_server::create_mock_chat_completions_server;
|
||||||
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
|
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
|
||||||
@@ -20,6 +20,6 @@ use serde::de::DeserializeOwned;
|
|||||||
|
|
||||||
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {
|
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {
|
||||||
let value = serde_json::to_value(response.result)?;
|
let value = serde_json::to_value(response.result)?;
|
||||||
let llmx_response = serde_json::from_value(value)?;
|
let codex_response = serde_json::from_value(value)?;
|
||||||
Ok(llmx_response)
|
Ok(codex_response)
|
||||||
}
|
}
|
||||||
@@ -12,39 +12,39 @@ use tokio::process::ChildStdout;
|
|||||||
|
|
||||||
use anyhow::Context;
|
use anyhow::Context;
|
||||||
use assert_cmd::prelude::*;
|
use assert_cmd::prelude::*;
|
||||||
use llmx_app_server_protocol::AddConversationListenerParams;
|
use codex_app_server_protocol::AddConversationListenerParams;
|
||||||
use llmx_app_server_protocol::ArchiveConversationParams;
|
use codex_app_server_protocol::ArchiveConversationParams;
|
||||||
use llmx_app_server_protocol::CancelLoginAccountParams;
|
use codex_app_server_protocol::CancelLoginAccountParams;
|
||||||
use llmx_app_server_protocol::CancelLoginChatGptParams;
|
use codex_app_server_protocol::CancelLoginChatGptParams;
|
||||||
use llmx_app_server_protocol::ClientInfo;
|
use codex_app_server_protocol::ClientInfo;
|
||||||
use llmx_app_server_protocol::ClientNotification;
|
use codex_app_server_protocol::ClientNotification;
|
||||||
use llmx_app_server_protocol::FeedbackUploadParams;
|
use codex_app_server_protocol::FeedbackUploadParams;
|
||||||
use llmx_app_server_protocol::GetAccountParams;
|
use codex_app_server_protocol::GetAccountParams;
|
||||||
use llmx_app_server_protocol::GetAuthStatusParams;
|
use codex_app_server_protocol::GetAuthStatusParams;
|
||||||
use llmx_app_server_protocol::InitializeParams;
|
use codex_app_server_protocol::InitializeParams;
|
||||||
use llmx_app_server_protocol::InterruptConversationParams;
|
use codex_app_server_protocol::InterruptConversationParams;
|
||||||
use llmx_app_server_protocol::JSONRPCError;
|
use codex_app_server_protocol::JSONRPCError;
|
||||||
use llmx_app_server_protocol::JSONRPCMessage;
|
use codex_app_server_protocol::JSONRPCMessage;
|
||||||
use llmx_app_server_protocol::JSONRPCNotification;
|
use codex_app_server_protocol::JSONRPCNotification;
|
||||||
use llmx_app_server_protocol::JSONRPCRequest;
|
use codex_app_server_protocol::JSONRPCRequest;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::ListConversationsParams;
|
use codex_app_server_protocol::ListConversationsParams;
|
||||||
use llmx_app_server_protocol::LoginApiKeyParams;
|
use codex_app_server_protocol::LoginApiKeyParams;
|
||||||
use llmx_app_server_protocol::ModelListParams;
|
use codex_app_server_protocol::ModelListParams;
|
||||||
use llmx_app_server_protocol::NewConversationParams;
|
use codex_app_server_protocol::NewConversationParams;
|
||||||
use llmx_app_server_protocol::RemoveConversationListenerParams;
|
use codex_app_server_protocol::RemoveConversationListenerParams;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::ResumeConversationParams;
|
use codex_app_server_protocol::ResumeConversationParams;
|
||||||
use llmx_app_server_protocol::SendUserMessageParams;
|
use codex_app_server_protocol::SendUserMessageParams;
|
||||||
use llmx_app_server_protocol::SendUserTurnParams;
|
use codex_app_server_protocol::SendUserTurnParams;
|
||||||
use llmx_app_server_protocol::ServerRequest;
|
use codex_app_server_protocol::ServerRequest;
|
||||||
use llmx_app_server_protocol::SetDefaultModelParams;
|
use codex_app_server_protocol::SetDefaultModelParams;
|
||||||
use llmx_app_server_protocol::ThreadArchiveParams;
|
use codex_app_server_protocol::ThreadArchiveParams;
|
||||||
use llmx_app_server_protocol::ThreadListParams;
|
use codex_app_server_protocol::ThreadListParams;
|
||||||
use llmx_app_server_protocol::ThreadResumeParams;
|
use codex_app_server_protocol::ThreadResumeParams;
|
||||||
use llmx_app_server_protocol::ThreadStartParams;
|
use codex_app_server_protocol::ThreadStartParams;
|
||||||
use llmx_app_server_protocol::TurnInterruptParams;
|
use codex_app_server_protocol::TurnInterruptParams;
|
||||||
use llmx_app_server_protocol::TurnStartParams;
|
use codex_app_server_protocol::TurnStartParams;
|
||||||
use std::process::Command as StdCommand;
|
use std::process::Command as StdCommand;
|
||||||
use tokio::process::Command;
|
use tokio::process::Command;
|
||||||
|
|
||||||
@@ -61,8 +61,8 @@ pub struct McpProcess {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl McpProcess {
|
impl McpProcess {
|
||||||
pub async fn new(llmx_home: &Path) -> anyhow::Result<Self> {
|
pub async fn new(codex_home: &Path) -> anyhow::Result<Self> {
|
||||||
Self::new_with_env(llmx_home, &[]).await
|
Self::new_with_env(codex_home, &[]).await
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new MCP process, allowing tests to override or remove
|
/// Creates a new MCP process, allowing tests to override or remove
|
||||||
@@ -71,12 +71,12 @@ impl McpProcess {
|
|||||||
/// Pass a tuple of (key, Some(value)) to set/override, or (key, None) to
|
/// Pass a tuple of (key, Some(value)) to set/override, or (key, None) to
|
||||||
/// remove a variable from the child's environment.
|
/// remove a variable from the child's environment.
|
||||||
pub async fn new_with_env(
|
pub async fn new_with_env(
|
||||||
llmx_home: &Path,
|
codex_home: &Path,
|
||||||
env_overrides: &[(&str, Option<&str>)],
|
env_overrides: &[(&str, Option<&str>)],
|
||||||
) -> anyhow::Result<Self> {
|
) -> anyhow::Result<Self> {
|
||||||
// Use assert_cmd to locate the binary path and then switch to tokio::process::Command
|
// Use assert_cmd to locate the binary path and then switch to tokio::process::Command
|
||||||
let std_cmd = StdCommand::cargo_bin("llmx-app-server")
|
let std_cmd = StdCommand::cargo_bin("codex-app-server")
|
||||||
.context("should find binary for llmx-mcp-server")?;
|
.context("should find binary for codex-mcp-server")?;
|
||||||
|
|
||||||
let program = std_cmd.get_program().to_owned();
|
let program = std_cmd.get_program().to_owned();
|
||||||
|
|
||||||
@@ -85,7 +85,7 @@ impl McpProcess {
|
|||||||
cmd.stdin(Stdio::piped());
|
cmd.stdin(Stdio::piped());
|
||||||
cmd.stdout(Stdio::piped());
|
cmd.stdout(Stdio::piped());
|
||||||
cmd.stderr(Stdio::piped());
|
cmd.stderr(Stdio::piped());
|
||||||
cmd.env("LLMX_HOME", llmx_home);
|
cmd.env("CODEX_HOME", codex_home);
|
||||||
cmd.env("RUST_LOG", "debug");
|
cmd.env("RUST_LOG", "debug");
|
||||||
|
|
||||||
for (k, v) in env_overrides {
|
for (k, v) in env_overrides {
|
||||||
@@ -102,7 +102,7 @@ impl McpProcess {
|
|||||||
let mut process = cmd
|
let mut process = cmd
|
||||||
.kill_on_drop(true)
|
.kill_on_drop(true)
|
||||||
.spawn()
|
.spawn()
|
||||||
.context("llmx-mcp-server proc should start")?;
|
.context("codex-mcp-server proc should start")?;
|
||||||
let stdin = process
|
let stdin = process
|
||||||
.stdin
|
.stdin
|
||||||
.take()
|
.take()
|
||||||
@@ -136,9 +136,9 @@ impl McpProcess {
|
|||||||
pub async fn initialize(&mut self) -> anyhow::Result<()> {
|
pub async fn initialize(&mut self) -> anyhow::Result<()> {
|
||||||
let params = Some(serde_json::to_value(InitializeParams {
|
let params = Some(serde_json::to_value(InitializeParams {
|
||||||
client_info: ClientInfo {
|
client_info: ClientInfo {
|
||||||
name: "llmx-app-server-tests".to_string(),
|
name: "codex-app-server-tests".to_string(),
|
||||||
title: None,
|
title: None,
|
||||||
version: "0.1.7".to_string(),
|
version: "0.1.0".to_string(),
|
||||||
},
|
},
|
||||||
})?);
|
})?);
|
||||||
let req_id = self.send_request("initialize", params).await?;
|
let req_id = self.send_request("initialize", params).await?;
|
||||||
@@ -624,7 +624,7 @@ impl McpProcess {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn enqueue_user_message(&mut self, notification: JSONRPCNotification) {
|
fn enqueue_user_message(&mut self, notification: JSONRPCNotification) {
|
||||||
if notification.method == "llmx/event/user_message" {
|
if notification.method == "codex/event/user_message" {
|
||||||
self.pending_user_messages.push_back(notification);
|
self.pending_user_messages.push_back(notification);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1,14 +1,14 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use llmx_protocol::ConversationId;
|
use codex_protocol::ConversationId;
|
||||||
use llmx_protocol::protocol::SessionMeta;
|
use codex_protocol::protocol::SessionMeta;
|
||||||
use llmx_protocol::protocol::SessionSource;
|
use codex_protocol::protocol::SessionSource;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
/// Create a minimal rollout file under `LLMX_HOME/sessions/YYYY/MM/DD/`.
|
/// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`.
|
||||||
///
|
///
|
||||||
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
|
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
|
||||||
/// - `meta_rfc3339` is the envelope timestamp used in JSON lines.
|
/// - `meta_rfc3339` is the envelope timestamp used in JSON lines.
|
||||||
@@ -17,7 +17,7 @@ use uuid::Uuid;
|
|||||||
///
|
///
|
||||||
/// Returns the generated conversation/session UUID as a string.
|
/// Returns the generated conversation/session UUID as a string.
|
||||||
pub fn create_fake_rollout(
|
pub fn create_fake_rollout(
|
||||||
llmx_home: &Path,
|
codex_home: &Path,
|
||||||
filename_ts: &str,
|
filename_ts: &str,
|
||||||
meta_rfc3339: &str,
|
meta_rfc3339: &str,
|
||||||
preview: &str,
|
preview: &str,
|
||||||
@@ -31,7 +31,7 @@ pub fn create_fake_rollout(
|
|||||||
let year = &filename_ts[0..4];
|
let year = &filename_ts[0..4];
|
||||||
let month = &filename_ts[5..7];
|
let month = &filename_ts[5..7];
|
||||||
let day = &filename_ts[8..10];
|
let day = &filename_ts[8..10];
|
||||||
let dir = llmx_home.join("sessions").join(year).join(month).join(day);
|
let dir = codex_home.join("sessions").join(year).join(month).join(day);
|
||||||
fs::create_dir_all(&dir)?;
|
fs::create_dir_all(&dir)?;
|
||||||
|
|
||||||
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
|
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
|
||||||
@@ -41,7 +41,7 @@ pub fn create_fake_rollout(
|
|||||||
id: conversation_id,
|
id: conversation_id,
|
||||||
timestamp: meta_rfc3339.to_string(),
|
timestamp: meta_rfc3339.to_string(),
|
||||||
cwd: PathBuf::from("/"),
|
cwd: PathBuf::from("/"),
|
||||||
originator: "llmx".to_string(),
|
originator: "codex".to_string(),
|
||||||
cli_version: "0.0.0".to_string(),
|
cli_version: "0.0.0".to_string(),
|
||||||
instructions: None,
|
instructions: None,
|
||||||
source: SessionSource::Cli,
|
source: SessionSource::Cli,
|
||||||
@@ -1,13 +1,13 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::ArchiveConversationParams;
|
use codex_app_server_protocol::ArchiveConversationParams;
|
||||||
use llmx_app_server_protocol::ArchiveConversationResponse;
|
use codex_app_server_protocol::ArchiveConversationResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::NewConversationParams;
|
use codex_app_server_protocol::NewConversationParams;
|
||||||
use llmx_app_server_protocol::NewConversationResponse;
|
use codex_app_server_protocol::NewConversationResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_core::ARCHIVED_SESSIONS_SUBDIR;
|
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
@@ -16,10 +16,10 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<()> {
|
async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let new_request_id = mcp
|
let new_request_id = mcp
|
||||||
@@ -61,7 +61,7 @@ async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<
|
|||||||
let _: ArchiveConversationResponse =
|
let _: ArchiveConversationResponse =
|
||||||
to_response::<ArchiveConversationResponse>(archive_response)?;
|
to_response::<ArchiveConversationResponse>(archive_response)?;
|
||||||
|
|
||||||
let archived_directory = llmx_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
|
let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
|
||||||
let archived_rollout_path =
|
let archived_rollout_path =
|
||||||
archived_directory.join(rollout_path.file_name().unwrap_or_else(|| {
|
archived_directory.join(rollout_path.file_name().unwrap_or_else(|| {
|
||||||
panic!("rollout path {} missing file name", rollout_path.display())
|
panic!("rollout path {} missing file name", rollout_path.display())
|
||||||
@@ -81,8 +81,8 @@ async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(config_toml, config_contents())
|
std::fs::write(config_toml, config_contents())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1,14 +1,14 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::AuthMode;
|
use codex_app_server_protocol::AuthMode;
|
||||||
use llmx_app_server_protocol::GetAuthStatusParams;
|
use codex_app_server_protocol::GetAuthStatusParams;
|
||||||
use llmx_app_server_protocol::GetAuthStatusResponse;
|
use codex_app_server_protocol::GetAuthStatusResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCError;
|
use codex_app_server_protocol::JSONRPCError;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::LoginApiKeyParams;
|
use codex_app_server_protocol::LoginApiKeyParams;
|
||||||
use llmx_app_server_protocol::LoginApiKeyResponse;
|
use codex_app_server_protocol::LoginApiKeyResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
@@ -17,10 +17,10 @@ use tokio::time::timeout;
|
|||||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||||
|
|
||||||
fn create_config_toml_custom_provider(
|
fn create_config_toml_custom_provider(
|
||||||
llmx_home: &Path,
|
codex_home: &Path,
|
||||||
requires_openai_auth: bool,
|
requires_openai_auth: bool,
|
||||||
) -> std::io::Result<()> {
|
) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
let requires_line = if requires_openai_auth {
|
let requires_line = if requires_openai_auth {
|
||||||
"requires_openai_auth = true\n"
|
"requires_openai_auth = true\n"
|
||||||
} else {
|
} else {
|
||||||
@@ -46,8 +46,8 @@ stream_max_retries = 0
|
|||||||
std::fs::write(config_toml, contents)
|
std::fs::write(config_toml, contents)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
r#"
|
r#"
|
||||||
@@ -58,8 +58,8 @@ sandbox_mode = "danger-full-access"
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml_forced_login(llmx_home: &Path, forced_method: &str) -> std::io::Result<()> {
|
fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
let contents = format!(
|
let contents = format!(
|
||||||
r#"
|
r#"
|
||||||
model = "mock-model"
|
model = "mock-model"
|
||||||
@@ -89,10 +89,10 @@ async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) ->
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn get_auth_status_no_auth() -> Result<()> {
|
async fn get_auth_status_no_auth() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new_with_env(llmx_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp
|
let request_id = mcp
|
||||||
@@ -115,10 +115,10 @@ async fn get_auth_status_no_auth() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn get_auth_status_with_api_key() -> Result<()> {
|
async fn get_auth_status_with_api_key() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
|
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
|
||||||
@@ -143,10 +143,10 @@ async fn get_auth_status_with_api_key() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
|
async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml_custom_provider(llmx_home.path(), false)?;
|
create_config_toml_custom_provider(codex_home.path(), false)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
|
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
|
||||||
@@ -164,8 +164,8 @@ async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
|
|||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
let status: GetAuthStatusResponse = to_response(resp)?;
|
let status: GetAuthStatusResponse = to_response(resp)?;
|
||||||
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
|
assert_eq!(status.auth_method, None, "expected no auth method");
|
||||||
assert_eq!(status.auth_token, Some("sk-test-key".to_string()));
|
assert_eq!(status.auth_token, None, "expected no token");
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
status.requires_openai_auth,
|
status.requires_openai_auth,
|
||||||
Some(false),
|
Some(false),
|
||||||
@@ -176,10 +176,10 @@ async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn get_auth_status_with_api_key_no_include_token() -> Result<()> {
|
async fn get_auth_status_with_api_key_no_include_token() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
|
login_with_api_key_via_request(&mut mcp, "sk-test-key").await?;
|
||||||
@@ -204,10 +204,10 @@ async fn get_auth_status_with_api_key_no_include_token() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn login_api_key_rejected_when_forced_chatgpt() -> Result<()> {
|
async fn login_api_key_rejected_when_forced_chatgpt() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml_forced_login(llmx_home.path(), "chatgpt")?;
|
create_config_toml_forced_login(codex_home.path(), "chatgpt")?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp
|
let request_id = mcp
|
||||||
@@ -4,31 +4,31 @@ use app_test_support::create_final_assistant_message_sse_response;
|
|||||||
use app_test_support::create_mock_chat_completions_server;
|
use app_test_support::create_mock_chat_completions_server;
|
||||||
use app_test_support::create_shell_sse_response;
|
use app_test_support::create_shell_sse_response;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::AddConversationListenerParams;
|
use codex_app_server_protocol::AddConversationListenerParams;
|
||||||
use llmx_app_server_protocol::AddConversationSubscriptionResponse;
|
use codex_app_server_protocol::AddConversationSubscriptionResponse;
|
||||||
use llmx_app_server_protocol::ExecCommandApprovalParams;
|
use codex_app_server_protocol::ExecCommandApprovalParams;
|
||||||
use llmx_app_server_protocol::InputItem;
|
use codex_app_server_protocol::InputItem;
|
||||||
use llmx_app_server_protocol::JSONRPCNotification;
|
use codex_app_server_protocol::JSONRPCNotification;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::NewConversationParams;
|
use codex_app_server_protocol::NewConversationParams;
|
||||||
use llmx_app_server_protocol::NewConversationResponse;
|
use codex_app_server_protocol::NewConversationResponse;
|
||||||
use llmx_app_server_protocol::RemoveConversationListenerParams;
|
use codex_app_server_protocol::RemoveConversationListenerParams;
|
||||||
use llmx_app_server_protocol::RemoveConversationSubscriptionResponse;
|
use codex_app_server_protocol::RemoveConversationSubscriptionResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::SendUserMessageParams;
|
use codex_app_server_protocol::SendUserMessageParams;
|
||||||
use llmx_app_server_protocol::SendUserMessageResponse;
|
use codex_app_server_protocol::SendUserMessageResponse;
|
||||||
use llmx_app_server_protocol::SendUserTurnParams;
|
use codex_app_server_protocol::SendUserTurnParams;
|
||||||
use llmx_app_server_protocol::SendUserTurnResponse;
|
use codex_app_server_protocol::SendUserTurnResponse;
|
||||||
use llmx_app_server_protocol::ServerRequest;
|
use codex_app_server_protocol::ServerRequest;
|
||||||
use llmx_core::protocol::AskForApproval;
|
use codex_core::protocol::AskForApproval;
|
||||||
use llmx_core::protocol::SandboxPolicy;
|
use codex_core::protocol::SandboxPolicy;
|
||||||
use llmx_core::protocol_config_types::ReasoningEffort;
|
use codex_core::protocol_config_types::ReasoningEffort;
|
||||||
use llmx_core::protocol_config_types::ReasoningSummary;
|
use codex_core::protocol_config_types::ReasoningSummary;
|
||||||
use llmx_core::spawn::LLMX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
|
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
|
||||||
use llmx_protocol::config_types::SandboxMode;
|
use codex_protocol::config_types::SandboxMode;
|
||||||
use llmx_protocol::parse_command::ParsedCommand;
|
use codex_protocol::parse_command::ParsedCommand;
|
||||||
use llmx_protocol::protocol::Event;
|
use codex_protocol::protocol::Event;
|
||||||
use llmx_protocol::protocol::EventMsg;
|
use codex_protocol::protocol::EventMsg;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use std::env;
|
use std::env;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
@@ -38,18 +38,18 @@ use tokio::time::timeout;
|
|||||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn test_llmx_jsonrpc_conversation_flow() -> Result<()> {
|
async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
|
||||||
if env::var(LLMX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||||
println!(
|
println!(
|
||||||
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
|
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||||
);
|
);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
let tmp = TempDir::new()?;
|
let tmp = TempDir::new()?;
|
||||||
// Temporary LLMX home with config pointing at the mock server.
|
// Temporary Codex home with config pointing at the mock server.
|
||||||
let llmx_home = tmp.path().join("llmx_home");
|
let codex_home = tmp.path().join("codex_home");
|
||||||
std::fs::create_dir(&llmx_home)?;
|
std::fs::create_dir(&codex_home)?;
|
||||||
let working_directory = tmp.path().join("workdir");
|
let working_directory = tmp.path().join("workdir");
|
||||||
std::fs::create_dir(&working_directory)?;
|
std::fs::create_dir(&working_directory)?;
|
||||||
|
|
||||||
@@ -65,10 +65,10 @@ async fn test_llmx_jsonrpc_conversation_flow() -> Result<()> {
|
|||||||
create_final_assistant_message_sse_response("Enjoy your new git repo!")?,
|
create_final_assistant_message_sse_response("Enjoy your new git repo!")?,
|
||||||
];
|
];
|
||||||
let server = create_mock_chat_completions_server(responses).await;
|
let server = create_mock_chat_completions_server(responses).await;
|
||||||
create_config_toml(&llmx_home, &server.uri())?;
|
create_config_toml(&codex_home, &server.uri())?;
|
||||||
|
|
||||||
// Start MCP server and initialize.
|
// Start MCP server and initialize.
|
||||||
let mut mcp = McpProcess::new(&llmx_home).await?;
|
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// 1) newConversation
|
// 1) newConversation
|
||||||
@@ -111,7 +111,7 @@ async fn test_llmx_jsonrpc_conversation_flow() -> Result<()> {
|
|||||||
let send_user_id = mcp
|
let send_user_id = mcp
|
||||||
.send_send_user_message_request(SendUserMessageParams {
|
.send_send_user_message_request(SendUserMessageParams {
|
||||||
conversation_id,
|
conversation_id,
|
||||||
items: vec![llmx_app_server_protocol::InputItem::Text {
|
items: vec![codex_app_server_protocol::InputItem::Text {
|
||||||
text: "text".to_string(),
|
text: "text".to_string(),
|
||||||
}],
|
}],
|
||||||
})
|
})
|
||||||
@@ -127,7 +127,7 @@ async fn test_llmx_jsonrpc_conversation_flow() -> Result<()> {
|
|||||||
// Note this also ensures that the final request to the server was made.
|
// Note this also ensures that the final request to the server was made.
|
||||||
let task_finished_notification: JSONRPCNotification = timeout(
|
let task_finished_notification: JSONRPCNotification = timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/task_complete"),
|
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
let serde_json::Value::Object(map) = task_finished_notification
|
let serde_json::Value::Object(map) = task_finished_notification
|
||||||
@@ -160,16 +160,16 @@ async fn test_llmx_jsonrpc_conversation_flow() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
||||||
if env::var(LLMX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||||
println!(
|
println!(
|
||||||
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
|
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||||
);
|
);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
let tmp = TempDir::new()?;
|
let tmp = TempDir::new()?;
|
||||||
let llmx_home = tmp.path().join("llmx_home");
|
let codex_home = tmp.path().join("codex_home");
|
||||||
std::fs::create_dir(&llmx_home)?;
|
std::fs::create_dir(&codex_home)?;
|
||||||
let working_directory = tmp.path().join("workdir");
|
let working_directory = tmp.path().join("workdir");
|
||||||
std::fs::create_dir(&working_directory)?;
|
std::fs::create_dir(&working_directory)?;
|
||||||
|
|
||||||
@@ -199,10 +199,10 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
|||||||
create_final_assistant_message_sse_response("done 2")?,
|
create_final_assistant_message_sse_response("done 2")?,
|
||||||
];
|
];
|
||||||
let server = create_mock_chat_completions_server(responses).await;
|
let server = create_mock_chat_completions_server(responses).await;
|
||||||
create_config_toml(&llmx_home, &server.uri())?;
|
create_config_toml(&codex_home, &server.uri())?;
|
||||||
|
|
||||||
// Start MCP server and initialize.
|
// Start MCP server and initialize.
|
||||||
let mut mcp = McpProcess::new(&llmx_home).await?;
|
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// 1) Start conversation with approval_policy=untrusted
|
// 1) Start conversation with approval_policy=untrusted
|
||||||
@@ -240,7 +240,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
|||||||
let send_user_id = mcp
|
let send_user_id = mcp
|
||||||
.send_send_user_message_request(SendUserMessageParams {
|
.send_send_user_message_request(SendUserMessageParams {
|
||||||
conversation_id,
|
conversation_id,
|
||||||
items: vec![llmx_app_server_protocol::InputItem::Text {
|
items: vec![codex_app_server_protocol::InputItem::Text {
|
||||||
text: "run python".to_string(),
|
text: "run python".to_string(),
|
||||||
}],
|
}],
|
||||||
})
|
})
|
||||||
@@ -285,14 +285,14 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
|||||||
// Approve so the first turn can complete
|
// Approve so the first turn can complete
|
||||||
mcp.send_response(
|
mcp.send_response(
|
||||||
request_id,
|
request_id,
|
||||||
serde_json::json!({ "decision": llmx_core::protocol::ReviewDecision::Approved }),
|
serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
// Wait for first TaskComplete
|
// Wait for first TaskComplete
|
||||||
let _ = timeout(
|
let _ = timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/task_complete"),
|
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
|
|
||||||
@@ -300,7 +300,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
|||||||
let send_turn_id = mcp
|
let send_turn_id = mcp
|
||||||
.send_send_user_turn_request(SendUserTurnParams {
|
.send_send_user_turn_request(SendUserTurnParams {
|
||||||
conversation_id,
|
conversation_id,
|
||||||
items: vec![llmx_app_server_protocol::InputItem::Text {
|
items: vec![codex_app_server_protocol::InputItem::Text {
|
||||||
text: "run python again".to_string(),
|
text: "run python again".to_string(),
|
||||||
}],
|
}],
|
||||||
cwd: working_directory.clone(),
|
cwd: working_directory.clone(),
|
||||||
@@ -324,7 +324,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
|||||||
// If any Request is seen while waiting for task_complete, the helper will error and the test fails.
|
// If any Request is seen while waiting for task_complete, the helper will error and the test fails.
|
||||||
let _ = timeout(
|
let _ = timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/task_complete"),
|
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
|
|
||||||
@@ -335,16 +335,16 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<()> {
|
async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<()> {
|
||||||
if env::var(LLMX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
|
||||||
println!(
|
println!(
|
||||||
"Skipping test because it cannot execute when network is disabled in an LLMX sandbox."
|
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
|
||||||
);
|
);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
let tmp = TempDir::new()?;
|
let tmp = TempDir::new()?;
|
||||||
let llmx_home = tmp.path().join("llmx_home");
|
let codex_home = tmp.path().join("codex_home");
|
||||||
std::fs::create_dir(&llmx_home)?;
|
std::fs::create_dir(&codex_home)?;
|
||||||
let workspace_root = tmp.path().join("workspace");
|
let workspace_root = tmp.path().join("workspace");
|
||||||
std::fs::create_dir(&workspace_root)?;
|
std::fs::create_dir(&workspace_root)?;
|
||||||
let first_cwd = workspace_root.join("turn1");
|
let first_cwd = workspace_root.join("turn1");
|
||||||
@@ -377,9 +377,9 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
|
|||||||
create_final_assistant_message_sse_response("done second")?,
|
create_final_assistant_message_sse_response("done second")?,
|
||||||
];
|
];
|
||||||
let server = create_mock_chat_completions_server(responses).await;
|
let server = create_mock_chat_completions_server(responses).await;
|
||||||
create_config_toml(&llmx_home, &server.uri())?;
|
create_config_toml(&codex_home, &server.uri())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(&llmx_home).await?;
|
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let new_conv_id = mcp
|
let new_conv_id = mcp
|
||||||
@@ -439,7 +439,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
|
|||||||
.await??;
|
.await??;
|
||||||
timeout(
|
timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/task_complete"),
|
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
|
|
||||||
@@ -465,7 +465,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
|
|||||||
|
|
||||||
let exec_begin_notification = timeout(
|
let exec_begin_notification = timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/exec_command_begin"),
|
mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
|
||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
let params = exec_begin_notification
|
let params = exec_begin_notification
|
||||||
@@ -493,15 +493,15 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
|
|||||||
|
|
||||||
timeout(
|
timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/task_complete"),
|
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path, server_uri: &str) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
format!(
|
format!(
|
||||||
@@ -1,19 +1,19 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::GetUserSavedConfigResponse;
|
use codex_app_server_protocol::GetUserSavedConfigResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::Profile;
|
use codex_app_server_protocol::Profile;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::SandboxSettings;
|
use codex_app_server_protocol::SandboxSettings;
|
||||||
use llmx_app_server_protocol::Tools;
|
use codex_app_server_protocol::Tools;
|
||||||
use llmx_app_server_protocol::UserSavedConfig;
|
use codex_app_server_protocol::UserSavedConfig;
|
||||||
use llmx_core::protocol::AskForApproval;
|
use codex_core::protocol::AskForApproval;
|
||||||
use llmx_protocol::config_types::ForcedLoginMethod;
|
use codex_protocol::config_types::ForcedLoginMethod;
|
||||||
use llmx_protocol::config_types::ReasoningEffort;
|
use codex_protocol::config_types::ReasoningEffort;
|
||||||
use llmx_protocol::config_types::ReasoningSummary;
|
use codex_protocol::config_types::ReasoningSummary;
|
||||||
use llmx_protocol::config_types::SandboxMode;
|
use codex_protocol::config_types::SandboxMode;
|
||||||
use llmx_protocol::config_types::Verbosity;
|
use codex_protocol::config_types::Verbosity;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
@@ -22,12 +22,12 @@ use tokio::time::timeout;
|
|||||||
|
|
||||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
r#"
|
r#"
|
||||||
model = "gpt-5-llmx"
|
model = "gpt-5-codex"
|
||||||
approval_policy = "on-request"
|
approval_policy = "on-request"
|
||||||
sandbox_mode = "workspace-write"
|
sandbox_mode = "workspace-write"
|
||||||
model_reasoning_summary = "detailed"
|
model_reasoning_summary = "detailed"
|
||||||
@@ -61,10 +61,10 @@ chatgpt_base_url = "https://api.chatgpt.com"
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn get_config_toml_parses_all_fields() -> Result<()> {
|
async fn get_config_toml_parses_all_fields() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_get_user_saved_config_request().await?;
|
let request_id = mcp.send_get_user_saved_config_request().await?;
|
||||||
@@ -87,7 +87,7 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
|
|||||||
}),
|
}),
|
||||||
forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
|
forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
|
||||||
forced_login_method: Some(ForcedLoginMethod::Chatgpt),
|
forced_login_method: Some(ForcedLoginMethod::Chatgpt),
|
||||||
model: Some("gpt-5-llmx".into()),
|
model: Some("gpt-5-codex".into()),
|
||||||
model_reasoning_effort: Some(ReasoningEffort::High),
|
model_reasoning_effort: Some(ReasoningEffort::High),
|
||||||
model_reasoning_summary: Some(ReasoningSummary::Detailed),
|
model_reasoning_summary: Some(ReasoningSummary::Detailed),
|
||||||
model_verbosity: Some(Verbosity::Medium),
|
model_verbosity: Some(Verbosity::Medium),
|
||||||
@@ -117,9 +117,9 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn get_config_toml_empty() -> Result<()> {
|
async fn get_config_toml_empty() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_get_user_saved_config_request().await?;
|
let request_id = mcp.send_get_user_saved_config_request().await?;
|
||||||
@@ -3,15 +3,15 @@ use app_test_support::McpProcess;
|
|||||||
use app_test_support::create_final_assistant_message_sse_response;
|
use app_test_support::create_final_assistant_message_sse_response;
|
||||||
use app_test_support::create_mock_chat_completions_server;
|
use app_test_support::create_mock_chat_completions_server;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::AddConversationListenerParams;
|
use codex_app_server_protocol::AddConversationListenerParams;
|
||||||
use llmx_app_server_protocol::AddConversationSubscriptionResponse;
|
use codex_app_server_protocol::AddConversationSubscriptionResponse;
|
||||||
use llmx_app_server_protocol::InputItem;
|
use codex_app_server_protocol::InputItem;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::NewConversationParams;
|
use codex_app_server_protocol::NewConversationParams;
|
||||||
use llmx_app_server_protocol::NewConversationResponse;
|
use codex_app_server_protocol::NewConversationResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::SendUserMessageParams;
|
use codex_app_server_protocol::SendUserMessageParams;
|
||||||
use llmx_app_server_protocol::SendUserMessageResponse;
|
use codex_app_server_protocol::SendUserMessageResponse;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
@@ -26,12 +26,12 @@ async fn test_conversation_create_and_send_message_ok() -> Result<()> {
|
|||||||
let responses = vec![create_final_assistant_message_sse_response("Done")?];
|
let responses = vec![create_final_assistant_message_sse_response("Done")?];
|
||||||
let server = create_mock_chat_completions_server(responses).await;
|
let server = create_mock_chat_completions_server(responses).await;
|
||||||
|
|
||||||
// Temporary LLMX home with config pointing at the mock server.
|
// Temporary Codex home with config pointing at the mock server.
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path(), &server.uri())?;
|
create_config_toml(codex_home.path(), &server.uri())?;
|
||||||
|
|
||||||
// Start MCP server process and initialize.
|
// Start MCP server process and initialize.
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// Create a conversation via the new JSON-RPC API.
|
// Create a conversation via the new JSON-RPC API.
|
||||||
@@ -118,8 +118,8 @@ async fn test_conversation_create_and_send_message_ok() -> Result<()> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Helper to create a config.toml pointing at the mock model server.
|
// Helper to create a config.toml pointing at the mock model server.
|
||||||
fn create_config_toml(llmx_home: &Path, server_uri: &str) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
format!(
|
format!(
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use anyhow::anyhow;
|
use anyhow::anyhow;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
@@ -12,8 +12,8 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
|
async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
|
||||||
// Prepare a temporary LLMX home and a separate root with test files.
|
// Prepare a temporary Codex home and a separate root with test files.
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
let root = TempDir::new()?;
|
let root = TempDir::new()?;
|
||||||
|
|
||||||
// Create files designed to have deterministic ordering for query "abe".
|
// Create files designed to have deterministic ordering for query "abe".
|
||||||
@@ -31,7 +31,7 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
|
|||||||
.to_string();
|
.to_string();
|
||||||
|
|
||||||
// Start MCP server and initialize.
|
// Start MCP server and initialize.
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let root_path = root.path().to_string_lossy().to_string();
|
let root_path = root.path().to_string_lossy().to_string();
|
||||||
@@ -85,12 +85,12 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> {
|
async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
let root = TempDir::new()?;
|
let root = TempDir::new()?;
|
||||||
|
|
||||||
std::fs::write(root.path().join("alpha.txt"), "contents")?;
|
std::fs::write(root.path().join("alpha.txt"), "contents")?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let root_path = root.path().to_string_lossy().to_string();
|
let root_path = root.path().to_string_lossy().to_string();
|
||||||
@@ -3,17 +3,17 @@
|
|||||||
|
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
|
use codex_app_server_protocol::AddConversationListenerParams;
|
||||||
|
use codex_app_server_protocol::InterruptConversationParams;
|
||||||
|
use codex_app_server_protocol::InterruptConversationResponse;
|
||||||
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
|
use codex_app_server_protocol::NewConversationParams;
|
||||||
|
use codex_app_server_protocol::NewConversationResponse;
|
||||||
|
use codex_app_server_protocol::RequestId;
|
||||||
|
use codex_app_server_protocol::SendUserMessageParams;
|
||||||
|
use codex_app_server_protocol::SendUserMessageResponse;
|
||||||
|
use codex_core::protocol::TurnAbortReason;
|
||||||
use core_test_support::skip_if_no_network;
|
use core_test_support::skip_if_no_network;
|
||||||
use llmx_app_server_protocol::AddConversationListenerParams;
|
|
||||||
use llmx_app_server_protocol::InterruptConversationParams;
|
|
||||||
use llmx_app_server_protocol::InterruptConversationResponse;
|
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
|
||||||
use llmx_app_server_protocol::NewConversationParams;
|
|
||||||
use llmx_app_server_protocol::NewConversationResponse;
|
|
||||||
use llmx_app_server_protocol::RequestId;
|
|
||||||
use llmx_app_server_protocol::SendUserMessageParams;
|
|
||||||
use llmx_app_server_protocol::SendUserMessageResponse;
|
|
||||||
use llmx_core::protocol::TurnAbortReason;
|
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
|
|
||||||
@@ -49,9 +49,9 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
|
|||||||
let shell_command = vec!["sleep".to_string(), "10".to_string()];
|
let shell_command = vec!["sleep".to_string(), "10".to_string()];
|
||||||
|
|
||||||
let tmp = TempDir::new()?;
|
let tmp = TempDir::new()?;
|
||||||
// Temporary LLMX home with config pointing at the mock server.
|
// Temporary Codex home with config pointing at the mock server.
|
||||||
let llmx_home = tmp.path().join("llmx_home");
|
let codex_home = tmp.path().join("codex_home");
|
||||||
std::fs::create_dir(&llmx_home)?;
|
std::fs::create_dir(&codex_home)?;
|
||||||
let working_directory = tmp.path().join("workdir");
|
let working_directory = tmp.path().join("workdir");
|
||||||
std::fs::create_dir(&working_directory)?;
|
std::fs::create_dir(&working_directory)?;
|
||||||
|
|
||||||
@@ -63,10 +63,10 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
|
|||||||
"call_sleep",
|
"call_sleep",
|
||||||
)?])
|
)?])
|
||||||
.await;
|
.await;
|
||||||
create_config_toml(&llmx_home, server.uri())?;
|
create_config_toml(&codex_home, server.uri())?;
|
||||||
|
|
||||||
// Start MCP server and initialize.
|
// Start MCP server and initialize.
|
||||||
let mut mcp = McpProcess::new(&llmx_home).await?;
|
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// 1) newConversation
|
// 1) newConversation
|
||||||
@@ -103,7 +103,7 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
|
|||||||
let send_user_id = mcp
|
let send_user_id = mcp
|
||||||
.send_send_user_message_request(SendUserMessageParams {
|
.send_send_user_message_request(SendUserMessageParams {
|
||||||
conversation_id,
|
conversation_id,
|
||||||
items: vec![llmx_app_server_protocol::InputItem::Text {
|
items: vec![codex_app_server_protocol::InputItem::Text {
|
||||||
text: "run first sleep command".to_string(),
|
text: "run first sleep command".to_string(),
|
||||||
}],
|
}],
|
||||||
})
|
})
|
||||||
@@ -138,8 +138,8 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
|
|||||||
// Helpers
|
// Helpers
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path, server_uri: String) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path, server_uri: String) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
format!(
|
format!(
|
||||||
@@ -2,19 +2,19 @@ use anyhow::Result;
|
|||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::create_fake_rollout;
|
use app_test_support::create_fake_rollout;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::JSONRPCNotification;
|
use codex_app_server_protocol::JSONRPCNotification;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::ListConversationsParams;
|
use codex_app_server_protocol::ListConversationsParams;
|
||||||
use llmx_app_server_protocol::ListConversationsResponse;
|
use codex_app_server_protocol::ListConversationsResponse;
|
||||||
use llmx_app_server_protocol::NewConversationParams; // reused for overrides shape
|
use codex_app_server_protocol::NewConversationParams; // reused for overrides shape
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::ResumeConversationParams;
|
use codex_app_server_protocol::ResumeConversationParams;
|
||||||
use llmx_app_server_protocol::ResumeConversationResponse;
|
use codex_app_server_protocol::ResumeConversationResponse;
|
||||||
use llmx_app_server_protocol::ServerNotification;
|
use codex_app_server_protocol::ServerNotification;
|
||||||
use llmx_app_server_protocol::SessionConfiguredNotification;
|
use codex_app_server_protocol::SessionConfiguredNotification;
|
||||||
use llmx_core::protocol::EventMsg;
|
use codex_core::protocol::EventMsg;
|
||||||
use llmx_protocol::models::ContentItem;
|
use codex_protocol::models::ContentItem;
|
||||||
use llmx_protocol::models::ResponseItem;
|
use codex_protocol::models::ResponseItem;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
@@ -23,31 +23,31 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn test_list_and_resume_conversations() -> Result<()> {
|
async fn test_list_and_resume_conversations() -> Result<()> {
|
||||||
// Prepare a temporary LLMX_HOME with a few fake rollout files.
|
// Prepare a temporary CODEX_HOME with a few fake rollout files.
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_fake_rollout(
|
create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-02T12-00-00",
|
"2025-01-02T12-00-00",
|
||||||
"2025-01-02T12:00:00Z",
|
"2025-01-02T12:00:00Z",
|
||||||
"Hello A",
|
"Hello A",
|
||||||
Some("openai"),
|
Some("openai"),
|
||||||
)?;
|
)?;
|
||||||
create_fake_rollout(
|
create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-01T13-00-00",
|
"2025-01-01T13-00-00",
|
||||||
"2025-01-01T13:00:00Z",
|
"2025-01-01T13:00:00Z",
|
||||||
"Hello B",
|
"Hello B",
|
||||||
Some("openai"),
|
Some("openai"),
|
||||||
)?;
|
)?;
|
||||||
create_fake_rollout(
|
create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-01T12-00-00",
|
"2025-01-01T12-00-00",
|
||||||
"2025-01-01T12:00:00Z",
|
"2025-01-01T12:00:00Z",
|
||||||
"Hello C",
|
"Hello C",
|
||||||
None,
|
None,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// Request first page with size 2
|
// Request first page with size 2
|
||||||
@@ -95,12 +95,12 @@ async fn test_list_and_resume_conversations() -> Result<()> {
|
|||||||
} = to_response::<ListConversationsResponse>(resp2)?;
|
} = to_response::<ListConversationsResponse>(resp2)?;
|
||||||
assert_eq!(items2.len(), 1);
|
assert_eq!(items2.len(), 1);
|
||||||
assert_eq!(items2[0].preview, "Hello C");
|
assert_eq!(items2[0].preview, "Hello C");
|
||||||
assert_eq!(items2[0].model_provider, "litellm");
|
assert_eq!(items2[0].model_provider, "openai");
|
||||||
assert_eq!(next2, None);
|
assert_eq!(next2, None);
|
||||||
|
|
||||||
// Add a conversation with an explicit non-OpenAI provider for filter tests.
|
// Add a conversation with an explicit non-OpenAI provider for filter tests.
|
||||||
create_fake_rollout(
|
create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-01T11-30-00",
|
"2025-01-01T11-30-00",
|
||||||
"2025-01-01T11:30:00Z",
|
"2025-01-01T11:30:00Z",
|
||||||
"Hello TP",
|
"Hello TP",
|
||||||
@@ -183,7 +183,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
|
|||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
// Expect a llmx/event notification with msg.type == sessionConfigured
|
// Expect a codex/event notification with msg.type == sessionConfigured
|
||||||
let notification: JSONRPCNotification = timeout(
|
let notification: JSONRPCNotification = timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("sessionConfigured"),
|
mcp.read_stream_until_notification_message("sessionConfigured"),
|
||||||
@@ -1,16 +1,17 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::CancelLoginChatGptParams;
|
use codex_app_server_protocol::CancelLoginChatGptParams;
|
||||||
use llmx_app_server_protocol::GetAuthStatusParams;
|
use codex_app_server_protocol::CancelLoginChatGptResponse;
|
||||||
use llmx_app_server_protocol::GetAuthStatusResponse;
|
use codex_app_server_protocol::GetAuthStatusParams;
|
||||||
use llmx_app_server_protocol::JSONRPCError;
|
use codex_app_server_protocol::GetAuthStatusResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCError;
|
||||||
use llmx_app_server_protocol::LoginChatGptResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::LogoutChatGptResponse;
|
use codex_app_server_protocol::LoginChatGptResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::LogoutChatGptResponse;
|
||||||
use llmx_core::auth::AuthCredentialsStoreMode;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_login::login_with_api_key;
|
use codex_core::auth::AuthCredentialsStoreMode;
|
||||||
|
use codex_login::login_with_api_key;
|
||||||
use serial_test::serial;
|
use serial_test::serial;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -20,8 +21,8 @@ use tokio::time::timeout;
|
|||||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||||
|
|
||||||
// Helper to create a config.toml; mirrors create_conversation.rs
|
// Helper to create a config.toml; mirrors create_conversation.rs
|
||||||
fn create_config_toml(llmx_home: &Path) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
r#"
|
r#"
|
||||||
@@ -43,16 +44,16 @@ stream_max_retries = 0
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn logout_chatgpt_removes_auth() -> Result<()> {
|
async fn logout_chatgpt_removes_auth() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
login_with_api_key(
|
login_with_api_key(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"sk-test-key",
|
"sk-test-key",
|
||||||
AuthCredentialsStoreMode::File,
|
AuthCredentialsStoreMode::File,
|
||||||
)?;
|
)?;
|
||||||
assert!(llmx_home.path().join("auth.json").exists());
|
assert!(codex_home.path().join("auth.json").exists());
|
||||||
|
|
||||||
let mut mcp = McpProcess::new_with_env(llmx_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let id = mcp.send_logout_chat_gpt_request().await?;
|
let id = mcp.send_logout_chat_gpt_request().await?;
|
||||||
@@ -64,7 +65,7 @@ async fn logout_chatgpt_removes_auth() -> Result<()> {
|
|||||||
let _ok: LogoutChatGptResponse = to_response(resp)?;
|
let _ok: LogoutChatGptResponse = to_response(resp)?;
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
!llmx_home.path().join("auth.json").exists(),
|
!codex_home.path().join("auth.json").exists(),
|
||||||
"auth.json should be deleted"
|
"auth.json should be deleted"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -90,10 +91,10 @@ async fn logout_chatgpt_removes_auth() -> Result<()> {
|
|||||||
// Serialize tests that launch the login server since it binds to a fixed port.
|
// Serialize tests that launch the login server since it binds to a fixed port.
|
||||||
#[serial(login_port)]
|
#[serial(login_port)]
|
||||||
async fn login_and_cancel_chatgpt() -> Result<()> {
|
async fn login_and_cancel_chatgpt() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let login_id = mcp.send_login_chat_gpt_request().await?;
|
let login_id = mcp.send_login_chat_gpt_request().await?;
|
||||||
@@ -109,41 +110,27 @@ async fn login_and_cancel_chatgpt() -> Result<()> {
|
|||||||
login_id: login.login_id,
|
login_id: login.login_id,
|
||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
|
let cancel_resp: JSONRPCResponse = timeout(
|
||||||
// The cancel might succeed or fail with "login id not found" if the login
|
DEFAULT_READ_TIMEOUT,
|
||||||
// completed/cancelled already due to a race condition. Either outcome is acceptable.
|
|
||||||
// Use a timeout and allow either success or error response.
|
|
||||||
let cancel_result = timeout(
|
|
||||||
Duration::from_secs(5),
|
|
||||||
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
|
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
|
||||||
)
|
)
|
||||||
.await;
|
.await??;
|
||||||
|
let _ok: CancelLoginChatGptResponse = to_response(cancel_resp)?;
|
||||||
match cancel_result {
|
|
||||||
Ok(Ok(_)) => {
|
|
||||||
// Successfully cancelled
|
|
||||||
eprintln!("cancel succeeded");
|
|
||||||
}
|
|
||||||
Ok(Err(_)) | Err(_) => {
|
|
||||||
// Cancel failed or timed out - acceptable in race condition
|
|
||||||
eprintln!("cancel failed or timed out (expected in race condition)");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Optionally observe the completion notification; do not fail if it races.
|
// Optionally observe the completion notification; do not fail if it races.
|
||||||
let maybe_note = timeout(
|
let maybe_note = timeout(
|
||||||
Duration::from_secs(2),
|
Duration::from_secs(2),
|
||||||
mcp.read_stream_until_notification_message("loginChatGptComplete"),
|
mcp.read_stream_until_notification_message("codex/event/login_chat_gpt_complete"),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
if maybe_note.is_err() {
|
if maybe_note.is_err() {
|
||||||
eprintln!("warning: did not observe loginChatGptComplete notification after cancel");
|
eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml_forced_login(llmx_home: &Path, forced_method: &str) -> std::io::Result<()> {
|
fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
let contents = format!(
|
let contents = format!(
|
||||||
r#"
|
r#"
|
||||||
model = "mock-model"
|
model = "mock-model"
|
||||||
@@ -156,10 +143,10 @@ forced_login_method = "{forced_method}"
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml_forced_workspace(
|
fn create_config_toml_forced_workspace(
|
||||||
llmx_home: &Path,
|
codex_home: &Path,
|
||||||
workspace_id: &str,
|
workspace_id: &str,
|
||||||
) -> std::io::Result<()> {
|
) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
let contents = format!(
|
let contents = format!(
|
||||||
r#"
|
r#"
|
||||||
model = "mock-model"
|
model = "mock-model"
|
||||||
@@ -173,10 +160,10 @@ forced_chatgpt_workspace_id = "{workspace_id}"
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn login_chatgpt_rejected_when_forced_api() -> Result<()> {
|
async fn login_chatgpt_rejected_when_forced_api() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml_forced_login(llmx_home.path(), "api")?;
|
create_config_toml_forced_login(codex_home.path(), "api")?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_login_chat_gpt_request().await?;
|
let request_id = mcp.send_login_chat_gpt_request().await?;
|
||||||
@@ -197,10 +184,10 @@ async fn login_chatgpt_rejected_when_forced_api() -> Result<()> {
|
|||||||
// Serialize tests that launch the login server since it binds to a fixed port.
|
// Serialize tests that launch the login server since it binds to a fixed port.
|
||||||
#[serial(login_port)]
|
#[serial(login_port)]
|
||||||
async fn login_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
|
async fn login_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml_forced_workspace(llmx_home.path(), "ws-forced")?;
|
create_config_toml_forced_workspace(codex_home.path(), "ws-forced")?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_login_chat_gpt_request().await?;
|
let request_id = mcp.send_login_chat_gpt_request().await?;
|
||||||
@@ -1,11 +1,11 @@
|
|||||||
mod archive_conversation;
|
mod archive_conversation;
|
||||||
mod auth;
|
mod auth;
|
||||||
|
mod codex_message_processor_flow;
|
||||||
mod config;
|
mod config;
|
||||||
mod create_conversation;
|
mod create_conversation;
|
||||||
mod fuzzy_file_search;
|
mod fuzzy_file_search;
|
||||||
mod interrupt;
|
mod interrupt;
|
||||||
mod list_resume;
|
mod list_resume;
|
||||||
mod llmx_message_processor_flow;
|
|
||||||
mod login;
|
mod login;
|
||||||
mod send_message;
|
mod send_message;
|
||||||
mod set_default_model;
|
mod set_default_model;
|
||||||
@@ -3,20 +3,20 @@ use app_test_support::McpProcess;
|
|||||||
use app_test_support::create_final_assistant_message_sse_response;
|
use app_test_support::create_final_assistant_message_sse_response;
|
||||||
use app_test_support::create_mock_chat_completions_server;
|
use app_test_support::create_mock_chat_completions_server;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::AddConversationListenerParams;
|
use codex_app_server_protocol::AddConversationListenerParams;
|
||||||
use llmx_app_server_protocol::AddConversationSubscriptionResponse;
|
use codex_app_server_protocol::AddConversationSubscriptionResponse;
|
||||||
use llmx_app_server_protocol::InputItem;
|
use codex_app_server_protocol::InputItem;
|
||||||
use llmx_app_server_protocol::JSONRPCNotification;
|
use codex_app_server_protocol::JSONRPCNotification;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::NewConversationParams;
|
use codex_app_server_protocol::NewConversationParams;
|
||||||
use llmx_app_server_protocol::NewConversationResponse;
|
use codex_app_server_protocol::NewConversationResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::SendUserMessageParams;
|
use codex_app_server_protocol::SendUserMessageParams;
|
||||||
use llmx_app_server_protocol::SendUserMessageResponse;
|
use codex_app_server_protocol::SendUserMessageResponse;
|
||||||
use llmx_protocol::ConversationId;
|
use codex_protocol::ConversationId;
|
||||||
use llmx_protocol::models::ContentItem;
|
use codex_protocol::models::ContentItem;
|
||||||
use llmx_protocol::models::ResponseItem;
|
use codex_protocol::models::ResponseItem;
|
||||||
use llmx_protocol::protocol::RawResponseItemEvent;
|
use codex_protocol::protocol::RawResponseItemEvent;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
@@ -26,20 +26,20 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_send_message_success() -> Result<()> {
|
async fn test_send_message_success() -> Result<()> {
|
||||||
// Spin up a mock completions server that immediately ends the LLMX turn.
|
// Spin up a mock completions server that immediately ends the Codex turn.
|
||||||
// Two LLMX turns hit the mock model (session start + send-user-message). Provide two SSE responses.
|
// Two Codex turns hit the mock model (session start + send-user-message). Provide two SSE responses.
|
||||||
let responses = vec![
|
let responses = vec![
|
||||||
create_final_assistant_message_sse_response("Done")?,
|
create_final_assistant_message_sse_response("Done")?,
|
||||||
create_final_assistant_message_sse_response("Done")?,
|
create_final_assistant_message_sse_response("Done")?,
|
||||||
];
|
];
|
||||||
let server = create_mock_chat_completions_server(responses).await;
|
let server = create_mock_chat_completions_server(responses).await;
|
||||||
|
|
||||||
// Create a temporary LLMX home with config pointing at the mock server.
|
// Create a temporary Codex home with config pointing at the mock server.
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path(), &server.uri())?;
|
create_config_toml(codex_home.path(), &server.uri())?;
|
||||||
|
|
||||||
// Start MCP server process and initialize.
|
// Start MCP server process and initialize.
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// Start a conversation using the new wire API.
|
// Start a conversation using the new wire API.
|
||||||
@@ -106,7 +106,7 @@ async fn send_message(
|
|||||||
// Note this also ensures that the final request to the server was made.
|
// Note this also ensures that the final request to the server was made.
|
||||||
let task_finished_notification: JSONRPCNotification = timeout(
|
let task_finished_notification: JSONRPCNotification = timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/task_complete"),
|
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||||
)
|
)
|
||||||
.await??;
|
.await??;
|
||||||
let serde_json::Value::Object(map) = task_finished_notification
|
let serde_json::Value::Object(map) = task_finished_notification
|
||||||
@@ -123,7 +123,7 @@ async fn send_message(
|
|||||||
|
|
||||||
let raw_attempt = tokio::time::timeout(
|
let raw_attempt = tokio::time::timeout(
|
||||||
std::time::Duration::from_millis(200),
|
std::time::Duration::from_millis(200),
|
||||||
mcp.read_stream_until_notification_message("llmx/event/raw_response_item"),
|
mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
assert!(
|
assert!(
|
||||||
@@ -138,10 +138,10 @@ async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
|
|||||||
let responses = vec![create_final_assistant_message_sse_response("Done")?];
|
let responses = vec![create_final_assistant_message_sse_response("Done")?];
|
||||||
let server = create_mock_chat_completions_server(responses).await;
|
let server = create_mock_chat_completions_server(responses).await;
|
||||||
|
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path(), &server.uri())?;
|
create_config_toml(codex_home.path(), &server.uri())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let new_conv_id = mcp
|
let new_conv_id = mcp
|
||||||
@@ -206,7 +206,7 @@ async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
|
|||||||
|
|
||||||
let _ = tokio::time::timeout(
|
let _ = tokio::time::timeout(
|
||||||
std::time::Duration::from_millis(250),
|
std::time::Duration::from_millis(250),
|
||||||
mcp.read_stream_until_notification_message("llmx/event/task_complete"),
|
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
@@ -215,9 +215,9 @@ async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_send_message_session_not_found() -> Result<()> {
|
async fn test_send_message_session_not_found() -> Result<()> {
|
||||||
// Start MCP without creating an LLMX session
|
// Start MCP without creating a Codex session
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let unknown = ConversationId::new();
|
let unknown = ConversationId::new();
|
||||||
@@ -244,8 +244,8 @@ async fn test_send_message_session_not_found() -> Result<()> {
|
|||||||
// Helpers
|
// Helpers
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path, server_uri: &str) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
format!(
|
format!(
|
||||||
@@ -274,17 +274,17 @@ async fn read_raw_response_item(
|
|||||||
) -> ResponseItem {
|
) -> ResponseItem {
|
||||||
let raw_notification: JSONRPCNotification = timeout(
|
let raw_notification: JSONRPCNotification = timeout(
|
||||||
DEFAULT_READ_TIMEOUT,
|
DEFAULT_READ_TIMEOUT,
|
||||||
mcp.read_stream_until_notification_message("llmx/event/raw_response_item"),
|
mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.expect("llmx/event/raw_response_item notification timeout")
|
.expect("codex/event/raw_response_item notification timeout")
|
||||||
.expect("llmx/event/raw_response_item notification resp");
|
.expect("codex/event/raw_response_item notification resp");
|
||||||
|
|
||||||
let serde_json::Value::Object(params) = raw_notification
|
let serde_json::Value::Object(params) = raw_notification
|
||||||
.params
|
.params
|
||||||
.expect("llmx/event/raw_response_item should have params")
|
.expect("codex/event/raw_response_item should have params")
|
||||||
else {
|
else {
|
||||||
panic!("llmx/event/raw_response_item should have params");
|
panic!("codex/event/raw_response_item should have params");
|
||||||
};
|
};
|
||||||
|
|
||||||
let conversation_id_value = params
|
let conversation_id_value = params
|
||||||
@@ -1,11 +1,11 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::SetDefaultModelParams;
|
use codex_app_server_protocol::SetDefaultModelParams;
|
||||||
use llmx_app_server_protocol::SetDefaultModelResponse;
|
use codex_app_server_protocol::SetDefaultModelResponse;
|
||||||
use llmx_core::config::ConfigToml;
|
use codex_core::config::ConfigToml;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
@@ -15,10 +15,10 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn set_default_model_persists_overrides() -> Result<()> {
|
async fn set_default_model_persists_overrides() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let params = SetDefaultModelParams {
|
let params = SetDefaultModelParams {
|
||||||
@@ -36,7 +36,7 @@ async fn set_default_model_persists_overrides() -> Result<()> {
|
|||||||
|
|
||||||
let _: SetDefaultModelResponse = to_response(resp)?;
|
let _: SetDefaultModelResponse = to_response(resp)?;
|
||||||
|
|
||||||
let config_path = llmx_home.path().join("config.toml");
|
let config_path = codex_home.path().join("config.toml");
|
||||||
let config_contents = tokio::fs::read_to_string(&config_path).await?;
|
let config_contents = tokio::fs::read_to_string(&config_path).await?;
|
||||||
let config_toml: ConfigToml = toml::from_str(&config_contents)?;
|
let config_toml: ConfigToml = toml::from_str(&config_contents)?;
|
||||||
|
|
||||||
@@ -52,12 +52,12 @@ async fn set_default_model_persists_overrides() -> Result<()> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Helper to create a config.toml; mirrors create_conversation.rs
|
// Helper to create a config.toml; mirrors create_conversation.rs
|
||||||
fn create_config_toml(llmx_home: &Path) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
r#"
|
r#"
|
||||||
model = "gpt-5-llmx"
|
model = "gpt-5-codex"
|
||||||
model_reasoning_effort = "medium"
|
model_reasoning_effort = "medium"
|
||||||
"#,
|
"#,
|
||||||
)
|
)
|
||||||
@@ -1,9 +1,9 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::GetUserAgentResponse;
|
use codex_app_server_protocol::GetUserAgentResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
@@ -11,10 +11,10 @@ use tokio::time::timeout;
|
|||||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn get_user_agent_returns_current_llmx_user_agent() -> Result<()> {
|
async fn get_user_agent_returns_current_codex_user_agent() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_get_user_agent_request().await?;
|
let request_id = mcp.send_get_user_agent_request().await?;
|
||||||
@@ -26,11 +26,11 @@ async fn get_user_agent_returns_current_llmx_user_agent() -> Result<()> {
|
|||||||
|
|
||||||
let os_info = os_info::get();
|
let os_info = os_info::get();
|
||||||
let user_agent = format!(
|
let user_agent = format!(
|
||||||
"llmx_cli_rs/0.1.7 ({} {}; {}) {} (llmx-app-server-tests; 0.1.7)",
|
"codex_cli_rs/0.0.0 ({} {}; {}) {} (codex-app-server-tests; 0.1.0)",
|
||||||
os_info.os_type(),
|
os_info.os_type(),
|
||||||
os_info.version(),
|
os_info.version(),
|
||||||
os_info.architecture().unwrap_or("unknown"),
|
os_info.architecture().unwrap_or("unknown"),
|
||||||
llmx_core::terminal::user_agent()
|
codex_core::terminal::user_agent()
|
||||||
);
|
);
|
||||||
|
|
||||||
let received: GetUserAgentResponse = to_response(response)?;
|
let received: GetUserAgentResponse = to_response(response)?;
|
||||||
@@ -3,10 +3,10 @@ use app_test_support::ChatGptAuthFixture;
|
|||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use app_test_support::write_chatgpt_auth;
|
use app_test_support::write_chatgpt_auth;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::UserInfoResponse;
|
use codex_app_server_protocol::UserInfoResponse;
|
||||||
use llmx_core::auth::AuthCredentialsStoreMode;
|
use codex_core::auth::AuthCredentialsStoreMode;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
@@ -16,17 +16,17 @@ const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
|
|||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn user_info_returns_email_from_auth_json() -> Result<()> {
|
async fn user_info_returns_email_from_auth_json() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
|
|
||||||
write_chatgpt_auth(
|
write_chatgpt_auth(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
ChatGptAuthFixture::new("access")
|
ChatGptAuthFixture::new("access")
|
||||||
.refresh_token("refresh")
|
.refresh_token("refresh")
|
||||||
.email("user@example.com"),
|
.email("user@example.com"),
|
||||||
AuthCredentialsStoreMode::File,
|
AuthCredentialsStoreMode::File,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_user_info_request().await?;
|
let request_id = mcp.send_user_info_request().await?;
|
||||||
@@ -5,21 +5,21 @@ use app_test_support::to_response;
|
|||||||
|
|
||||||
use app_test_support::ChatGptAuthFixture;
|
use app_test_support::ChatGptAuthFixture;
|
||||||
use app_test_support::write_chatgpt_auth;
|
use app_test_support::write_chatgpt_auth;
|
||||||
use llmx_app_server_protocol::Account;
|
use codex_app_server_protocol::Account;
|
||||||
use llmx_app_server_protocol::AuthMode;
|
use codex_app_server_protocol::AuthMode;
|
||||||
use llmx_app_server_protocol::CancelLoginAccountParams;
|
use codex_app_server_protocol::CancelLoginAccountParams;
|
||||||
use llmx_app_server_protocol::CancelLoginAccountResponse;
|
use codex_app_server_protocol::CancelLoginAccountResponse;
|
||||||
use llmx_app_server_protocol::GetAccountParams;
|
use codex_app_server_protocol::GetAccountParams;
|
||||||
use llmx_app_server_protocol::GetAccountResponse;
|
use codex_app_server_protocol::GetAccountResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCError;
|
use codex_app_server_protocol::JSONRPCError;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::LoginAccountResponse;
|
use codex_app_server_protocol::LoginAccountResponse;
|
||||||
use llmx_app_server_protocol::LogoutAccountResponse;
|
use codex_app_server_protocol::LogoutAccountResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::ServerNotification;
|
use codex_app_server_protocol::ServerNotification;
|
||||||
use llmx_core::auth::AuthCredentialsStoreMode;
|
use codex_core::auth::AuthCredentialsStoreMode;
|
||||||
use llmx_login::login_with_api_key;
|
use codex_login::login_with_api_key;
|
||||||
use llmx_protocol::account::PlanType as AccountPlanType;
|
use codex_protocol::account::PlanType as AccountPlanType;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use serial_test::serial;
|
use serial_test::serial;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
@@ -37,8 +37,8 @@ struct CreateConfigTomlParams {
|
|||||||
requires_openai_auth: Option<bool>,
|
requires_openai_auth: Option<bool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
let forced_line = if let Some(method) = params.forced_method {
|
let forced_line = if let Some(method) = params.forced_method {
|
||||||
format!("forced_login_method = \"{method}\"\n")
|
format!("forced_login_method = \"{method}\"\n")
|
||||||
} else {
|
} else {
|
||||||
@@ -78,17 +78,17 @@ stream_max_retries = 0
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn logout_account_removes_auth_and_notifies() -> Result<()> {
|
async fn logout_account_removes_auth_and_notifies() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path(), CreateConfigTomlParams::default())?;
|
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
|
||||||
|
|
||||||
login_with_api_key(
|
login_with_api_key(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"sk-test-key",
|
"sk-test-key",
|
||||||
AuthCredentialsStoreMode::File,
|
AuthCredentialsStoreMode::File,
|
||||||
)?;
|
)?;
|
||||||
assert!(llmx_home.path().join("auth.json").exists());
|
assert!(codex_home.path().join("auth.json").exists());
|
||||||
|
|
||||||
let mut mcp = McpProcess::new_with_env(llmx_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let id = mcp.send_logout_account_request().await?;
|
let id = mcp.send_logout_account_request().await?;
|
||||||
@@ -114,7 +114,7 @@ async fn logout_account_removes_auth_and_notifies() -> Result<()> {
|
|||||||
);
|
);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
!llmx_home.path().join("auth.json").exists(),
|
!codex_home.path().join("auth.json").exists(),
|
||||||
"auth.json should be deleted"
|
"auth.json should be deleted"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -135,10 +135,10 @@ async fn logout_account_removes_auth_and_notifies() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
|
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path(), CreateConfigTomlParams::default())?;
|
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let req_id = mcp
|
let req_id = mcp
|
||||||
@@ -176,22 +176,22 @@ async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
|
|||||||
};
|
};
|
||||||
pretty_assertions::assert_eq!(payload.auth_mode, Some(AuthMode::ApiKey));
|
pretty_assertions::assert_eq!(payload.auth_mode, Some(AuthMode::ApiKey));
|
||||||
|
|
||||||
assert!(llmx_home.path().join("auth.json").exists());
|
assert!(codex_home.path().join("auth.json").exists());
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> {
|
async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(
|
create_config_toml(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
CreateConfigTomlParams {
|
CreateConfigTomlParams {
|
||||||
forced_method: Some("chatgpt".to_string()),
|
forced_method: Some("chatgpt".to_string()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp
|
let request_id = mcp
|
||||||
@@ -212,16 +212,16 @@ async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
|
async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(
|
create_config_toml(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
CreateConfigTomlParams {
|
CreateConfigTomlParams {
|
||||||
forced_method: Some("api".to_string()),
|
forced_method: Some("api".to_string()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_login_account_chatgpt_request().await?;
|
let request_id = mcp.send_login_account_chatgpt_request().await?;
|
||||||
@@ -242,10 +242,10 @@ async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
|
|||||||
// Serialize tests that launch the login server since it binds to a fixed port.
|
// Serialize tests that launch the login server since it binds to a fixed port.
|
||||||
#[serial(login_port)]
|
#[serial(login_port)]
|
||||||
async fn login_account_chatgpt_start() -> Result<()> {
|
async fn login_account_chatgpt_start() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path(), CreateConfigTomlParams::default())?;
|
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_login_account_chatgpt_request().await?;
|
let request_id = mcp.send_login_account_chatgpt_request().await?;
|
||||||
@@ -308,16 +308,16 @@ async fn login_account_chatgpt_start() -> Result<()> {
|
|||||||
// Serialize tests that launch the login server since it binds to a fixed port.
|
// Serialize tests that launch the login server since it binds to a fixed port.
|
||||||
#[serial(login_port)]
|
#[serial(login_port)]
|
||||||
async fn login_account_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
|
async fn login_account_chatgpt_includes_forced_workspace_query_param() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(
|
create_config_toml(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
CreateConfigTomlParams {
|
CreateConfigTomlParams {
|
||||||
forced_workspace_id: Some("ws-forced".to_string()),
|
forced_workspace_id: Some("ws-forced".to_string()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_login_account_chatgpt_request().await?;
|
let request_id = mcp.send_login_account_chatgpt_request().await?;
|
||||||
@@ -340,16 +340,16 @@ async fn login_account_chatgpt_includes_forced_workspace_query_param() -> Result
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn get_account_no_auth() -> Result<()> {
|
async fn get_account_no_auth() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(
|
create_config_toml(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
CreateConfigTomlParams {
|
CreateConfigTomlParams {
|
||||||
requires_openai_auth: Some(true),
|
requires_openai_auth: Some(true),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new_with_env(llmx_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let params = GetAccountParams {
|
let params = GetAccountParams {
|
||||||
@@ -371,16 +371,16 @@ async fn get_account_no_auth() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn get_account_with_api_key() -> Result<()> {
|
async fn get_account_with_api_key() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(
|
create_config_toml(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
CreateConfigTomlParams {
|
CreateConfigTomlParams {
|
||||||
requires_openai_auth: Some(true),
|
requires_openai_auth: Some(true),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let req_id = mcp
|
let req_id = mcp
|
||||||
@@ -415,16 +415,16 @@ async fn get_account_with_api_key() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn get_account_when_auth_not_required() -> Result<()> {
|
async fn get_account_when_auth_not_required() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(
|
create_config_toml(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
CreateConfigTomlParams {
|
CreateConfigTomlParams {
|
||||||
requires_openai_auth: Some(false),
|
requires_openai_auth: Some(false),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let params = GetAccountParams {
|
let params = GetAccountParams {
|
||||||
@@ -449,23 +449,23 @@ async fn get_account_when_auth_not_required() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn get_account_with_chatgpt() -> Result<()> {
|
async fn get_account_with_chatgpt() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(
|
create_config_toml(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
CreateConfigTomlParams {
|
CreateConfigTomlParams {
|
||||||
requires_openai_auth: Some(true),
|
requires_openai_auth: Some(true),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
write_chatgpt_auth(
|
write_chatgpt_auth(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
ChatGptAuthFixture::new("access-chatgpt")
|
ChatGptAuthFixture::new("access-chatgpt")
|
||||||
.email("user@example.com")
|
.email("user@example.com")
|
||||||
.plan_type("pro"),
|
.plan_type("pro"),
|
||||||
AuthCredentialsStoreMode::File,
|
AuthCredentialsStoreMode::File,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new_with_env(llmx_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let params = GetAccountParams {
|
let params = GetAccountParams {
|
||||||
@@ -4,14 +4,14 @@ use anyhow::Result;
|
|||||||
use anyhow::anyhow;
|
use anyhow::anyhow;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::JSONRPCError;
|
use codex_app_server_protocol::JSONRPCError;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::Model;
|
use codex_app_server_protocol::Model;
|
||||||
use llmx_app_server_protocol::ModelListParams;
|
use codex_app_server_protocol::ModelListParams;
|
||||||
use llmx_app_server_protocol::ModelListResponse;
|
use codex_app_server_protocol::ModelListResponse;
|
||||||
use llmx_app_server_protocol::ReasoningEffortOption;
|
use codex_app_server_protocol::ReasoningEffortOption;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_protocol::config_types::ReasoningEffort;
|
use codex_protocol::config_types::ReasoningEffort;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
@@ -21,8 +21,8 @@ const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
|
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
|
|
||||||
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
@@ -46,10 +46,10 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
|
|||||||
|
|
||||||
let expected_models = vec![
|
let expected_models = vec![
|
||||||
Model {
|
Model {
|
||||||
id: "gpt-5-llmx".to_string(),
|
id: "gpt-5-codex".to_string(),
|
||||||
model: "gpt-5-llmx".to_string(),
|
model: "gpt-5-codex".to_string(),
|
||||||
display_name: "gpt-5-llmx".to_string(),
|
display_name: "gpt-5-codex".to_string(),
|
||||||
description: "Optimized for llmx.".to_string(),
|
description: "Optimized for codex.".to_string(),
|
||||||
supported_reasoning_efforts: vec![
|
supported_reasoning_efforts: vec![
|
||||||
ReasoningEffortOption {
|
ReasoningEffortOption {
|
||||||
reasoning_effort: ReasoningEffort::Low,
|
reasoning_effort: ReasoningEffort::Low,
|
||||||
@@ -108,8 +108,8 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn list_models_pagination_works() -> Result<()> {
|
async fn list_models_pagination_works() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
|
|
||||||
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
@@ -132,7 +132,7 @@ async fn list_models_pagination_works() -> Result<()> {
|
|||||||
} = to_response::<ModelListResponse>(first_response)?;
|
} = to_response::<ModelListResponse>(first_response)?;
|
||||||
|
|
||||||
assert_eq!(first_items.len(), 1);
|
assert_eq!(first_items.len(), 1);
|
||||||
assert_eq!(first_items[0].id, "gpt-5-llmx");
|
assert_eq!(first_items[0].id, "gpt-5-codex");
|
||||||
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
|
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
|
||||||
|
|
||||||
let second_request = mcp
|
let second_request = mcp
|
||||||
@@ -161,8 +161,8 @@ async fn list_models_pagination_works() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn list_models_rejects_invalid_cursor() -> Result<()> {
|
async fn list_models_rejects_invalid_cursor() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
|
|
||||||
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
@@ -3,14 +3,14 @@ use app_test_support::ChatGptAuthFixture;
|
|||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use app_test_support::write_chatgpt_auth;
|
use app_test_support::write_chatgpt_auth;
|
||||||
use llmx_app_server_protocol::GetAccountRateLimitsResponse;
|
use codex_app_server_protocol::GetAccountRateLimitsResponse;
|
||||||
use llmx_app_server_protocol::JSONRPCError;
|
use codex_app_server_protocol::JSONRPCError;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::LoginApiKeyParams;
|
use codex_app_server_protocol::LoginApiKeyParams;
|
||||||
use llmx_app_server_protocol::RateLimitSnapshot;
|
use codex_app_server_protocol::RateLimitSnapshot;
|
||||||
use llmx_app_server_protocol::RateLimitWindow;
|
use codex_app_server_protocol::RateLimitWindow;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_core::auth::AuthCredentialsStoreMode;
|
use codex_core::auth::AuthCredentialsStoreMode;
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
@@ -28,9 +28,9 @@ const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn get_account_rate_limits_requires_auth() -> Result<()> {
|
async fn get_account_rate_limits_requires_auth() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new_with_env(llmx_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_get_account_rate_limits_request().await?;
|
let request_id = mcp.send_get_account_rate_limits_request().await?;
|
||||||
@@ -45,7 +45,7 @@ async fn get_account_rate_limits_requires_auth() -> Result<()> {
|
|||||||
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
|
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
error.error.message,
|
error.error.message,
|
||||||
"llmx account authentication required to read rate limits"
|
"codex account authentication required to read rate limits"
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -53,9 +53,9 @@ async fn get_account_rate_limits_requires_auth() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
|
async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
login_with_api_key(&mut mcp, "sk-test-key").await?;
|
login_with_api_key(&mut mcp, "sk-test-key").await?;
|
||||||
@@ -80,9 +80,9 @@ async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
|
async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
write_chatgpt_auth(
|
write_chatgpt_auth(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
ChatGptAuthFixture::new("chatgpt-token")
|
ChatGptAuthFixture::new("chatgpt-token")
|
||||||
.account_id("account-123")
|
.account_id("account-123")
|
||||||
.plan_type("pro"),
|
.plan_type("pro"),
|
||||||
@@ -91,7 +91,7 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
|
|||||||
|
|
||||||
let server = MockServer::start().await;
|
let server = MockServer::start().await;
|
||||||
let server_url = server.uri();
|
let server_url = server.uri();
|
||||||
write_chatgpt_base_url(llmx_home.path(), &server_url)?;
|
write_chatgpt_base_url(codex_home.path(), &server_url)?;
|
||||||
|
|
||||||
let primary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T00:02:00Z")
|
let primary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T00:02:00Z")
|
||||||
.expect("parse primary reset timestamp")
|
.expect("parse primary reset timestamp")
|
||||||
@@ -120,14 +120,14 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
|
|||||||
});
|
});
|
||||||
|
|
||||||
Mock::given(method("GET"))
|
Mock::given(method("GET"))
|
||||||
.and(path("/api/llmx/usage"))
|
.and(path("/api/codex/usage"))
|
||||||
.and(header("authorization", "Bearer chatgpt-token"))
|
.and(header("authorization", "Bearer chatgpt-token"))
|
||||||
.and(header("chatgpt-account-id", "account-123"))
|
.and(header("chatgpt-account-id", "account-123"))
|
||||||
.respond_with(ResponseTemplate::new(200).set_body_json(response_body))
|
.respond_with(ResponseTemplate::new(200).set_body_json(response_body))
|
||||||
.mount(&server)
|
.mount(&server)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new_with_env(llmx_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
let request_id = mcp.send_get_account_rate_limits_request().await?;
|
let request_id = mcp.send_get_account_rate_limits_request().await?;
|
||||||
@@ -175,7 +175,7 @@ async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn write_chatgpt_base_url(llmx_home: &Path, base_url: &str) -> std::io::Result<()> {
|
fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n"))
|
std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n"))
|
||||||
}
|
}
|
||||||
@@ -1,14 +1,14 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::ThreadArchiveParams;
|
use codex_app_server_protocol::ThreadArchiveParams;
|
||||||
use llmx_app_server_protocol::ThreadArchiveResponse;
|
use codex_app_server_protocol::ThreadArchiveResponse;
|
||||||
use llmx_app_server_protocol::ThreadStartParams;
|
use codex_app_server_protocol::ThreadStartParams;
|
||||||
use llmx_app_server_protocol::ThreadStartResponse;
|
use codex_app_server_protocol::ThreadStartResponse;
|
||||||
use llmx_core::ARCHIVED_SESSIONS_SUBDIR;
|
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
|
||||||
use llmx_core::find_conversation_path_by_id_str;
|
use codex_core::find_conversation_path_by_id_str;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
@@ -17,10 +17,10 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
|
async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_config_toml(llmx_home.path())?;
|
create_config_toml(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// Start a thread.
|
// Start a thread.
|
||||||
@@ -39,7 +39,7 @@ async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
|
|||||||
assert!(!thread.id.is_empty());
|
assert!(!thread.id.is_empty());
|
||||||
|
|
||||||
// Locate the rollout path recorded for this thread id.
|
// Locate the rollout path recorded for this thread id.
|
||||||
let rollout_path = find_conversation_path_by_id_str(llmx_home.path(), &thread.id)
|
let rollout_path = find_conversation_path_by_id_str(codex_home.path(), &thread.id)
|
||||||
.await?
|
.await?
|
||||||
.expect("expected rollout path for thread id to exist");
|
.expect("expected rollout path for thread id to exist");
|
||||||
assert!(
|
assert!(
|
||||||
@@ -62,7 +62,7 @@ async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
|
|||||||
let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?;
|
let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?;
|
||||||
|
|
||||||
// Verify file moved.
|
// Verify file moved.
|
||||||
let archived_directory = llmx_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
|
let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
|
||||||
// The archived file keeps the original filename (rollout-...-<id>.jsonl).
|
// The archived file keeps the original filename (rollout-...-<id>.jsonl).
|
||||||
let archived_rollout_path =
|
let archived_rollout_path =
|
||||||
archived_directory.join(rollout_path.file_name().expect("rollout file name"));
|
archived_directory.join(rollout_path.file_name().expect("rollout file name"));
|
||||||
@@ -80,8 +80,8 @@ async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_config_toml(llmx_home: &Path) -> std::io::Result<()> {
|
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(config_toml, config_contents())
|
std::fs::write(config_toml, config_contents())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2,10 +2,10 @@ use anyhow::Result;
|
|||||||
use app_test_support::McpProcess;
|
use app_test_support::McpProcess;
|
||||||
use app_test_support::create_fake_rollout;
|
use app_test_support::create_fake_rollout;
|
||||||
use app_test_support::to_response;
|
use app_test_support::to_response;
|
||||||
use llmx_app_server_protocol::JSONRPCResponse;
|
use codex_app_server_protocol::JSONRPCResponse;
|
||||||
use llmx_app_server_protocol::RequestId;
|
use codex_app_server_protocol::RequestId;
|
||||||
use llmx_app_server_protocol::ThreadListParams;
|
use codex_app_server_protocol::ThreadListParams;
|
||||||
use llmx_app_server_protocol::ThreadListResponse;
|
use codex_app_server_protocol::ThreadListResponse;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
@@ -15,13 +15,13 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn thread_list_basic_empty() -> Result<()> {
|
async fn thread_list_basic_empty() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_minimal_config(llmx_home.path())?;
|
create_minimal_config(codex_home.path())?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// List threads in an empty LLMX_HOME; should return an empty page with nextCursor: null.
|
// List threads in an empty CODEX_HOME; should return an empty page with nextCursor: null.
|
||||||
let list_id = mcp
|
let list_id = mcp
|
||||||
.send_thread_list_request(ThreadListParams {
|
.send_thread_list_request(ThreadListParams {
|
||||||
cursor: None,
|
cursor: None,
|
||||||
@@ -42,8 +42,8 @@ async fn thread_list_basic_empty() -> Result<()> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Minimal config.toml for listing.
|
// Minimal config.toml for listing.
|
||||||
fn create_minimal_config(llmx_home: &std::path::Path) -> std::io::Result<()> {
|
fn create_minimal_config(codex_home: &std::path::Path) -> std::io::Result<()> {
|
||||||
let config_toml = llmx_home.join("config.toml");
|
let config_toml = codex_home.join("config.toml");
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
config_toml,
|
config_toml,
|
||||||
r#"
|
r#"
|
||||||
@@ -55,33 +55,33 @@ approval_policy = "never"
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_minimal_config(llmx_home.path())?;
|
create_minimal_config(codex_home.path())?;
|
||||||
|
|
||||||
// Create three rollouts so we can paginate with limit=2.
|
// Create three rollouts so we can paginate with limit=2.
|
||||||
let _a = create_fake_rollout(
|
let _a = create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-02T12-00-00",
|
"2025-01-02T12-00-00",
|
||||||
"2025-01-02T12:00:00Z",
|
"2025-01-02T12:00:00Z",
|
||||||
"Hello",
|
"Hello",
|
||||||
Some("mock_provider"),
|
Some("mock_provider"),
|
||||||
)?;
|
)?;
|
||||||
let _b = create_fake_rollout(
|
let _b = create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-01T13-00-00",
|
"2025-01-01T13-00-00",
|
||||||
"2025-01-01T13:00:00Z",
|
"2025-01-01T13:00:00Z",
|
||||||
"Hello",
|
"Hello",
|
||||||
Some("mock_provider"),
|
Some("mock_provider"),
|
||||||
)?;
|
)?;
|
||||||
let _c = create_fake_rollout(
|
let _c = create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-01T12-00-00",
|
"2025-01-01T12-00-00",
|
||||||
"2025-01-01T12:00:00Z",
|
"2025-01-01T12:00:00Z",
|
||||||
"Hello",
|
"Hello",
|
||||||
Some("mock_provider"),
|
Some("mock_provider"),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// Page 1: limit 2 → expect next_cursor Some.
|
// Page 1: limit 2 → expect next_cursor Some.
|
||||||
@@ -139,12 +139,12 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn thread_list_respects_provider_filter() -> Result<()> {
|
async fn thread_list_respects_provider_filter() -> Result<()> {
|
||||||
let llmx_home = TempDir::new()?;
|
let codex_home = TempDir::new()?;
|
||||||
create_minimal_config(llmx_home.path())?;
|
create_minimal_config(codex_home.path())?;
|
||||||
|
|
||||||
// Create rollouts under two providers.
|
// Create rollouts under two providers.
|
||||||
let _a = create_fake_rollout(
|
let _a = create_fake_rollout(
|
||||||
llmx_home.path(),
|
codex_home.path(),
|
||||||
"2025-01-02T10-00-00",
|
"2025-01-02T10-00-00",
|
||||||
"2025-01-02T10:00:00Z",
|
"2025-01-02T10:00:00Z",
|
||||||
"X",
|
"X",
|
||||||
@@ -152,7 +152,7 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
|
|||||||
)?; // mock_provider
|
)?; // mock_provider
|
||||||
// one with a different provider
|
// one with a different provider
|
||||||
let uuid = Uuid::new_v4();
|
let uuid = Uuid::new_v4();
|
||||||
let dir = llmx_home
|
let dir = codex_home
|
||||||
.path()
|
.path()
|
||||||
.join("sessions")
|
.join("sessions")
|
||||||
.join("2025")
|
.join("2025")
|
||||||
@@ -168,7 +168,7 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
|
|||||||
"id": uuid,
|
"id": uuid,
|
||||||
"timestamp": "2025-01-02T11:00:00Z",
|
"timestamp": "2025-01-02T11:00:00Z",
|
||||||
"cwd": "/",
|
"cwd": "/",
|
||||||
"originator": "llmx",
|
"originator": "codex",
|
||||||
"cli_version": "0.0.0",
|
"cli_version": "0.0.0",
|
||||||
"instructions": null,
|
"instructions": null,
|
||||||
"source": "vscode",
|
"source": "vscode",
|
||||||
@@ -191,7 +191,7 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
|
|||||||
];
|
];
|
||||||
std::fs::write(file_path, lines.join("\n") + "\n")?;
|
std::fs::write(file_path, lines.join("\n") + "\n")?;
|
||||||
|
|
||||||
let mut mcp = McpProcess::new(llmx_home.path()).await?;
|
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||||
|
|
||||||
// Filter to only other_provider; expect 1 item, nextCursor None.
|
// Filter to only other_provider; expect 1 item, nextCursor None.
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user