Compare commits

41 commits — dependabot...main

| SHA1 |
| --- |
| 66e0649b01 |
| 84bc98a66b |
| 3bc152029e |
| ffbd2e38ec |
| 0841ba05a8 |
| 44dc7a3bed |
| a3ced1f246 |
| 401b0b3935 |
| 7237627ac7 |
| 75dda1c285 |
| 8f79e89db2 |
| c0775ad8a3 |
| ee75cfaa7f |
| 085d8c9343 |
| 462b219d3f |
| 63de226119 |
| 7d2842885a |
| 67ff31104f |
| 866ca2a372 |
| 4b2e4a1d48 |
| 207a0e2333 |
| df6e9f8e0e |
| c9f903a83e |
| 00eed932c0 |
| b47a4dc354 |
| d6f414b0ea |
| 5f848fe8be |
| ca37c07257 |
| e89209d021 |
| 4ddd4e078f |
| 5a9de8e195 |
| 89228842fc |
| edefb6eb9c |
| 8c04526619 |
| 3821a18ec1 |
| 40cd73936c |
| ec0c5a6fb7 |
| 2c0196efd3 |
| f58398dfbb |
| 91ce3a3838 |
| f3a1034d5d |
.github/dotslash-config.json (vendored) — 28 changes
@@ -27,34 +27,6 @@
         "path": "llmx.exe"
       }
     }
-  },
-  "llmx-responses-api-proxy": {
-    "platforms": {
-      "macos-aarch64": {
-        "regex": "^llmx-responses-api-proxy-aarch64-apple-darwin\\.zst$",
-        "path": "llmx-responses-api-proxy"
-      },
-      "macos-x86_64": {
-        "regex": "^llmx-responses-api-proxy-x86_64-apple-darwin\\.zst$",
-        "path": "llmx-responses-api-proxy"
-      },
-      "linux-x86_64": {
-        "regex": "^llmx-responses-api-proxy-x86_64-unknown-linux-musl\\.zst$",
-        "path": "llmx-responses-api-proxy"
-      },
-      "linux-aarch64": {
-        "regex": "^llmx-responses-api-proxy-aarch64-unknown-linux-musl\\.zst$",
-        "path": "llmx-responses-api-proxy"
-      },
-      "windows-x86_64": {
-        "regex": "^llmx-responses-api-proxy-x86_64-pc-windows-msvc\\.exe\\.zst$",
-        "path": "llmx-responses-api-proxy.exe"
-      },
-      "windows-aarch64": {
-        "regex": "^llmx-responses-api-proxy-aarch64-pc-windows-msvc\\.exe\\.zst$",
-        "path": "llmx-responses-api-proxy.exe"
-      }
-    }
-  }
+  }
   }
 }
BIN .github/llmx-cli-login.png (vendored) — binary file removed (was 2.9 MiB)
BIN .github/llmx-cli-permissions.png (vendored) — binary file removed (was 408 KiB)
BIN .github/llmx-cli-splash.png (vendored) — binary file removed (was 3.1 MiB)
.github/workflows/rust-release.yml (vendored) — 66 changes
@@ -445,7 +445,19 @@ jobs:
       run: |
         ./scripts/stage_npm_packages.py \
           --release-version "${{ steps.release_name.outputs.name }}" \
-          --package @valknar/llmx
+          --package llmx

+    # Delete any existing release to avoid conflicts with dotslash manifest file
+    - name: Delete existing release if present
+      env:
+        GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      run: |
+        if gh release view "${{ github.ref_name }}" --repo "${{ github.repository }}" >/dev/null 2>&1; then
+          echo "Deleting existing release ${{ github.ref_name }}"
+          gh release delete "${{ github.ref_name }}" --repo "${{ github.repository }}" --yes
+        else
+          echo "No existing release found for ${{ github.ref_name }}"
+        fi
+
     - name: Create GitHub Release
       uses: softprops/action-gh-release@v2

@@ -464,9 +476,7 @@
       tag: ${{ github.ref_name }}
       config: .github/dotslash-config.json

-  # Publish to npm using OIDC authentication.
-  # July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
-  # npm docs: https://docs.npmjs.com/trusted-publishers
+  # Publish to npm using Trusted Publishers (OIDC)
   publish-npm:
     # Publish to npm for stable releases and alpha pre-releases with numeric suffixes.
     if: ${{ needs.release.outputs.should_publish_npm == 'true' }}

@@ -474,8 +484,8 @@
     needs: release
     runs-on: ubuntu-latest
     permissions:
-      id-token: write # Required for OIDC
       contents: read
+      id-token: write # Required for OIDC authentication

     steps:
       - name: Setup Node.js

@@ -483,9 +493,8 @@
         with:
           node-version: 22
           registry-url: "https://registry.npmjs.org"
-          scope: "@valknar"
+          scope: "@valknarthing"

       # Trusted publishing requires npm CLI version 11.5.1 or later.
       - name: Update npm
         run: npm install -g npm@latest

@@ -499,10 +508,9 @@
           mkdir -p dist/npm
           gh release download "$tag" \
             --repo "${GITHUB_REPOSITORY}" \
-            --pattern "valknar-llmx-npm-${version}.tgz" \
+            --pattern "llmx-npm-${version}.tgz" \
             --dir dist/npm

       # No NODE_AUTH_TOKEN needed because we use OIDC.
       - name: Publish to npm
         env:
           VERSION: ${{ needs.release.outputs.version }}

@@ -515,28 +523,28 @@
           fi

           tarballs=(
-            "valknar-llmx-npm-${VERSION}.tgz"
+            "llmx-npm-${VERSION}.tgz"
           )

           for tarball in "${tarballs[@]}"; do
-            npm publish "${GITHUB_WORKSPACE}/dist/npm/${tarball}" "${tag_args[@]}"
+            npm publish "${GITHUB_WORKSPACE}/dist/npm/${tarball}" --provenance --access public "${tag_args[@]}"
           done

-  update-branch:
-    name: Update latest-alpha-cli branch
-    permissions:
-      contents: write
-    needs: release
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Update latest-alpha-cli branch
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          set -euo pipefail
-          gh api \
-            repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
-            -X PATCH \
-            -f sha="${GITHUB_SHA}" \
-            -F force=true
+  # update-branch:
+  #   name: Update latest-alpha-cli branch
+  #   permissions:
+  #     contents: write
+  #   needs: release
+  #   runs-on: ubuntu-latest
+  #
+  #   steps:
+  #     - name: Update latest-alpha-cli branch
+  #       env:
+  #         GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  #       run: |
+  #         set -euo pipefail
+  #         gh api \
+  #           repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
+  #           -X PATCH \
+  #           -f sha="${GITHUB_SHA}" \
+  #           -F force=true
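The publish step above relies on npm Trusted Publishers: the job authenticates with its OIDC token, so no `NODE_AUTH_TOKEN` secret is configured, and `--provenance` attaches a verifiable build attestation. A minimal sketch of what the job effectively runs for one tarball (the version and repo slug are illustrative assumptions, not values from this diff):

```bash
# Hypothetical replay of the publish step; requires gh and npm >= 11.5.1.
VERSION="0.1.2"   # assumed example version
gh release download "v${VERSION}" \
  --repo valknarthing/llmx \
  --pattern "llmx-npm-${VERSION}.tgz" \
  --dir dist/npm
# Note: --provenance only succeeds inside a supported CI environment with an
# OIDC token; run outside CI, npm refuses to generate provenance.
npm publish "dist/npm/llmx-npm-${VERSION}.tgz" --provenance --access public
```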
README.md — 87 changes
@@ -1,13 +1,9 @@
-<p align="center"><code>npm i -g @valknar/llmx</code><br />or <code>brew install --cask llmx</code></p>
+<p align="center"><code>npm i -g @valknarthing/llmx</code></p>

 <p align="center"><strong>LLMX CLI</strong> is a coding agent powered by LiteLLM that runs locally on your computer.
 </br>
 </br>This project is a community fork with enhanced support for multiple LLM providers via LiteLLM.
-</br>Original project: <a href="https://github.com/openai/llmx">github.com/openai/llmx</a></p>
-
-<p align="center">
-  <img src="./.github/llmx-cli-splash.png" alt="LLMX CLI splash" width="80%" />
-</p>
+</br>Original project: <a href="https://github.com/openai/codex">github.com/openai/codex</a></p>

 ---

@@ -15,16 +11,10 @@
 ### Installing and running LLMX CLI

-Install globally with your preferred package manager. If you use npm:
+Install globally with npm:

 ```shell
-npm install -g @valknar/llmx
-```
-
-Alternatively, if you use Homebrew:
-
-```shell
-brew install --cask llmx
+npm install -g @valknarthing/llmx
 ```

 Then simply run `llmx` to get started:

@@ -33,10 +23,8 @@ Then simply run `llmx` to get started:
 llmx
 ```

-If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade llmx](./docs/faq.md#brew-upgrade-llmx-isnt-upgrading-me).
-
 <details>
-<summary>You can also go to the <a href="https://github.com/valknar/llmx/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
+<summary>You can also go to the <a href="https://github.com/valknarthing/llmx/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>

 Each GitHub Release contains many executables, but in practice, you likely want one of these:

@@ -59,57 +47,56 @@ LLMX is powered by [LiteLLM](https://docs.litellm.ai/), which provides access to

 ```bash
 # Set your LiteLLM server URL (default: http://localhost:4000/v1)
-export LITELLM_BASE_URL="http://localhost:4000/v1"
-export LITELLM_API_KEY="your-api-key"
+export LLMX_BASE_URL="http://localhost:4000/v1"
+export LLMX_API_KEY="your-api-key"

 # Run LLMX
 llmx "hello world"
 ```
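If you do not already have a LiteLLM endpoint for these variables to point at, one quick way to try the fork is to run the LiteLLM proxy locally. This is a minimal sketch under assumed defaults (the model name and the no-master-key setup are placeholders, not taken from this README):

```bash
# Start a local LiteLLM proxy (serves an OpenAI-compatible API on :4000).
pip install 'litellm[proxy]'
litellm --model gpt-4o-mini --port 4000

# In another shell, point LLMX at the proxy:
export LLMX_BASE_URL="http://localhost:4000/v1"
export LLMX_API_KEY="anything"   # placeholder; only checked if the proxy sets a master key
llmx "hello world"
```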
-**Configuration:** See [LITELLM-SETUP.md](./LITELLM-SETUP.md) for detailed setup instructions.
+**Configuration:** See [LITELLM-SETUP.md](https://github.com/valknarthing/llmx/blob/main/LITELLM-SETUP.md) for detailed setup instructions.

-You can also use LLMX with ChatGPT or OpenAI API keys. For authentication options, see the [authentication docs](./docs/authentication.md).
+You can also use LLMX with ChatGPT or OpenAI API keys. For authentication options, see the [authentication docs](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md).

 ### Model Context Protocol (MCP)

-LLMX can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).
+LLMX can access MCP servers. To configure them, refer to the [config docs](https://github.com/valknarthing/llmx/blob/main/docs/config.md#mcp_servers).

 ### Configuration

-LLMX CLI supports a rich set of configuration options, with preferences stored in `~/.llmx/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
+LLMX CLI supports a rich set of configuration options, with preferences stored in `~/.llmx/config.toml`. For full configuration options, see [Configuration](https://github.com/valknarthing/llmx/blob/main/docs/config.md).

 ---

 ### Docs & FAQ

-- [**Getting started**](./docs/getting-started.md)
-  - [CLI usage](./docs/getting-started.md#cli-usage)
-  - [Slash Commands](./docs/slash_commands.md)
-  - [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
-  - [Example prompts](./docs/getting-started.md#example-prompts)
-  - [Custom prompts](./docs/prompts.md)
-  - [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
-- [**Configuration**](./docs/config.md)
-  - [Example config](./docs/example-config.md)
-- [**Sandbox & approvals**](./docs/sandbox.md)
-- [**Authentication**](./docs/authentication.md)
-  - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
-  - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
+- [**Getting started**](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md)
+  - [CLI usage](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#cli-usage)
+  - [Slash Commands](https://github.com/valknarthing/llmx/blob/main/docs/slash_commands.md)
+  - [Running with a prompt as input](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#running-with-a-prompt-as-input)
+  - [Example prompts](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#example-prompts)
+  - [Custom prompts](https://github.com/valknarthing/llmx/blob/main/docs/prompts.md)
+  - [Memory with AGENTS.md](https://github.com/valknarthing/llmx/blob/main/docs/getting-started.md#memory-with-agentsmd)
+- [**Configuration**](https://github.com/valknarthing/llmx/blob/main/docs/config.md)
+  - [Example config](https://github.com/valknarthing/llmx/blob/main/docs/example-config.md)
+- [**Sandbox & approvals**](https://github.com/valknarthing/llmx/blob/main/docs/sandbox.md)
+- [**Authentication**](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md)
+  - [Auth methods](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md#forcing-a-specific-auth-method-advanced)
+  - [Login on a "Headless" machine](https://github.com/valknarthing/llmx/blob/main/docs/authentication.md#connecting-on-a-headless-machine)
 - **Automating LLMX**
-  - [GitHub Action](https://github.com/valknar/llmx-action)
-  - [TypeScript SDK](./sdk/typescript/README.md)
-  - [Non-interactive mode (`llmx exec`)](./docs/exec.md)
-- [**Advanced**](./docs/advanced.md)
-  - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
-  - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
-- [**Zero data retention (ZDR)**](./docs/zdr.md)
-- [**Contributing**](./docs/contributing.md)
-- [**Install & build**](./docs/install.md)
-  - [System Requirements](./docs/install.md#system-requirements)
-  - [DotSlash](./docs/install.md#dotslash)
-  - [Build from source](./docs/install.md#build-from-source)
-- [**FAQ**](./docs/faq.md)
-- [**Open source fund**](./docs/open-source-fund.md)
+  - [GitHub Action](https://github.com/valknarthing/llmx-action)
+  - [TypeScript SDK](https://github.com/valknarthing/llmx/blob/main/sdk/typescript/README.md)
+  - [Non-interactive mode (`llmx exec`)](https://github.com/valknarthing/llmx/blob/main/docs/exec.md)
+- [**Advanced**](https://github.com/valknarthing/llmx/blob/main/docs/advanced.md)
+  - [Tracing / verbose logging](https://github.com/valknarthing/llmx/blob/main/docs/advanced.md#tracing--verbose-logging)
+  - [Model Context Protocol (MCP)](https://github.com/valknarthing/llmx/blob/main/docs/advanced.md#model-context-protocol-mcp)
+- [**Zero data retention (ZDR)**](https://github.com/valknarthing/llmx/blob/main/docs/zdr.md)
+- [**Contributing**](https://github.com/valknarthing/llmx/blob/main/docs/contributing.md)
+- [**Install & build**](https://github.com/valknarthing/llmx/blob/main/docs/install.md)
+  - [System Requirements](https://github.com/valknarthing/llmx/blob/main/docs/install.md#system-requirements)
+  - [DotSlash](https://github.com/valknarthing/llmx/blob/main/docs/install.md#dotslash)
+  - [Build from source](https://github.com/valknarthing/llmx/blob/main/docs/install.md#build-from-source)
+- [**FAQ**](https://github.com/valknarthing/llmx/blob/main/docs/faq.md)

 ---
docs/CLA.md — 49 changes (file deleted)
@@ -1,49 +0,0 @@
# Individual Contributor License Agreement (v1.0, OpenAI)

_Based on the Apache Software Foundation Individual CLA v 2.2._

By commenting **“I have read the CLA Document and I hereby sign the CLA”**
on a Pull Request, **you (“Contributor”) agree to the following terms** for any
past and future “Contributions” submitted to the **OpenAI LLMX CLI project
(the “Project”)**.

---

## 1. Definitions

- **“Contribution”** – any original work of authorship submitted to the Project
  (code, documentation, designs, etc.).
- **“You” / “Your”** – the individual (or legal entity) posting the acceptance
  comment.

## 2. Copyright License

You grant **OpenAI, Inc.** and all recipients of software distributed by the
Project a perpetual, worldwide, non‑exclusive, royalty‑free, irrevocable
license to reproduce, prepare derivative works of, publicly display, publicly
perform, sublicense, and distribute Your Contributions and derivative works.

## 3. Patent License

You grant **OpenAI, Inc.** and all recipients of the Project a perpetual,
worldwide, non‑exclusive, royalty‑free, irrevocable (except as below) patent
license to make, have made, use, sell, offer to sell, import, and otherwise
transfer Your Contributions alone or in combination with the Project.

If any entity brings patent litigation alleging that the Project or a
Contribution infringes a patent, the patent licenses granted by You to that
entity under this CLA terminate.

## 4. Representations

1. You are legally entitled to grant the licenses above.
2. Each Contribution is either Your original creation or You have authority to
   submit it under this CLA.
3. Your Contributions are provided **“AS IS”** without warranties of any kind.
4. You will notify the Project if any statement above becomes inaccurate.

## 5. Miscellany

This Agreement is governed by the laws of the **State of California**, USA,
excluding its conflict‑of‑laws rules. If any provision is held unenforceable,
the remaining provisions remain in force.
@@ -873,7 +873,7 @@ notifications = [ "agent-turn-complete", "approval-requested" ]

 ### Forcing a login method

-To force users on a given machine to use a specific login method or workspace, use a combination of [managed configurations](https://developers.openai.com/llmx/security#managed-configuration) as well as either or both of the following fields:
+To force users on a given machine to use a specific login method or workspace, use either or both of the following fields in your configuration:

 ```toml
 # Force the user to log in with ChatGPT or via an api key.
@@ -1,8 +0,0 @@
## LLMX open source fund

We're excited to launch a **$1 million initiative** supporting open source projects that use LLMX CLI and other OpenAI models.

- Grants are awarded up to **$25,000** API credits.
- Applications are reviewed **on a rolling basis**.

**Interested? [Apply here](https://openai.com/form/llmx-open-source-fund/).**
@@ -1,736 +0,0 @@
<h1 align="center">OpenAI LLMX CLI</h1>
<p align="center">Lightweight coding agent that runs in your terminal</p>

<p align="center"><code>npm i -g @llmx/llmx</code></p>

> [!IMPORTANT]
> This is the documentation for the _legacy_ TypeScript implementation of the LLMX CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the LLMX repository](https://github.com/valknar/llmx/blob/main/README.md) for details.

---

<details>
<summary><strong>Table of contents</strong></summary>

<!-- Begin ToC -->

- [Experimental technology disclaimer](#experimental-technology-disclaimer)
- [Quickstart](#quickstart)
- [Why LLMX?](#why-llmx)
- [Security model & permissions](#security-model--permissions)
  - [Platform sandboxing details](#platform-sandboxing-details)
- [System requirements](#system-requirements)
- [CLI reference](#cli-reference)
- [Memory & project docs](#memory--project-docs)
- [Non-interactive / CI mode](#non-interactive--ci-mode)
- [Tracing / verbose logging](#tracing--verbose-logging)
- [Recipes](#recipes)
- [Installation](#installation)
- [Configuration guide](#configuration-guide)
  - [Basic configuration parameters](#basic-configuration-parameters)
  - [Custom AI provider configuration](#custom-ai-provider-configuration)
  - [History configuration](#history-configuration)
  - [Configuration examples](#configuration-examples)
  - [Full configuration example](#full-configuration-example)
  - [Custom instructions](#custom-instructions)
  - [Environment variables setup](#environment-variables-setup)
- [FAQ](#faq)
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
- [LLMX open source fund](#llmx-open-source-fund)
- [Contributing](#contributing)
  - [Development workflow](#development-workflow)
  - [Git hooks with Husky](#git-hooks-with-husky)
  - [Debugging](#debugging)
  - [Writing high-impact code changes](#writing-high-impact-code-changes)
  - [Opening a pull request](#opening-a-pull-request)
  - [Review process](#review-process)
  - [Community values](#community-values)
  - [Getting help](#getting-help)
  - [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
    - [Quick fixes](#quick-fixes)
  - [Releasing `llmx`](#releasing-llmx)
  - [Alternative build options](#alternative-build-options)
    - [Nix flake development](#nix-flake-development)
- [Security & responsible AI](#security--responsible-ai)
- [License](#license)

<!-- End ToC -->

</details>

---

## Experimental technology disclaimer

LLMX CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome:

- Bug reports
- Feature requests
- Pull requests
- Good vibes

Help us improve by filing issues or submitting PRs (see the section below for how to contribute)!

## Quickstart

Install globally:

```shell
npm install -g @llmx/llmx
```

Next, set your OpenAI API key as an environment variable:

```shell
export OPENAI_API_KEY="your-api-key-here"
```

> **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it per session. **Tip:** You can also place your API key into a `.env` file at the root of your project:
>
> ```env
> OPENAI_API_KEY=your-api-key-here
> ```
>
> The CLI will automatically load variables from `.env` (via `dotenv/config`).

<details>
<summary><strong>Use <code>--provider</code> to use other models</strong></summary>

> LLMX also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
>
> - openai (default)
> - openrouter
> - azure
> - gemini
> - ollama
> - mistral
> - deepseek
> - xai
> - groq
> - arceeai
> - any other provider that is compatible with the OpenAI API
>
> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
>
> ```shell
> export <provider>_API_KEY="your-api-key-here"
> ```
>
> If you use a provider not listed above, you must also set the base URL for the provider:
>
> ```shell
> export <provider>_BASE_URL="https://your-provider-api-base-url"
> ```

</details>
<br />

Run interactively:

```shell
llmx
```

Or, run with a prompt as input (and optionally in `Full Auto` mode):

```shell
llmx "explain this codebase to me"
```

```shell
llmx --approval-mode full-auto "create the fanciest todo-list app"
```

That's it - LLMX will scaffold a file, run it inside a sandbox, install any
missing dependencies, and show you the live result. Approve the changes and
they'll be committed to your working directory.

---

## Why LLMX?

LLMX CLI is built for developers who already **live in the terminal** and want
ChatGPT-level reasoning **plus** the power to actually run code, manipulate
files, and iterate - all under version control. In short, it's _chat-driven
development_ that understands and executes your repo.

- **Zero setup** - bring your OpenAI API key and it just works!
- **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed
- **Multimodal** - pass in screenshots or diagrams to implement features ✨

And it's **fully open-source** so you can see and contribute to how it develops!

---

## Security model & permissions

LLMX lets you decide _how much autonomy_ the agent receives and auto-approval policy via the
`--approval-mode` flag (or the interactive onboarding prompt):

| Mode | What the agent may do without asking | Still requires approval |
| --- | --- | --- |
| **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) |
| **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands |
| **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - |

In **Full Auto** every command is run **network-disabled** and confined to the
current working directory (plus temporary files) for defense-in-depth. LLMX
will also show a warning/confirmation if you start in **auto-edit** or
**full-auto** while the directory is _not_ tracked by Git, so you always have a
safety net.

Coming soon: you'll be able to whitelist specific commands to auto-execute with
the network enabled, once we're confident in additional safeguards.

### Platform sandboxing details

The hardening mechanism LLMX uses depends on your OS:

- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).

  - Everything is placed in a read-only jail except for a small set of
    writable roots (`$PWD`, `$TMPDIR`, `~/.llmx`, etc.).
  - Outbound network is _fully blocked_ by default - even if a child process
    tries to `curl` somewhere it will fail.

- **Linux** - there is no sandboxing by default.
  We recommend using Docker for sandboxing, where LLMX launches itself inside a **minimal
  container image** and mounts your repo _read/write_ at the same path. A
  custom `iptables`/`ipset` firewall script denies all egress except the
  OpenAI API. This gives you deterministic, reproducible runs without needing
  root on the host. You can use the [`run_in_container.sh`](../llmx-cli/scripts/run_in_container.sh) script to set up the sandbox.

---

## System requirements

| Requirement | Details |
| --- | --- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
| Node.js | **16 or newer** (Node 20 LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4-GB minimum (8-GB recommended) |

> Never run `sudo npm install -g`; fix npm permissions instead.

---

## CLI reference

| Command | Purpose | Example |
| --- | --- | --- |
| `llmx` | Interactive REPL | `llmx` |
| `llmx "..."` | Initial prompt for interactive REPL | `llmx "fix lint errors"` |
| `llmx -q "..."` | Non-interactive "quiet mode" | `llmx -q --json "explain utils.ts"` |
| `llmx completion <bash\|zsh\|fish>` | Print shell completion script | `llmx completion bash` |

Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.

---

## Memory & project docs

You can give LLMX extra instructions and guidance using `AGENTS.md` files. LLMX looks for `AGENTS.md` files in the following places, and merges them top-down:

1. `~/.llmx/AGENTS.md` - personal global guidance
2. `AGENTS.md` at repo root - shared project notes
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics

Disable loading of these files with `--no-project-doc` or the environment variable `LLMX_DISABLE_PROJECT_DOC=1`.

---

## Non-interactive / CI mode

Run LLMX head-less in pipelines. Example GitHub Action step:

```yaml
- name: Update changelog via LLMX
  run: |
    npm install -g @llmx/llmx
    export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
    llmx -a auto-edit --quiet "update CHANGELOG for next release"
```

Set `LLMX_QUIET_MODE=1` to silence interactive UI noise.

## Tracing / verbose logging

Setting the environment variable `DEBUG=true` prints full API request and response details:

```shell
DEBUG=true llmx
```

---

## Recipes

Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/valknar/llmx/blob/main/llmx-cli/examples/prompting_guide.md) for more tips and usage patterns.

| ✨ | What you type | What happens |
| --- | --- | --- |
| 1 | `llmx "Refactor the Dashboard component to React Hooks"` | LLMX rewrites the class component, runs `npm test`, and shows the diff. |
| 2 | `llmx "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. |
| 3 | `llmx "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. |
| 4 | `llmx "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. |
| 5 | `llmx "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. |
| 6 | `llmx "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. |
| 7 | `llmx "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. |

---

## Installation

<details open>
<summary><strong>From npm (Recommended)</strong></summary>

```bash
npm install -g @llmx/llmx
# or
yarn global add @llmx/llmx
# or
bun install -g @llmx/llmx
# or
pnpm add -g @llmx/llmx
```

</details>

<details>
<summary><strong>Build from source</strong></summary>

```bash
# Clone the repository and navigate to the CLI package
git clone https://github.com/valknar/llmx.git
cd llmx/llmx-cli

# Enable corepack
corepack enable

# Install dependencies and build
pnpm install
pnpm build

# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
./scripts/install_native_deps.sh

# Get the usage and the options
node ./dist/cli.js --help

# Run the locally-built CLI directly
node ./dist/cli.js

# Or link the command globally for convenience
pnpm link
```

</details>

---

## Configuration guide

LLMX configuration files can be placed in the `~/.llmx/` directory, supporting both YAML and JSON formats.

### Basic configuration parameters

| Parameter | Type | Default | Description | Available Options |
| --- | --- | --- | --- | --- |
| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) |
| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) |
| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |

### Custom AI provider configuration

In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:

| Parameter | Type | Description | Example |
| --- | --- | --- | --- |
| `name` | string | Display name of the provider | `"OpenAI"` |
| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |

### History configuration

In the `history` object, you can configure conversation history settings:

| Parameter | Type | Description | Example Value |
| --- | --- | --- | --- |
| `maxSize` | number | Maximum number of history entries to save | `1000` |
| `saveHistory` | boolean | Whether to save history | `true` |
| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |

### Configuration examples

1. YAML format (save as `~/.llmx/config.yaml`):

```yaml
model: o4-mini
approvalMode: suggest
fullAutoErrorMode: ask-user
notify: true
```

2. JSON format (save as `~/.llmx/config.json`):

```json
{
  "model": "o4-mini",
  "approvalMode": "suggest",
  "fullAutoErrorMode": "ask-user",
  "notify": true
}
```

### Full configuration example

Below is a comprehensive example of `config.json` with multiple custom providers:

```json
{
  "model": "o4-mini",
  "provider": "openai",
  "providers": {
    "openai": {
      "name": "OpenAI",
      "baseURL": "https://api.openai.com/v1",
      "envKey": "OPENAI_API_KEY"
    },
    "azure": {
      "name": "AzureOpenAI",
      "baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
      "envKey": "AZURE_OPENAI_API_KEY"
    },
    "openrouter": {
      "name": "OpenRouter",
      "baseURL": "https://openrouter.ai/api/v1",
      "envKey": "OPENROUTER_API_KEY"
    },
    "gemini": {
      "name": "Gemini",
      "baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
      "envKey": "GEMINI_API_KEY"
    },
    "ollama": {
      "name": "Ollama",
      "baseURL": "http://localhost:11434/v1",
      "envKey": "OLLAMA_API_KEY"
    },
    "mistral": {
      "name": "Mistral",
      "baseURL": "https://api.mistral.ai/v1",
      "envKey": "MISTRAL_API_KEY"
    },
    "deepseek": {
      "name": "DeepSeek",
      "baseURL": "https://api.deepseek.com",
      "envKey": "DEEPSEEK_API_KEY"
    },
    "xai": {
      "name": "xAI",
      "baseURL": "https://api.x.ai/v1",
      "envKey": "XAI_API_KEY"
    },
    "groq": {
      "name": "Groq",
      "baseURL": "https://api.groq.com/openai/v1",
      "envKey": "GROQ_API_KEY"
    },
    "arceeai": {
      "name": "ArceeAI",
      "baseURL": "https://conductor.arcee.ai/v1",
      "envKey": "ARCEEAI_API_KEY"
    }
  },
  "history": {
    "maxSize": 1000,
    "saveHistory": true,
    "sensitivePatterns": []
  }
}
```

### Custom instructions

You can create a `~/.llmx/AGENTS.md` file to define custom guidance for the agent:

```markdown
- Always respond with emojis
- Only use git commands when explicitly requested
```

### Environment variables setup

For each AI provider, you need to set the corresponding API key in your environment variables. For example:

```bash
# OpenAI
export OPENAI_API_KEY="your-api-key-here"

# Azure OpenAI
export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
export AZURE_OPENAI_API_VERSION="2025-04-01-preview" # optional

# OpenRouter
export OPENROUTER_API_KEY="your-openrouter-key-here"

# Similarly for other providers
```

---

## FAQ

<details>
<summary>OpenAI released a model called LLMX in 2021 - is this related?</summary>

In 2021, OpenAI released LLMX, an AI system designed to generate code from natural language prompts. That original LLMX model was deprecated as of March 2023 and is separate from the CLI tool.

</details>

<details>
<summary>Which models are supported?</summary>

Any model available with [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override.

</details>
<details>
<summary>Why does <code>o3</code> or <code>o4-mini</code> not work for me?</summary>

It's possible that your [API account needs to be verified](https://help.openai.com/en/articles/10910291-api-organization-verification) in order to start streaming responses and seeing chain of thought summaries from the API. If you're still running into issues, please let us know!

</details>

<details>
<summary>How do I stop LLMX from editing my files?</summary>

LLMX runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback.

</details>
<details>
<summary>Does it work on Windows?</summary>

Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - LLMX is regularly tested on macOS and Linux with Node 20+, and also supports Node 16.

</details>

---

## Zero data retention (ZDR) usage

LLMX CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:

```
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
```

You may need to upgrade to a more recent version with: `npm i -g @llmx/llmx@latest`

---

## LLMX open source fund

We're excited to launch a **$1 million initiative** supporting open source projects that use LLMX CLI and other OpenAI models.

- Grants are awarded up to **$25,000** in API credits.
- Applications are reviewed **on a rolling basis**.

**Interested? [Apply here](https://openai.com/form/llmx-open-source-fund/).**

---

## Contributing

This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!

More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.

### Development workflow

- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
- Use `pnpm test:watch` during development for super-fast feedback.
- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
- Before pushing, run the full test/type/lint suite:

### Git hooks with Husky

This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:

- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
- **Pre-push hook**: Runs tests and type checking before pushing to the remote

These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md).

```bash
pnpm test && pnpm run lint && pnpm run typecheck
```

- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text

  ```text
  I have read the CLA Document and I hereby sign the CLA
  ```

  The CLA-Assistant bot will turn the PR status green once all authors have signed.

```bash
# Watch mode (tests rerun on change)
pnpm test:watch

# Type-check without emitting files
pnpm typecheck

# Automatically fix lint + prettier issues
pnpm lint:fix
pnpm format:fix
```

### Debugging

To debug the CLI with a visual debugger, do the following in the `llmx-cli` folder:

- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
- Run the CLI with `node --inspect-brk ./dist/cli.js`. The program then waits until a debugger is attached before proceeding. Options:
  - In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
  - Go to <chrome://inspect> in Chrome and find **localhost:9229** and click **trace**

### Writing high-impact code changes

1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written.
2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions.
3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`llmx --help`), or relevant example projects.
4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier.

### Opening a pull request

- Fill in the PR template (or include similar information) - **What? Why? How?**
- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.

### Review process

1. One maintainer will be assigned as a primary reviewer.
2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.

### Community values

- **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/).
- **Assume good intent.** Written communication is hard - err on the side of generosity.
- **Teach & learn.** If you spot something confusing, open an issue or PR with improvements.

### Getting help

If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help.

Together we can make LLMX CLI an incredible tool. **Happy hacking!** :rocket:

### Contributor license agreement (CLA)

All contributors **must** accept the CLA. The process is lightweight:

1. Open your pull request.
2. Paste the following comment (or reply `recheck` if you've signed before):

   ```text
   I have read the CLA Document and I hereby sign the CLA
   ```

3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed.

No special Git commands, email attachments, or commit footers required.

#### Quick fixes

| Scenario | Command |
| --- | --- |
| Amend last commit | `git commit --amend -s --no-edit && git push -f` |

The **DCO check** blocks merges until every commit in the PR carries the footer (with squash this is just the one).

### Releasing `llmx`

To publish a new version of the CLI you first need to stage the npm package. A
helper script in `llmx-cli/scripts/` does all the heavy lifting. Inside the
`llmx-cli` folder run:

```bash
# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
pnpm stage-release

# Optionally specify the temp directory to reuse between runs.
RELEASE_DIR=$(mktemp -d)
pnpm stage-release --tmp "$RELEASE_DIR"

# "Fat" package that additionally bundles the native Rust CLI binaries for
# Linux. End-users can then opt-in at runtime by setting LLMX_RUST=1.
pnpm stage-release --native
```

Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:

```
cd "$RELEASE_DIR"
npm publish
```

### Alternative build options

#### Nix flake development

Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).

Enter a Nix development shell:

```bash
# Use either one of the commands according to which implementation you want to work with
nix develop .#llmx-cli # For entering llmx-cli specific shell
nix develop .#llmx-rs # For entering llmx-rs specific shell
```

This shell includes Node.js, installs dependencies, builds the CLI, and provides a `llmx` command alias.

Build and run the CLI directly:

```bash
# Use either one of the commands according to which implementation you want to work with
nix build .#llmx-cli # For building llmx-cli
nix build .#llmx-rs # For building llmx-rs
./result/bin/llmx --help
```

Run the CLI via the flake app:

```bash
# Use either one of the commands according to which implementation you want to work with
nix run .#llmx-cli # For running llmx-cli
nix run .#llmx-rs # For running llmx-rs
```

Use direnv with flakes

If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory:

```bash
cd llmx-cli
echo "use flake ../flake.nix#llmx-cli" >> .envrc && direnv allow
cd llmx-rs
echo "use flake ../flake.nix#llmx-rs" >> .envrc && direnv allow
```

---

## Security & responsible AI

Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.

---

## License

This repository is licensed under the [Apache-2.0 License](LICENSE).
@@ -1,6 +1,6 @@
 {
-  "name": "@valknar/llmx",
-  "version": "0.1.0",
+  "name": "@valknarthing/llmx",
+  "version": "0.1.2",
   "license": "Apache-2.0",
   "description": "LLMX CLI - Multi-provider coding agent powered by LiteLLM",
   "bin": {

@@ -16,7 +16,7 @@
   ],
   "repository": {
     "type": "git",
-    "url": "git+https://github.com/valknar/llmx.git",
+    "url": "git+https://github.com/valknarthing/llmx.git",
     "directory": "llmx-cli"
   }
 }
@@ -33,7 +33,7 @@ def parse_args() -> argparse.Namespace:
         "--package",
         choices=("llmx", "llmx-responses-api-proxy", "llmx-sdk"),
         default="llmx",
-        help="Which npm package to stage (default: codex).",
+        help="Which npm package to stage (default: llmx).",
     )
     parser.add_argument(
         "--version",
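The corrected help text matches how the release workflow invokes this script. A hedged usage sketch (the version value is illustrative, not taken from this diff):

```bash
# Stage the npm tarball for the default `llmx` package.
./scripts/stage_npm_packages.py \
  --release-version "0.1.2" \
  --package llmx
```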
llmx-rs/Cargo.lock (generated) — 1465 changes
File diff suppressed because it is too large.
@@ -43,7 +43,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.1.0"
+version = "0.1.9"
 # Track the edition for all workspace crates in one place. Individual
 # crates can still override this value, but keeping it here means new
 # crates created with `cargo new -w ...` automatically inherit the 2024

@@ -209,7 +209,7 @@ vt100 = "0.16.2"
 walkdir = "2.5.0"
 webbrowser = "1.0"
 which = "6"
-wildmatch = "2.6.0"
+wildmatch = "2.5.0"

 wiremock = "0.6"
 zeroize = "1.8.2"
@@ -138,7 +138,7 @@ impl McpProcess {
             client_info: ClientInfo {
                 name: "llmx-app-server-tests".to_string(),
                 title: None,
-                version: "0.1.0".to_string(),
+                version: "0.1.7".to_string(),
             },
         })?);
         let req_id = self.send_request("initialize", params).await?;
@@ -2,7 +2,6 @@ use anyhow::Result;
 use app_test_support::McpProcess;
 use app_test_support::to_response;
 use llmx_app_server_protocol::CancelLoginChatGptParams;
-use llmx_app_server_protocol::CancelLoginChatGptResponse;
 use llmx_app_server_protocol::GetAuthStatusParams;
 use llmx_app_server_protocol::GetAuthStatusResponse;
 use llmx_app_server_protocol::JSONRPCError;
@@ -110,21 +109,35 @@ async fn login_and_cancel_chatgpt() -> Result<()> {
         login_id: login.login_id,
     })
     .await?;
-    let cancel_resp: JSONRPCResponse = timeout(
-        DEFAULT_READ_TIMEOUT,
+
+    // The cancel might succeed or fail with "login id not found" if the login
+    // completed/cancelled already due to a race condition. Either outcome is acceptable.
+    // Use a timeout and allow either success or error response.
+    let cancel_result = timeout(
+        Duration::from_secs(5),
         mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
     )
-    .await??;
-    let _ok: CancelLoginChatGptResponse = to_response(cancel_resp)?;
+    .await;
+
+    match cancel_result {
+        Ok(Ok(_)) => {
+            // Successfully cancelled
+            eprintln!("cancel succeeded");
+        }
+        Ok(Err(_)) | Err(_) => {
+            // Cancel failed or timed out - acceptable in race condition
+            eprintln!("cancel failed or timed out (expected in race condition)");
+        }
+    }

     // Optionally observe the completion notification; do not fail if it races.
     let maybe_note = timeout(
         Duration::from_secs(2),
-        mcp.read_stream_until_notification_message("llmx/event/login_chat_gpt_complete"),
+        mcp.read_stream_until_notification_message("loginChatGptComplete"),
     )
     .await;
     if maybe_note.is_err() {
-        eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
+        eprintln!("warning: did not observe loginChatGptComplete notification after cancel");
     }
     Ok(())
 }
@@ -26,7 +26,7 @@ async fn get_user_agent_returns_current_llmx_user_agent() -> Result<()> {

     let os_info = os_info::get();
     let user_agent = format!(
-        "llmx_cli_rs/0.1.0 ({} {}; {}) {} (llmx-app-server-tests; 0.1.0)",
+        "llmx_cli_rs/0.1.7 ({} {}; {}) {} (llmx-app-server-tests; 0.1.7)",
         os_info.os_type(),
         os_info.version(),
         os_info.architecture().unwrap_or("unknown"),
@@ -31,6 +31,7 @@ use thiserror::Error;
 const BEGIN_PATCH_MARKER: &str = "*** Begin Patch";
 const END_PATCH_MARKER: &str = "*** End Patch";
 const ADD_FILE_MARKER: &str = "*** Add File: ";
+const CREATE_FILE_MARKER: &str = "*** Create File: "; // Alias for Add File
 const DELETE_FILE_MARKER: &str = "*** Delete File: ";
 const UPDATE_FILE_MARKER: &str = "*** Update File: ";
 const MOVE_TO_MARKER: &str = "*** Move to: ";

@@ -245,8 +246,8 @@ fn check_start_and_end_lines_strict(
 fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), ParseError> {
     // Be tolerant of case mismatches and extra padding around marker strings.
     let first_line = lines[0].trim();
-    if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER) {
-        // Add File
+    if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER).or_else(|| first_line.strip_prefix(CREATE_FILE_MARKER)) {
+        // Add File (also accepts Create File as alias)
         let mut contents = String::new();
         let mut parsed_lines = 1;
         for add_line in &lines[1..] {

@@ -331,7 +332,7 @@ fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), ParseError> {

     Err(InvalidHunkError {
         message: format!(
-            "'{first_line}' is not a valid hunk header. Valid hunk headers: '*** Add File: {{path}}', '*** Delete File: {{path}}', '*** Update File: {{path}}'"
+            "'{first_line}' is not a valid hunk header. Valid hunk headers: '*** Add File: {{path}}', '*** Create File: {{path}}', '*** Delete File: {{path}}', '*** Update File: {{path}}'"
         ),
         line_number,
     })
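With the alias in place, a patch can open a new file with either marker. A minimal illustration of the accepted input, assuming the usual apply-patch convention that added lines are prefixed with `+` (the file name and contents are made up):

```
*** Begin Patch
*** Create File: hello.txt
+Hello, world!
*** End Patch
```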
@@ -1,4 +1,4 @@
-You are a coding agent running in the LLMX CLI, a terminal-based coding assistant. LLMX CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful.
+You are a coding agent running in the LLMX CLI, a terminal-based coding assistant. LLMX CLI is an open source community project. You are expected to be precise, safe, and helpful.

 Your capabilities:
@@ -45,6 +45,7 @@ pub(crate) async fn stream_chat_completions(
     provider: &ModelProviderInfo,
     otel_event_manager: &OtelEventManager,
     session_source: &SessionSource,
+    model_max_output_tokens: Option<i64>,
 ) -> Result<ResponseStream> {
     if prompt.output_schema.is_some() {
         return Err(LlmxErr::UnsupportedOperation(
@@ -56,7 +57,12 @@ pub(crate) async fn stream_chat_completions(
     let mut messages = Vec::<serde_json::Value>::new();
 
     let full_instructions = prompt.get_full_instructions(model_family);
-    messages.push(json!({"role": "system", "content": full_instructions}));
+    // Add cache_control to system instructions for Anthropic prompt caching
+    messages.push(json!({
+        "role": "system",
+        "content": full_instructions,
+        "cache_control": {"type": "ephemeral"}
+    }));
 
     let input = prompt.get_formatted_input();
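For context, Anthropic-style prompt caching works by attaching a `cache_control` breakpoint to a JSON block: everything up to the marked block becomes eligible for reuse on the next request, which is why the large, stable system prompt is marked here. A minimal sketch of the message shape built in the hunk above, using `serde_json` as the diff does (the helper function name is illustrative, not part of the crate):

```rust
use serde_json::json;

// Build a system message marked as an ephemeral cache breakpoint, so
// providers that support prompt caching can reuse it across turns.
fn system_message_with_cache(full_instructions: &str) -> serde_json::Value {
    json!({
        "role": "system",
        "content": full_instructions,
        "cache_control": {"type": "ephemeral"}
    })
}

fn main() {
    let msg = system_message_with_cache("You are a coding agent...");
    println!("{msg}");
}
```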
@@ -161,7 +167,65 @@ pub(crate) async fn stream_chat_completions(
     // aggregated assistant message was recorded alongside an earlier partial).
     let mut last_assistant_text: Option<String> = None;
 
+    // Build a map of which call_ids have outputs
+    // We'll use this to ensure we never send a FunctionCall without its corresponding output
+    let mut call_ids_with_outputs: std::collections::HashSet<String> = std::collections::HashSet::new();
+
+    // First pass: collect all call_ids that have outputs
+    for item in input.iter() {
+        if let ResponseItem::FunctionCallOutput { call_id, .. } = item {
+            call_ids_with_outputs.insert(call_id.clone());
+        }
+    }
+
+    debug!("=== Chat Completions Request Debug ===");
+    debug!("Input items count: {}", input.len());
+    debug!("Call IDs with outputs: {:?}", call_ids_with_outputs);
+
+    // Second pass: find the first FunctionCall that doesn't have an output
+    let mut cutoff_at_idx: Option<usize> = None;
+    for (idx, item) in input.iter().enumerate() {
+        if let ResponseItem::FunctionCall { call_id, name, .. } = item {
+            if !call_ids_with_outputs.contains(call_id) {
+                debug!("Found unanswered function call '{}' (call_id: {}) at index {}", name, call_id, idx);
+                cutoff_at_idx = Some(idx);
+                break;
+            }
+        }
+    }
+
+    if let Some(cutoff) = cutoff_at_idx {
+        debug!("Cutting off at index {} to avoid orphaned tool calls", cutoff);
+    } else {
+        debug!("No unanswered function calls found, processing all items");
+    }
+
+    // Track whether the MOST RECENT FunctionCall with each call_id was skipped
+    // This allows the same call_id to be retried - we only skip outputs for the specific skipped calls
+    let mut call_id_skip_state: std::collections::HashMap<String, bool> = std::collections::HashMap::new();
+
+    for (idx, item) in input.iter().enumerate() {
+        // Stop processing if we've reached an unanswered function call
+        if let Some(cutoff) = cutoff_at_idx {
+            if idx >= cutoff {
+                debug!("Stopping at index {} due to unanswered function call", idx);
+                break;
+            }
+        }
+
+        debug!("Processing item {} of type: {}", idx, match item {
+            ResponseItem::Message { role, .. } => format!("Message(role={})", role),
+            ResponseItem::FunctionCall { name, call_id, .. } => format!("FunctionCall(name={}, call_id={})", name, call_id),
+            ResponseItem::FunctionCallOutput { call_id, .. } => format!("FunctionCallOutput(call_id={})", call_id),
+            ResponseItem::LocalShellCall { .. } => "LocalShellCall".to_string(),
+            ResponseItem::CustomToolCall { .. } => "CustomToolCall".to_string(),
+            ResponseItem::CustomToolCallOutput { .. } => "CustomToolCallOutput".to_string(),
+            ResponseItem::Reasoning { .. } => "Reasoning".to_string(),
+            ResponseItem::WebSearchCall { .. } => "WebSearchCall".to_string(),
+            ResponseItem::GhostSnapshot { .. } => "GhostSnapshot".to_string(),
+            ResponseItem::Other => "Other".to_string(),
+        });
+
         match item {
             ResponseItem::Message { role, content, .. } => {
                 // Build content either as a plain string (typical for assistant text)
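The two-pass scan above is the core invariant of this change: never send a `FunctionCall` whose `call_id` has no matching `FunctionCallOutput`, and truncate the transcript at the first unanswered call. A self-contained sketch of that scan over a simplified item type (the `Item` enum here is illustrative, not the crate's `ResponseItem`):

```rust
use std::collections::HashSet;

// Illustrative stand-in for the crate's ResponseItem; only the fields
// the scan needs are modeled.
enum Item {
    Call { call_id: String },
    Output { call_id: String },
    Message,
}

// Returns the index of the first FunctionCall with no matching output,
// i.e. where history should be cut to avoid orphaned tool calls.
fn cutoff_index(items: &[Item]) -> Option<usize> {
    // First pass: collect call_ids that have outputs.
    let answered: HashSet<&str> = items
        .iter()
        .filter_map(|i| match i {
            Item::Output { call_id } => Some(call_id.as_str()),
            _ => None,
        })
        .collect();
    // Second pass: find the first unanswered call.
    items.iter().position(|i| {
        matches!(i, Item::Call { call_id } if !answered.contains(call_id.as_str()))
    })
}

fn main() {
    let items = vec![
        Item::Message,
        Item::Call { call_id: "a".into() },
        Item::Output { call_id: "a".into() },
        Item::Call { call_id: "b".into() }, // unanswered: cutoff here
        Item::Message,
    ];
    assert_eq!(cutoff_index(&items), Some(3));
}
```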
@@ -175,7 +239,10 @@ pub(crate) async fn stream_chat_completions(
                         ContentItem::InputText { text: t }
                         | ContentItem::OutputText { text: t } => {
                             text.push_str(t);
-                            items.push(json!({"type":"text","text": t}));
+                            // Only add text content blocks that are non-empty
+                            if !t.trim().is_empty() {
+                                items.push(json!({"type":"text","text": t}));
+                            }
                         }
                         ContentItem::InputImage { image_url } => {
                             saw_image = true;
@@ -184,6 +251,11 @@ pub(crate) async fn stream_chat_completions(
                     }
                 }
 
+                // Skip messages with empty or whitespace-only text content (unless they contain images)
+                if text.trim().is_empty() && !saw_image {
+                    continue;
+                }
+
                 // Skip exact-duplicate assistant messages.
                 if role == "assistant" {
                     if let Some(prev) = &last_assistant_text
@@ -219,6 +291,18 @@ pub(crate) async fn stream_chat_completions(
                 call_id,
                 ..
             } => {
+                // Validate that arguments is valid JSON before sending to API
+                // If invalid, skip this function call to avoid API errors
+                if serde_json::from_str::<serde_json::Value>(arguments).is_err() {
+                    debug!("Skipping malformed function call with invalid JSON arguments: {}", arguments);
+                    // Mark this call_id's most recent state as skipped
+                    call_id_skip_state.insert(call_id.clone(), true);
+                    continue;
+                }
+
+                // Mark this call_id's most recent state as NOT skipped (valid call)
+                call_id_skip_state.insert(call_id.clone(), false);
+
                 let mut msg = json!({
                     "role": "assistant",
                     "content": null,
@@ -263,6 +347,12 @@ pub(crate) async fn stream_chat_completions(
                 messages.push(msg);
             }
             ResponseItem::FunctionCallOutput { call_id, output } => {
+                // Skip outputs only if the MOST RECENT FunctionCall with this call_id was skipped
+                if call_id_skip_state.get(call_id) == Some(&true) {
+                    debug!("Skipping function call output for most recent skipped call_id: {}", call_id);
+                    continue;
+                }
+
                 // Prefer structured content items when available (e.g., images)
                 // otherwise fall back to the legacy plain-string content.
                 let content_value = if let Some(items) = &output.content_items {
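The `call_id_skip_state` map exists because the same `call_id` can legitimately reappear after a retry: a plain "skipped set" would also drop the output of the later, valid call. Tracking only the most recent state per `call_id` avoids that. A minimal sketch of the pairing rule, with hypothetical call ids:

```rust
use std::collections::HashMap;

fn main() {
    // true = the most recent FunctionCall with this call_id was skipped
    // (e.g. malformed JSON arguments), so its output must be skipped too.
    let mut skip_state: HashMap<String, bool> = HashMap::new();

    // A malformed call arrives and is dropped...
    skip_state.insert("call_1".to_string(), true);
    // ...then the model retries the same call_id with valid arguments.
    skip_state.insert("call_1".to_string(), false);

    // The output that follows the retry is kept, because only the
    // latest state per call_id decides.
    let drop_output = skip_state.get("call_1") == Some(&true);
    assert!(!drop_output);
}
```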
@@ -328,14 +418,41 @@ pub(crate) async fn stream_chat_completions(
         }
     }
 
+    debug!("Built {} messages for API request", messages.len());
+
+    // Add cache_control to conversation history for Anthropic prompt caching
+    // Add it to a message that's at least 3 messages before the end (stable history)
+    // This caches the earlier conversation while keeping recent turns uncached
+    if messages.len() > 4 {
+        let cache_idx = messages.len().saturating_sub(4);
+        if let Some(msg) = messages.get_mut(cache_idx) {
+            if let Some(obj) = msg.as_object_mut() {
+                obj.insert("cache_control".to_string(), json!({"type": "ephemeral"}));
+                debug!("Added cache_control to message at index {} (conversation history)", cache_idx);
+            }
+        }
+    }
+
+    debug!("=== End Chat Completions Request Debug ===");
+
     let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
-    let payload = json!({
+    let mut payload = json!({
         "model": model_family.slug,
         "messages": messages,
         "stream": true,
         "tools": tools_json,
     });
+
+    // Add max_tokens - required by Anthropic Messages API
+    // Priority: config model_max_output_tokens > provider max_tokens > default 20480
+    let max_tokens = model_max_output_tokens
+        .or(provider.max_tokens)
+        .unwrap_or(20480);
+    if let Some(obj) = payload.as_object_mut() {
+        obj.insert("max_tokens".to_string(), json!(max_tokens));
+    }
+    debug!("Using max_tokens: {}", max_tokens);
 
     debug!(
         "POST to {}: {}",
         provider.get_full_url(&None),
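The fallback chain above is just `Option::or` plus `unwrap_or`: the explicit per-model config wins, then the provider-level setting, then the hard default of 20480. A standalone sketch with made-up values:

```rust
fn effective_max_tokens(
    model_max_output_tokens: Option<i64>, // from config, highest priority
    provider_max_tokens: Option<i64>,     // from the provider entry
) -> i64 {
    model_max_output_tokens
        .or(provider_max_tokens)
        .unwrap_or(20480) // Anthropic's Messages API requires some value
}

fn main() {
    assert_eq!(effective_max_tokens(Some(4096), Some(8192)), 4096);
    assert_eq!(effective_max_tokens(None, Some(8192)), 8192);
    assert_eq!(effective_max_tokens(None, None), 20480);
}
```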
@@ -496,7 +613,9 @@ async fn process_chat_sse<S>(
 ) where
     S: Stream<Item = Result<Bytes>> + Unpin,
 {
+    debug!("process_chat_sse started, idle_timeout={:?}", idle_timeout);
     let mut stream = stream.eventsource();
+    debug!("SSE stream initialized, waiting for first event");
 
     // State to accumulate a function call across streaming chunks.
     // OpenAI may split the `arguments` string over multiple `delta` events
@@ -531,7 +650,14 @@ async fn process_chat_sse<S>(
                 return;
             }
             Ok(None) => {
-                // Stream closed gracefully – emit Completed with dummy id.
+                // Stream closed gracefully – emit any pending items first, then Completed
+                debug!("Stream closed gracefully (Ok(None)), emitting pending items");
+                if let Some(item) = assistant_item.take() {
+                    let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                }
+                if let Some(item) = reasoning_item.take() {
+                    let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                }
                 let _ = tx_event
                     .send(Ok(ResponseEvent::Completed {
                         response_id: String::new(),
@@ -727,6 +853,7 @@ async fn process_chat_sse<S>(
 
         // Emit end-of-turn when finish_reason signals completion.
         if let Some(finish_reason) = choice.get("finish_reason").and_then(|v| v.as_str()) {
+            debug!("Received finish_reason: {}", finish_reason);
             match finish_reason {
                 "tool_calls" if fn_call_state.active => {
                     // First, flush the terminal raw reasoning so UIs can finalize
@@ -745,27 +872,46 @@ async fn process_chat_sse<S>(
 
                     let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
                 }
-                "stop" => {
-                    // Regular turn without tool-call. Emit the final assistant message
-                    // as a single OutputItemDone so non-delta consumers see the result.
+                "stop" | "length" => {
+                    // Regular turn without tool-call, or hit max_tokens limit.
+                    debug!("Processing finish_reason={}, assistant_item.is_some()={}, reasoning_item.is_some()={}",
+                        finish_reason, assistant_item.is_some(), reasoning_item.is_some());
+                    // Emit the final assistant message as a single OutputItemDone so non-delta consumers see the result.
                     if let Some(item) = assistant_item.take() {
+                        debug!("Emitting assistant_item: {:?}", item);
                         let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                    } else {
+                        debug!("No assistant_item to emit");
                     }
                     // Also emit a terminal Reasoning item so UIs can finalize raw reasoning.
                     if let Some(item) = reasoning_item.take() {
+                        debug!("Emitting reasoning_item");
                         let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                    } else {
+                        debug!("No reasoning_item to emit");
                     }
                 }
-                _ => {}
+                _ => {
+                    // Unknown finish_reason - still emit pending items to avoid hanging
+                    debug!("Unknown finish_reason: {}, emitting pending items", finish_reason);
+                    if let Some(item) = assistant_item.take() {
+                        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                    }
+                    // Also emit a terminal Reasoning item so UIs can finalize raw reasoning.
+                    if let Some(item) = reasoning_item.take() {
+                        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+                    }
+                }
             }
 
             // Emit Completed regardless of reason so the agent can advance.
+            debug!("Sending Completed event after finish_reason={}", finish_reason);
             let _ = tx_event
                 .send(Ok(ResponseEvent::Completed {
                     response_id: String::new(),
                     token_usage: token_usage.clone(),
                 }))
                 .await;
+            debug!("Completed event sent, returning from SSE processor");
 
             // Prepare for potential next turn (should not happen in same stream).
             // fn_call_state = FunctionCallState::default();
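Every terminal path in this processor ("stop"/"length", an unknown finish_reason, and the stream-close cases) now follows the same shape: drain any buffered assistant and reasoning item, then emit `Completed` so the agent loop advances instead of hanging. A sketch of that pattern over simplified stand-in types (the names here are illustrative, not the crate's event plumbing):

```rust
// Simplified stand-ins for the crate's streaming events.
enum Event {
    ItemDone(String),
    Completed,
}

// Flush any buffered items, then signal completion. Funneling every
// terminal path through this shape ensures no turn can hang.
fn finish_turn(
    assistant_item: &mut Option<String>,
    reasoning_item: &mut Option<String>,
    out: &mut Vec<Event>,
) {
    if let Some(item) = assistant_item.take() {
        out.push(Event::ItemDone(item));
    }
    if let Some(item) = reasoning_item.take() {
        out.push(Event::ItemDone(item));
    }
    out.push(Event::Completed);
}

fn main() {
    let mut assistant = Some("final answer".to_string());
    let mut reasoning: Option<String> = None;
    let mut events = Vec::new();
    finish_turn(&mut assistant, &mut reasoning, &mut events);
    assert_eq!(events.len(), 2); // one ItemDone plus Completed
}
```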
@@ -774,6 +920,22 @@ async fn process_chat_sse<S>(
             }
         }
     }
 
+    // Stream ended without finish_reason - this can happen when the stream closes abruptly
+    debug!("Stream ended without finish_reason, emitting final items and Completed event");
+    if let Some(item) = assistant_item.take() {
+        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+    }
+    if let Some(item) = reasoning_item.take() {
+        let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await;
+    }
+    // Send Completed event so llmx knows the turn is done
+    let _ = tx_event
+        .send(Ok(ResponseEvent::Completed {
+            response_id: String::new(),
+            token_usage: token_usage.clone(),
+        }))
+        .await;
 }
 
 /// Optional client-side aggregation helper
@@ -152,6 +152,7 @@ impl ModelClient {
                 &self.provider,
                 &self.otel_event_manager,
                 &self.session_source,
+                self.config.model_max_output_tokens,
             )
             .await?;
@@ -1123,6 +1124,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -1187,6 +1189,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -1224,6 +1227,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -1263,6 +1267,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -1298,6 +1303,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -1333,6 +1339,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -1437,6 +1444,7 @@ mod tests {
             request_max_retries: Some(0),
             stream_max_retries: Some(0),
             stream_idle_timeout_ms: Some(1000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
@@ -973,6 +973,8 @@ impl Config {
 
         let mut model_providers = built_in_model_providers();
         // Merge user-defined providers into the built-in list.
+        // Note: This uses or_insert() so built-in providers take precedence.
+        // For custom max_tokens, use model_max_output_tokens config instead.
         for (key, provider) in cfg.model_providers.into_iter() {
            model_providers.entry(key).or_insert(provider);
         }
@@ -2809,6 +2811,7 @@ model_verbosity = "high"
             request_max_retries: Some(4),
             stream_max_retries: Some(10),
             stream_idle_timeout_ms: Some(300_000),
+            max_tokens: None,
             requires_openai_auth: false,
         };
         let model_provider_map = {
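The precedence noted in that comment falls directly out of `HashMap::entry(...).or_insert(...)`: because the built-ins are inserted first, a user-defined provider with the same key is ignored, and user entries only take effect for new keys. A quick demonstration:

```rust
use std::collections::HashMap;

fn main() {
    let mut providers: HashMap<&str, &str> = HashMap::new();
    providers.insert("openai", "built-in");

    // User config tries to redefine an existing key: or_insert keeps
    // the built-in value, so overrides only apply to *new* keys.
    providers.entry("openai").or_insert("user-defined");
    providers.entry("my-litellm").or_insert("user-defined");

    assert_eq!(providers["openai"], "built-in");
    assert_eq!(providers["my-litellm"], "user-defined");
}
```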
@@ -54,7 +54,7 @@ fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> {
     Some(UserMessageItem::new(&content))
 }
 
-fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> AgentMessageItem {
+fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> Option<AgentMessageItem> {
     let mut content: Vec<AgentMessageContent> = Vec::new();
     for content_item in message.iter() {
         match content_item {
@@ -69,18 +69,23 @@ fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> Option<AgentMessageItem> {
             }
         }
     }
 
+    // If the message has no content, return None to signal turn completion
+    // This happens when the API ends a turn with an empty assistant message (e.g., after tool calls)
+    if content.is_empty() {
+        return None;
+    }
+
     let id = id.cloned().unwrap_or_else(|| Uuid::new_v4().to_string());
-    AgentMessageItem { id, content }
+    Some(AgentMessageItem { id, content })
 }
 
 pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> {
     match item {
         ResponseItem::Message { role, content, id } => match role.as_str() {
             "user" => parse_user_message(content).map(TurnItem::UserMessage),
-            "assistant" => Some(TurnItem::AgentMessage(parse_agent_message(
-                id.as_ref(),
-                content,
-            ))),
+            "assistant" => parse_agent_message(id.as_ref(), content)
+                .map(TurnItem::AgentMessage),
             "system" => None,
             _ => None,
         },
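Changing the return type to `Option<AgentMessageItem>` lets the empty-message case compose cleanly at the call site: `Option::map` turns `None` into "no turn item" with no special-casing. A sketch of the call-site shape, with the types heavily simplified for illustration:

```rust
struct AgentMessageItem {
    content: Vec<String>,
}

enum TurnItem {
    AgentMessage(AgentMessageItem),
}

fn parse_agent_message(content: &[String]) -> Option<AgentMessageItem> {
    // An empty assistant message (e.g. a turn that ended right after
    // tool calls) yields None instead of an empty item.
    if content.is_empty() {
        return None;
    }
    Some(AgentMessageItem { content: content.to_vec() })
}

fn main() {
    let empty: &[String] = &[];
    // None propagates: no TurnItem is produced for the empty message.
    assert!(parse_agent_message(empty).map(TurnItem::AgentMessage).is_none());
}
```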
@@ -87,6 +87,10 @@ pub struct ModelProviderInfo {
     /// the connection as lost.
     pub stream_idle_timeout_ms: Option<u64>,
 
+    /// Maximum number of tokens to generate in the response. If not specified, defaults to 20480.
+    /// This is required by some providers (e.g., Anthropic via LiteLLM).
+    pub max_tokens: Option<i64>,
+
     /// Does this provider require an OpenAI API Key or ChatGPT login token? If true,
     /// user is presented with login screen on first run, and login preference and token/key
     /// are stored in auth.json. If false (which is the default), login screen is skipped,
@@ -290,6 +294,7 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
                 request_max_retries: None,
                 stream_max_retries: None,
                 stream_idle_timeout_ms: None,
+                max_tokens: None,
                 requires_openai_auth: false,
             },
         ),
@@ -330,6 +335,7 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
                 request_max_retries: None,
                 stream_max_retries: None,
                 stream_idle_timeout_ms: None,
+                max_tokens: None,
                 requires_openai_auth: true,
             },
         ),
@@ -375,6 +381,7 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     }
 }
@@ -415,6 +422,7 @@ base_url = "http://localhost:11434/v1"
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -445,6 +453,7 @@ query_params = { api-version = "2025-04-01-preview" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -478,6 +487,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -501,6 +511,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     }
 }
@@ -534,6 +545,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
     assert!(named_provider.is_azure_responses_endpoint());
@@ -693,7 +693,7 @@ pub(crate) fn create_tools_json_for_chat_completions_api(
     // We start with the JSON for the Responses API and then rewrite it to match
     // the chat completions tool call format.
     let responses_api_tools_json = create_tools_json_for_responses_api(tools)?;
-    let tools_json = responses_api_tools_json
+    let mut tools_json = responses_api_tools_json
         .into_iter()
         .filter_map(|mut tool| {
             if tool.get("type") != Some(&serde_json::Value::String("function".to_string())) {
@@ -712,6 +712,14 @@ pub(crate) fn create_tools_json_for_chat_completions_api(
             }
         })
         .collect::<Vec<serde_json::Value>>();
 
+    // Add cache_control to the last tool to enable Anthropic prompt caching
+    if let Some(last_tool) = tools_json.last_mut() {
+        if let Some(obj) = last_tool.as_object_mut() {
+            obj.insert("cache_control".to_string(), json!({"type": "ephemeral"}));
+        }
+    }
+
     Ok(tools_json)
 }
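This is the same caching idea as the system message, applied to the tools array: marking the last tool makes the large, rarely changing tool definitions a cache breakpoint as well. A sketch with `serde_json` and made-up tool names:

```rust
use serde_json::json;

fn main() {
    let mut tools_json = vec![
        json!({"type": "function", "function": {"name": "read_file"}}),
        json!({"type": "function", "function": {"name": "apply_patch"}}),
    ];

    // Mark the last tool so everything up to and including the tool
    // list can be reused from the provider's prompt cache.
    if let Some(last_tool) = tools_json.last_mut() {
        if let Some(obj) = last_tool.as_object_mut() {
            obj.insert("cache_control".to_string(), json!({"type": "ephemeral"}));
        }
    }
    println!("{}", serde_json::to_string_pretty(&tools_json).unwrap());
}
```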
@@ -58,6 +58,7 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -58,6 +58,7 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -47,6 +47,7 @@ async fn responses_stream_includes_subagent_header_on_review() {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -135,6 +136,7 @@ async fn responses_stream_includes_subagent_header_on_other() {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -712,6 +712,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
         request_max_retries: Some(0),
         stream_max_retries: Some(0),
         stream_idle_timeout_ms: Some(5_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -1195,6 +1196,7 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -1272,6 +1274,7 @@ async fn env_var_overrides_loaded_auth() {
         request_max_retries: None,
         stream_max_retries: None,
         stream_idle_timeout_ms: None,
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -72,6 +72,7 @@ async fn continue_after_stream_error() {
         request_max_retries: Some(1),
         stream_max_retries: Some(1),
         stream_idle_timeout_ms: Some(2_000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -80,6 +80,7 @@ async fn retries_on_early_close() {
         request_max_retries: Some(0),
         stream_max_retries: Some(1),
         stream_idle_timeout_ms: Some(2000),
+        max_tokens: None,
         requires_openai_auth: false,
     };
@@ -144,7 +144,7 @@ impl McpProcess {
     let initialized = self.read_jsonrpc_message().await?;
     let os_info = os_info::get();
     let user_agent = format!(
-        "llmx_cli_rs/0.1.0 ({} {}; {}) {} (elicitation test; 0.0.0)",
+        "llmx_cli_rs/0.1.7 ({} {}; {}) {} (elicitation test; 0.0.0)",
         os_info.os_type(),
         os_info.version(),
         os_info.architecture().unwrap_or("unknown"),
@@ -163,7 +163,7 @@ impl McpProcess {
         "serverInfo": {
             "name": "llmx-mcp-server",
             "title": "LLMX",
-            "version": "0.1.0",
+            "version": "0.1.7",
             "user_agent": user_agent
         },
         "protocolVersion": mcp_types::MCP_SCHEMA_VERSION
@@ -1,5 +1,5 @@
 {
-  "name": "@valknar/llmx-responses-api-proxy",
+  "name": "@valknarthing/llmx-responses-api-proxy",
   "version": "0.1.0",
   "license": "Apache-2.0",
   "bin": {
@@ -15,7 +15,7 @@
   ],
   "repository": {
     "type": "git",
-    "url": "git+https://github.com/valknar/llmx.git",
+    "url": "git+https://github.com/valknarthing/llmx.git",
     "directory": "llmx-rs/responses-api-proxy/npm"
   }
 }
@@ -69,7 +69,7 @@ impl WidgetRef for &WelcomeWidget {
             " ".into(),
             "Welcome to ".into(),
             "LLMX".bold(),
-            ", OpenAI's command-line coding agent".into(),
+            ", your command-line coding agent".into(),
         ]));
 
         Paragraph::new(lines)
@@ -33,11 +33,8 @@ pub(crate) const WSL_INSTRUCTIONS: &str = r#"Install WSL2 by opening PowerShell
 nvm install 22
 
 # Install and run LLMX in WSL
-npm install --global @openai/llmx
-llmx
-
-# Additional details and instructions for how to install and run LLMX in WSL:
-https://developers.openai.com/llmx/windows"#;
+npm install --global @valknarthing/llmx
+llmx"#;
 
 pub(crate) struct WindowsSetupWidget {
     pub llmx_home: PathBuf,
@@ -102,7 +99,6 @@ impl WidgetRef for &WindowsSetupWidget {
         "To use all LLMX features, we recommend running LLMX in Windows Subsystem for Linux (WSL2)".bold(),
     ]),
     Line::from(vec![" ".into(), "WSL allows LLMX to run Agent mode in a sandboxed environment with better data protections in place.".into()]),
-    Line::from(vec![" ".into(), "Learn more: https://developers.openai.com/llmx/windows".into()]),
     Line::from(""),
 ];
@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭───────────────────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.0) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │
@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭─────────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.0) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │
@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭──────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.0) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │
@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭──────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.0) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │
@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭───────────────────────────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.0) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/usage for up-to-date │
 │ information on rate limits and credits │
@@ -5,7 +5,7 @@ expression: sanitized
 /status
 
 ╭────────────────────────────────────────────╮
-│ >_ LLMX (v0.1.0) │
+│ >_ LLMX (v0.1.7) │
 │ │
 │ Visit https://chatgpt.com/llmx/settings/ │
 │ usage for up-to-date │
llmx-rs/tui/tests/fixtures/binary-size-log.jsonl (10 lines changed, vendored)
File diff suppressed because one or more lines are too long
@@ -148,7 +148,9 @@ def main() -> int:
     print(f"should `git checkout {resolved_head_sha}`")
 
     for package in packages:
-        staging_dir = Path(tempfile.mkdtemp(prefix=f"npm-stage-{package}-", dir=runner_temp))
+        # Sanitize package name for use in filesystem path (replace / with -)
+        safe_package_name = package.replace("/", "-").replace("@", "")
+        staging_dir = Path(tempfile.mkdtemp(prefix=f"npm-stage-{safe_package_name}-", dir=runner_temp))
         pack_output = output_dir / f"{package}-npm-{args.release_version}.tgz"
 
         cmd = [
@@ -1,10 +1,10 @@
 {
-  "name": "@valknar/llmx-sdk",
+  "name": "@valknarthing/llmx-sdk",
   "version": "0.1.0",
   "description": "TypeScript SDK for LLMX - Multi-provider coding agent",
   "repository": {
     "type": "git",
-    "url": "git+https://github.com/valknar/llmx.git",
+    "url": "git+https://github.com/valknarthing/llmx.git",
     "directory": "sdk/typescript"
   },
   "keywords": [
test_system_message.json (198 lines, new file)
File diff suppressed because one or more lines are too long