diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json
deleted file mode 100644
index d7f7c4a8..00000000
--- a/.claude-plugin/marketplace.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "name": "openai-codex",
- "owner": {
- "name": "OpenAI"
- },
- "metadata": {
- "description": "Codex plugins to use in Claude Code for delegation and code review.",
- "version": "1.0.3"
- },
- "plugins": [
- {
- "name": "codex",
- "description": "Use Codex from Claude Code to review code or delegate tasks.",
- "version": "1.0.3",
- "author": {
- "name": "OpenAI"
- },
- "source": "./plugins/codex"
- }
- ]
-}
diff --git a/.github/workflows/pull-request-ci.yml b/.github/workflows/pull-request-ci.yml
deleted file mode 100644
index ebcff0b6..00000000
--- a/.github/workflows/pull-request-ci.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: Pull Request CI
-
-on:
- pull_request:
-
-permissions:
- contents: read
-
-jobs:
- ci:
- name: CI
- runs-on: ubuntu-latest
- timeout-minutes: 10
-
- steps:
- - name: Check out repository
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-
- - name: Set up Node.js
- uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
- with:
- node-version: 22
- cache: npm
-
- - name: Install dependencies
- run: npm ci
-
- - name: Install Codex CLI
- run: npm install -g @openai/codex
-
- - name: Run test suite
- run: npm test
-
- - name: Run build
- run: npm run build
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 3d573eec..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,150 +0,0 @@
-# Logs
-logs
-*.log
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-lerna-debug.log*
-
-# Diagnostic reports (https://nodejs.org/api/report.html)
-report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
-
-# Runtime data
-pids
-*.pid
-*.seed
-*.pid.lock
-
-# Directory for instrumented libs generated by jscoverage/JSCover
-lib-cov
-
-# Coverage directory used by tools like istanbul
-coverage
-*.lcov
-
-# nyc test coverage
-.nyc_output
-
-# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
-.grunt
-
-# Bower dependency directory (https://bower.io/)
-bower_components
-
-# node-waf configuration
-.lock-wscript
-
-# Compiled binary addons (https://nodejs.org/api/addons.html)
-build/Release
-
-# Dependency directories
-node_modules/
-jspm_packages/
-
-# Snowpack dependency directory (https://snowpack.dev/)
-web_modules/
-
-# TypeScript cache
-*.tsbuildinfo
-
-# Optional npm cache directory
-.npm
-
-# Optional eslint cache
-.eslintcache
-
-# Optional stylelint cache
-.stylelintcache
-
-# Optional REPL history
-.node_repl_history
-
-# Output of 'npm pack'
-*.tgz
-*.zip
-.DS_Store
-**/.DS_Store
-
-# Yarn Integrity file
-.yarn-integrity
-
-# dotenv environment variable files
-.env
-.env.*
-!.env.example
-
-# parcel-bundler cache (https://parceljs.org/)
-.cache
-.parcel-cache
-
-# Next.js build output
-.next
-out
-
-# Nuxt.js build / generate output
-.nuxt
-dist
-.output
-
-# Gatsby files
-.cache/
-# Comment in the public line in if your project uses Gatsby and not Next.js
-# https://nextjs.org/blog/next-9-1#public-directory-support
-# public
-
-# vuepress build output
-.vuepress/dist
-
-# vuepress v2.x temp and cache directory
-.temp
-.cache
-
-# Sveltekit cache directory
-.svelte-kit/
-
-# vitepress build output
-**/.vitepress/dist
-
-# vitepress cache directory
-**/.vitepress/cache
-
-# Docusaurus cache and generated files
-.docusaurus
-
-# Serverless directories
-.serverless/
-
-# FuseBox cache
-.fusebox/
-
-# DynamoDB Local files
-.dynamodb/
-
-# Firebase cache directory
-.firebase/
-
-# TernJS port file
-.tern-port
-
-# Stores VSCode versions used for testing VSCode extensions
-.vscode-test
-
-# pnpm
-.pnpm-store
-
-# yarn v3
-.pnp.*
-.yarn/*
-!.yarn/patches
-!.yarn/plugins
-!.yarn/releases
-!.yarn/sdks
-!.yarn/versions
-
-# Vite files
-vite.config.js.timestamp-*
-vite.config.ts.timestamp-*
-.vite/
-
-output/
-plugins/codex/.generated/
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index d0be6cdc..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/NOTICE b/NOTICE
deleted file mode 100644
index 295a8dc6..00000000
--- a/NOTICE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2026 OpenAI
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/README.md b/README.md
deleted file mode 100644
index 458c39fb..00000000
--- a/README.md
+++ /dev/null
@@ -1,305 +0,0 @@
-# Codex plugin for Claude Code
-
-Use Codex from inside Claude Code for code reviews or to delegate tasks to Codex.
-
-This plugin is for Claude Code users who want an easy way to start using Codex from the workflow
-they already have.
-
-
-
-## What You Get
-
-- `/codex:review` for a normal read-only Codex review
-- `/codex:adversarial-review` for a steerable challenge review
-- `/codex:rescue`, `/codex:status`, `/codex:result`, and `/codex:cancel` to delegate work and manage background jobs
-
-## Requirements
-
-- **ChatGPT subscription (incl. Free) or OpenAI API key.**
- - Usage will contribute to your Codex usage limits. [Learn more](https://developers.openai.com/codex/pricing).
-- **Node.js 18.18 or later**
-
-## Install
-
-Add the marketplace in Claude Code:
-
-```bash
-/plugin marketplace add openai/codex-plugin-cc
-```
-
-Install the plugin:
-
-```bash
-/plugin install codex@openai-codex
-```
-
-Reload plugins:
-
-```bash
-/reload-plugins
-```
-
-Then run:
-
-```bash
-/codex:setup
-```
-
-`/codex:setup` will tell you whether Codex is ready. If Codex is missing and npm is available, it can offer to install Codex for you.
-
-If you prefer to install Codex yourself, use:
-
-```bash
-npm install -g @openai/codex
-```
-
-If Codex is installed but not logged in yet, run:
-
-```bash
-!codex login
-```
-
-After install, you should see:
-
-- the slash commands listed below
-- the `codex:codex-rescue` subagent in `/agents`
-
-One simple first run is:
-
-```bash
-/codex:review --background
-/codex:status
-/codex:result
-```
-
-## Usage
-
-### `/codex:review`
-
-Runs a normal Codex review on your current work. It gives you the same quality of code review as running `/review` inside Codex directly.
-
-> [!NOTE]
-> Code review especially for multi-file changes might take a while. It's generally recommended to run it in the background.
-
-Use it when you want:
-
-- a review of your current uncommitted changes
-- a review of your branch compared to a base branch like `main`
-
-Use `--base [` for branch review. It also supports `--wait` and `--background`. It is not steerable and does not take custom focus text. Use [`/codex:adversarial-review`](#codexadversarial-review) when you want to challenge a specific decision or risk area.
-
-Examples:
-
-```bash
-/codex:review
-/codex:review --base main
-/codex:review --background
-```
-
-This command is read-only and will not perform any changes. When run in the background you can use [`/codex:status`](#codexstatus) to check on the progress and [`/codex:cancel`](#codexcancel) to cancel the ongoing task.
-
-### `/codex:adversarial-review`
-
-Runs a **steerable** review that questions the chosen implementation and design.
-
-It can be used to pressure-test assumptions, tradeoffs, failure modes, and whether a different approach would have been safer or simpler.
-
-It uses the same review target selection as `/codex:review`, including `--base ][` for branch review.
-It also supports `--wait` and `--background`. Unlike `/codex:review`, it can take extra focus text after the flags.
-
-Use it when you want:
-
-- a review before shipping that challenges the direction, not just the code details
-- review focused on design choices, tradeoffs, hidden assumptions, and alternative approaches
-- pressure-testing around specific risk areas like auth, data loss, rollback, race conditions, or reliability
-
-Examples:
-
-```bash
-/codex:adversarial-review
-/codex:adversarial-review --base main challenge whether this was the right caching and retry design
-/codex:adversarial-review --background look for race conditions and question the chosen approach
-```
-
-This command is read-only. It does not fix code.
-
-### `/codex:rescue`
-
-Hands a task to Codex through the `codex:codex-rescue` subagent.
-
-Use it when you want Codex to:
-
-- investigate a bug
-- try a fix
-- continue a previous Codex task
-- take a faster or cheaper pass with a smaller model
-
-> [!NOTE]
-> Depending on the task and the model you choose these tasks might take a long time and it's generally recommended to force the task to be in the background or move the agent to the background.
-
-It supports `--background`, `--wait`, `--resume`, and `--fresh`. If you omit `--resume` and `--fresh`, the plugin can offer to continue the latest rescue thread for this repo.
-
-Examples:
-
-```bash
-/codex:rescue investigate why the tests started failing
-/codex:rescue fix the failing test with the smallest safe patch
-/codex:rescue --resume apply the top fix from the last run
-/codex:rescue --model gpt-5.4-mini --effort medium investigate the flaky integration test
-/codex:rescue --model spark fix the issue quickly
-/codex:rescue --background investigate the regression
-```
-
-You can also just ask for a task to be delegated to Codex:
-
-```text
-Ask Codex to redesign the database connection to be more resilient.
-```
-
-**Notes:**
-
-- if you do not pass `--model` or `--effort`, Codex chooses its own defaults.
-- if you say `spark`, the plugin maps that to `gpt-5.3-codex-spark`
-- follow-up rescue requests can continue the latest Codex task in the repo
-
-### `/codex:status`
-
-Shows running and recent Codex jobs for the current repository.
-
-Examples:
-
-```bash
-/codex:status
-/codex:status task-abc123
-```
-
-Use it to:
-
-- check progress on background work
-- see the latest completed job
-- confirm whether a task is still running
-
-### `/codex:result`
-
-Shows the final stored Codex output for a finished job.
-When available, it also includes the Codex session ID so you can reopen that run directly in Codex with `codex resume `.
-
-Examples:
-
-```bash
-/codex:result
-/codex:result task-abc123
-```
-
-### `/codex:cancel`
-
-Cancels an active background Codex job.
-
-Examples:
-
-```bash
-/codex:cancel
-/codex:cancel task-abc123
-```
-
-### `/codex:setup`
-
-Checks whether Codex is installed and authenticated.
-If Codex is missing and npm is available, it can offer to install Codex for you.
-
-You can also use `/codex:setup` to manage the optional review gate.
-
-#### Enabling review gate
-
-```bash
-/codex:setup --enable-review-gate
-/codex:setup --disable-review-gate
-```
-
-When the review gate is enabled, the plugin uses a `Stop` hook to run a targeted Codex review based on Claude's response. If that review finds issues, the stop is blocked so Claude can address them first.
-
-> [!WARNING]
-> The review gate can create a long-running Claude/Codex loop and may drain usage limits quickly. Only enable it when you plan to actively monitor the session.
-
-## Typical Flows
-
-### Review Before Shipping
-
-```bash
-/codex:review
-```
-
-### Hand A Problem To Codex
-
-```bash
-/codex:rescue investigate why the build is failing in CI
-```
-
-### Start Something Long-Running
-
-```bash
-/codex:adversarial-review --background
-/codex:rescue --background investigate the flaky test
-```
-
-Then check in with:
-
-```bash
-/codex:status
-/codex:result
-```
-
-## Codex Integration
-
-The Codex plugin wraps the [Codex app server](https://developers.openai.com/codex/app-server). It uses the global `codex` binary installed in your environment and [applies the same configuration](https://developers.openai.com/codex/config-basic).
-
-### Common Configurations
-
-If you want to change the default reasoning effort or the default model that gets used by the plugin, you can define that inside your user-level or project-level `config.toml`. For example to always use `gpt-5.4-mini` on `high` for a specific project you can add the following to a `.codex/config.toml` file at the root of the directory you started Claude in:
-
-```toml
-model = "gpt-5.4-mini"
-model_reasoning_effort = "high"
-```
-
-Your configuration will be picked up based on:
-
-- user-level config in `~/.codex/config.toml`
-- project-level overrides in `.codex/config.toml`
-- project-level overrides only load when the [project is trusted](https://developers.openai.com/codex/config-advanced#project-config-files-codexconfigtoml)
-
-Check out the Codex docs for more [configuration options](https://developers.openai.com/codex/config-reference).
-
-### Moving The Work Over To Codex
-
-Delegated tasks and any [stop gate](#what-does-the-review-gate-do) run can also be directly resumed inside Codex by running `codex resume` either with the specific session ID you received from running `/codex:result` or `/codex:status` or by selecting it from the list.
-
-This way you can review the Codex work or continue the work there.
-
-## FAQ
-
-### Do I need a separate Codex account for this plugin?
-
-If you are already signed into Codex on this machine, that account should work immediately here too. This plugin uses your local Codex CLI authentication.
-
-If you only use Claude Code today and have not used Codex yet, you will also need to sign in to Codex with either a ChatGPT account or an API key. [Codex is available with your ChatGPT subscription](https://developers.openai.com/codex/pricing/), and [`codex login`](https://developers.openai.com/codex/cli/reference/#codex-login) supports both ChatGPT and API key sign-in. Run `/codex:setup` to check whether Codex is ready, and use `!codex login` if it is not.
-
-### Does the plugin use a separate Codex runtime?
-
-No. This plugin delegates through your local [Codex CLI](https://developers.openai.com/codex/cli/) and [Codex app server](https://developers.openai.com/codex/app-server/) on the same machine.
-
-That means:
-
-- it uses the same Codex install you would use directly
-- it uses the same local authentication state
-- it uses the same repository checkout and machine-local environment
-
-### Will it use the same Codex config I already have?
-
-Yes. If you already use Codex, the plugin picks up the same [configuration](#common-configurations).
-
-### Can I keep using my current API key or base URL setup?
-
-Yes. Because the plugin uses your local Codex CLI, your existing sign-in method and config still apply.
-
-If you need to point the built-in OpenAI provider at a different endpoint, set `openai_base_url` in your [Codex config](https://developers.openai.com/codex/config-advanced/#config-and-state-locations).
diff --git a/package-lock.json b/package-lock.json
deleted file mode 100644
index 46006918..00000000
--- a/package-lock.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
- "name": "@openai/codex-plugin-cc",
- "version": "1.0.3",
- "lockfileVersion": 3,
- "requires": true,
- "packages": {
- "": {
- "name": "@openai/codex-plugin-cc",
- "version": "1.0.3",
- "license": "Apache-2.0",
- "devDependencies": {
- "@types/node": "^25.5.0",
- "typescript": "^6.0.2"
- },
- "engines": {
- "node": ">=18.18.0"
- }
- },
- "node_modules/@types/node": {
- "version": "25.5.0",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-25.5.0.tgz",
- "integrity": "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "undici-types": "~7.18.0"
- }
- },
- "node_modules/typescript": {
- "version": "6.0.2",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-6.0.2.tgz",
- "integrity": "sha512-bGdAIrZ0wiGDo5l8c++HWtbaNCWTS4UTv7RaTH/ThVIgjkveJt83m74bBHMJkuCbslY8ixgLBVZJIOiQlQTjfQ==",
- "dev": true,
- "license": "Apache-2.0",
- "bin": {
- "tsc": "bin/tsc",
- "tsserver": "bin/tsserver"
- },
- "engines": {
- "node": ">=14.17"
- }
- },
- "node_modules/undici-types": {
- "version": "7.18.2",
- "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz",
- "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==",
- "dev": true,
- "license": "MIT"
- }
- }
-}
diff --git a/package.json b/package.json
deleted file mode 100644
index 2f8efc96..00000000
--- a/package.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "name": "@openai/codex-plugin-cc",
- "version": "1.0.3",
- "private": true,
- "type": "module",
- "description": "Use Codex from Claude Code to review code or delegate tasks.",
- "license": "Apache-2.0",
- "engines": {
- "node": ">=18.18.0"
- },
- "scripts": {
- "bump-version": "node scripts/bump-version.mjs",
- "check-version": "node scripts/bump-version.mjs --check",
- "prebuild": "mkdir -p plugins/codex/.generated/app-server-types && codex app-server generate-ts --out plugins/codex/.generated/app-server-types",
- "build": "tsc -p tsconfig.app-server.json",
- "test": "node --test tests/*.test.mjs"
- },
- "devDependencies": {
- "@types/node": "^25.5.0",
- "typescript": "^6.0.2"
- }
-}
diff --git a/plugins/codex/.claude-plugin/plugin.json b/plugins/codex/.claude-plugin/plugin.json
deleted file mode 100644
index db3b22d3..00000000
--- a/plugins/codex/.claude-plugin/plugin.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "name": "codex",
- "version": "1.0.3",
- "description": "Use Codex from Claude Code to review code or delegate tasks.",
- "author": {
- "name": "OpenAI"
- }
-}
diff --git a/plugins/codex/CHANGELOG.md b/plugins/codex/CHANGELOG.md
deleted file mode 100644
index d647561b..00000000
--- a/plugins/codex/CHANGELOG.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Changelog
-
-## 1.0.0
-
-- Initial version of the Codex plugin for Claude Code
diff --git a/plugins/codex/LICENSE b/plugins/codex/LICENSE
deleted file mode 100644
index d0be6cdc..00000000
--- a/plugins/codex/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/plugins/codex/NOTICE b/plugins/codex/NOTICE
deleted file mode 100644
index 295a8dc6..00000000
--- a/plugins/codex/NOTICE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2026 OpenAI
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/plugins/codex/agents/codex-rescue.md b/plugins/codex/agents/codex-rescue.md
deleted file mode 100644
index 7009ec86..00000000
--- a/plugins/codex/agents/codex-rescue.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-name: codex-rescue
-description: Proactively use when Claude Code is stuck, wants a second implementation or diagnosis pass, needs a deeper root-cause investigation, or should hand a substantial coding task to Codex through the shared runtime
-model: sonnet
-tools: Bash
-skills:
- - codex-cli-runtime
- - gpt-5-4-prompting
----
-
-You are a thin forwarding wrapper around the Codex companion task runtime.
-
-Your only job is to forward the user's rescue request to the Codex companion script. Do not do anything else.
-
-Selection guidance:
-
-- Do not wait for the user to explicitly ask for Codex. Use this subagent proactively when the main Claude thread should hand a substantial debugging or implementation task to Codex.
-- Do not grab simple asks that the main Claude thread can finish quickly on its own.
-
-Forwarding rules:
-
-- Use exactly one `Bash` call to invoke `node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" task ...`.
-- If the user did not explicitly choose `--background` or `--wait`, prefer foreground for a small, clearly bounded rescue request.
-- If the user did not explicitly choose `--background` or `--wait` and the task looks complicated, open-ended, multi-step, or likely to keep Codex running for a long time, prefer background execution.
-- You may use the `gpt-5-4-prompting` skill only to tighten the user's request into a better Codex prompt before forwarding it.
-- Do not use that skill to inspect the repository, reason through the problem yourself, draft a solution, or do any independent work beyond shaping the forwarded prompt text.
-- Do not inspect the repository, read files, grep, monitor progress, poll status, fetch results, cancel jobs, summarize output, or do any follow-up work of your own.
-- Do not call `review`, `adversarial-review`, `status`, `result`, or `cancel`. This subagent only forwards to `task`.
-- Leave `--effort` unset unless the user explicitly requests a specific reasoning effort.
-- Leave model unset by default. Only add `--model` when the user explicitly asks for a specific model.
-- If the user asks for `spark`, map that to `--model gpt-5.3-codex-spark`.
-- If the user asks for a concrete model name such as `gpt-5.4-mini`, pass it through with `--model`.
-- Treat `--effort ` and `--model ` as runtime controls and do not include them in the task text you pass through.
-- Default to a write-capable Codex run by adding `--write` unless the user explicitly asks for read-only behavior or only wants review, diagnosis, or research without edits.
-- Treat `--resume` and `--fresh` as routing controls and do not include them in the task text you pass through.
-- `--resume` means add `--resume-last`.
-- `--fresh` means do not add `--resume-last`.
-- If the user is clearly asking to continue prior Codex work in this repository, such as "continue", "keep going", "resume", "apply the top fix", or "dig deeper", add `--resume-last` unless `--fresh` is present.
-- Otherwise forward the task as a fresh `task` run.
-- Preserve the user's task text as-is apart from stripping routing flags.
-- Return the stdout of the `codex-companion` command exactly as-is.
-- If the Bash call fails or Codex cannot be invoked, return nothing.
-
-Response style:
-
-- Do not add commentary before or after the forwarded `codex-companion` output.
diff --git a/plugins/codex/commands/adversarial-review.md b/plugins/codex/commands/adversarial-review.md
deleted file mode 100644
index da440ab4..00000000
--- a/plugins/codex/commands/adversarial-review.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-description: Run a Codex review that challenges the implementation approach and design choices
-argument-hint: '[--wait|--background] [--base ][] [--scope auto|working-tree|branch] [focus ...]'
-disable-model-invocation: true
-allowed-tools: Read, Glob, Grep, Bash(node:*), Bash(git:*), AskUserQuestion
----
-
-Run an adversarial Codex review through the shared plugin runtime.
-Position it as a challenge review that questions the chosen implementation, design choices, tradeoffs, and assumptions.
-It is not just a stricter pass over implementation defects.
-
-Raw slash-command arguments:
-`$ARGUMENTS`
-
-Core constraint:
-- This command is review-only.
-- Do not fix issues, apply patches, or suggest that you are about to make changes.
-- Your only job is to run the review and return Codex's output verbatim to the user.
-- Keep the framing focused on whether the current approach is the right one, what assumptions it depends on, and where the design could fail under real-world conditions.
-
-Execution mode rules:
-- If the raw arguments include `--wait`, do not ask. Run in the foreground.
-- If the raw arguments include `--background`, do not ask. Run in a Claude background task.
-- Otherwise, estimate the review size before asking:
- - For working-tree review, start with `git status --short --untracked-files=all`.
- - For working-tree review, also inspect both `git diff --shortstat --cached` and `git diff --shortstat`.
- - For base-branch review, use `git diff --shortstat ...HEAD`.
- - Treat untracked files or directories as reviewable work for auto or working-tree review even when `git diff --shortstat` is empty.
- - Only conclude there is nothing to review when the relevant scope is actually empty.
- - Recommend waiting only when the scoped review is clearly tiny, roughly 1-2 files total and no sign of a broader directory-sized change.
- - In every other case, including unclear size, recommend background.
- - When in doubt, run the review instead of declaring that there is nothing to review.
-- Then use `AskUserQuestion` exactly once with two options, putting the recommended option first and suffixing its label with `(Recommended)`:
- - `Wait for results`
- - `Run in background`
-
-Argument handling:
-- Preserve the user's arguments exactly.
-- Do not strip `--wait` or `--background` yourself.
-- Do not weaken the adversarial framing or rewrite the user's focus text.
-- The companion script parses `--wait` and `--background`, but Claude Code's `Bash(..., run_in_background: true)` is what actually detaches the run.
-- `/codex:adversarial-review` uses the same review target selection as `/codex:review`.
-- It supports working-tree review, branch review, and `--base ][`.
-- It does not support `--scope staged` or `--scope unstaged`.
-- Unlike `/codex:review`, it can still take extra focus text after the flags.
-
-Foreground flow:
-- Run:
-```bash
-node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" adversarial-review "$ARGUMENTS"
-```
-- Return the command stdout verbatim, exactly as-is.
-- Do not paraphrase, summarize, or add commentary before or after it.
-- Do not fix any issues mentioned in the review output.
-
-Background flow:
-- Launch the review with `Bash` in the background:
-```typescript
-Bash({
- command: `node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" adversarial-review "$ARGUMENTS"`,
- description: "Codex adversarial review",
- run_in_background: true
-})
-```
-- Do not call `BashOutput` or wait for completion in this turn.
-- After launching the command, tell the user: "Codex adversarial review started in the background. Check `/codex:status` for progress."
diff --git a/plugins/codex/commands/cancel.md b/plugins/codex/commands/cancel.md
deleted file mode 100644
index a1472b83..00000000
--- a/plugins/codex/commands/cancel.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-description: Cancel an active background Codex job in this repository
-argument-hint: '[job-id]'
-disable-model-invocation: true
-allowed-tools: Bash(node:*)
----
-
-!`node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" cancel "$ARGUMENTS"`
diff --git a/plugins/codex/commands/rescue.md b/plugins/codex/commands/rescue.md
deleted file mode 100644
index c92a2896..00000000
--- a/plugins/codex/commands/rescue.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-description: Delegate investigation, an explicit fix request, or follow-up rescue work to the Codex rescue subagent
-argument-hint: "[--background|--wait] [--resume|--fresh] [--model ] [--effort ] [what Codex should investigate, solve, or continue]"
-context: fork
-allowed-tools: Bash(node:*), AskUserQuestion
----
-
-Route this request to the `codex:codex-rescue` subagent.
-The final user-visible response must be Codex's output verbatim.
-
-Raw user request:
-$ARGUMENTS
-
-Execution mode:
-
-- If the request includes `--background`, run the `codex:codex-rescue` subagent in the background.
-- If the request includes `--wait`, run the `codex:codex-rescue` subagent in the foreground.
-- If neither flag is present, default to foreground.
-- `--background` and `--wait` are execution flags for Claude Code. Do not forward them to `task`, and do not treat them as part of the natural-language task text.
-- `--model` and `--effort` are runtime-selection flags. Preserve them for the forwarded `task` call, but do not treat them as part of the natural-language task text.
-- If the request includes `--resume`, do not ask whether to continue. The user already chose.
-- If the request includes `--fresh`, do not ask whether to continue. The user already chose.
-- Otherwise, before starting Codex, check for a resumable rescue thread from this Claude session by running:
-
-```bash
-node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" task-resume-candidate --json
-```
-
-- If that helper reports `available: true`, use `AskUserQuestion` exactly once to ask whether to continue the current Codex thread or start a new one.
-- The two choices must be:
- - `Continue current Codex thread`
- - `Start a new Codex thread`
-- If the user is clearly giving a follow-up instruction such as "continue", "keep going", "resume", "apply the top fix", or "dig deeper", put `Continue current Codex thread (Recommended)` first.
-- Otherwise put `Start a new Codex thread (Recommended)` first.
-- If the user chooses continue, add `--resume` before routing to the subagent.
-- If the user chooses a new thread, add `--fresh` before routing to the subagent.
-- If the helper reports `available: false`, do not ask. Route normally.
-
-Operating rules:
-
-- The subagent is a thin forwarder only. It should use one `Bash` call to invoke `node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" task ...` and return that command's stdout as-is.
-- Return the Codex companion stdout verbatim to the user.
-- Do not paraphrase, summarize, rewrite, or add commentary before or after it.
-- Do not ask the subagent to inspect files, monitor progress, poll `/codex:status`, fetch `/codex:result`, call `/codex:cancel`, summarize output, or do follow-up work of its own.
-- Leave `--effort` unset unless the user explicitly asks for a specific reasoning effort.
-- Leave the model unset unless the user explicitly asks for one. If they ask for `spark`, map it to `gpt-5.3-codex-spark`.
-- Leave `--resume` and `--fresh` in the forwarded request. The subagent handles that routing when it builds the `task` command.
-- If the helper reports that Codex is missing or unauthenticated, stop and tell the user to run `/codex:setup`.
-- If the user did not supply a request, ask what Codex should investigate or fix.
diff --git a/plugins/codex/commands/result.md b/plugins/codex/commands/result.md
deleted file mode 100644
index 3abc2d93..00000000
--- a/plugins/codex/commands/result.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-description: Show the stored final output for a finished Codex job in this repository
-argument-hint: '[job-id]'
-disable-model-invocation: true
-allowed-tools: Bash(node:*)
----
-
-!`node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" result "$ARGUMENTS"`
-
-Present the full command output to the user. Do not summarize or condense it. Preserve all details including:
-- Job ID and status
-- The complete result payload, including verdict, summary, findings, details, artifacts, and next steps
-- File paths and line numbers exactly as reported
-- Any error messages or parse errors
-- Follow-up commands such as `/codex:status ` and `/codex:review`
diff --git a/plugins/codex/commands/review.md b/plugins/codex/commands/review.md
deleted file mode 100644
index fb70a487..00000000
--- a/plugins/codex/commands/review.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-description: Run a Codex code review against local git state
-argument-hint: '[--wait|--background] [--base ][] [--scope auto|working-tree|branch]'
-disable-model-invocation: true
-allowed-tools: Read, Glob, Grep, Bash(node:*), Bash(git:*), AskUserQuestion
----
-
-Run a Codex review through the shared built-in reviewer.
-
-Raw slash-command arguments:
-`$ARGUMENTS`
-
-Core constraint:
-- This command is review-only.
-- Do not fix issues, apply patches, or suggest that you are about to make changes.
-- Your only job is to run the review and return Codex's output verbatim to the user.
-
-Execution mode rules:
-- If the raw arguments include `--wait`, do not ask. Run the review in the foreground.
-- If the raw arguments include `--background`, do not ask. Run the review in a Claude background task.
-- Otherwise, estimate the review size before asking:
- - For working-tree review, start with `git status --short --untracked-files=all`.
- - For working-tree review, also inspect both `git diff --shortstat --cached` and `git diff --shortstat`.
- - For base-branch review, use `git diff --shortstat ...HEAD`.
- - Treat untracked files or directories as reviewable work even when `git diff --shortstat` is empty.
- - Only conclude there is nothing to review when the relevant working-tree status is empty or the explicit branch diff is empty.
- - Recommend waiting only when the review is clearly tiny, roughly 1-2 files total and no sign of a broader directory-sized change.
- - In every other case, including unclear size, recommend background.
- - When in doubt, run the review instead of declaring that there is nothing to review.
-- Then use `AskUserQuestion` exactly once with two options, putting the recommended option first and suffixing its label with `(Recommended)`:
- - `Wait for results`
- - `Run in background`
-
-Argument handling:
-- Preserve the user's arguments exactly.
-- Do not strip `--wait` or `--background` yourself.
-- Do not add extra review instructions or rewrite the user's intent.
-- The companion script parses `--wait` and `--background`, but Claude Code's `Bash(..., run_in_background: true)` is what actually detaches the run.
-- `/codex:review` is native-review only. It does not support staged-only review, unstaged-only review, or extra focus text.
-- If the user needs custom review instructions or more adversarial framing, they should use `/codex:adversarial-review`.
-
-Foreground flow:
-- Run:
-```bash
-node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" review "$ARGUMENTS"
-```
-- Return the command stdout verbatim, exactly as-is.
-- Do not paraphrase, summarize, or add commentary before or after it.
-- Do not fix any issues mentioned in the review output.
-
-Background flow:
-- Launch the review with `Bash` in the background:
-```typescript
-Bash({
- command: `node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" review "$ARGUMENTS"`,
- description: "Codex review",
- run_in_background: true
-})
-```
-- Do not call `BashOutput` or wait for completion in this turn.
-- After launching the command, tell the user: "Codex review started in the background. Check `/codex:status` for progress."
diff --git a/plugins/codex/commands/setup.md b/plugins/codex/commands/setup.md
deleted file mode 100644
index fb33a150..00000000
--- a/plugins/codex/commands/setup.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-description: Check whether the local Codex CLI is ready and optionally toggle the stop-time review gate
-argument-hint: '[--enable-review-gate|--disable-review-gate]'
-allowed-tools: Bash(node:*), Bash(npm:*), AskUserQuestion
----
-
-Run:
-
-```bash
-node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" setup --json $ARGUMENTS
-```
-
-If the result says Codex is unavailable and npm is available:
-- Use `AskUserQuestion` exactly once to ask whether Claude should install Codex now.
-- Put the install option first and suffix it with `(Recommended)`.
-- Use these two options:
- - `Install Codex (Recommended)`
- - `Skip for now`
-- If the user chooses install, run:
-
-```bash
-npm install -g @openai/codex
-```
-
-- Then rerun:
-
-```bash
-node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" setup --json $ARGUMENTS
-```
-
-If Codex is already installed or npm is unavailable:
-- Do not ask about installation.
-
-Output rules:
-- Present the final setup output to the user.
-- If installation was skipped, present the original setup output.
-- If Codex is installed but not authenticated, preserve the guidance to run `!codex login`.
diff --git a/plugins/codex/commands/status.md b/plugins/codex/commands/status.md
deleted file mode 100644
index 8f70663d..00000000
--- a/plugins/codex/commands/status.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-description: Show active and recent Codex jobs for this repository, including review-gate status
-argument-hint: '[job-id] [--wait] [--timeout-ms ] [--all]'
-disable-model-invocation: true
-allowed-tools: Bash(node:*)
----
-
-!`node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" status "$ARGUMENTS"`
-
-If the user did not pass a job ID:
-- Render the command output as a single Markdown table for the current and past runs in this session.
-- Keep it compact. Do not include progress blocks or extra prose outside the table.
-- Preserve the actionable fields from the command output, including job ID, kind, status, phase, elapsed or duration, summary, and follow-up commands.
-
-If the user did pass a job ID:
-- Present the full command output to the user.
-- Do not summarize or condense it.
diff --git a/plugins/codex/hooks/hooks.json b/plugins/codex/hooks/hooks.json
deleted file mode 100644
index 19e33b81..00000000
--- a/plugins/codex/hooks/hooks.json
+++ /dev/null
@@ -1,38 +0,0 @@
-{
- "description": "Optional stop-time review gate for Codex Companion.",
- "hooks": {
- "SessionStart": [
- {
- "hooks": [
- {
- "type": "command",
- "command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/session-lifecycle-hook.mjs\" SessionStart",
- "timeout": 5
- }
- ]
- }
- ],
- "SessionEnd": [
- {
- "hooks": [
- {
- "type": "command",
- "command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/session-lifecycle-hook.mjs\" SessionEnd",
- "timeout": 5
- }
- ]
- }
- ],
- "Stop": [
- {
- "hooks": [
- {
- "type": "command",
- "command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/stop-review-gate-hook.mjs\"",
- "timeout": 900
- }
- ]
- }
- ]
- }
-}
diff --git a/plugins/codex/prompts/adversarial-review.md b/plugins/codex/prompts/adversarial-review.md
deleted file mode 100644
index 78668af6..00000000
--- a/plugins/codex/prompts/adversarial-review.md
+++ /dev/null
@@ -1,84 +0,0 @@
-
-You are Codex performing an adversarial software review.
-Your job is to break confidence in the change, not to validate it.
-
-
-
-Review the provided repository context as if you are trying to find the strongest reasons this change should not ship yet.
-Target: {{TARGET_LABEL}}
-User focus: {{USER_FOCUS}}
-
-
-
-Default to skepticism.
-Assume the change can fail in subtle, high-cost, or user-visible ways until the evidence says otherwise.
-Do not give credit for good intent, partial fixes, or likely follow-up work.
-If something only works on the happy path, treat that as a real weakness.
-
-
-
-Prioritize the kinds of failures that are expensive, dangerous, or hard to detect:
-- auth, permissions, tenant isolation, and trust boundaries
-- data loss, corruption, duplication, and irreversible state changes
-- rollback safety, retries, partial failure, and idempotency gaps
-- race conditions, ordering assumptions, stale state, and re-entrancy
-- empty-state, null, timeout, and degraded dependency behavior
-- version skew, schema drift, migration hazards, and compatibility regressions
-- observability gaps that would hide failure or make recovery harder
-
-
-
-Actively try to disprove the change.
-Look for violated invariants, missing guards, unhandled failure paths, and assumptions that stop being true under stress.
-Trace how bad inputs, retries, concurrent actions, or partially completed operations move through the code.
-If the user supplied a focus area, weight it heavily, but still report any other material issue you can defend.
-{{REVIEW_COLLECTION_GUIDANCE}}
-
-
-
-Report only material findings.
-Do not include style feedback, naming feedback, low-value cleanup, or speculative concerns without evidence.
-A finding should answer:
-1. What can go wrong?
-2. Why is this code path vulnerable?
-3. What is the likely impact?
-4. What concrete change would reduce the risk?
-
-
-
-Return only valid JSON matching the provided schema.
-Keep the output compact and specific.
-Use `needs-attention` if there is any material risk worth blocking on.
-Use `approve` only if you cannot support any substantive adversarial finding from the provided context.
-Every finding must include:
-- the affected file
-- `line_start` and `line_end`
-- a confidence score from 0 to 1
-- a concrete recommendation
-Write the summary like a terse ship/no-ship assessment, not a neutral recap.
-
-
-
-Be aggressive, but stay grounded.
-Every finding must be defensible from the provided repository context or tool outputs.
-Do not invent files, lines, code paths, incidents, attack chains, or runtime behavior you cannot support.
-If a conclusion depends on an inference, state that explicitly in the finding body and keep the confidence honest.
-
-
-
-Prefer one strong finding over several weak ones.
-Do not dilute serious issues with filler.
-If the change looks safe, say so directly and return no findings.
-
-
-
-Before finalizing, check that each finding is:
-- adversarial rather than stylistic
-- tied to a concrete code location
-- plausible under a real failure scenario
-- actionable for an engineer fixing the issue
-
-
-
-{{REVIEW_INPUT}}
-
diff --git a/plugins/codex/prompts/stop-review-gate.md b/plugins/codex/prompts/stop-review-gate.md
deleted file mode 100644
index 8ed4d129..00000000
--- a/plugins/codex/prompts/stop-review-gate.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
-Run a stop-gate review of the previous Claude turn.
-Only review the work from the previous Claude turn.
-Only review it if Claude actually did code changes in that turn.
-Pure status, setup, or reporting output does not count as reviewable work.
-For example, the output of /codex:setup or /codex:status does not count.
-Only direct edits made in that specific turn count.
-If the previous Claude turn was only a status update, a summary, a setup/login check, a review result, or output from a command that did not itself make direct edits in that turn, return ALLOW immediately and do no further work.
-Challenge whether that specific work and its design choices should ship.
-
-{{CLAUDE_RESPONSE_BLOCK}}
-
-
-
-Return a compact final answer.
-Your first line must be exactly one of:
-- ALLOW:
-- BLOCK:
-Do not put anything before that first line.
-
-
-
-Use ALLOW if the previous turn did not make code changes or if you do not see a blocking issue.
-Use ALLOW immediately, without extra investigation, if the previous turn was not an edit-producing turn.
-Use BLOCK only if the previous turn made code changes and you found something that still needs to be fixed before stopping.
-
-
-
-Ground every blocking claim in the repository context or tool outputs you inspected during this run.
-Do not treat the previous Claude response as proof that code changes happened; verify that from the repository state before you block.
-Do not block based on older edits from earlier turns when the immediately previous turn did not itself make direct edits.
-
-
-
-If the previous turn did make code changes, check for second-order failures, empty-state behavior, retries, stale state, rollback risk, and design tradeoffs before you finalize.
-
diff --git a/plugins/codex/schemas/review-output.schema.json b/plugins/codex/schemas/review-output.schema.json
deleted file mode 100644
index 875eac46..00000000
--- a/plugins/codex/schemas/review-output.schema.json
+++ /dev/null
@@ -1,87 +0,0 @@
-{
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "type": "object",
- "additionalProperties": false,
- "required": [
- "verdict",
- "summary",
- "findings",
- "next_steps"
- ],
- "properties": {
- "verdict": {
- "type": "string",
- "enum": [
- "approve",
- "needs-attention"
- ]
- },
- "summary": {
- "type": "string",
- "minLength": 1
- },
- "findings": {
- "type": "array",
- "items": {
- "type": "object",
- "additionalProperties": false,
- "required": [
- "severity",
- "title",
- "body",
- "file",
- "line_start",
- "line_end",
- "confidence",
- "recommendation"
- ],
- "properties": {
- "severity": {
- "type": "string",
- "enum": [
- "critical",
- "high",
- "medium",
- "low"
- ]
- },
- "title": {
- "type": "string",
- "minLength": 1
- },
- "body": {
- "type": "string",
- "minLength": 1
- },
- "file": {
- "type": "string",
- "minLength": 1
- },
- "line_start": {
- "type": "integer",
- "minimum": 1
- },
- "line_end": {
- "type": "integer",
- "minimum": 1
- },
- "confidence": {
- "type": "number",
- "minimum": 0,
- "maximum": 1
- },
- "recommendation": {
- "type": "string"
- }
- }
- }
- },
- "next_steps": {
- "type": "array",
- "items": {
- "type": "string",
- "minLength": 1
- }
- }
- }
-}
diff --git a/plugins/codex/scripts/app-server-broker.mjs b/plugins/codex/scripts/app-server-broker.mjs
deleted file mode 100644
index 1954274f..00000000
--- a/plugins/codex/scripts/app-server-broker.mjs
+++ /dev/null
@@ -1,252 +0,0 @@
-#!/usr/bin/env node
-
-import fs from "node:fs";
-import net from "node:net";
-import path from "node:path";
-import process from "node:process";
-
-import { parseArgs } from "./lib/args.mjs";
-import { BROKER_BUSY_RPC_CODE, CodexAppServerClient } from "./lib/app-server.mjs";
-import { parseBrokerEndpoint } from "./lib/broker-endpoint.mjs";
-
-const STREAMING_METHODS = new Set(["turn/start", "review/start", "thread/compact/start"]);
-
-function buildStreamThreadIds(method, params, result) {
- const threadIds = new Set();
- if (params?.threadId) {
- threadIds.add(params.threadId);
- }
- if (method === "review/start" && result?.reviewThreadId) {
- threadIds.add(result.reviewThreadId);
- }
- return threadIds;
-}
-
-function buildJsonRpcError(code, message, data) {
- return data === undefined ? { code, message } : { code, message, data };
-}
-
-function send(socket, message) {
- if (socket.destroyed) {
- return;
- }
- socket.write(`${JSON.stringify(message)}\n`);
-}
-
-function isInterruptRequest(message) {
- return message?.method === "turn/interrupt";
-}
-
-function writePidFile(pidFile) {
- if (!pidFile) {
- return;
- }
- fs.mkdirSync(path.dirname(pidFile), { recursive: true });
- fs.writeFileSync(pidFile, `${process.pid}\n`, "utf8");
-}
-
-async function main() {
- const [subcommand, ...argv] = process.argv.slice(2);
- if (subcommand !== "serve") {
- throw new Error("Usage: node scripts/app-server-broker.mjs serve --endpoint [--cwd ] [--pid-file ]");
- }
-
- const { options } = parseArgs(argv, {
- valueOptions: ["cwd", "pid-file", "endpoint"]
- });
-
- if (!options.endpoint) {
- throw new Error("Missing required --endpoint.");
- }
-
- const cwd = options.cwd ? path.resolve(process.cwd(), options.cwd) : process.cwd();
- const endpoint = String(options.endpoint);
- const listenTarget = parseBrokerEndpoint(endpoint);
- const pidFile = options["pid-file"] ? path.resolve(options["pid-file"]) : null;
- writePidFile(pidFile);
-
- const appClient = await CodexAppServerClient.connect(cwd, { disableBroker: true });
- let activeRequestSocket = null;
- let activeStreamSocket = null;
- let activeStreamThreadIds = null;
- const sockets = new Set();
-
- function clearSocketOwnership(socket) {
- if (activeRequestSocket === socket) {
- activeRequestSocket = null;
- }
- if (activeStreamSocket === socket) {
- activeStreamSocket = null;
- activeStreamThreadIds = null;
- }
- }
-
- function routeNotification(message) {
- const target = activeRequestSocket ?? activeStreamSocket;
- if (!target) {
- return;
- }
- send(target, message);
- if (message.method === "turn/completed" && activeStreamSocket === target) {
- const threadId = message.params?.threadId ?? null;
- if (!threadId || !activeStreamThreadIds || activeStreamThreadIds.has(threadId)) {
- activeStreamSocket = null;
- activeStreamThreadIds = null;
- if (activeRequestSocket === target) {
- activeRequestSocket = null;
- }
- }
- }
- }
-
- async function shutdown(server) {
- for (const socket of sockets) {
- socket.end();
- }
- await appClient.close().catch(() => {});
- await new Promise((resolve) => server.close(resolve));
- if (listenTarget.kind === "unix" && fs.existsSync(listenTarget.path)) {
- fs.unlinkSync(listenTarget.path);
- }
- if (pidFile && fs.existsSync(pidFile)) {
- fs.unlinkSync(pidFile);
- }
- }
-
- appClient.setNotificationHandler(routeNotification);
-
- const server = net.createServer((socket) => {
- sockets.add(socket);
- socket.setEncoding("utf8");
- let buffer = "";
-
- socket.on("data", async (chunk) => {
- buffer += chunk;
- let newlineIndex = buffer.indexOf("\n");
- while (newlineIndex !== -1) {
- const line = buffer.slice(0, newlineIndex);
- buffer = buffer.slice(newlineIndex + 1);
- newlineIndex = buffer.indexOf("\n");
-
- if (!line.trim()) {
- continue;
- }
-
- let message;
- try {
- message = JSON.parse(line);
- } catch (error) {
- send(socket, {
- id: null,
- error: buildJsonRpcError(-32700, `Invalid JSON: ${error.message}`)
- });
- continue;
- }
-
- if (message.id !== undefined && message.method === "initialize") {
- send(socket, {
- id: message.id,
- result: {
- userAgent: "codex-companion-broker"
- }
- });
- continue;
- }
-
- if (message.method === "initialized" && message.id === undefined) {
- continue;
- }
-
- if (message.id !== undefined && message.method === "broker/shutdown") {
- send(socket, { id: message.id, result: {} });
- await shutdown(server);
- process.exit(0);
- }
-
- if (message.id === undefined) {
- continue;
- }
-
- const allowInterruptDuringActiveStream =
- isInterruptRequest(message) && activeStreamSocket && activeStreamSocket !== socket && !activeRequestSocket;
-
- if (
- ((activeRequestSocket && activeRequestSocket !== socket) || (activeStreamSocket && activeStreamSocket !== socket)) &&
- !allowInterruptDuringActiveStream
- ) {
- send(socket, {
- id: message.id,
- error: buildJsonRpcError(BROKER_BUSY_RPC_CODE, "Shared Codex broker is busy.")
- });
- continue;
- }
-
- if (allowInterruptDuringActiveStream) {
- try {
- const result = await appClient.request(message.method, message.params ?? {});
- send(socket, { id: message.id, result });
- } catch (error) {
- send(socket, {
- id: message.id,
- error: buildJsonRpcError(error.rpcCode ?? -32000, error.message)
- });
- }
- continue;
- }
-
- const isStreaming = STREAMING_METHODS.has(message.method);
- activeRequestSocket = socket;
-
- try {
- const result = await appClient.request(message.method, message.params ?? {});
- send(socket, { id: message.id, result });
- if (isStreaming) {
- activeStreamSocket = socket;
- activeStreamThreadIds = buildStreamThreadIds(message.method, message.params ?? {}, result);
- }
- if (activeRequestSocket === socket) {
- activeRequestSocket = null;
- }
- } catch (error) {
- send(socket, {
- id: message.id,
- error: buildJsonRpcError(error.rpcCode ?? -32000, error.message)
- });
- if (activeRequestSocket === socket) {
- activeRequestSocket = null;
- }
- if (activeStreamSocket === socket && !isStreaming) {
- activeStreamSocket = null;
- }
- }
- }
- });
-
- socket.on("close", () => {
- sockets.delete(socket);
- clearSocketOwnership(socket);
- });
-
- socket.on("error", () => {
- sockets.delete(socket);
- clearSocketOwnership(socket);
- });
- });
-
- process.on("SIGTERM", async () => {
- await shutdown(server);
- process.exit(0);
- });
-
- process.on("SIGINT", async () => {
- await shutdown(server);
- process.exit(0);
- });
-
- server.listen(listenTarget.path);
-}
-
-main().catch((error) => {
- process.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`);
- process.exit(1);
-});
diff --git a/plugins/codex/scripts/codex-companion.mjs b/plugins/codex/scripts/codex-companion.mjs
deleted file mode 100644
index 35222fd5..00000000
--- a/plugins/codex/scripts/codex-companion.mjs
+++ /dev/null
@@ -1,1027 +0,0 @@
-#!/usr/bin/env node
-
-import { spawn } from "node:child_process";
-import fs from "node:fs";
-import path from "node:path";
-import process from "node:process";
-import { fileURLToPath } from "node:url";
-
-import { parseArgs, splitRawArgumentString } from "./lib/args.mjs";
-import {
- buildPersistentTaskThreadName,
- DEFAULT_CONTINUE_PROMPT,
- findLatestTaskThread,
- getCodexAuthStatus,
- getCodexAvailability,
- getSessionRuntimeStatus,
- interruptAppServerTurn,
- parseStructuredOutput,
- readOutputSchema,
- runAppServerReview,
- runAppServerTurn
- } from "./lib/codex.mjs";
-import { readStdinIfPiped } from "./lib/fs.mjs";
-import { collectReviewContext, ensureGitRepository, resolveReviewTarget } from "./lib/git.mjs";
-import { binaryAvailable, terminateProcessTree } from "./lib/process.mjs";
-import { loadPromptTemplate, interpolateTemplate } from "./lib/prompts.mjs";
-import {
- generateJobId,
- getConfig,
- listJobs,
- setConfig,
- upsertJob,
- writeJobFile
-} from "./lib/state.mjs";
-import {
- buildSingleJobSnapshot,
- buildStatusSnapshot,
- readStoredJob,
- resolveCancelableJob,
- resolveResultJob,
- sortJobsNewestFirst
-} from "./lib/job-control.mjs";
-import {
- appendLogLine,
- createJobLogFile,
- createJobProgressUpdater,
- createJobRecord,
- createProgressReporter,
- nowIso,
- runTrackedJob,
- SESSION_ID_ENV
-} from "./lib/tracked-jobs.mjs";
-import { resolveWorkspaceRoot } from "./lib/workspace.mjs";
-import {
- renderNativeReviewResult,
- renderReviewResult,
- renderStoredJobResult,
- renderCancelReport,
- renderJobStatusReport,
- renderSetupReport,
- renderStatusReport,
- renderTaskResult
-} from "./lib/render.mjs";
-
-const ROOT_DIR = path.resolve(fileURLToPath(new URL("..", import.meta.url)));
-const REVIEW_SCHEMA = path.join(ROOT_DIR, "schemas", "review-output.schema.json");
-const DEFAULT_STATUS_WAIT_TIMEOUT_MS = 240000;
-const DEFAULT_STATUS_POLL_INTERVAL_MS = 2000;
-const VALID_REASONING_EFFORTS = new Set(["none", "minimal", "low", "medium", "high", "xhigh"]);
-const MODEL_ALIASES = new Map([["spark", "gpt-5.3-codex-spark"]]);
-const STOP_REVIEW_TASK_MARKER = "Run a stop-gate review of the previous Claude turn.";
-
-function printUsage() {
- console.log(
- [
- "Usage:",
- " node scripts/codex-companion.mjs setup [--enable-review-gate|--disable-review-gate] [--json]",
- " node scripts/codex-companion.mjs review [--wait|--background] [--base ][] [--scope ]",
- " node scripts/codex-companion.mjs adversarial-review [--wait|--background] [--base ][] [--scope ] [focus text]",
- " node scripts/codex-companion.mjs task [--background] [--write] [--resume-last|--resume|--fresh] [--model ] [--effort ] [prompt]",
- " node scripts/codex-companion.mjs status [job-id] [--all] [--json]",
- " node scripts/codex-companion.mjs result [job-id] [--json]",
- " node scripts/codex-companion.mjs cancel [job-id] [--json]"
- ].join("\n")
- );
-}
-
-function outputResult(value, asJson) {
- if (asJson) {
- console.log(JSON.stringify(value, null, 2));
- } else {
- process.stdout.write(value);
- }
-}
-
-function outputCommandResult(payload, rendered, asJson) {
- outputResult(asJson ? payload : rendered, asJson);
-}
-
-function normalizeRequestedModel(model) {
- if (model == null) {
- return null;
- }
- const normalized = String(model).trim();
- if (!normalized) {
- return null;
- }
- return MODEL_ALIASES.get(normalized.toLowerCase()) ?? normalized;
-}
-
-function normalizeReasoningEffort(effort) {
- if (effort == null) {
- return null;
- }
- const normalized = String(effort).trim().toLowerCase();
- if (!normalized) {
- return null;
- }
- if (!VALID_REASONING_EFFORTS.has(normalized)) {
- throw new Error(
- `Unsupported reasoning effort "${effort}". Use one of: none, minimal, low, medium, high, xhigh.`
- );
- }
- return normalized;
-}
-
-function normalizeArgv(argv) {
- if (argv.length === 1) {
- const [raw] = argv;
- if (!raw || !raw.trim()) {
- return [];
- }
- return splitRawArgumentString(raw);
- }
- return argv;
-}
-
-function parseCommandInput(argv, config = {}) {
- return parseArgs(normalizeArgv(argv), {
- ...config,
- aliasMap: {
- C: "cwd",
- ...(config.aliasMap ?? {})
- }
- });
-}
-
-function resolveCommandCwd(options = {}) {
- return options.cwd ? path.resolve(process.cwd(), options.cwd) : process.cwd();
-}
-
-function resolveCommandWorkspace(options = {}) {
- return resolveWorkspaceRoot(resolveCommandCwd(options));
-}
-
-function sleep(ms) {
- return new Promise((resolve) => setTimeout(resolve, ms));
-}
-
-function shorten(text, limit = 96) {
- const normalized = String(text ?? "").trim().replace(/\s+/g, " ");
- if (!normalized) {
- return "";
- }
- if (normalized.length <= limit) {
- return normalized;
- }
- return `${normalized.slice(0, limit - 3)}...`;
-}
-
-function firstMeaningfulLine(text, fallback) {
- const line = String(text ?? "")
- .split(/\r?\n/)
- .map((value) => value.trim())
- .find(Boolean);
- return line ?? fallback;
-}
-
-async function buildSetupReport(cwd, actionsTaken = []) {
- const workspaceRoot = resolveWorkspaceRoot(cwd);
- const nodeStatus = binaryAvailable("node", ["--version"], { cwd });
- const npmStatus = binaryAvailable("npm", ["--version"], { cwd });
- const codexStatus = getCodexAvailability(cwd);
- const authStatus = await getCodexAuthStatus(cwd);
- const config = getConfig(workspaceRoot);
-
- const nextSteps = [];
- if (!codexStatus.available) {
- nextSteps.push("Install Codex with `npm install -g @openai/codex`.");
- }
- if (codexStatus.available && !authStatus.loggedIn && authStatus.requiresOpenaiAuth) {
- nextSteps.push("Run `!codex login`.");
- nextSteps.push("If browser login is blocked, retry with `!codex login --device-auth` or `!codex login --with-api-key`.");
- }
- if (!config.stopReviewGate) {
- nextSteps.push("Optional: run `/codex:setup --enable-review-gate` to require a fresh review before stop.");
- }
-
- return {
- ready: nodeStatus.available && codexStatus.available && authStatus.loggedIn,
- node: nodeStatus,
- npm: npmStatus,
- codex: codexStatus,
- auth: authStatus,
- sessionRuntime: getSessionRuntimeStatus(process.env, workspaceRoot),
- reviewGateEnabled: Boolean(config.stopReviewGate),
- actionsTaken,
- nextSteps
- };
-}
-
-async function handleSetup(argv) {
- const { options } = parseCommandInput(argv, {
- valueOptions: ["cwd"],
- booleanOptions: ["json", "enable-review-gate", "disable-review-gate"]
- });
-
- if (options["enable-review-gate"] && options["disable-review-gate"]) {
- throw new Error("Choose either --enable-review-gate or --disable-review-gate.");
- }
-
- const cwd = resolveCommandCwd(options);
- const workspaceRoot = resolveCommandWorkspace(options);
- const actionsTaken = [];
-
- if (options["enable-review-gate"]) {
- setConfig(workspaceRoot, "stopReviewGate", true);
- actionsTaken.push(`Enabled the stop-time review gate for ${workspaceRoot}.`);
- } else if (options["disable-review-gate"]) {
- setConfig(workspaceRoot, "stopReviewGate", false);
- actionsTaken.push(`Disabled the stop-time review gate for ${workspaceRoot}.`);
- }
-
- const finalReport = await buildSetupReport(cwd, actionsTaken);
- outputResult(options.json ? finalReport : renderSetupReport(finalReport), options.json);
-}
-
-function buildAdversarialReviewPrompt(context, focusText) {
- const template = loadPromptTemplate(ROOT_DIR, "adversarial-review");
- return interpolateTemplate(template, {
- REVIEW_KIND: "Adversarial Review",
- TARGET_LABEL: context.target.label,
- USER_FOCUS: focusText || "No extra focus provided.",
- REVIEW_COLLECTION_GUIDANCE: context.collectionGuidance,
- REVIEW_INPUT: context.content
- });
-}
-
-function ensureCodexAvailable(cwd) {
- const availability = getCodexAvailability(cwd);
- if (!availability.available) {
- throw new Error("Codex CLI is not installed or is missing required runtime support. Install it with `npm install -g @openai/codex`, then rerun `/codex:setup`.");
- }
-}
-
-function buildNativeReviewTarget(target) {
- if (target.mode === "working-tree") {
- return { type: "uncommittedChanges" };
- }
-
- if (target.mode === "branch") {
- return { type: "baseBranch", branch: target.baseRef };
- }
-
- return null;
-}
-
-function validateNativeReviewRequest(target, focusText) {
- if (focusText.trim()) {
- throw new Error(
- `\`/codex:review\` now maps directly to the built-in reviewer and does not support custom focus text. Retry with \`/codex:adversarial-review ${focusText.trim()}\` for focused review instructions.`
- );
- }
-
- const nativeTarget = buildNativeReviewTarget(target);
- if (!nativeTarget) {
- throw new Error("This `/codex:review` target is not supported by the built-in reviewer. Retry with `/codex:adversarial-review` for custom targeting.");
- }
-
- return nativeTarget;
-}
-
-function renderStatusPayload(report, asJson) {
- return asJson ? report : renderStatusReport(report);
-}
-
-function isActiveJobStatus(status) {
- return status === "queued" || status === "running";
-}
-
-function getCurrentClaudeSessionId() {
- return process.env[SESSION_ID_ENV] ?? null;
-}
-
-function filterJobsForCurrentClaudeSession(jobs) {
- const sessionId = getCurrentClaudeSessionId();
- if (!sessionId) {
- return jobs;
- }
- return jobs.filter((job) => job.sessionId === sessionId);
-}
-
-function findLatestResumableTaskJob(jobs) {
- return (
- jobs.find(
- (job) =>
- job.jobClass === "task" &&
- job.threadId &&
- job.status !== "queued" &&
- job.status !== "running"
- ) ?? null
- );
-}
-
-async function waitForSingleJobSnapshot(cwd, reference, options = {}) {
- const timeoutMs = Math.max(0, Number(options.timeoutMs) || DEFAULT_STATUS_WAIT_TIMEOUT_MS);
- const pollIntervalMs = Math.max(100, Number(options.pollIntervalMs) || DEFAULT_STATUS_POLL_INTERVAL_MS);
- const deadline = Date.now() + timeoutMs;
- let snapshot = buildSingleJobSnapshot(cwd, reference);
-
- while (isActiveJobStatus(snapshot.job.status) && Date.now() < deadline) {
- await sleep(Math.min(pollIntervalMs, Math.max(0, deadline - Date.now())));
- snapshot = buildSingleJobSnapshot(cwd, reference);
- }
-
- return {
- ...snapshot,
- waitTimedOut: isActiveJobStatus(snapshot.job.status),
- timeoutMs
- };
-}
-
-async function resolveLatestTrackedTaskThread(cwd, options = {}) {
- const workspaceRoot = resolveWorkspaceRoot(cwd);
- const sessionId = getCurrentClaudeSessionId();
- const jobs = sortJobsNewestFirst(listJobs(workspaceRoot)).filter((job) => job.id !== options.excludeJobId);
- const visibleJobs = filterJobsForCurrentClaudeSession(jobs);
- const activeTask = visibleJobs.find((job) => job.jobClass === "task" && (job.status === "queued" || job.status === "running"));
- if (activeTask) {
- throw new Error(`Task ${activeTask.id} is still running. Use /codex:status before continuing it.`);
- }
-
- const trackedTask = findLatestResumableTaskJob(visibleJobs);
- if (trackedTask) {
- return { id: trackedTask.threadId };
- }
-
- if (sessionId) {
- return null;
- }
-
- return findLatestTaskThread(workspaceRoot);
-}
-
-async function executeReviewRun(request) {
- ensureCodexAvailable(request.cwd);
- ensureGitRepository(request.cwd);
-
- const target = resolveReviewTarget(request.cwd, {
- base: request.base,
- scope: request.scope
- });
- const focusText = request.focusText?.trim() ?? "";
- const reviewName = request.reviewName ?? "Review";
- if (reviewName === "Review") {
- const reviewTarget = validateNativeReviewRequest(target, focusText);
- const result = await runAppServerReview(request.cwd, {
- target: reviewTarget,
- model: request.model,
- onProgress: request.onProgress
- });
- const payload = {
- review: reviewName,
- target,
- threadId: result.threadId,
- sourceThreadId: result.sourceThreadId,
- codex: {
- status: result.status,
- stderr: result.stderr,
- stdout: result.reviewText,
- reasoning: result.reasoningSummary
- }
- };
- const rendered = renderNativeReviewResult(
- {
- status: result.status,
- stdout: result.reviewText,
- stderr: result.stderr
- },
- { reviewLabel: reviewName, targetLabel: target.label, reasoningSummary: result.reasoningSummary }
- );
-
- return {
- exitStatus: result.status,
- threadId: result.threadId,
- turnId: result.turnId,
- payload,
- rendered,
- summary: firstMeaningfulLine(result.reviewText, `${reviewName} completed.`),
- jobTitle: `Codex ${reviewName}`,
- jobClass: "review",
- targetLabel: target.label
- };
- }
-
- const context = collectReviewContext(request.cwd, target);
- const prompt = buildAdversarialReviewPrompt(context, focusText);
- const result = await runAppServerTurn(context.repoRoot, {
- prompt,
- model: request.model,
- sandbox: "read-only",
- outputSchema: readOutputSchema(REVIEW_SCHEMA),
- onProgress: request.onProgress
- });
- const parsed = parseStructuredOutput(result.finalMessage, {
- status: result.status,
- failureMessage: result.error?.message ?? result.stderr
- });
- const payload = {
- review: reviewName,
- target,
- threadId: result.threadId,
- context: {
- repoRoot: context.repoRoot,
- branch: context.branch,
- summary: context.summary
- },
- codex: {
- status: result.status,
- stderr: result.stderr,
- stdout: result.finalMessage,
- reasoning: result.reasoningSummary
- },
- result: parsed.parsed,
- rawOutput: parsed.rawOutput,
- parseError: parsed.parseError,
- reasoningSummary: result.reasoningSummary
- };
-
- return {
- exitStatus: result.status,
- threadId: result.threadId,
- turnId: result.turnId,
- payload,
- rendered: renderReviewResult(parsed, {
- reviewLabel: reviewName,
- targetLabel: context.target.label,
- reasoningSummary: result.reasoningSummary
- }),
- summary: parsed.parsed?.summary ?? parsed.parseError ?? firstMeaningfulLine(result.finalMessage, `${reviewName} finished.`),
- jobTitle: `Codex ${reviewName}`,
- jobClass: "review",
- targetLabel: context.target.label
- };
-}
-
-
-async function executeTaskRun(request) {
- const workspaceRoot = resolveWorkspaceRoot(request.cwd);
- ensureCodexAvailable(request.cwd);
-
- const taskMetadata = buildTaskRunMetadata({
- prompt: request.prompt,
- resumeLast: request.resumeLast
- });
-
- let resumeThreadId = null;
- if (request.resumeLast) {
- const latestThread = await resolveLatestTrackedTaskThread(workspaceRoot, {
- excludeJobId: request.jobId
- });
- if (!latestThread) {
- throw new Error("No previous Codex task thread was found for this repository.");
- }
- resumeThreadId = latestThread.id;
- }
-
- if (!request.prompt && !resumeThreadId) {
- throw new Error("Provide a prompt, a prompt file, piped stdin, or use --resume-last.");
- }
-
- const result = await runAppServerTurn(workspaceRoot, {
- resumeThreadId,
- prompt: request.prompt,
- defaultPrompt: resumeThreadId ? DEFAULT_CONTINUE_PROMPT : "",
- model: request.model,
- effort: request.effort,
- sandbox: request.write ? "workspace-write" : "read-only",
- onProgress: request.onProgress,
- persistThread: true,
- threadName: resumeThreadId ? null : buildPersistentTaskThreadName(request.prompt || DEFAULT_CONTINUE_PROMPT)
- });
-
- const rawOutput = typeof result.finalMessage === "string" ? result.finalMessage : "";
- const failureMessage = result.error?.message ?? result.stderr ?? "";
- const rendered = renderTaskResult(
- {
- rawOutput,
- failureMessage,
- reasoningSummary: result.reasoningSummary
- },
- {
- title: taskMetadata.title,
- jobId: request.jobId ?? null,
- write: Boolean(request.write)
- }
- );
- const payload = {
- status: result.status,
- threadId: result.threadId,
- rawOutput,
- touchedFiles: result.touchedFiles,
- reasoningSummary: result.reasoningSummary
- };
-
- return {
- exitStatus: result.status,
- threadId: result.threadId,
- turnId: result.turnId,
- payload,
- rendered,
- summary: firstMeaningfulLine(rawOutput, firstMeaningfulLine(failureMessage, `${taskMetadata.title} finished.`)),
- jobTitle: taskMetadata.title,
- jobClass: "task",
- write: Boolean(request.write)
- };
-}
-
-function buildReviewJobMetadata(reviewName, target) {
- return {
- kind: reviewName === "Adversarial Review" ? "adversarial-review" : "review",
- title: reviewName === "Review" ? "Codex Review" : `Codex ${reviewName}`,
- summary: `${reviewName} ${target.label}`
- };
-}
-
-function buildTaskRunMetadata({ prompt, resumeLast = false }) {
- if (!resumeLast && String(prompt ?? "").includes(STOP_REVIEW_TASK_MARKER)) {
- return {
- title: "Codex Stop Gate Review",
- summary: "Stop-gate review of previous Claude turn"
- };
- }
-
- const title = resumeLast ? "Codex Resume" : "Codex Task";
- const fallbackSummary = resumeLast ? DEFAULT_CONTINUE_PROMPT : "Task";
- return {
- title,
- summary: shorten(prompt || fallbackSummary)
- };
-}
-
-function renderQueuedTaskLaunch(payload) {
- return `${payload.title} started in the background as ${payload.jobId}. Check /codex:status ${payload.jobId} for progress.\n`;
-}
-
-function getJobKindLabel(kind, jobClass) {
- if (kind === "adversarial-review") {
- return "adversarial-review";
- }
- return jobClass === "review" ? "review" : "rescue";
-}
-
-function createCompanionJob({ prefix, kind, title, workspaceRoot, jobClass, summary, write = false }) {
- return createJobRecord({
- id: generateJobId(prefix),
- kind,
- kindLabel: getJobKindLabel(kind, jobClass),
- title,
- workspaceRoot,
- jobClass,
- summary,
- write
- });
-}
-
-function createTrackedProgress(job, options = {}) {
- const logFile = options.logFile ?? createJobLogFile(job.workspaceRoot, job.id, job.title);
- return {
- logFile,
- progress: createProgressReporter({
- stderr: Boolean(options.stderr),
- logFile,
- onEvent: createJobProgressUpdater(job.workspaceRoot, job.id)
- })
- };
-}
-
-function buildTaskJob(workspaceRoot, taskMetadata, write) {
- return createCompanionJob({
- prefix: "task",
- kind: "task",
- title: taskMetadata.title,
- workspaceRoot,
- jobClass: "task",
- summary: taskMetadata.summary,
- write
- });
-}
-
-function buildTaskRequest({ cwd, model, effort, prompt, write, resumeLast, jobId }) {
- return {
- cwd,
- model,
- effort,
- prompt,
- write,
- resumeLast,
- jobId
- };
-}
-
-function readTaskPrompt(cwd, options, positionals) {
- if (options["prompt-file"]) {
- return fs.readFileSync(path.resolve(cwd, options["prompt-file"]), "utf8");
- }
-
- const positionalPrompt = positionals.join(" ");
- return positionalPrompt || readStdinIfPiped();
-}
-
-function requireTaskRequest(prompt, resumeLast) {
- if (!prompt && !resumeLast) {
- throw new Error("Provide a prompt, a prompt file, piped stdin, or use --resume-last.");
- }
-}
-
-async function runForegroundCommand(job, runner, options = {}) {
- const { logFile, progress } = createTrackedProgress(job, {
- logFile: options.logFile,
- stderr: !options.json
- });
- const execution = await runTrackedJob(job, () => runner(progress), { logFile });
- outputResult(options.json ? execution.payload : execution.rendered, options.json);
- if (execution.exitStatus !== 0) {
- process.exitCode = execution.exitStatus;
- }
- return execution;
-}
-
-function spawnDetachedTaskWorker(cwd, jobId) {
- const scriptPath = path.join(ROOT_DIR, "scripts", "codex-companion.mjs");
- const child = spawn(process.execPath, [scriptPath, "task-worker", "--cwd", cwd, "--job-id", jobId], {
- cwd,
- env: process.env,
- detached: true,
- stdio: "ignore",
- windowsHide: true
- });
- child.unref();
- return child;
-}
-
-function enqueueBackgroundTask(cwd, job, request) {
- const { logFile } = createTrackedProgress(job);
- appendLogLine(logFile, "Queued for background execution.");
-
- const child = spawnDetachedTaskWorker(cwd, job.id);
- const queuedRecord = {
- ...job,
- status: "queued",
- phase: "queued",
- pid: child.pid ?? null,
- logFile,
- request
- };
- writeJobFile(job.workspaceRoot, job.id, queuedRecord);
- upsertJob(job.workspaceRoot, queuedRecord);
-
- return {
- payload: {
- jobId: job.id,
- status: "queued",
- title: job.title,
- summary: job.summary,
- logFile
- },
- logFile
- };
-}
-
-async function handleReviewCommand(argv, config) {
- const { options, positionals } = parseCommandInput(argv, {
- valueOptions: ["base", "scope", "model", "cwd"],
- booleanOptions: ["json", "background", "wait"],
- aliasMap: {
- m: "model"
- }
- });
-
- const cwd = resolveCommandCwd(options);
- const workspaceRoot = resolveCommandWorkspace(options);
- const focusText = positionals.join(" ").trim();
- const target = resolveReviewTarget(cwd, {
- base: options.base,
- scope: options.scope
- });
-
- config.validateRequest?.(target, focusText);
- const metadata = buildReviewJobMetadata(config.reviewName, target);
- const job = createCompanionJob({
- prefix: "review",
- kind: metadata.kind,
- title: metadata.title,
- workspaceRoot,
- jobClass: "review",
- summary: metadata.summary
- });
- await runForegroundCommand(
- job,
- (progress) =>
- executeReviewRun({
- cwd,
- base: options.base,
- scope: options.scope,
- model: options.model,
- focusText,
- reviewName: config.reviewName,
- onProgress: progress
- }),
- { json: options.json }
- );
-}
-
-async function handleReview(argv) {
- return handleReviewCommand(argv, {
- reviewName: "Review",
- validateRequest: validateNativeReviewRequest
- });
-}
-
-async function handleTask(argv) {
- const { options, positionals } = parseCommandInput(argv, {
- valueOptions: ["model", "effort", "cwd", "prompt-file"],
- booleanOptions: ["json", "write", "resume-last", "resume", "fresh", "background"],
- aliasMap: {
- m: "model"
- }
- });
-
- const cwd = resolveCommandCwd(options);
- const workspaceRoot = resolveCommandWorkspace(options);
- const model = normalizeRequestedModel(options.model);
- const effort = normalizeReasoningEffort(options.effort);
- const prompt = readTaskPrompt(cwd, options, positionals);
-
- const resumeLast = Boolean(options["resume-last"] || options.resume);
- const fresh = Boolean(options.fresh);
- if (resumeLast && fresh) {
- throw new Error("Choose either --resume/--resume-last or --fresh.");
- }
- const write = Boolean(options.write);
- const taskMetadata = buildTaskRunMetadata({
- prompt,
- resumeLast
- });
-
- if (options.background) {
- ensureCodexAvailable(cwd);
- requireTaskRequest(prompt, resumeLast);
-
- const job = buildTaskJob(workspaceRoot, taskMetadata, write);
- const request = buildTaskRequest({
- cwd,
- model,
- effort,
- prompt,
- write,
- resumeLast,
- jobId: job.id
- });
- const { payload } = enqueueBackgroundTask(cwd, job, request);
- outputCommandResult(payload, renderQueuedTaskLaunch(payload), options.json);
- return;
- }
-
- const job = buildTaskJob(workspaceRoot, taskMetadata, write);
- await runForegroundCommand(
- job,
- (progress) =>
- executeTaskRun({
- cwd,
- model,
- effort,
- prompt,
- write,
- resumeLast,
- jobId: job.id,
- onProgress: progress
- }),
- { json: options.json }
- );
-}
-
-async function handleTaskWorker(argv) {
- const { options } = parseCommandInput(argv, {
- valueOptions: ["cwd", "job-id"]
- });
-
- if (!options["job-id"]) {
- throw new Error("Missing required --job-id for task-worker.");
- }
-
- const cwd = resolveCommandCwd(options);
- const workspaceRoot = resolveCommandWorkspace(options);
- const storedJob = readStoredJob(workspaceRoot, options["job-id"]);
- if (!storedJob) {
- throw new Error(`No stored job found for ${options["job-id"]}.`);
- }
-
- const request = storedJob.request;
- if (!request || typeof request !== "object") {
- throw new Error(`Stored job ${options["job-id"]} is missing its task request payload.`);
- }
-
- const { logFile, progress } = createTrackedProgress(
- {
- ...storedJob,
- workspaceRoot
- },
- {
- logFile: storedJob.logFile ?? null
- }
- );
- await runTrackedJob(
- {
- ...storedJob,
- workspaceRoot,
- logFile
- },
- () =>
- executeTaskRun({
- ...request,
- onProgress: progress
- }),
- { logFile }
- );
-}
-
-async function handleStatus(argv) {
- const { options, positionals } = parseCommandInput(argv, {
- valueOptions: ["cwd", "timeout-ms", "poll-interval-ms"],
- booleanOptions: ["json", "all", "wait"]
- });
-
- const cwd = resolveCommandCwd(options);
- const reference = positionals[0] ?? "";
- if (reference) {
- const snapshot = options.wait
- ? await waitForSingleJobSnapshot(cwd, reference, {
- timeoutMs: options["timeout-ms"],
- pollIntervalMs: options["poll-interval-ms"]
- })
- : buildSingleJobSnapshot(cwd, reference);
- outputCommandResult(snapshot, renderJobStatusReport(snapshot.job), options.json);
- return;
- }
-
- if (options.wait) {
- throw new Error("`status --wait` requires a job id.");
- }
-
- const report = buildStatusSnapshot(cwd, { all: options.all });
- outputResult(renderStatusPayload(report, options.json), options.json);
-}
-
-function handleResult(argv) {
- const { options, positionals } = parseCommandInput(argv, {
- valueOptions: ["cwd"],
- booleanOptions: ["json"]
- });
-
- const cwd = resolveCommandCwd(options);
- const reference = positionals[0] ?? "";
- const { workspaceRoot, job } = resolveResultJob(cwd, reference);
- const storedJob = readStoredJob(workspaceRoot, job.id);
- const payload = {
- job,
- storedJob
- };
-
- outputCommandResult(payload, renderStoredJobResult(job, storedJob), options.json);
-}
-
-function handleTaskResumeCandidate(argv) {
- const { options } = parseCommandInput(argv, {
- valueOptions: ["cwd"],
- booleanOptions: ["json"]
- });
-
- const cwd = resolveCommandCwd(options);
- const workspaceRoot = resolveCommandWorkspace(options);
- const sessionId = getCurrentClaudeSessionId();
- const jobs = filterJobsForCurrentClaudeSession(sortJobsNewestFirst(listJobs(workspaceRoot)));
- const candidate = findLatestResumableTaskJob(jobs);
-
- const payload = {
- available: Boolean(candidate),
- sessionId,
- candidate:
- candidate == null
- ? null
- : {
- id: candidate.id,
- status: candidate.status,
- title: candidate.title ?? null,
- summary: candidate.summary ?? null,
- threadId: candidate.threadId,
- completedAt: candidate.completedAt ?? null,
- updatedAt: candidate.updatedAt ?? null
- }
- };
-
- const rendered = candidate
- ? `Resumable task found: ${candidate.id} (${candidate.status}).\n`
- : "No resumable task found for this session.\n";
- outputCommandResult(payload, rendered, options.json);
-}
-
-async function handleCancel(argv) {
- const { options, positionals } = parseCommandInput(argv, {
- valueOptions: ["cwd"],
- booleanOptions: ["json"]
- });
-
- const cwd = resolveCommandCwd(options);
- const reference = positionals[0] ?? "";
- const { workspaceRoot, job } = resolveCancelableJob(cwd, reference, { env: process.env });
- const existing = readStoredJob(workspaceRoot, job.id) ?? {};
- const threadId = existing.threadId ?? job.threadId ?? null;
- const turnId = existing.turnId ?? job.turnId ?? null;
-
- const interrupt = await interruptAppServerTurn(cwd, { threadId, turnId });
- if (interrupt.attempted) {
- appendLogLine(
- job.logFile,
- interrupt.interrupted
- ? `Requested Codex turn interrupt for ${turnId} on ${threadId}.`
- : `Codex turn interrupt failed${interrupt.detail ? `: ${interrupt.detail}` : "."}`
- );
- }
-
- terminateProcessTree(job.pid ?? Number.NaN);
- appendLogLine(job.logFile, "Cancelled by user.");
-
- const completedAt = nowIso();
- const nextJob = {
- ...job,
- status: "cancelled",
- phase: "cancelled",
- pid: null,
- completedAt,
- errorMessage: "Cancelled by user."
- };
-
- writeJobFile(workspaceRoot, job.id, {
- ...existing,
- ...nextJob,
- cancelledAt: completedAt
- });
- upsertJob(workspaceRoot, {
- id: job.id,
- status: "cancelled",
- phase: "cancelled",
- pid: null,
- errorMessage: "Cancelled by user.",
- completedAt
- });
-
- const payload = {
- jobId: job.id,
- status: "cancelled",
- title: job.title,
- turnInterruptAttempted: interrupt.attempted,
- turnInterrupted: interrupt.interrupted
- };
-
- outputCommandResult(payload, renderCancelReport(nextJob), options.json);
-}
-
-async function main() {
- const [subcommand, ...argv] = process.argv.slice(2);
- if (!subcommand || subcommand === "help" || subcommand === "--help") {
- printUsage();
- return;
- }
-
- switch (subcommand) {
- case "setup":
- await handleSetup(argv);
- break;
- case "review":
- await handleReview(argv);
- break;
- case "adversarial-review":
- await handleReviewCommand(argv, {
- reviewName: "Adversarial Review"
- });
- break;
- case "task":
- await handleTask(argv);
- break;
- case "task-worker":
- await handleTaskWorker(argv);
- break;
- case "status":
- await handleStatus(argv);
- break;
- case "result":
- handleResult(argv);
- break;
- case "task-resume-candidate":
- handleTaskResumeCandidate(argv);
- break;
- case "cancel":
- await handleCancel(argv);
- break;
- default:
- throw new Error(`Unknown subcommand: ${subcommand}`);
- }
-}
-
-main().catch((error) => {
- const message = error instanceof Error ? error.message : String(error);
- process.stderr.write(`${message}\n`);
- process.exitCode = 1;
-});
diff --git a/plugins/codex/scripts/lib/app-server-protocol.d.ts b/plugins/codex/scripts/lib/app-server-protocol.d.ts
deleted file mode 100644
index cc6446d0..00000000
--- a/plugins/codex/scripts/lib/app-server-protocol.d.ts
+++ /dev/null
@@ -1,72 +0,0 @@
-import type {
- ClientInfo,
- InitializeCapabilities,
- InitializeParams,
- InitializeResponse,
- ServerNotification
-} from "../../.generated/app-server-types/index.js";
-import type {
- ReviewStartParams,
- ReviewStartResponse,
- ReviewTarget,
- Thread,
- ThreadItem,
- ThreadListParams,
- ThreadListResponse,
- ThreadResumeParams as RawThreadResumeParams,
- ThreadResumeResponse,
- ThreadSetNameParams,
- ThreadSetNameResponse,
- ThreadStartParams as RawThreadStartParams,
- ThreadStartResponse,
- Turn,
- TurnInterruptParams,
- TurnInterruptResponse,
- TurnStartParams,
- TurnStartResponse,
- UserInput
-} from "../../.generated/app-server-types/v2/index.js";
-
-export type {
- ClientInfo,
- InitializeCapabilities,
- InitializeParams,
- InitializeResponse,
- ReviewTarget,
- Thread,
- ThreadItem,
- ThreadListParams,
- Turn,
- TurnInterruptParams,
- TurnStartParams,
- UserInput
-};
-
-export type ThreadStartParams = Omit;
-export type ThreadResumeParams = Omit;
-
-export interface CodexAppServerClientOptions {
- env?: NodeJS.ProcessEnv;
- clientInfo?: ClientInfo;
- capabilities?: InitializeCapabilities;
- brokerEndpoint?: string;
- disableBroker?: boolean;
- reuseExistingBroker?: boolean;
-}
-
-export interface AppServerMethodMap {
- initialize: { params: InitializeParams; result: InitializeResponse };
- "thread/start": { params: ThreadStartParams; result: ThreadStartResponse };
- "thread/resume": { params: ThreadResumeParams; result: ThreadResumeResponse };
- "thread/name/set": { params: ThreadSetNameParams; result: ThreadSetNameResponse };
- "thread/list": { params: ThreadListParams; result: ThreadListResponse };
- "review/start": { params: ReviewStartParams; result: ReviewStartResponse };
- "turn/start": { params: TurnStartParams; result: TurnStartResponse };
- "turn/interrupt": { params: TurnInterruptParams; result: TurnInterruptResponse };
-}
-
-export type AppServerMethod = keyof AppServerMethodMap;
-export type AppServerRequestParams = AppServerMethodMap[M]["params"];
-export type AppServerResponse = AppServerMethodMap[M]["result"];
-export type AppServerNotification = ServerNotification;
-export type AppServerNotificationHandler = (message: AppServerNotification) => void;
diff --git a/plugins/codex/scripts/lib/app-server.mjs b/plugins/codex/scripts/lib/app-server.mjs
deleted file mode 100644
index 127c8376..00000000
--- a/plugins/codex/scripts/lib/app-server.mjs
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * @typedef {Error & { data?: unknown, rpcCode?: number }} ProtocolError
- * @typedef {import("./app-server-protocol").AppServerMethod} AppServerMethod
- * @typedef {import("./app-server-protocol").AppServerNotification} AppServerNotification
- * @typedef {import("./app-server-protocol").AppServerNotificationHandler} AppServerNotificationHandler
- * @typedef {import("./app-server-protocol").ClientInfo} ClientInfo
- * @typedef {import("./app-server-protocol").CodexAppServerClientOptions} CodexAppServerClientOptions
- * @typedef {import("./app-server-protocol").InitializeCapabilities} InitializeCapabilities
- */
-import fs from "node:fs";
-import net from "node:net";
-import process from "node:process";
-import { spawn } from "node:child_process";
-import readline from "node:readline";
-import { parseBrokerEndpoint } from "./broker-endpoint.mjs";
-import { ensureBrokerSession, loadBrokerSession } from "./broker-lifecycle.mjs";
-import { terminateProcessTree } from "./process.mjs";
-
-const PLUGIN_MANIFEST_URL = new URL("../../.claude-plugin/plugin.json", import.meta.url);
-const PLUGIN_MANIFEST = JSON.parse(fs.readFileSync(PLUGIN_MANIFEST_URL, "utf8"));
-
-export const BROKER_ENDPOINT_ENV = "CODEX_COMPANION_APP_SERVER_ENDPOINT";
-export const BROKER_BUSY_RPC_CODE = -32001;
-
-/** @type {ClientInfo} */
-const DEFAULT_CLIENT_INFO = {
- title: "Codex Plugin",
- name: "Claude Code",
- version: PLUGIN_MANIFEST.version ?? "0.0.0"
-};
-
-/** @type {InitializeCapabilities} */
-const DEFAULT_CAPABILITIES = {
- experimentalApi: false,
- optOutNotificationMethods: [
- "item/agentMessage/delta",
- "item/reasoning/summaryTextDelta",
- "item/reasoning/summaryPartAdded",
- "item/reasoning/textDelta"
- ]
-};
-
-function buildJsonRpcError(code, message, data) {
- return data === undefined ? { code, message } : { code, message, data };
-}
-
-function createProtocolError(message, data) {
- const error = /** @type {ProtocolError} */ (new Error(message));
- error.data = data;
- if (data?.code !== undefined) {
- error.rpcCode = data.code;
- }
- return error;
-}
-
-class AppServerClientBase {
- constructor(cwd, options = {}) {
- this.cwd = cwd;
- this.options = options;
- this.pending = new Map();
- this.nextId = 1;
- this.stderr = "";
- this.closed = false;
- this.exitError = null;
- /** @type {AppServerNotificationHandler | null} */
- this.notificationHandler = null;
- this.lineBuffer = "";
- this.transport = "unknown";
-
- this.exitPromise = new Promise((resolve) => {
- this.resolveExit = resolve;
- });
- }
-
- setNotificationHandler(handler) {
- this.notificationHandler = handler;
- }
-
- /**
- * @template {AppServerMethod} M
- * @param {M} method
- * @param {import("./app-server-protocol").AppServerRequestParams} params
- * @returns {Promise>}
- */
- request(method, params) {
- if (this.closed) {
- throw new Error("codex app-server client is closed.");
- }
-
- const id = this.nextId;
- this.nextId += 1;
-
- return new Promise((resolve, reject) => {
- this.pending.set(id, { resolve, reject, method });
- this.sendMessage({ id, method, params });
- });
- }
-
- notify(method, params = {}) {
- if (this.closed) {
- return;
- }
- this.sendMessage({ method, params });
- }
-
- handleChunk(chunk) {
- this.lineBuffer += chunk;
- let newlineIndex = this.lineBuffer.indexOf("\n");
- while (newlineIndex !== -1) {
- const line = this.lineBuffer.slice(0, newlineIndex);
- this.lineBuffer = this.lineBuffer.slice(newlineIndex + 1);
- this.handleLine(line);
- newlineIndex = this.lineBuffer.indexOf("\n");
- }
- }
-
- handleLine(line) {
- if (!line.trim()) {
- return;
- }
-
- let message;
- try {
- message = JSON.parse(line);
- } catch (error) {
- this.handleExit(createProtocolError(`Failed to parse codex app-server JSONL: ${error.message}`, { line }));
- return;
- }
-
- if (message.id !== undefined && message.method) {
- this.handleServerRequest(message);
- return;
- }
-
- if (message.id !== undefined) {
- const pending = this.pending.get(message.id);
- if (!pending) {
- return;
- }
- this.pending.delete(message.id);
-
- if (message.error) {
- pending.reject(createProtocolError(message.error.message ?? `codex app-server ${pending.method} failed.`, message.error));
- } else {
- pending.resolve(message.result ?? {});
- }
- return;
- }
-
- if (message.method && this.notificationHandler) {
- this.notificationHandler(/** @type {AppServerNotification} */ (message));
- }
- }
-
- handleServerRequest(message) {
- this.sendMessage({
- id: message.id,
- error: buildJsonRpcError(-32601, `Unsupported server request: ${message.method}`)
- });
- }
-
- handleExit(error) {
- if (this.exitResolved) {
- return;
- }
-
- this.exitResolved = true;
- this.exitError = error ?? null;
-
- for (const pending of this.pending.values()) {
- pending.reject(this.exitError ?? new Error("codex app-server connection closed."));
- }
- this.pending.clear();
- this.resolveExit(undefined);
- }
-
- sendMessage(_message) {
- throw new Error("sendMessage must be implemented by subclasses.");
- }
-}
-
-class SpawnedCodexAppServerClient extends AppServerClientBase {
- constructor(cwd, options = {}) {
- super(cwd, options);
- this.transport = "direct";
- }
-
- async initialize() {
- this.proc = spawn("codex", ["app-server"], {
- cwd: this.cwd,
- env: this.options.env ?? process.env,
- stdio: ["pipe", "pipe", "pipe"],
- shell: process.platform === "win32" ? (process.env.SHELL || true) : false,
- windowsHide: true
- });
-
- this.proc.stdout.setEncoding("utf8");
- this.proc.stderr.setEncoding("utf8");
-
- this.proc.stderr.on("data", (chunk) => {
- this.stderr += chunk;
- });
-
- this.proc.on("error", (error) => {
- this.handleExit(error);
- });
-
- this.proc.on("exit", (code, signal) => {
- const detail =
- code === 0
- ? null
- : createProtocolError(`codex app-server exited unexpectedly (${signal ? `signal ${signal}` : `exit ${code}`}).`);
- this.handleExit(detail);
- });
-
- this.readline = readline.createInterface({ input: this.proc.stdout });
- this.readline.on("line", (line) => {
- this.handleLine(line);
- });
-
- await this.request("initialize", {
- clientInfo: this.options.clientInfo ?? DEFAULT_CLIENT_INFO,
- capabilities: this.options.capabilities ?? DEFAULT_CAPABILITIES
- });
- this.notify("initialized", {});
- }
-
- async close() {
- if (this.closed) {
- await this.exitPromise;
- return;
- }
-
- this.closed = true;
-
- if (this.readline) {
- this.readline.close();
- }
-
- if (this.proc && !this.proc.killed) {
- this.proc.stdin.end();
- setTimeout(() => {
- if (this.proc && !this.proc.killed && this.proc.exitCode === null) {
- // On Windows with shell: true, the direct child is cmd.exe.
- // Use terminateProcessTree to kill the entire tree including
- // the grandchild node process.
- if (process.platform === "win32") {
- try {
- terminateProcessTree(this.proc.pid);
- } catch {
- // Best-effort cleanup inside an unref'd timer — swallow errors
- // to avoid crashing the host process during shutdown.
- }
- } else {
- this.proc.kill("SIGTERM");
- }
- }
- }, 50).unref?.();
- }
-
- await this.exitPromise;
- }
-
- sendMessage(message) {
- const line = `${JSON.stringify(message)}\n`;
- const stdin = this.proc?.stdin;
- if (!stdin) {
- throw new Error("codex app-server stdin is not available.");
- }
- stdin.write(line);
- }
-}
-
-class BrokerCodexAppServerClient extends AppServerClientBase {
- constructor(cwd, options = {}) {
- super(cwd, options);
- this.transport = "broker";
- this.endpoint = options.brokerEndpoint;
- }
-
- async initialize() {
- await new Promise((resolve, reject) => {
- const target = parseBrokerEndpoint(this.endpoint);
- this.socket = net.createConnection({ path: target.path });
- this.socket.setEncoding("utf8");
- this.socket.on("connect", resolve);
- this.socket.on("data", (chunk) => {
- this.handleChunk(chunk);
- });
- this.socket.on("error", (error) => {
- if (!this.exitResolved) {
- reject(error);
- }
- this.handleExit(error);
- });
- this.socket.on("close", () => {
- this.handleExit(this.exitError);
- });
- });
-
- await this.request("initialize", {
- clientInfo: this.options.clientInfo ?? DEFAULT_CLIENT_INFO,
- capabilities: this.options.capabilities ?? DEFAULT_CAPABILITIES
- });
- this.notify("initialized", {});
- }
-
- async close() {
- if (this.closed) {
- await this.exitPromise;
- return;
- }
-
- this.closed = true;
- if (this.socket) {
- this.socket.end();
- }
- await this.exitPromise;
- }
-
- sendMessage(message) {
- const line = `${JSON.stringify(message)}\n`;
- const socket = this.socket;
- if (!socket) {
- throw new Error("codex app-server broker connection is not connected.");
- }
- socket.write(line);
- }
-}
-
-export class CodexAppServerClient {
- static async connect(cwd, options = {}) {
- let brokerEndpoint = null;
- if (!options.disableBroker) {
- brokerEndpoint = options.brokerEndpoint ?? options.env?.[BROKER_ENDPOINT_ENV] ?? process.env[BROKER_ENDPOINT_ENV] ?? null;
- if (!brokerEndpoint && options.reuseExistingBroker) {
- brokerEndpoint = loadBrokerSession(cwd)?.endpoint ?? null;
- }
- if (!brokerEndpoint && !options.reuseExistingBroker) {
- const brokerSession = await ensureBrokerSession(cwd, { env: options.env });
- brokerEndpoint = brokerSession?.endpoint ?? null;
- }
- }
- const client = brokerEndpoint
- ? new BrokerCodexAppServerClient(cwd, { ...options, brokerEndpoint })
- : new SpawnedCodexAppServerClient(cwd, options);
- await client.initialize();
- return client;
- }
-}
diff --git a/plugins/codex/scripts/lib/args.mjs b/plugins/codex/scripts/lib/args.mjs
deleted file mode 100644
index 6b151850..00000000
--- a/plugins/codex/scripts/lib/args.mjs
+++ /dev/null
@@ -1,128 +0,0 @@
-export function parseArgs(argv, config = {}) {
- const valueOptions = new Set(config.valueOptions ?? []);
- const booleanOptions = new Set(config.booleanOptions ?? []);
- const aliasMap = config.aliasMap ?? {};
- const options = {};
- const positionals = [];
- let passthrough = false;
-
- for (let index = 0; index < argv.length; index += 1) {
- const token = argv[index];
-
- if (passthrough) {
- positionals.push(token);
- continue;
- }
-
- if (token === "--") {
- passthrough = true;
- continue;
- }
-
- if (!token.startsWith("-") || token === "-") {
- positionals.push(token);
- continue;
- }
-
- if (token.startsWith("--")) {
- const [rawKey, inlineValue] = token.slice(2).split("=", 2);
- const key = aliasMap[rawKey] ?? rawKey;
-
- if (booleanOptions.has(key)) {
- options[key] = inlineValue === undefined ? true : inlineValue !== "false";
- continue;
- }
-
- if (valueOptions.has(key)) {
- const nextValue = inlineValue ?? argv[index + 1];
- if (nextValue === undefined) {
- throw new Error(`Missing value for --${rawKey}`);
- }
- options[key] = nextValue;
- if (inlineValue === undefined) {
- index += 1;
- }
- continue;
- }
-
- positionals.push(token);
- continue;
- }
-
- const shortKey = token.slice(1);
- const key = aliasMap[shortKey] ?? shortKey;
-
- if (booleanOptions.has(key)) {
- options[key] = true;
- continue;
- }
-
- if (valueOptions.has(key)) {
- const nextValue = argv[index + 1];
- if (nextValue === undefined) {
- throw new Error(`Missing value for -${shortKey}`);
- }
- options[key] = nextValue;
- index += 1;
- continue;
- }
-
- positionals.push(token);
- }
-
- return { options, positionals };
-}
-
-export function splitRawArgumentString(raw) {
- const tokens = [];
- let current = "";
- let quote = null;
- let escaping = false;
-
- for (const character of raw) {
- if (escaping) {
- current += character;
- escaping = false;
- continue;
- }
-
- if (character === "\\") {
- escaping = true;
- continue;
- }
-
- if (quote) {
- if (character === quote) {
- quote = null;
- } else {
- current += character;
- }
- continue;
- }
-
- if (character === "'" || character === "\"") {
- quote = character;
- continue;
- }
-
- if (/\s/.test(character)) {
- if (current) {
- tokens.push(current);
- current = "";
- }
- continue;
- }
-
- current += character;
- }
-
- if (escaping) {
- current += "\\";
- }
-
- if (current) {
- tokens.push(current);
- }
-
- return tokens;
-}
diff --git a/plugins/codex/scripts/lib/broker-endpoint.mjs b/plugins/codex/scripts/lib/broker-endpoint.mjs
deleted file mode 100644
index 8abdcc71..00000000
--- a/plugins/codex/scripts/lib/broker-endpoint.mjs
+++ /dev/null
@@ -1,41 +0,0 @@
-import path from "node:path";
-import process from "node:process";
-
-function sanitizePipeName(value) {
- return String(value ?? "")
- .replace(/[^A-Za-z0-9._-]/g, "-")
- .replace(/^-+|-+$/g, "");
-}
-
-export function createBrokerEndpoint(sessionDir, platform = process.platform) {
- if (platform === "win32") {
- const pipeName = sanitizePipeName(`${path.win32.basename(sessionDir)}-codex-app-server`);
- return `pipe:\\\\.\\pipe\\${pipeName}`;
- }
-
- return `unix:${path.join(sessionDir, "broker.sock")}`;
-}
-
-export function parseBrokerEndpoint(endpoint) {
- if (typeof endpoint !== "string" || endpoint.length === 0) {
- throw new Error("Missing broker endpoint.");
- }
-
- if (endpoint.startsWith("pipe:")) {
- const pipePath = endpoint.slice("pipe:".length);
- if (!pipePath) {
- throw new Error("Broker pipe endpoint is missing its path.");
- }
- return { kind: "pipe", path: pipePath };
- }
-
- if (endpoint.startsWith("unix:")) {
- const socketPath = endpoint.slice("unix:".length);
- if (!socketPath) {
- throw new Error("Broker Unix socket endpoint is missing its path.");
- }
- return { kind: "unix", path: socketPath };
- }
-
- throw new Error(`Unsupported broker endpoint: ${endpoint}`);
-}
diff --git a/plugins/codex/scripts/lib/broker-lifecycle.mjs b/plugins/codex/scripts/lib/broker-lifecycle.mjs
deleted file mode 100644
index ef763819..00000000
--- a/plugins/codex/scripts/lib/broker-lifecycle.mjs
+++ /dev/null
@@ -1,209 +0,0 @@
-import fs from "node:fs";
-import net from "node:net";
-import os from "node:os";
-import path from "node:path";
-import process from "node:process";
-import { spawn } from "node:child_process";
-import { fileURLToPath } from "node:url";
-import { createBrokerEndpoint, parseBrokerEndpoint } from "./broker-endpoint.mjs";
-import { resolveStateDir } from "./state.mjs";
-
-export const PID_FILE_ENV = "CODEX_COMPANION_APP_SERVER_PID_FILE";
-export const LOG_FILE_ENV = "CODEX_COMPANION_APP_SERVER_LOG_FILE";
-const BROKER_STATE_FILE = "broker.json";
-
-export function createBrokerSessionDir(prefix = "cxc-") {
- return fs.mkdtempSync(path.join(os.tmpdir(), prefix));
-}
-
-function connectToEndpoint(endpoint) {
- const target = parseBrokerEndpoint(endpoint);
- return net.createConnection({ path: target.path });
-}
-
-export async function waitForBrokerEndpoint(endpoint, timeoutMs = 2000) {
- const start = Date.now();
- while (Date.now() - start < timeoutMs) {
- const ready = await new Promise((resolve) => {
- const socket = connectToEndpoint(endpoint);
- socket.on("connect", () => {
- socket.end();
- resolve(true);
- });
- socket.on("error", () => resolve(false));
- });
- if (ready) {
- return true;
- }
- await new Promise((resolve) => setTimeout(resolve, 50));
- }
- return false;
-}
-
-export async function sendBrokerShutdown(endpoint) {
- await new Promise((resolve) => {
- const socket = connectToEndpoint(endpoint);
- socket.setEncoding("utf8");
- socket.on("connect", () => {
- socket.write(`${JSON.stringify({ id: 1, method: "broker/shutdown", params: {} })}\n`);
- });
- socket.on("data", () => {
- socket.end();
- resolve();
- });
- socket.on("error", resolve);
- socket.on("close", resolve);
- });
-}
-
-export function spawnBrokerProcess({ scriptPath, cwd, endpoint, pidFile, logFile, env = process.env }) {
- const logFd = fs.openSync(logFile, "a");
- const child = spawn(process.execPath, [scriptPath, "serve", "--endpoint", endpoint, "--cwd", cwd, "--pid-file", pidFile], {
- cwd,
- env,
- detached: true,
- stdio: ["ignore", logFd, logFd]
- });
- child.unref();
- fs.closeSync(logFd);
- return child;
-}
-
-function resolveBrokerStateFile(cwd) {
- return path.join(resolveStateDir(cwd), BROKER_STATE_FILE);
-}
-
-export function loadBrokerSession(cwd) {
- const stateFile = resolveBrokerStateFile(cwd);
- if (!fs.existsSync(stateFile)) {
- return null;
- }
-
- try {
- return JSON.parse(fs.readFileSync(stateFile, "utf8"));
- } catch {
- return null;
- }
-}
-
-export function saveBrokerSession(cwd, session) {
- const stateDir = resolveStateDir(cwd);
- fs.mkdirSync(stateDir, { recursive: true });
- fs.writeFileSync(resolveBrokerStateFile(cwd), `${JSON.stringify(session, null, 2)}\n`, "utf8");
-}
-
-export function clearBrokerSession(cwd) {
- const stateFile = resolveBrokerStateFile(cwd);
- if (fs.existsSync(stateFile)) {
- fs.unlinkSync(stateFile);
- }
-}
-
-async function isBrokerEndpointReady(endpoint) {
- if (!endpoint) {
- return false;
- }
- try {
- return await waitForBrokerEndpoint(endpoint, 150);
- } catch {
- return false;
- }
-}
-
-export async function ensureBrokerSession(cwd, options = {}) {
- const existing = loadBrokerSession(cwd);
- if (existing && (await isBrokerEndpointReady(existing.endpoint))) {
- return existing;
- }
-
- if (existing) {
- teardownBrokerSession({
- endpoint: existing.endpoint ?? null,
- pidFile: existing.pidFile ?? null,
- logFile: existing.logFile ?? null,
- sessionDir: existing.sessionDir ?? null,
- pid: existing.pid ?? null,
- killProcess: options.killProcess ?? null
- });
- clearBrokerSession(cwd);
- }
-
- const sessionDir = createBrokerSessionDir();
- const endpointFactory = options.createBrokerEndpoint ?? createBrokerEndpoint;
- const endpoint = endpointFactory(sessionDir, options.platform);
- const pidFile = path.join(sessionDir, "broker.pid");
- const logFile = path.join(sessionDir, "broker.log");
- const scriptPath =
- options.scriptPath ??
- fileURLToPath(new URL("../app-server-broker.mjs", import.meta.url));
-
- const child = spawnBrokerProcess({
- scriptPath,
- cwd,
- endpoint,
- pidFile,
- logFile,
- env: options.env ?? process.env
- });
-
- const ready = await waitForBrokerEndpoint(endpoint, options.timeoutMs ?? 2000);
- if (!ready) {
- teardownBrokerSession({
- endpoint,
- pidFile,
- logFile,
- sessionDir,
- pid: child.pid ?? null,
- killProcess: options.killProcess ?? null
- });
- return null;
- }
-
- const session = {
- endpoint,
- pidFile,
- logFile,
- sessionDir,
- pid: child.pid ?? null
- };
- saveBrokerSession(cwd, session);
- return session;
-}
-
-export function teardownBrokerSession({ endpoint = null, pidFile, logFile, sessionDir = null, pid = null, killProcess = null }) {
- if (Number.isFinite(pid) && killProcess) {
- try {
- killProcess(pid);
- } catch {
- // Ignore missing or already-exited broker processes.
- }
- }
-
- if (pidFile && fs.existsSync(pidFile)) {
- fs.unlinkSync(pidFile);
- }
-
- if (logFile && fs.existsSync(logFile)) {
- fs.unlinkSync(logFile);
- }
-
- if (endpoint) {
- try {
- const target = parseBrokerEndpoint(endpoint);
- if (target.kind === "unix" && fs.existsSync(target.path)) {
- fs.unlinkSync(target.path);
- }
- } catch {
- // Ignore malformed or already-removed broker endpoints during teardown.
- }
- }
-
- const resolvedSessionDir = sessionDir ?? (pidFile ? path.dirname(pidFile) : logFile ? path.dirname(logFile) : null);
- if (resolvedSessionDir && fs.existsSync(resolvedSessionDir)) {
- try {
- fs.rmdirSync(resolvedSessionDir);
- } catch {
- // Ignore non-empty or missing directories.
- }
- }
-}
diff --git a/plugins/codex/scripts/lib/codex.mjs b/plugins/codex/scripts/lib/codex.mjs
deleted file mode 100644
index f2fe88bd..00000000
--- a/plugins/codex/scripts/lib/codex.mjs
+++ /dev/null
@@ -1,1088 +0,0 @@
-/**
- * @typedef {import("./app-server-protocol").AppServerNotification} AppServerNotification
- * @typedef {import("./app-server-protocol").ReviewTarget} ReviewTarget
- * @typedef {import("./app-server-protocol").ThreadItem} ThreadItem
- * @typedef {import("./app-server-protocol").ThreadResumeParams} ThreadResumeParams
- * @typedef {import("./app-server-protocol").ThreadStartParams} ThreadStartParams
- * @typedef {import("./app-server-protocol").Turn} Turn
- * @typedef {import("./app-server-protocol").UserInput} UserInput
- * @typedef {((update: string | { message: string, phase: string | null, threadId?: string | null, turnId?: string | null, stderrMessage?: string | null, logTitle?: string | null, logBody?: string | null }) => void)} ProgressReporter
- * @typedef {{
- * threadId: string,
- * rootThreadId: string,
- * threadIds: Set,
- * threadTurnIds: Map,
- * threadLabels: Map,
- * turnId: string | null,
- * bufferedNotifications: AppServerNotification[],
- * completion: Promise,
- * resolveCompletion: (state: TurnCaptureState) => void,
- * rejectCompletion: (error: unknown) => void,
- * finalTurn: Turn | null,
- * completed: boolean,
- * finalAnswerSeen: boolean,
- * pendingCollaborations: Set,
- * activeSubagentTurns: Set,
- * completionTimer: ReturnType | null,
- * lastAgentMessage: string,
- * reviewText: string,
- * reasoningSummary: string[],
- * error: unknown,
- * messages: Array<{ lifecycle: string, phase: string | null, text: string }>,
- * fileChanges: ThreadItem[],
- * commandExecutions: ThreadItem[],
- * onProgress: ProgressReporter | null
- * }} TurnCaptureState
- */
-import { readJsonFile } from "./fs.mjs";
-import { BROKER_BUSY_RPC_CODE, BROKER_ENDPOINT_ENV, CodexAppServerClient } from "./app-server.mjs";
-import { loadBrokerSession } from "./broker-lifecycle.mjs";
-import { binaryAvailable } from "./process.mjs";
-
-const SERVICE_NAME = "claude_code_codex_plugin";
-const TASK_THREAD_PREFIX = "Codex Companion Task";
-const DEFAULT_CONTINUE_PROMPT =
- "Continue from the current thread state. Pick the next highest-value step and follow through until the task is resolved.";
-
-function cleanCodexStderr(stderr) {
- return stderr
- .split(/\r?\n/)
- .map((line) => line.trimEnd())
- .filter((line) => line && !line.startsWith("WARNING: proceeding, even though we could not update PATH:"))
- .join("\n");
-}
-
-/** @returns {ThreadStartParams} */
-function buildThreadParams(cwd, options = {}) {
- return {
- cwd,
- model: options.model ?? null,
- approvalPolicy: options.approvalPolicy ?? "never",
- sandbox: options.sandbox ?? "read-only",
- serviceName: SERVICE_NAME,
- ephemeral: options.ephemeral ?? true,
- experimentalRawEvents: false
- };
-}
-
-/** @returns {ThreadResumeParams} */
-function buildResumeParams(threadId, cwd, options = {}) {
- return {
- threadId,
- cwd,
- model: options.model ?? null,
- approvalPolicy: options.approvalPolicy ?? "never",
- sandbox: options.sandbox ?? "read-only"
- };
-}
-
-/** @returns {UserInput[]} */
-function buildTurnInput(prompt) {
- return [{ type: "text", text: prompt, text_elements: [] }];
-}
-
-function shorten(text, limit = 72) {
- const normalized = String(text ?? "").trim().replace(/\s+/g, " ");
- if (!normalized) {
- return "";
- }
- if (normalized.length <= limit) {
- return normalized;
- }
- return `${normalized.slice(0, limit - 3)}...`;
-}
-
-function looksLikeVerificationCommand(command) {
- return /\b(test|tests|lint|build|typecheck|type-check|check|verify|validate|pytest|jest|vitest|cargo test|npm test|pnpm test|yarn test|go test|mvn test|gradle test|tsc|eslint|ruff)\b/i.test(
- command
- );
-}
-
-function buildTaskThreadName(prompt) {
- const excerpt = shorten(prompt, 56);
- return excerpt ? `${TASK_THREAD_PREFIX}: ${excerpt}` : TASK_THREAD_PREFIX;
-}
-
-function extractThreadId(message) {
- return message?.params?.threadId ?? null;
-}
-
-function extractTurnId(message) {
- if (message?.params?.turnId) {
- return message.params.turnId;
- }
- if (message?.params?.turn?.id) {
- return message.params.turn.id;
- }
- return null;
-}
-
-function collectTouchedFiles(fileChanges) {
- const paths = new Set();
- for (const fileChange of fileChanges) {
- for (const change of fileChange.changes ?? []) {
- if (change.path) {
- paths.add(change.path);
- }
- }
- }
- return [...paths];
-}
-
-function normalizeReasoningText(text) {
- return String(text ?? "").replace(/\s+/g, " ").trim();
-}
-
-function extractReasoningSections(value) {
- if (!value) {
- return [];
- }
-
- if (typeof value === "string") {
- const normalized = normalizeReasoningText(value);
- return normalized ? [normalized] : [];
- }
-
- if (Array.isArray(value)) {
- return value.flatMap((entry) => extractReasoningSections(entry));
- }
-
- if (typeof value === "object") {
- if (typeof value.text === "string") {
- return extractReasoningSections(value.text);
- }
- if ("summary" in value) {
- return extractReasoningSections(value.summary);
- }
- if ("content" in value) {
- return extractReasoningSections(value.content);
- }
- if ("parts" in value) {
- return extractReasoningSections(value.parts);
- }
- }
-
- return [];
-}
-
-function mergeReasoningSections(existingSections, nextSections) {
- const merged = [];
- for (const section of [...existingSections, ...nextSections]) {
- const normalized = normalizeReasoningText(section);
- if (!normalized || merged.includes(normalized)) {
- continue;
- }
- merged.push(normalized);
- }
- return merged;
-}
-
-/**
- * @param {ProgressReporter | null | undefined} onProgress
- * @param {string | null | undefined} message
- * @param {string | null | undefined} [phase]
- */
-function emitProgress(onProgress, message, phase = null, extra = {}) {
- if (!onProgress || !message) {
- return;
- }
- if (!phase && Object.keys(extra).length === 0) {
- onProgress(message);
- return;
- }
- onProgress({ message, phase, ...extra });
-}
-
-function emitLogEvent(onProgress, options = {}) {
- if (!onProgress) {
- return;
- }
-
- onProgress({
- message: options.message ?? "",
- phase: options.phase ?? null,
- stderrMessage: options.stderrMessage ?? null,
- logTitle: options.logTitle ?? null,
- logBody: options.logBody ?? null
- });
-}
-
-function labelForThread(state, threadId) {
- if (!threadId || threadId === state.rootThreadId || threadId === state.threadId) {
- return null;
- }
- return state.threadLabels.get(threadId) ?? threadId;
-}
-
-function registerThread(state, threadId, options = {}) {
- if (!threadId) {
- return;
- }
-
- state.threadIds.add(threadId);
- const label =
- options.threadName ??
- options.name ??
- options.agentNickname ??
- options.agentRole ??
- state.threadLabels.get(threadId) ??
- null;
- if (label) {
- state.threadLabels.set(threadId, label);
- }
-}
-
-function describeStartedItem(state, item) {
- switch (item.type) {
- case "enteredReviewMode":
- return { message: `Reviewer started: ${item.review}`, phase: "reviewing" };
- case "commandExecution":
- return {
- message: `Running command: ${shorten(item.command, 96)}`,
- phase: looksLikeVerificationCommand(item.command) ? "verifying" : "running"
- };
- case "fileChange":
- return { message: `Applying ${item.changes.length} file change(s).`, phase: "editing" };
- case "mcpToolCall":
- return { message: `Calling ${item.server}/${item.tool}.`, phase: "investigating" };
- case "dynamicToolCall":
- return { message: `Running tool: ${item.tool}.`, phase: "investigating" };
- case "collabAgentToolCall": {
- const subagents = (item.receiverThreadIds ?? []).map((threadId) => labelForThread(state, threadId) ?? threadId);
- const summary =
- subagents.length > 0
- ? `Starting subagent ${subagents.join(", ")} via collaboration tool: ${item.tool}.`
- : `Starting collaboration tool: ${item.tool}.`;
- return { message: summary, phase: "investigating" };
- }
- case "webSearch":
- return { message: `Searching: ${shorten(item.query, 96)}`, phase: "investigating" };
- default:
- return null;
- }
-}
-
-function describeCompletedItem(state, item) {
- switch (item.type) {
- case "commandExecution": {
- const exitCode = item.exitCode ?? "?";
- const statusLabel = item.status === "completed" ? "completed" : item.status;
- return {
- message: `Command ${statusLabel}: ${shorten(item.command, 96)} (exit ${exitCode})`,
- phase: looksLikeVerificationCommand(item.command) ? "verifying" : "running"
- };
- }
- case "fileChange":
- return { message: `File changes ${item.status}.`, phase: "editing" };
- case "mcpToolCall":
- return { message: `Tool ${item.server}/${item.tool} ${item.status}.`, phase: "investigating" };
- case "dynamicToolCall":
- return { message: `Tool ${item.tool} ${item.status}.`, phase: "investigating" };
- case "collabAgentToolCall": {
- const subagents = (item.receiverThreadIds ?? []).map((threadId) => labelForThread(state, threadId) ?? threadId);
- const summary =
- subagents.length > 0
- ? `Subagent ${subagents.join(", ")} ${item.status}.`
- : `Collaboration tool ${item.tool} ${item.status}.`;
- return { message: summary, phase: "investigating" };
- }
- case "exitedReviewMode":
- return { message: "Reviewer finished.", phase: "finalizing" };
- default:
- return null;
- }
-}
-
-/** @returns {TurnCaptureState} */
-function createTurnCaptureState(threadId, options = {}) {
- let resolveCompletion;
- let rejectCompletion;
- const completion = new Promise((resolve, reject) => {
- resolveCompletion = resolve;
- rejectCompletion = reject;
- });
-
- return {
- threadId,
- rootThreadId: threadId,
- threadIds: new Set([threadId]),
- threadTurnIds: new Map(),
- threadLabels: new Map(),
- turnId: null,
- bufferedNotifications: [],
- completion,
- resolveCompletion,
- rejectCompletion,
- finalTurn: null,
- completed: false,
- finalAnswerSeen: false,
- pendingCollaborations: new Set(),
- activeSubagentTurns: new Set(),
- completionTimer: null,
- lastAgentMessage: "",
- reviewText: "",
- reasoningSummary: [],
- error: null,
- messages: [],
- fileChanges: [],
- commandExecutions: [],
- onProgress: options.onProgress ?? null
- };
-}
-
-function clearCompletionTimer(state) {
- if (state.completionTimer) {
- clearTimeout(state.completionTimer);
- state.completionTimer = null;
- }
-}
-
-function completeTurn(state, turn = null, options = {}) {
- if (state.completed) {
- return;
- }
-
- clearCompletionTimer(state);
- state.completed = true;
-
- if (turn) {
- state.finalTurn = turn;
- if (!state.turnId) {
- state.turnId = turn.id;
- }
- } else if (!state.finalTurn) {
- state.finalTurn = {
- id: state.turnId ?? "inferred-turn",
- status: "completed"
- };
- }
-
- if (options.inferred) {
- emitProgress(state.onProgress, "Turn completion inferred after the main thread finished and subagent work drained.", "finalizing");
- }
-
- state.resolveCompletion(state);
-}
-
-function scheduleInferredCompletion(state) {
- if (state.completed || state.finalTurn || !state.finalAnswerSeen) {
- return;
- }
-
- if (state.pendingCollaborations.size > 0 || state.activeSubagentTurns.size > 0) {
- return;
- }
-
- clearCompletionTimer(state);
- state.completionTimer = setTimeout(() => {
- state.completionTimer = null;
- if (state.completed || state.finalTurn || !state.finalAnswerSeen) {
- return;
- }
- if (state.pendingCollaborations.size > 0 || state.activeSubagentTurns.size > 0) {
- return;
- }
- completeTurn(state, null, { inferred: true });
- }, 250);
- state.completionTimer.unref?.();
-}
-
-function belongsToTurn(state, message) {
- const messageThreadId = extractThreadId(message);
- if (!messageThreadId || !state.threadIds.has(messageThreadId)) {
- return false;
- }
- const trackedTurnId = state.threadTurnIds.get(messageThreadId) ?? null;
- const messageTurnId = extractTurnId(message);
- return trackedTurnId === null || messageTurnId === null || messageTurnId === trackedTurnId;
-}
-
-function recordItem(state, item, lifecycle, threadId = null) {
- if (item.type === "collabAgentToolCall") {
- if (!threadId || threadId === state.threadId) {
- if (lifecycle === "started" || item.status === "inProgress") {
- state.pendingCollaborations.add(item.id);
- } else if (lifecycle === "completed") {
- state.pendingCollaborations.delete(item.id);
- scheduleInferredCompletion(state);
- }
- }
- for (const receiverThreadId of item.receiverThreadIds ?? []) {
- registerThread(state, receiverThreadId);
- }
- }
-
- if (item.type === "agentMessage") {
- state.messages.push({
- lifecycle,
- phase: item.phase ?? null,
- text: item.text ?? ""
- });
- if (item.text) {
- if (!threadId || threadId === state.threadId) {
- state.lastAgentMessage = item.text;
- if (lifecycle === "completed" && item.phase === "final_answer") {
- state.finalAnswerSeen = true;
- scheduleInferredCompletion(state);
- }
- }
- if (lifecycle === "completed") {
- const sourceLabel = labelForThread(state, threadId);
- emitLogEvent(state.onProgress, {
- message: sourceLabel ? `Subagent ${sourceLabel}: ${shorten(item.text, 96)}` : `Assistant message captured: ${shorten(item.text, 96)}`,
- stderrMessage: null,
- phase: item.phase === "final_answer" ? "finalizing" : null,
- logTitle: sourceLabel ? `Subagent ${sourceLabel} message` : "Assistant message",
- logBody: item.text
- });
- }
- }
- return;
- }
-
- if (item.type === "exitedReviewMode") {
- state.reviewText = item.review ?? "";
- if (lifecycle === "completed" && item.review) {
- emitLogEvent(state.onProgress, {
- message: "Review output captured.",
- stderrMessage: null,
- phase: "finalizing",
- logTitle: "Review output",
- logBody: item.review
- });
- }
- return;
- }
-
- if (item.type === "reasoning" && lifecycle === "completed") {
- const nextSections = extractReasoningSections(item.summary);
- state.reasoningSummary = mergeReasoningSections(state.reasoningSummary, nextSections);
- if (nextSections.length > 0) {
- const sourceLabel = labelForThread(state, threadId);
- emitLogEvent(state.onProgress, {
- message: sourceLabel
- ? `Subagent ${sourceLabel} reasoning: ${shorten(nextSections[0], 96)}`
- : `Reasoning summary captured: ${shorten(nextSections[0], 96)}`,
- stderrMessage: null,
- logTitle: sourceLabel ? `Subagent ${sourceLabel} reasoning summary` : "Reasoning summary",
- logBody: nextSections.map((section) => `- ${section}`).join("\n")
- });
- }
- return;
- }
-
- if (item.type === "fileChange" && lifecycle === "completed") {
- state.fileChanges.push(item);
- return;
- }
-
- if (item.type === "commandExecution" && lifecycle === "completed") {
- state.commandExecutions.push(item);
- }
-}
-
-function applyTurnNotification(state, message) {
- switch (message.method) {
- case "thread/started":
- registerThread(state, message.params.thread.id, {
- threadName: message.params.thread.name,
- name: message.params.thread.name,
- agentNickname: message.params.thread.agentNickname,
- agentRole: message.params.thread.agentRole
- });
- break;
- case "thread/name/updated":
- registerThread(state, message.params.threadId, {
- threadName: message.params.threadName ?? null
- });
- break;
- case "turn/started":
- registerThread(state, message.params.threadId);
- state.threadTurnIds.set(message.params.threadId, message.params.turn.id);
- if ((message.params.threadId ?? null) !== state.threadId) {
- state.activeSubagentTurns.add(message.params.threadId);
- }
- emitProgress(
- state.onProgress,
- `Turn started (${message.params.turn.id}).`,
- "starting",
- (message.params.threadId ?? null) === state.threadId
- ? {
- threadId: message.params.threadId ?? null,
- turnId: message.params.turn.id ?? null
- }
- : {}
- );
- break;
- case "item/started":
- recordItem(state, message.params.item, "started", message.params.threadId ?? null);
- {
- const update = describeStartedItem(state, message.params.item);
- emitProgress(state.onProgress, update?.message, update?.phase ?? null);
- }
- break;
- case "item/completed":
- recordItem(state, message.params.item, "completed", message.params.threadId ?? null);
- {
- const update = describeCompletedItem(state, message.params.item);
- emitProgress(state.onProgress, update?.message, update?.phase ?? null);
- }
- break;
- case "error":
- state.error = message.params.error;
- emitProgress(state.onProgress, `Codex error: ${message.params.error.message}`, "failed");
- break;
- case "turn/completed":
- if ((message.params.threadId ?? null) !== state.threadId) {
- state.activeSubagentTurns.delete(message.params.threadId);
- scheduleInferredCompletion(state);
- break;
- }
- emitProgress(
- state.onProgress,
- `Turn ${message.params.turn.status === "completed" ? "completed" : message.params.turn.status}.`,
- "finalizing"
- );
- completeTurn(state, message.params.turn);
- break;
- default:
- break;
- }
-}
-
-async function captureTurn(client, threadId, startRequest, options = {}) {
- const state = createTurnCaptureState(threadId, options);
- const previousHandler = client.notificationHandler;
-
- client.setNotificationHandler((message) => {
- if (!state.turnId) {
- state.bufferedNotifications.push(message);
- return;
- }
-
- if (message.method === "thread/started" || message.method === "thread/name/updated") {
- applyTurnNotification(state, message);
- return;
- }
-
- if (!belongsToTurn(state, message)) {
- if (previousHandler) {
- previousHandler(message);
- }
- return;
- }
-
- applyTurnNotification(state, message);
- });
-
- try {
- const response = await startRequest();
- options.onResponse?.(response, state);
- state.turnId = response.turn?.id ?? null;
- if (state.turnId) {
- state.threadTurnIds.set(state.threadId, state.turnId);
- }
- for (const message of state.bufferedNotifications) {
- if (belongsToTurn(state, message)) {
- applyTurnNotification(state, message);
- } else {
- if (previousHandler) {
- previousHandler(message);
- }
- }
- }
- state.bufferedNotifications.length = 0;
-
- if (response.turn?.status && response.turn.status !== "inProgress") {
- completeTurn(state, response.turn);
- }
-
- return await state.completion;
- } finally {
- clearCompletionTimer(state);
- client.setNotificationHandler(previousHandler ?? null);
- }
-}
-
-async function withAppServer(cwd, fn) {
- let client = null;
- try {
- client = await CodexAppServerClient.connect(cwd);
- const result = await fn(client);
- await client.close();
- return result;
- } catch (error) {
- const brokerRequested = client?.transport === "broker" || Boolean(process.env[BROKER_ENDPOINT_ENV]);
- const shouldRetryDirect =
- (client?.transport === "broker" && error?.rpcCode === BROKER_BUSY_RPC_CODE) ||
- (brokerRequested && (error?.code === "ENOENT" || error?.code === "ECONNREFUSED"));
-
- if (client) {
- await client.close().catch(() => {});
- client = null;
- }
-
- if (!shouldRetryDirect) {
- throw error;
- }
-
- const directClient = await CodexAppServerClient.connect(cwd, { disableBroker: true });
- try {
- return await fn(directClient);
- } finally {
- await directClient.close();
- }
- }
-}
-
-async function startThread(client, cwd, options = {}) {
- const response = await client.request("thread/start", buildThreadParams(cwd, options));
- const threadId = response.thread.id;
- if (options.threadName) {
- try {
- await client.request("thread/name/set", { threadId, name: options.threadName });
- } catch (err) {
- // Only suppress "unknown variant/method" errors from older CLI versions
- // that don't support thread/name/set. Rethrow auth, network, or server errors.
- const msg = String(err?.message ?? err ?? "");
- if (!msg.includes("unknown variant") && !msg.includes("unknown method")) {
- throw err;
- }
- }
- }
- return response;
-}
-
-async function resumeThread(client, threadId, cwd, options = {}) {
- return client.request("thread/resume", buildResumeParams(threadId, cwd, options));
-}
-
-function buildResultStatus(turnState) {
- return turnState.finalTurn?.status === "completed" ? 0 : 1;
-}
-
-const BUILTIN_PROVIDER_LABELS = new Map([
- ["openai", "OpenAI"],
- ["ollama", "Ollama"],
- ["lmstudio", "LM Studio"]
-]);
-
-function normalizeProviderId(value) {
- const providerId = typeof value === "string" ? value.trim() : "";
- return providerId || null;
-}
-
-function formatProviderLabel(providerId, providerConfig = null) {
- const configuredName = typeof providerConfig?.name === "string" ? providerConfig.name.trim() : "";
- if (configuredName) {
- return configuredName;
- }
- if (!providerId) {
- return "The active provider";
- }
- return BUILTIN_PROVIDER_LABELS.get(providerId) ?? providerId;
-}
-
-function buildAuthStatus(fields = {}) {
- return {
- available: true,
- loggedIn: false,
- detail: "not authenticated",
- source: "unknown",
- authMethod: null,
- verified: null,
- requiresOpenaiAuth: null,
- provider: null,
- ...fields
- };
-}
-
-function resolveProviderConfig(configResponse) {
- const config = configResponse?.config;
- if (!config || typeof config !== "object") {
- return {
- providerId: null,
- providerConfig: null
- };
- }
-
- const providerId = normalizeProviderId(config.model_provider);
- const providers =
- config.model_providers && typeof config.model_providers === "object" && !Array.isArray(config.model_providers)
- ? config.model_providers
- : null;
- const providerConfig =
- providerId && providers?.[providerId] && typeof providers[providerId] === "object" ? providers[providerId] : null;
-
- return {
- providerId,
- providerConfig
- };
-}
-
-function buildAppServerAuthStatus(accountResponse, configResponse) {
- const account = accountResponse?.account ?? null;
- const requiresOpenaiAuth =
- typeof accountResponse?.requiresOpenaiAuth === "boolean" ? accountResponse.requiresOpenaiAuth : null;
- const { providerId, providerConfig } = resolveProviderConfig(configResponse);
- const providerLabel = formatProviderLabel(providerId, providerConfig);
-
- if (account?.type === "chatgpt") {
- const email = typeof account.email === "string" && account.email.trim() ? account.email.trim() : null;
- return buildAuthStatus({
- loggedIn: true,
- detail: email ? `ChatGPT login active for ${email}` : "ChatGPT login active",
- source: "app-server",
- authMethod: "chatgpt",
- verified: true,
- requiresOpenaiAuth,
- provider: providerId
- });
- }
-
- if (account?.type === "apiKey") {
- return buildAuthStatus({
- loggedIn: true,
- detail: "API key configured (unverified)",
- source: "app-server",
- authMethod: "apiKey",
- verified: false,
- requiresOpenaiAuth,
- provider: providerId
- });
- }
-
- if (requiresOpenaiAuth === false) {
- return buildAuthStatus({
- loggedIn: true,
- detail: `${providerLabel} is configured and does not require OpenAI authentication`,
- source: "app-server",
- requiresOpenaiAuth,
- provider: providerId
- });
- }
-
- return buildAuthStatus({
- loggedIn: false,
- detail: `${providerLabel} requires OpenAI authentication`,
- source: "app-server",
- requiresOpenaiAuth,
- provider: providerId
- });
-}
-
-async function getCodexAuthStatusFromClient(client, cwd) {
- try {
- const accountResponse = await client.request("account/read", { refreshToken: false });
- const configResponse = await client.request("config/read", {
- includeLayers: false,
- cwd
- });
-
- return buildAppServerAuthStatus(accountResponse, configResponse);
- } catch (error) {
- return buildAuthStatus({
- loggedIn: false,
- detail: error instanceof Error ? error.message : String(error),
- source: "app-server"
- });
- }
-}
-
-export function getCodexAvailability(cwd) {
- const versionStatus = binaryAvailable("codex", ["--version"], { cwd });
- if (!versionStatus.available) {
- return versionStatus;
- }
-
- const appServerStatus = binaryAvailable("codex", ["app-server", "--help"], { cwd });
- if (!appServerStatus.available) {
- return {
- available: false,
- detail: `${versionStatus.detail}; advanced runtime unavailable: ${appServerStatus.detail}`
- };
- }
-
- return {
- available: true,
- detail: `${versionStatus.detail}; advanced runtime available`
- };
-}
-
-export function getSessionRuntimeStatus(env = process.env, cwd = process.cwd()) {
- const endpoint = env?.[BROKER_ENDPOINT_ENV] ?? loadBrokerSession(cwd)?.endpoint ?? null;
- if (endpoint) {
- return {
- mode: "shared",
- label: "shared session",
- detail: "This Claude session is configured to reuse one shared Codex runtime.",
- endpoint
- };
- }
-
- return {
- mode: "direct",
- label: "direct startup",
- detail: "No shared Codex runtime is active yet. The first review or task command will start one on demand.",
- endpoint: null
- };
-}
-
-export async function getCodexAuthStatus(cwd, options = {}) {
- const availability = getCodexAvailability(cwd);
- if (!availability.available) {
- return {
- available: false,
- loggedIn: false,
- detail: availability.detail,
- source: "availability",
- authMethod: null,
- verified: null,
- requiresOpenaiAuth: null,
- provider: null
- };
- }
-
- let client = null;
- try {
- client = await CodexAppServerClient.connect(cwd, {
- env: options.env,
- reuseExistingBroker: true
- });
- return await getCodexAuthStatusFromClient(client, cwd);
- } catch (error) {
- return buildAuthStatus({
- loggedIn: false,
- detail: error instanceof Error ? error.message : String(error),
- source: "app-server"
- });
- } finally {
- if (client) {
- await client.close().catch(() => {});
- }
- }
-}
-
-export async function interruptAppServerTurn(cwd, { threadId, turnId }) {
- if (!threadId || !turnId) {
- return {
- attempted: false,
- interrupted: false,
- transport: null,
- detail: "missing threadId or turnId"
- };
- }
-
- const availability = getCodexAvailability(cwd);
- if (!availability.available) {
- return {
- attempted: false,
- interrupted: false,
- transport: null,
- detail: availability.detail
- };
- }
-
- let client = null;
- try {
- client = await CodexAppServerClient.connect(cwd, { reuseExistingBroker: true });
- await client.request("turn/interrupt", { threadId, turnId });
- return {
- attempted: true,
- interrupted: true,
- transport: client.transport,
- detail: `Interrupted ${turnId} on ${threadId}.`
- };
- } catch (error) {
- return {
- attempted: true,
- interrupted: false,
- transport: client?.transport ?? null,
- detail: error instanceof Error ? error.message : String(error)
- };
- } finally {
- await client?.close().catch(() => {});
- }
-}
-
-export async function runAppServerReview(cwd, options = {}) {
- const availability = getCodexAvailability(cwd);
- if (!availability.available) {
- throw new Error("Codex CLI is not installed or is missing required runtime support. Install it with `npm install -g @openai/codex`, then rerun `/codex:setup`.");
- }
-
- return withAppServer(cwd, async (client) => {
- emitProgress(options.onProgress, "Starting Codex review thread.", "starting");
- const thread = await startThread(client, cwd, {
- model: options.model,
- sandbox: "read-only",
- ephemeral: true,
- threadName: options.threadName
- });
- const sourceThreadId = thread.thread.id;
- emitProgress(options.onProgress, `Thread ready (${sourceThreadId}).`, "starting", {
- threadId: sourceThreadId
- });
- const delivery = options.delivery ?? "inline";
-
- const turnState = await captureTurn(
- client,
- sourceThreadId,
- () =>
- client.request("review/start", {
- threadId: sourceThreadId,
- delivery,
- target: options.target
- }),
- {
- onProgress: options.onProgress,
- onResponse(response, state) {
- if (response.reviewThreadId) {
- state.threadIds.add(response.reviewThreadId);
- if (delivery === "detached") {
- state.threadId = response.reviewThreadId;
- }
- }
- }
- }
- );
-
- return {
- status: buildResultStatus(turnState),
- threadId: turnState.threadId,
- sourceThreadId,
- turnId: turnState.turnId,
- reviewText: turnState.reviewText,
- reasoningSummary: turnState.reasoningSummary,
- turn: turnState.finalTurn,
- error: turnState.error,
- stderr: cleanCodexStderr(client.stderr)
- };
- });
-}
-
-export async function runAppServerTurn(cwd, options = {}) {
- const availability = getCodexAvailability(cwd);
- if (!availability.available) {
- throw new Error("Codex CLI is not installed or is missing required runtime support. Install it with `npm install -g @openai/codex`, then rerun `/codex:setup`.");
- }
-
- return withAppServer(cwd, async (client) => {
- let threadId;
-
- if (options.resumeThreadId) {
- emitProgress(options.onProgress, `Resuming thread ${options.resumeThreadId}.`, "starting");
- const response = await resumeThread(client, options.resumeThreadId, cwd, {
- model: options.model,
- sandbox: options.sandbox,
- ephemeral: false
- });
- threadId = response.thread.id;
- } else {
- emitProgress(options.onProgress, "Starting Codex task thread.", "starting");
- const response = await startThread(client, cwd, {
- model: options.model,
- sandbox: options.sandbox,
- ephemeral: options.persistThread ? false : true,
- threadName: options.persistThread ? options.threadName : options.threadName ?? null
- });
- threadId = response.thread.id;
- }
-
- emitProgress(options.onProgress, `Thread ready (${threadId}).`, "starting", {
- threadId
- });
-
- const prompt = options.prompt?.trim() || options.defaultPrompt || "";
- if (!prompt) {
- throw new Error("A prompt is required for this Codex run.");
- }
-
- const turnState = await captureTurn(
- client,
- threadId,
- () =>
- client.request("turn/start", {
- threadId,
- input: buildTurnInput(prompt),
- model: options.model ?? null,
- effort: options.effort ?? null,
- outputSchema: options.outputSchema ?? null
- }),
- { onProgress: options.onProgress }
- );
-
- return {
- status: buildResultStatus(turnState),
- threadId,
- turnId: turnState.turnId,
- finalMessage: turnState.lastAgentMessage,
- reasoningSummary: turnState.reasoningSummary,
- turn: turnState.finalTurn,
- error: turnState.error,
- stderr: cleanCodexStderr(client.stderr),
- fileChanges: turnState.fileChanges,
- touchedFiles: collectTouchedFiles(turnState.fileChanges),
- commandExecutions: turnState.commandExecutions
- };
- });
-}
-
-export async function findLatestTaskThread(cwd) {
- const availability = getCodexAvailability(cwd);
- if (!availability.available) {
- throw new Error("Codex CLI is not installed or is missing required runtime support. Install it with `npm install -g @openai/codex`, then rerun `/codex:setup`.");
- }
-
- return withAppServer(cwd, async (client) => {
- const response = await client.request("thread/list", {
- cwd,
- limit: 20,
- sortKey: "updated_at",
- sourceKinds: ["appServer"],
- searchTerm: TASK_THREAD_PREFIX
- });
-
- return (
- response.data.find((thread) => typeof thread.name === "string" && thread.name.startsWith(TASK_THREAD_PREFIX)) ??
- null
- );
- });
-}
-
-export function buildPersistentTaskThreadName(prompt) {
- return buildTaskThreadName(prompt);
-}
-
-export function parseStructuredOutput(rawOutput, fallback = {}) {
- if (!rawOutput) {
- return {
- parsed: null,
- parseError: fallback.failureMessage ?? "Codex did not return a final structured message.",
- rawOutput: rawOutput ?? "",
- ...fallback
- };
- }
-
- try {
- return {
- parsed: JSON.parse(rawOutput),
- parseError: null,
- rawOutput,
- ...fallback
- };
- } catch (error) {
- return {
- parsed: null,
- parseError: error.message,
- rawOutput,
- ...fallback
- };
- }
-}
-
-export function readOutputSchema(schemaPath) {
- return readJsonFile(schemaPath);
-}
-
-export { DEFAULT_CONTINUE_PROMPT, TASK_THREAD_PREFIX };
diff --git a/plugins/codex/scripts/lib/fs.mjs b/plugins/codex/scripts/lib/fs.mjs
deleted file mode 100644
index 02752244..00000000
--- a/plugins/codex/scripts/lib/fs.mjs
+++ /dev/null
@@ -1,40 +0,0 @@
-import fs from "node:fs";
-import os from "node:os";
-import path from "node:path";
-
-export function ensureAbsolutePath(cwd, maybePath) {
- return path.isAbsolute(maybePath) ? maybePath : path.resolve(cwd, maybePath);
-}
-
-export function createTempDir(prefix = "codex-plugin-") {
- return fs.mkdtempSync(path.join(os.tmpdir(), prefix));
-}
-
-export function readJsonFile(filePath) {
- return JSON.parse(fs.readFileSync(filePath, "utf8"));
-}
-
-export function writeJsonFile(filePath, value) {
- fs.writeFileSync(filePath, `${JSON.stringify(value, null, 2)}\n`, "utf8");
-}
-
-export function safeReadFile(filePath) {
- return fs.existsSync(filePath) ? fs.readFileSync(filePath, "utf8") : "";
-}
-
-export function isProbablyText(buffer) {
- const sample = buffer.subarray(0, Math.min(buffer.length, 4096));
- for (const value of sample) {
- if (value === 0) {
- return false;
- }
- }
- return true;
-}
-
-export function readStdinIfPiped() {
- if (process.stdin.isTTY) {
- return "";
- }
- return fs.readFileSync(0, "utf8");
-}
diff --git a/plugins/codex/scripts/lib/git.mjs b/plugins/codex/scripts/lib/git.mjs
deleted file mode 100644
index 1749cfc8..00000000
--- a/plugins/codex/scripts/lib/git.mjs
+++ /dev/null
@@ -1,346 +0,0 @@
-import fs from "node:fs";
-import path from "node:path";
-
-import { isProbablyText } from "./fs.mjs";
-import { formatCommandFailure, runCommand, runCommandChecked } from "./process.mjs";
-
-const MAX_UNTRACKED_BYTES = 24 * 1024;
-const DEFAULT_INLINE_DIFF_MAX_FILES = 2;
-const DEFAULT_INLINE_DIFF_MAX_BYTES = 256 * 1024;
-
-function git(cwd, args, options = {}) {
- return runCommand("git", args, { cwd, ...options });
-}
-
-function gitChecked(cwd, args, options = {}) {
- return runCommandChecked("git", args, { cwd, ...options });
-}
-
-function listUniqueFiles(...groups) {
- return [...new Set(groups.flat().filter(Boolean))].sort();
-}
-
-function normalizeMaxInlineFiles(value) {
- const parsed = Number(value);
- if (!Number.isFinite(parsed) || parsed < 0) {
- return DEFAULT_INLINE_DIFF_MAX_FILES;
- }
- return Math.floor(parsed);
-}
-
-function normalizeMaxInlineDiffBytes(value) {
- const parsed = Number(value);
- if (!Number.isFinite(parsed) || parsed < 0) {
- return DEFAULT_INLINE_DIFF_MAX_BYTES;
- }
- return Math.floor(parsed);
-}
-
-function measureGitOutputBytes(cwd, args, maxBytes) {
- const result = git(cwd, args, { maxBuffer: maxBytes + 1 });
- if (result.error && /** @type {NodeJS.ErrnoException} */ (result.error).code === "ENOBUFS") {
- return maxBytes + 1;
- }
- if (result.error) {
- throw result.error;
- }
- if (result.status !== 0) {
- throw new Error(formatCommandFailure(result));
- }
- return Buffer.byteLength(result.stdout, "utf8");
-}
-
-function measureCombinedGitOutputBytes(cwd, argSets, maxBytes) {
- let totalBytes = 0;
- for (const args of argSets) {
- const remainingBytes = maxBytes - totalBytes;
- if (remainingBytes < 0) {
- return maxBytes + 1;
- }
- totalBytes += measureGitOutputBytes(cwd, args, remainingBytes);
- if (totalBytes > maxBytes) {
- return totalBytes;
- }
- }
- return totalBytes;
-}
-
-function buildBranchComparison(cwd, baseRef) {
- const mergeBase = gitChecked(cwd, ["merge-base", "HEAD", baseRef]).stdout.trim();
- return {
- mergeBase,
- commitRange: `${mergeBase}..HEAD`,
- reviewRange: `${baseRef}...HEAD`
- };
-}
-
-export function ensureGitRepository(cwd) {
- const result = git(cwd, ["rev-parse", "--show-toplevel"]);
- const errorCode = result.error && "code" in result.error ? result.error.code : null;
- if (errorCode === "ENOENT") {
- throw new Error("git is not installed. Install Git and retry.");
- }
- if (result.status !== 0) {
- throw new Error("This command must run inside a Git repository.");
- }
- return result.stdout.trim();
-}
-
-export function getRepoRoot(cwd) {
- return gitChecked(cwd, ["rev-parse", "--show-toplevel"]).stdout.trim();
-}
-
-export function detectDefaultBranch(cwd) {
- const symbolic = git(cwd, ["symbolic-ref", "refs/remotes/origin/HEAD"]);
- if (symbolic.status === 0) {
- const remoteHead = symbolic.stdout.trim();
- if (remoteHead.startsWith("refs/remotes/origin/")) {
- return remoteHead.replace("refs/remotes/origin/", "");
- }
- }
-
- const candidates = ["main", "master", "trunk"];
- for (const candidate of candidates) {
- const local = git(cwd, ["show-ref", "--verify", "--quiet", `refs/heads/${candidate}`]);
- if (local.status === 0) {
- return candidate;
- }
- const remote = git(cwd, ["show-ref", "--verify", "--quiet", `refs/remotes/origin/${candidate}`]);
- if (remote.status === 0) {
- return `origin/${candidate}`;
- }
- }
-
- throw new Error("Unable to detect the repository default branch. Pass --base ][ or use --scope working-tree.");
-}
-
-export function getCurrentBranch(cwd) {
- return gitChecked(cwd, ["branch", "--show-current"]).stdout.trim() || "HEAD";
-}
-
-export function getWorkingTreeState(cwd) {
- const staged = gitChecked(cwd, ["diff", "--cached", "--name-only"]).stdout.trim().split("\n").filter(Boolean);
- const unstaged = gitChecked(cwd, ["diff", "--name-only"]).stdout.trim().split("\n").filter(Boolean);
- const untracked = gitChecked(cwd, ["ls-files", "--others", "--exclude-standard"]).stdout.trim().split("\n").filter(Boolean);
-
- return {
- staged,
- unstaged,
- untracked,
- isDirty: staged.length > 0 || unstaged.length > 0 || untracked.length > 0
- };
-}
-
-export function resolveReviewTarget(cwd, options = {}) {
- ensureGitRepository(cwd);
-
- const requestedScope = options.scope ?? "auto";
- const baseRef = options.base ?? null;
- const state = getWorkingTreeState(cwd);
- const supportedScopes = new Set(["auto", "working-tree", "branch"]);
-
- if (baseRef) {
- return {
- mode: "branch",
- label: `branch diff against ${baseRef}`,
- baseRef,
- explicit: true
- };
- }
-
- if (requestedScope === "working-tree") {
- return {
- mode: "working-tree",
- label: "working tree diff",
- explicit: true
- };
- }
-
- if (!supportedScopes.has(requestedScope)) {
- throw new Error(
- `Unsupported review scope "${requestedScope}". Use one of: auto, working-tree, branch, or pass --base ][.`
- );
- }
-
- if (requestedScope === "branch") {
- const detectedBase = detectDefaultBranch(cwd);
- return {
- mode: "branch",
- label: `branch diff against ${detectedBase}`,
- baseRef: detectedBase,
- explicit: true
- };
- }
-
- if (state.isDirty) {
- return {
- mode: "working-tree",
- label: "working tree diff",
- explicit: false
- };
- }
-
- const detectedBase = detectDefaultBranch(cwd);
- return {
- mode: "branch",
- label: `branch diff against ${detectedBase}`,
- baseRef: detectedBase,
- explicit: false
- };
-}
-
-function formatSection(title, body) {
- return [`## ${title}`, "", body.trim() ? body.trim() : "(none)", ""].join("\n");
-}
-
-function formatUntrackedFile(cwd, relativePath) {
- const absolutePath = path.join(cwd, relativePath);
- let stat;
- try {
- stat = fs.statSync(absolutePath);
- } catch {
- return `### ${relativePath}\n(skipped: broken symlink or unreadable file)`;
- }
- if (stat.isDirectory()) {
- return `### ${relativePath}\n(skipped: directory)`;
- }
- if (stat.size > MAX_UNTRACKED_BYTES) {
- return `### ${relativePath}\n(skipped: ${stat.size} bytes exceeds ${MAX_UNTRACKED_BYTES} byte limit)`;
- }
-
- let buffer;
- try {
- buffer = fs.readFileSync(absolutePath);
- } catch {
- return `### ${relativePath}\n(skipped: broken symlink or unreadable file)`;
- }
- if (!isProbablyText(buffer)) {
- return `### ${relativePath}\n(skipped: binary file)`;
- }
-
- return [`### ${relativePath}`, "```", buffer.toString("utf8").trimEnd(), "```"].join("\n");
-}
-
-function collectWorkingTreeContext(cwd, state, options = {}) {
- const includeDiff = options.includeDiff !== false;
- const status = gitChecked(cwd, ["status", "--short", "--untracked-files=all"]).stdout.trim();
- const changedFiles = listUniqueFiles(state.staged, state.unstaged, state.untracked);
-
- let parts;
- if (includeDiff) {
- const stagedDiff = gitChecked(cwd, ["diff", "--cached", "--binary", "--no-ext-diff", "--submodule=diff"]).stdout;
- const unstagedDiff = gitChecked(cwd, ["diff", "--binary", "--no-ext-diff", "--submodule=diff"]).stdout;
- const untrackedBody = state.untracked.map((file) => formatUntrackedFile(cwd, file)).join("\n\n");
- parts = [
- formatSection("Git Status", status),
- formatSection("Staged Diff", stagedDiff),
- formatSection("Unstaged Diff", unstagedDiff),
- formatSection("Untracked Files", untrackedBody)
- ];
- } else {
- const stagedStat = gitChecked(cwd, ["diff", "--shortstat", "--cached"]).stdout.trim();
- const unstagedStat = gitChecked(cwd, ["diff", "--shortstat"]).stdout.trim();
- const untrackedBody = state.untracked.map((file) => formatUntrackedFile(cwd, file)).join("\n\n");
- parts = [
- formatSection("Git Status", status),
- formatSection("Staged Diff Stat", stagedStat),
- formatSection("Unstaged Diff Stat", unstagedStat),
- formatSection("Changed Files", changedFiles.join("\n")),
- formatSection("Untracked Files", untrackedBody)
- ];
- }
-
- return {
- mode: "working-tree",
- summary: `Reviewing ${state.staged.length} staged, ${state.unstaged.length} unstaged, and ${state.untracked.length} untracked file(s).`,
- content: parts.join("\n"),
- changedFiles
- };
-}
-
-function collectBranchContext(cwd, baseRef, options = {}) {
- const includeDiff = options.includeDiff !== false;
- const comparison = options.comparison ?? buildBranchComparison(cwd, baseRef);
- const currentBranch = getCurrentBranch(cwd);
- const changedFiles = gitChecked(cwd, ["diff", "--name-only", comparison.commitRange]).stdout.trim().split("\n").filter(Boolean);
- const logOutput = gitChecked(cwd, ["log", "--oneline", "--decorate", comparison.commitRange]).stdout.trim();
- const diffStat = gitChecked(cwd, ["diff", "--stat", comparison.commitRange]).stdout.trim();
-
- return {
- mode: "branch",
- summary: `Reviewing branch ${currentBranch} against ${baseRef} from merge-base ${comparison.mergeBase}.`,
- content: includeDiff
- ? [
- formatSection("Commit Log", logOutput),
- formatSection("Diff Stat", diffStat),
- formatSection(
- "Branch Diff",
- gitChecked(cwd, ["diff", "--binary", "--no-ext-diff", "--submodule=diff", comparison.commitRange]).stdout
- )
- ].join("\n")
- : [
- formatSection("Commit Log", logOutput),
- formatSection("Diff Stat", diffStat),
- formatSection("Changed Files", changedFiles.join("\n"))
- ].join("\n"),
- changedFiles,
- comparison
- };
-}
-
-function buildAdversarialCollectionGuidance(options = {}) {
- if (options.includeDiff !== false) {
- return "Use the repository context below as primary evidence.";
- }
-
- return "The repository context below is a lightweight summary. Inspect the target diff yourself with read-only git commands before finalizing findings.";
-}
-
-export function collectReviewContext(cwd, target, options = {}) {
- const repoRoot = getRepoRoot(cwd);
- const currentBranch = getCurrentBranch(repoRoot);
- const maxInlineFiles = normalizeMaxInlineFiles(options.maxInlineFiles);
- const maxInlineDiffBytes = normalizeMaxInlineDiffBytes(options.maxInlineDiffBytes);
- let details;
- let includeDiff;
- let diffBytes;
-
- if (target.mode === "working-tree") {
- const state = getWorkingTreeState(repoRoot);
- diffBytes = measureCombinedGitOutputBytes(
- repoRoot,
- [
- ["diff", "--cached", "--binary", "--no-ext-diff", "--submodule=diff"],
- ["diff", "--binary", "--no-ext-diff", "--submodule=diff"]
- ],
- maxInlineDiffBytes
- );
- includeDiff =
- options.includeDiff ??
- (listUniqueFiles(state.staged, state.unstaged, state.untracked).length <= maxInlineFiles &&
- diffBytes <= maxInlineDiffBytes);
- details = collectWorkingTreeContext(repoRoot, state, { includeDiff });
- } else {
- const comparison = buildBranchComparison(repoRoot, target.baseRef);
- const fileCount = gitChecked(repoRoot, ["diff", "--name-only", comparison.commitRange]).stdout.trim().split("\n").filter(Boolean).length;
- diffBytes = measureGitOutputBytes(
- repoRoot,
- ["diff", "--binary", "--no-ext-diff", "--submodule=diff", comparison.commitRange],
- maxInlineDiffBytes
- );
- includeDiff = options.includeDiff ?? (fileCount <= maxInlineFiles && diffBytes <= maxInlineDiffBytes);
- details = collectBranchContext(repoRoot, target.baseRef, { includeDiff, comparison });
- }
-
- return {
- cwd: repoRoot,
- repoRoot,
- branch: currentBranch,
- target,
- fileCount: details.changedFiles.length,
- diffBytes,
- inputMode: includeDiff ? "inline-diff" : "self-collect",
- collectionGuidance: buildAdversarialCollectionGuidance({ includeDiff }),
- ...details
- };
-}
diff --git a/plugins/codex/scripts/lib/job-control.mjs b/plugins/codex/scripts/lib/job-control.mjs
deleted file mode 100644
index ad152c15..00000000
--- a/plugins/codex/scripts/lib/job-control.mjs
+++ /dev/null
@@ -1,308 +0,0 @@
-import fs from "node:fs";
-
-import { getSessionRuntimeStatus } from "./codex.mjs";
-import { getConfig, listJobs, readJobFile, resolveJobFile } from "./state.mjs";
-import { SESSION_ID_ENV } from "./tracked-jobs.mjs";
-import { resolveWorkspaceRoot } from "./workspace.mjs";
-
-export const DEFAULT_MAX_STATUS_JOBS = 8;
-export const DEFAULT_MAX_PROGRESS_LINES = 4;
-
-export function sortJobsNewestFirst(jobs) {
- return [...jobs].sort((left, right) => String(right.updatedAt ?? "").localeCompare(String(left.updatedAt ?? "")));
-}
-
-function getCurrentSessionId(options = {}) {
- return options.env?.[SESSION_ID_ENV] ?? process.env[SESSION_ID_ENV] ?? null;
-}
-
-function filterJobsForCurrentSession(jobs, options = {}) {
- const sessionId = getCurrentSessionId(options);
- if (!sessionId) {
- return jobs;
- }
- return jobs.filter((job) => job.sessionId === sessionId);
-}
-
-function getJobTypeLabel(job) {
- if (typeof job.kindLabel === "string" && job.kindLabel) {
- return job.kindLabel;
- }
- if (job.kind === "adversarial-review") {
- return "adversarial-review";
- }
- if (job.jobClass === "review") {
- return "review";
- }
- if (job.jobClass === "task") {
- return "rescue";
- }
- if (job.kind === "review") {
- return "review";
- }
- if (job.kind === "task") {
- return "rescue";
- }
- return "job";
-}
-
-function stripLogPrefix(line) {
- return line.replace(/^\[[^\]]+\]\s*/, "").trim();
-}
-
-function isProgressBlockTitle(line) {
- return (
- ["Final output", "Assistant message", "Reasoning summary", "Review output"].includes(line) ||
- /^Subagent .+ message$/.test(line) ||
- /^Subagent .+ reasoning summary$/.test(line)
- );
-}
-
-export function readJobProgressPreview(logFile, maxLines = DEFAULT_MAX_PROGRESS_LINES) {
- if (!logFile || !fs.existsSync(logFile)) {
- return [];
- }
-
- const lines = fs
- .readFileSync(logFile, "utf8")
- .split(/\r?\n/)
- .map((line) => line.trimEnd())
- .filter(Boolean)
- .filter((line) => line.startsWith("["))
- .map(stripLogPrefix)
- .filter((line) => line && !isProgressBlockTitle(line));
-
- return lines.slice(-maxLines);
-}
-
-function formatElapsedDuration(startValue, endValue = null) {
- const start = Date.parse(startValue ?? "");
- if (!Number.isFinite(start)) {
- return null;
- }
-
- const end = endValue ? Date.parse(endValue) : Date.now();
- if (!Number.isFinite(end) || end < start) {
- return null;
- }
-
- const totalSeconds = Math.max(0, Math.round((end - start) / 1000));
- const hours = Math.floor(totalSeconds / 3600);
- const minutes = Math.floor((totalSeconds % 3600) / 60);
- const seconds = totalSeconds % 60;
-
- if (hours > 0) {
- return `${hours}h ${minutes}m`;
- }
- if (minutes > 0) {
- return `${minutes}m ${seconds}s`;
- }
- return `${seconds}s`;
-}
-
-function looksLikeVerificationCommand(line) {
- return /\b(test|tests|lint|build|typecheck|type-check|check|verify|validate|pytest|jest|vitest|cargo test|npm test|pnpm test|yarn test|go test|mvn test|gradle test|tsc|eslint|ruff)\b/i.test(
- line
- );
-}
-
-function inferLegacyJobPhase(job, progressPreview = []) {
- switch (job.status) {
- case "queued":
- return "queued";
- case "cancelled":
- return "cancelled";
- case "failed":
- return "failed";
- case "completed":
- return "done";
- default:
- break;
- }
-
- for (let index = progressPreview.length - 1; index >= 0; index -= 1) {
- const line = progressPreview[index].toLowerCase();
- if (line.startsWith("starting codex") || line.startsWith("thread ready") || line.startsWith("turn started")) {
- return "starting";
- }
- if (line.startsWith("reviewer started") || line.includes("review mode")) {
- return "reviewing";
- }
- if (line.startsWith("searching:") || line.startsWith("calling ") || line.startsWith("running tool:")) {
- return "investigating";
- }
- if (line.startsWith("starting collaboration tool:")) {
- return "investigating";
- }
- if (line.startsWith("running command:")) {
- return looksLikeVerificationCommand(line)
- ? "verifying"
- : job.jobClass === "review"
- ? "reviewing"
- : "investigating";
- }
- if (line.startsWith("command completed:")) {
- return looksLikeVerificationCommand(line) ? "verifying" : "running";
- }
- if (line.startsWith("applying ") || line.startsWith("file changes ")) {
- return "editing";
- }
- if (line.startsWith("turn completed")) {
- return "finalizing";
- }
- if (line.startsWith("codex error:") || line.startsWith("failed:")) {
- return "failed";
- }
- }
-
- return job.jobClass === "review" ? "reviewing" : "running";
-}
-
-export function enrichJob(job, options = {}) {
- const maxProgressLines = options.maxProgressLines ?? DEFAULT_MAX_PROGRESS_LINES;
- const enriched = {
- ...job,
- kindLabel: getJobTypeLabel(job),
- progressPreview:
- job.status === "queued" || job.status === "running" || job.status === "failed"
- ? readJobProgressPreview(job.logFile, maxProgressLines)
- : [],
- elapsed: formatElapsedDuration(job.startedAt ?? job.createdAt, job.completedAt ?? null),
- duration:
- job.status === "completed" || job.status === "failed" || job.status === "cancelled"
- ? formatElapsedDuration(job.startedAt ?? job.createdAt, job.completedAt ?? job.updatedAt)
- : null
- };
-
- return {
- ...enriched,
- phase: enriched.phase ?? inferLegacyJobPhase(enriched, enriched.progressPreview)
- };
-}
-
-export function readStoredJob(workspaceRoot, jobId) {
- const jobFile = resolveJobFile(workspaceRoot, jobId);
- if (!fs.existsSync(jobFile)) {
- return null;
- }
- return readJobFile(jobFile);
-}
-
-function matchJobReference(jobs, reference, predicate = () => true) {
- const filtered = jobs.filter(predicate);
- if (!reference) {
- return filtered[0] ?? null;
- }
-
- const exact = filtered.find((job) => job.id === reference);
- if (exact) {
- return exact;
- }
-
- const prefixMatches = filtered.filter((job) => job.id.startsWith(reference));
- if (prefixMatches.length === 1) {
- return prefixMatches[0];
- }
- if (prefixMatches.length > 1) {
- throw new Error(`Job reference "${reference}" is ambiguous. Use a longer job id.`);
- }
-
- throw new Error(`No job found for "${reference}". Run /codex:status to list known jobs.`);
-}
-
-export function buildStatusSnapshot(cwd, options = {}) {
- const workspaceRoot = resolveWorkspaceRoot(cwd);
- const config = getConfig(workspaceRoot);
- const jobs = sortJobsNewestFirst(filterJobsForCurrentSession(listJobs(workspaceRoot), options));
- const maxJobs = options.maxJobs ?? DEFAULT_MAX_STATUS_JOBS;
- const maxProgressLines = options.maxProgressLines ?? DEFAULT_MAX_PROGRESS_LINES;
-
- const running = jobs
- .filter((job) => job.status === "queued" || job.status === "running")
- .map((job) => enrichJob(job, { maxProgressLines }));
-
- const latestFinishedRaw = jobs.find((job) => job.status !== "queued" && job.status !== "running") ?? null;
- const latestFinished = latestFinishedRaw ? enrichJob(latestFinishedRaw, { maxProgressLines }) : null;
-
- const recent = (options.all ? jobs : jobs.slice(0, maxJobs))
- .filter((job) => job.status !== "queued" && job.status !== "running" && job.id !== latestFinished?.id)
- .map((job) => enrichJob(job, { maxProgressLines }));
-
- return {
- workspaceRoot,
- config,
- sessionRuntime: getSessionRuntimeStatus(options.env, workspaceRoot),
- running,
- latestFinished,
- recent,
- needsReview: Boolean(config.stopReviewGate)
- };
-}
-
-export function buildSingleJobSnapshot(cwd, reference, options = {}) {
- const workspaceRoot = resolveWorkspaceRoot(cwd);
- const jobs = sortJobsNewestFirst(listJobs(workspaceRoot));
- const selected = matchJobReference(jobs, reference);
- if (!selected) {
- throw new Error(`No job found for "${reference}". Run /codex:status to inspect known jobs.`);
- }
-
- return {
- workspaceRoot,
- job: enrichJob(selected, { maxProgressLines: options.maxProgressLines })
- };
-}
-
-export function resolveResultJob(cwd, reference) {
- const workspaceRoot = resolveWorkspaceRoot(cwd);
- const jobs = sortJobsNewestFirst(reference ? listJobs(workspaceRoot) : filterJobsForCurrentSession(listJobs(workspaceRoot)));
- const selected = matchJobReference(
- jobs,
- reference,
- (job) => job.status === "completed" || job.status === "failed" || job.status === "cancelled"
- );
-
- if (selected) {
- return { workspaceRoot, job: selected };
- }
-
- const active = matchJobReference(jobs, reference, (job) => job.status === "queued" || job.status === "running");
- if (active) {
- throw new Error(`Job ${active.id} is still ${active.status}. Check /codex:status and try again once it finishes.`);
- }
-
- if (reference) {
- throw new Error(`No finished job found for "${reference}". Run /codex:status to inspect active jobs.`);
- }
-
- throw new Error("No finished Codex jobs found for this repository yet.");
-}
-
-export function resolveCancelableJob(cwd, reference, options = {}) {
- const workspaceRoot = resolveWorkspaceRoot(cwd);
- const jobs = sortJobsNewestFirst(listJobs(workspaceRoot));
- const activeJobs = jobs.filter((job) => job.status === "queued" || job.status === "running");
-
- if (reference) {
- const selected = matchJobReference(activeJobs, reference);
- if (!selected) {
- throw new Error(`No active job found for "${reference}".`);
- }
- return { workspaceRoot, job: selected };
- }
-
- const sessionScopedActiveJobs = filterJobsForCurrentSession(activeJobs, options);
-
- if (sessionScopedActiveJobs.length === 1) {
- return { workspaceRoot, job: sessionScopedActiveJobs[0] };
- }
- if (sessionScopedActiveJobs.length > 1) {
- throw new Error("Multiple Codex jobs are active. Pass a job id to /codex:cancel.");
- }
-
- if (getCurrentSessionId(options)) {
- throw new Error("No active Codex jobs to cancel for this session.");
- }
-
- throw new Error("No active Codex jobs to cancel.");
-}
diff --git a/plugins/codex/scripts/lib/process.mjs b/plugins/codex/scripts/lib/process.mjs
index af28d1cf..df2f8e4e 100644
--- a/plugins/codex/scripts/lib/process.mjs
+++ b/plugins/codex/scripts/lib/process.mjs
@@ -66,7 +66,11 @@ export function terminateProcessTree(pid, options = {}) {
if (platform === "win32") {
const result = runCommandImpl("taskkill", ["/PID", String(pid), "/T", "/F"], {
cwd: options.cwd,
- env: options.env
+ env: {
+ ...(options.env ?? process.env),
+ MSYS_NO_PATHCONV: "1",
+ MSYS2_ARG_CONV_EXCL: "*"
+ }
});
if (!result.error && result.status === 0) {
@@ -74,8 +78,7 @@ export function terminateProcessTree(pid, options = {}) {
}
const combinedOutput = `${result.stderr}\n${result.stdout}`.trim();
- if (!result.error && looksLikeMissingProcessMessage(combinedOutput)) {
- return { attempted: true, delivered: false, method: "taskkill", result };
+ if (!result.error && (result.status === 128 || looksLikeMissingProcessMessage(combinedOutput))) { return { attempted: true, delivered: false, method: "taskkill", result };
}
if (result.error?.code === "ENOENT") {
diff --git a/plugins/codex/scripts/lib/prompts.mjs b/plugins/codex/scripts/lib/prompts.mjs
deleted file mode 100644
index 20108150..00000000
--- a/plugins/codex/scripts/lib/prompts.mjs
+++ /dev/null
@@ -1,13 +0,0 @@
-import fs from "node:fs";
-import path from "node:path";
-
-export function loadPromptTemplate(rootDir, name) {
- const promptPath = path.join(rootDir, "prompts", `${name}.md`);
- return fs.readFileSync(promptPath, "utf8");
-}
-
-export function interpolateTemplate(template, variables) {
- return template.replace(/\{\{([A-Z_]+)\}\}/g, (_, key) => {
- return Object.prototype.hasOwnProperty.call(variables, key) ? variables[key] : "";
- });
-}
diff --git a/plugins/codex/scripts/lib/render.mjs b/plugins/codex/scripts/lib/render.mjs
deleted file mode 100644
index 2ec18523..00000000
--- a/plugins/codex/scripts/lib/render.mjs
+++ /dev/null
@@ -1,465 +0,0 @@
-function severityRank(severity) {
- switch (severity) {
- case "critical":
- return 0;
- case "high":
- return 1;
- case "medium":
- return 2;
- default:
- return 3;
- }
-}
-
-function formatLineRange(finding) {
- if (!finding.line_start) {
- return "";
- }
- if (!finding.line_end || finding.line_end === finding.line_start) {
- return `:${finding.line_start}`;
- }
- return `:${finding.line_start}-${finding.line_end}`;
-}
-
-function validateReviewResultShape(data) {
- if (!data || typeof data !== "object" || Array.isArray(data)) {
- return "Expected a top-level JSON object.";
- }
- if (typeof data.verdict !== "string" || !data.verdict.trim()) {
- return "Missing string `verdict`.";
- }
- if (typeof data.summary !== "string" || !data.summary.trim()) {
- return "Missing string `summary`.";
- }
- if (!Array.isArray(data.findings)) {
- return "Missing array `findings`.";
- }
- if (!Array.isArray(data.next_steps)) {
- return "Missing array `next_steps`.";
- }
- return null;
-}
-
-function normalizeReviewFinding(finding, index) {
- const source = finding && typeof finding === "object" && !Array.isArray(finding) ? finding : {};
- const lineStart = Number.isInteger(source.line_start) && source.line_start > 0 ? source.line_start : null;
- const lineEnd =
- Number.isInteger(source.line_end) && source.line_end > 0 && (!lineStart || source.line_end >= lineStart)
- ? source.line_end
- : lineStart;
-
- return {
- severity: typeof source.severity === "string" && source.severity.trim() ? source.severity.trim() : "low",
- title: typeof source.title === "string" && source.title.trim() ? source.title.trim() : `Finding ${index + 1}`,
- body: typeof source.body === "string" && source.body.trim() ? source.body.trim() : "No details provided.",
- file: typeof source.file === "string" && source.file.trim() ? source.file.trim() : "unknown",
- line_start: lineStart,
- line_end: lineEnd,
- recommendation: typeof source.recommendation === "string" ? source.recommendation.trim() : ""
- };
-}
-
-function normalizeReviewResultData(data) {
- return {
- verdict: data.verdict.trim(),
- summary: data.summary.trim(),
- findings: data.findings.map((finding, index) => normalizeReviewFinding(finding, index)),
- next_steps: data.next_steps
- .filter((step) => typeof step === "string" && step.trim())
- .map((step) => step.trim())
- };
-}
-
-function isStructuredReviewStoredResult(storedJob) {
- const result = storedJob?.result;
- if (!result || typeof result !== "object" || Array.isArray(result)) {
- return false;
- }
- return (
- Object.prototype.hasOwnProperty.call(result, "result") ||
- Object.prototype.hasOwnProperty.call(result, "parseError")
- );
-}
-
-function formatJobLine(job) {
- const parts = [job.id, `${job.status || "unknown"}`];
- if (job.kindLabel) {
- parts.push(job.kindLabel);
- }
- if (job.title) {
- parts.push(job.title);
- }
- return parts.join(" | ");
-}
-
-function escapeMarkdownCell(value) {
- return String(value ?? "")
- .replace(/\|/g, "\\|")
- .replace(/\r?\n/g, " ")
- .trim();
-}
-
-function formatCodexResumeCommand(job) {
- if (!job?.threadId) {
- return null;
- }
- return `codex resume ${job.threadId}`;
-}
-
-function appendActiveJobsTable(lines, jobs) {
- lines.push("Active jobs:");
- lines.push("| Job | Kind | Status | Phase | Elapsed | Codex Session ID | Summary | Actions |");
- lines.push("| --- | --- | --- | --- | --- | --- | --- | --- |");
- for (const job of jobs) {
- const actions = [`/codex:status ${job.id}`];
- if (job.status === "queued" || job.status === "running") {
- actions.push(`/codex:cancel ${job.id}`);
- }
- lines.push(
- `| ${escapeMarkdownCell(job.id)} | ${escapeMarkdownCell(job.kindLabel)} | ${escapeMarkdownCell(job.status)} | ${escapeMarkdownCell(job.phase ?? "")} | ${escapeMarkdownCell(job.elapsed ?? "")} | ${escapeMarkdownCell(job.threadId ?? "")} | ${escapeMarkdownCell(job.summary ?? "")} | ${actions.map((action) => `\`${action}\``).join("]
")} |`
- );
- }
-}
-
-function pushJobDetails(lines, job, options = {}) {
- lines.push(`- ${formatJobLine(job)}`);
- if (job.summary) {
- lines.push(` Summary: ${job.summary}`);
- }
- if (job.phase) {
- lines.push(` Phase: ${job.phase}`);
- }
- if (options.showElapsed && job.elapsed) {
- lines.push(` Elapsed: ${job.elapsed}`);
- }
- if (options.showDuration && job.duration) {
- lines.push(` Duration: ${job.duration}`);
- }
- if (job.threadId) {
- lines.push(` Codex session ID: ${job.threadId}`);
- }
- const resumeCommand = formatCodexResumeCommand(job);
- if (resumeCommand) {
- lines.push(` Resume in Codex: ${resumeCommand}`);
- }
- if (job.logFile && options.showLog) {
- lines.push(` Log: ${job.logFile}`);
- }
- if ((job.status === "queued" || job.status === "running") && options.showCancelHint) {
- lines.push(` Cancel: /codex:cancel ${job.id}`);
- }
- if (job.status !== "queued" && job.status !== "running" && options.showResultHint) {
- lines.push(` Result: /codex:result ${job.id}`);
- }
- if (job.status !== "queued" && job.status !== "running" && job.jobClass === "task" && job.write && options.showReviewHint) {
- lines.push(" Review changes: /codex:review --wait");
- lines.push(" Stricter review: /codex:adversarial-review --wait");
- }
- if (job.progressPreview?.length) {
- lines.push(" Progress:");
- for (const line of job.progressPreview) {
- lines.push(` ${line}`);
- }
- }
-}
-
-function appendReasoningSection(lines, reasoningSummary) {
- if (!Array.isArray(reasoningSummary) || reasoningSummary.length === 0) {
- return;
- }
-
- lines.push("", "Reasoning:");
- for (const section of reasoningSummary) {
- lines.push(`- ${section}`);
- }
-}
-
-export function renderSetupReport(report) {
- const lines = [
- "# Codex Setup",
- "",
- `Status: ${report.ready ? "ready" : "needs attention"}`,
- "",
- "Checks:",
- `- node: ${report.node.detail}`,
- `- npm: ${report.npm.detail}`,
- `- codex: ${report.codex.detail}`,
- `- auth: ${report.auth.detail}`,
- `- session runtime: ${report.sessionRuntime.label}`,
- `- review gate: ${report.reviewGateEnabled ? "enabled" : "disabled"}`,
- ""
- ];
-
- if (report.actionsTaken.length > 0) {
- lines.push("Actions taken:");
- for (const action of report.actionsTaken) {
- lines.push(`- ${action}`);
- }
- lines.push("");
- }
-
- if (report.nextSteps.length > 0) {
- lines.push("Next steps:");
- for (const step of report.nextSteps) {
- lines.push(`- ${step}`);
- }
- }
-
- return `${lines.join("\n").trimEnd()}\n`;
-}
-
/**
 * Render a structured Codex review result as Markdown.
 *
 * Produces one of three documents: a parse-failure report when Codex did not
 * return valid JSON, a shape-failure report when the JSON did not match the
 * expected review schema, or the full review (verdict, summary, findings
 * sorted by severity, next steps).
 *
 * @param {object} parsedResult - Carries `parsed` (decoded JSON or falsy),
 *   `parseError`, `rawOutput`, and optionally `reasoningSummary`.
 * @param {object} meta - `reviewLabel`, `targetLabel`, and an optional
 *   `reasoningSummary` that takes precedence over the parsed one.
 * @returns {string} Markdown document terminated by exactly one newline.
 */
export function renderReviewResult(parsedResult, meta) {
  if (!parsedResult.parsed) {
    // JSON decoding failed entirely: surface the parse error plus raw output.
    const lines = [
      `# Codex ${meta.reviewLabel}`,
      "",
      "Codex did not return valid structured JSON.",
      "",
      `- Parse error: ${parsedResult.parseError}`
    ];

    if (parsedResult.rawOutput) {
      lines.push("", "Raw final message:", "", "```text", parsedResult.rawOutput, "```");
    }

    appendReasoningSection(lines, meta.reasoningSummary ?? parsedResult.reasoningSummary);

    return `${lines.join("\n").trimEnd()}\n`;
  }

  const validationError = validateReviewResultShape(parsedResult.parsed);
  if (validationError) {
    // JSON parsed but did not match the review schema.
    const lines = [
      `# Codex ${meta.reviewLabel}`,
      "",
      `Target: ${meta.targetLabel}`,
      "Codex returned JSON with an unexpected review shape.",
      "",
      `- Validation error: ${validationError}`
    ];

    if (parsedResult.rawOutput) {
      lines.push("", "Raw final message:", "", "```text", parsedResult.rawOutput, "```");
    }

    appendReasoningSection(lines, meta.reasoningSummary ?? parsedResult.reasoningSummary);

    return `${lines.join("\n").trimEnd()}\n`;
  }

  const data = normalizeReviewResultData(parsedResult.parsed);
  // Sort a copy so the caller's findings array is left untouched.
  const findings = [...data.findings].sort((left, right) => severityRank(left.severity) - severityRank(right.severity));
  const lines = [
    `# Codex ${meta.reviewLabel}`,
    "",
    `Target: ${meta.targetLabel}`,
    `Verdict: ${data.verdict}`,
    "",
    data.summary,
    ""
  ];

  if (findings.length === 0) {
    lines.push("No material findings.");
  } else {
    lines.push("Findings:");
    for (const finding of findings) {
      const lineSuffix = formatLineRange(finding);
      lines.push(`- [${finding.severity}] ${finding.title} (${finding.file}${lineSuffix})`);
      lines.push(`  ${finding.body}`);
      if (finding.recommendation) {
        lines.push(`  Recommendation: ${finding.recommendation}`);
      }
    }
  }

  if (data.next_steps.length > 0) {
    lines.push("", "Next steps:");
    for (const step of data.next_steps) {
      lines.push(`- ${step}`);
    }
  }

  appendReasoningSection(lines, meta.reasoningSummary);

  return `${lines.join("\n").trimEnd()}\n`;
}
-
/**
 * Render the output of a native `codex review` run (plain stdout/stderr, no
 * structured JSON) as Markdown.
 *
 * @param {object} result - Process result with `stdout`, `stderr`, `status`.
 * @param {object} meta - `reviewLabel`, `targetLabel`, optional
 *   `reasoningSummary`.
 * @returns {string} Markdown document terminated by exactly one newline.
 */
export function renderNativeReviewResult(result, meta) {
  const stdout = result.stdout.trim();
  const stderr = result.stderr.trim();
  const lines = [
    `# Codex ${meta.reviewLabel}`,
    "",
    `Target: ${meta.targetLabel}`,
    ""
  ];

  if (stdout) {
    lines.push(stdout);
  } else if (result.status === 0) {
    // Exit 0 with no output: report success explicitly rather than staying blank.
    lines.push("Codex review completed without any stdout output.");
  } else {
    lines.push("Codex review failed.");
  }

  if (stderr) {
    lines.push("", "stderr:", "", "```text", stderr, "```");
  }

  appendReasoningSection(lines, meta.reasoningSummary);

  return `${lines.join("\n").trimEnd()}\n`;
}
-
/**
 * Render a task result: pass Codex's raw final message through (newline
 * terminated), or fall back to the failure message / a default notice.
 *
 * @param {object} parsedResult - May carry `rawOutput` and `failureMessage`.
 * @param {object} meta - Unused; kept for signature parity with the other
 *   render helpers.
 * @returns {string} Text terminated by exactly one newline.
 */
export function renderTaskResult(parsedResult, meta) {
  const raw = parsedResult?.rawOutput;
  if (typeof raw === "string" && raw !== "") {
    return raw.endsWith("\n") ? raw : `${raw}\n`;
  }

  const fallback = String(parsedResult?.failureMessage ?? "").trim();
  return `${fallback !== "" ? fallback : "Codex did not return a final message."}\n`;
}
-
/**
 * Render the `/codex:status` overview: session runtime, review-gate state,
 * active jobs, the most recently finished job, and recent history.
 *
 * @param {object} report - `sessionRuntime.label`, `config.stopReviewGate`,
 *   `running` (array), `latestFinished` (nullable), `recent` (array),
 *   `needsReview` (boolean).
 * @returns {string} Markdown document terminated by exactly one newline.
 */
export function renderStatusReport(report) {
  const lines = [
    "# Codex Status",
    "",
    `Session runtime: ${report.sessionRuntime.label}`,
    `Review gate: ${report.config.stopReviewGate ? "enabled" : "disabled"}`,
    ""
  ];

  if (report.running.length > 0) {
    appendActiveJobsTable(lines, report.running);
    lines.push("");
    lines.push("Live details:");
    for (const job of report.running) {
      // Running jobs show elapsed time and a pointer to the live log file.
      pushJobDetails(lines, job, {
        showElapsed: true,
        showLog: true
      });
    }
    lines.push("");
  }

  if (report.latestFinished) {
    lines.push("Latest finished:");
    pushJobDetails(lines, report.latestFinished, {
      showDuration: true,
      // Only surface the log when the job failed.
      showLog: report.latestFinished.status === "failed"
    });
    lines.push("");
  }

  if (report.recent.length > 0) {
    lines.push("Recent jobs:");
    for (const job of report.recent) {
      pushJobDetails(lines, job, {
        showDuration: true,
        showLog: job.status === "failed"
      });
    }
    lines.push("");
  } else if (report.running.length === 0 && !report.latestFinished) {
    // No history of any kind: make that explicit instead of rendering nothing.
    lines.push("No jobs recorded yet.", "");
  }

  if (report.needsReview) {
    lines.push("The stop-time review gate is enabled.");
    lines.push("Ending the session will trigger a fresh Codex adversarial review and block if it finds issues.");
  }

  return `${lines.join("\n").trimEnd()}\n`;
}
-
/**
 * Render the detailed status card for a single job.
 *
 * Elapsed time is shown while the job is queued/running, duration once it has
 * finished; all hint lines (cancel/result/review) are enabled in this view.
 */
export function renderJobStatusReport(job) {
  const lines = ["# Codex Job Status", ""];
  pushJobDetails(lines, job, {
    showElapsed: job.status === "queued" || job.status === "running",
    showDuration: job.status !== "queued" && job.status !== "running",
    showLog: true,
    showCancelHint: true,
    showResultHint: true,
    showReviewHint: true
  });
  return `${lines.join("\n").trimEnd()}\n`;
}
-
/**
 * Render the stored result payload for a job, preferring (in order) a
 * structured-review rendering, captured raw output, then any generic rendered
 * text; falls back to a minimal status card when nothing was stored.
 *
 * @param {object} job - Job index record (`id`, `status`, optional `title`,
 *   `summary`, `threadId`, `errorMessage`).
 * @param {object|null} storedJob - The job's persisted payload, if any.
 * @returns {string} Text terminated by a newline, with a Codex resume footer
 *   appended whenever a thread id is known.
 */
export function renderStoredJobResult(job, storedJob) {
  const threadId = storedJob?.threadId ?? job.threadId ?? null;
  const resumeCommand = threadId ? `codex resume ${threadId}` : null;

  // Shared tail for all stored-output branches: normalize to one trailing
  // newline, then append the session/resume footer when a thread id exists.
  // (Previously this logic was duplicated in three branches.)
  const finalize = (text) => {
    const output = text.endsWith("\n") ? text : `${text}\n`;
    if (!threadId) {
      return output;
    }
    return `${output}\nCodex session ID: ${threadId}\nResume in Codex: ${resumeCommand}\n`;
  };

  if (isStructuredReviewStoredResult(storedJob) && storedJob?.rendered) {
    return finalize(storedJob.rendered);
  }

  const rawOutput =
    (typeof storedJob?.result?.rawOutput === "string" && storedJob.result.rawOutput) ||
    (typeof storedJob?.result?.codex?.stdout === "string" && storedJob.result.codex.stdout) ||
    "";
  if (rawOutput) {
    return finalize(rawOutput);
  }

  if (storedJob?.rendered) {
    return finalize(storedJob.rendered);
  }

  // Nothing usable was stored: render a minimal status card instead.
  const lines = [
    `# ${job.title ?? "Codex Result"}`,
    "",
    `Job: ${job.id}`,
    `Status: ${job.status}`
  ];

  if (threadId) {
    lines.push(`Codex session ID: ${threadId}`);
    lines.push(`Resume in Codex: ${resumeCommand}`);
  }

  if (job.summary) {
    lines.push(`Summary: ${job.summary}`);
  }

  if (job.errorMessage) {
    lines.push("", job.errorMessage);
  } else if (storedJob?.errorMessage) {
    lines.push("", storedJob.errorMessage);
  } else {
    lines.push("", "No captured result payload was stored for this job.");
  }

  return `${lines.join("\n").trimEnd()}\n`;
}
-
/**
 * Render the confirmation shown after cancelling a job.
 *
 * @param {object} job - Cancelled job record (`id`, optional `title`,
 *   `summary`).
 * @returns {string} Markdown document terminated by exactly one newline.
 */
export function renderCancelReport(job) {
  const lines = ["# Codex Cancel", "", `Cancelled ${job.id}.`, ""];

  if (job.title) {
    lines.push(`- Title: ${job.title}`);
  }
  if (job.summary) {
    lines.push(`- Summary: ${job.summary}`);
  }
  lines.push("- Check `/codex:status` for the updated queue.");

  return `${lines.join("\n").trimEnd()}\n`;
}
diff --git a/plugins/codex/scripts/lib/state.mjs b/plugins/codex/scripts/lib/state.mjs
deleted file mode 100644
index 2da23498..00000000
--- a/plugins/codex/scripts/lib/state.mjs
+++ /dev/null
@@ -1,191 +0,0 @@
-import { createHash } from "node:crypto";
-import fs from "node:fs";
-import os from "node:os";
-import path from "node:path";
-
-import { resolveWorkspaceRoot } from "./workspace.mjs";
-
// Schema version written into state.json; bump when the persisted layout changes.
const STATE_VERSION = 1;
// Env var Claude Code sets to the plugin's per-install data directory.
const PLUGIN_DATA_ENV = "CLAUDE_PLUGIN_DATA";
// Fallback state location when CLAUDE_PLUGIN_DATA is not set.
const FALLBACK_STATE_ROOT_DIR = path.join(os.tmpdir(), "codex-companion");
const STATE_FILE_NAME = "state.json";
const JOBS_DIR_NAME = "jobs";
// Cap on retained job records; older entries are pruned on save.
const MAX_JOBS = 50;
-
// Current wall-clock time as an ISO-8601 UTC string, used for state timestamps.
function nowIso() {
  const now = new Date();
  return now.toISOString();
}
-
// Fresh, empty state object used when no state file exists or parsing fails.
function defaultState() {
  const config = { stopReviewGate: false };
  return { version: STATE_VERSION, config, jobs: [] };
}
-
/**
 * Per-workspace state directory: `<stateRoot>/<slug>-<hash16>`.
 *
 * The hash is derived from the canonical (symlink-resolved) workspace root so
 * different paths to the same directory share state, while the slug keeps the
 * directory name human-readable.
 */
export function resolveStateDir(cwd) {
  const workspaceRoot = resolveWorkspaceRoot(cwd);
  let canonicalWorkspaceRoot = workspaceRoot;
  try {
    canonicalWorkspaceRoot = fs.realpathSync.native(workspaceRoot);
  } catch {
    // Keep the unresolved path if realpath fails (e.g. directory removed).
    canonicalWorkspaceRoot = workspaceRoot;
  }

  const slugSource = path.basename(workspaceRoot) || "workspace";
  // Replace characters unsafe in directory names; fall back if nothing remains.
  const slug = slugSource.replace(/[^a-zA-Z0-9._-]+/g, "-").replace(/^-+|-+$/g, "") || "workspace";
  const hash = createHash("sha256").update(canonicalWorkspaceRoot).digest("hex").slice(0, 16);
  const pluginDataDir = process.env[PLUGIN_DATA_ENV];
  const stateRoot = pluginDataDir ? path.join(pluginDataDir, "state") : FALLBACK_STATE_ROOT_DIR;
  return path.join(stateRoot, `${slug}-${hash}`);
}
-
// Absolute path of this workspace's state.json file.
export function resolveStateFile(cwd) {
  const stateDir = resolveStateDir(cwd);
  return path.join(stateDir, STATE_FILE_NAME);
}
-
// Absolute path of this workspace's per-job payload/log directory.
export function resolveJobsDir(cwd) {
  const stateDir = resolveStateDir(cwd);
  return path.join(stateDir, JOBS_DIR_NAME);
}
-
// Create the per-workspace jobs directory (and all parents) if missing.
export function ensureStateDir(cwd) {
  fs.mkdirSync(resolveJobsDir(cwd), { recursive: true });
}
-
/**
 * Load the workspace state from disk, normalizing against the default shape.
 * Any read or parse failure yields a fresh default state rather than throwing.
 */
export function loadState(cwd) {
  const stateFile = resolveStateFile(cwd);
  if (!fs.existsSync(stateFile)) {
    return defaultState();
  }

  try {
    const parsed = JSON.parse(fs.readFileSync(stateFile, "utf8"));
    const base = defaultState();
    return {
      ...base,
      ...parsed,
      // Merge config so newly added keys pick up their defaults.
      config: { ...base.config, ...(parsed.config ?? {}) },
      jobs: Array.isArray(parsed.jobs) ? parsed.jobs : []
    };
  } catch {
    // Corrupt state file: start over rather than crashing callers.
    return defaultState();
  }
}
-
// Keep only the MAX_JOBS most recently updated jobs (newest first). Sorts a
// copy; ISO timestamps compare correctly as strings.
function pruneJobs(jobs) {
  const byNewestFirst = (left, right) =>
    String(right.updatedAt ?? "").localeCompare(String(left.updatedAt ?? ""));
  return [...jobs].sort(byNewestFirst).slice(0, MAX_JOBS);
}
-
// Delete `filePath` when it is a non-empty path that currently exists; no-op
// otherwise.
function removeFileIfExists(filePath) {
  if (!filePath) {
    return;
  }
  if (fs.existsSync(filePath)) {
    fs.unlinkSync(filePath);
  }
}
-
/**
 * Persist `state` for the workspace, pruning to the newest MAX_JOBS records
 * and deleting the payload/log files of any job that fell out of the window.
 *
 * @returns {object} The normalized state that was actually written to disk.
 */
export function saveState(cwd, state) {
  // Snapshot the previously stored jobs before writing so files belonging to
  // records that are about to be dropped can be cleaned up.
  const previousJobs = loadState(cwd).jobs;
  ensureStateDir(cwd);
  const nextJobs = pruneJobs(state.jobs ?? []);
  const nextState = {
    version: STATE_VERSION,
    config: {
      ...defaultState().config,
      ...(state.config ?? {})
    },
    jobs: nextJobs
  };

  // Remove on-disk artifacts for jobs that did not survive pruning.
  const retainedIds = new Set(nextJobs.map((job) => job.id));
  for (const job of previousJobs) {
    if (retainedIds.has(job.id)) {
      continue;
    }
    removeJobFile(resolveJobFile(cwd, job.id));
    removeFileIfExists(job.logFile);
  }

  fs.writeFileSync(resolveStateFile(cwd), `${JSON.stringify(nextState, null, 2)}\n`, "utf8");
  return nextState;
}
-
// Load the state, apply `mutate` to it in place, persist, and return the
// saved (normalized) state.
export function updateState(cwd, mutate) {
  const current = loadState(cwd);
  mutate(current);
  return saveState(cwd, current);
}
-
// Build a reasonably unique job id: `<prefix>-<base36 time>-<base36 random>`.
export function generateJobId(prefix = "job") {
  const timePart = Date.now().toString(36);
  const randomPart = Math.random().toString(36).slice(2, 8);
  return [prefix, timePart, randomPart].join("-");
}
-
/**
 * Insert or update a job record by id. New jobs are prepended with created/
 * updated timestamps; existing jobs are shallow-merged with the patch and get
 * a fresh `updatedAt`.
 */
export function upsertJob(cwd, jobPatch) {
  return updateState(cwd, (state) => {
    const timestamp = nowIso();
    const index = state.jobs.findIndex((job) => job.id === jobPatch.id);
    if (index >= 0) {
      state.jobs[index] = {
        ...state.jobs[index],
        ...jobPatch,
        updatedAt: timestamp
      };
    } else {
      state.jobs.unshift({
        createdAt: timestamp,
        updatedAt: timestamp,
        ...jobPatch
      });
    }
  });
}
-
// All job records for the workspace, as stored (no filtering or sorting).
export function listJobs(cwd) {
  const { jobs } = loadState(cwd);
  return jobs;
}
-
// Set a single config key and persist; returns the saved state.
export function setConfig(cwd, key, value) {
  return updateState(cwd, (state) => {
    state.config = { ...state.config, [key]: value };
  });
}
-
// The workspace's config object (defaults merged in by loadState).
export function getConfig(cwd) {
  const { config } = loadState(cwd);
  return config;
}
-
// Persist a job's payload as pretty-printed JSON; returns the file path.
export function writeJobFile(cwd, jobId, payload) {
  ensureStateDir(cwd);
  const jobFile = resolveJobFile(cwd, jobId);
  const serialized = `${JSON.stringify(payload, null, 2)}\n`;
  fs.writeFileSync(jobFile, serialized, "utf8");
  return jobFile;
}
-
// Read and decode a job payload file. Throws on missing file or invalid JSON.
export function readJobFile(jobFile) {
  const contents = fs.readFileSync(jobFile, "utf8");
  return JSON.parse(contents);
}
-
// Delete a job's JSON payload file if present. Delegates to
// removeFileIfExists instead of duplicating its exists/unlink logic; kept as
// a named wrapper for call-site clarity.
function removeJobFile(jobFile) {
  removeFileIfExists(jobFile);
}
-
// Path of a job's log file; ensures the jobs directory exists first.
export function resolveJobLogFile(cwd, jobId) {
  ensureStateDir(cwd);
  const jobsDir = resolveJobsDir(cwd);
  return path.join(jobsDir, `${jobId}.log`);
}
-
// Path of a job's JSON payload file; ensures the jobs directory exists first.
export function resolveJobFile(cwd, jobId) {
  ensureStateDir(cwd);
  const jobsDir = resolveJobsDir(cwd);
  return path.join(jobsDir, `${jobId}.json`);
}
diff --git a/plugins/codex/scripts/lib/tracked-jobs.mjs b/plugins/codex/scripts/lib/tracked-jobs.mjs
deleted file mode 100644
index 90286901..00000000
--- a/plugins/codex/scripts/lib/tracked-jobs.mjs
+++ /dev/null
@@ -1,204 +0,0 @@
-import fs from "node:fs";
-import process from "node:process";
-
-import { readJobFile, resolveJobFile, resolveJobLogFile, upsertJob, writeJobFile } from "./state.mjs";
-
// Env var carrying the Claude Code session id, used to tie jobs to a session.
export const SESSION_ID_ENV = "CODEX_COMPANION_SESSION_ID";
-
// Current wall-clock time as an ISO-8601 UTC string, used for log and job
// timestamps.
export function nowIso() {
  const timestamp = new Date();
  return timestamp.toISOString();
}
-
/**
 * Normalize a progress event into a fixed shape. Plain objects map field by
 * field (trimmed, empty -> null); any other value becomes a message-only
 * event whose text doubles as the stderr message.
 */
function normalizeProgressEvent(value) {
  const isPlainObject = value != null && typeof value === "object" && !Array.isArray(value);

  if (!isPlainObject) {
    const message = String(value ?? "").trim();
    return {
      message,
      phase: null,
      threadId: null,
      turnId: null,
      stderrMessage: message,
      logTitle: null,
      logBody: null
    };
  }

  // Non-empty trimmed string, or null.
  const trimmedOrNull = (candidate) =>
    typeof candidate === "string" && candidate.trim() ? candidate.trim() : null;

  return {
    message: String(value.message ?? "").trim(),
    phase: trimmedOrNull(value.phase),
    threadId: trimmedOrNull(value.threadId),
    turnId: trimmedOrNull(value.turnId),
    stderrMessage: value.stderrMessage == null ? null : String(value.stderrMessage).trim(),
    logTitle: trimmedOrNull(value.logTitle),
    logBody: value.logBody == null ? null : String(value.logBody).trimEnd()
  };
}
-
// Append a single timestamped line to the job log; skipped when the log path
// or the (trimmed) message is empty.
export function appendLogLine(logFile, message) {
  const normalized = String(message ?? "").trim();
  if (logFile && normalized) {
    fs.appendFileSync(logFile, `[${nowIso()}] ${normalized}\n`, "utf8");
  }
}
-
/**
 * Append a titled, timestamped multi-line block to the job log. No-op when
 * the log path or body is missing.
 *
 * NOTE(review): a nullish `title` would be interpolated as "null"/"undefined"
 * in the heading when `body` is present — confirm callers always pair a title
 * with a body.
 */
export function appendLogBlock(logFile, title, body) {
  if (!logFile || !body) {
    return;
  }
  fs.appendFileSync(logFile, `\n[${nowIso()}] ${title}\n${String(body).trimEnd()}\n`, "utf8");
}
-
/**
 * Create (and truncate) the log file for a job, optionally writing an initial
 * "Starting <title>." line. Returns the log file path.
 */
export function createJobLogFile(workspaceRoot, jobId, title) {
  const logFile = resolveJobLogFile(workspaceRoot, jobId);
  // Truncate any stale log from a previous job with the same id.
  fs.writeFileSync(logFile, "", "utf8");
  if (title) {
    appendLogLine(logFile, `Starting ${title}.`);
  }
  return logFile;
}
-
/**
 * Build a job record from `base`, stamping `createdAt` and attaching the
 * session id from the environment when one is set.
 *
 * @param {object} options - Optional `env` (defaults to process.env) and
 *   `sessionIdEnv` (defaults to SESSION_ID_ENV).
 */
export function createJobRecord(base, options = {}) {
  const env = options.env ?? process.env;
  const sessionId = env[options.sessionIdEnv ?? SESSION_ID_ENV];
  const record = { ...base, createdAt: nowIso() };
  if (sessionId) {
    record.sessionId = sessionId;
  }
  return record;
}
-
/**
 * Build a progress callback that mirrors phase/threadId/turnId changes into
 * both the state index (via upsertJob) and the job's stored JSON payload.
 *
 * Writes are deduplicated: a patch is persisted only when at least one of the
 * three tracked fields actually changed since the last event.
 */
export function createJobProgressUpdater(workspaceRoot, jobId) {
  // Last-seen values, used to suppress redundant writes.
  let lastPhase = null;
  let lastThreadId = null;
  let lastTurnId = null;

  return (event) => {
    const normalized = normalizeProgressEvent(event);
    const patch = { id: jobId };
    let changed = false;

    if (normalized.phase && normalized.phase !== lastPhase) {
      lastPhase = normalized.phase;
      patch.phase = normalized.phase;
      changed = true;
    }

    if (normalized.threadId && normalized.threadId !== lastThreadId) {
      lastThreadId = normalized.threadId;
      patch.threadId = normalized.threadId;
      changed = true;
    }

    if (normalized.turnId && normalized.turnId !== lastTurnId) {
      lastTurnId = normalized.turnId;
      patch.turnId = normalized.turnId;
      changed = true;
    }

    if (!changed) {
      return;
    }

    upsertJob(workspaceRoot, patch);

    // Keep the per-job payload file in sync when it already exists.
    const jobFile = resolveJobFile(workspaceRoot, jobId);
    if (!fs.existsSync(jobFile)) {
      return;
    }

    const storedJob = readJobFile(jobFile);
    writeJobFile(workspaceRoot, jobId, {
      ...storedJob,
      ...patch
    });
  };
}
-
/**
 * Compose a progress callback that fans events out to stderr, a log file,
 * and/or a custom handler. Returns null when no sink is configured so callers
 * can skip reporting entirely.
 */
export function createProgressReporter({ stderr = false, logFile = null, onEvent = null } = {}) {
  if (!stderr && !logFile && !onEvent) {
    return null;
  }

  return (eventOrMessage) => {
    const event = normalizeProgressEvent(eventOrMessage);
    // Prefer the dedicated stderr text; fall back to the log message.
    const stderrMessage = event.stderrMessage ?? event.message;
    if (stderr && stderrMessage) {
      process.stderr.write(`[codex] ${stderrMessage}\n`);
    }
    appendLogLine(logFile, event.message);
    appendLogBlock(logFile, event.logTitle, event.logBody);
    onEvent?.(event);
  };
}
-
// Load a job's stored payload, or null when no payload file exists yet.
function readStoredJobOrNull(workspaceRoot, jobId) {
  const jobFile = resolveJobFile(workspaceRoot, jobId);
  return fs.existsSync(jobFile) ? readJobFile(jobFile) : null;
}
-
/**
 * Execute `runner` while keeping the job's state-index entry and payload file
 * in sync through the running/completed/failed lifecycle.
 *
 * On success (runner resolves), status is derived from `execution.exitStatus`
 * (0 => "completed", else "failed") and the result/rendered output is stored.
 * On a thrown error, the job is marked failed with the error message and the
 * error is re-thrown for the caller.
 *
 * @param {object} job - Must include `id` and `workspaceRoot`; may include
 *   `logFile`.
 * @param {Function} runner - Async function producing `{ exitStatus,
 *   threadId, turnId, payload, rendered, summary }`.
 * @param {object} options - Optional `logFile` override.
 * @returns {Promise<object>} The runner's execution result.
 */
export async function runTrackedJob(job, runner, options = {}) {
  // Mark the job running before launching so status queries see it.
  const runningRecord = {
    ...job,
    status: "running",
    startedAt: nowIso(),
    phase: "starting",
    pid: process.pid,
    logFile: options.logFile ?? job.logFile ?? null
  };
  writeJobFile(job.workspaceRoot, job.id, runningRecord);
  upsertJob(job.workspaceRoot, runningRecord);

  try {
    const execution = await runner();
    const completionStatus = execution.exitStatus === 0 ? "completed" : "failed";
    const completedAt = nowIso();
    // Full payload goes to the job file; the index only gets summary fields.
    writeJobFile(job.workspaceRoot, job.id, {
      ...runningRecord,
      status: completionStatus,
      threadId: execution.threadId ?? null,
      turnId: execution.turnId ?? null,
      pid: null,
      phase: completionStatus === "completed" ? "done" : "failed",
      completedAt,
      result: execution.payload,
      rendered: execution.rendered
    });
    upsertJob(job.workspaceRoot, {
      id: job.id,
      status: completionStatus,
      threadId: execution.threadId ?? null,
      turnId: execution.turnId ?? null,
      summary: execution.summary,
      phase: completionStatus === "completed" ? "done" : "failed",
      pid: null,
      completedAt
    });
    appendLogBlock(options.logFile ?? job.logFile ?? null, "Final output", execution.rendered);
    return execution;
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    // Preserve whatever progress updates were stored while running.
    const existing = readStoredJobOrNull(job.workspaceRoot, job.id) ?? runningRecord;
    const completedAt = nowIso();
    writeJobFile(job.workspaceRoot, job.id, {
      ...existing,
      status: "failed",
      phase: "failed",
      errorMessage,
      pid: null,
      completedAt,
      logFile: options.logFile ?? job.logFile ?? existing.logFile ?? null
    });
    upsertJob(job.workspaceRoot, {
      id: job.id,
      status: "failed",
      phase: "failed",
      pid: null,
      errorMessage,
      completedAt
    });
    throw error;
  }
}
diff --git a/plugins/codex/scripts/lib/workspace.mjs b/plugins/codex/scripts/lib/workspace.mjs
deleted file mode 100644
index 89a0060b..00000000
--- a/plugins/codex/scripts/lib/workspace.mjs
+++ /dev/null
@@ -1,9 +0,0 @@
-import { ensureGitRepository } from "./git.mjs";
-
/**
 * Resolve the workspace root used for state storage: the enclosing git
 * repository root when one is found, otherwise the provided cwd unchanged.
 */
export function resolveWorkspaceRoot(cwd) {
  try {
    return ensureGitRepository(cwd);
  } catch {
    // Not inside a git repository (or git failed) — use cwd as-is.
    return cwd;
  }
}
diff --git a/plugins/codex/scripts/session-lifecycle-hook.mjs b/plugins/codex/scripts/session-lifecycle-hook.mjs
deleted file mode 100644
index 9655eaef..00000000
--- a/plugins/codex/scripts/session-lifecycle-hook.mjs
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env node
-
-import fs from "node:fs";
-import process from "node:process";
-
-import { terminateProcessTree } from "./lib/process.mjs";
-import { BROKER_ENDPOINT_ENV } from "./lib/app-server.mjs";
-import {
- clearBrokerSession,
- LOG_FILE_ENV,
- loadBrokerSession,
- PID_FILE_ENV,
- sendBrokerShutdown,
- teardownBrokerSession
-} from "./lib/broker-lifecycle.mjs";
-import { loadState, resolveStateFile, saveState } from "./lib/state.mjs";
-import { resolveWorkspaceRoot } from "./lib/workspace.mjs";
-
-export const SESSION_ID_ENV = "CODEX_COMPANION_SESSION_ID";
-const PLUGIN_DATA_ENV = "CLAUDE_PLUGIN_DATA";
-
// Read and decode the JSON hook payload from stdin (fd 0); an empty stdin
// yields an empty object.
function readHookInput() {
  const raw = fs.readFileSync(0, "utf8").trim();
  return raw ? JSON.parse(raw) : {};
}
-
// Single-quote `value` for POSIX shells: every embedded single quote is
// rewritten as '"'"' (close quote, double-quoted literal quote, reopen).
function shellEscape(value) {
  const QUOTE_ESCAPE = `'"'"'`;
  return `'${String(value).replaceAll("'", QUOTE_ESCAPE)}'`;
}
-
// Append `export NAME='value'` to the Claude env file so later commands in
// the session inherit it. Skipped when no env file is configured or the value
// is nullish/empty.
function appendEnvVar(name, value) {
  const envFile = process.env.CLAUDE_ENV_FILE;
  if (!envFile || value == null || value === "") {
    return;
  }
  fs.appendFileSync(envFile, `export ${name}=${shellEscape(value)}\n`, "utf8");
}
-
/**
 * On session end, kill any still-running jobs belonging to `sessionId` and
 * drop all of that session's job records from the workspace state.
 */
function cleanupSessionJobs(cwd, sessionId) {
  if (!cwd || !sessionId) {
    return;
  }

  const workspaceRoot = resolveWorkspaceRoot(cwd);
  const stateFile = resolveStateFile(workspaceRoot);
  if (!fs.existsSync(stateFile)) {
    // No state was ever written for this workspace; nothing to clean up.
    return;
  }

  const state = loadState(workspaceRoot);
  const removedJobs = state.jobs.filter((job) => job.sessionId === sessionId);
  if (removedJobs.length === 0) {
    return;
  }

  // Terminate processes of jobs that are still in flight.
  for (const job of removedJobs) {
    const stillRunning = job.status === "queued" || job.status === "running";
    if (!stillRunning) {
      continue;
    }
    try {
      // NaN pid signals "no known pid" to terminateProcessTree.
      terminateProcessTree(job.pid ?? Number.NaN);
    } catch {
      // Ignore teardown failures during session shutdown.
    }
  }

  // Persist the state without this session's jobs (saveState also removes
  // their payload/log files).
  saveState(workspaceRoot, {
    ...state,
    jobs: state.jobs.filter((job) => job.sessionId !== sessionId)
  });
}
-
// SessionStart: persist the session id and plugin data dir into the Claude
// env file so later plugin commands in this session inherit them.
function handleSessionStart(input) {
  appendEnvVar(SESSION_ID_ENV, input.session_id);
  appendEnvVar(PLUGIN_DATA_ENV, process.env[PLUGIN_DATA_ENV]);
}
-
/**
 * SessionEnd: shut down the broker (graceful shutdown request first, then
 * process teardown), clean up this session's jobs, and clear the broker
 * session record.
 */
async function handleSessionEnd(input) {
  const cwd = input.cwd || process.cwd();
  // Prefer the persisted broker session; fall back to env-provided endpoints
  // when no session file exists.
  const brokerSession =
    loadBrokerSession(cwd) ??
    (process.env[BROKER_ENDPOINT_ENV]
      ? {
          endpoint: process.env[BROKER_ENDPOINT_ENV],
          pidFile: process.env[PID_FILE_ENV] ?? null,
          logFile: process.env[LOG_FILE_ENV] ?? null
        }
      : null);
  const brokerEndpoint = brokerSession?.endpoint ?? null;
  const pidFile = brokerSession?.pidFile ?? null;
  const logFile = brokerSession?.logFile ?? null;
  const sessionDir = brokerSession?.sessionDir ?? null;
  const pid = brokerSession?.pid ?? null;

  if (brokerEndpoint) {
    // Ask the broker to shut down gracefully before forcing teardown.
    await sendBrokerShutdown(brokerEndpoint);
  }

  cleanupSessionJobs(cwd, input.session_id || process.env[SESSION_ID_ENV]);
  teardownBrokerSession({
    endpoint: brokerEndpoint,
    pidFile,
    logFile,
    sessionDir,
    pid,
    killProcess: terminateProcessTree
  });
  clearBrokerSession(cwd);
}
-
// Dispatch on the hook event name; argv[2] takes precedence over the JSON
// payload's hook_event_name. Unknown events are a no-op.
async function main() {
  const input = readHookInput();
  const eventName = process.argv[2] ?? input.hook_event_name ?? "";

  if (eventName === "SessionStart") {
    handleSessionStart(input);
    return;
  }

  if (eventName === "SessionEnd") {
    await handleSessionEnd(input);
  }
}

// Top-level entry: report failures on stderr and exit nonzero.
main().catch((error) => {
  process.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`);
  process.exit(1);
});
diff --git a/plugins/codex/scripts/stop-review-gate-hook.mjs b/plugins/codex/scripts/stop-review-gate-hook.mjs
deleted file mode 100644
index 2346bdcf..00000000
--- a/plugins/codex/scripts/stop-review-gate-hook.mjs
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env node
-
-import fs from "node:fs";
-import process from "node:process";
-import path from "node:path";
-import { spawnSync } from "node:child_process";
-import { fileURLToPath } from "node:url";
-
-import { getCodexAvailability } from "./lib/codex.mjs";
-import { loadPromptTemplate, interpolateTemplate } from "./lib/prompts.mjs";
-import { getConfig, listJobs } from "./lib/state.mjs";
-import { sortJobsNewestFirst } from "./lib/job-control.mjs";
-import { SESSION_ID_ENV } from "./lib/tracked-jobs.mjs";
-import { resolveWorkspaceRoot } from "./lib/workspace.mjs";
-
// Hard ceiling for the synchronous stop-gate review subprocess (15 minutes).
const STOP_REVIEW_TIMEOUT_MS = 15 * 60 * 1000;
const SCRIPT_DIR = path.dirname(fileURLToPath(import.meta.url));
// Plugin root (one level above scripts/), used to locate prompt templates.
const ROOT_DIR = path.resolve(SCRIPT_DIR, "..");
// NOTE(review): this marker is not referenced elsewhere in this file —
// confirm it is consumed by another module before removing.
const STOP_REVIEW_TASK_MARKER = "Run a stop-gate review of the previous Claude turn.";
-
// Read and decode the JSON hook payload from stdin (fd 0); an empty stdin
// yields an empty object.
function readHookInput() {
  const raw = fs.readFileSync(0, "utf8").trim();
  return raw ? JSON.parse(raw) : {};
}
-
// Emit a hook decision object as one JSON line on stdout.
function emitDecision(payload) {
  const serialized = JSON.stringify(payload);
  process.stdout.write(`${serialized}\n`);
}
-
// Write an informational note to stderr; nullish/empty messages are ignored.
function logNote(message) {
  if (message) {
    process.stderr.write(`${message}\n`);
  }
}
-
// Restrict `jobs` to the current session's jobs. The session id comes from
// the hook payload first, then the environment; with no id at all, every job
// is returned.
function filterJobsForCurrentSession(jobs, input = {}) {
  const sessionId = input.session_id || process.env[SESSION_ID_ENV] || null;
  return sessionId ? jobs.filter((job) => job.sessionId === sessionId) : jobs;
}
-
/**
 * Build the stop-gate review prompt from the "stop-review-gate" template,
 * injecting the previous assistant message as context when one is present.
 */
function buildStopReviewPrompt(input = {}) {
  const lastAssistantMessage = String(input.last_assistant_message ?? "").trim();
  const template = loadPromptTemplate(ROOT_DIR, "stop-review-gate");
  const claudeResponseBlock = lastAssistantMessage
    ? ["Previous Claude response:", lastAssistantMessage].join("\n")
    : "";
  return interpolateTemplate(template, {
    CLAUDE_RESPONSE_BLOCK: claudeResponseBlock
  });
}
-
/**
 * Return a user-facing note when Codex is not available for the review gate,
 * or null when setup looks complete.
 */
function buildSetupNote(cwd) {
  const availability = getCodexAvailability(cwd);
  if (availability.available) {
    return null;
  }

  const detail = availability.detail ? ` ${availability.detail}.` : "";
  return `Codex is not set up for the review gate.${detail} Run /codex:setup.`;
}
-
/**
 * Interpret the stop-gate review's final output. The first line must start
 * with "ALLOW:" (session may end) or "BLOCK:" (session is blocked, with the
 * remainder of the line — or the whole text — as the reason); anything else,
 * including empty output, blocks with a diagnostic reason.
 *
 * @returns {{ok: boolean, reason: string|null}}
 */
function parseStopReviewOutput(rawOutput) {
  const text = String(rawOutput ?? "").trim();
  if (!text) {
    return {
      ok: false,
      reason:
        "The stop-time Codex review task returned no final output. Run /codex:review --wait manually or bypass the gate."
    };
  }

  const [firstLineRaw] = text.split(/\r?\n/, 1);
  const firstLine = firstLineRaw.trim();
  if (firstLine.startsWith("ALLOW:")) {
    return { ok: true, reason: null };
  }

  if (!firstLine.startsWith("BLOCK:")) {
    return {
      ok: false,
      reason:
        "The stop-time Codex review task returned an unexpected answer. Run /codex:review --wait manually or bypass the gate."
    };
  }

  // "BLOCK:" with no trailing reason falls back to quoting the full text.
  const reason = firstLine.slice("BLOCK:".length).trim() || text;
  return {
    ok: false,
    reason: `Codex stop-time review found issues that still need fixes before ending the session: ${reason}`
  };
}
-
/**
 * Run the stop-gate review synchronously via the codex-companion CLI and map
 * its outcome to an allow/block decision.
 *
 * Distinguishes timeout, nonzero exit, invalid JSON output, and a parsed
 * ALLOW/BLOCK verdict (see parseStopReviewOutput).
 *
 * @returns {{ok: boolean, reason: string|null}}
 */
function runStopReview(cwd, input = {}) {
  const scriptPath = path.join(SCRIPT_DIR, "codex-companion.mjs");
  const prompt = buildStopReviewPrompt(input);
  // Forward the session id so the child job is attributed to this session.
  const childEnv = {
    ...process.env,
    ...(input.session_id ? { [SESSION_ID_ENV]: input.session_id } : {})
  };
  const result = spawnSync(process.execPath, [scriptPath, "task", "--json", prompt], {
    cwd,
    env: childEnv,
    encoding: "utf8",
    timeout: STOP_REVIEW_TIMEOUT_MS
  });

  if (result.error?.code === "ETIMEDOUT") {
    return {
      ok: false,
      reason:
        "The stop-time Codex review task timed out after 15 minutes. Run /codex:review --wait manually or bypass the gate."
    };
  }

  if (result.status !== 0) {
    const detail = String(result.stderr || result.stdout || "").trim();
    return {
      ok: false,
      reason: detail
        ? `The stop-time Codex review task failed: ${detail}`
        : "The stop-time Codex review task failed. Run /codex:review --wait manually or bypass the gate."
    };
  }

  try {
    const payload = JSON.parse(result.stdout);
    return parseStopReviewOutput(payload?.rawOutput);
  } catch {
    return {
      ok: false,
      reason:
        "The stop-time Codex review task returned invalid JSON. Run /codex:review --wait manually or bypass the gate."
    };
  }
}
-
/**
 * Stop-hook entry point: warn about still-running jobs, and when the review
 * gate is enabled run a fresh stop-time review, emitting a "block" decision
 * if the review does not allow ending the session.
 */
function main() {
  const input = readHookInput();
  const cwd = input.cwd || process.env.CLAUDE_PROJECT_DIR || process.cwd();
  const workspaceRoot = resolveWorkspaceRoot(cwd);
  const config = getConfig(workspaceRoot);

  // Surface the newest queued/running job for this session, if any.
  const jobs = sortJobsNewestFirst(filterJobsForCurrentSession(listJobs(workspaceRoot), input));
  const runningJob = jobs.find((job) => job.status === "queued" || job.status === "running");
  const runningTaskNote = runningJob
    ? `Codex task ${runningJob.id} is still running. Check /codex:status and use /codex:cancel ${runningJob.id} if you want to stop it before ending the session.`
    : null;

  if (!config.stopReviewGate) {
    // Gate disabled: only note running jobs, never block.
    logNote(runningTaskNote);
    return;
  }

  // Gate enabled but Codex unusable: note the setup problem and let the
  // session end.
  const setupNote = buildSetupNote(cwd);
  if (setupNote) {
    logNote(setupNote);
    logNote(runningTaskNote);
    return;
  }

  const review = runStopReview(cwd, input);
  if (!review.ok) {
    emitDecision({
      decision: "block",
      reason: runningTaskNote ? `${runningTaskNote} ${review.reason}` : review.reason
    });
    return;
  }

  logNote(runningTaskNote);
}
-
// Top-level guard: on unexpected errors, log to stderr and set a nonzero exit
// code without emitting a block decision.
// NOTE(review): presumably fail-open (no decision is emitted on error) —
// confirm how the Stop hook treats a nonzero exit with empty stdout.
try {
  main();
} catch (error) {
  const message = error instanceof Error ? error.message : String(error);
  process.stderr.write(`${message}\n`);
  process.exitCode = 1;
}
diff --git a/plugins/codex/skills/codex-cli-runtime/SKILL.md b/plugins/codex/skills/codex-cli-runtime/SKILL.md
deleted file mode 100644
index 0e91bfb5..00000000
--- a/plugins/codex/skills/codex-cli-runtime/SKILL.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-name: codex-cli-runtime
-description: Internal helper contract for calling the codex-companion runtime from Claude Code
-user-invocable: false
----
-
-# Codex Runtime
-
-Use this skill only inside the `codex:codex-rescue` subagent.
-
-Primary helper:
- `node "${CLAUDE_PLUGIN_ROOT}/scripts/codex-companion.mjs" task "<task text>"`
-
-Execution rules:
-- The rescue subagent is a forwarder, not an orchestrator. Its only job is to invoke `task` once and return that stdout unchanged.
-- Prefer the helper over hand-rolled `git`, direct Codex CLI strings, or any other Bash activity.
-- Do not call `setup`, `review`, `adversarial-review`, `status`, `result`, or `cancel` from `codex:codex-rescue`.
-- Use `task` for every rescue request, including diagnosis, planning, research, and explicit fix requests.
-- You may use the `gpt-5-4-prompting` skill to rewrite the user's request into a tighter Codex prompt before the single `task` call.
-- That prompt drafting is the only Claude-side work allowed. Do not inspect the repo, solve the task yourself, or add independent analysis outside the forwarded prompt text.
-- Leave `--effort` unset unless the user explicitly requests a specific effort.
-- Leave model unset by default. Add `--model` only when the user explicitly asks for one.
-- Map `spark` to `--model gpt-5.3-codex-spark`.
-- Default to a write-capable Codex run by adding `--write` unless the user explicitly asks for read-only behavior or only wants review, diagnosis, or research without edits.
-
-Command selection:
-- Use exactly one `task` invocation per rescue handoff.
-- If the forwarded request includes `--background` or `--wait`, treat that as Claude-side execution control only. Strip it before calling `task`, and do not treat it as part of the natural-language task text.
-- If the forwarded request includes `--model`, normalize `spark` to `gpt-5.3-codex-spark` and pass it through to `task`.
-- If the forwarded request includes `--effort`, pass it through to `task`.
-- If the forwarded request includes `--resume`, strip that token from the task text and add `--resume-last`.
-- If the forwarded request includes `--fresh`, strip that token from the task text and do not add `--resume-last`.
-- `--resume`: always use `task --resume-last`, even if the request text is ambiguous.
-- `--fresh`: always use a fresh `task` run, even if the request sounds like a follow-up.
-- `--effort`: accepted values are `none`, `minimal`, `low`, `medium`, `high`, `xhigh`.
-- `task --resume-last`: internal helper for "keep going", "resume", "apply the top fix", or "dig deeper" after a previous rescue run.
-
-Safety rules:
-- Default to write-capable Codex work in `codex:codex-rescue` unless the user explicitly asks for read-only behavior.
-- Preserve the user's task text as-is apart from stripping routing flags.
-- Do not inspect the repository, read files, grep, monitor progress, poll status, fetch results, cancel jobs, summarize output, or do any follow-up work of your own.
-- Return the stdout of the `task` command exactly as-is.
-- If the Bash call fails or Codex cannot be invoked, return nothing.
diff --git a/plugins/codex/skills/codex-result-handling/SKILL.md b/plugins/codex/skills/codex-result-handling/SKILL.md
deleted file mode 100644
index e1896548..00000000
--- a/plugins/codex/skills/codex-result-handling/SKILL.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-name: codex-result-handling
-description: Internal guidance for presenting Codex helper output back to the user
-user-invocable: false
----
-
-# Codex Result Handling
-
-When the helper returns Codex output:
-- Preserve the helper's verdict, summary, findings, and next steps structure.
-- For review output, present findings first and keep them ordered by severity.
-- Use the file paths and line numbers exactly as the helper reports them.
-- Preserve evidence boundaries. If Codex marked something as an inference, uncertainty, or follow-up question, keep that distinction.
-- Preserve output sections when the prompt asked for them, such as observed facts, inferences, open questions, touched files, or next steps.
-- If there are no findings, say that explicitly and keep the residual-risk note brief.
-- If Codex made edits, say so explicitly and list the touched files when the helper provides them.
-- For `codex:codex-rescue`, do not turn a failed or incomplete Codex run into a Claude-side implementation attempt. Report the failure and stop.
-- For `codex:codex-rescue`, if Codex was never successfully invoked, do not generate a substitute answer at all.
-- CRITICAL: After presenting review findings, STOP. Do not make any code changes. Do not fix any issues. You MUST explicitly ask the user which issues, if any, they want fixed before touching a single file. Auto-applying fixes from a review is strictly forbidden, even if the fix is obvious.
-- If the helper reports malformed output or a failed Codex run, include the most actionable stderr lines and stop there instead of guessing.
-- If the helper reports that setup or authentication is required, direct the user to `/codex:setup` and do not improvise alternate auth flows.
diff --git a/plugins/codex/skills/gpt-5-4-prompting/SKILL.md b/plugins/codex/skills/gpt-5-4-prompting/SKILL.md
deleted file mode 100644
index 16669d92..00000000
--- a/plugins/codex/skills/gpt-5-4-prompting/SKILL.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-name: gpt-5-4-prompting
-description: Internal guidance for composing Codex and GPT-5.4 prompts for coding, review, diagnosis, and research tasks inside the Codex Claude Code plugin
-user-invocable: false
----
-
-# GPT-5.4 Prompting
-
-Use this skill when `codex:codex-rescue` needs to ask Codex or another GPT-5.4-based workflow for help.
-
-Prompt Codex like an operator, not a collaborator. Keep prompts compact and block-structured with XML tags. State the task, the output contract, the follow-through defaults, and the small set of extra constraints that matter.
-
-Core rules:
-- Prefer one clear task per Codex run. Split unrelated asks into separate runs.
-- Tell Codex what done looks like. Do not assume it will infer the desired end state.
-- Add explicit grounding and verification rules for any task where unsupported guesses would hurt quality.
-- Prefer better prompt contracts over raising reasoning or adding long natural-language explanations.
-- Use XML tags consistently so the prompt has stable internal structure.
-
-Default prompt recipe:
-- ``: the concrete job and the relevant repository or failure context.
-- `` or ``: exact shape, ordering, and brevity requirements.
-- ``: what Codex should do by default instead of asking routine questions.
-- `` or ``: required for debugging, implementation, or risky fixes.
-- `` or ``: required for review, research, or anything that could drift into unsupported claims.
-
-When to add blocks:
-- Coding or debugging: add `completeness_contract`, `verification_loop`, and `missing_context_gating`.
-- Review or adversarial review: add `grounding_rules`, `structured_output_contract`, and `dig_deeper_nudge`.
-- Research or recommendation tasks: add `research_mode` and `citation_rules`.
-- Write-capable tasks: add `action_safety` so Codex stays narrow and avoids unrelated refactors.
-
-How to choose prompt shape:
-- Use built-in `review` or `adversarial-review` commands when the job is reviewing local git changes. Those prompts already carry the review contract.
-- Use `task` when the task is diagnosis, planning, research, or implementation and you need to control the prompt more directly.
-- Use `task --resume-last` for follow-up instructions on the same Codex thread. Send only the delta instruction instead of restating the whole prompt unless the direction changed materially.
-
-Working rules:
-- Prefer explicit prompt contracts over vague nudges.
-- Use stable XML tag names that match the block names from the reference file.
-- Do not raise reasoning or complexity first. Tighten the prompt and verification rules before escalating.
-- Ask Codex for brief, outcome-based progress updates only when the task is long-running or tool-heavy.
-- Keep claims anchored to observed evidence. If something is a hypothesis, say so.
-
-Prompt assembly checklist:
-1. Define the exact task and scope in ``.
-2. Choose the smallest output contract that still makes the answer easy to use.
-3. Decide whether Codex should keep going by default or stop for missing high-risk details.
-4. Add verification, grounding, and safety tags only where the task needs them.
-5. Remove redundant instructions before sending the prompt.
-
-Reusable blocks live in [references/prompt-blocks.md](references/prompt-blocks.md).
-Concrete end-to-end templates live in [references/codex-prompt-recipes.md](references/codex-prompt-recipes.md).
-Common failure modes to avoid live in [references/codex-prompt-antipatterns.md](references/codex-prompt-antipatterns.md).
diff --git a/plugins/codex/skills/gpt-5-4-prompting/references/codex-prompt-antipatterns.md b/plugins/codex/skills/gpt-5-4-prompting/references/codex-prompt-antipatterns.md
deleted file mode 100644
index 10a44d6b..00000000
--- a/plugins/codex/skills/gpt-5-4-prompting/references/codex-prompt-antipatterns.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Codex Prompt Anti-Patterns
-
-Avoid these when prompting Codex or GPT-5.4.
-
-## Vague task framing
-
-Bad:
-
-```text
-Take a look at this and let me know what you think.
-```
-
-Better:
-
-```xml
-
-Review this change for material correctness and regression risks.
-
-```
-
-## Missing output contract
-
-Bad:
-
-```text
-Investigate and report back.
-```
-
-Better:
-
-```xml
-
-Return:
-1. root cause
-2. evidence
-3. smallest safe next step
-
-```
-
-## No follow-through default
-
-Bad:
-
-```text
-Debug this failure.
-```
-
-Better:
-
-```xml
-
-Keep going until you have enough evidence to identify the root cause confidently.
-
-```
-
-## Asking for more reasoning instead of a better contract
-
-Bad:
-
-```text
-Think harder and be very smart.
-```
-
-Better:
-
-```xml
-
-Before finalizing, verify that the answer matches the observed evidence and task requirements.
-
-```
-
-## Mixing unrelated jobs into one run
-
-Bad:
-
-```text
-Review this diff, fix the bug you find, update the docs, and suggest a roadmap.
-```
-
-Better:
-- Run review first.
-- Run a separate fix prompt if needed.
-- Use a third run for docs or roadmap work.
-
-## Unsupported certainty
-
-Bad:
-
-```text
-Tell me exactly why production failed.
-```
-
-Better:
-
-```xml
-
-Ground every claim in the provided context or tool outputs.
-If a point is an inference, label it clearly.
-
-```
diff --git a/plugins/codex/skills/gpt-5-4-prompting/references/codex-prompt-recipes.md b/plugins/codex/skills/gpt-5-4-prompting/references/codex-prompt-recipes.md
deleted file mode 100644
index 7711de20..00000000
--- a/plugins/codex/skills/gpt-5-4-prompting/references/codex-prompt-recipes.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# Codex Prompt Recipes
-
-Use these as starting templates for Codex task prompts or other Codex/GPT-5.4 prompt construction.
-Copy the smallest recipe that fits the task, then trim anything you do not need.
-In `codex:codex-rescue`, run diagnosis and fix-oriented recipes in write mode by default unless the user explicitly asked for read-only behavior.
-
-## Diagnosis
-
-```xml
-
-Diagnose why the failing test or command is breaking in this repository.
-Use the available repository context and tools to identify the most likely root cause.
-
-
-
-Return a compact diagnosis with:
-1. most likely root cause
-2. evidence
-3. smallest safe next step
-
-
-
-Keep going until you have enough evidence to identify the root cause confidently.
-Only stop to ask questions when a missing detail changes correctness materially.
-
-
-
-Before finalizing, verify that the proposed root cause matches the observed evidence.
-
-
-
-Do not guess missing repository facts.
-If required context is absent, state exactly what remains unknown.
-
-```
-
-## Narrow Fix
-
-```xml
-
-Implement the smallest safe fix for the identified issue in this repository.
-Preserve existing behavior outside the failing path.
-
-
-
-Return:
-1. summary of the fix
-2. touched files
-3. verification performed
-4. residual risks or follow-ups
-
-
-
-Default to the most reasonable low-risk interpretation and keep going.
-
-
-
-Resolve the task fully before stopping.
-Do not stop after identifying the issue without applying the fix.
-
-
-
-Before finalizing, verify that the fix matches the task requirements and that the changed code is coherent.
-
-
-
-Keep changes tightly scoped to the stated task.
-Avoid unrelated refactors or cleanup.
-
-```
-
-## Root-Cause Review
-
-```xml
-
-Analyze this change for the most likely correctness or regression issues.
-Focus on the provided repository context only.
-
-
-
-Return:
-1. findings ordered by severity
-2. supporting evidence for each finding
-3. brief next steps
-
-
-
-Ground every claim in the repository context or tool outputs.
-If a point is an inference, label it clearly.
-
-
-
-Check for second-order failures, empty-state handling, retries, stale state, and rollback paths before finalizing.
-
-
-
-Before finalizing, verify that each finding is material and actionable.
-
-```
-
-## Research Or Recommendation
-
-```xml
-
-Research the available options and recommend the best path for this task.
-
-
-
-Return:
-1. observed facts
-2. reasoned recommendation
-3. tradeoffs
-4. open questions
-
-
-
-Separate observed facts, reasoned inferences, and open questions.
-Prefer breadth first, then go deeper only where the evidence changes the recommendation.
-
-
-
-Back important claims with explicit references to the sources you inspected.
-Prefer primary sources.
-
-```
-
-## Prompt-Patching
-
-```xml
-
-Diagnose why this existing prompt is underperforming and propose the smallest high-leverage changes to improve it for Codex or GPT-5.4.
-
-
-
-Return:
-1. failure modes
-2. root causes in the current prompt
-3. a revised prompt
-4. why the revision should work better
-
-
-
-Base your diagnosis on the prompt text and the failure examples provided.
-Do not invent failure modes that are not supported by the examples.
-
-
-
-Before finalizing, make sure the revised prompt resolves the cited failure modes without adding contradictory instructions.
-
-```
diff --git a/plugins/codex/skills/gpt-5-4-prompting/references/prompt-blocks.md b/plugins/codex/skills/gpt-5-4-prompting/references/prompt-blocks.md
deleted file mode 100644
index cbf66940..00000000
--- a/plugins/codex/skills/gpt-5-4-prompting/references/prompt-blocks.md
+++ /dev/null
@@ -1,172 +0,0 @@
-# Prompt Blocks
-
-Use these blocks selectively when composing Codex or GPT-5.4 prompts.
-Wrap each block in the XML tag shown in its heading.
-
-## Core Wrapper
-
-### `task`
-
-Use in nearly every prompt.
-
-```xml
-
-Describe the concrete job, the relevant repository or failure context, and the expected end state.
-
-```
-
-## Output and Format
-
-### `structured_output_contract`
-
-Use when the response shape matters.
-
-```xml
-
-Return exactly the requested output shape and nothing else.
-Keep the answer compact.
-Put the highest-value findings or decisions first.
-
-```
-
-### `compact_output_contract`
-
-Use when you want concise prose instead of a schema.
-
-```xml
-
-Keep the final answer compact and structured.
-Do not include long scene-setting or repeated recap.
-
-```
-
-## Follow-through and Completion
-
-### `default_follow_through_policy`
-
-Use when Codex should act without asking routine questions.
-
-```xml
-
-Default to the most reasonable low-risk interpretation and keep going.
-Only stop to ask questions when a missing detail changes correctness, safety, or an irreversible action.
-
-```
-
-### `completeness_contract`
-
-Use for debugging, implementation, or any multi-step task that should not stop early.
-
-```xml
-
-Resolve the task fully before stopping.
-Do not stop at the first plausible answer.
-Check whether there are follow-on fixes, edge cases, or cleanup needed for a correct result.
-
-```
-
-### `verification_loop`
-
-Use when correctness matters.
-
-```xml
-
-Before finalizing, verify the result against the task requirements and the changed files or tool outputs.
-If a check fails, revise the answer instead of reporting the first draft.
-
-```
-
-## Grounding and Missing Context
-
-### `missing_context_gating`
-
-Use when Codex might otherwise guess.
-
-```xml
-
-Do not guess missing repository facts.
-If required context is absent, retrieve it with tools or state exactly what remains unknown.
-
-```
-
-### `grounding_rules`
-
-Use for review, research, or root-cause analysis.
-
-```xml
-
-Ground every claim in the provided context or your tool outputs.
-Do not present inferences as facts.
-If a point is a hypothesis, label it clearly.
-
-```
-
-### `citation_rules`
-
-Use when external research or quotes matter.
-
-```xml
-
-Back important claims with citations or explicit references to the source material you inspected.
-Prefer primary sources.
-
-```
-
-## Safety and Scope
-
-### `action_safety`
-
-Use for write-capable or potentially broad tasks.
-
-```xml
-
-Keep changes tightly scoped to the stated task.
-Avoid unrelated refactors, renames, or cleanup unless they are required for correctness.
-Call out any risky or irreversible action before taking it.
-
-```
-
-### `tool_persistence_rules`
-
-Use for long-running tool-heavy tasks.
-
-```xml
-
-Keep using tools until you have enough evidence to finish the task confidently.
-Do not abandon the workflow after a partial read when another targeted check would change the answer.
-
-```
-
-## Task-Specific Blocks
-
-### `research_mode`
-
-Use for exploration, comparisons, or recommendations.
-
-```xml
-
-Separate observed facts, reasoned inferences, and open questions.
-Prefer breadth first, then go deeper only where the evidence changes the recommendation.
-
-```
-
-### `dig_deeper_nudge`
-
-Use for review and adversarial inspection.
-
-```xml
-
-After you find the first plausible issue, check for second-order failures, empty-state behavior, retries, stale state, and rollback paths before you finalize.
-
-```
-
-### `progress_updates`
-
-Use when the run may take a while.
-
-```xml
-
-If you provide progress updates, keep them brief and outcome-based.
-Mention only major phase changes or blockers.
-
-```
diff --git a/scripts/bump-version.mjs b/scripts/bump-version.mjs
deleted file mode 100644
index 19b9888f..00000000
--- a/scripts/bump-version.mjs
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/env node
-import fs from "node:fs";
-import path from "node:path";
-import process from "node:process";
-
-const VERSION_PATTERN = /^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-[0-9A-Za-z.-]+)?(?:\+[0-9A-Za-z.-]+)?$/;
-
-const TARGETS = [
- {
- file: "package.json",
- values: [
- {
- label: "version",
- get: (json) => json.version,
- set: (json, version) => {
- json.version = version;
- }
- }
- ]
- },
- {
- file: "package-lock.json",
- values: [
- {
- label: "version",
- get: (json) => json.version,
- set: (json, version) => {
- json.version = version;
- }
- },
- {
- label: "packages[\"\"].version",
- get: (json) => json.packages?.[""]?.version,
- set: (json, version) => {
- requireObject(json.packages?.[""], "package-lock.json packages[\"\"]");
- json.packages[""].version = version;
- }
- }
- ]
- },
- {
- file: "plugins/codex/.claude-plugin/plugin.json",
- values: [
- {
- label: "version",
- get: (json) => json.version,
- set: (json, version) => {
- json.version = version;
- }
- }
- ]
- },
- {
- file: ".claude-plugin/marketplace.json",
- values: [
- {
- label: "metadata.version",
- get: (json) => json.metadata?.version,
- set: (json, version) => {
- requireObject(json.metadata, ".claude-plugin/marketplace.json metadata");
- json.metadata.version = version;
- }
- },
- {
- label: "plugins[codex].version",
- get: (json) => findMarketplacePlugin(json).version,
- set: (json, version) => {
- findMarketplacePlugin(json).version = version;
- }
- }
- ]
- }
-];
-
-function usage() {
- return [
- "Usage:",
- " node scripts/bump-version.mjs ",
- " node scripts/bump-version.mjs --check [version]",
- "",
- "Options:",
- " --check Verify manifest versions. Uses package.json when version is omitted.",
- " --root Run against a different repository root.",
- " --help Print this help."
- ].join("\n");
-}
-
-function parseArgs(argv) {
- const options = {
- check: false,
- root: process.cwd(),
- version: null
- };
-
- for (let i = 0; i < argv.length; i += 1) {
- const arg = argv[i];
-
- if (arg === "--check") {
- options.check = true;
- } else if (arg === "--root") {
- const root = argv[i + 1];
- if (!root) {
- throw new Error("--root requires a directory.");
- }
- options.root = root;
- i += 1;
- } else if (arg === "--help" || arg === "-h") {
- options.help = true;
- } else if (arg.startsWith("-")) {
- throw new Error(`Unknown option: ${arg}`);
- } else if (options.version) {
- throw new Error(`Unexpected extra argument: ${arg}`);
- } else {
- options.version = arg;
- }
- }
-
- options.root = path.resolve(options.root);
- return options;
-}
-
-function validateVersion(version) {
- if (!VERSION_PATTERN.test(version)) {
- throw new Error(`Expected a semver-like version such as 1.0.3, got: ${version}`);
- }
-}
-
-function requireObject(value, label) {
- if (!value || typeof value !== "object" || Array.isArray(value)) {
- throw new Error(`Expected ${label} to be an object.`);
- }
-}
-
-function findMarketplacePlugin(json) {
- const plugin = json.plugins?.find((entry) => entry?.name === "codex");
- requireObject(plugin, ".claude-plugin/marketplace.json plugins[codex]");
- return plugin;
-}
-
-function readJson(root, file) {
- const filePath = path.join(root, file);
- return JSON.parse(fs.readFileSync(filePath, "utf8"));
-}
-
-function writeJson(root, file, json) {
- const filePath = path.join(root, file);
- fs.writeFileSync(filePath, `${JSON.stringify(json, null, 2)}\n`);
-}
-
-function readPackageVersion(root) {
- const packageJson = readJson(root, "package.json");
- if (typeof packageJson.version !== "string") {
- throw new Error("package.json version must be a string.");
- }
- validateVersion(packageJson.version);
- return packageJson.version;
-}
-
-function checkVersions(root, expectedVersion) {
- const mismatches = [];
-
- for (const target of TARGETS) {
- const json = readJson(root, target.file);
- for (const value of target.values) {
- const actual = value.get(json);
- if (actual !== expectedVersion) {
- mismatches.push(`${target.file} ${value.label}: expected ${expectedVersion}, found ${actual ?? ""}`);
- }
- }
- }
-
- return mismatches;
-}
-
-function bumpVersion(root, version) {
- const changedFiles = [];
-
- for (const target of TARGETS) {
- const json = readJson(root, target.file);
- const before = JSON.stringify(json);
-
- for (const value of target.values) {
- value.set(json, version);
- }
-
- if (JSON.stringify(json) !== before) {
- writeJson(root, target.file, json);
- changedFiles.push(target.file);
- }
- }
-
- return changedFiles;
-}
-
-function main() {
- const options = parseArgs(process.argv.slice(2));
- if (options.help) {
- console.log(usage());
- return;
- }
-
- const version = options.version ?? (options.check ? readPackageVersion(options.root) : null);
- if (!version) {
- throw new Error(`Missing version.\n\n${usage()}`);
- }
- validateVersion(version);
-
- if (options.check) {
- const mismatches = checkVersions(options.root, version);
- if (mismatches.length > 0) {
- throw new Error(`Version metadata is out of sync:\n${mismatches.join("\n")}`);
- }
- console.log(`All version metadata matches ${version}.`);
- return;
- }
-
- const changedFiles = bumpVersion(options.root, version);
- const touched = changedFiles.length > 0 ? changedFiles.join(", ") : "no files changed";
- console.log(`Set version metadata to ${version}: ${touched}.`);
-}
-
-try {
- main();
-} catch (error) {
- console.error(error instanceof Error ? error.message : String(error));
- process.exitCode = 1;
-}
diff --git a/tests/broker-endpoint.test.mjs b/tests/broker-endpoint.test.mjs
deleted file mode 100644
index b3fc1146..00000000
--- a/tests/broker-endpoint.test.mjs
+++ /dev/null
@@ -1,22 +0,0 @@
-import test from "node:test";
-import assert from "node:assert/strict";
-
-import { createBrokerEndpoint, parseBrokerEndpoint } from "../plugins/codex/scripts/lib/broker-endpoint.mjs";
-
-test("createBrokerEndpoint uses Unix sockets on non-Windows platforms", () => {
- const endpoint = createBrokerEndpoint("/tmp/cxc-12345", "darwin");
- assert.equal(endpoint, "unix:/tmp/cxc-12345/broker.sock");
- assert.deepEqual(parseBrokerEndpoint(endpoint), {
- kind: "unix",
- path: "/tmp/cxc-12345/broker.sock"
- });
-});
-
-test("createBrokerEndpoint uses named pipes on Windows", () => {
- const endpoint = createBrokerEndpoint("C:\\\\Temp\\\\cxc-12345", "win32");
- assert.equal(endpoint, "pipe:\\\\.\\pipe\\cxc-12345-codex-app-server");
- assert.deepEqual(parseBrokerEndpoint(endpoint), {
- kind: "pipe",
- path: "\\\\.\\pipe\\cxc-12345-codex-app-server"
- });
-});
diff --git a/tests/bump-version.test.mjs b/tests/bump-version.test.mjs
deleted file mode 100644
index 205b0e9f..00000000
--- a/tests/bump-version.test.mjs
+++ /dev/null
@@ -1,88 +0,0 @@
-import fs from "node:fs";
-import path from "node:path";
-import test from "node:test";
-import assert from "node:assert/strict";
-import { fileURLToPath } from "node:url";
-
-import { makeTempDir, run } from "./helpers.mjs";
-
-const ROOT = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "..");
-const SCRIPT = path.join(ROOT, "scripts", "bump-version.mjs");
-
-function writeJson(filePath, json) {
- fs.mkdirSync(path.dirname(filePath), { recursive: true });
- fs.writeFileSync(filePath, `${JSON.stringify(json, null, 2)}\n`);
-}
-
-function readJson(filePath) {
- return JSON.parse(fs.readFileSync(filePath, "utf8"));
-}
-
-function makeVersionFixture() {
- const root = makeTempDir();
-
- writeJson(path.join(root, "package.json"), {
- name: "@openai/codex-plugin-cc",
- version: "1.0.2"
- });
- writeJson(path.join(root, "package-lock.json"), {
- name: "@openai/codex-plugin-cc",
- version: "1.0.2",
- lockfileVersion: 3,
- packages: {
- "": {
- name: "@openai/codex-plugin-cc",
- version: "1.0.2"
- }
- }
- });
- writeJson(path.join(root, "plugins", "codex", ".claude-plugin", "plugin.json"), {
- name: "codex",
- version: "1.0.2"
- });
- writeJson(path.join(root, ".claude-plugin", "marketplace.json"), {
- metadata: {
- version: "1.0.2"
- },
- plugins: [
- {
- name: "codex",
- version: "1.0.2"
- }
- ]
- });
-
- return root;
-}
-
-test("bump-version updates every release manifest", () => {
- const root = makeVersionFixture();
-
- const result = run("node", [SCRIPT, "--root", root, "1.2.3"], {
- cwd: ROOT
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(readJson(path.join(root, "package.json")).version, "1.2.3");
- assert.equal(readJson(path.join(root, "package-lock.json")).version, "1.2.3");
- assert.equal(readJson(path.join(root, "package-lock.json")).packages[""].version, "1.2.3");
- assert.equal(readJson(path.join(root, "plugins", "codex", ".claude-plugin", "plugin.json")).version, "1.2.3");
- assert.equal(readJson(path.join(root, ".claude-plugin", "marketplace.json")).metadata.version, "1.2.3");
- assert.equal(readJson(path.join(root, ".claude-plugin", "marketplace.json")).plugins[0].version, "1.2.3");
-});
-
-test("bump-version check mode reports stale metadata", () => {
- const root = makeVersionFixture();
- writeJson(path.join(root, "package.json"), {
- name: "@openai/codex-plugin-cc",
- version: "1.0.3"
- });
-
- const result = run("node", [SCRIPT, "--root", root, "--check"], {
- cwd: ROOT
- });
-
- assert.notEqual(result.status, 0);
- assert.match(result.stderr, /plugins\/codex\/\.claude-plugin\/plugin\.json version/);
- assert.match(result.stderr, /\.claude-plugin\/marketplace\.json metadata\.version/);
-});
diff --git a/tests/commands.test.mjs b/tests/commands.test.mjs
deleted file mode 100644
index ef5adb09..00000000
--- a/tests/commands.test.mjs
+++ /dev/null
@@ -1,210 +0,0 @@
-import fs from "node:fs";
-import path from "node:path";
-import test from "node:test";
-import assert from "node:assert/strict";
-import { fileURLToPath } from "node:url";
-
-const ROOT = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "..");
-const PLUGIN_ROOT = path.join(ROOT, "plugins", "codex");
-
-function read(relativePath) {
- return fs.readFileSync(path.join(PLUGIN_ROOT, relativePath), "utf8");
-}
-
-test("review command uses AskUserQuestion and background Bash while staying review-only", () => {
- const source = read("commands/review.md");
- assert.match(source, /AskUserQuestion/);
- assert.match(source, /\bBash\(/);
- assert.match(source, /Do not fix issues/i);
- assert.match(source, /review-only/i);
- assert.match(source, /return Codex's output verbatim to the user/i);
- assert.match(source, /```bash/);
- assert.match(source, /```typescript/);
- assert.match(source, /review "\$ARGUMENTS"/);
- assert.match(source, /\[--scope auto\|working-tree\|branch\]/);
- assert.match(source, /run_in_background:\s*true/);
- assert.match(source, /command:\s*`node "\$\{CLAUDE_PLUGIN_ROOT\}\/scripts\/codex-companion\.mjs" review "\$ARGUMENTS"`/);
- assert.match(source, /description:\s*"Codex review"/);
- assert.match(source, /Do not call `BashOutput`/);
- assert.match(source, /Return the command stdout verbatim, exactly as-is/i);
- assert.match(source, /git status --short --untracked-files=all/);
- assert.match(source, /git diff --shortstat/);
- assert.match(source, /Treat untracked files or directories as reviewable work/i);
- assert.match(source, /Recommend waiting only when the review is clearly tiny, roughly 1-2 files total/i);
- assert.match(source, /In every other case, including unclear size, recommend background/i);
- assert.match(source, /The companion script parses `--wait` and `--background`/i);
- assert.match(source, /Claude Code's `Bash\(..., run_in_background: true\)` is what actually detaches the run/i);
- assert.match(source, /When in doubt, run the review/i);
- assert.match(source, /\(Recommended\)/);
- assert.match(source, /does not support staged-only review, unstaged-only review, or extra focus text/i);
-});
-
-test("adversarial review command uses AskUserQuestion and background Bash while staying review-only", () => {
- const source = read("commands/adversarial-review.md");
- assert.match(source, /AskUserQuestion/);
- assert.match(source, /\bBash\(/);
- assert.match(source, /Do not fix issues/i);
- assert.match(source, /review-only/i);
- assert.match(source, /return Codex's output verbatim to the user/i);
- assert.match(source, /```bash/);
- assert.match(source, /```typescript/);
- assert.match(source, /adversarial-review "\$ARGUMENTS"/);
- assert.match(source, /\[--scope auto\|working-tree\|branch\] \[focus \.\.\.\]/);
- assert.match(source, /run_in_background:\s*true/);
- assert.match(source, /command:\s*`node "\$\{CLAUDE_PLUGIN_ROOT\}\/scripts\/codex-companion\.mjs" adversarial-review "\$ARGUMENTS"`/);
- assert.match(source, /description:\s*"Codex adversarial review"/);
- assert.match(source, /Do not call `BashOutput`/);
- assert.match(source, /Return the command stdout verbatim, exactly as-is/i);
- assert.match(source, /git status --short --untracked-files=all/);
- assert.match(source, /git diff --shortstat/);
- assert.match(source, /Treat untracked files or directories as reviewable work/i);
- assert.match(source, /Recommend waiting only when the scoped review is clearly tiny, roughly 1-2 files total/i);
- assert.match(source, /In every other case, including unclear size, recommend background/i);
- assert.match(source, /The companion script parses `--wait` and `--background`/i);
- assert.match(source, /Claude Code's `Bash\(..., run_in_background: true\)` is what actually detaches the run/i);
- assert.match(source, /When in doubt, run the review/i);
- assert.match(source, /\(Recommended\)/);
- assert.match(source, /uses the same review target selection as `\/codex:review`/i);
- assert.match(source, /supports working-tree review, branch review, and `--base [`/i);
- assert.match(source, /does not support `--scope staged` or `--scope unstaged`/i);
- assert.match(source, /can still take extra focus text after the flags/i);
-});
-
-test("continue is not exposed as a user-facing command", () => {
- const commandFiles = fs.readdirSync(path.join(PLUGIN_ROOT, "commands")).sort();
- assert.deepEqual(commandFiles, [
- "adversarial-review.md",
- "cancel.md",
- "rescue.md",
- "result.md",
- "review.md",
- "setup.md",
- "status.md"
- ]);
-});
-
-test("rescue command absorbs continue semantics", () => {
- const rescue = read("commands/rescue.md");
- const agent = read("agents/codex-rescue.md");
- const readme = fs.readFileSync(path.join(ROOT, "README.md"), "utf8");
- const runtimeSkill = read("skills/codex-cli-runtime/SKILL.md");
-
- assert.match(rescue, /The final user-visible response must be Codex's output verbatim/i);
- assert.match(rescue, /allowed-tools:\s*Bash\(node:\*\),\s*AskUserQuestion/);
- assert.match(rescue, /--background\|--wait/);
- assert.match(rescue, /--resume\|--fresh/);
- assert.match(rescue, /--model /);
- assert.match(rescue, /--effort /);
- assert.match(rescue, /task-resume-candidate --json/);
- assert.match(rescue, /AskUserQuestion/);
- assert.match(rescue, /Continue current Codex thread/);
- assert.match(rescue, /Start a new Codex thread/);
- assert.match(rescue, /run the `codex:codex-rescue` subagent in the background/i);
- assert.match(rescue, /default to foreground/i);
- assert.match(rescue, /Do not forward them to `task`/i);
- assert.match(rescue, /`--model` and `--effort` are runtime-selection flags/i);
- assert.match(rescue, /Leave `--effort` unset unless the user explicitly asks for a specific reasoning effort/i);
- assert.match(rescue, /If they ask for `spark`, map it to `gpt-5\.3-codex-spark`/i);
- assert.match(rescue, /If the request includes `--resume`, do not ask whether to continue/i);
- assert.match(rescue, /If the request includes `--fresh`, do not ask whether to continue/i);
- assert.match(rescue, /If the user chooses continue, add `--resume`/i);
- assert.match(rescue, /If the user chooses a new thread, add `--fresh`/i);
- assert.match(rescue, /thin forwarder only/i);
- assert.match(rescue, /Return the Codex companion stdout verbatim to the user/i);
- assert.match(rescue, /Do not paraphrase, summarize, rewrite, or add commentary before or after it/i);
- assert.match(rescue, /return that command's stdout as-is/i);
- assert.match(rescue, /Leave `--resume` and `--fresh` in the forwarded request/i);
- assert.match(agent, /--resume/);
- assert.match(agent, /--fresh/);
- assert.match(agent, /thin forwarding wrapper/i);
- assert.match(agent, /prefer foreground for a small, clearly bounded rescue request/i);
- assert.match(agent, /If the user did not explicitly choose `--background` or `--wait` and the task looks complicated, open-ended, multi-step, or likely to keep Codex running for a long time, prefer background execution/i);
- assert.match(agent, /Use exactly one `Bash` call/i);
- assert.match(agent, /Do not inspect the repository, read files, grep, monitor progress, poll status, fetch results, cancel jobs, summarize output, or do any follow-up work of your own/i);
- assert.match(agent, /Do not call `review`, `adversarial-review`, `status`, `result`, or `cancel`/i);
- assert.match(agent, /Leave `--effort` unset unless the user explicitly requests a specific reasoning effort/i);
- assert.match(agent, /Leave model unset by default/i);
- assert.match(agent, /If the user asks for `spark`, map that to `--model gpt-5\.3-codex-spark`/i);
- assert.match(agent, /If the user asks for a concrete model name such as `gpt-5\.4-mini`, pass it through with `--model`/i);
- assert.match(agent, /Return the stdout of the `codex-companion` command exactly as-is/i);
- assert.match(agent, /If the Bash call fails or Codex cannot be invoked, return nothing/i);
- assert.match(agent, /gpt-5-4-prompting/);
- assert.match(agent, /only to tighten the user's request into a better Codex prompt/i);
- assert.match(agent, /Do not use that skill to inspect the repository, reason through the problem yourself, draft a solution, or do any independent work/i);
- assert.match(runtimeSkill, /only job is to invoke `task` once and return that stdout unchanged/i);
- assert.match(runtimeSkill, /Do not call `setup`, `review`, `adversarial-review`, `status`, `result`, or `cancel`/i);
- assert.match(runtimeSkill, /use the `gpt-5-4-prompting` skill to rewrite the user's request into a tighter Codex prompt/i);
- assert.match(runtimeSkill, /That prompt drafting is the only Claude-side work allowed/i);
- assert.match(runtimeSkill, /Leave `--effort` unset unless the user explicitly requests a specific effort/i);
- assert.match(runtimeSkill, /Leave model unset by default/i);
- assert.match(runtimeSkill, /Map `spark` to `--model gpt-5\.3-codex-spark`/i);
- assert.match(runtimeSkill, /If the forwarded request includes `--background` or `--wait`, treat that as Claude-side execution control only/i);
- assert.match(runtimeSkill, /Strip it before calling `task`/i);
- assert.match(runtimeSkill, /`--effort`: accepted values are `none`, `minimal`, `low`, `medium`, `high`, `xhigh`/i);
- assert.match(runtimeSkill, /Do not inspect the repository, read files, grep, monitor progress, poll status, fetch results, cancel jobs, summarize output, or do any follow-up work of your own/i);
- assert.match(runtimeSkill, /If the Bash call fails or Codex cannot be invoked, return nothing/i);
- assert.match(readme, /`codex:codex-rescue` subagent/i);
- assert.match(readme, /if you do not pass `--model` or `--effort`, Codex chooses its own defaults/i);
- assert.match(readme, /--model gpt-5\.4-mini --effort medium/i);
- assert.match(readme, /`spark`, the plugin maps that to `gpt-5\.3-codex-spark`/i);
- assert.match(readme, /continue a previous Codex task/i);
- assert.match(readme, /### `\/codex:setup`/);
- assert.match(readme, /### `\/codex:review`/);
- assert.match(readme, /### `\/codex:adversarial-review`/);
- assert.match(readme, /uses the same review target selection as `\/codex:review`/i);
- assert.match(readme, /--base main challenge whether this was the right caching and retry design/);
- assert.match(readme, /### `\/codex:rescue`/);
- assert.match(readme, /### `\/codex:status`/);
- assert.match(readme, /### `\/codex:result`/);
- assert.match(readme, /### `\/codex:cancel`/);
-});
-
-test("result and cancel commands are exposed as deterministic runtime entrypoints", () => {
- const result = read("commands/result.md");
- const cancel = read("commands/cancel.md");
- const resultHandling = read("skills/codex-result-handling/SKILL.md");
-
- assert.match(result, /disable-model-invocation:\s*true/);
- assert.match(result, /codex-companion\.mjs" result \$ARGUMENTS/);
- assert.match(cancel, /disable-model-invocation:\s*true/);
- assert.match(cancel, /codex-companion\.mjs" cancel \$ARGUMENTS/);
- assert.match(resultHandling, /do not turn a failed or incomplete Codex run into a Claude-side implementation attempt/i);
- assert.match(resultHandling, /if Codex was never successfully invoked, do not generate a substitute answer at all/i);
-});
-
-test("internal docs use task terminology for rescue runs", () => {
- const runtimeSkill = read("skills/codex-cli-runtime/SKILL.md");
- const promptingSkill = read("skills/gpt-5-4-prompting/SKILL.md");
- const promptRecipes = read("skills/gpt-5-4-prompting/references/codex-prompt-recipes.md");
-
- assert.match(runtimeSkill, /codex-companion\.mjs" task ""/);
- assert.match(runtimeSkill, /Use `task` for every rescue request/i);
- assert.match(runtimeSkill, /task --resume-last/i);
- assert.match(promptingSkill, /Use `task` when the task is diagnosis/i);
- assert.match(promptRecipes, /Codex task prompts/i);
- assert.match(promptRecipes, /Use these as starting templates for Codex task prompts/i);
- assert.match(promptRecipes, /## Diagnosis/);
- assert.match(promptRecipes, /## Narrow Fix/);
-});
-
-test("hooks keep session-end cleanup and stop gating enabled", () => {
- const source = read("hooks/hooks.json");
- assert.match(source, /SessionStart/);
- assert.match(source, /SessionEnd/);
- assert.match(source, /stop-review-gate-hook\.mjs/);
- assert.match(source, /session-lifecycle-hook\.mjs/);
-});
-
-test("setup command can offer Codex install and still points users to codex login", () => {
- const setup = read("commands/setup.md");
- const readme = fs.readFileSync(path.join(ROOT, "README.md"), "utf8");
-
- assert.match(setup, /argument-hint:\s*'\[--enable-review-gate\|--disable-review-gate\]'/);
- assert.match(setup, /AskUserQuestion/);
- assert.match(setup, /npm install -g @openai\/codex/);
- assert.match(setup, /codex-companion\.mjs" setup --json \$ARGUMENTS/);
- assert.match(readme, /!codex login/);
- assert.match(readme, /offer to install Codex for you/i);
- assert.match(readme, /\/codex:setup --enable-review-gate/);
- assert.match(readme, /\/codex:setup --disable-review-gate/);
-});
diff --git a/tests/fake-codex-fixture.mjs b/tests/fake-codex-fixture.mjs
deleted file mode 100644
index debcadce..00000000
--- a/tests/fake-codex-fixture.mjs
+++ /dev/null
@@ -1,589 +0,0 @@
-import fs from "node:fs";
-import path from "node:path";
-import process from "node:process";
-
-import { writeExecutable } from "./helpers.mjs";
-
-export function installFakeCodex(binDir, behavior = "review-ok") {
- const statePath = path.join(binDir, "fake-codex-state.json");
- const scriptPath = path.join(binDir, "codex");
- const source = `#!/usr/bin/env node
-const fs = require("node:fs");
-const path = require("node:path");
-const readline = require("node:readline");
-
- const STATE_PATH = ${JSON.stringify(statePath)};
- const BEHAVIOR = ${JSON.stringify(behavior)};
- const interruptibleTurns = new Map();
-
- function loadState() {
- if (!fs.existsSync(STATE_PATH)) {
- return { nextThreadId: 1, nextTurnId: 1, appServerStarts: 0, threads: [], capabilities: null, lastInterrupt: null };
- }
- return JSON.parse(fs.readFileSync(STATE_PATH, "utf8"));
- }
-
-function saveState(state) {
- fs.writeFileSync(STATE_PATH, JSON.stringify(state, null, 2));
-}
-
-function requiresExperimental(field, message, state) {
- if (!(field in (message.params || {}))) {
- return false;
- }
- return !state.capabilities || state.capabilities.experimentalApi !== true;
-}
-
-function now() {
- return Math.floor(Date.now() / 1000);
-}
-
-function buildThread(thread) {
- return {
- id: thread.id,
- preview: thread.preview || "",
- ephemeral: Boolean(thread.ephemeral),
- modelProvider: "openai",
- createdAt: thread.createdAt,
- updatedAt: thread.updatedAt,
- status: { type: "idle" },
- path: null,
- cwd: thread.cwd,
- cliVersion: "fake-codex",
- source: "appServer",
- agentNickname: null,
- agentRole: null,
- gitInfo: null,
- name: thread.name || null,
- turns: []
- };
-}
-
-function buildTurn(id, status = "inProgress", error = null) {
- return { id, status, items: [], error };
-}
-
-function buildAccountReadResult() {
- switch (BEHAVIOR) {
- case "logged-out":
- case "refreshable-auth":
- case "auth-run-fails":
- return { account: null, requiresOpenaiAuth: true };
- case "provider-no-auth":
- case "env-key-provider":
- return { account: null, requiresOpenaiAuth: false };
- case "api-key-account-only":
- return { account: { type: "apiKey" }, requiresOpenaiAuth: true };
- default:
- return {
- account: { type: "chatgpt", email: "test@example.com", planType: "plus" },
- requiresOpenaiAuth: true
- };
- }
-}
-
-function buildConfigReadResult() {
- switch (BEHAVIOR) {
- case "provider-no-auth":
- return {
- config: { model_provider: "ollama" },
- origins: {}
- };
- case "env-key-provider":
- return {
- config: {
- model_provider: "openai-custom",
- model_providers: {
- "openai-custom": {
- name: "OpenAI custom",
- env_key: "OPENAI_API_KEY",
- requires_openai_auth: false
- }
- }
- },
- origins: {}
- };
- default:
- return {
- config: { model_provider: "openai" },
- origins: {}
- };
- }
-}
-
-function send(message) {
- process.stdout.write(JSON.stringify(message) + "\\n");
-}
-
-function nextThread(state, cwd, ephemeral) {
- const thread = {
- id: "thr_" + state.nextThreadId++,
- cwd: cwd || process.cwd(),
- name: null,
- preview: "",
- ephemeral: Boolean(ephemeral),
- createdAt: now(),
- updatedAt: now()
- };
- state.threads.unshift(thread);
- saveState(state);
- return thread;
-}
-
-function ensureThread(state, threadId) {
- const thread = state.threads.find((candidate) => candidate.id === threadId);
- if (!thread) {
- throw new Error("unknown thread " + threadId);
- }
- return thread;
-}
-
-function nextTurnId(state) {
- const turnId = "turn_" + state.nextTurnId++;
- saveState(state);
- return turnId;
-}
-
-function emitTurnCompleted(threadId, turnId, item) {
- const items = Array.isArray(item) ? item : [item];
- send({ method: "turn/started", params: { threadId, turn: buildTurn(turnId) } });
- for (const entry of items) {
- if (entry && entry.started) {
- send({ method: "item/started", params: { threadId, turnId, item: entry.started } });
- }
- if (entry && entry.completed) {
- send({ method: "item/completed", params: { threadId, turnId, item: entry.completed } });
- }
- }
- send({ method: "turn/completed", params: { threadId, turn: buildTurn(turnId, "completed") } });
-}
-
-function emitTurnCompletedLater(threadId, turnId, item, delayMs) {
- setTimeout(() => {
- emitTurnCompleted(threadId, turnId, item);
- }, delayMs);
-}
-
-function nativeReviewText(target) {
- if (target.type === "baseBranch") {
- return "Reviewed changes against " + target.branch + ".\\nNo material issues found.";
- }
- if (target.type === "custom") {
- return "Reviewed custom target.\\nNo material issues found.";
- }
- return "Reviewed uncommitted changes.\\nNo material issues found.";
-}
-
-function structuredReviewPayload(prompt) {
- if (prompt.includes("adversarial software review")) {
- if (BEHAVIOR === "adversarial-clean") {
- return JSON.stringify({
- verdict: "approve",
- summary: "No material issues found.",
- findings: [],
- next_steps: []
- });
- }
-
- return JSON.stringify({
- verdict: "needs-attention",
- summary: "One adversarial concern surfaced.",
- findings: [
- {
- severity: "high",
- title: "Missing empty-state guard",
- body: "The change assumes data is always present.",
- file: "src/app.js",
- line_start: 4,
- line_end: 6,
- confidence: 0.87,
- recommendation: "Handle empty collections before indexing."
- }
- ],
- next_steps: ["Add an empty-state test."]
- });
- }
-
- if (BEHAVIOR === "invalid-json") {
- return "not valid json";
- }
-
- return JSON.stringify({
- verdict: "approve",
- summary: "No material issues found.",
- findings: [],
- next_steps: []
- });
-}
-
-function taskPayload(prompt, resume) {
- if (prompt.includes("") && prompt.includes("Only review the work from the previous Claude turn.")) {
- if (BEHAVIOR === "adversarial-clean") {
- return "ALLOW: No blocking issues found in the previous turn.";
- }
- return "BLOCK: Missing empty-state guard in src/app.js:4-6.";
- }
-
- if (resume || prompt.includes("Continue from the current thread state") || prompt.includes("follow up")) {
- return "Resumed the prior run.\\nFollow-up prompt accepted.";
- }
-
- return "Handled the requested task.\\nTask prompt accepted.";
-}
-
-const args = process.argv.slice(2);
-if (args[0] === "--version") {
- console.log("codex-cli test");
- process.exit(0);
-}
-if (args[0] === "app-server" && args[1] === "--help") {
- console.log("fake app-server help");
- process.exit(0);
-}
-if (args[0] === "login" && args[1] === "status") {
- if (BEHAVIOR === "logged-out" || BEHAVIOR === "refreshable-auth" || BEHAVIOR === "auth-run-fails" || BEHAVIOR === "provider-no-auth" || BEHAVIOR === "env-key-provider" || BEHAVIOR === "api-key-account-only") {
- console.error("not authenticated");
- process.exit(1);
- }
- console.log("logged in");
- process.exit(0);
-}
-if (args[0] === "login") {
- process.exit(0);
-}
-if (args[0] !== "app-server") {
- process.exit(1);
-}
-const bootState = loadState();
-bootState.appServerStarts = (bootState.appServerStarts || 0) + 1;
-saveState(bootState);
-
-const rl = readline.createInterface({ input: process.stdin });
-rl.on("line", (line) => {
- if (!line.trim()) {
- return;
- }
-
- const message = JSON.parse(line);
- const state = loadState();
-
- try {
- switch (message.method) {
- case "initialize":
- state.capabilities = message.params.capabilities || null;
- saveState(state);
- send({ id: message.id, result: { userAgent: "fake-codex-app-server" } });
- break;
-
- case "initialized":
- break;
-
- case "account/read":
- send({ id: message.id, result: buildAccountReadResult() });
- break;
-
- case "config/read":
- if (BEHAVIOR === "config-read-fails") {
- throw new Error("config/read failed for cwd");
- }
- send({ id: message.id, result: buildConfigReadResult() });
- break;
-
- case "thread/start": {
- if (BEHAVIOR === "auth-run-fails") {
- throw new Error("authentication expired; run codex login");
- }
- if (requiresExperimental("persistExtendedHistory", message, state) || requiresExperimental("persistFullHistory", message, state)) {
- throw new Error("thread/start.persistFullHistory requires experimentalApi capability");
- }
- const thread = nextThread(state, message.params.cwd, message.params.ephemeral);
- send({ id: message.id, result: { thread: buildThread(thread), model: message.params.model || "gpt-5.4", modelProvider: "openai", serviceTier: null, cwd: thread.cwd, approvalPolicy: "never", sandbox: { type: "readOnly", access: { type: "fullAccess" }, networkAccess: false }, reasoningEffort: null } });
- send({ method: "thread/started", params: { thread: { id: thread.id } } });
- break;
- }
-
- case "thread/name/set": {
- const thread = ensureThread(state, message.params.threadId);
- thread.name = message.params.name;
- thread.updatedAt = now();
- saveState(state);
- send({ id: message.id, result: {} });
- break;
- }
-
- case "thread/list": {
- let threads = state.threads.slice();
- if (message.params.cwd) {
- threads = threads.filter((thread) => thread.cwd === message.params.cwd);
- }
- if (message.params.searchTerm) {
- threads = threads.filter((thread) => (thread.name || "").includes(message.params.searchTerm));
- }
- threads.sort((left, right) => right.updatedAt - left.updatedAt);
- send({ id: message.id, result: { data: threads.map(buildThread), nextCursor: null } });
- break;
- }
-
- case "thread/resume": {
- if (requiresExperimental("persistExtendedHistory", message, state) || requiresExperimental("persistFullHistory", message, state)) {
- throw new Error("thread/resume.persistFullHistory requires experimentalApi capability");
- }
- const thread = ensureThread(state, message.params.threadId);
- thread.updatedAt = now();
- saveState(state);
- send({ id: message.id, result: { thread: buildThread(thread), model: message.params.model || "gpt-5.4", modelProvider: "openai", serviceTier: null, cwd: thread.cwd, approvalPolicy: "never", sandbox: { type: "readOnly", access: { type: "fullAccess" }, networkAccess: false }, reasoningEffort: null } });
- break;
- }
-
- case "review/start": {
- const thread = ensureThread(state, message.params.threadId);
- let reviewThread = thread;
- if (message.params.delivery === "detached") {
- reviewThread = nextThread(state, thread.cwd, true);
- send({ method: "thread/started", params: { thread: { id: reviewThread.id } } });
- }
- const turnId = nextTurnId(state);
- send({ id: message.id, result: { turn: buildTurn(turnId), reviewThreadId: reviewThread.id } });
- emitTurnCompleted(reviewThread.id, turnId, [
- {
- started: { type: "enteredReviewMode", id: turnId, review: "current changes" }
- },
- ...(BEHAVIOR === "with-reasoning"
- ? [
- {
- completed: {
- type: "reasoning",
- id: "reasoning_" + turnId,
- summary: [{ text: "Reviewed the changed files and checked the likely regression paths." }],
- content: []
- }
- }
- ]
- : []),
- {
- completed: { type: "exitedReviewMode", id: turnId, review: nativeReviewText(message.params.target) }
- }
- ]);
- break;
- }
-
- case "turn/start": {
- const thread = ensureThread(state, message.params.threadId);
- const prompt = (message.params.input || [])
- .filter((item) => item.type === "text")
- .map((item) => item.text)
- .join("\\n");
- const turnId = nextTurnId(state);
- thread.updatedAt = now();
- state.lastTurnStart = {
- threadId: message.params.threadId,
- turnId,
- model: message.params.model ?? null,
- effort: message.params.effort ?? null,
- prompt
- };
- saveState(state);
- send({ id: message.id, result: { turn: buildTurn(turnId) } });
-
- const payload = message.params.outputSchema && message.params.outputSchema.properties && message.params.outputSchema.properties.verdict
- ? structuredReviewPayload(prompt)
- : taskPayload(prompt, thread.name && thread.name.startsWith("Codex Companion Task") && prompt.includes("Continue from the current thread state"));
-
- if (
- BEHAVIOR === "with-subagent" ||
- BEHAVIOR === "with-late-subagent-message" ||
- BEHAVIOR === "with-subagent-no-main-turn-completed"
- ) {
- const subThread = nextThread(state, thread.cwd, true);
- const subThreadRecord = ensureThread(state, subThread.id);
- subThreadRecord.name = "design-challenger";
- saveState(state);
- const subTurnId = nextTurnId(state);
-
- send({ method: "thread/started", params: { thread: { ...buildThread(subThreadRecord), name: "design-challenger", agentNickname: "design-challenger" } } });
- send({ method: "turn/started", params: { threadId: thread.id, turn: buildTurn(turnId) } });
- send({
- method: "item/started",
- params: {
- threadId: thread.id,
- turnId,
- item: {
- type: "collabAgentToolCall",
- id: "collab_" + turnId,
- tool: "wait",
- status: "inProgress",
- senderThreadId: thread.id,
- receiverThreadIds: [subThread.id],
- prompt: "Challenge the implementation approach",
- model: null,
- reasoningEffort: null,
- agentsStates: {
- [subThread.id]: { status: "inProgress", message: "Investigating design tradeoffs" }
- }
- }
- }
- });
- if (BEHAVIOR === "with-late-subagent-message") {
- send({
- method: "item/completed",
- params: {
- threadId: thread.id,
- turnId,
- item: { type: "agentMessage", id: "msg_" + turnId, text: payload, phase: "final_answer" }
- }
- });
- }
- send({ method: "turn/started", params: { threadId: subThread.id, turn: buildTurn(subTurnId) } });
- send({
- method: "item/completed",
- params: {
- threadId: subThread.id,
- turnId: subTurnId,
- item: {
- type: "reasoning",
- id: "reasoning_" + subTurnId,
- summary: [{ text: "Questioned the retry strategy and the cache invalidation boundaries." }],
- content: []
- }
- }
- });
- send({
- method: "item/completed",
- params: {
- threadId: subThread.id,
- turnId: subTurnId,
- item: {
- type: "agentMessage",
- id: "msg_" + subTurnId,
- text: "The design assumes retries are harmless, but they can duplicate side effects without stronger idempotency guarantees.",
- phase: "analysis"
- }
- }
- });
- send({ method: "turn/completed", params: { threadId: subThread.id, turn: buildTurn(subTurnId, "completed") } });
- send({
- method: "item/completed",
- params: {
- threadId: thread.id,
- turnId,
- item: {
- type: "collabAgentToolCall",
- id: "collab_" + turnId,
- tool: "wait",
- status: "completed",
- senderThreadId: thread.id,
- receiverThreadIds: [subThread.id],
- prompt: "Challenge the implementation approach",
- model: null,
- reasoningEffort: null,
- agentsStates: {
- [subThread.id]: { status: "completed", message: "Finished" }
- }
- }
- }
- });
- if (BEHAVIOR !== "with-late-subagent-message") {
- send({
- method: "item/completed",
- params: {
- threadId: thread.id,
- turnId,
- item: { type: "agentMessage", id: "msg_" + turnId, text: payload, phase: "final_answer" }
- }
- });
- }
- if (BEHAVIOR !== "with-subagent-no-main-turn-completed") {
- send({ method: "turn/completed", params: { threadId: thread.id, turn: buildTurn(turnId, "completed") } });
- }
- break;
- }
-
- const items = [
- ...(BEHAVIOR === "with-reasoning"
- ? [
- {
- completed: {
- type: "reasoning",
- id: "reasoning_" + turnId,
- summary: [{ text: "Inspected the prompt, gathered evidence, and checked the highest-risk paths first." }],
- content: []
- }
- }
- ]
- : []),
- {
- completed: { type: "agentMessage", id: "msg_" + turnId, text: payload, phase: "final_answer" }
- }
- ];
-
- if (BEHAVIOR === "interruptible-slow-task") {
- send({ method: "turn/started", params: { threadId: thread.id, turn: buildTurn(turnId) } });
- const timer = setTimeout(() => {
- if (!interruptibleTurns.has(turnId)) {
- return;
- }
- interruptibleTurns.delete(turnId);
- for (const entry of items) {
- if (entry && entry.completed) {
- send({ method: "item/completed", params: { threadId: thread.id, turnId, item: entry.completed } });
- }
- }
- send({ method: "turn/completed", params: { threadId: thread.id, turn: buildTurn(turnId, "completed") } });
- }, 5000);
- interruptibleTurns.set(turnId, { threadId: thread.id, timer });
- } else if (BEHAVIOR === "slow-task") {
- emitTurnCompletedLater(thread.id, turnId, items, 400);
- } else {
- emitTurnCompleted(thread.id, turnId, items);
- }
- break;
- }
-
- case "turn/interrupt": {
- state.lastInterrupt = {
- threadId: message.params.threadId,
- turnId: message.params.turnId
- };
- saveState(state);
- const pending = interruptibleTurns.get(message.params.turnId);
- if (pending) {
- clearTimeout(pending.timer);
- interruptibleTurns.delete(message.params.turnId);
- send({
- method: "turn/completed",
- params: {
- threadId: pending.threadId,
- turn: buildTurn(message.params.turnId, "interrupted")
- }
- });
- }
- send({ id: message.id, result: {} });
- break;
- }
-
- default:
- send({ id: message.id, error: { code: -32601, message: "Unsupported method: " + message.method } });
- break;
- }
- } catch (error) {
- send({ id: message.id, error: { code: -32000, message: error.message } });
- }
-});
-`;
- writeExecutable(scriptPath, source);
-
- // On Windows, npm global binaries are invoked via .cmd wrappers.
- // Create a codex.cmd so the fake binary is discoverable by spawn with shell: true.
- if (process.platform === "win32") {
- const cmdWrapper = `@echo off\r\nnode "%~dp0codex" %*\r\n`;
- fs.writeFileSync(path.join(binDir, "codex.cmd"), cmdWrapper, { encoding: "utf8" });
- }
-}
-
-export function buildEnv(binDir) {
- const sep = process.platform === "win32" ? ";" : ":";
- return {
- ...process.env,
- PATH: `${binDir}${sep}${process.env.PATH}`
- };
-}
diff --git a/tests/git.test.mjs b/tests/git.test.mjs
deleted file mode 100644
index 14ff2576..00000000
--- a/tests/git.test.mjs
+++ /dev/null
@@ -1,183 +0,0 @@
-import fs from "node:fs";
-import path from "node:path";
-import test from "node:test";
-import assert from "node:assert/strict";
-
-import { collectReviewContext, resolveReviewTarget } from "../plugins/codex/scripts/lib/git.mjs";
-import { initGitRepo, makeTempDir, run } from "./helpers.mjs";
-
-test("resolveReviewTarget prefers working tree when repo is dirty", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v1');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v2');\n");
-
- const target = resolveReviewTarget(cwd, {});
-
- assert.equal(target.mode, "working-tree");
-});
-
-test("resolveReviewTarget falls back to branch diff when repo is clean", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v1');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- run("git", ["checkout", "-b", "feature/test"], { cwd });
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v2');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "change"], { cwd });
-
- const target = resolveReviewTarget(cwd, {});
- const context = collectReviewContext(cwd, target);
-
- assert.equal(target.mode, "branch");
- assert.match(target.label, /main/);
- assert.match(context.content, /Branch Diff/);
-});
-
-test("resolveReviewTarget honors explicit base overrides", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v1');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- run("git", ["checkout", "-b", "feature/test"], { cwd });
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v2');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "change"], { cwd });
-
- const target = resolveReviewTarget(cwd, { base: "main" });
-
- assert.equal(target.mode, "branch");
- assert.equal(target.baseRef, "main");
-});
-
-test("resolveReviewTarget requires an explicit base when no default branch can be inferred", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v1');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- run("git", ["branch", "-m", "feature-only"], { cwd });
-
- assert.throws(
- () => resolveReviewTarget(cwd, {}),
- /Unable to detect the repository default branch\. Pass --base ][ or use --scope working-tree\./
- );
-});
-
-test("collectReviewContext keeps inline diffs for tiny adversarial reviews", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v1');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('INLINE_MARKER');\n");
-
- const target = resolveReviewTarget(cwd, {});
- const context = collectReviewContext(cwd, target);
-
- assert.equal(context.inputMode, "inline-diff");
- assert.equal(context.fileCount, 1);
- assert.match(context.collectionGuidance, /primary evidence/i);
- assert.match(context.content, /INLINE_MARKER/);
-});
-
-test("collectReviewContext skips untracked directories in working tree review", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v1');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
-
- const nestedRepoDir = path.join(cwd, ".claude", "worktrees", "agent-test");
- fs.mkdirSync(nestedRepoDir, { recursive: true });
- initGitRepo(nestedRepoDir);
-
- const target = resolveReviewTarget(cwd, { scope: "working-tree" });
- const context = collectReviewContext(cwd, target);
-
- assert.match(context.content, /### \.claude\/worktrees\/agent-test\/\n\(skipped: directory\)/);
-});
-
-test("collectReviewContext skips broken untracked symlinks instead of crashing", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "console.log('v1');\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- fs.symlinkSync("missing-target", path.join(cwd, "broken-link"));
-
- const target = resolveReviewTarget(cwd, {});
- const context = collectReviewContext(cwd, target);
-
- assert.equal(target.mode, "working-tree");
- assert.match(context.content, /### broken-link/);
- assert.match(context.content, /skipped: broken symlink or unreadable file/i);
-});
-
-test("collectReviewContext falls back to lightweight context for larger adversarial reviews", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- for (const name of ["a.js", "b.js", "c.js"]) {
- fs.writeFileSync(path.join(cwd, name), `export const value = "${name}-v1";\n`);
- }
- run("git", ["add", "a.js", "b.js", "c.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- fs.writeFileSync(path.join(cwd, "a.js"), 'export const value = "SELF_COLLECT_MARKER_A";\n');
- fs.writeFileSync(path.join(cwd, "b.js"), 'export const value = "SELF_COLLECT_MARKER_B";\n');
- fs.writeFileSync(path.join(cwd, "c.js"), 'export const value = "SELF_COLLECT_MARKER_C";\n');
-
- const target = resolveReviewTarget(cwd, {});
- const context = collectReviewContext(cwd, target);
-
- assert.equal(context.inputMode, "self-collect");
- assert.equal(context.fileCount, 3);
- assert.match(context.collectionGuidance, /lightweight summary/i);
- assert.match(context.collectionGuidance, /read-only git commands/i);
- assert.doesNotMatch(context.content, /SELF_COLLECT_MARKER_[ABC]/);
- assert.match(context.content, /## Changed Files/);
-});
-
-test("collectReviewContext falls back to lightweight context for oversized single-file diffs", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- fs.writeFileSync(path.join(cwd, "app.js"), "export const value = 'v1';\n");
- run("git", ["add", "app.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- fs.writeFileSync(path.join(cwd, "app.js"), `export const value = '${"x".repeat(512)}';\n`);
-
- const target = resolveReviewTarget(cwd, {});
- const context = collectReviewContext(cwd, target, { maxInlineDiffBytes: 128 });
-
- assert.equal(context.fileCount, 1);
- assert.equal(context.inputMode, "self-collect");
- assert.ok(context.diffBytes > 128);
- assert.doesNotMatch(context.content, /xxx/);
- assert.match(context.content, /## Changed Files/);
-});
-
-test("collectReviewContext keeps untracked file content in lightweight working tree context", () => {
- const cwd = makeTempDir();
- initGitRepo(cwd);
- for (const name of ["a.js", "b.js"]) {
- fs.writeFileSync(path.join(cwd, name), `export const value = "${name}-v1";\n`);
- }
- run("git", ["add", "a.js", "b.js"], { cwd });
- run("git", ["commit", "-m", "init"], { cwd });
- fs.writeFileSync(path.join(cwd, "a.js"), 'export const value = "TRACKED_MARKER_A";\n');
- fs.writeFileSync(path.join(cwd, "b.js"), 'export const value = "TRACKED_MARKER_B";\n');
- fs.writeFileSync(path.join(cwd, "new-risk.js"), 'export const value = "UNTRACKED_RISK_MARKER";\n');
-
- const target = resolveReviewTarget(cwd, {});
- const context = collectReviewContext(cwd, target);
-
- assert.equal(context.inputMode, "self-collect");
- assert.equal(context.fileCount, 3);
- assert.doesNotMatch(context.content, /TRACKED_MARKER_[AB]/);
- assert.match(context.content, /## Untracked Files/);
- assert.match(context.content, /UNTRACKED_RISK_MARKER/);
-});
diff --git a/tests/helpers.mjs b/tests/helpers.mjs
deleted file mode 100644
index 945ae0e7..00000000
--- a/tests/helpers.mjs
+++ /dev/null
@@ -1,32 +0,0 @@
-import fs from "node:fs";
-import os from "node:os";
-import path from "node:path";
-import process from "node:process";
-import { spawnSync } from "node:child_process";
-
-export function makeTempDir(prefix = "codex-plugin-test-") {
- return fs.mkdtempSync(path.join(os.tmpdir(), prefix));
-}
-
-export function writeExecutable(filePath, source) {
- fs.writeFileSync(filePath, source, { encoding: "utf8", mode: 0o755 });
-}
-
-export function run(command, args, options = {}) {
- return spawnSync(command, args, {
- cwd: options.cwd,
- env: options.env,
- encoding: "utf8",
- input: options.input,
- shell: process.platform === "win32" && !path.isAbsolute(command),
- windowsHide: true
- });
-}
-
-export function initGitRepo(cwd) {
- run("git", ["init", "-b", "main"], { cwd });
- run("git", ["config", "user.name", "Codex Plugin Tests"], { cwd });
- run("git", ["config", "user.email", "tests@example.com"], { cwd });
- run("git", ["config", "commit.gpgsign", "false"], { cwd });
- run("git", ["config", "tag.gpgsign", "false"], { cwd });
-}
diff --git a/tests/process.test.mjs b/tests/process.test.mjs
deleted file mode 100644
index 80e0715b..00000000
--- a/tests/process.test.mjs
+++ /dev/null
@@ -1,55 +0,0 @@
-import test from "node:test";
-import assert from "node:assert/strict";
-
-import { terminateProcessTree } from "../plugins/codex/scripts/lib/process.mjs";
-
-test("terminateProcessTree uses taskkill on Windows", () => {
- let captured = null;
- const outcome = terminateProcessTree(1234, {
- platform: "win32",
- runCommandImpl(command, args) {
- captured = { command, args };
- return {
- command,
- args,
- status: 0,
- signal: null,
- stdout: "",
- stderr: "",
- error: null
- };
- },
- killImpl() {
- throw new Error("kill fallback should not run");
- }
- });
-
- assert.deepEqual(captured, {
- command: "taskkill",
- args: ["/PID", "1234", "/T", "/F"]
- });
- assert.equal(outcome.delivered, true);
- assert.equal(outcome.method, "taskkill");
-});
-
-test("terminateProcessTree treats missing Windows processes as already stopped", () => {
- const outcome = terminateProcessTree(1234, {
- platform: "win32",
- runCommandImpl(command, args) {
- return {
- command,
- args,
- status: 128,
- signal: null,
- stdout: "ERROR: The process \"1234\" not found.",
- stderr: "",
- error: null
- };
- }
- });
-
- assert.equal(outcome.attempted, true);
- assert.equal(outcome.method, "taskkill");
- assert.equal(outcome.result.status, 128);
- assert.match(outcome.result.stdout, /not found/i);
-});
diff --git a/tests/render.test.mjs b/tests/render.test.mjs
deleted file mode 100644
index ab68038e..00000000
--- a/tests/render.test.mjs
+++ /dev/null
@@ -1,59 +0,0 @@
-import test from "node:test";
-import assert from "node:assert/strict";
-
-import { renderReviewResult, renderStoredJobResult } from "../plugins/codex/scripts/lib/render.mjs";
-
-test("renderReviewResult degrades gracefully when JSON is missing required review fields", () => {
- const output = renderReviewResult(
- {
- parsed: {
- verdict: "approve",
- summary: "Looks fine."
- },
- rawOutput: JSON.stringify({
- verdict: "approve",
- summary: "Looks fine."
- }),
- parseError: null
- },
- {
- reviewLabel: "Adversarial Review",
- targetLabel: "working tree diff"
- }
- );
-
- assert.match(output, /Codex returned JSON with an unexpected review shape\./);
- assert.match(output, /Missing array `findings`\./);
- assert.match(output, /Raw final message:/);
-});
-
-test("renderStoredJobResult prefers rendered output for structured review jobs", () => {
- const output = renderStoredJobResult(
- {
- id: "review-123",
- status: "completed",
- title: "Codex Adversarial Review",
- jobClass: "review",
- threadId: "thr_123"
- },
- {
- threadId: "thr_123",
- rendered: "# Codex Adversarial Review\n\nTarget: working tree diff\nVerdict: needs-attention\n",
- result: {
- result: {
- verdict: "needs-attention",
- summary: "One issue.",
- findings: [],
- next_steps: []
- },
- rawOutput:
- '{"verdict":"needs-attention","summary":"One issue.","findings":[],"next_steps":[]}'
- }
- }
- );
-
- assert.match(output, /^# Codex Adversarial Review/);
- assert.doesNotMatch(output, /^\{/);
- assert.match(output, /Codex session ID: thr_123/);
- assert.match(output, /Resume in Codex: codex resume thr_123/);
-});
diff --git a/tests/runtime.test.mjs b/tests/runtime.test.mjs
deleted file mode 100644
index 90408372..00000000
--- a/tests/runtime.test.mjs
+++ /dev/null
@@ -1,2123 +0,0 @@
-import fs from "node:fs";
-import path from "node:path";
-import test from "node:test";
-import assert from "node:assert/strict";
-import { spawn } from "node:child_process";
-import { fileURLToPath } from "node:url";
-
-import { buildEnv, installFakeCodex } from "./fake-codex-fixture.mjs";
-import { initGitRepo, makeTempDir, run } from "./helpers.mjs";
-import { loadBrokerSession, saveBrokerSession } from "../plugins/codex/scripts/lib/broker-lifecycle.mjs";
-import { resolveStateDir } from "../plugins/codex/scripts/lib/state.mjs";
-
-const ROOT = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "..");
-const PLUGIN_ROOT = path.join(ROOT, "plugins", "codex");
-const SCRIPT = path.join(PLUGIN_ROOT, "scripts", "codex-companion.mjs");
-const STOP_HOOK = path.join(PLUGIN_ROOT, "scripts", "stop-review-gate-hook.mjs");
-const SESSION_HOOK = path.join(PLUGIN_ROOT, "scripts", "session-lifecycle-hook.mjs");
-
-async function waitFor(predicate, { timeoutMs = 5000, intervalMs = 50 } = {}) {
- const start = Date.now();
- while (Date.now() - start < timeoutMs) {
- const value = await predicate();
- if (value) {
- return value;
- }
- await new Promise((resolve) => setTimeout(resolve, intervalMs));
- }
- throw new Error("Timed out waiting for condition.");
-}
-
-test("setup reports ready when fake codex is installed and authenticated", () => {
- const binDir = makeTempDir();
- installFakeCodex(binDir);
-
- const result = run("node", [SCRIPT, "setup", "--json"], {
- cwd: ROOT,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.ready, true);
- assert.match(payload.codex.detail, /advanced runtime available/);
- assert.equal(payload.sessionRuntime.mode, "direct");
-});
-
-test("setup is ready without npm when Codex is already installed and authenticated", () => {
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- fs.symlinkSync(process.execPath, path.join(binDir, "node"));
-
- const result = run("node", [SCRIPT, "setup", "--json"], {
- cwd: ROOT,
- env: {
- ...process.env,
- PATH: binDir
- }
- });
-
- assert.equal(result.status, 0, result.stderr);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.ready, true);
- assert.equal(payload.npm.available, false);
- assert.equal(payload.codex.available, true);
- assert.equal(payload.auth.loggedIn, true);
-});
-
-test("setup trusts app-server API key auth even when login status alone would fail", () => {
- const binDir = makeTempDir();
- installFakeCodex(binDir, "api-key-account-only");
-
- const result = run("node", [SCRIPT, "setup", "--json"], {
- cwd: ROOT,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.ready, true);
- assert.equal(payload.auth.loggedIn, true);
- assert.equal(payload.auth.authMethod, "apiKey");
- assert.equal(payload.auth.source, "app-server");
- assert.match(payload.auth.detail, /API key configured \(unverified\)/);
-});
-
-test("setup is ready when the active provider does not require OpenAI login", () => {
- const binDir = makeTempDir();
- installFakeCodex(binDir, "provider-no-auth");
-
- const result = run("node", [SCRIPT, "setup", "--json"], {
- cwd: ROOT,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.ready, true);
- assert.equal(payload.auth.loggedIn, true);
- assert.equal(payload.auth.authMethod, null);
- assert.equal(payload.auth.source, "app-server");
- assert.match(payload.auth.detail, /configured and does not require OpenAI authentication/i);
-});
-
-test("setup treats custom providers with app-server-ready config as ready", () => {
- const binDir = makeTempDir();
- installFakeCodex(binDir, "env-key-provider");
-
- const result = run("node", [SCRIPT, "setup", "--json"], {
- cwd: ROOT,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.ready, true);
- assert.equal(payload.auth.loggedIn, true);
- assert.equal(payload.auth.authMethod, null);
- assert.equal(payload.auth.source, "app-server");
- assert.match(payload.auth.detail, /configured and does not require OpenAI authentication/i);
-});
-
-test("setup reports not ready when app-server config read fails", () => {
- const binDir = makeTempDir();
- installFakeCodex(binDir, "config-read-fails");
-
- const result = run("node", [SCRIPT, "setup", "--json"], {
- cwd: ROOT,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.ready, false);
- assert.equal(payload.auth.loggedIn, false);
- assert.equal(payload.auth.source, "app-server");
- assert.match(payload.auth.detail, /config\/read failed for cwd/);
-});
-
-test("review renders a no-findings result from app-server review/start", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.mkdirSync(path.join(repo, "src"));
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = 1;\n");
- run("git", ["add", "src/app.js"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = 2;\n");
-
- const result = run("node", [SCRIPT, "review"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0);
- assert.match(result.stdout, /Reviewed uncommitted changes/);
- assert.match(result.stdout, /No material issues found/);
-});
-
-test("task runs when the active provider does not require OpenAI login", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "provider-no-auth");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "check auth preflight"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /Handled the requested task/);
-});
-
-test("task runs without auth preflight so Codex can refresh an expired session", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "refreshable-auth");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "check refreshable auth"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /Handled the requested task/);
-});
-
-test("task reports the actual Codex auth error when the run is rejected", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "auth-run-fails");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "check failed auth"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.notEqual(result.status, 0);
- assert.match(result.stderr, /authentication expired; run codex login/);
-});
-
-test("review accepts the quoted raw argument style for built-in base-branch review", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.mkdirSync(path.join(repo, "src"));
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = 1;\n");
- run("git", ["add", "src/app.js"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = 2;\n");
-
- const result = run("node", [SCRIPT, "review", "--base main"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0);
- assert.match(result.stdout, /Reviewed changes against main/);
- assert.match(result.stdout, /No material issues found/);
-});
-
-test("adversarial review renders structured findings over app-server turn/start", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.mkdirSync(path.join(repo, "src"));
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = items[0];\n");
- run("git", ["add", "src/app.js"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = items[0].id;\n");
-
- const result = run("node", [SCRIPT, "adversarial-review"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0);
- assert.match(result.stdout, /Missing empty-state guard/);
-});
-
-test("adversarial review accepts the same base-branch targeting as review", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.mkdirSync(path.join(repo, "src"));
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = items[0];\n");
- run("git", ["add", "src/app.js"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "src", "app.js"), "export const value = items[0].id;\n");
-
- const result = run("node", [SCRIPT, "adversarial-review", "--base", "main"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /Branch review against main|against main/i);
- assert.match(result.stdout, /Missing empty-state guard/);
-});
-
-test("adversarial review asks Codex to inspect larger diffs itself", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.mkdirSync(path.join(repo, "src"));
- for (const name of ["a.js", "b.js", "c.js"]) {
- fs.writeFileSync(path.join(repo, "src", name), `export const value = "${name}-v1";\n`);
- }
- run("git", ["add", "src/a.js", "src/b.js", "src/c.js"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "src", "a.js"), 'export const value = "PROMPT_SELF_COLLECT_A";\n');
- fs.writeFileSync(path.join(repo, "src", "b.js"), 'export const value = "PROMPT_SELF_COLLECT_B";\n');
- fs.writeFileSync(path.join(repo, "src", "c.js"), 'export const value = "PROMPT_SELF_COLLECT_C";\n');
-
- const result = run("node", [SCRIPT, "adversarial-review"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const state = JSON.parse(fs.readFileSync(path.join(binDir, "fake-codex-state.json"), "utf8"));
- assert.match(state.lastTurnStart.prompt, /lightweight summary/i);
- assert.match(state.lastTurnStart.prompt, /read-only git commands/i);
- assert.doesNotMatch(state.lastTurnStart.prompt, /PROMPT_SELF_COLLECT_[ABC]/);
-});
-
-test("review includes reasoning output when the app server returns it", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-reasoning");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const result = run("node", [SCRIPT, "review"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /Reasoning:/);
- assert.match(result.stdout, /Reviewed the changed files and checked the likely regression paths first|Reviewed the changed files and checked the likely regression paths/i);
-});
-
-test("review logs reasoning summaries and review output to the job log", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-reasoning");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const result = run("node", [SCRIPT, "review"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const stateDir = resolveStateDir(repo);
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- const log = fs.readFileSync(state.jobs[0].logFile, "utf8");
- assert.match(log, /Reasoning summary/);
- assert.match(log, /Reviewed the changed files and checked the likely regression paths/);
- assert.match(log, /Review output/);
- assert.match(log, /Reviewed uncommitted changes\./);
-});
-
-test("task --resume-last resumes the latest persisted task thread", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const firstRun = run("node", [SCRIPT, "task", "initial task"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(firstRun.status, 0, firstRun.stderr);
-
- const result = run("node", [SCRIPT, "task", "--resume-last", "follow up"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(result.stdout, "Resumed the prior run.\nFollow-up prompt accepted.\n");
-});
-
-test("task-resume-candidate returns the latest rescue thread from the current session", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "task-current",
- status: "completed",
- title: "Codex Task",
- jobClass: "task",
- sessionId: "sess-current",
- threadId: "thr_current",
- summary: "Investigate the flaky test",
- updatedAt: "2026-03-24T20:00:00.000Z"
- },
- {
- id: "task-other-session",
- status: "completed",
- title: "Codex Task",
- jobClass: "task",
- sessionId: "sess-other",
- threadId: "thr_other",
- summary: "Old rescue run",
- updatedAt: "2026-03-24T20:05:00.000Z"
- },
- {
- id: "review-current",
- status: "completed",
- title: "Codex Review",
- jobClass: "review",
- sessionId: "sess-current",
- threadId: "thr_review",
- summary: "Review main...HEAD",
- updatedAt: "2026-03-24T20:10:00.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SCRIPT, "task-resume-candidate", "--json"], {
- cwd: workspace,
- env: {
- ...process.env,
- CODEX_COMPANION_SESSION_ID: "sess-current"
- }
- });
-
- assert.equal(result.status, 0, result.stderr);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.available, true);
- assert.equal(payload.sessionId, "sess-current");
- assert.equal(payload.candidate.id, "task-current");
- assert.equal(payload.candidate.threadId, "thr_current");
-});
-
-test("task --resume-last does not resume a task from another Claude session", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const statePath = path.join(binDir, "fake-codex-state.json");
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const otherEnv = {
- ...buildEnv(binDir),
- CODEX_COMPANION_SESSION_ID: "sess-other"
- };
- const currentEnv = {
- ...buildEnv(binDir),
- CODEX_COMPANION_SESSION_ID: "sess-current"
- };
-
- const firstRun = run("node", [SCRIPT, "task", "initial task"], {
- cwd: repo,
- env: otherEnv
- });
- assert.equal(firstRun.status, 0, firstRun.stderr);
-
- const candidate = run("node", [SCRIPT, "task-resume-candidate", "--json"], {
- cwd: repo,
- env: currentEnv
- });
- assert.equal(candidate.status, 0, candidate.stderr);
- assert.equal(JSON.parse(candidate.stdout).available, false);
-
- const resume = run("node", [SCRIPT, "task", "--resume-last", "follow up"], {
- cwd: repo,
- env: currentEnv
- });
- assert.equal(resume.status, 1);
- assert.match(resume.stderr, /No previous Codex task thread was found for this repository\./);
-
- const fakeState = JSON.parse(fs.readFileSync(statePath, "utf8"));
- assert.equal(fakeState.lastTurnStart.threadId, "thr_1");
- assert.equal(fakeState.lastTurnStart.prompt, "initial task");
-});
-
-test("task --resume-last ignores running tasks from other Claude sessions", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const stateDir = resolveStateDir(repo);
- fs.mkdirSync(path.join(stateDir, "jobs"), { recursive: true });
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "task-other-running",
- status: "running",
- title: "Codex Task",
- jobClass: "task",
- sessionId: "sess-other",
- threadId: "thr_other",
- summary: "Other session active task",
- updatedAt: "2026-03-24T20:05:00.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const env = {
- ...buildEnv(binDir),
- CODEX_COMPANION_SESSION_ID: "sess-current"
- };
- const status = run("node", [SCRIPT, "status", "--json"], {
- cwd: repo,
- env
- });
- assert.equal(status.status, 0, status.stderr);
- assert.deepEqual(JSON.parse(status.stdout).running, []);
-
- const resume = run("node", [SCRIPT, "task", "--resume-last", "follow up"], {
- cwd: repo,
- env
- });
- assert.equal(resume.status, 1);
- assert.match(resume.stderr, /No previous Codex task thread was found for this repository\./);
-});
-
-test("session start hook exports the Claude session id and plugin data dir for later commands", () => {
- const repo = makeTempDir();
- const envFile = path.join(makeTempDir(), "claude-env.sh");
- fs.writeFileSync(envFile, "", "utf8");
- const pluginDataDir = makeTempDir();
-
- const result = run("node", [SESSION_HOOK, "SessionStart"], {
- cwd: repo,
- env: {
- ...process.env,
- CLAUDE_ENV_FILE: envFile,
- CLAUDE_PLUGIN_DATA: pluginDataDir
- },
- input: JSON.stringify({
- hook_event_name: "SessionStart",
- session_id: "sess-current",
- cwd: repo
- })
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(
- fs.readFileSync(envFile, "utf8"),
- `export CODEX_COMPANION_SESSION_ID='sess-current'\nexport CLAUDE_PLUGIN_DATA='${pluginDataDir}'\n`
- );
-});
-
-test("write task output focuses on the Codex result without generic follow-up hints", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "--write", "fix the failing test"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(result.stdout, "Handled the requested task.\nTask prompt accepted.\n");
-});
-
-test("task --resume acts like --resume-last without leaking the flag into the prompt", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const statePath = path.join(binDir, "fake-codex-state.json");
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const firstRun = run("node", [SCRIPT, "task", "initial task"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(firstRun.status, 0, firstRun.stderr);
-
- const result = run("node", [SCRIPT, "task", "--resume", "follow up"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const fakeState = JSON.parse(fs.readFileSync(statePath, "utf8"));
- assert.equal(fakeState.lastTurnStart.threadId, "thr_1");
- assert.equal(fakeState.lastTurnStart.prompt, "follow up");
-});
-
-test("task --fresh is treated as routing control and does not leak into the prompt", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const statePath = path.join(binDir, "fake-codex-state.json");
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "--fresh", "diagnose the flaky test"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const fakeState = JSON.parse(fs.readFileSync(statePath, "utf8"));
- assert.equal(fakeState.lastTurnStart.prompt, "diagnose the flaky test");
-});
-
-test("task forwards model selection and reasoning effort to app-server turn/start", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const statePath = path.join(binDir, "fake-codex-state.json");
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "--model", "spark", "--effort", "low", "diagnose the failing test"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const fakeState = JSON.parse(fs.readFileSync(statePath, "utf8"));
- assert.equal(fakeState.lastTurnStart.model, "gpt-5.3-codex-spark");
- assert.equal(fakeState.lastTurnStart.effort, "low");
-});
-
-test("task logs reasoning summaries and assistant messages to the job log", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-reasoning");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "investigate the failing test"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const stateDir = resolveStateDir(repo);
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- const log = fs.readFileSync(state.jobs[0].logFile, "utf8");
- assert.match(log, /Reasoning summary/);
- assert.match(log, /Inspected the prompt, gathered evidence, and checked the highest-risk paths first/);
- assert.match(log, /Assistant message/);
- assert.match(log, /Handled the requested task/);
-});
-
-test("task logs subagent reasoning and messages with a subagent prefix", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-subagent");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "challenge the current design"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- const stateDir = resolveStateDir(repo);
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- const log = fs.readFileSync(state.jobs[0].logFile, "utf8");
- assert.match(log, /Starting subagent design-challenger via collaboration tool: wait\./);
- assert.match(log, /Subagent design-challenger reasoning:/);
- assert.match(log, /Questioned the retry strategy and the cache invalidation boundaries\./);
- assert.match(log, /Subagent design-challenger:/);
- assert.match(
- log,
- /The design assumes retries are harmless, but they can duplicate side effects without stronger idempotency guarantees\./
- );
-});
-
-test("task waits for the main thread to complete before returning the final result", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-subagent");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "challenge the current design"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(result.stdout, "Handled the requested task.\nTask prompt accepted.\n");
-});
-
-test("task ignores later subagent messages when choosing the final returned output", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-late-subagent-message");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "challenge the current design"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(result.stdout, "Handled the requested task.\nTask prompt accepted.\n");
-});
-
-test("task can finish after subagent work even if the parent turn/completed event is missing", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-subagent-no-main-turn-completed");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "task", "challenge the current design"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(result.stdout, "Handled the requested task.\nTask prompt accepted.\n");
-});
-
-test("task using the shared broker still completes when Codex spawns subagents", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "with-subagent");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const env = buildEnv(binDir);
- const review = run("node", [SCRIPT, "review"], {
- cwd: repo,
- env
- });
- assert.equal(review.status, 0, review.stderr);
-
- if (!loadBrokerSession(repo)) {
- return;
- }
-
- const result = run("node", [SCRIPT, "task", "challenge the current design"], {
- cwd: repo,
- env
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(result.stdout, "Handled the requested task.\nTask prompt accepted.\n");
-});
-
-test("task --background enqueues a detached worker and exposes per-job status", async () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "slow-task");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const launched = run("node", [SCRIPT, "task", "--background", "--json", "investigate the failing test"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(launched.status, 0, launched.stderr);
- const launchPayload = JSON.parse(launched.stdout);
- assert.equal(launchPayload.status, "queued");
- assert.match(launchPayload.jobId, /^task-/);
-
- const waitedStatus = run(
- "node",
- [SCRIPT, "status", launchPayload.jobId, "--wait", "--timeout-ms", "15000", "--json"],
- {
- cwd: repo,
- env: buildEnv(binDir)
- }
- );
-
- assert.equal(waitedStatus.status, 0, waitedStatus.stderr);
- const waitedPayload = JSON.parse(waitedStatus.stdout);
- assert.equal(waitedPayload.job.id, launchPayload.jobId);
- assert.equal(waitedPayload.job.status, "completed");
-
- const resultPayload = await waitFor(() => {
- const result = run("node", [SCRIPT, "result", launchPayload.jobId, "--json"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- if (result.status !== 0) {
- return null;
- }
- return JSON.parse(result.stdout);
- });
-
- assert.equal(resultPayload.job.id, launchPayload.jobId);
- assert.equal(resultPayload.job.status, "completed");
- assert.match(resultPayload.storedJob.rendered, /Handled the requested task/);
-});
-
-test("review rejects focus text because it is native-review only", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const result = run("node", [SCRIPT, "review", "--scope working-tree focus on auth"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status > 0, true);
- assert.match(result.stderr, /does not support custom focus text/i);
- assert.match(result.stderr, /\/codex:adversarial-review focus on auth/i);
-});
-
-test("review rejects staged-only scope because it is native-review only", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
- run("git", ["add", "README.md"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "review", "--scope", "staged"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status > 0, true);
- assert.match(result.stderr, /Unsupported review scope "staged"/i);
- assert.match(result.stderr, /Use one of: auto, working-tree, branch, or pass --base ][/i);
-});
-
-test("adversarial review rejects staged-only scope to match review target selection", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
- run("git", ["add", "README.md"], { cwd: repo });
-
- const result = run("node", [SCRIPT, "adversarial-review", "--scope", "staged"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status > 0, true);
- assert.match(result.stderr, /Unsupported review scope "staged"/i);
- assert.match(result.stderr, /Use one of: auto, working-tree, branch, or pass --base ][/i);
-});
-
-test("review accepts --background while still running as a tracked review job", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const launched = run("node", [SCRIPT, "review", "--background", "--json"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(launched.status, 0, launched.stderr);
- const launchPayload = JSON.parse(launched.stdout);
- assert.equal(launchPayload.review, "Review");
- assert.match(launchPayload.codex.stdout, /No material issues found/);
-
- const status = run("node", [SCRIPT, "status"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(status.status, 0, status.stderr);
- assert.match(status.stdout, /# Codex Status/);
- assert.match(status.stdout, /Codex Review/);
- assert.match(status.stdout, /completed/);
-});
-
-test("status shows phases, hints, and the latest finished job", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const logFile = path.join(jobsDir, "review-live.log");
- fs.writeFileSync(
- logFile,
- [
- "[2026-03-18T15:30:00.000Z] Starting Codex Review.",
- "[2026-03-18T15:30:01.000Z] Thread ready (thr_1).",
- "[2026-03-18T15:30:02.000Z] Turn started (turn_1).",
- "[2026-03-18T15:30:03.000Z] Reviewer started: current changes"
- ].join("\n"),
- "utf8"
- );
-
- const finishedJobFile = path.join(jobsDir, "review-done.json");
- fs.writeFileSync(
- finishedJobFile,
- JSON.stringify(
- {
- id: "review-done",
- status: "completed",
- title: "Codex Review",
- rendered: "# Codex Review\n\nReviewed uncommitted changes.\nNo material issues found.\n"
- },
- null,
- 2
- ),
- "utf8"
- );
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "review-live",
- kind: "review",
- kindLabel: "review",
- status: "running",
- title: "Codex Review",
- jobClass: "review",
- phase: "reviewing",
- threadId: "thr_1",
- summary: "Review working tree diff",
- logFile,
- createdAt: "2026-03-18T15:30:00.000Z",
- updatedAt: "2026-03-18T15:30:03.000Z"
- },
- {
- id: "review-done",
- status: "completed",
- title: "Codex Review",
- jobClass: "review",
- threadId: "thr_done",
- summary: "Review main...HEAD",
- createdAt: "2026-03-18T15:10:00.000Z",
- startedAt: "2026-03-18T15:10:05.000Z",
- completedAt: "2026-03-18T15:11:10.000Z",
- updatedAt: "2026-03-18T15:11:10.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SCRIPT, "status"], {
- cwd: workspace
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /Active jobs:/);
- assert.match(result.stdout, /\| Job \| Kind \| Status \| Phase \| Elapsed \| Codex Session ID \| Summary \| Actions \|/);
- assert.match(result.stdout, /\| review-live \| review \| running \| reviewing \| .* \| thr_1 \| Review working tree diff \|/);
- assert.match(result.stdout, /`\/codex:status review-live`]
`\/codex:cancel review-live`/);
- assert.match(result.stdout, /Live details:/);
- assert.match(result.stdout, /Latest finished:/);
- assert.match(result.stdout, /Progress:/);
- assert.match(result.stdout, /Session runtime: direct startup/);
- assert.match(result.stdout, /Phase: reviewing/);
- assert.match(result.stdout, /Codex session ID: thr_1/);
- assert.match(result.stdout, /Resume in Codex: codex resume thr_1/);
- assert.match(result.stdout, /Thread ready \(thr_1\)\./);
- assert.match(result.stdout, /Reviewer started: current changes/);
- assert.match(result.stdout, /Duration: 1m 5s/);
- assert.match(result.stdout, /Codex session ID: thr_done/);
- assert.match(result.stdout, /Resume in Codex: codex resume thr_done/);
-});
-
-test("status without a job id only shows jobs from the current Claude session", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const currentLog = path.join(jobsDir, "review-current.log");
- const otherLog = path.join(jobsDir, "review-other.log");
- fs.writeFileSync(currentLog, "[2026-03-18T15:30:00.000Z] Reviewer started: current changes\n", "utf8");
- fs.writeFileSync(otherLog, "[2026-03-18T15:31:00.000Z] Reviewer started: old changes\n", "utf8");
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "review-current",
- kind: "review",
- kindLabel: "review",
- status: "running",
- title: "Codex Review",
- jobClass: "review",
- phase: "reviewing",
- sessionId: "sess-current",
- threadId: "thr_current",
- summary: "Current session review",
- logFile: currentLog,
- createdAt: "2026-03-18T15:30:00.000Z",
- updatedAt: "2026-03-18T15:30:00.000Z"
- },
- {
- id: "review-other",
- kind: "review",
- kindLabel: "review",
- status: "completed",
- title: "Codex Review",
- jobClass: "review",
- sessionId: "sess-other",
- threadId: "thr_other",
- summary: "Previous session review",
- createdAt: "2026-03-18T15:20:00.000Z",
- startedAt: "2026-03-18T15:20:05.000Z",
- completedAt: "2026-03-18T15:21:00.000Z",
- updatedAt: "2026-03-18T15:21:00.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SCRIPT, "status"], {
- cwd: workspace,
- env: {
- ...process.env,
- CODEX_COMPANION_SESSION_ID: "sess-current"
- }
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.deepEqual(
- [...new Set(result.stdout.match(/review-(?:current|other)/g) ?? [])],
- ["review-current"]
- );
-});
-
-test("status preserves adversarial review kind labels", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const logFile = path.join(jobsDir, "review-adv.log");
- fs.writeFileSync(logFile, "[2026-03-18T15:30:00.000Z] Reviewer started: adversarial review\n", "utf8");
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "review-adv-live",
- kind: "adversarial-review",
- status: "running",
- title: "Codex Adversarial Review",
- jobClass: "review",
- phase: "reviewing",
- threadId: "thr_adv_live",
- summary: "Adversarial review current changes",
- logFile,
- createdAt: "2026-03-18T15:30:00.000Z",
- updatedAt: "2026-03-18T15:30:00.000Z"
- },
- {
- id: "review-adv",
- kind: "adversarial-review",
- status: "completed",
- title: "Codex Adversarial Review",
- jobClass: "review",
- threadId: "thr_adv_done",
- summary: "Adversarial review working tree diff",
- createdAt: "2026-03-18T15:10:00.000Z",
- startedAt: "2026-03-18T15:10:05.000Z",
- completedAt: "2026-03-18T15:11:10.000Z",
- updatedAt: "2026-03-18T15:11:10.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SCRIPT, "status"], {
- cwd: workspace
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /\| review-adv-live \| adversarial-review \| running \| reviewing \|/);
- assert.match(result.stdout, /- review-adv \| completed \| adversarial-review \| Codex Adversarial Review/);
- assert.match(result.stdout, /Codex session ID: thr_adv_live/);
- assert.match(result.stdout, /Codex session ID: thr_adv_done/);
-});
-
-test("status --wait times out cleanly when a job is still active", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const logFile = path.join(jobsDir, "task-live.log");
- fs.writeFileSync(logFile, "[2026-03-18T15:30:00.000Z] Starting Codex Task.\n", "utf8");
- fs.writeFileSync(
- path.join(jobsDir, "task-live.json"),
- JSON.stringify(
- {
- id: "task-live",
- status: "running",
- title: "Codex Task",
- logFile
- },
- null,
- 2
- ),
- "utf8"
- );
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "task-live",
- status: "running",
- title: "Codex Task",
- jobClass: "task",
- summary: "Investigate flaky test",
- logFile,
- createdAt: "2026-03-18T15:30:00.000Z",
- startedAt: "2026-03-18T15:30:01.000Z",
- updatedAt: "2026-03-18T15:30:02.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SCRIPT, "status", "task-live", "--wait", "--timeout-ms", "25", "--json"], {
- cwd: workspace
- });
-
- assert.equal(result.status, 0, result.stderr);
- const payload = JSON.parse(result.stdout);
- assert.equal(payload.job.id, "task-live");
- assert.equal(payload.job.status, "running");
- assert.equal(payload.waitTimedOut, true);
-});
-
-test("result returns the stored output for the latest finished job by default", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- fs.writeFileSync(
- path.join(jobsDir, "review-finished.json"),
- JSON.stringify(
- {
- id: "review-finished",
- status: "completed",
- title: "Codex Review",
- rendered: "# Codex Review\n\nReviewed uncommitted changes.\nNo material issues found.\n",
- result: {
- codex: {
- stdout: "Reviewed uncommitted changes.\nNo material issues found."
- }
- },
- threadId: "thr_review_finished"
- },
- null,
- 2
- ),
- "utf8"
- );
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "review-finished",
- status: "completed",
- title: "Codex Review",
- jobClass: "review",
- threadId: "thr_review_finished",
- summary: "Review working tree diff",
- createdAt: "2026-03-18T15:00:00.000Z",
- updatedAt: "2026-03-18T15:01:00.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SCRIPT, "result"], {
- cwd: workspace
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(
- result.stdout,
- "Reviewed uncommitted changes.\nNo material issues found.\n\nCodex session ID: thr_review_finished\nResume in Codex: codex resume thr_review_finished\n"
- );
-});
-
-test("result without a job id prefers the latest finished job from the current Claude session", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- fs.writeFileSync(
- path.join(jobsDir, "review-current.json"),
- JSON.stringify(
- {
- id: "review-current",
- status: "completed",
- title: "Codex Review",
- threadId: "thr_current",
- result: {
- codex: {
- stdout: "Current session output."
- }
- }
- },
- null,
- 2
- ),
- "utf8"
- );
-
- fs.writeFileSync(
- path.join(jobsDir, "review-other.json"),
- JSON.stringify(
- {
- id: "review-other",
- status: "completed",
- title: "Codex Review",
- threadId: "thr_other",
- result: {
- codex: {
- stdout: "Old session output."
- }
- }
- },
- null,
- 2
- ),
- "utf8"
- );
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "review-current",
- status: "completed",
- title: "Codex Review",
- jobClass: "review",
- sessionId: "sess-current",
- threadId: "thr_current",
- summary: "Current session review",
- createdAt: "2026-03-18T15:10:00.000Z",
- updatedAt: "2026-03-18T15:11:00.000Z"
- },
- {
- id: "review-other",
- status: "completed",
- title: "Codex Review",
- jobClass: "review",
- sessionId: "sess-other",
- threadId: "thr_other",
- summary: "Old session review",
- createdAt: "2026-03-18T15:20:00.000Z",
- updatedAt: "2026-03-18T15:21:00.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SCRIPT, "result"], {
- cwd: workspace,
- env: {
- ...process.env,
- CODEX_COMPANION_SESSION_ID: "sess-current"
- }
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(
- result.stdout,
- "Current session output.\n\nCodex session ID: thr_current\nResume in Codex: codex resume thr_current\n"
- );
-});
-
-test("result for a finished write-capable task returns the raw Codex final response", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const taskRun = run("node", [SCRIPT, "task", "--write", "fix the flaky integration test"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(taskRun.status, 0, taskRun.stderr);
-
- const result = run("node", [SCRIPT, "result"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /^Handled the requested task\.\nTask prompt accepted\.\n/);
- assert.match(result.stdout, /Codex session ID: thr_[a-z0-9]+/i);
- assert.match(result.stdout, /Resume in Codex: codex resume thr_[a-z0-9]+/i);
-});
-
-test("cancel stops an active background job and marks it cancelled", async (t) => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const sleeper = spawn(process.execPath, ["-e", "setInterval(() => {}, 1000)"], {
- cwd: workspace,
- detached: true,
- stdio: "ignore"
- });
- sleeper.unref();
-
- t.after(() => {
- try {
- process.kill(-sleeper.pid, "SIGTERM");
- } catch {
- try {
- process.kill(sleeper.pid, "SIGTERM");
- } catch {
- // Ignore missing process.
- }
- }
- });
-
- const logFile = path.join(jobsDir, "task-live.log");
- const jobFile = path.join(jobsDir, "task-live.json");
- fs.writeFileSync(logFile, "[2026-03-18T15:30:00.000Z] Starting Codex Task.\n", "utf8");
- fs.writeFileSync(
- jobFile,
- JSON.stringify(
- {
- id: "task-live",
- status: "running",
- title: "Codex Task",
- logFile
- },
- null,
- 2
- ),
- "utf8"
- );
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "task-live",
- status: "running",
- title: "Codex Task",
- jobClass: "task",
- summary: "Investigate flaky test",
- pid: sleeper.pid,
- logFile,
- createdAt: "2026-03-18T15:30:00.000Z",
- startedAt: "2026-03-18T15:30:01.000Z",
- updatedAt: "2026-03-18T15:30:02.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const cancelResult = run("node", [SCRIPT, "cancel", "task-live", "--json"], {
- cwd: workspace
- });
-
- assert.equal(cancelResult.status, 0, cancelResult.stderr);
- assert.equal(JSON.parse(cancelResult.stdout).status, "cancelled");
-
- await waitFor(() => {
- try {
- process.kill(sleeper.pid, 0);
- return false;
- } catch (error) {
- return error?.code === "ESRCH";
- }
- });
-
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- const cancelled = state.jobs.find((job) => job.id === "task-live");
- assert.equal(cancelled.status, "cancelled");
- assert.equal(cancelled.pid, null);
-
- const stored = JSON.parse(fs.readFileSync(jobFile, "utf8"));
- assert.equal(stored.status, "cancelled");
- assert.match(fs.readFileSync(logFile, "utf8"), /Cancelled by user/);
-});
-
-test("cancel without a job id ignores active jobs from other Claude sessions", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const logFile = path.join(jobsDir, "task-other.log");
- fs.writeFileSync(logFile, "", "utf8");
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "task-other",
- status: "running",
- title: "Codex Task",
- jobClass: "task",
- sessionId: "sess-other",
- summary: "Other session run",
- updatedAt: "2026-03-24T20:05:00.000Z",
- logFile
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const env = {
- ...process.env,
- CODEX_COMPANION_SESSION_ID: "sess-current"
- };
- const status = run("node", [SCRIPT, "status", "--json"], {
- cwd: workspace,
- env
- });
- assert.equal(status.status, 0, status.stderr);
- assert.deepEqual(JSON.parse(status.stdout).running, []);
-
- const cancel = run("node", [SCRIPT, "cancel", "--json"], {
- cwd: workspace,
- env
- });
- assert.equal(cancel.status, 1);
- assert.match(cancel.stderr, /No active Codex jobs to cancel for this session\./);
-
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- assert.equal(state.jobs[0].status, "running");
-});
-
-test("cancel with a job id can still target an active job from another Claude session", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const logFile = path.join(jobsDir, "task-other.log");
- fs.writeFileSync(logFile, "", "utf8");
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "task-other",
- status: "running",
- title: "Codex Task",
- jobClass: "task",
- sessionId: "sess-other",
- summary: "Other session run",
- updatedAt: "2026-03-24T20:05:00.000Z",
- logFile
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const env = {
- ...process.env,
- CODEX_COMPANION_SESSION_ID: "sess-current"
- };
- const cancel = run("node", [SCRIPT, "cancel", "task-other", "--json"], {
- cwd: workspace,
- env
- });
- assert.equal(cancel.status, 0, cancel.stderr);
- assert.equal(JSON.parse(cancel.stdout).jobId, "task-other");
-
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- assert.equal(state.jobs[0].status, "cancelled");
-});
-
-test("cancel sends turn interrupt to the shared app-server before killing a brokered task", async () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const fakeStatePath = path.join(binDir, "fake-codex-state.json");
- installFakeCodex(binDir, "interruptible-slow-task");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const env = buildEnv(binDir);
- const launched = run("node", [SCRIPT, "task", "--background", "--json", "investigate the flaky worker timeout"], {
- cwd: repo,
- env
- });
-
- assert.equal(launched.status, 0, launched.stderr);
- const launchPayload = JSON.parse(launched.stdout);
- const jobId = launchPayload.jobId;
- assert.ok(jobId);
-
- const stateDir = resolveStateDir(repo);
- const runningJob = await waitFor(() => {
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- const job = state.jobs.find((candidate) => candidate.id === jobId);
- if (job?.status === "running" && job.threadId && job.turnId) {
- return job;
- }
- return null;
- }, { timeoutMs: 15000 });
-
- const cancelResult = run("node", [SCRIPT, "cancel", jobId, "--json"], {
- cwd: repo,
- env
- });
-
- assert.equal(cancelResult.status, 0, cancelResult.stderr);
- const cancelPayload = JSON.parse(cancelResult.stdout);
- assert.equal(cancelPayload.status, "cancelled");
- assert.equal(cancelPayload.turnInterruptAttempted, true);
- assert.equal(cancelPayload.turnInterrupted, true);
-
- await waitFor(() => {
- const fakeState = JSON.parse(fs.readFileSync(fakeStatePath, "utf8"));
- return fakeState.lastInterrupt ?? null;
- });
-
- const fakeState = JSON.parse(fs.readFileSync(fakeStatePath, "utf8"));
- assert.deepEqual(fakeState.lastInterrupt, {
- threadId: runningJob.threadId,
- turnId: runningJob.turnId
- });
-
- const cleanup = run("node", [SESSION_HOOK, "SessionEnd"], {
- cwd: repo,
- env,
- input: JSON.stringify({
- hook_event_name: "SessionEnd",
- cwd: repo
- })
- });
- assert.equal(cleanup.status, 0, cleanup.stderr);
-});
-
-test("session end fully cleans up jobs for the ending session", async (t) => {
- const repo = makeTempDir();
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const stateDir = resolveStateDir(repo);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const completedLog = path.join(jobsDir, "completed.log");
- const runningLog = path.join(jobsDir, "running.log");
- const otherSessionLog = path.join(jobsDir, "other.log");
- const completedJobFile = path.join(jobsDir, "review-completed.json");
- const runningJobFile = path.join(jobsDir, "review-running.json");
- const otherJobFile = path.join(jobsDir, "review-other.json");
- fs.writeFileSync(completedLog, "completed\n", "utf8");
- fs.writeFileSync(runningLog, "running\n", "utf8");
- fs.writeFileSync(otherSessionLog, "other\n", "utf8");
- fs.writeFileSync(completedJobFile, JSON.stringify({ id: "review-completed" }, null, 2), "utf8");
- fs.writeFileSync(otherJobFile, JSON.stringify({ id: "review-other" }, null, 2), "utf8");
-
- const sleeper = spawn(process.execPath, ["-e", "setInterval(() => {}, 1000)"], {
- cwd: repo,
- detached: true,
- stdio: "ignore"
- });
- sleeper.unref();
- fs.writeFileSync(runningJobFile, JSON.stringify({ id: "review-running" }, null, 2), "utf8");
-
- t.after(() => {
- try {
- process.kill(-sleeper.pid, "SIGTERM");
- } catch {
- try {
- process.kill(sleeper.pid, "SIGTERM");
- } catch {
- // Ignore missing process.
- }
- }
- });
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs: [
- {
- id: "review-completed",
- status: "completed",
- title: "Codex Review",
- sessionId: "sess-current",
- logFile: completedLog,
- createdAt: "2026-03-18T15:30:00.000Z",
- updatedAt: "2026-03-18T15:31:00.000Z"
- },
- {
- id: "review-running",
- status: "running",
- title: "Codex Review",
- sessionId: "sess-current",
- pid: sleeper.pid,
- logFile: runningLog,
- createdAt: "2026-03-18T15:32:00.000Z",
- updatedAt: "2026-03-18T15:33:00.000Z"
- },
- {
- id: "review-other",
- status: "completed",
- title: "Codex Review",
- sessionId: "sess-other",
- logFile: otherSessionLog,
- createdAt: "2026-03-18T15:34:00.000Z",
- updatedAt: "2026-03-18T15:35:00.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const result = run("node", [SESSION_HOOK, "SessionEnd"], {
- cwd: repo,
- env: {
- ...process.env,
- CODEX_COMPANION_SESSION_ID: "sess-current"
- },
- input: JSON.stringify({
- hook_event_name: "SessionEnd",
- session_id: "sess-current",
- cwd: repo
- })
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.equal(fs.existsSync(otherSessionLog), true);
- assert.equal(fs.existsSync(otherJobFile), true);
- assert.deepEqual(
- fs.readdirSync(path.dirname(otherJobFile)).sort(),
- [path.basename(otherJobFile), path.basename(otherSessionLog)].sort()
- );
-
- await waitFor(() => {
- try {
- process.kill(sleeper.pid, 0);
- return false;
- } catch (error) {
- return error?.code === "ESRCH";
- }
- });
-
- const state = JSON.parse(fs.readFileSync(path.join(stateDir, "state.json"), "utf8"));
- assert.deepEqual(state.jobs.map((job) => job.id), ["review-other"]);
- const otherJob = state.jobs[0];
- assert.equal(otherJob.logFile, otherSessionLog);
-});
-
-test("stop hook runs a stop-time review task and blocks on findings when the review gate is enabled", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const fakeStatePath = path.join(binDir, "fake-codex-state.json");
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const setup = run("node", [SCRIPT, "setup", "--enable-review-gate", "--json"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(setup.status, 0, setup.stderr);
- const setupPayload = JSON.parse(setup.stdout);
- assert.equal(setupPayload.reviewGateEnabled, true);
-
- const taskResult = run("node", [SCRIPT, "task", "--write", "fix the issue"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(taskResult.status, 0, taskResult.stderr);
-
- const blocked = run("node", [STOP_HOOK], {
- cwd: repo,
- env: buildEnv(binDir),
- input: JSON.stringify({
- cwd: repo,
- session_id: "sess-stop-review",
- last_assistant_message: "I completed the refactor and updated the retry logic."
- })
- });
- assert.equal(blocked.status, 0, blocked.stderr);
- const blockedPayload = JSON.parse(blocked.stdout);
- assert.equal(blockedPayload.decision, "block");
- assert.match(blockedPayload.reason, /Codex stop-time review found issues that still need fixes/i);
- assert.match(blockedPayload.reason, /Missing empty-state guard/i);
-
- const fakeState = JSON.parse(fs.readFileSync(fakeStatePath, "utf8"));
- assert.match(fakeState.lastTurnStart.prompt, //i);
- assert.match(fakeState.lastTurnStart.prompt, //i);
- assert.match(fakeState.lastTurnStart.prompt, /Only review the work from the previous Claude turn/i);
- assert.match(fakeState.lastTurnStart.prompt, /I completed the refactor and updated the retry logic\./);
-
- const status = run("node", [SCRIPT, "status"], {
- cwd: repo,
- env: {
- ...buildEnv(binDir),
- CODEX_COMPANION_SESSION_ID: "sess-stop-review"
- }
- });
- assert.equal(status.status, 0, status.stderr);
- assert.match(status.stdout, /Codex Stop Gate Review/);
-});
-
-test("stop hook logs running tasks to stderr without blocking when the review gate is disabled", () => {
- const repo = makeTempDir();
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const stateDir = resolveStateDir(repo);
- const jobsDir = path.join(stateDir, "jobs");
- fs.mkdirSync(jobsDir, { recursive: true });
-
- const runningLog = path.join(jobsDir, "task-running.log");
- fs.writeFileSync(runningLog, "running\n", "utf8");
-
- fs.writeFileSync(
- path.join(stateDir, "state.json"),
- `${JSON.stringify(
- {
- version: 1,
- config: {
- stopReviewGate: false
- },
- jobs: [
- {
- id: "task-live",
- status: "running",
- title: "Codex Task",
- jobClass: "task",
- sessionId: "sess-current",
- logFile: runningLog,
- createdAt: "2026-03-18T15:32:00.000Z",
- updatedAt: "2026-03-18T15:33:00.000Z"
- }
- ]
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- const blocked = run("node", [STOP_HOOK], {
- cwd: repo,
- env: {
- ...process.env,
- CODEX_COMPANION_SESSION_ID: "sess-current"
- },
- input: JSON.stringify({ cwd: repo })
- });
-
- assert.equal(blocked.status, 0, blocked.stderr);
- assert.equal(blocked.stdout.trim(), "");
- assert.match(blocked.stderr, /Codex task task-live is still running/i);
- assert.match(blocked.stderr, /\/codex:status/i);
- assert.match(blocked.stderr, /\/codex:cancel task-live/i);
-});
-
-test("stop hook allows the stop when the review gate is enabled and the stop-time review task is clean", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "adversarial-clean");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const setup = run("node", [SCRIPT, "setup", "--enable-review-gate", "--json"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(setup.status, 0, setup.stderr);
-
- const allowed = run("node", [STOP_HOOK], {
- cwd: repo,
- env: buildEnv(binDir),
- input: JSON.stringify({ cwd: repo, session_id: "sess-stop-clean" })
- });
-
- assert.equal(allowed.status, 0, allowed.stderr);
- assert.equal(allowed.stdout.trim(), "");
-});
-
-test("stop hook does not block when Codex is unavailable even if the review gate is enabled", () => {
- const repo = makeTempDir();
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const setup = run(process.execPath, [SCRIPT, "setup", "--enable-review-gate", "--json"], {
- cwd: repo
- });
- assert.equal(setup.status, 0, setup.stderr);
-
- const allowed = run(process.execPath, [STOP_HOOK], {
- cwd: repo,
- env: {
- ...process.env,
- PATH: ""
- },
- input: JSON.stringify({ cwd: repo })
- });
-
- assert.equal(allowed.status, 0, allowed.stderr);
- assert.equal(allowed.stdout.trim(), "");
- assert.match(allowed.stderr, /Codex is not set up for the review gate/i);
- assert.match(allowed.stderr, /Run \/codex:setup/i);
-});
-
-test("stop hook runs the actual task when auth status looks stale", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir, "refreshable-auth");
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
-
- const setup = run("node", [SCRIPT, "setup", "--enable-review-gate", "--json"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(setup.status, 0, setup.stderr);
-
- const allowed = run("node", [STOP_HOOK], {
- cwd: repo,
- env: buildEnv(binDir),
- input: JSON.stringify({ cwd: repo })
- });
-
- assert.equal(allowed.status, 0, allowed.stderr);
- assert.doesNotMatch(allowed.stderr, /Codex is not set up for the review gate/i);
- const payload = JSON.parse(allowed.stdout);
- assert.equal(payload.decision, "block");
- assert.match(payload.reason, /Missing empty-state guard/i);
-});
-
-test("commands lazily start and reuse one shared app-server after first use", async () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const fakeStatePath = path.join(binDir, "fake-codex-state.json");
-
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const env = buildEnv(binDir);
-
- const review = run("node", [SCRIPT, "review"], {
- cwd: repo,
- env
- });
- assert.equal(review.status, 0, review.stderr);
-
- const brokerSession = loadBrokerSession(repo);
- if (!brokerSession) {
- return;
- }
-
- const adversarial = run("node", [SCRIPT, "adversarial-review"], {
- cwd: repo,
- env
- });
- assert.equal(adversarial.status, 0, adversarial.stderr);
-
- const fakeState = JSON.parse(fs.readFileSync(fakeStatePath, "utf8"));
- assert.equal(fakeState.appServerStarts, 1);
-
- const cleanup = run("node", [SESSION_HOOK, "SessionEnd"], {
- cwd: repo,
- env,
- input: JSON.stringify({
- hook_event_name: "SessionEnd",
- cwd: repo
- })
- });
- assert.equal(cleanup.status, 0, cleanup.stderr);
-});
-
-test("setup reuses an existing shared app-server without starting another one", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- const fakeStatePath = path.join(binDir, "fake-codex-state.json");
-
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const env = buildEnv(binDir);
-
- const review = run("node", [SCRIPT, "review"], {
- cwd: repo,
- env
- });
- assert.equal(review.status, 0, review.stderr);
-
- const brokerSession = loadBrokerSession(repo);
- if (!brokerSession) {
- return;
- }
-
- const setup = run("node", [SCRIPT, "setup", "--json"], {
- cwd: repo,
- env
- });
- assert.equal(setup.status, 0, setup.stderr);
-
- const fakeState = JSON.parse(fs.readFileSync(fakeStatePath, "utf8"));
- assert.equal(fakeState.appServerStarts, 1);
-
- const cleanup = run("node", [SESSION_HOOK, "SessionEnd"], {
- cwd: repo,
- env,
- input: JSON.stringify({
- hook_event_name: "SessionEnd",
- cwd: repo
- })
- });
- assert.equal(cleanup.status, 0, cleanup.stderr);
-});
-
-test("status reports shared session runtime when a lazy broker is active", () => {
- const repo = makeTempDir();
- const binDir = makeTempDir();
- installFakeCodex(binDir);
- initGitRepo(repo);
- fs.writeFileSync(path.join(repo, "README.md"), "hello\n");
- run("git", ["add", "README.md"], { cwd: repo });
- run("git", ["commit", "-m", "init"], { cwd: repo });
- fs.writeFileSync(path.join(repo, "README.md"), "hello again\n");
-
- const review = run("node", [SCRIPT, "review"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
- assert.equal(review.status, 0, review.stderr);
-
- if (!loadBrokerSession(repo)) {
- return;
- }
-
- const result = run("node", [SCRIPT, "status"], {
- cwd: repo,
- env: buildEnv(binDir)
- });
-
- assert.equal(result.status, 0, result.stderr);
- assert.match(result.stdout, /Session runtime: shared session/);
-});
-
-test("setup and status honor --cwd when reading shared session runtime", () => {
- const targetWorkspace = makeTempDir();
- const invocationWorkspace = makeTempDir();
-
- saveBrokerSession(targetWorkspace, {
- endpoint: "unix:/tmp/fake-broker.sock"
- });
-
- const status = run("node", [SCRIPT, "status", "--cwd", targetWorkspace], {
- cwd: invocationWorkspace
- });
- assert.equal(status.status, 0, status.stderr);
- assert.match(status.stdout, /Session runtime: shared session/);
-
- const setup = run("node", [SCRIPT, "setup", "--cwd", targetWorkspace, "--json"], {
- cwd: invocationWorkspace
- });
- assert.equal(setup.status, 0, setup.stderr);
- const payload = JSON.parse(setup.stdout);
- assert.equal(payload.sessionRuntime.mode, "shared");
- assert.equal(payload.sessionRuntime.endpoint, "unix:/tmp/fake-broker.sock");
-});
diff --git a/tests/state.test.mjs b/tests/state.test.mjs
deleted file mode 100644
index 0f8f57ce..00000000
--- a/tests/state.test.mjs
+++ /dev/null
@@ -1,105 +0,0 @@
-import fs from "node:fs";
-import os from "node:os";
-import path from "node:path";
-import test from "node:test";
-import assert from "node:assert/strict";
-
-import { makeTempDir } from "./helpers.mjs";
-import { resolveJobFile, resolveJobLogFile, resolveStateDir, resolveStateFile, saveState } from "../plugins/codex/scripts/lib/state.mjs";
-
-test("resolveStateDir uses a temp-backed per-workspace directory", () => {
- const workspace = makeTempDir();
- const stateDir = resolveStateDir(workspace);
-
- assert.equal(stateDir.startsWith(os.tmpdir()), true);
- assert.match(path.basename(stateDir), /.+-[a-f0-9]{16}$/);
- assert.match(stateDir, new RegExp(`^${os.tmpdir().replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}`));
-});
-
-test("resolveStateDir uses CLAUDE_PLUGIN_DATA when it is provided", () => {
- const workspace = makeTempDir();
- const pluginDataDir = makeTempDir();
- const previousPluginDataDir = process.env.CLAUDE_PLUGIN_DATA;
- process.env.CLAUDE_PLUGIN_DATA = pluginDataDir;
-
- try {
- const stateDir = resolveStateDir(workspace);
-
- assert.equal(stateDir.startsWith(path.join(pluginDataDir, "state")), true);
- assert.match(path.basename(stateDir), /.+-[a-f0-9]{16}$/);
- assert.match(
- stateDir,
- new RegExp(`^${path.join(pluginDataDir, "state").replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}`)
- );
- } finally {
- if (previousPluginDataDir == null) {
- delete process.env.CLAUDE_PLUGIN_DATA;
- } else {
- process.env.CLAUDE_PLUGIN_DATA = previousPluginDataDir;
- }
- }
-});
-
-test("saveState prunes dropped job artifacts when indexed jobs exceed the cap", () => {
- const workspace = makeTempDir();
- const stateFile = resolveStateFile(workspace);
- fs.mkdirSync(path.dirname(stateFile), { recursive: true });
-
- const jobs = Array.from({ length: 51 }, (_, index) => {
- const jobId = `job-${index}`;
- const updatedAt = new Date(Date.UTC(2026, 0, 1, 0, index, 0)).toISOString();
- const logFile = resolveJobLogFile(workspace, jobId);
- const jobFile = resolveJobFile(workspace, jobId);
- fs.writeFileSync(logFile, `log ${jobId}\n`, "utf8");
- fs.writeFileSync(jobFile, JSON.stringify({ id: jobId, status: "completed" }, null, 2), "utf8");
- return {
- id: jobId,
- status: "completed",
- logFile,
- updatedAt,
- createdAt: updatedAt
- };
- });
-
- fs.writeFileSync(
- stateFile,
- `${JSON.stringify(
- {
- version: 1,
- config: { stopReviewGate: false },
- jobs
- },
- null,
- 2
- )}\n`,
- "utf8"
- );
-
- saveState(workspace, {
- version: 1,
- config: { stopReviewGate: false },
- jobs
- });
-
- const prunedJobFile = resolveJobFile(workspace, "job-0");
- const prunedLogFile = resolveJobLogFile(workspace, "job-0");
- const retainedJobFile = resolveJobFile(workspace, "job-50");
- const retainedLogFile = resolveJobLogFile(workspace, "job-50");
- const jobsDir = path.dirname(prunedJobFile);
-
- assert.equal(fs.existsSync(retainedJobFile), true);
- assert.equal(fs.existsSync(retainedLogFile), true);
-
- const savedState = JSON.parse(fs.readFileSync(stateFile, "utf8"));
- assert.equal(savedState.jobs.length, 50);
- assert.deepEqual(
- savedState.jobs.map((job) => job.id),
- Array.from({ length: 50 }, (_, index) => `job-${50 - index}`)
- );
- assert.deepEqual(
- fs.readdirSync(jobsDir).sort(),
- Array.from({ length: 50 }, (_, index) => `job-${index + 1}`)
- .flatMap((jobId) => [`${jobId}.json`, `${jobId}.log`])
- .sort()
- );
-});
diff --git a/tsconfig.app-server.json b/tsconfig.app-server.json
deleted file mode 100644
index 3f8c11f4..00000000
--- a/tsconfig.app-server.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "compilerOptions": {
- "target": "ES2022",
- "module": "ESNext",
- "moduleResolution": "Bundler",
- "allowJs": true,
- "checkJs": true,
- "noEmit": true,
- "strict": false,
- "noImplicitAny": false,
- "useUnknownInCatchVariables": false,
- "skipLibCheck": true,
- "types": ["node"]
- },
- "include": [
- "plugins/codex/scripts/lib/app-server.mjs",
- "plugins/codex/scripts/lib/codex.mjs",
- "plugins/codex/scripts/lib/fs.mjs",
- "plugins/codex/scripts/lib/process.mjs",
- "plugins/codex/scripts/lib/app-server-protocol.d.ts",
- "plugins/codex/.generated/app-server-types/**/*.ts"
- ]
-}