From 4cc3c621c6f534b8c1795fb447b5d820ae413039 Mon Sep 17 00:00:00 2001 From: Donagh Brennan Date: Thu, 30 Apr 2026 18:49:15 +0100 Subject: [PATCH] feat(docs-tools): maximize code-evidence usage across all workflow steps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend code-evidence grounding from 2 of 7 steps to all steps that benefit from it. The repo is already cloned and indexed — these changes make every step smarter about using that evidence. Changes by step: 1. Technical review: run grounded_review.py and api_surface.py as a pre-scan when source repo is available. Pass verdicts to the technical-reviewer agent as pre-computed evidence. 2. Writing: pass evidence-status.json (grounded/partial/absent per requirement), source repo path, and anti-fabrication guardrails. Writer uses [NEEDS VERIFICATION] for partial requirements, skips absent ones. 3. Code-evidence step: run api_surface.py alongside find_evidence.py and include API surface in evidence.json. Seed additional queries from scope-req-audit key_files for better retrieval quality. 4. Requirements: requirements-analyst verifies features exist in code, identifies existing docs, extracts project metadata, and notes code references when repo is available. 5. Planning: planner references key_files from evidence-status as content source pointers in module specifications. 6. Scope-req-audit: evidence-classifier uses 2-3 query reformulations to reduce false-absent classifications. API surface extracted in pre-flight and passed to classifiers as supplementary evidence. Also: code-evidence workflow no longer hard-fails without explicit --source-code-repo. Pre-flight attempts JIRA auto-discovery first, then fails with clear actionable options if no repo is found. 
Made-with: Cursor Co-authored-by: Cursor --- plugins/docs-tools/.claude-plugin/plugin.json | 2 +- .../docs-tools/agents/evidence-classifier.md | 36 +++-- .../docs-tools/agents/requirements-analyst.md | 23 ++- .../skills/docs-orchestrator/SKILL.md | 28 ++-- .../defaults/docs-workflow-code-evidence.yaml | 2 +- .../schema/step-result-schema.md | 4 +- .../docs-workflow-code-evidence/SKILL.md | 47 +++++- .../skills/docs-workflow-planning/SKILL.md | 10 +- .../docs-workflow-requirements/SKILL.md | 16 +- .../docs-workflow-scope-req-audit/SKILL.md | 14 +- .../skills/docs-workflow-tech-review/SKILL.md | 137 ++++++++++++++++-- .../skills/docs-workflow-writing/SKILL.md | 64 +++++--- .../scripts/build_writing_args.sh | 43 ++++-- scripts/test-upstream-plugin.sh | 14 +- 14 files changed, 344 insertions(+), 96 deletions(-) diff --git a/plugins/docs-tools/.claude-plugin/plugin.json b/plugins/docs-tools/.claude-plugin/plugin.json index a9810cc0..09e90525 100644 --- a/plugins/docs-tools/.claude-plugin/plugin.json +++ b/plugins/docs-tools/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "docs-tools", - "version": "0.0.60", + "version": "0.0.61", "description": "Documentation review, writing, and workflow tools for Red Hat AsciiDoc and Markdown documentation.", "author": { "name": "Red Hat Documentation Team", diff --git a/plugins/docs-tools/agents/evidence-classifier.md b/plugins/docs-tools/agents/evidence-classifier.md index dab7f0a5..a0c01e83 100644 --- a/plugins/docs-tools/agents/evidence-classifier.md +++ b/plugins/docs-tools/agents/evidence-classifier.md @@ -2,7 +2,7 @@ name: evidence-classifier description: Classifies a single documentation requirement by code evidence status. Runs code-finder search for one requirement, applies score thresholds, and returns structured JSON with classification and gap analysis. 
tools: Bash, Read -maxTurns: 8 +maxTurns: 10 --- # Your role @@ -13,32 +13,38 @@ You produce exactly one JSON object on stdout — no markdown, no commentary, no ## Procedure -### 1. Build a search query +### 1. Build search queries -Convert the requirement summary into a natural-language search query that tests for implementation evidence: +Build 2-3 query reformulations to reduce false-absent results. A single query can miss due to vocabulary mismatch between the requirement description and the code. -- Strip documentation-oriented language ("document how to", "explain the", "describe the") -- Focus on the implementation artifact (e.g., "Python SDK client library" not "Python SDK documentation") -- Keep the query specific enough to distinguish from tangential matches +**Query A — Implementation-focused:** Convert the requirement summary into a search query targeting the implementation artifact. Strip documentation language ("document how to", "explain the") and focus on the code artifact. + +**Query B — Term-focused:** Extract the most specific technical terms from the requirement (class names, function names, CLI flags, CRD kinds, API paths) and search for those directly. If the requirement mentions "ModelRegistry REST API", query "ModelRegistry REST API endpoint handler". + +**Query C (optional) — Alternate phrasing:** If the requirement uses product-specific terminology that may differ from the code (e.g., "model customization" in docs vs "fine-tuning" in code), add a third query using the likely code-side term. 
Examples: -- REQ "CA bundle configuration support" → query "CA bundle configuration implementation" -- REQ "Python SDK for notebook-based workflows" → query "Python SDK client library implementation" -- REQ "Kueue workload scheduling integration" → query "Kueue queue integration workload scheduling" +- REQ "CA bundle configuration support" → A: "CA bundle configuration implementation" B: "CA bundle TLS certificate path" +- REQ "Python SDK for notebook-based workflows" → A: "Python SDK client library" B: "SyncClient NotebookClient class" +- REQ "Kueue workload scheduling integration" → A: "Kueue queue integration workload scheduling" B: "Kueue WorkloadQueue reconciler" + +### 2. Run code search and API surface check -### 2. Run code search +**2a. Run NL search queries:** -Run the search using the REPO_PATH from your prompt's CONFIGURATION block: +Run each query using the REPO_PATH from your prompt's CONFIGURATION block: ```bash python3 ${CLAUDE_PLUGIN_ROOT}/skills/code-evidence/scripts/find_evidence.py --repo --query "" --limit 5 ``` -Where: -- `` — the REPO_PATH from CONFIGURATION -- `` — the search query you built in step 1 +Run each query (A, B, and optionally C) separately. Keep the best result set — the one with the highest top score. + +**2b. API surface cross-check (when API_SURFACE is provided):** + +If your prompt's CONFIGURATION includes an `API_SURFACE_FILE` path, read it. Search for any specific class names, function names, or type names from the requirement in the API surface entities. An exact match in the API surface is strong positive evidence regardless of NL search scores — set `top_score` to at least the grounded threshold if found. -Capture the JSON output from stdout. If the command fails, return an error result (see output format below). +If the command fails, return an error result (see output format below). ### 3. 
Classify the result diff --git a/plugins/docs-tools/agents/requirements-analyst.md b/plugins/docs-tools/agents/requirements-analyst.md index ed15119f..7103e4d4 100644 --- a/plugins/docs-tools/agents/requirements-analyst.md +++ b/plugins/docs-tools/agents/requirements-analyst.md @@ -33,6 +33,7 @@ Your prompt will provide: - **REQUIREMENT**: One requirement skeleton (id, title, priority, category, sources, one_line_summary) - **RELATED_TICKETS**: Context from the discovery pass (parent, siblings, linked tickets) - **RELEASE**: Release/sprint identifier +- **REPO_PATH**: (optional) Path to the source code repository, when available ### 1. Fetch detailed source content @@ -63,7 +64,23 @@ For other specs (Confluence, etc.), use WebFetch. **Existing documentation sources:** Read the file to understand what already exists and what needs updating. -### 2. Web search expansion +### 2. Source repo enrichment (when REPO_PATH is provided) + +**Skip this step if REPO_PATH is not provided in your prompt.** + +Use Read, Glob, and Grep to verify and enrich the requirement against the actual codebase: + +1. **Verify the feature exists in code.** Search for key terms from the requirement (class names, function names, CLI flags, CRD kinds) using Grep against the repo. If the feature has no trace in the codebase, add a note: `"notes": "No implementation evidence found in repo — requirement may describe planned/aspirational functionality"` + +2. **Identify existing documentation.** Check for `README.md`, `CHANGELOG.md`, `docs/` directory, and inline code comments related to the requirement's topic. Note what documentation already exists — the planner uses this for gap analysis + +3. **Extract project metadata.** Read the repo root for: primary language (from file extensions or build files), build system (`Makefile`, `go.mod`, `pyproject.toml`, `package.json`), and major directory structure. Add as a `repo_metadata` field in your output. 
Multiple agents may extract this in parallel — the merge step deduplicates + +4. **Note code references.** If you find specific files, functions, or types that implement the requirement, add them to `references` with `"type": "code"`. These feed directly into the code-evidence step's query seeding + +Keep this lightweight — read a few targeted files, don't scan the entire repo. The code-evidence step does thorough retrieval later. + +### 3. Web search expansion Build 2-4 targeted search queries from the requirement's topic: @@ -75,7 +92,7 @@ Use WebSearch for each query. Evaluate results for relevance. **Sanitize:** Do not include raw search queries, result counts, or rankings in your output. Only include curated references (URL, title, relevance note). -### 3. Analyze and produce detailed requirement +### 4. Analyze and produce detailed requirement From the gathered sources, produce: @@ -86,7 +103,7 @@ From the gathered sources, produce: - **references**: All sources consulted with URLs and notes - **web_findings**: Curated external references from web search -### 4. Categorization guidance +### 5. Categorization guidance Map the requirement to documentation module types: diff --git a/plugins/docs-tools/skills/docs-orchestrator/SKILL.md b/plugins/docs-tools/skills/docs-orchestrator/SKILL.md index 09febae8..88677a30 100644 --- a/plugins/docs-tools/skills/docs-orchestrator/SKILL.md +++ b/plugins/docs-tools/skills/docs-orchestrator/SKILL.md @@ -35,7 +35,7 @@ When displaying available options to the user (e.g., on skill load or when askin - `--mkdocs` — Use Material for MkDocs format instead of AsciiDoc. Propagates to the writing step (generates `.md` with MkDocs front matter) and style-review step (applies Markdown-appropriate rules). Sets `options.format` to `"mkdocs"` in the progress file - `--draft` — Write documentation to the staging area (`.claude/docs//writing/`) instead of directly into the repo. 
Uses DRAFT placement mode: no framework detection, no file placement into the target repo. Without this flag, UPDATE-IN-PLACE is the default - `--docs-repo-path ` — Target documentation repository for UPDATE-IN-PLACE mode. The docs-writer explores this directory for framework detection (Antora, MkDocs, Docusaurus, etc.) and writes files there instead of the current working directory. Propagates to `writing` and `create-merge-request` steps (mapped to their internal `--repo-path` flag). **Precedence**: if both `--docs-repo-path` and `--draft` are passed, `--docs-repo-path` wins — log a warning and ignore `--draft` -- `--source-code-repo ...` — Source code repository/repositories for code evidence and requirements enrichment (space-delimited, one or more). Accepts remote URLs (https://, git@, ssh:// — each shallow-cloned to `.claude/docs//code-repo//`) or local paths (used directly). The first repo is treated as primary; additional repos are returned as `additional_repos` in the result. Passed to requirements, code-evidence, and writing steps (mapped to their internal `--repo` flag). Without `--pr`, the entire repo is the subject matter; with `--pr`, the PR branch is checked out on the primary repo so code-evidence reflects the PR's state. Takes highest priority in source resolution, overriding `source.yaml` and PR-derived URLs +- `--source-code-repo ...` — Source code repository/repositories for code evidence and requirements enrichment (space-delimited, one or more). Accepts remote URLs (https://, git@, ssh:// — each shallow-cloned to `.claude/docs//code-repo//`) or local paths (used directly). The first repo is treated as primary; additional repos are returned as `additional_repos` in the result. Passed to requirements, code-evidence, writing, and technical-review steps (mapped to their internal `--repo` flag). 
Without `--pr`, the entire repo is the subject matter; with `--pr`, the PR branch is checked out on the primary repo so code-evidence reflects the PR's state. Takes highest priority in source resolution, overriding `source.yaml` and PR-derived URLs - `--create-jira ` — Create a linked JIRA ticket in the specified project after the planning step completes. Runs the standalone `docs-workflow-create-jira` workflow (use `--workflow workflow-create-jira`). Requires `JIRA_API_TOKEN` to be set - `--create-merge-request` — Create a branch, commit, push, and open a merge request or pull request after reviews complete. Activates the `create-merge-request` workflow step (guarded by `when: create_merge_request`). Off by default @@ -67,9 +67,12 @@ When displaying available options to the user (e.g., on skill load or when askin # Custom workflow YAML /docs-orchestrator PROJ-123 --workflow quick -# Code-evidence workflow — requires a source repo +# Code-evidence workflow — auto-discovers repo from JIRA, or pass explicitly +/docs-orchestrator PROJ-123 --workflow code-evidence + +# Code-evidence workflow — explicit repo (overrides auto-discovery) /docs-orchestrator PROJ-123 \ - --workflow workflow-code-evidence \ + --workflow code-evidence \ --source-code-repo https://github.com/org/operator ``` @@ -159,9 +162,13 @@ Read the YAML file and extract the ordered step list. Each step has: `name`, `sk If the YAML includes a top-level `workflow.requires` list, check each condition **before evaluating steps or running anything**: -- `has_source_repo` → a source repo must be resolvable. The pre-flight resolution script tries CLI args, `source.yaml`, PR-derived, and JIRA ticket discovery (git links and auto-discovered PRs) in priority order. If **none** of these yield a source repo, **STOP** immediately with: `"This workflow requires a source code repository. No repo could be discovered from the JIRA ticket. 
Pass --source-code-repo or --pr ."` +- `has_source_repo` → a source repo must be resolvable. The pre-flight resolution script tries all sources in priority order: CLI `--source-code-repo`, `source.yaml`, `--pr`-derived, and JIRA ticket discovery (git links and auto-discovered PRs). If **none** yield a source repo, **STOP** immediately with: `"This workflow requires a source code repository. No repo could be discovered from the JIRA ticket. Options: (1) re-run with --source-code-repo , (2) re-run with --pr , (3) create .claude/docs//source.yaml with a repo: field, or (4) link PRs to the JIRA ticket and re-run."` + +Unlike `when` (which makes individual steps conditional), `requires` is a workflow-level precondition — the entire workflow fails if a required condition is not met. This prevents users from running a code-evidence workflow without a repo and only discovering the problem after requirements and planning have already completed. -Unlike `when` (which makes individual steps conditional), `requires` is a workflow-level precondition — the entire workflow fails if a required condition is not met. This prevents users from running a code-evidence-heavy workflow without a repo and only discovering the problem after requirements and planning have already completed. +The `has_source_repo` precondition supports two modes: +- **Explicit:** User passes `--source-code-repo` → guaranteed grounding against the specified repo +- **Auto-discovered:** User passes only a JIRA ticket → pre-flight discovers the repo from JIRA git links and linked PRs ### 4. Evaluate `when` conditions @@ -363,6 +370,7 @@ Build the args string for the step skill. 
The orchestrator maps its user-facing - `scope-req-audit`: `--repo [--grounded-threshold ] [--absent-threshold ]` - `code-evidence`: `--repo [--scope-include ] [--scope-exclude ] [--reindex]` — scope globs come from `source.yaml` or `options.source.scope` in the progress file - `writing`: `--format [--draft] [--repo ] [--repo-path ]` + - `technical-review`: `[--repo ]` - `style-review`: `--format ` - `create-merge-request`: `[--draft] [--repo-path ]` @@ -508,13 +516,5 @@ Same as new session. The progress file shows which steps completed and which fai ### Requirements-analyst agent: repo-aware analysis -When `--source-code-repo` is passed to the requirements step, the `requirements-analyst` agent should use the repo to enrich its analysis. This is **not yet implemented** — the requirements step currently accepts `--source-code-repo` but the agent does not act on it. Future work: - -- Scan the repo's `README.md`, `CHANGELOG.md`, and `docs/` directory for existing documentation -- Note what documentation already exists and what gaps remain (feeds directly into the planning step's gap analysis) -- Extract project metadata: language, build system, major dependencies, directory structure -- Identify existing code examples, tutorials, or quickstart guides that the writer could reference or update rather than recreate -- If no `--pr` was provided, use the repo structure itself to identify the key components and features that need documentation - -This work requires changes to the `requirements-analyst` agent definition (`agents/requirements-analyst.md`), not just the step skill. +When `--repo` is passed to the requirements step, the `requirements-analyst` agent uses the repo to enrich its analysis: verifying features exist in code, identifying existing documentation, extracting project metadata, and noting code references for downstream steps. See `agents/requirements-analyst.md` step 2 (Source repo enrichment). 
diff --git a/plugins/docs-tools/skills/docs-orchestrator/defaults/docs-workflow-code-evidence.yaml b/plugins/docs-tools/skills/docs-orchestrator/defaults/docs-workflow-code-evidence.yaml index 92011ac4..c6954aae 100644 --- a/plugins/docs-tools/skills/docs-orchestrator/defaults/docs-workflow-code-evidence.yaml +++ b/plugins/docs-tools/skills/docs-orchestrator/defaults/docs-workflow-code-evidence.yaml @@ -1,6 +1,6 @@ workflow: name: docs-workflow - description: Code-evidence-grounded documentation workflow. Requires a source code repository. Adds scope-req-audit and code-evidence steps to classify requirements by implementation status and ground documentation in actual source code. + description: Code-evidence-grounded documentation workflow. Adds scope-req-audit and code-evidence steps to classify requirements by implementation status and ground documentation in actual source code. Source repo is resolved via explicit --source-code-repo, JIRA auto-discovery, or PR-derived URLs. Fails if no repo can be resolved. 
requires: - has_source_repo diff --git a/plugins/docs-tools/skills/docs-orchestrator/schema/step-result-schema.md b/plugins/docs-tools/skills/docs-orchestrator/schema/step-result-schema.md index b5aea86d..6aeaf2b1 100644 --- a/plugins/docs-tools/skills/docs-orchestrator/schema/step-result-schema.md +++ b/plugins/docs-tools/skills/docs-orchestrator/schema/step-result-schema.md @@ -121,7 +121,8 @@ All sidecars share these fields: "minor": 3, "sme": 2 }, - "iteration": 1 + "iteration": 1, + "code_grounded": true } ``` @@ -134,6 +135,7 @@ All sidecars share these fields: | `severity_counts.minor` | integer | Minor issues found | Orchestrator | | `severity_counts.sme` | integer | Issues requiring SME verification | Orchestrator | | `iteration` | integer | Which iteration of review this represents (1-based) | Orchestrator | +| `code_grounded` | boolean | Whether the code-grounded pre-scan ran (source repo was available and `grounded_review.py` succeeded) | Informational | ### style-review diff --git a/plugins/docs-tools/skills/docs-workflow-code-evidence/SKILL.md b/plugins/docs-tools/skills/docs-workflow-code-evidence/SKILL.md index 99c78d55..af57f22b 100644 --- a/plugins/docs-tools/skills/docs-workflow-code-evidence/SKILL.md +++ b/plugins/docs-tools/skills/docs-workflow-code-evidence/SKILL.md @@ -42,6 +42,7 @@ The writer typically works from the **documentation repository**, not the code r ```text /code-evidence/evidence.json +/code-evidence/api-surface.json /code-evidence/summary.md ``` @@ -91,7 +92,7 @@ Determine the source directories for the filtered pass (Pass 1). The goal is to Store the detected source paths and any exclude patterns for use in step 5. -### 4. Extract topics from the plan +### 4. Extract topics from the plan (and seed from scope-req-audit) Read `$PLAN_FILE` and extract the key topics to search for. 
Look for: @@ -104,6 +105,8 @@ Produce a list of 5-15 natural language search queries that cover the plan's sco Additionally, derive 1-2 **pattern-level queries** that ask how the codebase implements the general pattern, not the specific component. For example, if the plan is about adding Prometheus monitoring for a new component, add a query like "how do components implement monitoring and alerting" alongside the component-specific queries. These pattern queries help the unfiltered pass surface analogous implementations from other parts of the codebase, giving the writer examples to reference. +**Seed from scope-req-audit key_files (when available):** Check if `/scope-req-audit/evidence-status.json` exists. If it does, read it and extract the `key_files` from each **grounded** requirement. These are specific source files where the scope-req-audit found strong evidence. For each key_file, add a targeted query scoped to that file's directory (e.g., if `key_files` contains `pkg/api/v1/types.go`, add a query like "types and interfaces in api v1" with `filter_paths: ["pkg/api/v1"]`). This produces higher-quality results than relying solely on plan-derived queries because these files are already confirmed as relevant. + ### 5. Run two-pass evidence retrieval for each topic For each search query, run code-finder's evidence retrieval **twice** to capture both accurate source code and narrative context. Use **batch mode** to run all queries in a single process invocation — this pays the import and index-load cost once instead of per-query. @@ -196,9 +199,42 @@ Collect all results into a combined evidence structure: The `scope` field records what was searched so downstream steps know the boundaries. If no scope was provided, `include` and `exclude` are `null` and `source_dirs_used` lists the auto-detected directories. -Write this to `$EVIDENCE_FILE`. +Write the search results to `$EVIDENCE_FILE`. + +### 6. 
Extract API surface + +Run `api_surface.py` against the source directories to extract the public API (classes, functions, methods with signatures and line ranges). This gives the writer exact names and types rather than relying solely on search-ranked snippets. + +```bash +python3 ${CLAUDE_PLUGIN_ROOT}/skills/code-evidence/scripts/api_surface.py \ + --target "$REPO_PATH" > "${OUTPUT_DIR}/api-surface.json" +``` + +Or with uv fallback if code-finder is not installed. + +If `--scope-include` was provided, pass `--target` for each source directory instead of the repo root to limit the extraction scope. + +After extraction, read `${OUTPUT_DIR}/api-surface.json` and append an `api_surface` field to `$EVIDENCE_FILE`: + +```json +{ + "ticket": "", + "repo_path": "", + "scope": { ... }, + "topics": [ ... ], + "api_surface": { + "total_entities": 142, + "files_processed": 38, + "files_with_api": 24, + "entities": { ... } + }, + "index_info": { ... } +} +``` + +If `api_surface.py` fails, log a warning and continue — the `api_surface` field is omitted from `evidence.json`. The search-based evidence is still valid. -### 6. Generate evidence summary +### 7. Generate evidence summary Create a human-readable markdown summary at `$SUMMARY_FILE` with: @@ -209,6 +245,7 @@ Create a human-readable markdown summary at `$SUMMARY_FILE` with: **Repository:** **Topics searched:** **Total code snippets found:** (source: , context: ) +**API surface:** entities across files (omit if api_surface extraction failed) ## Topics @@ -230,7 +267,7 @@ Create a human-readable markdown summary at `$SUMMARY_FILE` with: This summary is for human review. The JSON file is what downstream steps consume. -### 7. Write step-result.json +### 8. Write step-result.json After generating the evidence, read `$EVIDENCE_FILE` to count topics and total snippets. 
Write the sidecar to `${OUTPUT_DIR}/step-result.json`: @@ -249,7 +286,7 @@ After generating the evidence, read `$EVIDENCE_FILE` to count topics and total s - `topic_count`: length of the `topics` array in `evidence.json` - `snippet_count`: sum of all `source_results` and `context_results` entries across all topics -### 8. Verify output +### 9. Verify output After completion, verify that `$EVIDENCE_FILE`, `$SUMMARY_FILE`, and `${OUTPUT_DIR}/step-result.json` exist. diff --git a/plugins/docs-tools/skills/docs-workflow-planning/SKILL.md b/plugins/docs-tools/skills/docs-workflow-planning/SKILL.md index c32443e7..9956e9f5 100644 --- a/plugins/docs-tools/skills/docs-workflow-planning/SKILL.md +++ b/plugins/docs-tools/skills/docs-workflow-planning/SKILL.md @@ -1,6 +1,6 @@ --- name: docs-workflow-planning -description: Create a documentation plan from requirements analysis output. Dispatches the docs-tools:docs-planner agent. Invoked by the orchestrator. +description: Create a documentation plan from requirements analysis output. Dispatches the docs-planner agent. Invoked by the orchestrator. argument-hint: --base-path allowed-tools: Read, Write, Glob, Grep, Edit, Bash, Skill, Agent --- @@ -43,10 +43,10 @@ mkdir -p "$OUTPUT_DIR" ### 2. Dispatch agent -**You MUST use the Agent tool** to invoke the `docs-tools:docs-planner` subagent. Do NOT read the agent's markdown file or attempt to perform the agent's work yourself — the agent has a specialized system prompt and must run as an isolated subagent. +**You MUST use the Agent tool** to invoke the `docs-planner` subagent. Do NOT read the agent's markdown file or attempt to perform the agent's work yourself — the agent has a specialized system prompt and must run as an isolated subagent. 
**Agent tool parameters:** -- `subagent_type`: `docs-tools:docs-planner` +- `subagent_type`: `docs-planner` - `description`: `Create documentation plan for ` **Prompt** (pass this as the `prompt` parameter to the Agent tool): @@ -68,8 +68,8 @@ mkdir -p "$OUTPUT_DIR" > Code evidence status is available at `/scope-req-audit/evidence-status.json`. Read it and use the evidence status when making scoping decisions: > -> - **Grounded** requirements: create full module specifications as normal -> - **Partial** requirements: create module specifications but note what evidence was found and what is missing — flag for SME review +> - **Grounded** requirements: create full module specifications as normal. Use the `key_files` for each grounded requirement as content source references in the module spec — these are the actual source files where the feature is implemented. The code-evidence step will use them for targeted retrieval +> - **Partial** requirements: create module specifications but note what evidence was found and what is missing — flag for SME review. Include available `key_files` as partial source references > - **Absent** requirements: do NOT create module specifications. Instead, list them in a "Deferred requirements (no code evidence)" section at the end of the plan, including the recommended action from the evidence status. These may be unimplemented features — documenting them risks fabrication > > If `discovered_repos` lists repos that weren't indexed, note them in the deferred section as potential sources for resolving absent requirements. diff --git a/plugins/docs-tools/skills/docs-workflow-requirements/SKILL.md b/plugins/docs-tools/skills/docs-workflow-requirements/SKILL.md index a6aca655..29a04cae 100644 --- a/plugins/docs-tools/skills/docs-workflow-requirements/SKILL.md +++ b/plugins/docs-tools/skills/docs-workflow-requirements/SKILL.md @@ -11,8 +11,8 @@ Step skill for the docs-orchestrator pipeline. 
Follows the step skill contract: This skill uses a two-pass architecture to analyze documentation requirements: -1. **Discovery pass** — A single `docs-tools:requirements-discoverer` agent enumerates requirements from JIRA, PRs, and specs, producing a JSON skeleton -2. **Deep analysis pass** — One `docs-tools:requirements-analyst` agent per requirement, all running in parallel, each performing thorough analysis with a clean context window +1. **Discovery pass** — A single `requirements-discoverer` agent enumerates requirements from JIRA, PRs, and specs, producing a JSON skeleton +2. **Deep analysis pass** — One `requirements-analyst` agent per requirement, all running in parallel, each performing thorough analysis with a clean context window 3. **Merge** — The orchestrator assembles per-requirement JSON results into the standard `requirements.md` format ## Arguments @@ -45,11 +45,11 @@ mkdir -p "$OUTPUT_DIR" ### 2. Pass 1 — Discovery -Dispatch one `docs-tools:requirements-discoverer` agent to enumerate requirements from all sources. +Dispatch one `requirements-discoverer` agent to enumerate requirements from all sources. ``` Agent: - subagent_type: docs-tools:requirements-discoverer + subagent_type: requirements-discoverer description: "Discover requirements for " prompt: | Discover documentation requirements for JIRA ticket . @@ -83,13 +83,13 @@ If `requirements` is empty, write a minimal `requirements.md` noting that no req ### 4. Pass 2 — Fan out deep analysis -For each requirement in the discovery skeleton, dispatch one `docs-tools:requirements-analyst` agent. Launch ALL agents in a **single message** (parallel execution). +For each requirement in the discovery skeleton, dispatch one `requirements-analyst` agent. Launch ALL agents in a **single message** (parallel execution). 
For each requirement, use: ``` Agent: - subagent_type: docs-tools:requirements-analyst + subagent_type: requirements-analyst description: "Analyze REQ-NNN: " prompt: | Perform deep analysis of this single documentation requirement. @@ -102,12 +102,16 @@ Agent: RELEASE: <release from discovery output> + [If --repo was provided: "REPO_PATH: <repo_path>"] + Fetch detailed content from each source, perform web search expansion, and produce complete documentation requirements with acceptance criteria. Print your JSON result to stdout. ``` +The `REPO_PATH` line is conditional — include it only if `--repo` was passed to this step. When present, the analyst verifies the requirement against the codebase, identifies existing docs, and extracts code references. + **Important:** All Agent calls MUST be in a single message so they run in parallel. ### 5. Merge results diff --git a/plugins/docs-tools/skills/docs-workflow-scope-req-audit/SKILL.md b/plugins/docs-tools/skills/docs-workflow-scope-req-audit/SKILL.md index 77ce135e..8d349d3a 100644 --- a/plugins/docs-tools/skills/docs-workflow-scope-req-audit/SKILL.md +++ b/plugins/docs-tools/skills/docs-workflow-scope-req-audit/SKILL.md @@ -107,7 +107,7 @@ For each requirement, extract: If no requirements are found matching this pattern, STOP with error: "No requirements found in requirements.md. Expected REQ-NNN pattern." -### 4. Pre-flight: warm the code-finder index +### 4. Pre-flight: warm the code-finder index and extract API surface Warm the code-finder index before fanning out. This ensures the index is built once (expensive) and all subagents reuse the cached index at `{repo}/.vibe2doc/index.db`. Run one throwaway query: @@ -117,6 +117,15 @@ python3 ${CLAUDE_PLUGIN_ROOT}/skills/code-evidence/scripts/find_evidence.py --re Discard the output. If this fails, STOP with error including the stderr output — the index cannot be built. 
+Extract the API surface for use as supplementary evidence during classification: + +```bash +python3 ${CLAUDE_PLUGIN_ROOT}/skills/code-evidence/scripts/api_surface.py \ + --target "$REPO_PATH" > "${OUTPUT_DIR}/api-surface.json" +``` + +Set `API_SURFACE_FILE="${OUTPUT_DIR}/api-surface.json"` if the command succeeds. If it fails, log a warning and set `API_SURFACE_FILE=""` — classifiers will rely on NL search alone. + ### 5. Fan out: dispatch one agent per requirement For each requirement extracted in step 3, dispatch one Agent call. Launch ALL requirement agents in a **single message** (parallel execution). @@ -125,7 +134,7 @@ For each requirement, use: ``` Agent: - subagent_type: docs-tools:evidence-classifier + subagent_type: evidence-classifier model: haiku description: "Classify REQ-NNN: <title truncated to 40 chars>" prompt: | @@ -140,6 +149,7 @@ Agent: - REPO_PATH: <absolute repo path> - GROUNDED_THRESHOLD: <threshold> - ABSENT_THRESHOLD: <threshold> + - API_SURFACE_FILE: <API_SURFACE_FILE or omit this line if empty> DISCOVERED_REPOS: <JSON array of discovered_repos from step 2, or [] if none> diff --git a/plugins/docs-tools/skills/docs-workflow-tech-review/SKILL.md b/plugins/docs-tools/skills/docs-workflow-tech-review/SKILL.md index 8be4c5a1..a9dd64b6 100644 --- a/plugins/docs-tools/skills/docs-workflow-tech-review/SKILL.md +++ b/plugins/docs-tools/skills/docs-workflow-tech-review/SKILL.md @@ -1,13 +1,15 @@ --- name: docs-workflow-tech-review -description: Technical accuracy review of documentation drafts. Dispatches the docs-tools:technical-reviewer agent. Output includes confidence rating (HIGH/MEDIUM/LOW) Iteration logic is owned by the orchestrator, not this skill. -argument-hint: <ticket> --base-path <path> +description: Technical accuracy review of documentation drafts with optional code-grounded validation. 
When a source repo is available, runs grounded_review and api_surface against the code to validate documentation claims before dispatching the technical-reviewer agent. Iteration logic is owned by the orchestrator, not this skill. +argument-hint: <ticket> --base-path <path> [--repo <path>] allowed-tools: Read, Write, Glob, Grep, Edit, Bash, Skill, Agent, WebSearch, WebFetch --- # Technical Review Step -Step skill for the docs-orchestrator pipeline. Follows the step skill contract: **parse args → dispatch agent → write output**. +Step skill for the docs-orchestrator pipeline. Follows the step skill contract: **parse args → [run code-grounded pre-scan] → dispatch agent → write output**. + +When a source code repository is available (`--repo`), this step runs the same code-grounded validation pipeline used by `docs-review-technical` (Agent 2): `grounded_review.py` validates documentation claims against source code, and `api_surface.py` extracts the public API surface. These results are passed to the `technical-reviewer` agent as pre-computed evidence, giving the reviewer concrete code verdicts alongside its engineering judgment. This skill performs a single review pass. The iteration loop (re-running with fixes between passes) is driven by the orchestrator skill, not this step skill. @@ -15,33 +17,42 @@ This skill performs a single review pass. The iteration loop (re-running with fi - `$1` — JIRA ticket ID (required) - `--base-path <path>` — Base output path (e.g., `.claude/docs/proj-123`) +- `--repo <path>` — Path to the source code repository (optional, provided by orchestrator when available) ## Input ``` <base-path>/writing/ +<repo-path>/ (optional — source code repo for code-grounded validation) ``` ## Output ``` <base-path>/technical-review/review.md +<base-path>/technical-review/step-result.json +<base-path>/technical-review/grounded-review.json (when --repo provided) +<base-path>/technical-review/api-surface.json (when --repo provided) ``` ## Execution ### 1. 
Parse arguments -Extract the ticket ID and `--base-path` from the args string. +Extract the ticket ID, `--base-path`, and optional `--repo` from the args string. Set the paths: ```bash OUTPUT_DIR="${BASE_PATH}/technical-review" OUTPUT_FILE="${OUTPUT_DIR}/review.md" +GROUNDED_FILE="${OUTPUT_DIR}/grounded-review.json" +API_SURFACE_FILE="${OUTPUT_DIR}/api-surface.json" mkdir -p "$OUTPUT_DIR" ``` +Set `HAS_REPO=true` if `--repo` was provided and the path exists as a directory. Otherwise `HAS_REPO=false`. + ### 2. Determine source files Read the writing step's sidecar at `${BASE_PATH}/writing/step-result.json` to determine the writing mode and file list. @@ -64,12 +75,90 @@ Set `DRAFTS_DIR="${BASE_PATH}/writing"` and build the block as: Source drafts location: `<DRAFTS_DIR>/` ``` -### 3. Dispatch agent +### 3. Code-grounded pre-scan (conditional) + +**Skip this step entirely if `HAS_REPO=false`.** Proceed directly to step 4. + +When a source repo is available, run the code-grounded validation pipeline before dispatching the reviewer agent. This produces structured evidence the agent uses alongside its own analysis. + +#### 3a. Collect draft file paths + +Read the writing manifest at `<DRAFTS_DIR>/_index.md`. Extract the absolute file paths from the table rows. If the manifest doesn't exist, fall back to globbing `<DRAFTS_DIR>/` for `.adoc` and `.md` files recursively. + +Build a JSON drafts file for batch mode: + +```bash +# Build drafts batch file from the collected paths +cat > "${OUTPUT_DIR}/drafts-batch.json" << 'EOF' +[ + {"draft": "/path/to/file1.adoc"}, + {"draft": "/path/to/file2.adoc"} +] +EOF +``` + +#### 3b.
Run grounded review + +Check if code-finder is installed: + +```bash +python3 -c "import claude_context" 2>/dev/null && echo "INSTALLED" || echo "NOT_INSTALLED" +``` + +If **INSTALLED**, run directly: + +```bash +python3 ${CLAUDE_PLUGIN_ROOT}/skills/code-evidence/scripts/grounded_review.py \ + --repo "$REPO_PATH" \ + --drafts-file "${OUTPUT_DIR}/drafts-batch.json" \ + --reindex > "$GROUNDED_FILE" +``` + +If **NOT_INSTALLED**, prefix with uv: + +```bash +uv run --with code-finder python3 ${CLAUDE_PLUGIN_ROOT}/skills/code-evidence/scripts/grounded_review.py \ + --repo "$REPO_PATH" \ + --drafts-file "${OUTPUT_DIR}/drafts-batch.json" \ + --reindex > "$GROUNDED_FILE" +``` + +If the command fails (non-zero exit), log a warning and continue without grounded review — set `HAS_GROUNDED=false`. Otherwise `HAS_GROUNDED=true`. -**You MUST use the Agent tool** to invoke the `docs-tools:technical-reviewer` subagent. Do NOT read the agent's markdown file or attempt to perform the agent's work yourself — the agent has a specialized system prompt and must run as an isolated subagent. +#### 3c. Run API surface extraction + +```bash +python3 ${CLAUDE_PLUGIN_ROOT}/skills/code-evidence/scripts/api_surface.py \ + --target "$REPO_PATH" > "$API_SURFACE_FILE" +``` + +Or with uv fallback if code-finder is not installed. + +If the command fails, log a warning and continue without API surface — set `HAS_API_SURFACE=false`. Otherwise `HAS_API_SURFACE=true`. + +#### 3d. Summarize code-grounded findings + +Read `$GROUNDED_FILE` and triage the results. For each claim verdict: + +- `unsupported` — flag as likely inaccurate. Note the evidence that contradicts the claim. +- `no_evidence_found` — note as unverifiable. The claim may reference something outside the repo scope. +- `partially_supported` — note what part is supported and what isn't. +- `supported` — no action needed. + +Read `$API_SURFACE_FILE` and note the total entity count and key classes/functions.
This gives the reviewer a map of what exists in the code. + +Build a `CODE_EVIDENCE_SUMMARY` text block containing: +- Count of claims by verdict (supported, partially_supported, unsupported, no_evidence_found) +- List of unsupported and partially_supported claims with their evidence +- Top-level API surface summary (number of classes, functions, methods) +- List of any doc-referenced APIs not found in the API surface + +### 4. Dispatch agent + +**You MUST use the Agent tool** to invoke the `technical-reviewer` subagent. Do NOT read the agent's markdown file or attempt to perform the agent's work yourself — the agent has a specialized system prompt and must run as an isolated subagent. **Agent tool parameters:** -- `subagent_type`: `docs-tools:technical-reviewer` +- `subagent_type`: `technical-reviewer` - `description`: `Technical review of documentation for <TICKET>` **Prompt** (pass this as the `prompt` parameter to the Agent tool): @@ -81,7 +170,32 @@ Source drafts location: `<DRAFTS_DIR>/` > > The report must include an `Overall technical confidence: HIGH|MEDIUM|LOW` line. -### 4. Verify output +**[Include only if HAS_REPO=true]** Append: + +> Source code repository is available at `<REPO_PATH>`. You may read specific source files to verify technical claims in the documentation. + +**[Include only if HAS_GROUNDED=true]** Append: + +> ## Code-Grounded Review Evidence +> +> A code-grounded review has been run against the documentation drafts using the source repository. The review extracted claims from the documentation and validated each one against the source code. 
+> +> Full results: `<GROUNDED_FILE>` +> +> Summary of findings: +> <CODE_EVIDENCE_SUMMARY> +> +> **How to use this evidence:** +> - Claims with verdict `unsupported` are likely inaccurate — verify the evidence and flag as critical or significant issues +> - Claims with verdict `no_evidence_found` may reference features outside the repo scope — flag as SME verification needed unless you can confirm from other sources +> - Claims with verdict `partially_supported` need targeted review — identify what part is wrong +> - Claims with verdict `supported` have code backing — still apply your engineering judgment but these are lower risk + +**[Include only if HAS_API_SURFACE=true]** Append: + +> Cross-reference the API surface at `<API_SURFACE_FILE>` to check that documented class names, function signatures, and parameters match the actual code. + +### 5. Verify output After the agent completes, verify the review report exists at `<OUTPUT_FILE>`. @@ -89,7 +203,7 @@ The review report **must** include an `Overall technical confidence: HIGH|MEDIUM The report should also include a `Severity counts: critical=N significant=N minor=N sme=N` line. This enables the orchestrator to skip unnecessary iteration when only SME-verification items remain. -### 5. Write step-result.json +### 6. Write step-result.json Parse `<OUTPUT_FILE>` to extract the structured review metadata: @@ -111,8 +225,11 @@ Write the sidecar to `${BASE_PATH}/technical-review/step-result.json`: "minor": "<N>", "sme": "<N>" }, - "iteration": 1 + "iteration": 1, + "code_grounded": <true|false> } ``` The `iteration` field is `1` for the first review pass. If the orchestrator re-invokes this skill after a fix cycle, it passes the current iteration count — increment it for the sidecar. + +The `code_grounded` field records whether the code-grounded pre-scan ran (`HAS_GROUNDED`). This is informational — downstream consumers can use it to assess review thoroughness. 
diff --git a/plugins/docs-tools/skills/docs-workflow-writing/SKILL.md b/plugins/docs-tools/skills/docs-workflow-writing/SKILL.md index a7f799f5..d2739c8e 100644 --- a/plugins/docs-tools/skills/docs-workflow-writing/SKILL.md +++ b/plugins/docs-tools/skills/docs-workflow-writing/SKILL.md @@ -1,7 +1,7 @@ --- name: docs-workflow-writing -description: Write documentation from a documentation plan. Dispatches the docs-tools:docs-writer agent. Supports AsciiDoc (default) and MkDocs formats. Default placement is UPDATE-IN-PLACE; use --draft for staging area. Also supports fix mode for applying technical review corrections. -argument-hint: <ticket> --base-path <path> --format <adoc|mkdocs> [--draft] [--repo-path <path>] [--repo <path>] [--fix-from <review_path>] +description: Write documentation from a documentation plan. Dispatches the docs-writer agent. Supports AsciiDoc (default) and MkDocs formats. Default placement is UPDATE-IN-PLACE; use --draft for staging area. Also supports fix mode for applying technical review corrections. +argument-hint: <ticket> --base-path <path> --format <adoc|mkdocs> [--draft] [--repo <path>] [--repo-path <path>] [--fix-from <review_path>] allowed-tools: Read, Write, Glob, Grep, Edit, Bash, Skill, Agent --- @@ -23,31 +23,33 @@ Pass through the full args string. 
The script emits JSON on stdout: ```json { - "mode": "update-in-place | draft | fix", - "ticket": "PROJ-123", - "format": "adoc | mkdocs", - "input_file": "<base-path>/planning/plan.md", - "evidence_file": "<base-path>/code-evidence/evidence.json | null", - "has_evidence": true | false, - "output_dir": "<base-path>/writing", - "output_file": "<base-path>/writing/_index.md", - "docs_repo_path": "<path> | null", - "source_repo_path": "<path> | null", - "fix_from": "<path> | null", - "verify_output": true | false + "mode": "update-in-place | draft | fix", + "ticket": "PROJ-123", + "format": "adoc | mkdocs", + "input_file": "<base-path>/planning/plan.md", + "evidence_file": "<base-path>/code-evidence/evidence.json | null", + "has_evidence": true | false, + "evidence_status": "<base-path>/scope-req-audit/evidence-status.json | null", + "has_evidence_status": true | false, + "output_dir": "<base-path>/writing", + "output_file": "<base-path>/writing/_index.md", + "docs_repo_path": "<path> | null", + "source_repo_path": "<path> | null", + "fix_from": "<path> | null", + "verify_output": true | false } ``` If the script exits non-zero, stop and report the error from stderr. -### 2. Dispatch the docs-tools:docs-writer agent +### 2. Dispatch the docs-writer agent -**You MUST use the Agent tool** to invoke the `docs-tools:docs-writer` subagent. Do NOT read the agent's markdown file or attempt to perform the agent's work yourself — the agent has a specialized system prompt and must run as an isolated subagent. +**You MUST use the Agent tool** to invoke the `docs-writer` subagent. Do NOT read the agent's markdown file or attempt to perform the agent's work yourself — the agent has a specialized system prompt and must run as an isolated subagent. Select the prompt based on `mode` and `format` from the JSON output. 
In every prompt below, substitute the `<TICKET>`, `<INPUT_FILE>`, `<OUTPUT_FILE>`, `<OUTPUT_DIR>`, `<DOCS_REPO_PATH>`, `<FIX_FROM>`, and `<EVIDENCE_FILE>` placeholders with the corresponding values from the script's JSON. **Agent tool parameters for all modes:** -- `subagent_type`: `docs-tools:docs-writer` +- `subagent_type`: `docs-writer` - `description`: use the value described under each mode below --- @@ -64,6 +66,13 @@ Select the prompt based on `mode` and `format` from the JSON output. In every pr > > **[Include only if HAS_EVIDENCE=true]** Code evidence is available at `<EVIDENCE_FILE>`. Read it and use the `source_results` for accurate function signatures, parameter types, and code examples. Use `context_results` for narrative context, installation steps, and architectural patterns. Prefer evidence over assumptions — if the evidence contradicts the plan, follow the evidence. > +> **[Include only if HAS_EVIDENCE_STATUS=true]** Evidence classifications are available at `<EVIDENCE_STATUS>`. Read it and apply these rules per requirement: +> - **Grounded** requirements have strong code evidence — write with full technical detail +> - **Partial** requirements have weak or ambiguous evidence — write the content but mark unverified technical details (API names, parameter values, configuration keys) with `[NEEDS VERIFICATION]` +> - **Absent** requirements have no code evidence — if they appear in the plan, skip them and note the omission in the manifest. Do NOT fabricate API signatures, SDK imports, CRD schemas, or configuration examples for absent requirements +> +> **[Include only if SOURCE_REPO is not null]** Source code repository is available at `<SOURCE_REPO>`. You may read specific source files for additional detail when the code evidence does not contain sufficient information for a section. Use this to verify function signatures, check parameter types, or find code examples — do not browse the entire repo. 
+> > **IMPORTANT**: Write COMPLETE .adoc files, not summaries or outlines. > > **Placement mode: UPDATE-IN-PLACE** @@ -95,6 +104,13 @@ Select the prompt based on `mode` and `format` from the JSON output. In every pr > > **[Include only if HAS_EVIDENCE=true]** Code evidence is available at `<EVIDENCE_FILE>`. Read it and use the `source_results` for accurate function signatures, parameter types, and code examples. Use `context_results` for narrative context, installation steps, and architectural patterns. Prefer evidence over assumptions — if the evidence contradicts the plan, follow the evidence. > +> **[Include only if HAS_EVIDENCE_STATUS=true]** Evidence classifications are available at `<EVIDENCE_STATUS>`. Read it and apply these rules per requirement: +> - **Grounded** requirements have strong code evidence — write with full technical detail +> - **Partial** requirements have weak or ambiguous evidence — write the content but mark unverified technical details (API names, parameter values, configuration keys) with `[NEEDS VERIFICATION]` +> - **Absent** requirements have no code evidence — if they appear in the plan, skip them and note the omission in the manifest. Do NOT fabricate API signatures, SDK imports, CRD schemas, or configuration examples for absent requirements +> +> **[Include only if SOURCE_REPO is not null]** Source code repository is available at `<SOURCE_REPO>`. You may read specific source files for additional detail when the code evidence does not contain sufficient information for a section. Use this to verify function signatures, check parameter types, or find code examples — do not browse the entire repo. +> > **IMPORTANT**: Write COMPLETE .md files with YAML frontmatter (title, description). Use Material for MkDocs conventions: admonitions, content tabs, code blocks with titles, heading hierarchy starting at `# h1`. 
> > **Placement mode: UPDATE-IN-PLACE** @@ -126,6 +142,13 @@ Select the prompt based on `mode` and `format` from the JSON output. In every pr > > **[Include only if HAS_EVIDENCE=true]** Code evidence is available at `<EVIDENCE_FILE>`. Read it and use the `source_results` for accurate function signatures, parameter types, and code examples. Use `context_results` for narrative context, installation steps, and architectural patterns. Prefer evidence over assumptions — if the evidence contradicts the plan, follow the evidence. > +> **[Include only if HAS_EVIDENCE_STATUS=true]** Evidence classifications are available at `<EVIDENCE_STATUS>`. Read it and apply these rules per requirement: +> - **Grounded** requirements have strong code evidence — write with full technical detail +> - **Partial** requirements have weak or ambiguous evidence — write the content but mark unverified technical details (API names, parameter values, configuration keys) with `[NEEDS VERIFICATION]` +> - **Absent** requirements have no code evidence — if they appear in the plan, skip them and note the omission in the manifest. Do NOT fabricate API signatures, SDK imports, CRD schemas, or configuration examples for absent requirements +> +> **[Include only if SOURCE_REPO is not null]** Source code repository is available at `<SOURCE_REPO>`. You may read specific source files for additional detail when the code evidence does not contain sufficient information for a section. Use this to verify function signatures, check parameter types, or find code examples — do not browse the entire repo. +> > **IMPORTANT**: Write COMPLETE .adoc files, not summaries or outlines. > > **Placement mode: DRAFT (staging area)** @@ -161,6 +184,13 @@ Select the prompt based on `mode` and `format` from the JSON output. In every pr > > **[Include only if HAS_EVIDENCE=true]** Code evidence is available at `<EVIDENCE_FILE>`. 
Read it and use the `source_results` for accurate function signatures, parameter types, and code examples. Use `context_results` for narrative context, installation steps, and architectural patterns. Prefer evidence over assumptions — if the evidence contradicts the plan, follow the evidence. > +> **[Include only if HAS_EVIDENCE_STATUS=true]** Evidence classifications are available at `<EVIDENCE_STATUS>`. Read it and apply these rules per requirement: +> - **Grounded** requirements have strong code evidence — write with full technical detail +> - **Partial** requirements have weak or ambiguous evidence — write the content but mark unverified technical details (API names, parameter values, configuration keys) with `[NEEDS VERIFICATION]` +> - **Absent** requirements have no code evidence — if they appear in the plan, skip them and note the omission in the manifest. Do NOT fabricate API signatures, SDK imports, CRD schemas, or configuration examples for absent requirements +> +> **[Include only if SOURCE_REPO is not null]** Source code repository is available at `<SOURCE_REPO>`. You may read specific source files for additional detail when the code evidence does not contain sufficient information for a section. Use this to verify function signatures, check parameter types, or find code examples — do not browse the entire repo. +> > **IMPORTANT**: Write COMPLETE .md files with YAML frontmatter (title, description). Use Material for MkDocs conventions: admonitions, content tabs, code blocks with titles, heading hierarchy starting at `# h1`. 
> > **Placement mode: DRAFT (staging area)** diff --git a/plugins/docs-tools/skills/docs-workflow-writing/scripts/build_writing_args.sh b/plugins/docs-tools/skills/docs-workflow-writing/scripts/build_writing_args.sh index 00af9a8a..a0cd289c 100755 --- a/plugins/docs-tools/skills/docs-workflow-writing/scripts/build_writing_args.sh +++ b/plugins/docs-tools/skills/docs-workflow-writing/scripts/build_writing_args.sh @@ -8,8 +8,8 @@ # # Usage: # build_writing_args.sh <ticket> --base-path <path> \ -# [--format adoc|mkdocs] [--draft] [--repo-path <path>] \ -# [--repo <path>] [--fix-from <path>] +# [--format adoc|mkdocs] [--draft] [--repo <path>] \ +# [--repo-path <path>] [--fix-from <path>] # # Requires: jq @@ -49,14 +49,14 @@ while [[ $# -gt 0 ]]; do DRAFT=true shift ;; - --repo-path) + --repo) require_arg "$1" "${2:-}" - DOCS_REPO_PATH="$2" + SOURCE_REPO="$2" shift 2 ;; - --repo) + --repo-path) require_arg "$1" "${2:-}" - SOURCE_REPO="$2" + DOCS_REPO_PATH="$2" shift 2 ;; --fix-from) @@ -110,6 +110,21 @@ else EVIDENCE_FILE="" fi +# --- Check for evidence status (scope-req-audit classifications) --- +EVIDENCE_STATUS_FILE="${BASE_PATH}/scope-req-audit/evidence-status.json" +if [[ -f "$EVIDENCE_STATUS_FILE" ]]; then + HAS_EVIDENCE_STATUS=true +else + HAS_EVIDENCE_STATUS=false + EVIDENCE_STATUS_FILE="" +fi + +# --- Validate source repo if provided --- +if [[ -n "$SOURCE_REPO" && ! -d "$SOURCE_REPO" ]]; then + echo "WARNING: Source repo path not found: ${SOURCE_REPO}. Ignoring --repo." 
>&2 + SOURCE_REPO="" +fi + # --- Determine mode --- MODE="" if [[ -n "$FIX_FROM" ]]; then @@ -159,12 +174,14 @@ jq -n \ --arg input_file "$INPUT_FILE" \ --arg evidence_file "$EVIDENCE_FILE" \ --argjson has_evidence "$HAS_EVIDENCE" \ - --arg output_dir "$OUTPUT_DIR" \ - --arg output_file "$OUTPUT_FILE" \ - --arg docs_repo_path "$DOCS_REPO_PATH" \ - --arg source_repo_path "$SOURCE_REPO" \ - --arg fix_from "$FIX_FROM" \ - --argjson verify "$VERIFY" \ + --arg evidence_status "$EVIDENCE_STATUS_FILE" \ + --argjson has_evidence_status "$HAS_EVIDENCE_STATUS" \ + --arg output_dir "$OUTPUT_DIR" \ + --arg output_file "$OUTPUT_FILE" \ + --arg docs_repo_path "$DOCS_REPO_PATH" \ + --arg source_repo_path "$SOURCE_REPO" \ + --arg fix_from "$FIX_FROM" \ + --argjson verify "$VERIFY" \ '{ mode: $mode, ticket: $ticket, @@ -172,6 +189,8 @@ jq -n \ input_file: $input_file, evidence_file: (if $evidence_file == "" then null else $evidence_file end), has_evidence: $has_evidence, + evidence_status: (if $evidence_status == "" then null else $evidence_status end), + has_evidence_status: $has_evidence_status, output_dir: $output_dir, output_file: $output_file, docs_repo_path: (if $docs_repo_path == "" then null else $docs_repo_path end), diff --git a/scripts/test-upstream-plugin.sh b/scripts/test-upstream-plugin.sh index d4255ded..3e4dec0d 100755 --- a/scripts/test-upstream-plugin.sh +++ b/scripts/test-upstream-plugin.sh @@ -82,8 +82,8 @@ reset=false while [[ $# -gt 0 ]]; do case "$1" in - --branch) branch="$2"; shift 2 ;; - --plugin) plugin="$2"; shift 2 ;; + --branch) [[ -n "${2:-}" && "${2:-}" != -* ]] || { echo "ERROR: --branch requires a value"; usage; }; branch="$2"; shift 2 ;; + --plugin) [[ -n "${2:-}" && "${2:-}" != -* ]] || { echo "ERROR: --plugin requires a value"; usage; }; plugin="$2"; shift 2 ;; --reset) reset=true; shift ;; -h|--help) usage ;; *) echo "Unknown option: $1"; usage ;; @@ -119,10 +119,16 @@ if [[ "$reset" == true ]]; then exit 0 fi +if [[ ! 
-d "$MARKETPLACE_DIR/.git" ]]; then + echo "Error: marketplace directory is not a git repository: $MARKETPLACE_DIR" + echo "Run the plugin installer first to create the marketplace clone." + exit 1 +fi + if [[ -z "$branch" ]]; then - branch=$(git branch --show-current 2>/dev/null || true) + branch=$(git -C "$MARKETPLACE_DIR" branch --show-current 2>/dev/null || true) if [[ -z "$branch" ]]; then - echo "Error: --branch <branch> is required (could not detect current branch)." + echo "Error: --branch <branch> is required (could not detect current branch in $MARKETPLACE_DIR)." echo usage fi