diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index e2fcd652e..5df7811a1 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -7,7 +7,8 @@ Rust library for NP-hard problem reductions. Implements computational problems w - [issue-to-pr](skills/issue-to-pr/SKILL.md) -- Convert a GitHub issue into a PR with an implementation plan. Validates the issue against the appropriate checklist, then dispatches to `add-model` or `add-rule`. - [add-model](skills/add-model/SKILL.md) -- Add a new problem model. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. - [add-rule](skills/add-rule/SKILL.md) -- Add a new reduction rule. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. -- [review-implementation](skills/review-implementation/SKILL.md) -- Review a model or rule implementation for completeness. Auto-detects type from changed files. Called automatically at the end of `add-model`/`add-rule`, or standalone via `/review-implementation`. +- [review-implementation](skills/review-implementation/SKILL.md) -- Review implementation completeness by dispatching parallel subagents (structural + quality) with fresh context. Auto-detects new models/rules from git diff. Called automatically at the end of `add-model`/`add-rule`, after each `executing-plans` batch, or standalone via `/review-implementation`. +- [fix-pr](skills/fix-pr/SKILL.md) -- Resolve PR review comments (user + Copilot), fix CI failures, and address codecov coverage gaps. Uses `gh api` for codecov (not local `cargo-llvm-cov`). - [release](skills/release/SKILL.md) -- Create a new crate release. Determines version bump from diff, verifies tests/clippy, then runs `make release`. 
## Commands @@ -35,6 +36,7 @@ make cli # Build the pred CLI tool (release mode) make cli-demo # Run closed-loop CLI demo (exercises all commands) make mcp-test # Run MCP server tests (unit + integration) make run-plan # Execute a plan with Claude autorun +make copilot-review # Request Copilot code review on current PR make release V=x.y.z # Tag and push a new release (CI publishes to crates.io) ``` @@ -119,8 +121,9 @@ Problem types use explicit optimization prefixes: ### Problem Variant IDs Reduction graph nodes use variant key-value pairs from `Problem::variant()`: - Base: `MaximumIndependentSet` (empty variant = defaults) -- Graph variant: `MaximumIndependentSet {graph: "GridGraph", weight: "i32"}` +- Graph variant: `MaximumIndependentSet {graph: "KingsSubgraph", weight: "One"}` - Weight variant: `MaximumIndependentSet {graph: "SimpleGraph", weight: "f64"}` +- Default variant ranking: `SimpleGraph`, `One`, `KN` are considered default values; variants with the most default values sort first - Nodes come exclusively from `#[reduction]` registrations; natural edges between same-name variants are inferred from the graph/weight subtype partial order ## Conventions diff --git a/.claude/skills/fix-pr/SKILL.md b/.claude/skills/fix-pr/SKILL.md new file mode 100644 index 000000000..f42d7ae96 --- /dev/null +++ b/.claude/skills/fix-pr/SKILL.md @@ -0,0 +1,155 @@ +--- +name: fix-pr +description: Use when a PR has review comments to address, CI failures to fix, or codecov coverage gaps to resolve +--- + +# Fix PR + +Resolve PR review comments, fix CI failures, and address codecov coverage gaps for the current branch's PR. + +## Step 1: Gather PR State + +```bash +# Get PR number +PR=$(gh pr view --json number --jq .number) + +# Get PR head SHA (on remote) +HEAD_SHA=$(gh api repos/{owner}/{repo}/pulls/$PR --jq '.head.sha') +``` + +### 1a. 
Fetch Review Comments + +Three sources of feedback to check: + +```bash +# Copilot and user inline review comments (on code lines) +gh api repos/{owner}/{repo}/pulls/$PR/comments --jq '.[] | "[\(.user.login)] \(.path):\(.line // .original_line) — \(.body)"' + +# Review-level comments (top-level review body) +gh api repos/{owner}/{repo}/pulls/$PR/reviews --jq '.[] | select(.body != "") | "[\(.user.login)] \(.state): \(.body)"' + +# Issue-level comments (general discussion) +gh api repos/{owner}/{repo}/issues/$PR/comments --jq '.[] | select(.user.login | test("codecov|copilot") | not) | "[\(.user.login)] \(.body)"' +``` + +### 1b. Check CI Status + +```bash +# All check runs on the PR head +gh api repos/{owner}/{repo}/commits/$HEAD_SHA/check-runs \ + --jq '.check_runs[] | "\(.name): \(.conclusion // .status)"' +``` + +### 1c. Check Codecov Report + +```bash +# Codecov bot comment with coverage diff +gh api repos/{owner}/{repo}/issues/$PR/comments \ + --jq '.[] | select(.user.login == "codecov[bot]") | .body' +``` + +## Step 2: Triage and Prioritize + +Categorize all findings: + +| Priority | Type | Action | +|----------|------|--------| +| 1 | CI failures (test/clippy/build) | Fix immediately -- blocks merge | +| 2 | User review comments | Address each one -- respond on PR | +| 3 | Copilot review comments | Evaluate validity, fix if correct | +| 4 | Codecov coverage gaps | Add tests for uncovered lines | + +## Step 3: Fix CI Failures + +For each failing check: + +1. **Clippy**: Run `make clippy` locally, fix warnings +2. **Test**: Run `make test` locally, fix failures +3. **Build**: Run `make build` locally, fix errors +4. **Coverage**: See Step 5 (codecov-specific flow) + +## Step 4: Address Review Comments + +For each review comment: + +1. Read the comment and the code it references +2. Evaluate if the suggestion is correct +3. If valid: make the fix, commit +4. If debatable: fix it anyway unless technically wrong +5. 
If wrong: prepare a response explaining why + +**Do NOT respond on the PR** -- just fix and commit. The user will push and respond. + +### Handling Copilot Suggestions + +Copilot suggestions with `suggestion` blocks contain exact code. Evaluate each: +- **Correct**: Apply the suggestion +- **Partially correct**: Apply the spirit, adjust details +- **Wrong**: Skip, note why in commit message + +## Step 5: Fix Codecov Coverage Gaps + +**IMPORTANT: Do NOT run `cargo-llvm-cov` locally.** Use the `gh api` to read the codecov report instead. + +### 5a. Identify Uncovered Lines + +From the codecov bot comment (fetched in Step 1c), extract: +- Files with missing coverage +- Patch coverage percentage +- Specific uncovered lines (linked in the report) + +For detailed line-by-line coverage, extract the Codecov report links from the bot comment (note: the codecov bot posts issue-level comments, so use the `issues` endpoint as in Step 1c, not `pulls`): + +```bash +# Get file-level coverage links from the codecov bot comment +gh api repos/{owner}/{repo}/issues/$PR/comments \ + --jq '.[] | select(.user.login == "codecov[bot]") | .body' \ + | grep -oP 'filepath=\K[^&]+' +``` + +Then read the source files and identify which new/changed lines lack test coverage. + +### 5b. Add Tests for Uncovered Lines + +1. Read the uncovered file and identify the untested code paths +2. Write tests targeting those specific paths (error branches, edge cases, etc.) +3. Run `make test` to verify tests pass +4. Commit the new tests + +### 5c. Verify Coverage Improvement + +After pushing, CI will re-run coverage. Check the updated codecov comment on the PR.
+ +## Step 6: Commit and Report + +After all fixes: + +```bash +# Verify everything passes locally +make check # fmt + clippy + test +``` + +Commit with a descriptive message referencing the PR: + +```bash +git commit -m "fix: address PR #$PR review comments + +- [summary of fixes applied] +" +``` + +Report to user: +- List of review comments addressed (with what was done) +- CI fixes applied +- Coverage gaps filled +- Any comments left unresolved (with reasoning) + +## Integration + +### With review-implementation + +Run `/review-implementation` first to catch issues before push. Then `/fix-pr` after push to address CI and reviewer feedback. + +### With executing-plans / finishing-a-development-branch + +After creating a PR and running `make copilot-review`, use `/fix-pr` to address the resulting feedback. diff --git a/.claude/skills/review-implementation/SKILL.md b/.claude/skills/review-implementation/SKILL.md index 886116961..9b7e0acdb 100644 --- a/.claude/skills/review-implementation/SKILL.md +++ b/.claude/skills/review-implementation/SKILL.md @@ -1,141 +1,149 @@ --- name: review-implementation -description: Use after implementing a model or rule to verify completeness and correctness before committing +description: Use after implementing a model, rule, or any code change to verify completeness and correctness before committing --- # Review Implementation -Automated review checklist for verifying that a new model or rule implementation is complete. Run this after finishing `add-model` or `add-rule`, before committing. +Dispatches two parallel review subagents with fresh context (no implementation history bias): +- **Structural reviewer** -- model/rule checklists + semantic correctness (only for new models/rules) +- **Quality reviewer** -- DRY, KISS, HC/LC, HCI, test quality (always) ## Invocation -Auto-detects the implementation type from changed files. 
Can also be invoked with an explicit argument: -- `/review-implementation` -- auto-detect from `git diff` +- `/review-implementation` -- auto-detect from git diff - `/review-implementation model MaximumClique` -- review a specific model - `/review-implementation rule mis_qubo` -- review a specific rule +- `/review-implementation generic` -- code quality only (no structural checklist) ## Step 1: Detect What Changed -Use `git diff --name-only` (against main branch or last commit) to identify: -- Files in `src/models/` -> model review -- Files in `src/rules/` (not `mod.rs`, `traits.rs`, `cost.rs`, `graph.rs`, `registry.rs`) -> rule review -- Both -> run both reviews +Determine whether new model/rule files were added: + +```bash +# Check for NEW files (not just modifications) +git diff --name-only --diff-filter=A HEAD~1..HEAD +# Also check against main for branch-level changes +git diff --name-only --diff-filter=A main..HEAD +``` + +Detection rules: +- New file in `src/models/` (not `mod.rs`) -> **model review** (structural + quality) +- New file in `src/rules/` (not `mod.rs`, `traits.rs`, `cost.rs`, `graph.rs`, `registry.rs`) -> **rule review** (structural + quality) +- Only modified files (no new model/rule) -> **quality review only** +- Both new model and rule files -> dispatch structural for both + quality +- Explicit argument overrides auto-detection Extract the problem name(s) and rule source/target from the file paths. -## Step 2: Run Structural Checks - -For each detected change, run the appropriate checklist below. Report results as a table with pass/fail per item. - -### Model Checklist - -Given: problem name `P`, category `C`, file stem `F` (snake_case). 
- -| # | Check | Verification method | -|---|-------|-------------------| -| 1 | Model file exists | `Glob("src/models/{C}/{F}.rs")` | -| 2 | `inventory::submit!` present | `Grep("inventory::submit", file)` | -| 3 | `#[derive(...Serialize, Deserialize)]` on struct | `Grep("Serialize.*Deserialize", file)` | -| 4 | `Problem` trait impl | `Grep("impl.*Problem for.*{P}", file)` | -| 5 | `OptimizationProblem` or `SatisfactionProblem` impl | `Grep("(OptimizationProblem\|SatisfactionProblem).*for.*{P}", file)` | -| 6 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | -| 7 | Test file exists | `Glob("src/unit_tests/models/{C}/{F}.rs")` | -| 8 | Test has creation test | `Grep("fn test_.*creation\|fn test_{F}.*basic", test_file)` | -| 9 | Test has evaluation test | `Grep("fn test_.*evaluat", test_file)` | -| 10 | Registered in `{C}/mod.rs` | `Grep("mod {F}", "src/models/{C}/mod.rs")` | -| 11 | Re-exported in `models/mod.rs` | `Grep("{P}", "src/models/mod.rs")` | -| 12 | CLI `load_problem` arm | `Grep('"{P}"', "problemreductions-cli/src/dispatch.rs")` | -| 13 | CLI `serialize_any_problem` arm | `Grep('"{P}".*try_ser', "problemreductions-cli/src/dispatch.rs")` | -| 14 | CLI `resolve_alias` entry | `Grep("{P}", "problemreductions-cli/src/problem_name.rs")` | -| 15 | Paper `display-name` entry | `Grep('"{P}"', "docs/paper/reductions.typ")` | -| 16 | Paper `problem-def` block | `Grep('problem-def.*"{P}"', "docs/paper/reductions.typ")` | - -### Rule Checklist - -Given: source `S`, target `T`, rule file stem `R` = `{s}_{t}` (lowercase), example stem `E` = `reduction_{s}_to_{t}`. 
- -| # | Check | Verification method | -|---|-------|-------------------| -| 1 | Rule file exists | `Glob("src/rules/{R}.rs")` | -| 2 | `#[reduction(...)]` macro present | `Grep("#\\[reduction", file)` | -| 3 | `ReductionResult` impl present | `Grep("impl.*ReductionResult", file)` | -| 4 | `ReduceTo` impl present | `Grep("impl.*ReduceTo", file)` | -| 5 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | -| 6 | Test file exists | `Glob("src/unit_tests/rules/{R}.rs")` | -| 7 | Closed-loop test present | `Grep("fn test_.*closed_loop\|fn test_.*to_.*basic", test_file)` | -| 8 | Registered in `rules/mod.rs` | `Grep("mod {R}", "src/rules/mod.rs")` | -| 9 | Example file exists | `Glob("examples/{E}.rs")` | -| 10 | Example has `pub fn run()` | `Grep("pub fn run", example_file)` | -| 11 | Example has `fn main()` | `Grep("fn main", example_file)` | -| 12 | `example_test!` registered | `Grep("example_test!\\({E}\\)", "tests/suites/examples.rs")` | -| 13 | `example_fn!` registered | `Grep("example_fn!.*{E}", "tests/suites/examples.rs")` | -| 14 | Paper `reduction-rule` entry | `Grep('reduction-rule.*"{S}".*"{T}"', "docs/paper/reductions.typ")` | - -## Step 3: Run Build Checks - -After structural checks, run: +## Step 2: Prepare Subagent Context + +Get the git SHAs for the review range: + +```bash +BASE_SHA=$(git merge-base main HEAD) # or HEAD~N for batch reviews +HEAD_SHA=$(git rev-parse HEAD) +``` + +Get the diff summary and changed file list: ```bash -make test clippy +git diff --stat $BASE_SHA..$HEAD_SHA +git diff --name-only $BASE_SHA..$HEAD_SHA ``` -Report pass/fail. If tests fail, identify which tests and suggest fixes. 
+## Step 3: Dispatch Subagents in Parallel + +### Structural Reviewer (if new model/rule detected) + +Dispatch using `Task` tool with `subagent_type="superpowers:code-reviewer"`: + +- Read `structural-reviewer-prompt.md` from this skill directory +- Fill placeholders: + - `{REVIEW_TYPE}` -> "model", "rule", or "model + rule" + - `{REVIEW_PARAMS}` -> summary of what's being reviewed + - `{PROBLEM_NAME}`, `{CATEGORY}`, `{FILE_STEM}` -> for model reviews + - `{SOURCE}`, `{TARGET}`, `{RULE_STEM}`, `{EXAMPLE_STEM}` -> for rule reviews +- Prompt = filled template + +### Quality Reviewer (always) + +Dispatch using `Task` tool with `subagent_type="superpowers:code-reviewer"`: -## Step 4: Semantic Review (AI Judgment) +- Read `quality-reviewer-prompt.md` from this skill directory +- Fill placeholders: + - `{DIFF_SUMMARY}` -> output of `git diff --stat` + - `{CHANGED_FILES}` -> list of changed files + - `{PLAN_STEP}` -> description of what was implemented (or "standalone review") + - `{BASE_SHA}`, `{HEAD_SHA}` -> git range +- Prompt = filled template -Read the implementation files and assess: +**Both subagents must be dispatched in parallel** (single message, two Task tool calls). -### For Models: -1. **`evaluate()` correctness** -- Does it check feasibility before computing the objective? Does it return `SolutionSize::Invalid` / `false` for infeasible configs? -2. **`dims()` correctness** -- Does it return the actual configuration space? (e.g., `vec![2; n]` for binary) -3. **Size getter consistency** -- Do the inherent getter methods (e.g., `num_vertices()`, `num_edges()`) match names used in overhead expressions? -4. **Weight handling** -- Are weights managed via inherent methods, not traits? +## Step 4: Collect and Address Findings -### For Rules: -1. **`extract_solution` correctness** -- Does it correctly invert the reduction? Does the returned solution have the right length (source dimensions)? -2. 
**Overhead accuracy** -- Does the `overhead = { field = "expr" }` reflect the actual size relationship? -3. **Example quality** -- Is it tutorial-style? Does it use the instance from the issue? Does the JSON export include both source and target data? -4. **Paper quality** -- Is the reduction-rule statement precise? Is the proof sketch sound? Is the example figure clear? +When both subagents return: -### Code Quality Principles (applies to both Models and Rules): -1. **DRY (Don't Repeat Yourself)** -- Is there duplicated logic that should be extracted into a shared helper, utility function, or common module? Check for copy-pasted code blocks across files (e.g., similar graph construction, weight handling, or solution extraction patterns). If duplication is found, suggest extracting shared logic. -2. **KISS (Keep It Simple, Stupid)** -- Is the implementation unnecessarily complex? Look for: over-engineered abstractions, convoluted control flow, premature generalization, or layers of indirection that add no value. The implementation should be as simple as possible while remaining correct and maintainable. +1. **Parse results** -- identify FAIL/ISSUE items from both reports +2. **Fix automatically** -- structural FAILs (missing registration, missing file), clear semantic issues, Important+ quality issues +3. **Report to user** -- ambiguous semantic issues, Minor quality items, anything you're unsure about +4. **Present consolidated report** combining both reviews -## Output Format +## Step 5: Present Consolidated Report -Present results as: +Merge both subagent outputs into a single report: ``` -## Review: [Model/Rule] [Name] +## Review: [Model/Rule/Generic] [Name] -### Structural Completeness +### Structural Completeness (from structural reviewer) | # | Check | Status | |---|-------|--------| -| 1 | Model file exists | PASS | -| 2 | inventory::submit! | PASS | -| ... | ... | ... 
| -| N | Paper entry | FAIL -- missing display-name | - -### Build Status -- `make test`: PASS -- `make clippy`: PASS - -### Semantic Review -- evaluate() correctness: OK -- dims() correctness: OK -- DRY compliance: OK / [duplicated logic found in ...] -- KISS compliance: OK / [unnecessary complexity found in ...] -- [any other issues found] - -### Summary -- X/Y structural checks passed -- [list of action items for any failures] +... + +### Build Status (from structural reviewer) +- `make test`: PASS / FAIL +- `make clippy`: PASS / FAIL + +### Semantic Review (from structural reviewer) +... + +### Code Quality (from quality reviewer) +- DRY: OK / ... +- KISS: OK / ... +- HC/LC: OK / ... + +### HCI (from quality reviewer, if CLI/MCP changed) +... + +### Test Quality (from quality reviewer) +... + +### Fixes Applied +- [list of issues automatically fixed by main agent] + +### Remaining Items (needs user decision) +- [list of issues that need user input] ``` -## Integration with Other Skills +## Integration + +### With executing-plans + +After each batch in the executing-plans flow, the main agent should: +1. Record `BASE_SHA` before the batch starts +2. After batch completes, follow Steps 1-5 above +3. Fix findings before reporting to user +4. Include review results in the batch report + +### Copilot Review (after PR creation) + +After creating a PR (from any flow), run `make copilot-review` to request GitHub Copilot code review on the PR. + +### With add-model / add-rule + +At the end of these skills (after their verify step), invoke `/review-implementation` which dispatches subagents as described above. -This skill is called automatically at the end of: -- `add-model` (after Step 7: Verify) -- `add-rule` (after Step 6: Verify) +### Standalone -It can also be invoked standalone via `/review-implementation`. +Invoke directly via `/review-implementation` for any code change. 
diff --git a/.claude/skills/review-implementation/quality-reviewer-prompt.md b/.claude/skills/review-implementation/quality-reviewer-prompt.md new file mode 100644 index 000000000..803637652 --- /dev/null +++ b/.claude/skills/review-implementation/quality-reviewer-prompt.md @@ -0,0 +1,100 @@ +# Code Quality Review Agent + +You are reviewing code changes for quality in the `problemreductions` Rust codebase. You have NO context about prior implementation work -- review the code fresh. + +## What Changed + +{DIFF_SUMMARY} + +## Changed Files + +{CHANGED_FILES} + +## Plan Step Context (if applicable) + +{PLAN_STEP} + +## Git Range + +**Base:** {BASE_SHA} +**Head:** {HEAD_SHA} + +Start by running: +```bash +git diff --stat {BASE_SHA}..{HEAD_SHA} +git diff {BASE_SHA}..{HEAD_SHA} +``` + +Then read the changed files in full. + +## Review Criteria + +### Design Principles + +1. **DRY (Don't Repeat Yourself)** -- Is there duplicated logic that should be extracted into a shared helper? Check for copy-pasted code blocks across files (similar graph construction, weight handling, or solution extraction patterns). + +2. **KISS (Keep It Simple, Stupid)** -- Is the implementation unnecessarily complex? Look for: over-engineered abstractions, convoluted control flow, premature generalization, layers of indirection that add no value. + +3. **High Cohesion, Low Coupling (HC/LC)** -- Does each module/function/struct have a single, well-defined responsibility? + - **Low cohesion**: Function doing unrelated things. Each unit should have one reason to change. + - **High coupling**: Modules depending on each other's internals. + - **Mixed concerns**: A single file containing both problem logic and CLI/serialization logic. + - **God functions**: Functions longer than ~50 lines doing multiple conceptually distinct things. + +### HCI (if CLI/MCP files changed) + +Only check these if the diff touches `problemreductions-cli/`: + +4. **Error messages** -- Are they actionable? 
Bad: `"invalid parameter"`. Good: `"KColoring requires --k (e.g., --k 3)"`. +5. **Discoverability** -- Missing `--help` examples? Undocumented flags? Silent failures that should suggest alternatives? +6. **Consistency** -- Similar operations expressed similarly? Parameter names, output formats, error styles uniform? +7. **Least surprise** -- Output matches expectations? No contradictory output or silent data loss? +8. **Feedback** -- Tool confirms what it did? Echoes interpreted parameters for ambiguous operations? + +### Test Quality + +9. **Naive Test Detection** -- Flag tests that: + - **Only check types/shapes, not values**: e.g., `assert!(result.is_some())` without checking the solution is correct. + - **Mirror the implementation**: Tests recomputing the same formula as the code prove nothing. + - **Lack adversarial cases**: Only happy path. Tests must include infeasible configs and boundary cases. + - **Use trivial instances only**: Single-edge or 2-node tests may pass with bugs. Need 5+ vertex instances. + - **Closed-loop without verification**: Must verify extracted solution is **optimal** (compare brute-force on both source and target). + - **Assert count too low**: 1-2 asserts for non-trivial code is insufficient. 
+ +## Output Format + +You MUST output in this exact format: + +``` +## Code Quality Review + +### Design Principles +- DRY: OK / ISSUE -- [description with file:line] +- KISS: OK / ISSUE -- [description with file:line] +- HC/LC: OK / ISSUE -- [description with file:line] + +### HCI (if CLI/MCP changed) +- Error messages: OK / ISSUE -- [description] +- Discoverability: OK / ISSUE -- [description] +- Consistency: OK / ISSUE -- [description] +- Least surprise: OK / ISSUE -- [description] +- Feedback: OK / ISSUE -- [description] + +### Test Quality +- Naive test detection: OK / ISSUE + - [specific tests flagged with reason and file:line] + +### Issues + +#### Critical (Must Fix) +[Bugs, correctness issues, data loss risks] + +#### Important (Should Fix) +[Architecture problems, missing tests, poor error handling] + +#### Minor (Nice to Have) +[Code style, optimization opportunities] + +### Summary +- [list of action items with severity] +``` diff --git a/.claude/skills/review-implementation/structural-reviewer-prompt.md b/.claude/skills/review-implementation/structural-reviewer-prompt.md new file mode 100644 index 000000000..b24c7a3a6 --- /dev/null +++ b/.claude/skills/review-implementation/structural-reviewer-prompt.md @@ -0,0 +1,111 @@ +# Structural & Semantic Review Agent + +You are reviewing a new model or rule implementation for structural completeness and semantic correctness in the `problemreductions` Rust codebase. + +## Review Type: {REVIEW_TYPE} + +{REVIEW_PARAMS} + +## Instructions + +1. Run the structural checklist below using Grep and Glob tools +2. Run `make test clippy` to verify build +3. Read the implementation files and perform semantic review +4. Output results in the structured format at the end + +## Model Checklist + +Only run this section if REVIEW_TYPE includes "model". + +Given: problem name `P` = `{PROBLEM_NAME}`, category `C` = `{CATEGORY}`, file stem `F` = `{FILE_STEM}`. 
+ +| # | Check | How to verify | +|---|-------|--------------| +| 1 | Model file exists | `Glob("src/models/{C}/{F}.rs")` | +| 2 | `inventory::submit!` present | `Grep("inventory::submit", file)` | +| 3 | `#[derive(...Serialize, Deserialize)]` on struct | `Grep("Serialize.*Deserialize", file)` | +| 4 | `Problem` trait impl | `Grep("impl.*Problem for.*{P}", file)` | +| 5 | `OptimizationProblem` or `SatisfactionProblem` impl | `Grep("(OptimizationProblem\|SatisfactionProblem).*for.*{P}", file)` | +| 6 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | +| 7 | Test file exists | `Glob("src/unit_tests/models/{C}/{F}.rs")` | +| 8 | Test has creation test | `Grep("fn test_.*creation\|fn test_{F}.*basic", test_file)` | +| 9 | Test has evaluation test | `Grep("fn test_.*evaluat", test_file)` | +| 10 | Registered in `{C}/mod.rs` | `Grep("mod {F}", "src/models/{C}/mod.rs")` | +| 11 | Re-exported in `models/mod.rs` | `Grep("{P}", "src/models/mod.rs")` | +| 12 | CLI `load_problem` arm | `Grep('"{P}"', "problemreductions-cli/src/dispatch.rs")` | +| 13 | CLI `serialize_any_problem` arm | `Grep('"{P}".*try_ser', "problemreductions-cli/src/dispatch.rs")` | +| 14 | CLI `resolve_alias` entry | `Grep("{P}", "problemreductions-cli/src/problem_name.rs")` | +| 15 | Paper `display-name` entry | `Grep('"{P}"', "docs/paper/reductions.typ")` | +| 16 | Paper `problem-def` block | `Grep('problem-def.*"{P}"', "docs/paper/reductions.typ")` | + +## Rule Checklist + +Only run this section if REVIEW_TYPE includes "rule". + +Given: source `S` = `{SOURCE}`, target `T` = `{TARGET}`, rule file stem `R` = `{RULE_STEM}`, example stem `E` = `{EXAMPLE_STEM}`. 
+ +| # | Check | How to verify | +|---|-------|--------------| +| 1 | Rule file exists | `Glob("src/rules/{R}.rs")` | +| 2 | `#[reduction(...)]` macro present | `Grep("#\\[reduction", file)` | +| 3 | `ReductionResult` impl present | `Grep("impl.*ReductionResult", file)` | +| 4 | `ReduceTo` impl present | `Grep("impl.*ReduceTo", file)` | +| 5 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | +| 6 | Test file exists | `Glob("src/unit_tests/rules/{R}.rs")` | +| 7 | Closed-loop test present | `Grep("fn test_.*closed_loop\|fn test_.*to_.*basic", test_file)` | +| 8 | Registered in `rules/mod.rs` | `Grep("mod {R}", "src/rules/mod.rs")` | +| 9 | Example file exists | `Glob("examples/{E}.rs")` | +| 10 | Example has `pub fn run()` | `Grep("pub fn run", example_file)` | +| 11 | Example has `fn main()` | `Grep("fn main", example_file)` | +| 12 | `example_test!` registered | `Grep("example_test!\\({E}\\)", "tests/suites/examples.rs")` | +| 13 | `example_fn!` registered | `Grep("example_fn!.*{E}", "tests/suites/examples.rs")` | +| 14 | Paper `reduction-rule` entry | `Grep('reduction-rule.*"{S}".*"{T}"', "docs/paper/reductions.typ")` | + +## Build Check + +Run: +```bash +make test clippy +``` + +Report pass/fail. If tests fail, identify which tests. + +## Semantic Review + +### For Models: +1. **`evaluate()` correctness** -- Does it check feasibility before computing the objective? Does it return `SolutionSize::Invalid` / `false` for infeasible configs? +2. **`dims()` correctness** -- Does it return the actual configuration space? (e.g., `vec![2; n]` for binary) +3. **Size getter consistency** -- Do inherent getter methods (e.g., `num_vertices()`, `num_edges()`) match names used in overhead expressions? +4. **Weight handling** -- Are weights managed via inherent methods, not traits? + +### For Rules: +1. **`extract_solution` correctness** -- Does it correctly invert the reduction? Does the returned solution have the right length (source dimensions)? 
+2. **Overhead accuracy** -- Does `overhead = { field = "expr" }` reflect the actual size relationship? +3. **Example quality** -- Is it tutorial-style? Does the JSON export include both source and target data? +4. **Paper quality** -- Is the reduction-rule statement precise? Is the proof sketch sound? + +## Output Format + +You MUST output in this exact format: + +``` +## Review: {REVIEW_TYPE} {PROBLEM_NAME} + +### Structural Completeness +| # | Check | Status | +|---|-------|--------| +| 1 | ... | PASS / FAIL -- reason | + +### Build Status +- `make test`: PASS / FAIL +- `make clippy`: PASS / FAIL + +### Semantic Review +- evaluate()/extract_solution correctness: OK / ISSUE -- description +- dims() correctness: OK / ISSUE -- description +- [other checks]: OK / ISSUE -- description + +### Summary +- X/Y structural checks passed +- [list of action items for any failures] +``` diff --git a/.gitignore b/.gitignore index 19031764a..886a4b60e 100644 --- a/.gitignore +++ b/.gitignore @@ -82,4 +82,5 @@ docs/paper/examples/ # Claude Code logs claude-output.log .worktrees/ -.worktree/ \ No newline at end of file +.worktree/ +*.json diff --git a/Makefile b/Makefile index a41d69bd9..68317c949 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Makefile for problemreductions -.PHONY: help build test mcp-test fmt clippy doc mdbook paper examples clean coverage rust-export compare qubo-testdata export-schemas release run-plan diagrams jl-testdata cli cli-demo +.PHONY: help build test mcp-test fmt clippy doc mdbook paper examples clean coverage rust-export compare qubo-testdata export-schemas release run-plan diagrams jl-testdata cli cli-demo copilot-review # Default target help: @@ -28,6 +28,7 @@ help: @echo " cli - Build the pred CLI tool" @echo " cli-demo - Run closed-loop CLI demo (build + exercise all commands)" @echo " run-plan - Execute a plan with Claude autorun (latest plan in docs/plans/)" + @echo " copilot-review - Request Copilot code review on current PR" # Build 
the project build: @@ -341,3 +342,10 @@ cli-demo: cli echo ""; \ echo "=== Demo complete: $$(ls $(CLI_DEMO_DIR)/*.json | wc -l | tr -d ' ') JSON files in $(CLI_DEMO_DIR) ===" @echo "=== All 20 steps passed ✅ ===" + +# Request Copilot code review on the current PR +# Requires: gh extension install ChrisCarini/gh-copilot-review +copilot-review: + @PR=$$(gh pr view --json number --jq .number 2>/dev/null) || { echo "No PR found for current branch"; exit 1; }; \ + echo "Requesting Copilot review on PR #$$PR..."; \ + gh copilot-review $$PR diff --git a/docs/paper/static/petersen_square_unweighted.json b/docs/paper/static/petersen_square_unweighted.json index 10f0f52f6..083ab57a6 100644 --- a/docs/paper/static/petersen_square_unweighted.json +++ b/docs/paper/static/petersen_square_unweighted.json @@ -1 +1 @@ -{"nodes":[{"row":2,"col":6,"weight":1},{"row":2,"col":18,"weight":1},{"row":2,"col":34,"weight":1},{"row":3,"col":5,"weight":1},{"row":3,"col":7,"weight":1},{"row":3,"col":8,"weight":2},{"row":3,"col":9,"weight":2},{"row":3,"col":10,"weight":2},{"row":3,"col":11,"weight":2},{"row":3,"col":12,"weight":2},{"row":3,"col":13,"weight":2},{"row":3,"col":14,"weight":2},{"row":3,"col":15,"weight":2},{"row":3,"col":16,"weight":2},{"row":3,"col":17,"weight":1},{"row":3,"col":19,"weight":1},{"row":3,"col":20,"weight":2},{"row":3,"col":21,"weight":1},{"row":3,"col":28,"weight":1},{"row":3,"col":29,"weight":2},{"row":3,"col":30,"weight":2},{"row":3,"col":31,"weight":2},{"row":3,"col":32,"weight":2},{"row":3,"col":33,"weight":1},{"row":3,"col":35,"weight":1},{"row":3,"col":36,"weight":2},{"row":3,"col":37,"weight":1},{"row":4,"col":6,"weight":1},{"row":4,"col":18,"weight":1},{"row":4,"col":22,"weight":1},{"row":4,"col":27,"weight":1},{"row":4,"col":34,"weight":1},{"row":4,"col":38,"weight":1},{"row":5,"col":6,"weight":1},{"row":5,"col":18,"weight":2},{"row":5,"col":22,"weight":2},{"row":5,"col":26,"weight":1},{"row":5,"col":34,"weight":2},{"row":5,"col":38,"weight":2},{"row":6,"col
":7,"weight":1},{"row":6,"col":10,"weight":1},{"row":6,"col":18,"weight":1},{"row":6,"col":22,"weight":1},{"row":6,"col":26,"weight":1},{"row":6,"col":34,"weight":1},{"row":6,"col":38,"weight":1},{"row":7,"col":8,"weight":1},{"row":7,"col":9,"weight":1},{"row":7,"col":11,"weight":1},{"row":7,"col":12,"weight":2},{"row":7,"col":13,"weight":2},{"row":7,"col":14,"weight":2},{"row":7,"col":15,"weight":2},{"row":7,"col":16,"weight":1},{"row":7,"col":17,"weight":1},{"row":7,"col":18,"weight":1},{"row":7,"col":19,"weight":1},{"row":7,"col":20,"weight":1},{"row":7,"col":21,"weight":1},{"row":7,"col":22,"weight":1},{"row":7,"col":23,"weight":1},{"row":7,"col":24,"weight":1},{"row":7,"col":25,"weight":1},{"row":7,"col":32,"weight":1},{"row":7,"col":33,"weight":1},{"row":7,"col":34,"weight":1},{"row":7,"col":35,"weight":1},{"row":7,"col":36,"weight":1},{"row":7,"col":37,"weight":1},{"row":7,"col":39,"weight":1},{"row":8,"col":10,"weight":1},{"row":8,"col":17,"weight":1},{"row":8,"col":18,"weight":1},{"row":8,"col":19,"weight":1},{"row":8,"col":21,"weight":1},{"row":8,"col":22,"weight":1},{"row":8,"col":23,"weight":1},{"row":8,"col":31,"weight":1},{"row":8,"col":33,"weight":1},{"row":8,"col":34,"weight":1},{"row":8,"col":35,"weight":1},{"row":8,"col":38,"weight":1},{"row":9,"col":10,"weight":1},{"row":9,"col":18,"weight":1},{"row":9,"col":22,"weight":1},{"row":9,"col":30,"weight":1},{"row":9,"col":34,"weight":1},{"row":9,"col":38,"weight":2},{"row":10,"col":11,"weight":1},{"row":10,"col":14,"weight":1},{"row":10,"col":18,"weight":1},{"row":10,"col":22,"weight":1},{"row":10,"col":30,"weight":1},{"row":10,"col":34,"weight":1},{"row":10,"col":38,"weight":1},{"row":11,"col":12,"weight":1},{"row":11,"col":13,"weight":1},{"row":11,"col":15,"weight":1},{"row":11,"col":16,"weight":1},{"row":11,"col":17,"weight":1},{"row":11,"col":18,"weight":1},{"row":11,"col":19,"weight":1},{"row":11,"col":20,"weight":1},{"row":11,"col":21,"weight":1},{"row":11,"col":22,"weight":1},{"row":11,"col":23,
"weight":1},{"row":11,"col":24,"weight":1},{"row":11,"col":25,"weight":2},{"row":11,"col":26,"weight":2},{"row":11,"col":27,"weight":2},{"row":11,"col":28,"weight":2},{"row":11,"col":29,"weight":1},{"row":11,"col":31,"weight":1},{"row":11,"col":34,"weight":1},{"row":11,"col":38,"weight":1},{"row":12,"col":14,"weight":1},{"row":12,"col":17,"weight":1},{"row":12,"col":18,"weight":1},{"row":12,"col":19,"weight":1},{"row":12,"col":21,"weight":1},{"row":12,"col":22,"weight":1},{"row":12,"col":23,"weight":1},{"row":12,"col":30,"weight":1},{"row":12,"col":34,"weight":1},{"row":12,"col":38,"weight":1},{"row":13,"col":14,"weight":1},{"row":13,"col":18,"weight":1},{"row":13,"col":22,"weight":1},{"row":13,"col":30,"weight":2},{"row":13,"col":34,"weight":1},{"row":13,"col":38,"weight":1},{"row":14,"col":15,"weight":1},{"row":14,"col":18,"weight":1},{"row":14,"col":22,"weight":1},{"row":14,"col":30,"weight":1},{"row":14,"col":34,"weight":1},{"row":14,"col":38,"weight":2},{"row":15,"col":16,"weight":1},{"row":15,"col":17,"weight":1},{"row":15,"col":18,"weight":1},{"row":15,"col":19,"weight":1},{"row":15,"col":20,"weight":1},{"row":15,"col":21,"weight":1},{"row":15,"col":22,"weight":1},{"row":15,"col":23,"weight":1},{"row":15,"col":24,"weight":1},{"row":15,"col":25,"weight":2},{"row":15,"col":26,"weight":2},{"row":15,"col":27,"weight":2},{"row":15,"col":28,"weight":1},{"row":15,"col":29,"weight":1},{"row":15,"col":30,"weight":1},{"row":15,"col":31,"weight":1},{"row":15,"col":32,"weight":1},{"row":15,"col":33,"weight":1},{"row":15,"col":35,"weight":1},{"row":15,"col":38,"weight":2},{"row":16,"col":18,"weight":1},{"row":16,"col":21,"weight":1},{"row":16,"col":22,"weight":1},{"row":16,"col":23,"weight":1},{"row":16,"col":29,"weight":1},{"row":16,"col":30,"weight":1},{"row":16,"col":31,"weight":1},{"row":16,"col":34,"weight":1},{"row":16,"col":38,"weight":2},{"row":17,"col":18,"weight":1},{"row":17,"col":22,"weight":1},{"row":17,"col":30,"weight":1},{"row":17,"col":34,"weight":2},{"ro
w":17,"col":38,"weight":2},{"row":18,"col":19,"weight":1},{"row":18,"col":22,"weight":1},{"row":18,"col":30,"weight":1},{"row":18,"col":34,"weight":1},{"row":18,"col":38,"weight":1},{"row":19,"col":20,"weight":1},{"row":19,"col":21,"weight":1},{"row":19,"col":22,"weight":1},{"row":19,"col":23,"weight":1},{"row":19,"col":24,"weight":1},{"row":19,"col":25,"weight":2},{"row":19,"col":26,"weight":2},{"row":19,"col":27,"weight":2},{"row":19,"col":28,"weight":1},{"row":19,"col":29,"weight":1},{"row":19,"col":30,"weight":1},{"row":19,"col":31,"weight":1},{"row":19,"col":32,"weight":1},{"row":19,"col":33,"weight":1},{"row":19,"col":34,"weight":1},{"row":19,"col":35,"weight":1},{"row":19,"col":36,"weight":1},{"row":19,"col":37,"weight":1},{"row":20,"col":21,"weight":1},{"row":20,"col":22,"weight":1},{"row":20,"col":23,"weight":1},{"row":20,"col":29,"weight":1},{"row":20,"col":30,"weight":1},{"row":20,"col":31,"weight":1},{"row":20,"col":33,"weight":1},{"row":20,"col":34,"weight":1},{"row":20,"col":35,"weight":1},{"row":21,"col":22,"weight":1},{"row":21,"col":30,"weight":1},{"row":21,"col":34,"weight":1},{"row":22,"col":23,"weight":1},{"row":22,"col":30,"weight":1},{"row":22,"col":34,"weight":1},{"row":23,"col":24,"weight":1},{"row":23,"col":25,"weight":2},{"row":23,"col":26,"weight":2},{"row":23,"col":27,"weight":2},{"row":23,"col":28,"weight":2},{"row":23,"col":29,"weight":1},{"row":23,"col":31,"weight":1},{"row":23,"col":32,"weight":2},{"row":23,"col":33,"weight":1},{"row":24,"col":30,"weight":1}],"edges":[[0,3],[0,4],[1,14],[1,15],[2,23],[2,24],[3,27],[4,5],[4,27],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,28],[15,16],[15,28],[16,17],[17,29],[18,19],[18,30],[19,20],[20,21],[21,22],[22,23],[23,31],[24,25],[24,31],[25,26],[26,32],[27,33],[28,34],[29,35],[30,36],[31,37],[32,38],[33,39],[34,41],[35,42],[36,43],[37,44],[38,45],[39,46],[40,47],[40,48],[41,54],[41,55],[41,56],[42,58],[42,59],[42,60],[43,62],[44,64],[44,65],[44,66],[45,68],[45,69],[46,47],
[47,70],[48,49],[48,70],[49,50],[50,51],[51,52],[52,53],[53,54],[53,71],[54,55],[54,71],[54,72],[55,56],[55,71],[55,72],[55,73],[56,57],[56,72],[56,73],[57,58],[57,73],[57,74],[58,59],[58,74],[58,75],[59,60],[59,74],[59,75],[59,76],[60,61],[60,75],[60,76],[61,62],[61,76],[63,64],[63,77],[63,78],[64,65],[64,78],[64,79],[65,66],[65,78],[65,79],[65,80],[66,67],[66,79],[66,80],[67,68],[67,80],[68,81],[69,81],[70,82],[71,72],[71,83],[72,73],[72,83],[73,83],[74,75],[74,84],[75,76],[75,84],[76,84],[77,85],[78,79],[78,86],[79,80],[79,86],[80,86],[81,87],[82,88],[83,90],[84,91],[85,92],[86,93],[87,94],[88,95],[89,96],[89,97],[90,99],[90,100],[90,101],[91,103],[91,104],[91,105],[92,111],[92,112],[93,113],[94,114],[95,96],[96,115],[97,98],[97,115],[98,99],[98,116],[99,100],[99,116],[99,117],[100,101],[100,116],[100,117],[100,118],[101,102],[101,117],[101,118],[102,103],[102,118],[102,119],[103,104],[103,119],[103,120],[104,105],[104,119],[104,120],[104,121],[105,106],[105,120],[105,121],[106,107],[106,121],[107,108],[108,109],[109,110],[110,111],[111,122],[112,122],[113,123],[114,124],[115,125],[116,117],[116,126],[117,118],[117,126],[118,126],[119,120],[119,127],[120,121],[120,127],[121,127],[122,128],[123,129],[124,130],[125,131],[126,132],[127,133],[128,134],[129,135],[130,136],[131,137],[132,138],[132,139],[132,140],[133,142],[133,143],[133,144],[134,150],[134,151],[134,152],[135,154],[135,155],[136,156],[137,138],[138,139],[138,157],[139,140],[139,157],[140,141],[140,157],[141,142],[141,158],[142,143],[142,158],[142,159],[143,144],[143,158],[143,159],[143,160],[144,145],[144,159],[144,160],[145,146],[145,160],[146,147],[147,148],[148,149],[149,150],[149,161],[150,151],[150,161],[150,162],[151,152],[151,161],[151,162],[151,163],[152,153],[152,162],[152,163],[153,154],[153,163],[154,164],[155,164],[156,165],[157,166],[158,159],[158,167],[159,160],[159,167],[160,167],[161,162],[161,168],[162,163],[162,168],[163,168],[164,169],[165,170],[166,171],[167,172],[168,173],[169,174]
,[170,175],[171,176],[172,177],[172,178],[172,179],[173,185],[173,186],[173,187],[174,189],[174,190],[174,191],[175,193],[176,177],[176,194],[177,178],[177,194],[177,195],[178,179],[178,194],[178,195],[178,196],[179,180],[179,195],[179,196],[180,181],[180,196],[181,182],[182,183],[183,184],[184,185],[184,197],[185,186],[185,197],[185,198],[186,187],[186,197],[186,198],[186,199],[187,188],[187,198],[187,199],[188,189],[188,199],[188,200],[189,190],[189,200],[189,201],[190,191],[190,200],[190,201],[190,202],[191,192],[191,201],[191,202],[192,193],[192,202],[194,195],[194,203],[195,196],[195,203],[196,203],[197,198],[197,204],[198,199],[198,204],[199,204],[200,201],[200,205],[201,202],[201,205],[202,205],[203,206],[204,207],[205,208],[206,209],[207,214],[207,215],[208,217],[209,210],[210,211],[211,212],[212,213],[213,214],[214,218],[215,216],[215,218],[216,217]],"mis_overhead":89,"padding":2,"spacing":4,"weighted":false} \ No newline at end of file +{"nodes":[{"row":2,"col":6,"weight":1},{"row":2,"col":18,"weight":1},{"row":2,"col":34,"weight":1},{"row":3,"col":5,"weight":1},{"row":3,"col":7,"weight":1},{"row":3,"col":8,"weight":1},{"row":3,"col":9,"weight":1},{"row":3,"col":10,"weight":1},{"row":3,"col":11,"weight":1},{"row":3,"col":12,"weight":1},{"row":3,"col":13,"weight":1},{"row":3,"col":14,"weight":1},{"row":3,"col":15,"weight":1},{"row":3,"col":16,"weight":1},{"row":3,"col":17,"weight":1},{"row":3,"col":19,"weight":1},{"row":3,"col":20,"weight":1},{"row":3,"col":21,"weight":1},{"row":3,"col":28,"weight":1},{"row":3,"col":29,"weight":1},{"row":3,"col":30,"weight":1},{"row":3,"col":31,"weight":1},{"row":3,"col":32,"weight":1},{"row":3,"col":33,"weight":1},{"row":3,"col":35,"weight":1},{"row":3,"col":36,"weight":1},{"row":3,"col":37,"weight":1},{"row":4,"col":6,"weight":1},{"row":4,"col":18,"weight":1},{"row":4,"col":22,"weight":1},{"row":4,"col":27,"weight":1},{"row":4,"col":34,"weight":1},{"row":4,"col":38,"weight":1},{"row":5,"col":6,"weight":1},{"row":5,"col":1
8,"weight":1},{"row":5,"col":22,"weight":1},{"row":5,"col":26,"weight":1},{"row":5,"col":34,"weight":1},{"row":5,"col":38,"weight":1},{"row":6,"col":7,"weight":1},{"row":6,"col":10,"weight":1},{"row":6,"col":18,"weight":1},{"row":6,"col":22,"weight":1},{"row":6,"col":26,"weight":1},{"row":6,"col":34,"weight":1},{"row":6,"col":38,"weight":1},{"row":7,"col":8,"weight":1},{"row":7,"col":9,"weight":1},{"row":7,"col":11,"weight":1},{"row":7,"col":12,"weight":1},{"row":7,"col":13,"weight":1},{"row":7,"col":14,"weight":1},{"row":7,"col":15,"weight":1},{"row":7,"col":16,"weight":1},{"row":7,"col":17,"weight":1},{"row":7,"col":18,"weight":1},{"row":7,"col":19,"weight":1},{"row":7,"col":20,"weight":1},{"row":7,"col":21,"weight":1},{"row":7,"col":22,"weight":1},{"row":7,"col":23,"weight":1},{"row":7,"col":24,"weight":1},{"row":7,"col":25,"weight":1},{"row":7,"col":32,"weight":1},{"row":7,"col":33,"weight":1},{"row":7,"col":34,"weight":1},{"row":7,"col":35,"weight":1},{"row":7,"col":36,"weight":1},{"row":7,"col":37,"weight":1},{"row":7,"col":39,"weight":1},{"row":8,"col":10,"weight":1},{"row":8,"col":17,"weight":1},{"row":8,"col":18,"weight":1},{"row":8,"col":19,"weight":1},{"row":8,"col":21,"weight":1},{"row":8,"col":22,"weight":1},{"row":8,"col":23,"weight":1},{"row":8,"col":31,"weight":1},{"row":8,"col":33,"weight":1},{"row":8,"col":34,"weight":1},{"row":8,"col":35,"weight":1},{"row":8,"col":38,"weight":1},{"row":9,"col":10,"weight":1},{"row":9,"col":18,"weight":1},{"row":9,"col":22,"weight":1},{"row":9,"col":30,"weight":1},{"row":9,"col":34,"weight":1},{"row":9,"col":38,"weight":1},{"row":10,"col":11,"weight":1},{"row":10,"col":14,"weight":1},{"row":10,"col":18,"weight":1},{"row":10,"col":22,"weight":1},{"row":10,"col":30,"weight":1},{"row":10,"col":34,"weight":1},{"row":10,"col":38,"weight":1},{"row":11,"col":12,"weight":1},{"row":11,"col":13,"weight":1},{"row":11,"col":15,"weight":1},{"row":11,"col":16,"weight":1},{"row":11,"col":17,"weight":1},{"row":11,"col":18,"weight"
:1},{"row":11,"col":19,"weight":1},{"row":11,"col":20,"weight":1},{"row":11,"col":21,"weight":1},{"row":11,"col":22,"weight":1},{"row":11,"col":23,"weight":1},{"row":11,"col":24,"weight":1},{"row":11,"col":25,"weight":1},{"row":11,"col":26,"weight":1},{"row":11,"col":27,"weight":1},{"row":11,"col":28,"weight":1},{"row":11,"col":29,"weight":1},{"row":11,"col":31,"weight":1},{"row":11,"col":34,"weight":1},{"row":11,"col":38,"weight":1},{"row":12,"col":14,"weight":1},{"row":12,"col":17,"weight":1},{"row":12,"col":18,"weight":1},{"row":12,"col":19,"weight":1},{"row":12,"col":21,"weight":1},{"row":12,"col":22,"weight":1},{"row":12,"col":23,"weight":1},{"row":12,"col":30,"weight":1},{"row":12,"col":34,"weight":1},{"row":12,"col":38,"weight":1},{"row":13,"col":14,"weight":1},{"row":13,"col":18,"weight":1},{"row":13,"col":22,"weight":1},{"row":13,"col":30,"weight":1},{"row":13,"col":34,"weight":1},{"row":13,"col":38,"weight":1},{"row":14,"col":15,"weight":1},{"row":14,"col":18,"weight":1},{"row":14,"col":22,"weight":1},{"row":14,"col":30,"weight":1},{"row":14,"col":34,"weight":1},{"row":14,"col":38,"weight":1},{"row":15,"col":16,"weight":1},{"row":15,"col":17,"weight":1},{"row":15,"col":18,"weight":1},{"row":15,"col":19,"weight":1},{"row":15,"col":20,"weight":1},{"row":15,"col":21,"weight":1},{"row":15,"col":22,"weight":1},{"row":15,"col":23,"weight":1},{"row":15,"col":24,"weight":1},{"row":15,"col":25,"weight":1},{"row":15,"col":26,"weight":1},{"row":15,"col":27,"weight":1},{"row":15,"col":28,"weight":1},{"row":15,"col":29,"weight":1},{"row":15,"col":30,"weight":1},{"row":15,"col":31,"weight":1},{"row":15,"col":32,"weight":1},{"row":15,"col":33,"weight":1},{"row":15,"col":35,"weight":1},{"row":15,"col":38,"weight":1},{"row":16,"col":18,"weight":1},{"row":16,"col":21,"weight":1},{"row":16,"col":22,"weight":1},{"row":16,"col":23,"weight":1},{"row":16,"col":29,"weight":1},{"row":16,"col":30,"weight":1},{"row":16,"col":31,"weight":1},{"row":16,"col":34,"weight":1},{"row":16,"c
ol":38,"weight":1},{"row":17,"col":18,"weight":1},{"row":17,"col":22,"weight":1},{"row":17,"col":30,"weight":1},{"row":17,"col":34,"weight":1},{"row":17,"col":38,"weight":1},{"row":18,"col":19,"weight":1},{"row":18,"col":22,"weight":1},{"row":18,"col":30,"weight":1},{"row":18,"col":34,"weight":1},{"row":18,"col":38,"weight":1},{"row":19,"col":20,"weight":1},{"row":19,"col":21,"weight":1},{"row":19,"col":22,"weight":1},{"row":19,"col":23,"weight":1},{"row":19,"col":24,"weight":1},{"row":19,"col":25,"weight":1},{"row":19,"col":26,"weight":1},{"row":19,"col":27,"weight":1},{"row":19,"col":28,"weight":1},{"row":19,"col":29,"weight":1},{"row":19,"col":30,"weight":1},{"row":19,"col":31,"weight":1},{"row":19,"col":32,"weight":1},{"row":19,"col":33,"weight":1},{"row":19,"col":34,"weight":1},{"row":19,"col":35,"weight":1},{"row":19,"col":36,"weight":1},{"row":19,"col":37,"weight":1},{"row":20,"col":21,"weight":1},{"row":20,"col":22,"weight":1},{"row":20,"col":23,"weight":1},{"row":20,"col":29,"weight":1},{"row":20,"col":30,"weight":1},{"row":20,"col":31,"weight":1},{"row":20,"col":33,"weight":1},{"row":20,"col":34,"weight":1},{"row":20,"col":35,"weight":1},{"row":21,"col":22,"weight":1},{"row":21,"col":30,"weight":1},{"row":21,"col":34,"weight":1},{"row":22,"col":23,"weight":1},{"row":22,"col":30,"weight":1},{"row":22,"col":34,"weight":1},{"row":23,"col":24,"weight":1},{"row":23,"col":25,"weight":1},{"row":23,"col":26,"weight":1},{"row":23,"col":27,"weight":1},{"row":23,"col":28,"weight":1},{"row":23,"col":29,"weight":1},{"row":23,"col":31,"weight":1},{"row":23,"col":32,"weight":1},{"row":23,"col":33,"weight":1},{"row":24,"col":30,"weight":1}],"edges":[[0,3],[0,4],[1,14],[1,15],[2,23],[2,24],[3,27],[4,5],[4,27],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,28],[15,16],[15,28],[16,17],[17,29],[18,19],[18,30],[19,20],[20,21],[21,22],[22,23],[23,31],[24,25],[24,31],[25,26],[26,32],[27,33],[28,34],[29,35],[30,36],[31,37],[32,38],[33,39],[34,41],[35,42],[36,4
3],[37,44],[38,45],[39,46],[40,47],[40,48],[41,54],[41,55],[41,56],[42,58],[42,59],[42,60],[43,62],[44,64],[44,65],[44,66],[45,68],[45,69],[46,47],[47,70],[48,49],[48,70],[49,50],[50,51],[51,52],[52,53],[53,54],[53,71],[54,55],[54,71],[54,72],[55,56],[55,71],[55,72],[55,73],[56,57],[56,72],[56,73],[57,58],[57,73],[57,74],[58,59],[58,74],[58,75],[59,60],[59,74],[59,75],[59,76],[60,61],[60,75],[60,76],[61,62],[61,76],[63,64],[63,77],[63,78],[64,65],[64,78],[64,79],[65,66],[65,78],[65,79],[65,80],[66,67],[66,79],[66,80],[67,68],[67,80],[68,81],[69,81],[70,82],[71,72],[71,83],[72,73],[72,83],[73,83],[74,75],[74,84],[75,76],[75,84],[76,84],[77,85],[78,79],[78,86],[79,80],[79,86],[80,86],[81,87],[82,88],[83,90],[84,91],[85,92],[86,93],[87,94],[88,95],[89,96],[89,97],[90,99],[90,100],[90,101],[91,103],[91,104],[91,105],[92,111],[92,112],[93,113],[94,114],[95,96],[96,115],[97,98],[97,115],[98,99],[98,116],[99,100],[99,116],[99,117],[100,101],[100,116],[100,117],[100,118],[101,102],[101,117],[101,118],[102,103],[102,118],[102,119],[103,104],[103,119],[103,120],[104,105],[104,119],[104,120],[104,121],[105,106],[105,120],[105,121],[106,107],[106,121],[107,108],[108,109],[109,110],[110,111],[111,122],[112,122],[113,123],[114,124],[115,125],[116,117],[116,126],[117,118],[117,126],[118,126],[119,120],[119,127],[120,121],[120,127],[121,127],[122,128],[123,129],[124,130],[125,131],[126,132],[127,133],[128,134],[129,135],[130,136],[131,137],[132,138],[132,139],[132,140],[133,142],[133,143],[133,144],[134,150],[134,151],[134,152],[135,154],[135,155],[136,156],[137,138],[138,139],[138,157],[139,140],[139,157],[140,141],[140,157],[141,142],[141,158],[142,143],[142,158],[142,159],[143,144],[143,158],[143,159],[143,160],[144,145],[144,159],[144,160],[145,146],[145,160],[146,147],[147,148],[148,149],[149,150],[149,161],[150,151],[150,161],[150,162],[151,152],[151,161],[151,162],[151,163],[152,153],[152,162],[152,163],[153,154],[153,163],[154,164],[155,164],[156,165],[157,166],[158,159],[1
58,167],[159,160],[159,167],[160,167],[161,162],[161,168],[162,163],[162,168],[163,168],[164,169],[165,170],[166,171],[167,172],[168,173],[169,174],[170,175],[171,176],[172,177],[172,178],[172,179],[173,185],[173,186],[173,187],[174,189],[174,190],[174,191],[175,193],[176,177],[176,194],[177,178],[177,194],[177,195],[178,179],[178,194],[178,195],[178,196],[179,180],[179,195],[179,196],[180,181],[180,196],[181,182],[182,183],[183,184],[184,185],[184,197],[185,186],[185,197],[185,198],[186,187],[186,197],[186,198],[186,199],[187,188],[187,198],[187,199],[188,189],[188,199],[188,200],[189,190],[189,200],[189,201],[190,191],[190,200],[190,201],[190,202],[191,192],[191,201],[191,202],[192,193],[192,202],[194,195],[194,203],[195,196],[195,203],[196,203],[197,198],[197,204],[198,199],[198,204],[199,204],[200,201],[200,205],[201,202],[201,205],[202,205],[203,206],[204,207],[205,208],[206,209],[207,214],[207,215],[208,217],[209,210],[210,211],[211,212],[212,213],[213,214],[214,218],[215,216],[215,218],[216,217]],"mis_overhead":89,"padding":2,"spacing":4,"weighted":false} \ No newline at end of file diff --git a/docs/plans/2026-02-27-binpacking-model.md b/docs/plans/2026-02-27-binpacking-model.md new file mode 100644 index 000000000..b576b45e2 --- /dev/null +++ b/docs/plans/2026-02-27-binpacking-model.md @@ -0,0 +1,96 @@ +# Plan: Add BinPacking Model + +**Issue:** #95 — [Model] BinPacking +**Skill:** add-model (Steps 1–7) + +## Overview + +Add a `BinPacking` optimization model: given items with sizes and a bin capacity, minimize the number of bins used to pack all items such that no bin exceeds capacity. + +## Design Decisions + +- **Category:** `specialized` — BinPacking is a domain-specific packing/scheduling problem. It doesn't fit `graph/` (no graph), `set/` (not subset selection), `optimization/` (reserved for generic formulations like QUBO/ILP), or `satisfiability/`. +- **Struct:** `BinPacking` with fields `sizes: Vec` and `capacity: W`. 
Generic over weight type W for integer or real-valued sizes. +- **dims():** `vec![n; n]` where n = number of items. Each variable is a bin index in {0, ..., n−1}. This is the first non-binary configuration space in the codebase. +- **Objective:** Minimize the count of distinct bin indices used (always `i32`, regardless of W). So `Metric = SolutionSize`, `Value = i32`. +- **Feasibility:** For each bin j, the sum of sizes of items assigned to j must not exceed capacity. Uses `WeightElement::to_sum()` for size summation and capacity comparison. +- **variant():** `variant_params![W]` — exposes weight type (i32, f64). +- **Solver:** BruteForce (existing) — enumerates all n^n assignments. No ILP reduction in this PR. + +## Steps + +### Step 1: Determine category +Category: `specialized/` + +### Step 2: Implement the model +Create `src/models/specialized/bin_packing.rs`: + +```rust +// Structure: +// 1. inventory::submit! for ProblemSchemaEntry +// 2. BinPacking struct with sizes: Vec, capacity: W +// 3. Constructor: new(sizes, capacity), with_unit_sizes(sizes, capacity) if W: From +// 4. Accessors: sizes(), capacity(), num_items() +// 5. Problem impl: NAME="BinPacking", Metric=SolutionSize, dims()=vec![n;n] +// 6. evaluate(): check bin capacities, count distinct bins +// 7. OptimizationProblem impl: Value=i32, direction=Minimize +// 8. #[cfg(test)] #[path] link +``` + +Key implementation details for `evaluate()`: +``` +1. Group items by assigned bin index +2. For each bin, sum sizes via to_sum() and compare with capacity.to_sum() +3. If any bin exceeds capacity → SolutionSize::Invalid +4. Otherwise → SolutionSize::Valid(num_distinct_bins as i32) +``` + +### Step 3: Register the model +1. `src/models/specialized/mod.rs` — add `pub(crate) mod bin_packing;` and `pub use bin_packing::BinPacking;` +2. `src/models/mod.rs` — add `BinPacking` to the `specialized` re-export line + +### Step 4: Register in CLI +1. 
`problemreductions-cli/src/dispatch.rs`: + - `load_problem()`: add `"BinPacking" => deser_opt::>(data)` + - `serialize_any_problem()`: add `"BinPacking" => try_ser::>(any)` +2. `problemreductions-cli/src/problem_name.rs`: + - `resolve_alias()`: add `"binpacking" => "BinPacking".to_string()` + - Optionally add `("BP", "BinPacking")` to `ALIASES` + +### Step 5: Write unit tests +Create `src/unit_tests/models/specialized/bin_packing.rs`: + +Tests: +- `test_binpacking_creation` — construct instance, verify num_items, dims +- `test_binpacking_evaluation_valid` — valid packing returns SolutionSize::Valid(num_bins) +- `test_binpacking_evaluation_invalid` — overloaded bin returns SolutionSize::Invalid +- `test_binpacking_direction` — verify Direction::Minimize +- `test_binpacking_solver` — BruteForce finds optimal 3-bin solution for the example instance (6 items, sizes [6,6,5,5,4,4], capacity 10) +- `test_binpacking_serialization` — round-trip serde test + +Example instance from issue: +- 6 items, capacity C = 10, sizes = [6, 6, 5, 5, 4, 4] +- Optimal: 3 bins, e.g., x = (0, 1, 2, 2, 0, 1) + +### Step 6: Document in paper +Update `docs/paper/reductions.typ`: +1. Add to `display-name` dictionary: `"BinPacking": [Bin Packing]` +2. Add `#problem-def("BinPacking")[...]` block with mathematical definition + +### Step 7: Verify +```bash +make check # fmt + clippy + test +``` +Then run `/review-implementation` to verify completeness. 
+ +## Files Changed + +| File | Action | +|------|--------| +| `src/models/specialized/bin_packing.rs` | **Create** — model implementation | +| `src/unit_tests/models/specialized/bin_packing.rs` | **Create** — unit tests | +| `src/models/specialized/mod.rs` | **Edit** — register module | +| `src/models/mod.rs` | **Edit** — add re-export | +| `problemreductions-cli/src/dispatch.rs` | **Edit** — CLI dispatch | +| `problemreductions-cli/src/problem_name.rs` | **Edit** — alias | +| `docs/paper/reductions.typ` | **Edit** — paper definition | diff --git a/docs/src/reductions/reduction_graph.json b/docs/src/reductions/reduction_graph.json index acf4bd9eb..e25f33c65 100644 --- a/docs/src/reductions/reduction_graph.json +++ b/docs/src/reductions/reduction_graph.json @@ -4,19 +4,32 @@ "name": "CircuitSAT", "variant": {}, "category": "specialized", - "doc_path": "models/specialized/struct.CircuitSAT.html" + "doc_path": "models/specialized/struct.CircuitSAT.html", + "complexity": "2^num_inputs" }, { "name": "Factoring", "variant": {}, "category": "specialized", - "doc_path": "models/specialized/struct.Factoring.html" + "doc_path": "models/specialized/struct.Factoring.html", + "complexity": "exp(sqrt(num_bits))" }, { "name": "ILP", "variant": {}, "category": "optimization", - "doc_path": "models/optimization/struct.ILP.html" + "doc_path": "models/optimization/struct.ILP.html", + "complexity": "exp(num_variables)" + }, + { + "name": "KColoring", + "variant": { + "graph": "SimpleGraph", + "k": "K2" + }, + "category": "graph", + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "2^num_vertices" }, { "name": "KColoring", @@ -25,7 +38,28 @@ "k": "K3" }, "category": "graph", - "doc_path": "models/graph/struct.KColoring.html" + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "3^num_vertices" + }, + { + "name": "KColoring", + "variant": { + "graph": "SimpleGraph", + "k": "K4" + }, + "category": "graph", + "doc_path": 
"models/graph/struct.KColoring.html", + "complexity": "4^num_vertices" + }, + { + "name": "KColoring", + "variant": { + "graph": "SimpleGraph", + "k": "K5" + }, + "category": "graph", + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "5^num_vertices" }, { "name": "KColoring", @@ -34,7 +68,8 @@ "k": "KN" }, "category": "graph", - "doc_path": "models/graph/struct.KColoring.html" + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "k^num_vertices" }, { "name": "KSatisfiability", @@ -42,7 +77,8 @@ "k": "K2" }, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.KSatisfiability.html" + "doc_path": "models/satisfiability/struct.KSatisfiability.html", + "complexity": "2^num_variables" }, { "name": "KSatisfiability", @@ -50,7 +86,8 @@ "k": "K3" }, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.KSatisfiability.html" + "doc_path": "models/satisfiability/struct.KSatisfiability.html", + "complexity": "2^num_variables" }, { "name": "KSatisfiability", @@ -58,7 +95,8 @@ "k": "KN" }, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.KSatisfiability.html" + "doc_path": "models/satisfiability/struct.KSatisfiability.html", + "complexity": "2^num_variables" }, { "name": "MaxCut", @@ -67,7 +105,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaxCut.html" + "doc_path": "models/graph/struct.MaxCut.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximalIS", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximalIS.html", + "complexity": "2^num_vertices" }, { "name": "MaximumClique", @@ -76,7 +125,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumClique.html" + "doc_path": "models/graph/struct.MaximumClique.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximumIndependentSet", + "variant": { + "graph": 
"KingsSubgraph", + "weight": "One" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -85,7 +145,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximumIndependentSet", + "variant": { + "graph": "SimpleGraph", + "weight": "One" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -94,7 +165,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -103,7 +175,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximumIndependentSet", + "variant": { + "graph": "UnitDiskGraph", + "weight": "One" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -112,7 +195,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumMatching", @@ -121,7 +205,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumMatching.html" + "doc_path": "models/graph/struct.MaximumMatching.html", + "complexity": "2^num_vertices" }, { "name": "MaximumSetPacking", @@ -129,7 +214,8 @@ "weight": "f64" }, "category": "set", - "doc_path": 
"models/set/struct.MaximumSetPacking.html" + "doc_path": "models/set/struct.MaximumSetPacking.html", + "complexity": "2^num_sets" }, { "name": "MaximumSetPacking", @@ -137,7 +223,8 @@ "weight": "i32" }, "category": "set", - "doc_path": "models/set/struct.MaximumSetPacking.html" + "doc_path": "models/set/struct.MaximumSetPacking.html", + "complexity": "2^num_sets" }, { "name": "MinimumDominatingSet", @@ -146,7 +233,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MinimumDominatingSet.html" + "doc_path": "models/graph/struct.MinimumDominatingSet.html", + "complexity": "2^num_vertices" }, { "name": "MinimumSetCovering", @@ -154,7 +242,8 @@ "weight": "i32" }, "category": "set", - "doc_path": "models/set/struct.MinimumSetCovering.html" + "doc_path": "models/set/struct.MinimumSetCovering.html", + "complexity": "2^num_sets" }, { "name": "MinimumVertexCover", @@ -163,7 +252,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MinimumVertexCover.html" + "doc_path": "models/graph/struct.MinimumVertexCover.html", + "complexity": "2^num_vertices" }, { "name": "QUBO", @@ -171,13 +261,15 @@ "weight": "f64" }, "category": "optimization", - "doc_path": "models/optimization/struct.QUBO.html" + "doc_path": "models/optimization/struct.QUBO.html", + "complexity": "2^num_vars" }, { "name": "Satisfiability", "variant": {}, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.Satisfiability.html" + "doc_path": "models/satisfiability/struct.Satisfiability.html", + "complexity": "2^num_variables" }, { "name": "SpinGlass", @@ -186,7 +278,8 @@ "weight": "f64" }, "category": "optimization", - "doc_path": "models/optimization/struct.SpinGlass.html" + "doc_path": "models/optimization/struct.SpinGlass.html", + "complexity": "2^num_vertices" }, { "name": "SpinGlass", @@ -195,7 +288,8 @@ "weight": "i32" }, "category": "optimization", - "doc_path": "models/optimization/struct.SpinGlass.html" + "doc_path": 
"models/optimization/struct.SpinGlass.html", + "complexity": "2^num_vertices" }, { "name": "TravelingSalesman", @@ -204,7 +298,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.TravelingSalesman.html" + "doc_path": "models/graph/struct.TravelingSalesman.html", + "complexity": "num_vertices!" } ], "edges": [ @@ -225,7 +320,7 @@ }, { "source": 0, - "target": 23, + "target": 30, "overhead": [ { "field": "num_spins", @@ -270,7 +365,7 @@ }, { "source": 2, - "target": 20, + "target": 27, "overhead": [ { "field": "num_vars", @@ -280,8 +375,8 @@ "doc_path": "rules/ilp_qubo/index.html" }, { - "source": 3, - "target": 4, + "source": 4, + "target": 7, "overhead": [ { "field": "num_vertices", @@ -295,7 +390,7 @@ "doc_path": "rules/kcoloring_casts/index.html" }, { - "source": 4, + "source": 7, "target": 2, "overhead": [ { @@ -310,8 +405,8 @@ "doc_path": "rules/coloring_ilp/index.html" }, { - "source": 4, - "target": 20, + "source": 7, + "target": 27, "overhead": [ { "field": "num_vars", @@ -321,8 +416,8 @@ "doc_path": "rules/coloring_qubo/index.html" }, { - "source": 5, - "target": 7, + "source": 8, + "target": 10, "overhead": [ { "field": "num_vars", @@ -336,8 +431,8 @@ "doc_path": "rules/ksatisfiability_casts/index.html" }, { - "source": 5, - "target": 20, + "source": 8, + "target": 27, "overhead": [ { "field": "num_vars", @@ -347,8 +442,8 @@ "doc_path": "rules/ksatisfiability_qubo/index.html" }, { - "source": 5, - "target": 21, + "source": 8, + "target": 28, "overhead": [ { "field": "num_clauses", @@ -366,8 +461,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 6, - "target": 7, + "source": 9, + "target": 10, "overhead": [ { "field": "num_vars", @@ -381,8 +476,8 @@ "doc_path": "rules/ksatisfiability_casts/index.html" }, { - "source": 6, - "target": 20, + "source": 9, + "target": 27, "overhead": [ { "field": "num_vars", @@ -392,8 +487,8 @@ "doc_path": "rules/ksatisfiability_qubo/index.html" }, { - "source": 6, - "target": 21, + 
"source": 9, + "target": 28, "overhead": [ { "field": "num_clauses", @@ -411,8 +506,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 7, - "target": 21, + "source": 10, + "target": 28, "overhead": [ { "field": "num_clauses", @@ -430,8 +525,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 8, - "target": 23, + "source": 11, + "target": 30, "overhead": [ { "field": "num_spins", @@ -445,7 +540,7 @@ "doc_path": "rules/spinglass_maxcut/index.html" }, { - "source": 9, + "source": 13, "target": 2, "overhead": [ { @@ -460,8 +555,8 @@ "doc_path": "rules/maximumclique_ilp/index.html" }, { - "source": 10, - "target": 13, + "source": 14, + "target": 15, "overhead": [ { "field": "num_vertices", @@ -475,23 +570,38 @@ "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 11, - "target": 2, + "source": 14, + "target": 19, "overhead": [ { - "field": "num_vars", + "field": "num_vertices", "formula": "num_vertices" }, { - "field": "num_constraints", + "field": "num_edges", "formula": "num_edges" } ], - "doc_path": "rules/maximumindependentset_ilp/index.html" + "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 11, - "target": 10, + "source": 15, + "target": 20, + "overhead": [ + { + "field": "num_vertices", + "formula": "num_vertices" + }, + { + "field": "num_edges", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_casts/index.html" + }, + { + "source": 16, + "target": 14, "overhead": [ { "field": "num_vertices", @@ -505,8 +615,38 @@ "doc_path": "rules/maximumindependentset_gridgraph/index.html" }, { - "source": 11, - "target": 12, + "source": 16, + "target": 15, + "overhead": [ + { + "field": "num_vertices", + "formula": "num_vertices * num_vertices" + }, + { + "field": "num_edges", + "formula": "num_vertices * num_vertices" + } + ], + "doc_path": "rules/maximumindependentset_gridgraph/index.html" + }, + { + "source": 16, + "target": 17, + "overhead": [ + { + "field": "num_vertices", + 
"formula": "num_vertices" + }, + { + "field": "num_edges", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_casts/index.html" + }, + { + "source": 16, + "target": 18, "overhead": [ { "field": "num_vertices", @@ -520,8 +660,23 @@ "doc_path": "rules/maximumindependentset_triangular/index.html" }, { - "source": 11, - "target": 16, + "source": 17, + "target": 2, + "overhead": [ + { + "field": "num_vars", + "formula": "num_vertices" + }, + { + "field": "num_constraints", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_ilp/index.html" + }, + { + "source": 17, + "target": 23, "overhead": [ { "field": "num_sets", @@ -535,8 +690,8 @@ "doc_path": "rules/maximumindependentset_maximumsetpacking/index.html" }, { - "source": 11, - "target": 19, + "source": 17, + "target": 26, "overhead": [ { "field": "num_vertices", @@ -550,8 +705,8 @@ "doc_path": "rules/minimumvertexcover_maximumindependentset/index.html" }, { - "source": 11, - "target": 20, + "source": 17, + "target": 27, "overhead": [ { "field": "num_vars", @@ -561,8 +716,8 @@ "doc_path": "rules/maximumindependentset_qubo/index.html" }, { - "source": 12, - "target": 13, + "source": 18, + "target": 20, "overhead": [ { "field": "num_vertices", @@ -576,23 +731,23 @@ "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 13, - "target": 10, + "source": 19, + "target": 16, "overhead": [ { "field": "num_vertices", - "formula": "num_vertices * num_vertices" + "formula": "num_vertices" }, { "field": "num_edges", - "formula": "num_vertices * num_vertices" + "formula": "num_edges" } ], - "doc_path": "rules/maximumindependentset_gridgraph/index.html" + "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 13, - "target": 11, + "source": 19, + "target": 20, "overhead": [ { "field": "num_vertices", @@ -606,7 +761,22 @@ "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 14, + "source": 20, + "target": 17, + 
"overhead": [ + { + "field": "num_vertices", + "formula": "num_vertices" + }, + { + "field": "num_edges", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_casts/index.html" + }, + { + "source": 21, "target": 2, "overhead": [ { @@ -621,8 +791,8 @@ "doc_path": "rules/maximummatching_ilp/index.html" }, { - "source": 14, - "target": 16, + "source": 21, + "target": 23, "overhead": [ { "field": "num_sets", @@ -636,8 +806,8 @@ "doc_path": "rules/maximummatching_maximumsetpacking/index.html" }, { - "source": 15, - "target": 20, + "source": 22, + "target": 27, "overhead": [ { "field": "num_vars", @@ -647,7 +817,7 @@ "doc_path": "rules/maximumsetpacking_qubo/index.html" }, { - "source": 16, + "source": 23, "target": 2, "overhead": [ { @@ -662,8 +832,8 @@ "doc_path": "rules/maximumsetpacking_ilp/index.html" }, { - "source": 16, - "target": 11, + "source": 23, + "target": 17, "overhead": [ { "field": "num_vertices", @@ -677,8 +847,8 @@ "doc_path": "rules/maximumindependentset_maximumsetpacking/index.html" }, { - "source": 16, - "target": 15, + "source": 23, + "target": 22, "overhead": [ { "field": "num_sets", @@ -692,7 +862,7 @@ "doc_path": "rules/maximumsetpacking_casts/index.html" }, { - "source": 17, + "source": 24, "target": 2, "overhead": [ { @@ -707,7 +877,7 @@ "doc_path": "rules/minimumdominatingset_ilp/index.html" }, { - "source": 18, + "source": 25, "target": 2, "overhead": [ { @@ -722,7 +892,7 @@ "doc_path": "rules/minimumsetcovering_ilp/index.html" }, { - "source": 19, + "source": 26, "target": 2, "overhead": [ { @@ -737,8 +907,8 @@ "doc_path": "rules/minimumvertexcover_ilp/index.html" }, { - "source": 19, - "target": 11, + "source": 26, + "target": 17, "overhead": [ { "field": "num_vertices", @@ -752,8 +922,8 @@ "doc_path": "rules/minimumvertexcover_maximumindependentset/index.html" }, { - "source": 19, - "target": 18, + "source": 26, + "target": 25, "overhead": [ { "field": "num_sets", @@ -767,8 +937,8 @@ "doc_path": 
"rules/minimumvertexcover_minimumsetcovering/index.html" }, { - "source": 19, - "target": 20, + "source": 26, + "target": 27, "overhead": [ { "field": "num_vars", @@ -778,7 +948,7 @@ "doc_path": "rules/minimumvertexcover_qubo/index.html" }, { - "source": 20, + "source": 27, "target": 2, "overhead": [ { @@ -793,8 +963,8 @@ "doc_path": "rules/qubo_ilp/index.html" }, { - "source": 20, - "target": 22, + "source": 27, + "target": 29, "overhead": [ { "field": "num_spins", @@ -804,7 +974,7 @@ "doc_path": "rules/spinglass_qubo/index.html" }, { - "source": 21, + "source": 28, "target": 0, "overhead": [ { @@ -819,23 +989,23 @@ "doc_path": "rules/sat_circuitsat/index.html" }, { - "source": 21, - "target": 3, + "source": 28, + "target": 4, "overhead": [ { "field": "num_vertices", - "formula": "2 * num_vars + 5 * num_literals - 5 * num_clauses + 3" + "formula": "2 * num_vars + 5 * num_literals + -1 * 5 * num_clauses + 3" }, { "field": "num_edges", - "formula": "3 * num_vars + 11 * num_literals - 9 * num_clauses + 3" + "formula": "3 * num_vars + 11 * num_literals + -1 * 9 * num_clauses + 3" } ], "doc_path": "rules/sat_coloring/index.html" }, { - "source": 21, - "target": 6, + "source": 28, + "target": 9, "overhead": [ { "field": "num_clauses", @@ -849,8 +1019,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 21, - "target": 11, + "source": 28, + "target": 16, "overhead": [ { "field": "num_vertices", @@ -864,8 +1034,8 @@ "doc_path": "rules/sat_maximumindependentset/index.html" }, { - "source": 21, - "target": 17, + "source": 28, + "target": 24, "overhead": [ { "field": "num_vertices", @@ -879,8 +1049,8 @@ "doc_path": "rules/sat_minimumdominatingset/index.html" }, { - "source": 22, - "target": 20, + "source": 29, + "target": 27, "overhead": [ { "field": "num_vars", @@ -890,8 +1060,8 @@ "doc_path": "rules/spinglass_qubo/index.html" }, { - "source": 23, - "target": 8, + "source": 30, + "target": 11, "overhead": [ { "field": "num_vertices", @@ -905,8 +1075,8 @@ 
"doc_path": "rules/spinglass_maxcut/index.html" }, { - "source": 23, - "target": 22, + "source": 30, + "target": 29, "overhead": [ { "field": "num_spins", @@ -920,7 +1090,7 @@ "doc_path": "rules/spinglass_casts/index.html" }, { - "source": 24, + "source": 31, "target": 2, "overhead": [ { @@ -929,7 +1099,7 @@ }, { "field": "num_constraints", - "formula": "num_vertices^3 - num_vertices^2 + 2 * num_vertices + 4 * num_vertices * num_edges" + "formula": "num_vertices^3 + -1 * 1 * num_vertices^2 + 2 * num_vertices + 4 * num_vertices * num_edges" } ], "doc_path": "rules/travelingsalesman_ilp/index.html" diff --git a/examples/reduction_satisfiability_to_maximumindependentset.rs b/examples/reduction_satisfiability_to_maximumindependentset.rs index 6b99d0777..40027025f 100644 --- a/examples/reduction_satisfiability_to_maximumindependentset.rs +++ b/examples/reduction_satisfiability_to_maximumindependentset.rs @@ -43,7 +43,7 @@ pub fn run() { ); // 2. Reduce to Independent Set - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is = reduction.target_problem(); println!("\n=== Problem Transformation ==="); @@ -105,7 +105,7 @@ pub fn run() { // 5. Export JSON let source_variant = variant_to_map(Satisfiability::variant()); - let target_variant = variant_to_map(MaximumIndependentSet::::variant()); + let target_variant = variant_to_map(MaximumIndependentSet::::variant()); let overhead = lookup_overhead( "Satisfiability", &source_variant, diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 089c977f4..1de97c436 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -197,27 +197,32 @@ Setup: add one line to your shell rc file: #[derive(clap::Args)] #[command(after_help = "\ -Run `pred create ` without arguments to see problem-specific parameters. +TIP: Run `pred create ` (no other flags) to see problem-specific help. 
+ Not every flag applies to every problem — the above list shows ALL flags. -Random generation (graph-based problems only): - --random Generate a random Erdos-Renyi graph instance - --num-vertices Number of vertices [required with --random] - --edge-prob Edge probability (0.0 to 1.0) [default: 0.5] - --seed Random seed for reproducibility +Flags by problem type: + MIS, MVC, MaxClique, MinDomSet --graph, --weights + MaxCut, MaxMatching, TSP --graph, --edge-weights + SAT, 3SAT/KSAT --num-vars, --clauses [--k] + QUBO --matrix + SpinGlass --graph, --couplings, --fields + KColoring --graph, --k + Factoring --target, --m, --n + +Geometry graph variants (use slash notation, e.g., MIS/KingsSubgraph): + KingsSubgraph, TriangularSubgraph --positions (integer x,y pairs) + UnitDiskGraph --positions (float x,y pairs) [--radius] + +Random generation: + --random --num-vertices N [--edge-prob 0.5] [--seed 42] Examples: - pred create MIS --graph 0-1,1-2,2-3 -o problem.json - pred create MIS --graph 0-1,1-2 --weights 2,1,3 -o weighted.json - pred create SAT --num-vars 3 --clauses \"1,2;-1,3\" -o sat.json - pred create QUBO --matrix \"1,0.5;0.5,2\" -o qubo.json - pred create KColoring --k 3 --graph 0-1,1-2,2-0 -o kcol.json - pred create MaxCut --graph 0-1,1-2 --edge-weights 2,3 - pred create SpinGlass --graph 0-1,1-2 --couplings 1,-1 - pred create MIS --random --num-vertices 10 --edge-prob 0.3 - pred create Factoring --target 15 --m 4 --n 4 - -Output (`-o`) uses the standard problem JSON format: - {\"type\": \"...\", \"variant\": {...}, \"data\": {...}}")] + pred create MIS --graph 0-1,1-2,2-3 --weights 1,1,1 + pred create SAT --num-vars 3 --clauses \"1,2;-1,3\" + pred create QUBO --matrix \"1,0.5;0.5,2\" + pred create MIS/KingsSubgraph --positions \"0,0;1,0;1,1;0,1\" + pred create MIS/UnitDiskGraph --positions \"0,0;1,0;0.5,0.8\" --radius 1.5 + pred create MIS --random --num-vertices 10 --edge-prob 0.3")] pub struct CreateArgs { /// Problem type (e.g., MIS, QUBO, SAT) 
#[arg(value_parser = crate::problem_name::ProblemNameParser)] @@ -270,6 +275,12 @@ pub struct CreateArgs { /// Bits for second factor (for Factoring) #[arg(long)] pub n: Option, + /// Vertex positions for geometry-based graphs (semicolon-separated x,y pairs, e.g., "0,0;1,0;1,1") + #[arg(long)] + pub positions: Option, + /// Radius for UnitDiskGraph [default: 1.0] + #[arg(long)] + pub radius: Option, } #[derive(clap::Args)] diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index ffc57252d..350983eb6 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -1,12 +1,14 @@ use crate::cli::CreateArgs; use crate::dispatch::ProblemJsonOutput; use crate::output::OutputConfig; -use crate::problem_name::resolve_alias; +use crate::problem_name::{parse_problem_spec, resolve_variant}; +use crate::util; use anyhow::{bail, Context, Result}; use problemreductions::prelude::*; use problemreductions::registry::collect_schemas; -use problemreductions::topology::{Graph, SimpleGraph}; -use problemreductions::variant::{K2, K3, KN}; +use problemreductions::topology::{ + Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, +}; use serde::Serialize; use std::collections::BTreeMap; @@ -27,11 +29,17 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.num_vertices.is_none() && args.edge_prob.is_none() && args.seed.is_none() + && args.positions.is_none() + && args.radius.is_none() } -fn type_format_hint(type_name: &str) -> &'static str { +fn type_format_hint(type_name: &str, graph_type: Option<&str>) -> &'static str { match type_name { - "G" => "edge list: 0-1,1-2,2-3", + "G" => match graph_type { + Some("KingsSubgraph" | "TriangularSubgraph") => "integer positions: \"0,0;1,0;1,1\"", + Some("UnitDiskGraph") => "float positions: \"0.0,0.0;1.0,0.0\"", + _ => "edge list: 0-1,1-2,2-3", + }, "Vec" => "comma-separated: 1,2,3", "Vec" => "semicolon-separated 
clauses: \"1,2;-1,3\"", "Vec>" => "semicolon-separated rows: \"1,0.5;0.5,2\"", @@ -41,12 +49,17 @@ fn type_format_hint(type_name: &str) -> &'static str { } } -fn example_for(canonical: &str) -> &'static str { +fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { match canonical { "MaximumIndependentSet" | "MinimumVertexCover" | "MaximumClique" - | "MinimumDominatingSet" => "--graph 0-1,1-2,2-3 --weights 1,1,1,1", + | "MinimumDominatingSet" => match graph_type { + Some("KingsSubgraph") => "--positions \"0,0;1,0;1,1;0,1\"", + Some("TriangularSubgraph") => "--positions \"0,0;0,1;1,0;1,1\"", + Some("UnitDiskGraph") => "--positions \"0,0;1,0;0.5,0.8\" --radius 1.5", + _ => "--graph 0-1,1-2,2-3 --weights 1,1,1,1", + }, "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { "--graph 0-1,1-2,2-3 --edge-weights 1,1,1" } @@ -60,7 +73,11 @@ fn example_for(canonical: &str) -> &'static str { } } -fn print_problem_help(canonical: &str) -> Result<()> { +fn print_problem_help(canonical: &str, graph_type: Option<&str>) -> Result<()> { + let is_geometry = matches!( + graph_type, + Some("KingsSubgraph" | "TriangularSubgraph" | "UnitDiskGraph") + ); let schemas = collect_schemas(); let schema = schemas.iter().find(|s| s.name == canonical); @@ -68,37 +85,77 @@ fn print_problem_help(canonical: &str) -> Result<()> { eprintln!("{}\n {}\n", canonical, s.description); eprintln!("Parameters:"); for field in &s.fields { - let hint = type_format_hint(&field.type_name); - eprintln!( - " --{:<16} {} ({})", - field.name.replace('_', "-"), - field.description, - hint - ); + // For geometry variants, show --positions instead of --graph + if field.type_name == "G" && is_geometry { + let hint = type_format_hint(&field.type_name, graph_type); + eprintln!(" --{:<16} {} ({hint})", "positions", field.description); + if graph_type == Some("UnitDiskGraph") { + eprintln!(" --{:<16} Distance threshold [default: 1.0]", "radius"); + } + } else { + let hint = 
type_format_hint(&field.type_name, graph_type); + eprintln!( + " --{:<16} {} ({})", + field.name.replace('_', "-"), + field.description, + hint + ); + } } } else { eprintln!("{canonical}\n"); eprintln!("No schema information available."); } - let example = example_for(canonical); + let example = example_for(canonical, graph_type); if !example.is_empty() { eprintln!("\nExample:"); - eprintln!(" pred create {} {}", canonical, example); + eprintln!( + " pred create {} {}", + match graph_type { + Some(g) => format!("{canonical}/{g}"), + None => canonical.to_string(), + }, + example + ); } Ok(()) } +/// Resolve the graph type from the variant map (e.g., "KingsSubgraph", "UnitDiskGraph", or "SimpleGraph"). +fn resolved_graph_type(variant: &BTreeMap) -> &str { + variant + .get("graph") + .map(|s| s.as_str()) + .unwrap_or("SimpleGraph") +} + pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { - let canonical = resolve_alias(&args.problem); + let spec = parse_problem_spec(&args.problem)?; + let canonical = &spec.name; + + // Resolve variant early so random and help can use it + let rgraph = problemreductions::rules::ReductionGraph::new(); + let known_variants = rgraph.variants_for(canonical); + let resolved_variant = if known_variants.is_empty() { + BTreeMap::new() + } else { + resolve_variant(&spec, &known_variants)? 
+ }; + let graph_type = resolved_graph_type(&resolved_variant); if args.random { - return create_random(args, &canonical, out); + return create_random(args, canonical, &resolved_variant, out); } // Show schema-driven help when no data flags are provided if all_data_flags_empty(args) { - return print_problem_help(&canonical); + let gt = if graph_type != "SimpleGraph" { + Some(graph_type) + } else { + None + }; + return print_problem_help(canonical, gt); } let (data, variant) = match canonical.as_str() { @@ -107,22 +164,7 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { | "MinimumVertexCover" | "MaximumClique" | "MinimumDominatingSet" => { - let (graph, n) = parse_graph(args).map_err(|e| { - anyhow::anyhow!( - "{e}\n\nUsage: pred create {} --graph 0-1,1-2,2-3 [--weights 1,1,1,1]", - args.problem - ) - })?; - let weights = parse_vertex_weights(args, n)?; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); - let data = match canonical.as_str() { - "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, - "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, - "MaximumClique" => ser(MaximumClique::new(graph, weights))?, - "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, - _ => unreachable!(), - }; - (data, variant) + create_vertex_weight_problem(args, canonical, graph_type, &resolved_variant)? 
} // Graph problems with edge weights @@ -134,14 +176,13 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let edge_weights = parse_edge_weights(args, graph.num_edges())?; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let data = match canonical.as_str() { "MaxCut" => ser(MaxCut::new(graph, edge_weights))?, "MaximumMatching" => ser(MaximumMatching::new(graph, edge_weights))?, "TravelingSalesman" => ser(TravelingSalesman::new(graph, edge_weights))?, _ => unreachable!(), }; - (data, variant) + (data, resolved_variant.clone()) } // KColoring @@ -149,27 +190,9 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { let (graph, _) = parse_graph(args).map_err(|e| { anyhow::anyhow!("{e}\n\nUsage: pred create KColoring --graph 0-1,1-2,2-0 --k 3") })?; - let variant; - let data; - match args.k { - Some(2) => { - variant = variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - Some(3) => { - variant = variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - Some(k) => { - variant = variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::with_k(graph, k))?; - } - None => bail!( - "KColoring requires --k \n\n\ - Usage: pred create KColoring --graph 0-1,1-2,2-0 --k 3" - ), - } - (data, variant) + let (k, _variant) = + util::validate_k_param(&resolved_variant, args.k, None, "KColoring")?; + util::ser_kcoloring(graph, k)? 
} // SAT @@ -181,8 +204,10 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let clauses = parse_clauses(args)?; - let variant = BTreeMap::new(); - (ser(Satisfiability::new(num_vars, clauses))?, variant) + ( + ser(Satisfiability::new(num_vars, clauses))?, + resolved_variant.clone(), + ) } "KSatisfiability" => { let num_vars = args.num_vars.ok_or_else(|| { @@ -192,30 +217,15 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let clauses = parse_clauses(args)?; - let variant; - let data; - match args.k { - Some(2) => { - variant = variant_map(&[("k", "K2")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - Some(3) => { - variant = variant_map(&[("k", "K3")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - _ => { - variant = variant_map(&[("k", "KN")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - } - (data, variant) + let (k, _variant) = + util::validate_k_param(&resolved_variant, args.k, Some(3), "KSatisfiability")?; + util::ser_ksat(num_vars, clauses, k)? 
} // QUBO "QUBO" => { let matrix = parse_matrix(args)?; - let variant = BTreeMap::new(); - (ser(QUBO::from_matrix(matrix))?, variant) + (ser(QUBO::from_matrix(matrix))?, resolved_variant.clone()) } // SpinGlass @@ -227,10 +237,9 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { })?; let couplings = parse_couplings(args, graph.num_edges())?; let fields = parse_fields(args, n)?; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); ( ser(SpinGlass::from_graph(graph, couplings, fields))?, - variant, + resolved_variant.clone(), ) } @@ -246,15 +255,14 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { let n = args .n .ok_or_else(|| anyhow::anyhow!("Factoring requires --n\n\n{usage}"))?; - let variant = BTreeMap::new(); - (ser(Factoring::new(m, n, target))?, variant) + (ser(Factoring::new(m, n, target))?, resolved_variant.clone()) } - _ => bail!("{}", crate::problem_name::unknown_problem_error(&canonical)), + _ => bail!("{}", crate::problem_name::unknown_problem_error(canonical)), }; let output = ProblemJsonOutput { - problem_type: canonical.clone(), + problem_type: canonical.to_string(), variant, data, }; @@ -273,15 +281,87 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { Ok(()) } +/// Create a vertex-weight problem dispatching on geometry graph type. 
+fn create_vertex_weight_problem( + args: &CreateArgs, + canonical: &str, + graph_type: &str, + resolved_variant: &BTreeMap, +) -> Result<(serde_json::Value, BTreeMap)> { + match graph_type { + "KingsSubgraph" => { + let positions = parse_int_positions(args)?; + let n = positions.len(); + let graph = KingsSubgraph::new(positions); + let weights = parse_vertex_weights(args, n)?; + Ok(( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "TriangularSubgraph" => { + let positions = parse_int_positions(args)?; + let n = positions.len(); + let graph = TriangularSubgraph::new(positions); + let weights = parse_vertex_weights(args, n)?; + Ok(( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "UnitDiskGraph" => { + let positions = parse_float_positions(args)?; + let n = positions.len(); + let radius = args.radius.unwrap_or(1.0); + let graph = UnitDiskGraph::new(positions, radius); + let weights = parse_vertex_weights(args, n)?; + Ok(( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + _ => { + // SimpleGraph path (existing) + let (graph, n) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create {} --graph 0-1,1-2,2-3 [--weights 1,1,1,1]", + args.problem + ) + })?; + let weights = parse_vertex_weights(args, n)?; + let data = match canonical { + "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, + "MaximumClique" => ser(MaximumClique::new(graph, weights))?, + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, + _ => unreachable!(), + }; + Ok((data, resolved_variant.clone())) + } + } +} + +/// Serialize a vertex-weight problem with a generic graph type. 
+fn ser_vertex_weight_problem_with( + canonical: &str, + graph: G, + weights: Vec, +) -> Result { + match canonical { + "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights)), + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights)), + "MaximumClique" => ser(MaximumClique::new(graph, weights)), + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights)), + _ => unreachable!(), + } +} + fn ser(problem: T) -> Result { - Ok(serde_json::to_value(problem)?) + util::ser(problem) } fn variant_map(pairs: &[(&str, &str)]) -> BTreeMap { - pairs - .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect() + util::variant_map(pairs) } /// Parse `--graph` into a SimpleGraph, inferring num_vertices from max index. @@ -314,6 +394,22 @@ fn parse_graph(args: &CreateArgs) -> Result<(SimpleGraph, usize)> { Ok((SimpleGraph::new(num_vertices, edges), num_vertices)) } +/// Parse `--positions` as integer grid positions. +fn parse_int_positions(args: &CreateArgs) -> Result> { + let pos_str = args.positions.as_deref().ok_or_else(|| { + anyhow::anyhow!("This variant requires --positions (e.g., \"0,0;1,0;1,1\")") + })?; + util::parse_positions(pos_str, "0,0") +} + +/// Parse `--positions` as float positions. +fn parse_float_positions(args: &CreateArgs) -> Result> { + let pos_str = args.positions.as_deref().ok_or_else(|| { + anyhow::anyhow!("This variant requires --positions (e.g., \"0.0,0.0;1.0,0.0;0.5,0.87\")") + })?; + util::parse_positions(pos_str, "0.0,0.0") +} + /// Parse `--weights` as vertex weights (i32), defaulting to all 1s. fn parse_vertex_weights(args: &CreateArgs, num_vertices: usize) -> Result> { match &args.weights { @@ -434,34 +530,13 @@ fn parse_matrix(args: &CreateArgs) -> Result>> { .collect() } -/// Generate a random Erdos-Renyi graph using a simple LCG PRNG (no external dependency). 
-fn create_random_graph(num_vertices: usize, edge_prob: f64, seed: Option) -> SimpleGraph { - let mut state: u64 = seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() as u64 - }); - - let mut edges = Vec::new(); - for i in 0..num_vertices { - for j in (i + 1)..num_vertices { - // LCG step - state = state - .wrapping_mul(6364136223846793005) - .wrapping_add(1442695040888963407); - let rand_val = (state >> 33) as f64 / (1u64 << 31) as f64; - if rand_val < edge_prob { - edges.push((i, j)); - } - } - } - - SimpleGraph::new(num_vertices, edges) -} - /// Handle `pred create --random ...` -fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Result<()> { +fn create_random( + args: &CreateArgs, + canonical: &str, + resolved_variant: &BTreeMap, + out: &OutputConfig, +) -> Result<()> { let num_vertices = args.num_vertices.ok_or_else(|| { anyhow::anyhow!( "--random requires --num-vertices\n\n\ @@ -469,13 +544,8 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu args.problem ) })?; - let edge_prob = args.edge_prob.unwrap_or(0.5); - if !(0.0..=1.0).contains(&edge_prob) { - bail!("--edge-prob must be between 0.0 and 1.0"); - } - let graph = create_random_graph(num_vertices, edge_prob, args.seed); - let num_edges = graph.num_edges(); + let graph_type = resolved_graph_type(resolved_variant); let (data, variant) = match canonical { // Graph problems with vertex weights @@ -484,19 +554,59 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu | "MaximumClique" | "MinimumDominatingSet" => { let weights = vec![1i32; num_vertices]; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); - let data = match canonical { - "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, - "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, - "MaximumClique" => 
ser(MaximumClique::new(graph, weights))?, - "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, - _ => unreachable!(), - }; - (data, variant) + match graph_type { + "KingsSubgraph" => { + let positions = util::create_random_int_positions(num_vertices, args.seed); + let graph = KingsSubgraph::new(positions); + ( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "TriangularSubgraph" => { + let positions = util::create_random_int_positions(num_vertices, args.seed); + let graph = TriangularSubgraph::new(positions); + ( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "UnitDiskGraph" => { + let radius = args.radius.unwrap_or(1.0); + let positions = util::create_random_float_positions(num_vertices, args.seed); + let graph = UnitDiskGraph::new(positions, radius); + ( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + _ => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let data = match canonical { + "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, + "MaximumClique" => ser(MaximumClique::new(graph, weights))?, + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, + _ => unreachable!(), + }; + (data, variant) + } + } } // Graph problems with edge weights "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + 
let num_edges = graph.num_edges(); let edge_weights = vec![1i32; num_edges]; let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let data = match canonical { @@ -510,6 +620,12 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu // SpinGlass "SpinGlass" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let num_edges = graph.num_edges(); let couplings = vec![1i32; num_edges]; let fields = vec![0i32; num_vertices]; let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); @@ -521,24 +637,14 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu // KColoring "KColoring" => { - let k = args.k.unwrap_or(3); - let variant; - let data; - match k { - 2 => { - variant = variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - 3 => { - variant = variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - _ => { - variant = variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::with_k(graph, k))?; - } + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); } - (data, variant) + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let (k, _variant) = + util::validate_k_param(resolved_variant, args.k, Some(3), "KColoring")?; + util::ser_kcoloring(graph, k)? 
} _ => bail!( diff --git a/problemreductions-cli/src/commands/graph.rs b/problemreductions-cli/src/commands/graph.rs index f80438dcc..fc314d170 100644 --- a/problemreductions-cli/src/commands/graph.rs +++ b/problemreductions-cli/src/commands/graph.rs @@ -117,18 +117,17 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { "\n{}\n", crate::output::fmt_section(&format!("Variants ({}):", variants.len())) )); - let default_variant = variants.first().cloned().unwrap_or_default(); for v in &variants { - let slash = variant_to_slash(v, &default_variant); - let label = if slash.is_empty() { - format!(" {}", crate::output::fmt_problem_name(&spec.name)) + let slash = variant_to_full_slash(v); + let label = format!( + " {}", + crate::output::fmt_problem_name(&format!("{}{}", spec.name, slash)) + ); + if let Some(c) = graph.variant_complexity(&spec.name, v) { + text.push_str(&format!("{label} complexity: {c}\n")); } else { - format!( - " {}", - crate::output::fmt_problem_name(&format!("{}{}", spec.name, slash)) - ) - }; - text.push_str(&format!("{label}\n")); + text.push_str(&format!("{label}\n")); + } } // Show fields from schema (right after variants) @@ -168,11 +167,21 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { )); for e in &outgoing { text.push_str(&format!( - " {} {} {}\n", + " {} {} {}", fmt_node(&graph, e.source_name, &e.source_variant), crate::output::fmt_outgoing("\u{2192}"), fmt_node(&graph, e.target_name, &e.target_variant), )); + let oh_parts: Vec = e + .overhead + .output_size + .iter() + .map(|(field, poly)| format!("{field} = {poly}")) + .collect(); + if !oh_parts.is_empty() { + text.push_str(&format!(" ({})", oh_parts.join(", "))); + } + text.push('\n'); } text.push_str(&format!( @@ -181,23 +190,53 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { )); for e in &incoming { text.push_str(&format!( - " {} {} {}\n", + " {} {} {}", fmt_node(&graph, e.source_name, &e.source_variant), 
crate::output::fmt_outgoing("\u{2192}"), fmt_node(&graph, e.target_name, &e.target_variant), )); + let oh_parts: Vec = e + .overhead + .output_size + .iter() + .map(|(field, poly)| format!("{field} = {poly}")) + .collect(); + if !oh_parts.is_empty() { + text.push_str(&format!(" ({})", oh_parts.join(", "))); + } + text.push('\n'); } + let edge_to_json = |e: &problemreductions::rules::ReductionEdgeInfo| { + let overhead: Vec = e + .overhead + .output_size + .iter() + .map(|(field, poly)| serde_json::json!({"field": field, "formula": poly.to_string()})) + .collect(); + serde_json::json!({ + "source": {"name": e.source_name, "variant": e.source_variant}, + "target": {"name": e.target_name, "variant": e.target_variant}, + "overhead": overhead, + }) + }; + let variants_json: Vec = variants + .iter() + .map(|v| { + let complexity = graph.variant_complexity(&spec.name, v).unwrap_or(""); + serde_json::json!({ + "variant": v, + "complexity": complexity, + }) + }) + .collect(); + let mut json = serde_json::json!({ "name": spec.name, - "variants": variants, + "variants": variants_json, "size_fields": size_fields, - "reduces_to": outgoing.iter().map(|e| { - serde_json::json!({"source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}}) - }).collect::>(), - "reduces_from": incoming.iter().map(|e| { - serde_json::json!({"source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}}) - }).collect::>(), + "reduces_to": outgoing.iter().map(&edge_to_json).collect::>(), + "reduces_from": incoming.iter().map(&edge_to_json).collect::>(), }); if let Some(s) = schema { if let (Some(obj), Ok(schema_val)) = (json.as_object_mut(), serde_json::to_value(s)) { @@ -209,6 +248,17 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { out.emit_with_default_name(&default_name, &text, &json) } +/// Convert a variant BTreeMap to slash notation showing ALL 
values. +/// E.g., {graph: "SimpleGraph", weight: "i32"} → "/SimpleGraph/i32". +fn variant_to_full_slash(variant: &BTreeMap) -> String { + if variant.is_empty() { + String::new() + } else { + let vals: Vec<&str> = variant.values().map(|v| v.as_str()).collect(); + format!("/{}", vals.join("/")) + } +} + /// Convert a variant BTreeMap to slash notation showing only non-default values. /// Given default {graph: "SimpleGraph", weight: "i32"} and variant {graph: "UnitDiskGraph", weight: "i32"}, /// returns "/UnitDiskGraph". @@ -564,11 +614,10 @@ pub fn neighbors( let root_label = fmt_node(&graph, &spec.name, &variant); + let header_label = fmt_node(&graph, &spec.name, &variant); let mut text = format!( "{} — {}-hop neighbors ({})\n\n", - crate::output::fmt_problem_name(&spec.name), - max_hops, - dir_label, + header_label, max_hops, dir_label, ); text.push_str(&root_label); diff --git a/problemreductions-cli/src/main.rs b/problemreductions-cli/src/main.rs index e2386c829..dfddcf2de 100644 --- a/problemreductions-cli/src/main.rs +++ b/problemreductions-cli/src/main.rs @@ -5,6 +5,7 @@ mod dispatch; mod mcp; mod output; mod problem_name; +mod util; use clap::{CommandFactory, Parser}; use cli::{Cli, Commands}; diff --git a/problemreductions-cli/src/mcp/tools.rs b/problemreductions-cli/src/mcp/tools.rs index d1104d110..89d456279 100644 --- a/problemreductions-cli/src/mcp/tools.rs +++ b/problemreductions-cli/src/mcp/tools.rs @@ -1,17 +1,19 @@ +use crate::util; use problemreductions::models::graph::{ - KColoring, MaxCut, MaximumClique, MaximumIndependentSet, MaximumMatching, MinimumDominatingSet, + MaxCut, MaximumClique, MaximumIndependentSet, MaximumMatching, MinimumDominatingSet, MinimumVertexCover, TravelingSalesman, }; use problemreductions::models::optimization::{SpinGlass, QUBO}; -use problemreductions::models::satisfiability::{CNFClause, KSatisfiability, Satisfiability}; +use problemreductions::models::satisfiability::{CNFClause, Satisfiability}; use 
problemreductions::models::specialized::Factoring; use problemreductions::registry::collect_schemas; use problemreductions::rules::{ CustomCost, MinimizeSteps, ReductionGraph, ReductionPath, TraversalDirection, }; -use problemreductions::topology::{Graph, SimpleGraph}; +use problemreductions::topology::{ + Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, +}; use problemreductions::types::ProblemSize; -use problemreductions::variant::{K2, K3, KN}; use rmcp::handler::server::router::tool::ToolRouter; use rmcp::handler::server::wrapper::Parameters; use rmcp::tool; @@ -22,7 +24,7 @@ use crate::dispatch::{ load_problem, serialize_any_problem, PathStep, ProblemJson, ProblemJsonOutput, ReductionBundle, }; use crate::problem_name::{ - aliases_for, parse_problem_spec, resolve_alias, resolve_variant, unknown_problem_error, + aliases_for, parse_problem_spec, resolve_variant, unknown_problem_error, }; // --------------------------------------------------------------------------- @@ -68,7 +70,7 @@ pub struct CreateProblemParams { )] pub problem_type: String, #[schemars( - description = "Problem parameters as JSON object. Graph problems: {\"edges\": \"0-1,1-2\", \"weights\": \"1,2,3\"}. SAT: {\"num_vars\": 3, \"clauses\": \"1,2;-1,3\"}. QUBO: {\"matrix\": \"1,0.5;0.5,2\"}. KColoring: {\"edges\": \"0-1,1-2\", \"k\": 3}. Factoring: {\"target\": 15, \"bits_m\": 4, \"bits_n\": 4}. Random graph: {\"random\": true, \"num_vertices\": 10, \"edge_prob\": 0.3}" + description = "Problem parameters as JSON object. Graph problems: {\"edges\": \"0-1,1-2\", \"weights\": \"1,2,3\"}. SAT: {\"num_vars\": 3, \"clauses\": \"1,2;-1,3\"}. QUBO: {\"matrix\": \"1,0.5;0.5,2\"}. KColoring: {\"edges\": \"0-1,1-2\", \"k\": 3}. Factoring: {\"target\": 15, \"bits_m\": 4, \"bits_n\": 4}. Random graph: {\"random\": true, \"num_vertices\": 10, \"edge_prob\": 0.3}. Geometry graphs (use with MIS/KingsSubgraph etc.): {\"positions\": \"0,0;1,0;1,1\"}. 
UnitDiskGraph: {\"positions\": \"0.0,0.0;1.0,0.0\", \"radius\": 1.5}" )] pub params: serde_json::Value, } @@ -174,20 +176,39 @@ impl McpServer { let incoming = graph.incoming_reductions(&spec.name); let size_fields = graph.size_field_names(&spec.name); + let variants_json: Vec = variants + .iter() + .map(|v| { + let complexity = graph.variant_complexity(&spec.name, v).unwrap_or(""); + serde_json::json!({ + "variant": v, + "complexity": complexity, + }) + }) + .collect(); + let mut json = serde_json::json!({ "name": spec.name, - "variants": variants, + "variants": variants_json, "size_fields": &size_fields, "reduces_to": outgoing.iter().map(|e| { + let overhead: Vec = e.overhead.output_size.iter() + .map(|(field, poly)| serde_json::json!({"field": field, "formula": poly.to_string()})) + .collect(); serde_json::json!({ "source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}, + "overhead": overhead, }) }).collect::>(), "reduces_from": incoming.iter().map(|e| { + let overhead: Vec = e.overhead.output_size.iter() + .map(|(field, poly)| serde_json::json!({"field": field, "formula": poly.to_string()})) + .collect(); serde_json::json!({ "source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}, + "overhead": overhead, }) }).collect::>(), }); @@ -382,7 +403,21 @@ impl McpServer { problem_type: &str, params: &serde_json::Value, ) -> anyhow::Result { - let canonical = resolve_alias(problem_type); + let spec = parse_problem_spec(problem_type)?; + let canonical = spec.name.clone(); + + // Resolve variant from spec + let rgraph = ReductionGraph::new(); + let known_variants = rgraph.variants_for(&canonical); + let resolved_variant = if known_variants.is_empty() { + BTreeMap::new() + } else { + resolve_variant(&spec, &known_variants)? 
+ }; + let graph_type = resolved_variant + .get("graph") + .map(|s| s.as_str()) + .unwrap_or("SimpleGraph"); // Check for random generation let is_random = params @@ -391,7 +426,7 @@ impl McpServer { .unwrap_or(false); if is_random { - return self.create_random_inner(&canonical, params); + return self.create_random_inner(&canonical, &resolved_variant, params); } let (data, variant) = match canonical.as_str() { @@ -399,9 +434,7 @@ impl McpServer { | "MinimumVertexCover" | "MaximumClique" | "MinimumDominatingSet" => { - let (graph, n) = parse_graph_from_params(params)?; - let weights = parse_vertex_weights_from_params(params, n)?; - ser_vertex_weight_problem(&canonical, graph, weights)? + create_vertex_weight_from_params(&canonical, graph_type, &resolved_variant, params)? } "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { @@ -412,14 +445,10 @@ impl McpServer { "KColoring" => { let (graph, _) = parse_graph_from_params(params)?; - let k = params - .get("k") - .and_then(|v| v.as_u64()) - .map(|v| v as usize) - .ok_or_else(|| { - anyhow::anyhow!("KColoring requires 'k' parameter (number of colors)") - })?; - ser_kcoloring(graph, k)? + let k_flag = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); + let (k, _variant) = + util::validate_k_param(&resolved_variant, k_flag, None, "KColoring")?; + util::ser_kcoloring(graph, k)? 
} // SAT @@ -440,24 +469,10 @@ impl McpServer { .map(|v| v as usize) .ok_or_else(|| anyhow::anyhow!("KSatisfiability requires 'num_vars'"))?; let clauses = parse_clauses_from_params(params)?; - let k = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); - let variant; - let data; - match k { - Some(2) => { - variant = variant_map(&[("k", "K2")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - Some(3) => { - variant = variant_map(&[("k", "K3")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - _ => { - variant = variant_map(&[("k", "KN")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - } - (data, variant) + let k_flag = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); + let (k, _variant) = + util::validate_k_param(&resolved_variant, k_flag, Some(3), "KSatisfiability")?; + util::ser_ksat(num_vars, clauses, k)? } // QUBO @@ -513,6 +528,7 @@ impl McpServer { fn create_random_inner( &self, canonical: &str, + resolved_variant: &BTreeMap, params: &serde_json::Value, ) -> anyhow::Result { let num_vertices = params @@ -522,17 +538,11 @@ impl McpServer { .ok_or_else(|| { anyhow::anyhow!("Random generation requires 'num_vertices' parameter") })?; - let edge_prob = params - .get("edge_prob") - .and_then(|v| v.as_f64()) - .unwrap_or(0.5); - if !(0.0..=1.0).contains(&edge_prob) { - anyhow::bail!("edge_prob must be between 0.0 and 1.0"); - } let seed = params.get("seed").and_then(|v| v.as_u64()); - - let graph = create_random_graph(num_vertices, edge_prob, seed); - let num_edges = graph.num_edges(); + let graph_type = resolved_variant + .get("graph") + .map(|s| s.as_str()) + .unwrap_or("SimpleGraph"); let (data, variant) = match canonical { "MaximumIndependentSet" @@ -540,13 +550,68 @@ impl McpServer { | "MaximumClique" | "MinimumDominatingSet" => { let weights = vec![1i32; num_vertices]; - ser_vertex_weight_problem(canonical, graph, weights)? 
+ match graph_type { + "KingsSubgraph" => { + let positions = util::create_random_int_positions(num_vertices, seed); + let graph = KingsSubgraph::new(positions); + ( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "TriangularSubgraph" => { + let positions = util::create_random_int_positions(num_vertices, seed); + let graph = TriangularSubgraph::new(positions); + ( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "UnitDiskGraph" => { + let radius = params.get("radius").and_then(|v| v.as_f64()).unwrap_or(1.0); + let positions = util::create_random_float_positions(num_vertices, seed); + let graph = UnitDiskGraph::new(positions, radius); + ( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + _ => { + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, seed); + ser_vertex_weight_problem(canonical, graph, weights)? + } + } } "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, seed); + let num_edges = graph.num_edges(); let edge_weights = vec![1i32; num_edges]; ser_edge_weight_problem(canonical, graph, edge_weights)? 
} "SpinGlass" => { + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, seed); + let num_edges = graph.num_edges(); let couplings = vec![1i32; num_edges]; let fields = vec![0i32; num_vertices]; let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); @@ -556,12 +621,18 @@ impl McpServer { ) } "KColoring" => { - let k = params - .get("k") - .and_then(|v| v.as_u64()) - .map(|v| v as usize) - .unwrap_or(3); - ser_kcoloring(graph, k)? + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, seed); + let k_flag = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); + let (k, _variant) = + util::validate_k_param(resolved_variant, k_flag, Some(3), "KColoring")?; + util::ser_kcoloring(graph, k)? } _ => anyhow::bail!( "Random generation is not supported for {}. \ @@ -1007,14 +1078,11 @@ fn format_path_json( // --------------------------------------------------------------------------- fn ser(problem: T) -> anyhow::Result { - Ok(serde_json::to_value(problem)?) + util::ser(problem) } fn variant_map(pairs: &[(&str, &str)]) -> BTreeMap { - pairs - .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect() + util::variant_map(pairs) } /// Serialize a vertex-weight graph problem (MIS, MVC, MaxClique, MinDomSet). @@ -1050,27 +1118,94 @@ fn ser_edge_weight_problem( Ok((data, variant)) } -/// Serialize a KColoring problem with the appropriate K variant. -fn ser_kcoloring( - graph: SimpleGraph, - k: usize, +/// Serialize a vertex-weight problem with a generic graph type. 
+fn ser_vertex_weight_problem_generic( + canonical: &str, + graph: G, + weights: Vec, +) -> anyhow::Result { + match canonical { + "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights)), + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights)), + "MaximumClique" => ser(MaximumClique::new(graph, weights)), + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights)), + _ => unreachable!(), + } +} + +/// Create a vertex-weight problem from MCP params, dispatching on graph type. +fn create_vertex_weight_from_params( + canonical: &str, + graph_type: &str, + resolved_variant: &BTreeMap, + params: &serde_json::Value, ) -> anyhow::Result<(serde_json::Value, BTreeMap)> { - match k { - 2 => Ok(( - ser(KColoring::::new(graph))?, - variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]), - )), - 3 => Ok(( - ser(KColoring::::new(graph))?, - variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]), - )), - _ => Ok(( - ser(KColoring::::with_k(graph, k))?, - variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]), - )), + match graph_type { + "KingsSubgraph" => { + let positions = parse_int_positions_from_params(params)?; + let n = positions.len(); + let graph = KingsSubgraph::new(positions); + let weights = parse_vertex_weights_from_params(params, n)?; + Ok(( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "TriangularSubgraph" => { + let positions = parse_int_positions_from_params(params)?; + let n = positions.len(); + let graph = TriangularSubgraph::new(positions); + let weights = parse_vertex_weights_from_params(params, n)?; + Ok(( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "UnitDiskGraph" => { + let positions = parse_float_positions_from_params(params)?; + let n = positions.len(); + let radius = params.get("radius").and_then(|v| v.as_f64()).unwrap_or(1.0); + let graph = UnitDiskGraph::new(positions, radius); + let 
weights = parse_vertex_weights_from_params(params, n)?; + Ok(( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + _ => { + let (graph, n) = parse_graph_from_params(params)?; + let weights = parse_vertex_weights_from_params(params, n)?; + ser_vertex_weight_problem(canonical, graph, weights) + } } } +/// Extract and parse 'positions' param as integer grid positions. +fn parse_int_positions_from_params(params: &serde_json::Value) -> anyhow::Result> { + let pos_str = params + .get("positions") + .and_then(|v| v.as_str()) + .ok_or_else(|| { + anyhow::anyhow!("This variant requires 'positions' parameter (e.g., \"0,0;1,0;1,1\")") + })?; + util::parse_positions(pos_str, "0,0;1,0;1,1") +} + +/// Extract and parse 'positions' param as float positions. +fn parse_float_positions_from_params( + params: &serde_json::Value, +) -> anyhow::Result> { + let pos_str = params + .get("positions") + .and_then(|v| v.as_str()) + .ok_or_else(|| { + anyhow::anyhow!( + "This variant requires 'positions' parameter (e.g., \"0.0,0.0;1.0,0.0\")" + ) + })?; + util::parse_positions(pos_str, "0.0,0.0;1.0,0.0") +} + /// Parse `edges` field from JSON params into a SimpleGraph. 
fn parse_graph_from_params(params: &serde_json::Value) -> anyhow::Result<(SimpleGraph, usize)> { let edges_str = params @@ -1197,32 +1332,6 @@ fn parse_matrix_from_params(params: &serde_json::Value) -> anyhow::Result) -> SimpleGraph { - let mut state: u64 = seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() as u64 - }); - - let mut edges = Vec::new(); - for i in 0..num_vertices { - for j in (i + 1)..num_vertices { - // LCG step - state = state - .wrapping_mul(6364136223846793005) - .wrapping_add(1442695040888963407); - let rand_val = (state >> 33) as f64 / (1u64 << 31) as f64; - if rand_val < edge_prob { - edges.push((i, j)); - } - } - } - - SimpleGraph::new(num_vertices, edges) -} - /// Solve a plain problem and return JSON string. fn solve_problem_inner( problem_type: &str, diff --git a/problemreductions-cli/src/problem_name.rs b/problemreductions-cli/src/problem_name.rs index ad64e7bba..f7fa533b5 100644 --- a/problemreductions-cli/src/problem_name.rs +++ b/problemreductions-cli/src/problem_name.rs @@ -109,12 +109,33 @@ pub fn resolve_variant( spec.variant_values, known_variants ), - _ => anyhow::bail!( - "Ambiguous variant for {} with values {:?}. Matches: {:?}", - spec.name, - spec.variant_values, - matches - ), + _ => { + // When ambiguous, use the same default ranking as the reduction graph: + // variants whose remaining (unmatched) fields are closest to defaults + // (SimpleGraph, One, KN) win. This matches variants_for() sort order. 
+ let default_rank = |v: &BTreeMap| -> usize { + v.values() + .filter(|val| { + !spec.variant_values.contains(val) + && !["SimpleGraph", "One", "KN"].contains(&val.as_str()) + }) + .count() + }; + let min_rank = matches.iter().map(|v| default_rank(v)).min().unwrap(); + let best: Vec<_> = matches + .iter() + .filter(|v| default_rank(v) == min_rank) + .collect(); + if best.len() == 1 { + return Ok((*best[0]).clone()); + } + anyhow::bail!( + "Ambiguous variant for {} with values {:?}. Matches: {:?}", + spec.name, + spec.variant_values, + matches + ) + } } } diff --git a/problemreductions-cli/src/util.rs b/problemreductions-cli/src/util.rs new file mode 100644 index 000000000..73e43742f --- /dev/null +++ b/problemreductions-cli/src/util.rs @@ -0,0 +1,229 @@ +//! Shared utilities for CLI and MCP: parsing helpers and random generation. + +use anyhow::{bail, Result}; +use problemreductions::prelude::*; +use problemreductions::topology::SimpleGraph; +use problemreductions::variant::{K2, K3, KN}; +use serde::Serialize; +use std::collections::BTreeMap; + +// --------------------------------------------------------------------------- +// K-parameter validation +// --------------------------------------------------------------------------- + +/// Derive the k variant string from a numeric k value. +fn k_variant_str(k: usize) -> &'static str { + match k { + 1 => "K1", + 2 => "K2", + 3 => "K3", + 4 => "K4", + 5 => "K5", + _ => "KN", + } +} + +/// Validate that `--k` (or `params.k`) is consistent with a variant suffix +/// (e.g., `/K2`). Returns the effective k value and variant map. +/// +/// Rules: +/// - If the resolved variant has a specific k (e.g., K2), `k_flag` must +/// either be `None` or match. A mismatch is an error. +/// - If the resolved variant has k=KN (or no k), any `k_flag` is accepted. +/// - If `k_flag` is `None`, k is inferred from the variant (K2→2, K3→3, etc.), +/// or defaults to `default_k`. 
+pub fn validate_k_param( + resolved_variant: &BTreeMap, + k_flag: Option, + default_k: Option, + problem_name: &str, +) -> Result<(usize, BTreeMap)> { + let variant_k_str = resolved_variant.get("k").map(|s| s.as_str()); + let variant_k_num: Option = match variant_k_str { + Some("K1") => Some(1), + Some("K2") => Some(2), + Some("K3") => Some(3), + Some("K4") => Some(4), + Some("K5") => Some(5), + _ => None, // KN or absent + }; + + let effective_k = match (k_flag, variant_k_num) { + (Some(flag), Some(from_variant)) if flag != from_variant => { + bail!( + "{problem_name}: --k {flag} conflicts with variant /{} (k={from_variant}). \ + Either omit the suffix or match the --k value.", + variant_k_str.unwrap() + ); + } + (Some(flag), _) => flag, + (None, Some(from_variant)) => from_variant, + (None, None) => match default_k { + Some(d) => d, + None => bail!("{problem_name} requires --k "), + }, + }; + + // Build the variant map with the effective k + let mut variant = resolved_variant.clone(); + variant.insert("k".to_string(), k_variant_str(effective_k).to_string()); + + Ok((effective_k, variant)) +} + +// --------------------------------------------------------------------------- +// K-problem serialization +// --------------------------------------------------------------------------- + +/// Serialize a KColoring instance given a graph and validated k. +pub fn ser_kcoloring( + graph: SimpleGraph, + k: usize, +) -> Result<(serde_json::Value, BTreeMap)> { + match k { + 2 => Ok(( + ser(KColoring::::new(graph))?, + variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]), + )), + 3 => Ok(( + ser(KColoring::::new(graph))?, + variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]), + )), + _ => Ok(( + ser(KColoring::::with_k(graph, k))?, + variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]), + )), + } +} + +/// Serialize a KSatisfiability instance given clauses and validated k. 
+pub fn ser_ksat( + num_vars: usize, + clauses: Vec, + k: usize, +) -> Result<(serde_json::Value, BTreeMap)> { + match k { + 2 => Ok(( + ser(KSatisfiability::::new(num_vars, clauses))?, + variant_map(&[("k", "K2")]), + )), + 3 => Ok(( + ser(KSatisfiability::::new(num_vars, clauses))?, + variant_map(&[("k", "K3")]), + )), + _ => Ok(( + ser(KSatisfiability::::new(num_vars, clauses))?, + variant_map(&[("k", "KN")]), + )), + } +} + +// --------------------------------------------------------------------------- +// Parsing helpers +// --------------------------------------------------------------------------- + +/// Parse semicolon-separated x,y pairs from a string. +pub fn parse_positions(pos_str: &str, example: &str) -> Result> +where + T::Err: std::fmt::Display, +{ + pos_str + .split(';') + .map(|pair| { + let parts: Vec<&str> = pair.trim().split(',').collect(); + if parts.len() != 2 { + bail!( + "Invalid position '{}': expected format x,y (e.g., {example})", + pair.trim() + ); + } + let x: T = parts[0] + .trim() + .parse() + .map_err(|e| anyhow::anyhow!("Invalid x in '{}': {e}", pair.trim()))?; + let y: T = parts[1] + .trim() + .parse() + .map_err(|e| anyhow::anyhow!("Invalid y in '{}': {e}", pair.trim()))?; + Ok((x, y)) + }) + .collect() +} + +// --------------------------------------------------------------------------- +// Random generation (LCG-based) +// --------------------------------------------------------------------------- + +/// LCG PRNG step — returns next state and a uniform f64 in [0, 1). +pub fn lcg_step(state: &mut u64) -> f64 { + *state = state + .wrapping_mul(6364136223846793005) + .wrapping_add(1442695040888963407); + (*state >> 33) as f64 / (1u64 << 31) as f64 +} + +/// Initialize LCG state from seed or system time. 
+pub fn lcg_init(seed: Option) -> u64 { + seed.unwrap_or_else(|| { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos() as u64 + }) +} + +/// Generate a random Erdos-Renyi graph using a simple LCG PRNG. +pub fn create_random_graph(num_vertices: usize, edge_prob: f64, seed: Option) -> SimpleGraph { + let mut state = lcg_init(seed); + let mut edges = Vec::new(); + for i in 0..num_vertices { + for j in (i + 1)..num_vertices { + let rand_val = lcg_step(&mut state); + if rand_val < edge_prob { + edges.push((i, j)); + } + } + } + SimpleGraph::new(num_vertices, edges) +} + +/// Generate random unique integer positions on a grid for KingsSubgraph/TriangularSubgraph. +pub fn create_random_int_positions(num_vertices: usize, seed: Option) -> Vec<(i32, i32)> { + let mut state = lcg_init(seed); + let grid_size = (num_vertices as f64).sqrt().ceil() as i32 + 1; + let mut positions = std::collections::BTreeSet::new(); + while positions.len() < num_vertices { + let x = (lcg_step(&mut state) * grid_size as f64) as i32; + let y = (lcg_step(&mut state) * grid_size as f64) as i32; + positions.insert((x, y)); + } + positions.into_iter().collect() +} + +/// Generate random float positions in [0, sqrt(N)] x [0, sqrt(N)] for UnitDiskGraph. +pub fn create_random_float_positions(num_vertices: usize, seed: Option) -> Vec<(f64, f64)> { + let mut state = lcg_init(seed); + let side = (num_vertices as f64).sqrt(); + (0..num_vertices) + .map(|_| { + let x = lcg_step(&mut state) * side; + let y = lcg_step(&mut state) * side; + (x, y) + }) + .collect() +} + +// --------------------------------------------------------------------------- +// Small shared helpers +// --------------------------------------------------------------------------- + +pub fn ser(problem: T) -> Result { + Ok(serde_json::to_value(problem)?) 
+} + +pub fn variant_map(pairs: &[(&str, &str)]) -> BTreeMap { + pairs + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect() +} diff --git a/problemreductions-cli/tests/cli_tests.rs b/problemreductions-cli/tests/cli_tests.rs index dd0395cf1..6434bd4d1 100644 --- a/problemreductions-cli/tests/cli_tests.rs +++ b/problemreductions-cli/tests/cli_tests.rs @@ -2552,3 +2552,242 @@ fn test_solve_timeout_zero_means_no_limit() { std::fs::remove_file(&problem_file).ok(); } + +// --------------------------------------------------------------------------- +// Geometry-based graph tests +// --------------------------------------------------------------------------- + +#[test] +fn test_create_mis_kings_subgraph() { + let output = pred() + .args([ + "create", + "MIS/KingsSubgraph", + "--positions", + "0,0;1,0;1,1;0,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "KingsSubgraph"); + assert!(json["data"].is_object()); +} + +#[test] +fn test_create_mis_triangular_subgraph() { + let output = pred() + .args([ + "create", + "MIS/TriangularSubgraph", + "--positions", + "0,0;0,1;1,0;1,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "TriangularSubgraph"); +} + +#[test] +fn test_create_mis_unit_disk_graph() { + let output = pred() + .args([ + "create", + "MIS/UnitDiskGraph", + "--positions", + "0,0;1,0;0.5,0.8", + "--radius", + "1.5", + ]) + .output() + .unwrap(); + 
assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "UnitDiskGraph"); +} + +#[test] +fn test_create_mvc_kings_subgraph_unsupported_variant() { + // MVC doesn't have a KingsSubgraph variant registered + let output = pred() + .args(["create", "MVC/KingsSubgraph", "--positions", "0,0;1,0;1,1"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!( + stderr.contains("No variant"), + "should mention variant mismatch: {stderr}" + ); +} + +#[test] +fn test_create_mis_unit_disk_graph_default_radius() { + let output = pred() + .args([ + "create", + "MIS/UnitDiskGraph", + "--positions", + "0,0;0.5,0;1,0", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "UnitDiskGraph"); +} + +#[test] +fn test_create_mis_kings_subgraph_with_weights() { + let output = pred() + .args([ + "create", + "MIS/KingsSubgraph", + "--positions", + "0,0;1,0;1,1", + "--weights", + "2,3,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "KingsSubgraph"); +} + +#[test] +fn test_create_random_kings_subgraph() { + let output = pred() + .args([ + "create", + 
"MIS/KingsSubgraph", + "--random", + "--num-vertices", + "10", + "--seed", + "42", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "KingsSubgraph"); +} + +#[test] +fn test_create_random_triangular_subgraph() { + let output = pred() + .args([ + "create", + "MIS/TriangularSubgraph", + "--random", + "--num-vertices", + "8", + "--seed", + "42", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "TriangularSubgraph"); +} + +#[test] +fn test_create_random_unit_disk_graph() { + let output = pred() + .args([ + "create", + "MIS/UnitDiskGraph", + "--random", + "--num-vertices", + "10", + "--radius", + "1.5", + "--seed", + "42", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "UnitDiskGraph"); +} + +#[test] +fn test_create_kings_subgraph_help() { + let output = pred() + .args(["create", "MIS/KingsSubgraph"]) + .output() + .unwrap(); + assert!(output.status.success()); + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!( + stderr.contains("positions") || stderr.contains("MaximumIndependentSet"), + "stderr should show help: {stderr}" + ); +} + +#[test] +fn 
test_create_geometry_graph_missing_positions() { + let output = pred() + .args(["create", "MIS/KingsSubgraph", "--weights", "1,2,3"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!( + stderr.contains("--positions"), + "should mention --positions: {stderr}" + ); +} diff --git a/problemreductions-macros/src/lib.rs b/problemreductions-macros/src/lib.rs index 6ff9dce99..1e0cbb557 100644 --- a/problemreductions-macros/src/lib.rs +++ b/problemreductions-macros/src/lib.rs @@ -280,6 +280,14 @@ fn generate_reduction_entry( }, } } + + const _: () = { + fn _assert_declared_variant() {} + fn _check() { + _assert_declared_variant::<#source_type>(); + _assert_declared_variant::<#target_type>(); + } + }; }; Ok(output) diff --git a/src/lib.rs b/src/lib.rs index 278b0f3d3..e76634374 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -68,6 +68,9 @@ pub use types::{ // Re-export proc macro for reduction registration pub use problemreductions_macros::reduction; +// Re-export inventory so `declare_variants!` can use `$crate::inventory::submit!` +pub use inventory; + #[cfg(test)] #[path = "unit_tests/graph_models.rs"] mod test_graph_models; diff --git a/src/models/graph/kcoloring.rs b/src/models/graph/kcoloring.rs index 281c1fbd6..5b3c160d0 100644 --- a/src/models/graph/kcoloring.rs +++ b/src/models/graph/kcoloring.rs @@ -4,9 +4,9 @@ //! such that no two adjacent vertices have the same color. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{Problem, SatisfactionProblem}; -use crate::variant::{KValue, VariantParam, KN}; +use crate::variant::{KValue, VariantParam, K2, K3, K4, K5, KN}; use serde::{Deserialize, Serialize}; inventory::submit! { @@ -183,6 +183,14 @@ pub(crate) fn is_valid_coloring( true } +crate::declare_variants! 
{ + KColoring => "k^num_vertices", + KColoring => "2^num_vertices", + KColoring => "3^num_vertices", + KColoring => "4^num_vertices", + KColoring => "5^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/kcoloring.rs"] mod tests; diff --git a/src/models/graph/max_cut.rs b/src/models/graph/max_cut.rs index 6024a33f8..3b8c9c210 100644 --- a/src/models/graph/max_cut.rs +++ b/src/models/graph/max_cut.rs @@ -4,7 +4,7 @@ //! that maximizes the total weight of edges crossing the partition. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -214,6 +214,10 @@ where total } +crate::declare_variants! { + MaxCut => "2^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/max_cut.rs"] mod tests; diff --git a/src/models/graph/maximal_is.rs b/src/models/graph/maximal_is.rs index dee4722ba..9b39f89be 100644 --- a/src/models/graph/maximal_is.rs +++ b/src/models/graph/maximal_is.rs @@ -4,7 +4,7 @@ //! cannot be extended by adding any other vertex. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -215,6 +215,10 @@ pub(crate) fn is_maximal_independent_set(graph: &G, selected: &[bool]) true } +crate::declare_variants! { + MaximalIS => "2^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/maximal_is.rs"] mod tests; diff --git a/src/models/graph/maximum_clique.rs b/src/models/graph/maximum_clique.rs index e293b037f..223aacecb 100644 --- a/src/models/graph/maximum_clique.rs +++ b/src/models/graph/maximum_clique.rs @@ -4,7 +4,7 @@ //! such that all vertices in the subset are pairwise adjacent. 
use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -170,6 +170,10 @@ fn is_clique_config(graph: &G, config: &[usize]) -> bool { true } +crate::declare_variants! { + MaximumClique => "2^num_vertices", +} + /// Check if a set of vertices forms a clique. /// /// # Arguments diff --git a/src/models/graph/maximum_independent_set.rs b/src/models/graph/maximum_independent_set.rs index 2cd2802ff..36aaa8ae4 100644 --- a/src/models/graph/maximum_independent_set.rs +++ b/src/models/graph/maximum_independent_set.rs @@ -4,9 +4,9 @@ //! such that no two vertices in the subset are adjacent. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph}; use crate::traits::{OptimizationProblem, Problem}; -use crate::types::{Direction, SolutionSize, WeightElement}; +use crate::types::{Direction, One, SolutionSize, WeightElement}; use num_traits::Zero; use serde::{Deserialize, Serialize}; @@ -159,6 +159,16 @@ fn is_independent_set_config(graph: &G, config: &[usize]) -> bool { true } +crate::declare_variants! { + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", +} + /// Check if a set of vertices forms an independent set. /// /// # Arguments diff --git a/src/models/graph/maximum_matching.rs b/src/models/graph/maximum_matching.rs index e7b75d3f9..a13abfd24 100644 --- a/src/models/graph/maximum_matching.rs +++ b/src/models/graph/maximum_matching.rs @@ -4,7 +4,7 @@ //! such that no two edges share a vertex. 
use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -219,6 +219,10 @@ where } } +crate::declare_variants! { + MaximumMatching => "2^num_vertices", +} + /// Check if a selection of edges forms a valid matching. /// /// # Panics diff --git a/src/models/graph/minimum_dominating_set.rs b/src/models/graph/minimum_dominating_set.rs index 023f3f713..65d77cdcd 100644 --- a/src/models/graph/minimum_dominating_set.rs +++ b/src/models/graph/minimum_dominating_set.rs @@ -4,7 +4,7 @@ //! such that every vertex is either in the set or adjacent to a vertex in the set. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -169,6 +169,10 @@ where } } +crate::declare_variants! { + MinimumDominatingSet => "2^num_vertices", +} + /// Check if a set of vertices is a dominating set. /// /// # Panics diff --git a/src/models/graph/minimum_vertex_cover.rs b/src/models/graph/minimum_vertex_cover.rs index 757e926bc..60ed20607 100644 --- a/src/models/graph/minimum_vertex_cover.rs +++ b/src/models/graph/minimum_vertex_cover.rs @@ -4,7 +4,7 @@ //! such that every edge has at least one endpoint in the subset. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -156,6 +156,10 @@ fn is_vertex_cover_config(graph: &G, config: &[usize]) -> bool { true } +crate::declare_variants! { + MinimumVertexCover => "2^num_vertices", +} + /// Check if a set of vertices forms a vertex cover. 
/// /// # Arguments diff --git a/src/models/graph/traveling_salesman.rs b/src/models/graph/traveling_salesman.rs index edb3fbaf6..b66b16e15 100644 --- a/src/models/graph/traveling_salesman.rs +++ b/src/models/graph/traveling_salesman.rs @@ -4,7 +4,7 @@ //! that visits every vertex exactly once. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -252,6 +252,10 @@ pub(crate) fn is_hamiltonian_cycle(graph: &G, selected: &[bool]) -> bo visit_count == n } +crate::declare_variants! { + TravelingSalesman => "num_vertices!", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/traveling_salesman.rs"] mod tests; diff --git a/src/models/optimization/ilp.rs b/src/models/optimization/ilp.rs index 7f9776692..15dd5a89b 100644 --- a/src/models/optimization/ilp.rs +++ b/src/models/optimization/ilp.rs @@ -376,6 +376,10 @@ impl OptimizationProblem for ILP { } } +crate::declare_variants! { + ILP => "exp(num_variables)", +} + #[cfg(test)] #[path = "../../unit_tests/models/optimization/ilp.rs"] mod tests; diff --git a/src/models/optimization/qubo.rs b/src/models/optimization/qubo.rs index d3bb01c39..211071881 100644 --- a/src/models/optimization/qubo.rs +++ b/src/models/optimization/qubo.rs @@ -188,6 +188,10 @@ where } } +crate::declare_variants! { + QUBO => "2^num_vars", +} + #[cfg(test)] #[path = "../../unit_tests/models/optimization/qubo.rs"] mod tests; diff --git a/src/models/optimization/spin_glass.rs b/src/models/optimization/spin_glass.rs index 5259f5798..81464d120 100644 --- a/src/models/optimization/spin_glass.rs +++ b/src/models/optimization/spin_glass.rs @@ -250,6 +250,11 @@ where } } +crate::declare_variants! 
{ + SpinGlass => "2^num_vertices", + SpinGlass => "2^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/optimization/spin_glass.rs"] mod tests; diff --git a/src/models/satisfiability/ksat.rs b/src/models/satisfiability/ksat.rs index 0d74d7e01..1273548ac 100644 --- a/src/models/satisfiability/ksat.rs +++ b/src/models/satisfiability/ksat.rs @@ -7,7 +7,7 @@ use crate::registry::{FieldInfo, ProblemSchemaEntry}; use crate::traits::{Problem, SatisfactionProblem}; -use crate::variant::KValue; +use crate::variant::{KValue, K2, K3, KN}; use serde::{Deserialize, Serialize}; use super::CNFClause; @@ -183,6 +183,12 @@ impl Problem for KSatisfiability { impl SatisfactionProblem for KSatisfiability {} +crate::declare_variants! { + KSatisfiability => "2^num_variables", + KSatisfiability => "2^num_variables", + KSatisfiability => "2^num_variables", +} + #[cfg(test)] #[path = "../../unit_tests/models/satisfiability/ksat.rs"] mod tests; diff --git a/src/models/satisfiability/sat.rs b/src/models/satisfiability/sat.rs index 380a0a35c..4401008b2 100644 --- a/src/models/satisfiability/sat.rs +++ b/src/models/satisfiability/sat.rs @@ -195,6 +195,10 @@ impl Problem for Satisfiability { impl SatisfactionProblem for Satisfiability {} +crate::declare_variants! { + Satisfiability => "2^num_variables", +} + /// Check if an assignment satisfies a SAT formula. /// /// # Arguments diff --git a/src/models/set/maximum_set_packing.rs b/src/models/set/maximum_set_packing.rs index 55a1af2ab..19719fa31 100644 --- a/src/models/set/maximum_set_packing.rs +++ b/src/models/set/maximum_set_packing.rs @@ -173,6 +173,11 @@ where } } +crate::declare_variants! { + MaximumSetPacking => "2^num_sets", + MaximumSetPacking => "2^num_sets", +} + /// Check if a selection forms a valid set packing (pairwise disjoint). 
fn is_valid_packing(sets: &[Vec], config: &[usize]) -> bool { let selected_sets: Vec<_> = config diff --git a/src/models/set/minimum_set_covering.rs b/src/models/set/minimum_set_covering.rs index 90f281b73..c37f34d42 100644 --- a/src/models/set/minimum_set_covering.rs +++ b/src/models/set/minimum_set_covering.rs @@ -178,6 +178,10 @@ where } } +crate::declare_variants! { + MinimumSetCovering => "2^num_sets", +} + /// Check if a selection of sets forms a valid set cover. #[cfg(test)] pub(crate) fn is_set_cover(universe_size: usize, sets: &[Vec], selected: &[bool]) -> bool { diff --git a/src/models/specialized/circuit.rs b/src/models/specialized/circuit.rs index 287841f77..e352fd2ed 100644 --- a/src/models/specialized/circuit.rs +++ b/src/models/specialized/circuit.rs @@ -299,6 +299,10 @@ impl Problem for CircuitSAT { impl SatisfactionProblem for CircuitSAT {} +crate::declare_variants! { + CircuitSAT => "2^num_inputs", +} + #[cfg(test)] #[path = "../../unit_tests/models/specialized/circuit.rs"] mod tests; diff --git a/src/models/specialized/factoring.rs b/src/models/specialized/factoring.rs index 05bce8cb4..4aa83d90b 100644 --- a/src/models/specialized/factoring.rs +++ b/src/models/specialized/factoring.rs @@ -162,6 +162,10 @@ impl OptimizationProblem for Factoring { } } +crate::declare_variants! 
{ + Factoring => "exp(sqrt(num_bits))", +} + #[cfg(test)] #[path = "../../unit_tests/models/specialized/factoring.rs"] mod tests; diff --git a/src/registry/mod.rs b/src/registry/mod.rs index f14a02901..e7bad24e4 100644 --- a/src/registry/mod.rs +++ b/src/registry/mod.rs @@ -46,6 +46,8 @@ mod info; mod schema; +pub mod variant; pub use info::{ComplexityClass, FieldInfo, ProblemInfo, ProblemMetadata}; pub use schema::{collect_schemas, FieldInfoJson, ProblemSchemaEntry, ProblemSchemaJson}; +pub use variant::VariantEntry; diff --git a/src/registry/variant.rs b/src/registry/variant.rs new file mode 100644 index 000000000..d73a65e8c --- /dev/null +++ b/src/registry/variant.rs @@ -0,0 +1,33 @@ +//! Explicit variant registration via inventory. + +/// A registered problem variant entry. +/// +/// Submitted by [`declare_variants!`] for each concrete problem type. +/// The reduction graph uses these entries to build nodes with complexity metadata. +pub struct VariantEntry { + /// Problem name (from `Problem::NAME`). + pub name: &'static str, + /// Function returning variant key-value pairs (from `Problem::variant()`). + pub variant_fn: fn() -> Vec<(&'static str, &'static str)>, + /// Worst-case time complexity expression (e.g., `"2^num_vertices"`). + pub complexity: &'static str, +} + +impl VariantEntry { + /// Get the variant by calling the function. + pub fn variant(&self) -> Vec<(&'static str, &'static str)> { + (self.variant_fn)() + } +} + +impl std::fmt::Debug for VariantEntry { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("VariantEntry") + .field("name", &self.name) + .field("variant", &self.variant()) + .field("complexity", &self.complexity) + .finish() + } +} + +inventory::collect!(VariantEntry); diff --git a/src/rules/graph.rs b/src/rules/graph.rs index 75c1499f6..722d75d82 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -1,10 +1,14 @@ //! Runtime reduction graph for discovering and executing reduction paths. //! 
//! The graph uses variant-level nodes: each node is a unique `(problem_name, variant)` pair. +//! Nodes are built in two phases: +//! 1. From `VariantEntry` inventory (with complexity metadata) +//! 2. From `ReductionEntry` inventory (fallback for backwards compatibility) +//! //! Edges come exclusively from `#[reduction]` registrations via `inventory::iter::`. //! //! This module implements: -//! - Variant-level graph construction from `ReductionEntry` inventory +//! - Variant-level graph construction from `VariantEntry` and `ReductionEntry` inventory //! - Dijkstra's algorithm with custom cost functions for optimal paths //! - JSON export for documentation and visualization @@ -29,6 +33,7 @@ pub struct ReductionEdgeInfo { pub source_variant: BTreeMap, pub target_name: &'static str, pub target_variant: BTreeMap, + pub overhead: ReductionOverhead, } /// Internal edge data combining overhead and executable reduce function. @@ -72,6 +77,8 @@ pub(crate) struct NodeJson { pub(crate) category: String, /// Relative rustdoc path (e.g., "models/graph/maximum_independent_set"). pub(crate) doc_path: String, + /// Worst-case time complexity expression (empty if not declared). + pub(crate) complexity: String, } /// Internal reference to a problem variant, used as HashMap key. @@ -207,6 +214,7 @@ pub(crate) fn classify_problem_category(module_path: &str) -> &str { struct VariantNode { name: &'static str, variant: BTreeMap, + complexity: &'static str, } /// Information about a neighbor in the reduction graph. @@ -271,6 +279,7 @@ impl ReductionGraph { // Helper to ensure a variant node exists in the graph. 
let ensure_node = |name: &'static str, variant: BTreeMap, + complexity: &'static str, nodes: &mut Vec, graph: &mut DiGraph, node_index: &mut HashMap, @@ -284,7 +293,11 @@ impl ReductionGraph { idx } else { let node_id = nodes.len(); - nodes.push(VariantNode { name, variant }); + nodes.push(VariantNode { + name, + variant, + complexity, + }); let idx = graph.add_node(node_id); node_index.insert(vref, idx); name_to_nodes.entry(name).or_default().push(idx); @@ -292,14 +305,31 @@ impl ReductionGraph { } }; - // Register reductions from inventory (auto-discovery) + // Phase 1: Build nodes from VariantEntry inventory + for entry in inventory::iter:: { + let variant = Self::variant_to_map(&entry.variant()); + ensure_node( + entry.name, + variant, + entry.complexity, + &mut nodes, + &mut graph, + &mut node_index, + &mut name_to_nodes, + ); + } + + // Phase 2: Build edges from ReductionEntry inventory for entry in inventory::iter:: { let source_variant = Self::variant_to_map(&entry.source_variant()); let target_variant = Self::variant_to_map(&entry.target_variant()); + // Nodes should already exist from Phase 1. + // Fall back to creating them with empty complexity for backwards compatibility. let src_idx = ensure_node( entry.source_name, source_variant, + "", &mut nodes, &mut graph, &mut node_index, @@ -308,6 +338,7 @@ impl ReductionGraph { let dst_idx = ensure_node( entry.target_name, target_variant, + "", &mut nodes, &mut graph, &mut node_index, @@ -315,8 +346,6 @@ impl ReductionGraph { ); let overhead = entry.overhead(); - - // Check if edge already exists (avoid duplicates) if graph.find_edge(src_idx, dst_idx).is_none() { graph.add_edge( src_idx, @@ -586,9 +615,12 @@ impl ReductionGraph { /// Get all variant maps registered for a problem name. /// - /// Returns an empty `Vec` if the name is not found. + /// Returns variants sorted deterministically: the "default" variant + /// (SimpleGraph, i32, etc.) comes first, then remaining variants + /// in lexicographic order. 
pub fn variants_for(&self, name: &str) -> Vec> { - self.name_to_nodes + let mut variants: Vec> = self + .name_to_nodes .get(name) .map(|indices| { indices @@ -596,7 +628,33 @@ impl ReductionGraph { .map(|&idx| self.nodes[self.graph[idx]].variant.clone()) .collect() }) - .unwrap_or_default() + .unwrap_or_default(); + // Sort deterministically: default variant values (SimpleGraph, One, KN) + // sort first so callers can rely on variants[0] being the "base" variant. + variants.sort_by(|a, b| { + fn default_rank(v: &BTreeMap) -> usize { + v.values() + .filter(|val| !["SimpleGraph", "One", "KN"].contains(&val.as_str())) + .count() + } + default_rank(a).cmp(&default_rank(b)).then_with(|| a.cmp(b)) + }); + variants + } + + /// Get the complexity expression for a specific variant. + pub fn variant_complexity( + &self, + name: &str, + variant: &BTreeMap, + ) -> Option<&'static str> { + let idx = self.lookup_node(name, variant)?; + let node = &self.nodes[self.graph[idx]]; + if node.complexity.is_empty() { + None + } else { + Some(node.complexity) + } } /// Get all outgoing reductions from a problem (across all its variants). 
@@ -616,6 +674,7 @@ impl ReductionGraph { source_variant: src.variant.clone(), target_name: dst.name, target_variant: dst.variant.clone(), + overhead: self.graph[e.id()].overhead.clone(), } }) .collect() @@ -662,6 +721,7 @@ impl ReductionGraph { source_variant: src.variant.clone(), target_name: dst.name, target_variant: dst.variant.clone(), + overhead: self.graph[e.id()].overhead.clone(), } }) .collect() @@ -852,6 +912,7 @@ impl ReductionGraph { variant: node.variant.clone(), category, doc_path, + complexity: node.complexity.to_string(), }, ) }) diff --git a/src/rules/maximumindependentset_casts.rs b/src/rules/maximumindependentset_casts.rs index 9027cd0de..c293f0019 100644 --- a/src/rules/maximumindependentset_casts.rs +++ b/src/rules/maximumindependentset_casts.rs @@ -6,6 +6,7 @@ use crate::impl_variant_reduction; use crate::models::graph::MaximumIndependentSet; use crate::topology::{KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph}; +use crate::types::One; use crate::variant::CastToParent; impl_variant_reduction!( @@ -31,3 +32,45 @@ impl_variant_reduction!( |src| MaximumIndependentSet::new( src.graph().cast_to_parent(), src.weights().to_vec()) ); + +// Graph-hierarchy casts (same weight One) +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().cast_to_parent(), src.weights().to_vec()) +); + +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().cast_to_parent(), src.weights().to_vec()) +); + +// Weight-hierarchy casts (One → i32) +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().clone(), src.weights().iter().map(|w| w.cast_to_parent()).collect()) +); + +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| 
MaximumIndependentSet::new( + src.graph().clone(), src.weights().iter().map(|w| w.cast_to_parent()).collect()) +); + +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().clone(), src.weights().iter().map(|w| w.cast_to_parent()).collect()) +); diff --git a/src/rules/maximumindependentset_gridgraph.rs b/src/rules/maximumindependentset_gridgraph.rs index 0ce20fe4d..e336f033a 100644 --- a/src/rules/maximumindependentset_gridgraph.rs +++ b/src/rules/maximumindependentset_gridgraph.rs @@ -1,24 +1,25 @@ -//! Reduction from MaximumIndependentSet on SimpleGraph/UnitDiskGraph to KingsSubgraph +//! Reduction from unweighted MaximumIndependentSet on SimpleGraph to KingsSubgraph //! using the King's Subgraph (KSG) unit disk mapping. //! -//! Maps an arbitrary graph's MIS problem to an equivalent weighted MIS on a grid graph. +//! Maps an arbitrary graph's MIS problem to an equivalent MIS on a grid graph. use crate::models::graph::MaximumIndependentSet; use crate::reduction; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::rules::unitdiskmapping::ksg; -use crate::topology::{Graph, KingsSubgraph, SimpleGraph, UnitDiskGraph}; +use crate::topology::{Graph, KingsSubgraph, SimpleGraph}; +use crate::types::One; -/// Result of reducing MIS on SimpleGraph to MIS on KingsSubgraph. +/// Result of reducing MIS to MIS. 
#[derive(Debug, Clone)] -pub struct ReductionISSimpleToGrid { - target: MaximumIndependentSet, +pub struct ReductionISSimpleOneToGridOne { + target: MaximumIndependentSet, mapping_result: ksg::MappingResult, } -impl ReductionResult for ReductionISSimpleToGrid { - type Source = MaximumIndependentSet; - type Target = MaximumIndependentSet; +impl ReductionResult for ReductionISSimpleOneToGridOne { + type Source = MaximumIndependentSet; + type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { &self.target @@ -35,34 +36,34 @@ impl ReductionResult for ReductionISSimpleToGrid { num_edges = "num_vertices * num_vertices", } )] -impl ReduceTo> - for MaximumIndependentSet +impl ReduceTo> + for MaximumIndependentSet { - type Result = ReductionISSimpleToGrid; + type Result = ReductionISSimpleOneToGridOne; fn reduce_to(&self) -> Self::Result { let n = self.graph().num_vertices(); let edges = self.graph().edges(); let result = ksg::map_unweighted(n, &edges); - let weights = result.node_weights.clone(); let grid = result.to_kings_subgraph(); + let weights = vec![One; grid.num_vertices()]; let target = MaximumIndependentSet::new(grid, weights); - ReductionISSimpleToGrid { + ReductionISSimpleOneToGridOne { target, mapping_result: result, } } } -/// Result of reducing MIS on UnitDiskGraph to MIS on KingsSubgraph. +/// Result of reducing MIS to MIS. 
#[derive(Debug, Clone)] -pub struct ReductionISUnitDiskToGrid { +pub struct ReductionISSimpleOneToGridWeighted { target: MaximumIndependentSet, mapping_result: ksg::MappingResult, } -impl ReductionResult for ReductionISUnitDiskToGrid { - type Source = MaximumIndependentSet; +impl ReductionResult for ReductionISSimpleOneToGridWeighted { + type Source = MaximumIndependentSet; type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { @@ -81,18 +82,18 @@ impl ReductionResult for ReductionISUnitDiskToGrid { } )] impl ReduceTo> - for MaximumIndependentSet + for MaximumIndependentSet { - type Result = ReductionISUnitDiskToGrid; + type Result = ReductionISSimpleOneToGridWeighted; fn reduce_to(&self) -> Self::Result { let n = self.graph().num_vertices(); - let edges = Graph::edges(self.graph()); + let edges = self.graph().edges(); let result = ksg::map_unweighted(n, &edges); let weights = result.node_weights.clone(); let grid = result.to_kings_subgraph(); let target = MaximumIndependentSet::new(grid, weights); - ReductionISUnitDiskToGrid { + ReductionISSimpleOneToGridWeighted { target, mapping_result: result, } diff --git a/src/rules/maximumindependentset_triangular.rs b/src/rules/maximumindependentset_triangular.rs index 60e9338b9..0f57af8e2 100644 --- a/src/rules/maximumindependentset_triangular.rs +++ b/src/rules/maximumindependentset_triangular.rs @@ -1,5 +1,5 @@ -//! Reduction from MaximumIndependentSet on SimpleGraph to TriangularSubgraph -//! using the weighted triangular unit disk mapping. +//! Reduction from unweighted MaximumIndependentSet on SimpleGraph to TriangularSubgraph +//! using the triangular unit disk mapping. //! //! Maps an arbitrary graph's MIS problem to an equivalent weighted MIS on a //! triangular lattice grid graph. 
@@ -10,8 +10,9 @@ use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::rules::unitdiskmapping::ksg; use crate::rules::unitdiskmapping::triangular; use crate::topology::{Graph, SimpleGraph, TriangularSubgraph}; +use crate::types::One; -/// Result of reducing MIS on SimpleGraph to MIS on TriangularSubgraph. +/// Result of reducing MIS to MIS. #[derive(Debug, Clone)] pub struct ReductionISSimpleToTriangular { target: MaximumIndependentSet, @@ -19,7 +20,7 @@ pub struct ReductionISSimpleToTriangular { } impl ReductionResult for ReductionISSimpleToTriangular { - type Source = MaximumIndependentSet; + type Source = MaximumIndependentSet; type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { @@ -38,7 +39,7 @@ impl ReductionResult for ReductionISSimpleToTriangular { } )] impl ReduceTo> - for MaximumIndependentSet + for MaximumIndependentSet { type Result = ReductionISSimpleToTriangular; diff --git a/src/rules/sat_maximumindependentset.rs b/src/rules/sat_maximumindependentset.rs index 89978b9be..f39afe728 100644 --- a/src/rules/sat_maximumindependentset.rs +++ b/src/rules/sat_maximumindependentset.rs @@ -13,6 +13,7 @@ use crate::models::satisfiability::Satisfiability; use crate::reduction; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::topology::SimpleGraph; +use crate::types::One; /// A literal in the SAT problem, representing a variable or its negation. #[derive(Debug, Clone, PartialEq, Eq)] @@ -53,7 +54,7 @@ impl BoolVar { #[derive(Debug, Clone)] pub struct ReductionSATToIS { /// The target MaximumIndependentSet problem. - target: MaximumIndependentSet, + target: MaximumIndependentSet, /// Mapping from vertex index to the literal it represents. literals: Vec, /// The number of variables in the source SAT problem. 
@@ -64,7 +65,7 @@ pub struct ReductionSATToIS { impl ReductionResult for ReductionSATToIS { type Source = Satisfiability; - type Target = MaximumIndependentSet; + type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { &self.target @@ -113,7 +114,7 @@ impl ReductionSATToIS { num_edges = "num_literals^2", } )] -impl ReduceTo> for Satisfiability { +impl ReduceTo> for Satisfiability { type Result = ReductionSATToIS; fn reduce_to(&self) -> Self::Result { @@ -153,7 +154,7 @@ impl ReduceTo> for Satisfiability { let target = MaximumIndependentSet::new( SimpleGraph::new(vertex_count, edges), - vec![1i32; vertex_count], + vec![One; vertex_count], ); ReductionSATToIS { diff --git a/src/rules/unitdiskmapping/grid.rs b/src/rules/unitdiskmapping/grid.rs index 27ae7aacf..16edfb5b6 100644 --- a/src/rules/unitdiskmapping/grid.rs +++ b/src/rules/unitdiskmapping/grid.rs @@ -181,6 +181,20 @@ impl MappingGrid { coords } + /// Check if any doubled or connected cells remain in the grid. + /// Returns true if the mapping is not fully resolved. + /// Matches Julia's `GridGraph()` assertion. + pub fn has_unresolved_cells(&self) -> bool { + self.content.iter().any(|row| { + row.iter().any(|cell| { + matches!( + cell, + CellState::Doubled { .. } | CellState::Connected { .. } + ) + }) + }) + } + /// Get all doubled cell coordinates. /// Returns a set of (row, col) for cells in the Doubled state. 
pub fn doubled_cells(&self) -> std::collections::HashSet<(usize, usize)> { diff --git a/src/rules/unitdiskmapping/ksg/gadgets.rs b/src/rules/unitdiskmapping/ksg/gadgets.rs index a14345564..3bf84ca9f 100644 --- a/src/rules/unitdiskmapping/ksg/gadgets.rs +++ b/src/rules/unitdiskmapping/ksg/gadgets.rs @@ -1635,7 +1635,7 @@ fn apply_gadget_boxed(pattern: &dyn KsgPatternBoxed, grid: &mut MappingGrid, i: let state = match cell { PatternCell::Empty => CellState::Empty, PatternCell::Occupied => CellState::Occupied { weight: 1 }, - PatternCell::Doubled => CellState::Doubled { weight: 2 }, + PatternCell::Doubled => CellState::Doubled { weight: 1 }, PatternCell::Connected => CellState::Connected { weight: 1 }, }; grid.set(grid_r, grid_c, state); diff --git a/src/rules/unitdiskmapping/ksg/mapping.rs b/src/rules/unitdiskmapping/ksg/mapping.rs index 5a5bd4f8b..af2527f68 100644 --- a/src/rules/unitdiskmapping/ksg/mapping.rs +++ b/src/rules/unitdiskmapping/ksg/mapping.rs @@ -593,16 +593,26 @@ pub fn map_unweighted_with_order( let gadget_overhead: i32 = tape.iter().map(tape_entry_mis_overhead).sum(); let mis_overhead = copyline_overhead + gadget_overhead; - // Extract positions and weights from occupied cells - let (positions, node_weights): (Vec<(i32, i32)>, Vec) = grid + // Assert all doubled/connected cells have been resolved by gadgets. + // Matches Julia's `GridGraph()` check: "This mapping is not done yet!" + debug_assert!( + !grid.has_unresolved_cells(), + "Mapping is not done: doubled or connected cells remain after gadget application" + ); + + // Extract positions from occupied cells. + // In unweighted mode, all node weights are 1 — matching Julia's behavior where + // `node(::Type{<:UnWeightedNode}, i, j, w) = Node(i, j)` ignores the weight parameter. 
+ let positions: Vec<(i32, i32)> = grid .occupied_coords() .into_iter() .filter_map(|(row, col)| { grid.get(row, col) - .map(|cell| ((row as i32, col as i32), cell.weight())) + .filter(|cell| cell.weight() > 0) + .map(|_| (row as i32, col as i32)) }) - .filter(|&(_, w)| w > 0) - .unzip(); + .collect(); + let node_weights = vec![1i32; positions.len()]; MappingResult { positions, @@ -685,6 +695,12 @@ pub fn map_weighted_with_order( let gadget_overhead: i32 = tape.iter().map(weighted_tape_entry_mis_overhead).sum(); let mis_overhead = copyline_overhead + gadget_overhead; + // Assert all doubled/connected cells have been resolved by gadgets. + debug_assert!( + !grid.has_unresolved_cells(), + "Mapping is not done: doubled or connected cells remain after gadget application" + ); + // Extract positions and weights from occupied cells let (positions, node_weights): (Vec<(i32, i32)>, Vec) = grid .occupied_coords() diff --git a/src/rules/unitdiskmapping/traits.rs b/src/rules/unitdiskmapping/traits.rs index e89910fb0..9bfba019c 100644 --- a/src/rules/unitdiskmapping/traits.rs +++ b/src/rules/unitdiskmapping/traits.rs @@ -178,7 +178,7 @@ pub fn apply_gadget(pattern: &P, grid: &mut MappingGrid, i: usize, j let state = match cell { PatternCell::Empty => CellState::Empty, PatternCell::Occupied => CellState::Occupied { weight: 1 }, - PatternCell::Doubled => CellState::Doubled { weight: 2 }, + PatternCell::Doubled => CellState::Doubled { weight: 1 }, PatternCell::Connected => CellState::Connected { weight: 1 }, }; grid.set(grid_r, grid_c, state); @@ -202,7 +202,7 @@ pub fn unapply_gadget(pattern: &P, grid: &mut MappingGrid, i: usize, let state = match cell { PatternCell::Empty => CellState::Empty, PatternCell::Occupied => CellState::Occupied { weight: 1 }, - PatternCell::Doubled => CellState::Doubled { weight: 2 }, + PatternCell::Doubled => CellState::Doubled { weight: 1 }, PatternCell::Connected => CellState::Connected { weight: 1 }, }; grid.set(grid_r, grid_c, state); diff --git 
a/src/traits.rs b/src/traits.rs index 635718c0c..b4f38dcba 100644 --- a/src/traits.rs +++ b/src/traits.rs @@ -42,6 +42,13 @@ pub trait OptimizationProblem: Problem {} +/// Marker trait for explicitly declared problem variants. +/// +/// Implemented automatically by [`declare_variants!`] for each concrete type. +/// The [`#[reduction]`] proc macro checks this trait at compile time to ensure +/// all reduction source/target types have been declared. +pub trait DeclaredVariant {} + #[cfg(test)] #[path = "unit_tests/traits.rs"] mod tests; diff --git a/src/unit_tests/reduction_graph.rs b/src/unit_tests/reduction_graph.rs index 5dab95623..6bb979ac0 100644 --- a/src/unit_tests/reduction_graph.rs +++ b/src/unit_tests/reduction_graph.rs @@ -314,7 +314,7 @@ fn test_3sat_to_mis_triangular_overhead() { ) .expect("Should find path from 3-SAT to MIS on triangular lattice"); - // Path: K3SAT → SAT → MIS{SimpleGraph,i32} → MIS{TriangularSubgraph,i32} + // Path: K3SAT → SAT → MIS{SimpleGraph,One} → MIS{TriangularSubgraph,i32} assert_eq!( path.type_names(), vec!["KSatisfiability", "Satisfiability", "MaximumIndependentSet"] @@ -339,12 +339,12 @@ fn test_3sat_to_mis_triangular_overhead() { assert_eq!(edges[0].get("num_clauses").unwrap().eval(&test_size), 2.0); assert_eq!(edges[0].get("num_literals").unwrap().eval(&test_size), 6.0); - // Edge 1: SAT → MIS{SimpleGraph,i32} + // Edge 1: SAT → MIS{SimpleGraph,One} // num_vertices = num_literals, num_edges = num_literals^2 assert_eq!(edges[1].get("num_vertices").unwrap().eval(&test_size), 6.0); assert_eq!(edges[1].get("num_edges").unwrap().eval(&test_size), 36.0); - // Edge 2: MIS{SimpleGraph,i32} → MIS{TriangularSubgraph,i32} + // Edge 2: MIS{SimpleGraph,One} → MIS{TriangularSubgraph,i32} // num_vertices = num_vertices^2, num_edges = num_vertices^2 assert_eq!( edges[2].get("num_vertices").unwrap().eval(&test_size), @@ -355,9 +355,9 @@ fn test_3sat_to_mis_triangular_overhead() { // Compose overheads symbolically along the path. 
// The composed overhead maps 3-SAT input variables to final MIS{Triangular} output. // - // K3SAT → SAT: {num_clauses: C, num_vars: V, num_literals: L} (identity) - // SAT → MIS: {num_vertices: L, num_edges: L²} - // MIS → MIS{Tri}: {num_vertices: num_vertices², num_edges: num_vertices²} + // K3SAT → SAT: {num_clauses: C, num_vars: V, num_literals: L} (identity) + // SAT → MIS{SG,One}: {num_vertices: L, num_edges: L²} + // MIS{SG,One→Tri}: {num_vertices: V², num_edges: V²} // // Composed: num_vertices = L², num_edges = L² let composed = graph.compose_path_overhead(&path); diff --git a/src/unit_tests/rules/graph.rs b/src/unit_tests/rules/graph.rs index c10f60c48..345b2c19f 100644 --- a/src/unit_tests/rules/graph.rs +++ b/src/unit_tests/rules/graph.rs @@ -7,7 +7,7 @@ use crate::rules::graph::{classify_problem_category, ReductionStep}; use crate::rules::registry::ReductionEntry; use crate::topology::SimpleGraph; use crate::traits::Problem; -use crate::types::ProblemSize; +use crate::types::{One, ProblemSize}; use std::collections::BTreeMap; #[test] @@ -316,7 +316,7 @@ fn test_sat_based_reductions() { let graph = ReductionGraph::new(); // SAT -> IS - assert!(graph.has_direct_reduction::>()); + assert!(graph.has_direct_reduction::>()); // SAT -> KColoring assert!(graph.has_direct_reduction::>()); @@ -1051,3 +1051,43 @@ fn test_overhead_variables_are_consistent() { } } } + +#[test] +fn test_variant_entry_complexity_available() { + let entries: Vec<_> = inventory::iter:: + .into_iter() + .collect(); + assert!( + !entries.is_empty(), + "VariantEntry inventory should not be empty" + ); + + let mis_entry = entries.iter().find(|e| e.name == "MaximumIndependentSet"); + assert!(mis_entry.is_some(), "MIS should have a VariantEntry"); + let mis_entry = mis_entry.unwrap(); + assert!( + !mis_entry.complexity.is_empty(), + "complexity should not be empty" + ); + + // Exercise Debug impl for VariantEntry + let debug_str = format!("{:?}", mis_entry); + 
assert!(debug_str.contains("VariantEntry")); + assert!(debug_str.contains("MaximumIndependentSet")); + assert!(debug_str.contains("complexity")); +} + +#[test] +fn test_variant_complexity() { + let graph = ReductionGraph::new(); + let variant = ReductionGraph::variant_to_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let complexity = graph.variant_complexity("MaximumIndependentSet", &variant); + assert_eq!(complexity, Some("2^num_vertices")); + + // Unknown problem returns None + let unknown = BTreeMap::new(); + assert_eq!( + graph.variant_complexity("NonExistentProblem", &unknown), + None + ); +} diff --git a/src/unit_tests/rules/maximumindependentset_gridgraph.rs b/src/unit_tests/rules/maximumindependentset_gridgraph.rs index 5adef63ff..734149f5e 100644 --- a/src/unit_tests/rules/maximumindependentset_gridgraph.rs +++ b/src/unit_tests/rules/maximumindependentset_gridgraph.rs @@ -1,75 +1,75 @@ use super::*; use crate::models::graph::MaximumIndependentSet; +use crate::rules::unitdiskmapping::ksg; use crate::solvers::BruteForce; -use crate::topology::{Graph, KingsSubgraph, SimpleGraph, UnitDiskGraph}; +use crate::topology::{Graph, KingsSubgraph, SimpleGraph}; +use crate::types::One; #[test] -fn test_mis_simple_to_grid_closed_loop() { - // Triangle graph: 3 vertices, 3 edges - let problem = MaximumIndependentSet::new( - SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), - vec![1i32; 3], +fn test_map_unweighted_produces_uniform_weights() { + // Triangle graph + let result = ksg::map_unweighted(3, &[(0, 1), (1, 2), (0, 2)]); + assert!( + result.node_weights.iter().all(|&w| w == 1), + "map_unweighted triangle should produce uniform weights, got: {:?}", + result.node_weights ); - let result = ReduceTo::>::reduce_to(&problem); - let target = result.target_problem(); - // The grid graph should have more vertices than the original - assert!(target.graph().num_vertices() > 3); - - // Find best solution on the grid graph using brute force - let solver = 
BruteForce::new(); - let grid_solutions = solver.find_all_best(target); - assert!(!grid_solutions.is_empty()); - - // Map solution back - let original_solution = result.extract_solution(&grid_solutions[0]); - assert_eq!(original_solution.len(), 3); + // Path graph + let result2 = ksg::map_unweighted(3, &[(0, 1), (1, 2)]); + assert!( + result2.node_weights.iter().all(|&w| w == 1), + "map_unweighted path should produce uniform weights, got: {:?}", + result2.node_weights + ); - // For a triangle, MIS size is 1 - let size: usize = original_solution.iter().sum(); - assert_eq!(size, 1, "Max IS in triangle should be 1"); + // Cycle-5 + let result3 = ksg::map_unweighted(5, &[(0, 1), (1, 2), (2, 3), (3, 4), (0, 4)]); + assert!( + result3.node_weights.iter().all(|&w| w == 1), + "map_unweighted cycle5 should produce uniform weights, got: {:?}", + result3.node_weights + ); } #[test] -fn test_mis_simple_to_grid_path_graph() { - // Path graph: 0-1-2 - let problem = - MaximumIndependentSet::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![1i32; 3]); - let result = ReduceTo::>::reduce_to(&problem); +fn test_mis_simple_one_to_kings_one_closed_loop() { + // Path graph: 0-1-2-3-4 (MIS = 3: select vertices 0, 2, 4) + let problem = MaximumIndependentSet::new( + SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]), + vec![One; 5], + ); + let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); + assert!(target.graph().num_vertices() > 5); let solver = BruteForce::new(); let grid_solutions = solver.find_all_best(target); assert!(!grid_solutions.is_empty()); let original_solution = result.extract_solution(&grid_solutions[0]); - - // Path of 3 vertices has MIS size 2 (vertices 0 and 2) + assert_eq!(original_solution.len(), 5); let size: usize = original_solution.iter().sum(); - assert_eq!(size, 2, "Max IS in path should be 2"); + assert_eq!(size, 3, "Max IS in path of 5 should be 3"); } #[test] -fn test_mis_unitdisk_to_grid_closed_loop() { - // Create a 
UnitDiskGraph: 3 points where 0-1 are close, 2 is far - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (0.5, 0.0), (3.0, 0.0)], 1.0); - // Only edge is 0-1 (distance 0.5 <= 1.0), vertex 2 is isolated - assert_eq!(udg.num_edges(), 1); - - let problem = MaximumIndependentSet::new(udg, vec![1i32, 1, 1]); +fn test_mis_simple_one_to_kings_weighted_closed_loop() { + // Path graph: 0-1-2-3-4 (MIS = 3: select vertices 0, 2, 4) + let problem = MaximumIndependentSet::new( + SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]), + vec![One; 5], + ); let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); - - assert!(target.graph().num_vertices() >= 3); + assert!(target.graph().num_vertices() > 5); let solver = BruteForce::new(); let grid_solutions = solver.find_all_best(target); assert!(!grid_solutions.is_empty()); let original_solution = result.extract_solution(&grid_solutions[0]); - assert_eq!(original_solution.len(), 3); - - // MIS should be size 2 (one from {0,1} + vertex 2) + assert_eq!(original_solution.len(), 5); let size: usize = original_solution.iter().sum(); - assert_eq!(size, 2, "Max IS should be 2"); + assert_eq!(size, 3, "Max IS in path of 5 should be 3"); } diff --git a/src/unit_tests/rules/maximumindependentset_triangular.rs b/src/unit_tests/rules/maximumindependentset_triangular.rs index cfd5303cd..62502717b 100644 --- a/src/unit_tests/rules/maximumindependentset_triangular.rs +++ b/src/unit_tests/rules/maximumindependentset_triangular.rs @@ -1,12 +1,13 @@ use super::*; use crate::models::graph::MaximumIndependentSet; use crate::topology::{Graph, SimpleGraph, TriangularSubgraph}; +use crate::types::One; #[test] -fn test_mis_simple_to_triangular_closed_loop() { +fn test_mis_simple_one_to_triangular_closed_loop() { // Path graph: 0-1-2 let problem = - MaximumIndependentSet::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![1i32; 3]); + MaximumIndependentSet::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![One; 3]); let 
result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); @@ -20,9 +21,9 @@ fn test_mis_simple_to_triangular_closed_loop() { } #[test] -fn test_mis_simple_to_triangular_graph_methods() { +fn test_mis_simple_one_to_triangular_graph_methods() { // Single edge graph: 0-1 - let problem = MaximumIndependentSet::new(SimpleGraph::new(2, vec![(0, 1)]), vec![1i32; 2]); + let problem = MaximumIndependentSet::new(SimpleGraph::new(2, vec![(0, 1)]), vec![One; 2]); let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); let graph = target.graph(); diff --git a/src/unit_tests/rules/sat_maximumindependentset.rs b/src/unit_tests/rules/sat_maximumindependentset.rs index c34a467e6..b60f40a54 100644 --- a/src/unit_tests/rules/sat_maximumindependentset.rs +++ b/src/unit_tests/rules/sat_maximumindependentset.rs @@ -45,7 +45,7 @@ fn test_boolvar_complement() { fn test_sat_to_maximumindependentset_closed_loop() { // Simple SAT: (x1) - one clause with one literal let sat = Satisfiability::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // Should have 1 vertex (one literal) @@ -59,7 +59,7 @@ fn test_two_clause_sat_to_is() { // SAT: (x1) AND (NOT x1) // This is unsatisfiable let sat = Satisfiability::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // Should have 2 vertices @@ -79,7 +79,7 @@ fn test_two_clause_sat_to_is() { fn test_extract_solution_basic() { // Simple case: (x1 OR x2) let sat = Satisfiability::new(2, vec![CNFClause::new(vec![1, 2])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); // Select vertex 0 (literal x1) let is_sol = vec![1, 0]; @@ -96,7 +96,7 @@ fn test_extract_solution_basic() { fn 
test_extract_solution_with_negation() { // (NOT x1) - selecting NOT x1 means x1 should be false let sat = Satisfiability::new(1, vec![CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_sol = vec![1]; let sat_sol = reduction.extract_solution(&is_sol); @@ -107,7 +107,7 @@ fn test_extract_solution_with_negation() { fn test_clique_edges_in_clause() { // A clause with 3 literals should form a clique (3 edges) let sat = Satisfiability::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // 3 vertices, 3 edges (complete graph K3) @@ -128,7 +128,7 @@ fn test_complement_edges_across_clauses() { CNFClause::new(vec![2]), ], ); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); assert_eq!(is_problem.graph().num_vertices(), 3); @@ -141,7 +141,7 @@ fn test_is_structure() { 3, vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], ); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // IS should have vertices for literals in clauses @@ -152,7 +152,7 @@ fn test_is_structure() { fn test_empty_sat() { // Empty SAT (trivially satisfiable) let sat = Satisfiability::new(0, vec![]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); assert_eq!(is_problem.graph().num_vertices(), 0); @@ -163,7 +163,7 @@ fn test_empty_sat() { #[test] fn test_literals_accessor() { let sat = Satisfiability::new(2, vec![CNFClause::new(vec![1, -2])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let literals = reduction.literals(); assert_eq!(literals.len(), 2); @@ -206,7 
+206,7 @@ fn test_jl_parity_sat_to_independentset() { let inst = &jl_find_instance_by_label(&sat_data, label)["instance"]; let (num_vars, clauses) = jl_parse_sat_clauses(inst); let source = Satisfiability::new(num_vars, clauses); - let result = ReduceTo::>::reduce_to(&source); + let result = ReduceTo::>::reduce_to(&source); let solver = BruteForce::new(); let best_target = solver.find_all_best(result.target_problem()); let extracted: HashSet> = best_target diff --git a/src/variant.rs b/src/variant.rs index fcac4dc0c..fbf679ee6 100644 --- a/src/variant.rs +++ b/src/variant.rs @@ -146,6 +146,37 @@ impl_variant_param!(K3, "k", parent: KN, cast: |_| KN, k: Some(3)); impl_variant_param!(K2, "k", parent: KN, cast: |_| KN, k: Some(2)); impl_variant_param!(K1, "k", parent: KN, cast: |_| KN, k: Some(1)); +/// Declare explicit problem variants with per-variant complexity metadata. +/// +/// Each entry generates: +/// 1. A `DeclaredVariant` trait impl for compile-time checking +/// 2. A `VariantEntry` inventory submission for runtime graph building +/// +/// # Example +/// +/// ```text +/// declare_variants! { +/// MaximumIndependentSet => "2^num_vertices", +/// MaximumIndependentSet => "2^num_vertices", +/// } +/// ``` +#[macro_export] +macro_rules! declare_variants { + ($($ty:ty => $complexity:expr),+ $(,)?) => { + $( + impl $crate::traits::DeclaredVariant for $ty {} + + $crate::inventory::submit! { + $crate::registry::VariantEntry { + name: <$ty as $crate::traits::Problem>::NAME, + variant_fn: || <$ty as $crate::traits::Problem>::variant(), + complexity: $complexity, + } + } + )+ + }; +} + #[cfg(test)] #[path = "unit_tests/variant.rs"] mod tests;