From 63810fa7ac74dd426b6df270f353056309ee756c Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 13:39:45 +0800 Subject: [PATCH 01/28] fix: show full variant values in `pred show` variants list The variants list now shows all values (e.g., MIS/SimpleGraph/i32) instead of hiding default values. This makes it clear what graph and weight types each variant uses. The diff-from-default slash notation is still used for reduction edges to keep them concise. Co-Authored-By: Claude Opus 4.6 --- problemreductions-cli/src/commands/graph.rs | 26 +++++++++++++-------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/problemreductions-cli/src/commands/graph.rs b/problemreductions-cli/src/commands/graph.rs index f80438dcc..fc3bc30b5 100644 --- a/problemreductions-cli/src/commands/graph.rs +++ b/problemreductions-cli/src/commands/graph.rs @@ -117,17 +117,12 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { "\n{}\n", crate::output::fmt_section(&format!("Variants ({}):", variants.len())) )); - let default_variant = variants.first().cloned().unwrap_or_default(); for v in &variants { - let slash = variant_to_slash(v, &default_variant); - let label = if slash.is_empty() { - format!(" {}", crate::output::fmt_problem_name(&spec.name)) - } else { - format!( - " {}", - crate::output::fmt_problem_name(&format!("{}{}", spec.name, slash)) - ) - }; + let slash = variant_to_full_slash(v); + let label = format!( + " {}", + crate::output::fmt_problem_name(&format!("{}{}", spec.name, slash)) + ); text.push_str(&format!("{label}\n")); } @@ -209,6 +204,17 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { out.emit_with_default_name(&default_name, &text, &json) } +/// Convert a variant BTreeMap to slash notation showing ALL values. +/// E.g., {graph: "SimpleGraph", weight: "i32"} → "/SimpleGraph/i32". 
+fn variant_to_full_slash(variant: &BTreeMap) -> String { + if variant.is_empty() { + String::new() + } else { + let vals: Vec<&str> = variant.values().map(|v| v.as_str()).collect(); + format!("/{}", vals.join("/")) + } +} + /// Convert a variant BTreeMap to slash notation showing only non-default values. /// Given default {graph: "SimpleGraph", weight: "i32"} and variant {graph: "UnitDiskGraph", weight: "i32"}, /// returns "/UnitDiskGraph". From 2a3fba3ecad319e1c667adf7b3b43fdd7b414ecb Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 13:43:15 +0800 Subject: [PATCH 02/28] fix: support slash variant notation in pred create (e.g., MIS/KingsSubgraph) - Use parse_problem_spec + resolve_variant instead of resolve_alias so `pred create MIS/KingsSubgraph --graph ...` works - Resolved variant from reduction graph is used in output JSON instead of hardcoded variant maps - Simplify KColoring and KSatisfiability branches by removing redundant local variant variables Co-Authored-By: Claude Opus 4.6 --- problemreductions-cli/src/commands/create.rs | 87 ++++++++------------ 1 file changed, 36 insertions(+), 51 deletions(-) diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index ffc57252d..df76e80df 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -1,7 +1,7 @@ use crate::cli::CreateArgs; use crate::dispatch::ProblemJsonOutput; use crate::output::OutputConfig; -use crate::problem_name::resolve_alias; +use crate::problem_name::{parse_problem_spec, resolve_variant}; use anyhow::{bail, Context, Result}; use problemreductions::prelude::*; use problemreductions::registry::collect_schemas; @@ -90,17 +90,27 @@ fn print_problem_help(canonical: &str) -> Result<()> { } pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { - let canonical = resolve_alias(&args.problem); + let spec = parse_problem_spec(&args.problem)?; + let canonical = 
&spec.name; if args.random { - return create_random(args, &canonical, out); + return create_random(args, canonical, out); } // Show schema-driven help when no data flags are provided if all_data_flags_empty(args) { - return print_problem_help(&canonical); + return print_problem_help(canonical); } + // Resolve variant from spec (e.g., MIS/KingsSubgraph → {graph: "KingsSubgraph", weight: "i32"}) + let graph = problemreductions::rules::ReductionGraph::new(); + let known_variants = graph.variants_for(canonical); + let resolved_variant = if known_variants.is_empty() { + BTreeMap::new() + } else { + resolve_variant(&spec, &known_variants)? + }; + let (data, variant) = match canonical.as_str() { // Graph problems with vertex weights "MaximumIndependentSet" @@ -114,7 +124,6 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let weights = parse_vertex_weights(args, n)?; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let data = match canonical.as_str() { "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, @@ -122,7 +131,7 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, _ => unreachable!(), }; - (data, variant) + (data, resolved_variant.clone()) } // Graph problems with edge weights @@ -134,14 +143,13 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let edge_weights = parse_edge_weights(args, graph.num_edges())?; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let data = match canonical.as_str() { "MaxCut" => ser(MaxCut::new(graph, edge_weights))?, "MaximumMatching" => ser(MaximumMatching::new(graph, edge_weights))?, "TravelingSalesman" => ser(TravelingSalesman::new(graph, edge_weights))?, _ => unreachable!(), }; - (data, variant) + (data, resolved_variant.clone()) } // 
KColoring @@ -149,27 +157,16 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { let (graph, _) = parse_graph(args).map_err(|e| { anyhow::anyhow!("{e}\n\nUsage: pred create KColoring --graph 0-1,1-2,2-0 --k 3") })?; - let variant; - let data; - match args.k { - Some(2) => { - variant = variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - Some(3) => { - variant = variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - Some(k) => { - variant = variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::with_k(graph, k))?; - } + let data = match args.k { + Some(2) => ser(KColoring::::new(graph))?, + Some(3) => ser(KColoring::::new(graph))?, + Some(k) => ser(KColoring::::with_k(graph, k))?, None => bail!( "KColoring requires --k \n\n\ Usage: pred create KColoring --graph 0-1,1-2,2-0 --k 3" ), - } - (data, variant) + }; + (data, resolved_variant.clone()) } // SAT @@ -181,8 +178,10 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let clauses = parse_clauses(args)?; - let variant = BTreeMap::new(); - (ser(Satisfiability::new(num_vars, clauses))?, variant) + ( + ser(Satisfiability::new(num_vars, clauses))?, + resolved_variant.clone(), + ) } "KSatisfiability" => { let num_vars = args.num_vars.ok_or_else(|| { @@ -192,30 +191,18 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let clauses = parse_clauses(args)?; - let variant; - let data; - match args.k { - Some(2) => { - variant = variant_map(&[("k", "K2")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - Some(3) => { - variant = variant_map(&[("k", "K3")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - _ => { - variant = variant_map(&[("k", "KN")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - } - (data, variant) + let data = match args.k { + Some(2) => 
ser(KSatisfiability::::new(num_vars, clauses))?, + Some(3) => ser(KSatisfiability::::new(num_vars, clauses))?, + _ => ser(KSatisfiability::::new(num_vars, clauses))?, + }; + (data, resolved_variant.clone()) } // QUBO "QUBO" => { let matrix = parse_matrix(args)?; - let variant = BTreeMap::new(); - (ser(QUBO::from_matrix(matrix))?, variant) + (ser(QUBO::from_matrix(matrix))?, resolved_variant.clone()) } // SpinGlass @@ -227,10 +214,9 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { })?; let couplings = parse_couplings(args, graph.num_edges())?; let fields = parse_fields(args, n)?; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); ( ser(SpinGlass::from_graph(graph, couplings, fields))?, - variant, + resolved_variant.clone(), ) } @@ -246,15 +232,14 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { let n = args .n .ok_or_else(|| anyhow::anyhow!("Factoring requires --n\n\n{usage}"))?; - let variant = BTreeMap::new(); - (ser(Factoring::new(m, n, target))?, variant) + (ser(Factoring::new(m, n, target))?, resolved_variant.clone()) } _ => bail!("{}", crate::problem_name::unknown_problem_error(&canonical)), }; let output = ProblemJsonOutput { - problem_type: canonical.clone(), + problem_type: canonical.to_string(), variant, data, }; From de7db355da58fd29923eff89522832d7e9390405 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 14:44:30 +0800 Subject: [PATCH 03/28] cli improvement --- .gitignore | 3 +- problemreductions-cli/src/cli.rs | 47 ++- problemreductions-cli/src/commands/create.rs | 363 ++++++++++++++++--- problemreductions-cli/src/commands/graph.rs | 47 ++- problemreductions-cli/src/mcp/tools.rs | 267 +++++++++++++- problemreductions-cli/tests/cli_tests.rs | 239 ++++++++++++ src/rules/graph.rs | 3 + 7 files changed, 865 insertions(+), 104 deletions(-) diff --git a/.gitignore b/.gitignore index 19031764a..886a4b60e 100644 --- a/.gitignore +++ b/.gitignore @@ -82,4 +82,5 @@ 
docs/paper/examples/ # Claude Code logs claude-output.log .worktrees/ -.worktree/ \ No newline at end of file +.worktree/ +*.json diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 089c977f4..1de97c436 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -197,27 +197,32 @@ Setup: add one line to your shell rc file: #[derive(clap::Args)] #[command(after_help = "\ -Run `pred create ` without arguments to see problem-specific parameters. +TIP: Run `pred create ` (no other flags) to see problem-specific help. + Not every flag applies to every problem — the above list shows ALL flags. -Random generation (graph-based problems only): - --random Generate a random Erdos-Renyi graph instance - --num-vertices Number of vertices [required with --random] - --edge-prob Edge probability (0.0 to 1.0) [default: 0.5] - --seed Random seed for reproducibility +Flags by problem type: + MIS, MVC, MaxClique, MinDomSet --graph, --weights + MaxCut, MaxMatching, TSP --graph, --edge-weights + SAT, 3SAT/KSAT --num-vars, --clauses [--k] + QUBO --matrix + SpinGlass --graph, --couplings, --fields + KColoring --graph, --k + Factoring --target, --m, --n + +Geometry graph variants (use slash notation, e.g., MIS/KingsSubgraph): + KingsSubgraph, TriangularSubgraph --positions (integer x,y pairs) + UnitDiskGraph --positions (float x,y pairs) [--radius] + +Random generation: + --random --num-vertices N [--edge-prob 0.5] [--seed 42] Examples: - pred create MIS --graph 0-1,1-2,2-3 -o problem.json - pred create MIS --graph 0-1,1-2 --weights 2,1,3 -o weighted.json - pred create SAT --num-vars 3 --clauses \"1,2;-1,3\" -o sat.json - pred create QUBO --matrix \"1,0.5;0.5,2\" -o qubo.json - pred create KColoring --k 3 --graph 0-1,1-2,2-0 -o kcol.json - pred create MaxCut --graph 0-1,1-2 --edge-weights 2,3 - pred create SpinGlass --graph 0-1,1-2 --couplings 1,-1 - pred create MIS --random --num-vertices 10 --edge-prob 0.3 - pred create 
Factoring --target 15 --m 4 --n 4 - -Output (`-o`) uses the standard problem JSON format: - {\"type\": \"...\", \"variant\": {...}, \"data\": {...}}")] + pred create MIS --graph 0-1,1-2,2-3 --weights 1,1,1 + pred create SAT --num-vars 3 --clauses \"1,2;-1,3\" + pred create QUBO --matrix \"1,0.5;0.5,2\" + pred create MIS/KingsSubgraph --positions \"0,0;1,0;1,1;0,1\" + pred create MIS/UnitDiskGraph --positions \"0,0;1,0;0.5,0.8\" --radius 1.5 + pred create MIS --random --num-vertices 10 --edge-prob 0.3")] pub struct CreateArgs { /// Problem type (e.g., MIS, QUBO, SAT) #[arg(value_parser = crate::problem_name::ProblemNameParser)] @@ -270,6 +275,12 @@ pub struct CreateArgs { /// Bits for second factor (for Factoring) #[arg(long)] pub n: Option, + /// Vertex positions for geometry-based graphs (semicolon-separated x,y pairs, e.g., "0,0;1,0;1,1") + #[arg(long)] + pub positions: Option, + /// Radius for UnitDiskGraph [default: 1.0] + #[arg(long)] + pub radius: Option, } #[derive(clap::Args)] diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index df76e80df..0808fd37b 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -5,7 +5,9 @@ use crate::problem_name::{parse_problem_spec, resolve_variant}; use anyhow::{bail, Context, Result}; use problemreductions::prelude::*; use problemreductions::registry::collect_schemas; -use problemreductions::topology::{Graph, SimpleGraph}; +use problemreductions::topology::{ + Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, +}; use problemreductions::variant::{K2, K3, KN}; use serde::Serialize; use std::collections::BTreeMap; @@ -27,11 +29,17 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.num_vertices.is_none() && args.edge_prob.is_none() && args.seed.is_none() + && args.positions.is_none() + && args.radius.is_none() } -fn type_format_hint(type_name: &str) -> &'static str { +fn 
type_format_hint(type_name: &str, graph_type: Option<&str>) -> &'static str { match type_name { - "G" => "edge list: 0-1,1-2,2-3", + "G" => match graph_type { + Some("KingsSubgraph" | "TriangularSubgraph") => "integer positions: \"0,0;1,0;1,1\"", + Some("UnitDiskGraph") => "float positions: \"0.0,0.0;1.0,0.0\"", + _ => "edge list: 0-1,1-2,2-3", + }, "Vec" => "comma-separated: 1,2,3", "Vec" => "semicolon-separated clauses: \"1,2;-1,3\"", "Vec>" => "semicolon-separated rows: \"1,0.5;0.5,2\"", @@ -41,12 +49,17 @@ fn type_format_hint(type_name: &str) -> &'static str { } } -fn example_for(canonical: &str) -> &'static str { +fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { match canonical { "MaximumIndependentSet" | "MinimumVertexCover" | "MaximumClique" - | "MinimumDominatingSet" => "--graph 0-1,1-2,2-3 --weights 1,1,1,1", + | "MinimumDominatingSet" => match graph_type { + Some("KingsSubgraph") => "--positions \"0,0;1,0;1,1;0,1\"", + Some("TriangularSubgraph") => "--positions \"0,0;0,1;1,0;1,1\"", + Some("UnitDiskGraph") => "--positions \"0,0;1,0;0.5,0.8\" --radius 1.5", + _ => "--graph 0-1,1-2,2-3 --weights 1,1,1,1", + }, "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { "--graph 0-1,1-2,2-3 --edge-weights 1,1,1" } @@ -60,7 +73,11 @@ fn example_for(canonical: &str) -> &'static str { } } -fn print_problem_help(canonical: &str) -> Result<()> { +fn print_problem_help(canonical: &str, graph_type: Option<&str>) -> Result<()> { + let is_geometry = matches!( + graph_type, + Some("KingsSubgraph" | "TriangularSubgraph" | "UnitDiskGraph") + ); let schemas = collect_schemas(); let schema = schemas.iter().find(|s| s.name == canonical); @@ -68,70 +85,86 @@ fn print_problem_help(canonical: &str) -> Result<()> { eprintln!("{}\n {}\n", canonical, s.description); eprintln!("Parameters:"); for field in &s.fields { - let hint = type_format_hint(&field.type_name); - eprintln!( - " --{:<16} {} ({})", - field.name.replace('_', "-"), - field.description, - 
hint - ); + // For geometry variants, show --positions instead of --graph + if field.type_name == "G" && is_geometry { + let hint = type_format_hint(&field.type_name, graph_type); + eprintln!(" --{:<16} {} ({hint})", "positions", field.description); + if graph_type == Some("UnitDiskGraph") { + eprintln!(" --{:<16} Distance threshold [default: 1.0]", "radius"); + } + } else { + let hint = type_format_hint(&field.type_name, graph_type); + eprintln!( + " --{:<16} {} ({})", + field.name.replace('_', "-"), + field.description, + hint + ); + } } } else { eprintln!("{canonical}\n"); eprintln!("No schema information available."); } - let example = example_for(canonical); + let example = example_for(canonical, graph_type); if !example.is_empty() { eprintln!("\nExample:"); - eprintln!(" pred create {} {}", canonical, example); + eprintln!( + " pred create {} {}", + match graph_type { + Some(g) => format!("{canonical}/{g}"), + None => canonical.to_string(), + }, + example + ); } Ok(()) } +/// Resolve the graph type from the variant map (e.g., "KingsSubgraph", "UnitDiskGraph", or "SimpleGraph"). +fn resolved_graph_type(variant: &BTreeMap) -> &str { + variant + .get("graph") + .map(|s| s.as_str()) + .unwrap_or("SimpleGraph") +} + pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { let spec = parse_problem_spec(&args.problem)?; let canonical = &spec.name; + // Resolve variant early so random and help can use it + let rgraph = problemreductions::rules::ReductionGraph::new(); + let known_variants = rgraph.variants_for(canonical); + let resolved_variant = if known_variants.is_empty() { + BTreeMap::new() + } else { + resolve_variant(&spec, &known_variants)? 
+ }; + let graph_type = resolved_graph_type(&resolved_variant); + if args.random { - return create_random(args, canonical, out); + return create_random(args, canonical, &resolved_variant, out); } // Show schema-driven help when no data flags are provided if all_data_flags_empty(args) { - return print_problem_help(canonical); + let gt = if graph_type != "SimpleGraph" { + Some(graph_type) + } else { + None + }; + return print_problem_help(canonical, gt); } - // Resolve variant from spec (e.g., MIS/KingsSubgraph → {graph: "KingsSubgraph", weight: "i32"}) - let graph = problemreductions::rules::ReductionGraph::new(); - let known_variants = graph.variants_for(canonical); - let resolved_variant = if known_variants.is_empty() { - BTreeMap::new() - } else { - resolve_variant(&spec, &known_variants)? - }; - let (data, variant) = match canonical.as_str() { // Graph problems with vertex weights "MaximumIndependentSet" | "MinimumVertexCover" | "MaximumClique" | "MinimumDominatingSet" => { - let (graph, n) = parse_graph(args).map_err(|e| { - anyhow::anyhow!( - "{e}\n\nUsage: pred create {} --graph 0-1,1-2,2-3 [--weights 1,1,1,1]", - args.problem - ) - })?; - let weights = parse_vertex_weights(args, n)?; - let data = match canonical.as_str() { - "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, - "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, - "MaximumClique" => ser(MaximumClique::new(graph, weights))?, - "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, - _ => unreachable!(), - }; - (data, resolved_variant.clone()) + create_vertex_weight_problem(args, canonical, graph_type, &resolved_variant)? 
} // Graph problems with edge weights @@ -235,7 +268,7 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { (ser(Factoring::new(m, n, target))?, resolved_variant.clone()) } - _ => bail!("{}", crate::problem_name::unknown_problem_error(&canonical)), + _ => bail!("{}", crate::problem_name::unknown_problem_error(canonical)), }; let output = ProblemJsonOutput { @@ -258,6 +291,81 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { Ok(()) } +/// Create a vertex-weight problem dispatching on geometry graph type. +fn create_vertex_weight_problem( + args: &CreateArgs, + canonical: &str, + graph_type: &str, + resolved_variant: &BTreeMap, +) -> Result<(serde_json::Value, BTreeMap)> { + match graph_type { + "KingsSubgraph" => { + let positions = parse_int_positions(args)?; + let n = positions.len(); + let graph = KingsSubgraph::new(positions); + let weights = parse_vertex_weights(args, n)?; + Ok(( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "TriangularSubgraph" => { + let positions = parse_int_positions(args)?; + let n = positions.len(); + let graph = TriangularSubgraph::new(positions); + let weights = parse_vertex_weights(args, n)?; + Ok(( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "UnitDiskGraph" => { + let positions = parse_float_positions(args)?; + let n = positions.len(); + let radius = args.radius.unwrap_or(1.0); + let graph = UnitDiskGraph::new(positions, radius); + let weights = parse_vertex_weights(args, n)?; + Ok(( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + _ => { + // SimpleGraph path (existing) + let (graph, n) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create {} --graph 0-1,1-2,2-3 [--weights 1,1,1,1]", + args.problem + ) + })?; + let weights = parse_vertex_weights(args, n)?; + let data = match canonical { + 
"MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, + "MaximumClique" => ser(MaximumClique::new(graph, weights))?, + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, + _ => unreachable!(), + }; + Ok((data, resolved_variant.clone())) + } + } +} + +/// Serialize a vertex-weight problem with a generic graph type. +fn ser_vertex_weight_problem_with( + canonical: &str, + graph: G, + weights: Vec, +) -> Result { + match canonical { + "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights)), + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights)), + "MaximumClique" => ser(MaximumClique::new(graph, weights)), + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights)), + _ => unreachable!(), + } +} + fn ser(problem: T) -> Result { Ok(serde_json::to_value(problem)?) } @@ -299,6 +407,50 @@ fn parse_graph(args: &CreateArgs) -> Result<(SimpleGraph, usize)> { Ok((SimpleGraph::new(num_vertices, edges), num_vertices)) } +/// Parse semicolon-separated x,y pairs from a string. +fn parse_positions(pos_str: &str, example: &str) -> Result> +where + T::Err: std::fmt::Display, +{ + pos_str + .split(';') + .map(|pair| { + let parts: Vec<&str> = pair.trim().split(',').collect(); + if parts.len() != 2 { + bail!( + "Invalid position '{}': expected format x,y (e.g., {example})", + pair.trim() + ); + } + let x: T = parts[0] + .trim() + .parse() + .map_err(|e| anyhow::anyhow!("Invalid x in '{}': {e}", pair.trim()))?; + let y: T = parts[1] + .trim() + .parse() + .map_err(|e| anyhow::anyhow!("Invalid y in '{}': {e}", pair.trim()))?; + Ok((x, y)) + }) + .collect() +} + +/// Parse `--positions` as integer grid positions. 
+fn parse_int_positions(args: &CreateArgs) -> Result> { + let pos_str = args.positions.as_deref().ok_or_else(|| { + anyhow::anyhow!("This variant requires --positions (e.g., \"0,0;1,0;1,1\")") + })?; + parse_positions(pos_str, "0,0") +} + +/// Parse `--positions` as float positions. +fn parse_float_positions(args: &CreateArgs) -> Result> { + let pos_str = args.positions.as_deref().ok_or_else(|| { + anyhow::anyhow!("This variant requires --positions (e.g., \"0.0,0.0;1.0,0.0;0.5,0.87\")") + })?; + parse_positions(pos_str, "0.0,0.0") +} + /// Parse `--weights` as vertex weights (i32), defaulting to all 1s. fn parse_vertex_weights(args: &CreateArgs, num_vertices: usize) -> Result> { match &args.weights { @@ -445,8 +597,57 @@ fn create_random_graph(num_vertices: usize, edge_prob: f64, seed: Option) - SimpleGraph::new(num_vertices, edges) } +/// LCG PRNG step — returns next state and a uniform f64 in [0, 1). +fn lcg_step(state: &mut u64) -> f64 { + *state = state + .wrapping_mul(6364136223846793005) + .wrapping_add(1442695040888963407); + (*state >> 33) as f64 / (1u64 << 31) as f64 +} + +/// Initialize LCG state from seed or system time. +fn lcg_init(seed: Option) -> u64 { + seed.unwrap_or_else(|| { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos() as u64 + }) +} + +/// Generate random unique integer positions on a grid for KingsSubgraph/TriangularSubgraph. +fn create_random_int_positions(num_vertices: usize, seed: Option) -> Vec<(i32, i32)> { + let mut state = lcg_init(seed); + let grid_size = (num_vertices as f64).sqrt().ceil() as i32 + 1; + let mut positions = std::collections::BTreeSet::new(); + while positions.len() < num_vertices { + let x = (lcg_step(&mut state) * grid_size as f64) as i32; + let y = (lcg_step(&mut state) * grid_size as f64) as i32; + positions.insert((x, y)); + } + positions.into_iter().collect() +} + +/// Generate random float positions in [0, sqrt(N)] x [0, sqrt(N)] for UnitDiskGraph. 
+fn create_random_float_positions(num_vertices: usize, seed: Option) -> Vec<(f64, f64)> { + let mut state = lcg_init(seed); + let side = (num_vertices as f64).sqrt(); + (0..num_vertices) + .map(|_| { + let x = lcg_step(&mut state) * side; + let y = lcg_step(&mut state) * side; + (x, y) + }) + .collect() +} + /// Handle `pred create --random ...` -fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Result<()> { +fn create_random( + args: &CreateArgs, + canonical: &str, + resolved_variant: &BTreeMap, + out: &OutputConfig, +) -> Result<()> { let num_vertices = args.num_vertices.ok_or_else(|| { anyhow::anyhow!( "--random requires --num-vertices\n\n\ @@ -454,13 +655,8 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu args.problem ) })?; - let edge_prob = args.edge_prob.unwrap_or(0.5); - if !(0.0..=1.0).contains(&edge_prob) { - bail!("--edge-prob must be between 0.0 and 1.0"); - } - let graph = create_random_graph(num_vertices, edge_prob, args.seed); - let num_edges = graph.num_edges(); + let graph_type = resolved_graph_type(resolved_variant); let (data, variant) = match canonical { // Graph problems with vertex weights @@ -469,19 +665,59 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu | "MaximumClique" | "MinimumDominatingSet" => { let weights = vec![1i32; num_vertices]; - let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); - let data = match canonical { - "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, - "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, - "MaximumClique" => ser(MaximumClique::new(graph, weights))?, - "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, - _ => unreachable!(), - }; - (data, variant) + match graph_type { + "KingsSubgraph" => { + let positions = create_random_int_positions(num_vertices, args.seed); + let graph = KingsSubgraph::new(positions); + ( + 
ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "TriangularSubgraph" => { + let positions = create_random_int_positions(num_vertices, args.seed); + let graph = TriangularSubgraph::new(positions); + ( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "UnitDiskGraph" => { + let radius = args.radius.unwrap_or(1.0); + let positions = create_random_float_positions(num_vertices, args.seed); + let graph = UnitDiskGraph::new(positions, radius); + ( + ser_vertex_weight_problem_with(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + _ => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let data = match canonical { + "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights))?, + "MaximumClique" => ser(MaximumClique::new(graph, weights))?, + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights))?, + _ => unreachable!(), + }; + (data, variant) + } + } } // Graph problems with edge weights "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, args.seed); + let num_edges = graph.num_edges(); let edge_weights = vec![1i32; num_edges]; let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let data = match canonical { @@ -495,6 +731,12 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu // SpinGlass "SpinGlass" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + 
if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, args.seed); + let num_edges = graph.num_edges(); let couplings = vec![1i32; num_edges]; let fields = vec![0i32; num_vertices]; let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); @@ -506,6 +748,11 @@ fn create_random(args: &CreateArgs, canonical: &str, out: &OutputConfig) -> Resu // KColoring "KColoring" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, args.seed); let k = args.k.unwrap_or(3); let variant; let data; diff --git a/problemreductions-cli/src/commands/graph.rs b/problemreductions-cli/src/commands/graph.rs index fc3bc30b5..da05d4cd4 100644 --- a/problemreductions-cli/src/commands/graph.rs +++ b/problemreductions-cli/src/commands/graph.rs @@ -163,11 +163,21 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { )); for e in &outgoing { text.push_str(&format!( - " {} {} {}\n", + " {} {} {}", fmt_node(&graph, e.source_name, &e.source_variant), crate::output::fmt_outgoing("\u{2192}"), fmt_node(&graph, e.target_name, &e.target_variant), )); + let oh_parts: Vec = e + .overhead + .output_size + .iter() + .map(|(field, poly)| format!("{field} = {poly}")) + .collect(); + if !oh_parts.is_empty() { + text.push_str(&format!(" ({})", oh_parts.join(", "))); + } + text.push('\n'); } text.push_str(&format!( @@ -176,23 +186,44 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { )); for e in &incoming { text.push_str(&format!( - " {} {} {}\n", + " {} {} {}", fmt_node(&graph, e.source_name, &e.source_variant), crate::output::fmt_outgoing("\u{2192}"), fmt_node(&graph, e.target_name, &e.target_variant), )); + let oh_parts: Vec = e + .overhead + .output_size + .iter() + .map(|(field, poly)| format!("{field} = {poly}")) + 
.collect(); + if !oh_parts.is_empty() { + text.push_str(&format!(" ({})", oh_parts.join(", "))); + } + text.push('\n'); } + let edge_to_json = |e: &problemreductions::rules::ReductionEdgeInfo| { + let overhead: Vec = e + .overhead + .output_size + .iter() + .map(|(field, poly)| { + serde_json::json!({"field": field, "formula": poly.to_string()}) + }) + .collect(); + serde_json::json!({ + "source": {"name": e.source_name, "variant": e.source_variant}, + "target": {"name": e.target_name, "variant": e.target_variant}, + "overhead": overhead, + }) + }; let mut json = serde_json::json!({ "name": spec.name, "variants": variants, "size_fields": size_fields, - "reduces_to": outgoing.iter().map(|e| { - serde_json::json!({"source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}}) - }).collect::>(), - "reduces_from": incoming.iter().map(|e| { - serde_json::json!({"source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}}) - }).collect::>(), + "reduces_to": outgoing.iter().map(&edge_to_json).collect::>(), + "reduces_from": incoming.iter().map(&edge_to_json).collect::>(), }); if let Some(s) = schema { if let (Some(obj), Ok(schema_val)) = (json.as_object_mut(), serde_json::to_value(s)) { diff --git a/problemreductions-cli/src/mcp/tools.rs b/problemreductions-cli/src/mcp/tools.rs index d1104d110..19f34c571 100644 --- a/problemreductions-cli/src/mcp/tools.rs +++ b/problemreductions-cli/src/mcp/tools.rs @@ -9,7 +9,9 @@ use problemreductions::registry::collect_schemas; use problemreductions::rules::{ CustomCost, MinimizeSteps, ReductionGraph, ReductionPath, TraversalDirection, }; -use problemreductions::topology::{Graph, SimpleGraph}; +use problemreductions::topology::{ + Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, +}; use problemreductions::types::ProblemSize; use problemreductions::variant::{K2, K3, KN}; use 
rmcp::handler::server::router::tool::ToolRouter; @@ -22,7 +24,7 @@ use crate::dispatch::{ load_problem, serialize_any_problem, PathStep, ProblemJson, ProblemJsonOutput, ReductionBundle, }; use crate::problem_name::{ - aliases_for, parse_problem_spec, resolve_alias, resolve_variant, unknown_problem_error, + aliases_for, parse_problem_spec, resolve_variant, unknown_problem_error, }; // --------------------------------------------------------------------------- @@ -68,7 +70,7 @@ pub struct CreateProblemParams { )] pub problem_type: String, #[schemars( - description = "Problem parameters as JSON object. Graph problems: {\"edges\": \"0-1,1-2\", \"weights\": \"1,2,3\"}. SAT: {\"num_vars\": 3, \"clauses\": \"1,2;-1,3\"}. QUBO: {\"matrix\": \"1,0.5;0.5,2\"}. KColoring: {\"edges\": \"0-1,1-2\", \"k\": 3}. Factoring: {\"target\": 15, \"bits_m\": 4, \"bits_n\": 4}. Random graph: {\"random\": true, \"num_vertices\": 10, \"edge_prob\": 0.3}" + description = "Problem parameters as JSON object. Graph problems: {\"edges\": \"0-1,1-2\", \"weights\": \"1,2,3\"}. SAT: {\"num_vars\": 3, \"clauses\": \"1,2;-1,3\"}. QUBO: {\"matrix\": \"1,0.5;0.5,2\"}. KColoring: {\"edges\": \"0-1,1-2\", \"k\": 3}. Factoring: {\"target\": 15, \"bits_m\": 4, \"bits_n\": 4}. Random graph: {\"random\": true, \"num_vertices\": 10, \"edge_prob\": 0.3}. Geometry graphs (use with MIS/KingsSubgraph etc.): {\"positions\": \"0,0;1,0;1,1\"}. 
UnitDiskGraph: {\"positions\": \"0.0,0.0;1.0,0.0\", \"radius\": 1.5}" )] pub params: serde_json::Value, } @@ -382,7 +384,21 @@ impl McpServer { problem_type: &str, params: &serde_json::Value, ) -> anyhow::Result { - let canonical = resolve_alias(problem_type); + let spec = parse_problem_spec(problem_type)?; + let canonical = spec.name.clone(); + + // Resolve variant from spec + let rgraph = ReductionGraph::new(); + let known_variants = rgraph.variants_for(&canonical); + let resolved_variant = if known_variants.is_empty() { + BTreeMap::new() + } else { + resolve_variant(&spec, &known_variants)? + }; + let graph_type = resolved_variant + .get("graph") + .map(|s| s.as_str()) + .unwrap_or("SimpleGraph"); // Check for random generation let is_random = params @@ -391,7 +407,7 @@ impl McpServer { .unwrap_or(false); if is_random { - return self.create_random_inner(&canonical, params); + return self.create_random_inner(&canonical, &resolved_variant, params); } let (data, variant) = match canonical.as_str() { @@ -399,9 +415,7 @@ impl McpServer { | "MinimumVertexCover" | "MaximumClique" | "MinimumDominatingSet" => { - let (graph, n) = parse_graph_from_params(params)?; - let weights = parse_vertex_weights_from_params(params, n)?; - ser_vertex_weight_problem(&canonical, graph, weights)? + create_vertex_weight_from_params(&canonical, graph_type, &resolved_variant, params)? 
} "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { @@ -513,6 +527,7 @@ impl McpServer { fn create_random_inner( &self, canonical: &str, + resolved_variant: &BTreeMap, params: &serde_json::Value, ) -> anyhow::Result { let num_vertices = params @@ -522,17 +537,11 @@ impl McpServer { .ok_or_else(|| { anyhow::anyhow!("Random generation requires 'num_vertices' parameter") })?; - let edge_prob = params - .get("edge_prob") - .and_then(|v| v.as_f64()) - .unwrap_or(0.5); - if !(0.0..=1.0).contains(&edge_prob) { - anyhow::bail!("edge_prob must be between 0.0 and 1.0"); - } let seed = params.get("seed").and_then(|v| v.as_u64()); - - let graph = create_random_graph(num_vertices, edge_prob, seed); - let num_edges = graph.num_edges(); + let graph_type = resolved_variant + .get("graph") + .map(|s| s.as_str()) + .unwrap_or("SimpleGraph"); let (data, variant) = match canonical { "MaximumIndependentSet" @@ -540,13 +549,68 @@ impl McpServer { | "MaximumClique" | "MinimumDominatingSet" => { let weights = vec![1i32; num_vertices]; - ser_vertex_weight_problem(canonical, graph, weights)? 
+ match graph_type { + "KingsSubgraph" => { + let positions = create_random_int_positions(num_vertices, seed); + let graph = KingsSubgraph::new(positions); + ( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "TriangularSubgraph" => { + let positions = create_random_int_positions(num_vertices, seed); + let graph = TriangularSubgraph::new(positions); + ( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + "UnitDiskGraph" => { + let radius = params.get("radius").and_then(|v| v.as_f64()).unwrap_or(1.0); + let positions = create_random_float_positions(num_vertices, seed); + let graph = UnitDiskGraph::new(positions, radius); + ( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + ) + } + _ => { + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, seed); + ser_vertex_weight_problem(canonical, graph, weights)? + } + } } "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, seed); + let num_edges = graph.num_edges(); let edge_weights = vec![1i32; num_edges]; ser_edge_weight_problem(canonical, graph, edge_weights)? 
} "SpinGlass" => { + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, seed); + let num_edges = graph.num_edges(); let couplings = vec![1i32; num_edges]; let fields = vec![0i32; num_vertices]; let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); @@ -556,6 +620,14 @@ impl McpServer { ) } "KColoring" => { + let edge_prob = params + .get("edge_prob") + .and_then(|v| v.as_f64()) + .unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + anyhow::bail!("edge_prob must be between 0.0 and 1.0"); + } + let graph = create_random_graph(num_vertices, edge_prob, seed); let k = params .get("k") .and_then(|v| v.as_u64()) @@ -1071,6 +1143,163 @@ fn ser_kcoloring( } } +/// Serialize a vertex-weight problem with a generic graph type. +fn ser_vertex_weight_problem_generic( + canonical: &str, + graph: G, + weights: Vec, +) -> anyhow::Result { + match canonical { + "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights)), + "MinimumVertexCover" => ser(MinimumVertexCover::new(graph, weights)), + "MaximumClique" => ser(MaximumClique::new(graph, weights)), + "MinimumDominatingSet" => ser(MinimumDominatingSet::new(graph, weights)), + _ => unreachable!(), + } +} + +/// Create a vertex-weight problem from MCP params, dispatching on graph type. 
+fn create_vertex_weight_from_params( + canonical: &str, + graph_type: &str, + resolved_variant: &BTreeMap, + params: &serde_json::Value, +) -> anyhow::Result<(serde_json::Value, BTreeMap)> { + match graph_type { + "KingsSubgraph" => { + let positions = parse_int_positions_from_params(params)?; + let n = positions.len(); + let graph = KingsSubgraph::new(positions); + let weights = parse_vertex_weights_from_params(params, n)?; + Ok(( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "TriangularSubgraph" => { + let positions = parse_int_positions_from_params(params)?; + let n = positions.len(); + let graph = TriangularSubgraph::new(positions); + let weights = parse_vertex_weights_from_params(params, n)?; + Ok(( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + "UnitDiskGraph" => { + let positions = parse_float_positions_from_params(params)?; + let n = positions.len(); + let radius = params.get("radius").and_then(|v| v.as_f64()).unwrap_or(1.0); + let graph = UnitDiskGraph::new(positions, radius); + let weights = parse_vertex_weights_from_params(params, n)?; + Ok(( + ser_vertex_weight_problem_generic(canonical, graph, weights)?, + resolved_variant.clone(), + )) + } + _ => { + let (graph, n) = parse_graph_from_params(params)?; + let weights = parse_vertex_weights_from_params(params, n)?; + ser_vertex_weight_problem(canonical, graph, weights) + } + } +} + +/// Parse semicolon-separated x,y pairs from a string. 
+fn parse_positions(pos_str: &str) -> anyhow::Result> +where + T::Err: std::fmt::Display, +{ + pos_str + .split(';') + .map(|pair| { + let parts: Vec<&str> = pair.trim().split(',').collect(); + if parts.len() != 2 { + anyhow::bail!("Invalid position '{}': expected format x,y", pair.trim()); + } + let x: T = parts[0] + .trim() + .parse() + .map_err(|e| anyhow::anyhow!("Invalid x in '{}': {e}", pair.trim()))?; + let y: T = parts[1] + .trim() + .parse() + .map_err(|e| anyhow::anyhow!("Invalid y in '{}': {e}", pair.trim()))?; + Ok((x, y)) + }) + .collect() +} + +/// Extract and parse 'positions' param as integer grid positions. +fn parse_int_positions_from_params(params: &serde_json::Value) -> anyhow::Result> { + let pos_str = params + .get("positions") + .and_then(|v| v.as_str()) + .ok_or_else(|| { + anyhow::anyhow!("This variant requires 'positions' parameter (e.g., \"0,0;1,0;1,1\")") + })?; + parse_positions(pos_str) +} + +/// Extract and parse 'positions' param as float positions. +fn parse_float_positions_from_params( + params: &serde_json::Value, +) -> anyhow::Result> { + let pos_str = params + .get("positions") + .and_then(|v| v.as_str()) + .ok_or_else(|| { + anyhow::anyhow!( + "This variant requires 'positions' parameter (e.g., \"0.0,0.0;1.0,0.0\")" + ) + })?; + parse_positions(pos_str) +} + +/// Generate random unique integer positions on a grid. +fn create_random_int_positions(num_vertices: usize, seed: Option) -> Vec<(i32, i32)> { + let mut state = lcg_init(seed); + let grid_size = (num_vertices as f64).sqrt().ceil() as i32 + 1; + let mut positions = std::collections::BTreeSet::new(); + while positions.len() < num_vertices { + let x = (lcg_step(&mut state) * grid_size as f64) as i32; + let y = (lcg_step(&mut state) * grid_size as f64) as i32; + positions.insert((x, y)); + } + positions.into_iter().collect() +} + +/// Generate random float positions in [0, sqrt(N)] x [0, sqrt(N)]. 
+fn create_random_float_positions(num_vertices: usize, seed: Option) -> Vec<(f64, f64)> { + let mut state = lcg_init(seed); + let side = (num_vertices as f64).sqrt(); + (0..num_vertices) + .map(|_| { + let x = lcg_step(&mut state) * side; + let y = lcg_step(&mut state) * side; + (x, y) + }) + .collect() +} + +/// LCG PRNG step. +fn lcg_step(state: &mut u64) -> f64 { + *state = state + .wrapping_mul(6364136223846793005) + .wrapping_add(1442695040888963407); + (*state >> 33) as f64 / (1u64 << 31) as f64 +} + +/// Initialize LCG state. +fn lcg_init(seed: Option) -> u64 { + seed.unwrap_or_else(|| { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos() as u64 + }) +} + /// Parse `edges` field from JSON params into a SimpleGraph. fn parse_graph_from_params(params: &serde_json::Value) -> anyhow::Result<(SimpleGraph, usize)> { let edges_str = params diff --git a/problemreductions-cli/tests/cli_tests.rs b/problemreductions-cli/tests/cli_tests.rs index dd0395cf1..6434bd4d1 100644 --- a/problemreductions-cli/tests/cli_tests.rs +++ b/problemreductions-cli/tests/cli_tests.rs @@ -2552,3 +2552,242 @@ fn test_solve_timeout_zero_means_no_limit() { std::fs::remove_file(&problem_file).ok(); } + +// --------------------------------------------------------------------------- +// Geometry-based graph tests +// --------------------------------------------------------------------------- + +#[test] +fn test_create_mis_kings_subgraph() { + let output = pred() + .args([ + "create", + "MIS/KingsSubgraph", + "--positions", + "0,0;1,0;1,1;0,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "KingsSubgraph"); + assert!(json["data"].is_object()); +} + 
+#[test] +fn test_create_mis_triangular_subgraph() { + let output = pred() + .args([ + "create", + "MIS/TriangularSubgraph", + "--positions", + "0,0;0,1;1,0;1,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "TriangularSubgraph"); +} + +#[test] +fn test_create_mis_unit_disk_graph() { + let output = pred() + .args([ + "create", + "MIS/UnitDiskGraph", + "--positions", + "0,0;1,0;0.5,0.8", + "--radius", + "1.5", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "UnitDiskGraph"); +} + +#[test] +fn test_create_mvc_kings_subgraph_unsupported_variant() { + // MVC doesn't have a KingsSubgraph variant registered + let output = pred() + .args(["create", "MVC/KingsSubgraph", "--positions", "0,0;1,0;1,1"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!( + stderr.contains("No variant"), + "should mention variant mismatch: {stderr}" + ); +} + +#[test] +fn test_create_mis_unit_disk_graph_default_radius() { + let output = pred() + .args([ + "create", + "MIS/UnitDiskGraph", + "--positions", + "0,0;0.5,0;1,0", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], 
"MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "UnitDiskGraph"); +} + +#[test] +fn test_create_mis_kings_subgraph_with_weights() { + let output = pred() + .args([ + "create", + "MIS/KingsSubgraph", + "--positions", + "0,0;1,0;1,1", + "--weights", + "2,3,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "KingsSubgraph"); +} + +#[test] +fn test_create_random_kings_subgraph() { + let output = pred() + .args([ + "create", + "MIS/KingsSubgraph", + "--random", + "--num-vertices", + "10", + "--seed", + "42", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "KingsSubgraph"); +} + +#[test] +fn test_create_random_triangular_subgraph() { + let output = pred() + .args([ + "create", + "MIS/TriangularSubgraph", + "--random", + "--num-vertices", + "8", + "--seed", + "42", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "TriangularSubgraph"); +} + +#[test] +fn test_create_random_unit_disk_graph() { + let output = pred() + .args([ + "create", + "MIS/UnitDiskGraph", + "--random", + "--num-vertices", + "10", + "--radius", + "1.5", + "--seed", + "42", + ]) + .output() 
+ .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert_eq!(json["variant"]["graph"], "UnitDiskGraph"); +} + +#[test] +fn test_create_kings_subgraph_help() { + let output = pred() + .args(["create", "MIS/KingsSubgraph"]) + .output() + .unwrap(); + assert!(output.status.success()); + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!( + stderr.contains("positions") || stderr.contains("MaximumIndependentSet"), + "stderr should show help: {stderr}" + ); +} + +#[test] +fn test_create_geometry_graph_missing_positions() { + let output = pred() + .args(["create", "MIS/KingsSubgraph", "--weights", "1,2,3"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!( + stderr.contains("--positions"), + "should mention --positions: {stderr}" + ); +} diff --git a/src/rules/graph.rs b/src/rules/graph.rs index 75c1499f6..f0f23b355 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -29,6 +29,7 @@ pub struct ReductionEdgeInfo { pub source_variant: BTreeMap, pub target_name: &'static str, pub target_variant: BTreeMap, + pub overhead: ReductionOverhead, } /// Internal edge data combining overhead and executable reduce function. 
@@ -616,6 +617,7 @@ impl ReductionGraph { source_variant: src.variant.clone(), target_name: dst.name, target_variant: dst.variant.clone(), + overhead: self.graph[e.id()].overhead.clone(), } }) .collect() @@ -662,6 +664,7 @@ impl ReductionGraph { source_variant: src.variant.clone(), target_name: dst.name, target_variant: dst.variant.clone(), + overhead: self.graph[e.id()].overhead.clone(), } }) .collect() From fba08b4768c6c7be4c1ead76cc3ab1c22661bae6 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 14:44:41 +0800 Subject: [PATCH 04/28] feat: add DeclaredVariant marker trait Co-Authored-By: Claude Opus 4.6 --- src/traits.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/traits.rs b/src/traits.rs index 635718c0c..b4f38dcba 100644 --- a/src/traits.rs +++ b/src/traits.rs @@ -42,6 +42,13 @@ pub trait OptimizationProblem: Problem {} +/// Marker trait for explicitly declared problem variants. +/// +/// Implemented automatically by [`declare_variants!`] for each concrete type. +/// The [`#[reduction]`] proc macro checks this trait at compile time to ensure +/// all reduction source/target types have been declared. 
+pub trait DeclaredVariant {} + #[cfg(test)] #[path = "unit_tests/traits.rs"] mod tests; From 227ab672276f82955e5a0ccf43ec3a10d1ff59af Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 14:45:38 +0800 Subject: [PATCH 05/28] feat: add VariantEntry inventory struct Co-Authored-By: Claude Opus 4.6 --- src/registry/mod.rs | 2 ++ src/registry/variant.rs | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 src/registry/variant.rs diff --git a/src/registry/mod.rs b/src/registry/mod.rs index f14a02901..e7bad24e4 100644 --- a/src/registry/mod.rs +++ b/src/registry/mod.rs @@ -46,6 +46,8 @@ mod info; mod schema; +pub mod variant; pub use info::{ComplexityClass, FieldInfo, ProblemInfo, ProblemMetadata}; pub use schema::{collect_schemas, FieldInfoJson, ProblemSchemaEntry, ProblemSchemaJson}; +pub use variant::VariantEntry; diff --git a/src/registry/variant.rs b/src/registry/variant.rs new file mode 100644 index 000000000..d73a65e8c --- /dev/null +++ b/src/registry/variant.rs @@ -0,0 +1,33 @@ +//! Explicit variant registration via inventory. + +/// A registered problem variant entry. +/// +/// Submitted by [`declare_variants!`] for each concrete problem type. +/// The reduction graph uses these entries to build nodes with complexity metadata. +pub struct VariantEntry { + /// Problem name (from `Problem::NAME`). + pub name: &'static str, + /// Function returning variant key-value pairs (from `Problem::variant()`). + pub variant_fn: fn() -> Vec<(&'static str, &'static str)>, + /// Worst-case time complexity expression (e.g., `"2^num_vertices"`). + pub complexity: &'static str, +} + +impl VariantEntry { + /// Get the variant by calling the function. 
+ pub fn variant(&self) -> Vec<(&'static str, &'static str)> { + (self.variant_fn)() + } +} + +impl std::fmt::Debug for VariantEntry { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("VariantEntry") + .field("name", &self.name) + .field("variant", &self.variant()) + .field("complexity", &self.complexity) + .finish() + } +} + +inventory::collect!(VariantEntry); From a1c302f021a36ff9022fc4f2ea4e36c5c11ee545 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 14:47:18 +0800 Subject: [PATCH 06/28] feat: add declare_variants! macro Co-Authored-By: Claude Opus 4.6 --- src/lib.rs | 3 +++ src/variant.rs | 31 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 278b0f3d3..e76634374 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -68,6 +68,9 @@ pub use types::{ // Re-export proc macro for reduction registration pub use problemreductions_macros::reduction; +// Re-export inventory so `declare_variants!` can use `$crate::inventory::submit!` +pub use inventory; + #[cfg(test)] #[path = "unit_tests/graph_models.rs"] mod test_graph_models; diff --git a/src/variant.rs b/src/variant.rs index fcac4dc0c..36e9d3caa 100644 --- a/src/variant.rs +++ b/src/variant.rs @@ -146,6 +146,37 @@ impl_variant_param!(K3, "k", parent: KN, cast: |_| KN, k: Some(3)); impl_variant_param!(K2, "k", parent: KN, cast: |_| KN, k: Some(2)); impl_variant_param!(K1, "k", parent: KN, cast: |_| KN, k: Some(1)); +/// Declare explicit problem variants with per-variant complexity metadata. +/// +/// Each entry generates: +/// 1. A `DeclaredVariant` trait impl for compile-time checking +/// 2. A `VariantEntry` inventory submission for runtime graph building +/// +/// # Example +/// +/// ```ignore +/// declare_variants! { +/// MaximumIndependentSet => "2^num_vertices", +/// MaximumIndependentSet => "2^num_vertices", +/// } +/// ``` +#[macro_export] +macro_rules! 
declare_variants { + ($($ty:ty => $complexity:expr),+ $(,)?) => { + $( + impl $crate::traits::DeclaredVariant for $ty {} + + $crate::inventory::submit! { + $crate::registry::VariantEntry { + name: <$ty as $crate::traits::Problem>::NAME, + variant_fn: || <$ty as $crate::traits::Problem>::variant(), + complexity: $complexity, + } + } + )+ + }; +} + #[cfg(test)] #[path = "unit_tests/variant.rs"] mod tests; From a917ff0fd3d2d4dea83f679ed6d24ecda76a5249 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 14:52:00 +0800 Subject: [PATCH 07/28] feat: add declare_variants! to graph model files Co-Authored-By: Claude Opus 4.6 --- src/models/graph/kcoloring.rs | 12 ++++++++++-- src/models/graph/max_cut.rs | 6 +++++- src/models/graph/maximal_is.rs | 6 +++++- src/models/graph/maximum_clique.rs | 6 +++++- src/models/graph/maximum_independent_set.rs | 9 ++++++++- src/models/graph/maximum_matching.rs | 6 +++++- src/models/graph/minimum_dominating_set.rs | 6 +++++- src/models/graph/minimum_vertex_cover.rs | 6 +++++- src/models/graph/traveling_salesman.rs | 6 +++++- 9 files changed, 53 insertions(+), 10 deletions(-) diff --git a/src/models/graph/kcoloring.rs b/src/models/graph/kcoloring.rs index 281c1fbd6..5b3c160d0 100644 --- a/src/models/graph/kcoloring.rs +++ b/src/models/graph/kcoloring.rs @@ -4,9 +4,9 @@ //! such that no two adjacent vertices have the same color. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{Problem, SatisfactionProblem}; -use crate::variant::{KValue, VariantParam, KN}; +use crate::variant::{KValue, VariantParam, K2, K3, K4, K5, KN}; use serde::{Deserialize, Serialize}; inventory::submit! { @@ -183,6 +183,14 @@ pub(crate) fn is_valid_coloring( true } +crate::declare_variants! 
{ + KColoring => "k^num_vertices", + KColoring => "2^num_vertices", + KColoring => "3^num_vertices", + KColoring => "4^num_vertices", + KColoring => "5^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/kcoloring.rs"] mod tests; diff --git a/src/models/graph/max_cut.rs b/src/models/graph/max_cut.rs index 6024a33f8..3b8c9c210 100644 --- a/src/models/graph/max_cut.rs +++ b/src/models/graph/max_cut.rs @@ -4,7 +4,7 @@ //! that maximizes the total weight of edges crossing the partition. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -214,6 +214,10 @@ where total } +crate::declare_variants! { + MaxCut => "2^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/max_cut.rs"] mod tests; diff --git a/src/models/graph/maximal_is.rs b/src/models/graph/maximal_is.rs index dee4722ba..9b39f89be 100644 --- a/src/models/graph/maximal_is.rs +++ b/src/models/graph/maximal_is.rs @@ -4,7 +4,7 @@ //! cannot be extended by adding any other vertex. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -215,6 +215,10 @@ pub(crate) fn is_maximal_independent_set(graph: &G, selected: &[bool]) true } +crate::declare_variants! { + MaximalIS => "2^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/maximal_is.rs"] mod tests; diff --git a/src/models/graph/maximum_clique.rs b/src/models/graph/maximum_clique.rs index e293b037f..223aacecb 100644 --- a/src/models/graph/maximum_clique.rs +++ b/src/models/graph/maximum_clique.rs @@ -4,7 +4,7 @@ //! such that all vertices in the subset are pairwise adjacent. 
use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -170,6 +170,10 @@ fn is_clique_config(graph: &G, config: &[usize]) -> bool { true } +crate::declare_variants! { + MaximumClique => "2^num_vertices", +} + /// Check if a set of vertices forms a clique. /// /// # Arguments diff --git a/src/models/graph/maximum_independent_set.rs b/src/models/graph/maximum_independent_set.rs index 2cd2802ff..ef70cf6ae 100644 --- a/src/models/graph/maximum_independent_set.rs +++ b/src/models/graph/maximum_independent_set.rs @@ -4,7 +4,7 @@ //! such that no two vertices in the subset are adjacent. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -159,6 +159,13 @@ fn is_independent_set_config(graph: &G, config: &[usize]) -> bool { true } +crate::declare_variants! { + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", +} + /// Check if a set of vertices forms an independent set. /// /// # Arguments diff --git a/src/models/graph/maximum_matching.rs b/src/models/graph/maximum_matching.rs index e7b75d3f9..a13abfd24 100644 --- a/src/models/graph/maximum_matching.rs +++ b/src/models/graph/maximum_matching.rs @@ -4,7 +4,7 @@ //! such that no two edges share a vertex. 
use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -219,6 +219,10 @@ where } } +crate::declare_variants! { + MaximumMatching => "2^num_vertices", +} + /// Check if a selection of edges forms a valid matching. /// /// # Panics diff --git a/src/models/graph/minimum_dominating_set.rs b/src/models/graph/minimum_dominating_set.rs index 023f3f713..65d77cdcd 100644 --- a/src/models/graph/minimum_dominating_set.rs +++ b/src/models/graph/minimum_dominating_set.rs @@ -4,7 +4,7 @@ //! such that every vertex is either in the set or adjacent to a vertex in the set. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -169,6 +169,10 @@ where } } +crate::declare_variants! { + MinimumDominatingSet => "2^num_vertices", +} + /// Check if a set of vertices is a dominating set. /// /// # Panics diff --git a/src/models/graph/minimum_vertex_cover.rs b/src/models/graph/minimum_vertex_cover.rs index 757e926bc..60ed20607 100644 --- a/src/models/graph/minimum_vertex_cover.rs +++ b/src/models/graph/minimum_vertex_cover.rs @@ -4,7 +4,7 @@ //! such that every edge has at least one endpoint in the subset. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -156,6 +156,10 @@ fn is_vertex_cover_config(graph: &G, config: &[usize]) -> bool { true } +crate::declare_variants! { + MinimumVertexCover => "2^num_vertices", +} + /// Check if a set of vertices forms a vertex cover. 
/// /// # Arguments diff --git a/src/models/graph/traveling_salesman.rs b/src/models/graph/traveling_salesman.rs index edb3fbaf6..b66b16e15 100644 --- a/src/models/graph/traveling_salesman.rs +++ b/src/models/graph/traveling_salesman.rs @@ -4,7 +4,7 @@ //! that visits every vertex exactly once. use crate::registry::{FieldInfo, ProblemSchemaEntry}; -use crate::topology::Graph; +use crate::topology::{Graph, SimpleGraph}; use crate::traits::{OptimizationProblem, Problem}; use crate::types::{Direction, SolutionSize, WeightElement}; use num_traits::Zero; @@ -252,6 +252,10 @@ pub(crate) fn is_hamiltonian_cycle(graph: &G, selected: &[bool]) -> bo visit_count == n } +crate::declare_variants! { + TravelingSalesman => "num_vertices!", +} + #[cfg(test)] #[path = "../../unit_tests/models/graph/traveling_salesman.rs"] mod tests; From b955a216fe2e05d52fe2e933bd98fe2692e2308a Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 14:55:57 +0800 Subject: [PATCH 08/28] feat: add declare_variants! to remaining model files Adds variant declarations for optimization (QUBO, SpinGlass, ILP), satisfiability (Satisfiability, KSatisfiability), set (MaximumSetPacking, MinimumSetCovering), and specialized (CircuitSAT, Factoring) models. Co-Authored-By: Claude Opus 4.6 --- src/models/optimization/ilp.rs | 4 ++++ src/models/optimization/qubo.rs | 4 ++++ src/models/optimization/spin_glass.rs | 5 +++++ src/models/satisfiability/ksat.rs | 8 +++++++- src/models/satisfiability/sat.rs | 4 ++++ src/models/set/maximum_set_packing.rs | 5 +++++ src/models/set/minimum_set_covering.rs | 4 ++++ src/models/specialized/circuit.rs | 4 ++++ src/models/specialized/factoring.rs | 4 ++++ 9 files changed, 41 insertions(+), 1 deletion(-) diff --git a/src/models/optimization/ilp.rs b/src/models/optimization/ilp.rs index 7f9776692..15dd5a89b 100644 --- a/src/models/optimization/ilp.rs +++ b/src/models/optimization/ilp.rs @@ -376,6 +376,10 @@ impl OptimizationProblem for ILP { } } +crate::declare_variants! 
{ + ILP => "exp(num_variables)", +} + #[cfg(test)] #[path = "../../unit_tests/models/optimization/ilp.rs"] mod tests; diff --git a/src/models/optimization/qubo.rs b/src/models/optimization/qubo.rs index d3bb01c39..211071881 100644 --- a/src/models/optimization/qubo.rs +++ b/src/models/optimization/qubo.rs @@ -188,6 +188,10 @@ where } } +crate::declare_variants! { + QUBO => "2^num_vars", +} + #[cfg(test)] #[path = "../../unit_tests/models/optimization/qubo.rs"] mod tests; diff --git a/src/models/optimization/spin_glass.rs b/src/models/optimization/spin_glass.rs index 5259f5798..81464d120 100644 --- a/src/models/optimization/spin_glass.rs +++ b/src/models/optimization/spin_glass.rs @@ -250,6 +250,11 @@ where } } +crate::declare_variants! { + SpinGlass => "2^num_vertices", + SpinGlass => "2^num_vertices", +} + #[cfg(test)] #[path = "../../unit_tests/models/optimization/spin_glass.rs"] mod tests; diff --git a/src/models/satisfiability/ksat.rs b/src/models/satisfiability/ksat.rs index 0d74d7e01..1273548ac 100644 --- a/src/models/satisfiability/ksat.rs +++ b/src/models/satisfiability/ksat.rs @@ -7,7 +7,7 @@ use crate::registry::{FieldInfo, ProblemSchemaEntry}; use crate::traits::{Problem, SatisfactionProblem}; -use crate::variant::KValue; +use crate::variant::{KValue, K2, K3, KN}; use serde::{Deserialize, Serialize}; use super::CNFClause; @@ -183,6 +183,12 @@ impl Problem for KSatisfiability { impl SatisfactionProblem for KSatisfiability {} +crate::declare_variants! 
{ + KSatisfiability => "2^num_variables", + KSatisfiability => "2^num_variables", + KSatisfiability => "2^num_variables", +} + #[cfg(test)] #[path = "../../unit_tests/models/satisfiability/ksat.rs"] mod tests; diff --git a/src/models/satisfiability/sat.rs b/src/models/satisfiability/sat.rs index 380a0a35c..4401008b2 100644 --- a/src/models/satisfiability/sat.rs +++ b/src/models/satisfiability/sat.rs @@ -195,6 +195,10 @@ impl Problem for Satisfiability { impl SatisfactionProblem for Satisfiability {} +crate::declare_variants! { + Satisfiability => "2^num_variables", +} + /// Check if an assignment satisfies a SAT formula. /// /// # Arguments diff --git a/src/models/set/maximum_set_packing.rs b/src/models/set/maximum_set_packing.rs index 55a1af2ab..19719fa31 100644 --- a/src/models/set/maximum_set_packing.rs +++ b/src/models/set/maximum_set_packing.rs @@ -173,6 +173,11 @@ where } } +crate::declare_variants! { + MaximumSetPacking => "2^num_sets", + MaximumSetPacking => "2^num_sets", +} + /// Check if a selection forms a valid set packing (pairwise disjoint). fn is_valid_packing(sets: &[Vec], config: &[usize]) -> bool { let selected_sets: Vec<_> = config diff --git a/src/models/set/minimum_set_covering.rs b/src/models/set/minimum_set_covering.rs index 90f281b73..c37f34d42 100644 --- a/src/models/set/minimum_set_covering.rs +++ b/src/models/set/minimum_set_covering.rs @@ -178,6 +178,10 @@ where } } +crate::declare_variants! { + MinimumSetCovering => "2^num_sets", +} + /// Check if a selection of sets forms a valid set cover. #[cfg(test)] pub(crate) fn is_set_cover(universe_size: usize, sets: &[Vec], selected: &[bool]) -> bool { diff --git a/src/models/specialized/circuit.rs b/src/models/specialized/circuit.rs index 287841f77..e352fd2ed 100644 --- a/src/models/specialized/circuit.rs +++ b/src/models/specialized/circuit.rs @@ -299,6 +299,10 @@ impl Problem for CircuitSAT { impl SatisfactionProblem for CircuitSAT {} +crate::declare_variants! 
{ + CircuitSAT => "2^num_inputs", +} + #[cfg(test)] #[path = "../../unit_tests/models/specialized/circuit.rs"] mod tests; diff --git a/src/models/specialized/factoring.rs b/src/models/specialized/factoring.rs index 05bce8cb4..4aa83d90b 100644 --- a/src/models/specialized/factoring.rs +++ b/src/models/specialized/factoring.rs @@ -162,6 +162,10 @@ impl OptimizationProblem for Factoring { } } +crate::declare_variants! { + Factoring => "exp(sqrt(num_bits))", +} + #[cfg(test)] #[path = "../../unit_tests/models/specialized/factoring.rs"] mod tests; From 87efc18e72d1fa0e7ab02d472de0867fb9a438a3 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 14:59:11 +0800 Subject: [PATCH 09/28] feat: #[reduction] now checks DeclaredVariant at compile time Co-Authored-By: Claude Opus 4.6 --- problemreductions-macros/src/lib.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/problemreductions-macros/src/lib.rs b/problemreductions-macros/src/lib.rs index 6ff9dce99..1e0cbb557 100644 --- a/problemreductions-macros/src/lib.rs +++ b/problemreductions-macros/src/lib.rs @@ -280,6 +280,14 @@ fn generate_reduction_entry( }, } } + + const _: () = { + fn _assert_declared_variant() {} + fn _check() { + _assert_declared_variant::<#source_type>(); + _assert_declared_variant::<#target_type>(); + } + }; }; Ok(output) From c98a9656f926d9a6e61e29faa7116e47a2433b26 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 15:03:34 +0800 Subject: [PATCH 10/28] feat: ReductionGraph builds nodes from VariantEntry with complexity Phase 1 now builds variant nodes from VariantEntry inventory (with complexity metadata), then Phase 2 creates edges from ReductionEntry. Adds variant_complexity() getter for querying a variant's time complexity. 
Co-Authored-By: Claude Opus 4.6 --- src/rules/graph.rs | 63 +++++++++++++++++++++++++++++------ src/unit_tests/rules/graph.rs | 29 ++++++++++++++++ 2 files changed, 81 insertions(+), 11 deletions(-) diff --git a/src/rules/graph.rs b/src/rules/graph.rs index f0f23b355..fda9a8591 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -1,10 +1,14 @@ //! Runtime reduction graph for discovering and executing reduction paths. //! //! The graph uses variant-level nodes: each node is a unique `(problem_name, variant)` pair. +//! Nodes are built in two phases: +//! 1. From `VariantEntry` inventory (with complexity metadata) +//! 2. From `ReductionEntry` inventory (fallback for backwards compatibility) +//! //! Edges come exclusively from `#[reduction]` registrations via `inventory::iter::`. //! //! This module implements: -//! - Variant-level graph construction from `ReductionEntry` inventory +//! - Variant-level graph construction from `VariantEntry` and `ReductionEntry` inventory //! - Dijkstra's algorithm with custom cost functions for optimal paths //! - JSON export for documentation and visualization @@ -208,6 +212,7 @@ pub(crate) fn classify_problem_category(module_path: &str) -> &str { struct VariantNode { name: &'static str, variant: BTreeMap, + complexity: &'static str, } /// Information about a neighbor in the reduction graph. @@ -270,12 +275,13 @@ impl ReductionGraph { let mut name_to_nodes: HashMap<&'static str, Vec> = HashMap::new(); // Helper to ensure a variant node exists in the graph. 
- let ensure_node = |name: &'static str, - variant: BTreeMap, - nodes: &mut Vec, - graph: &mut DiGraph, - node_index: &mut HashMap, - name_to_nodes: &mut HashMap<&'static str, Vec>| + let mut ensure_node = |name: &'static str, + variant: BTreeMap, + complexity: &'static str, + nodes: &mut Vec, + graph: &mut DiGraph, + node_index: &mut HashMap, + name_to_nodes: &mut HashMap<&'static str, Vec>| -> NodeIndex { let vref = VariantRef { name: name.to_string(), @@ -285,7 +291,11 @@ impl ReductionGraph { idx } else { let node_id = nodes.len(); - nodes.push(VariantNode { name, variant }); + nodes.push(VariantNode { + name, + variant, + complexity, + }); let idx = graph.add_node(node_id); node_index.insert(vref, idx); name_to_nodes.entry(name).or_default().push(idx); @@ -293,14 +303,31 @@ impl ReductionGraph { } }; - // Register reductions from inventory (auto-discovery) + // Phase 1: Build nodes from VariantEntry inventory + for entry in inventory::iter:: { + let variant = Self::variant_to_map(&entry.variant()); + ensure_node( + entry.name, + variant, + entry.complexity, + &mut nodes, + &mut graph, + &mut node_index, + &mut name_to_nodes, + ); + } + + // Phase 2: Build edges from ReductionEntry inventory for entry in inventory::iter:: { let source_variant = Self::variant_to_map(&entry.source_variant()); let target_variant = Self::variant_to_map(&entry.target_variant()); + // Nodes should already exist from Phase 1. + // Fall back to creating them with empty complexity for backwards compatibility. 
let src_idx = ensure_node( entry.source_name, source_variant, + "", &mut nodes, &mut graph, &mut node_index, @@ -309,6 +336,7 @@ impl ReductionGraph { let dst_idx = ensure_node( entry.target_name, target_variant, + "", &mut nodes, &mut graph, &mut node_index, @@ -316,8 +344,6 @@ impl ReductionGraph { ); let overhead = entry.overhead(); - - // Check if edge already exists (avoid duplicates) if graph.find_edge(src_idx, dst_idx).is_none() { graph.add_edge( src_idx, @@ -600,6 +626,21 @@ impl ReductionGraph { .unwrap_or_default() } + /// Get the complexity expression for a specific variant. + pub fn variant_complexity( + &self, + name: &str, + variant: &BTreeMap, + ) -> Option<&'static str> { + let idx = self.lookup_node(name, variant)?; + let node = &self.nodes[self.graph[idx]]; + if node.complexity.is_empty() { + None + } else { + Some(node.complexity) + } + } + /// Get all outgoing reductions from a problem (across all its variants). pub fn outgoing_reductions(&self, name: &str) -> Vec { let Some(indices) = self.name_to_nodes.get(name) else { diff --git a/src/unit_tests/rules/graph.rs b/src/unit_tests/rules/graph.rs index c10f60c48..be755a4dc 100644 --- a/src/unit_tests/rules/graph.rs +++ b/src/unit_tests/rules/graph.rs @@ -1051,3 +1051,32 @@ fn test_overhead_variables_are_consistent() { } } } + +#[test] +fn test_variant_entry_complexity_available() { + let entries: Vec<_> = inventory::iter:: + .into_iter() + .collect(); + assert!( + !entries.is_empty(), + "VariantEntry inventory should not be empty" + ); + + let mis_entry = entries + .iter() + .find(|e| e.name == "MaximumIndependentSet"); + assert!(mis_entry.is_some(), "MIS should have a VariantEntry"); + assert!( + !mis_entry.unwrap().complexity.is_empty(), + "complexity should not be empty" + ); +} + +#[test] +fn test_variant_complexity() { + let graph = ReductionGraph::new(); + let variant = + ReductionGraph::variant_to_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let complexity = 
graph.variant_complexity("MaximumIndependentSet", &variant); + assert_eq!(complexity, Some("2^num_vertices")); +} From 3e175df2aa1271b2ed2d61af415f2b5517945c4c Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 15:06:40 +0800 Subject: [PATCH 11/28] feat: include complexity in graph JSON export Co-Authored-By: Claude Opus 4.6 --- src/rules/graph.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rules/graph.rs b/src/rules/graph.rs index fda9a8591..72b748550 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -77,6 +77,8 @@ pub(crate) struct NodeJson { pub(crate) category: String, /// Relative rustdoc path (e.g., "models/graph/maximum_independent_set"). pub(crate) doc_path: String, + /// Worst-case time complexity expression (empty if not declared). + pub(crate) complexity: String, } /// Internal reference to a problem variant, used as HashMap key. @@ -896,6 +898,7 @@ impl ReductionGraph { variant: node.variant.clone(), category, doc_path, + complexity: node.complexity.to_string(), }, ) }) From f7c00b222eba898f3cc6fea65bc4fcb0db7a680e Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 15:11:15 +0800 Subject: [PATCH 12/28] feat: display per-variant complexity in pred show Co-Authored-By: Claude Opus 4.6 --- problemreductions-cli/src/commands/graph.rs | 19 +++++++++++++++++-- problemreductions-cli/src/mcp/tools.rs | 13 ++++++++++++- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/problemreductions-cli/src/commands/graph.rs b/problemreductions-cli/src/commands/graph.rs index da05d4cd4..dc106908d 100644 --- a/problemreductions-cli/src/commands/graph.rs +++ b/problemreductions-cli/src/commands/graph.rs @@ -123,7 +123,11 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { " {}", crate::output::fmt_problem_name(&format!("{}{}", spec.name, slash)) ); - text.push_str(&format!("{label}\n")); + if let Some(c) = graph.variant_complexity(&spec.name, v) { + text.push_str(&format!("{label} complexity: {c}\n")); 
+        } else {
+            text.push_str(&format!("{label}\n"));
+        }
     }
 
     // Show fields from schema (right after variants)
@@ -218,9 +222,20 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> {
                 "overhead": overhead,
             })
         };
+    let variants_json: Vec<serde_json::Value> = variants
+        .iter()
+        .map(|v| {
+            let complexity = graph.variant_complexity(&spec.name, v).unwrap_or("");
+            serde_json::json!({
+                "variant": v,
+                "complexity": complexity,
+            })
+        })
+        .collect();
+
     let mut json = serde_json::json!({
         "name": spec.name,
-        "variants": variants,
+        "variants": variants_json,
         "size_fields": size_fields,
         "reduces_to": outgoing.iter().map(&edge_to_json).collect::<Vec<_>>(),
         "reduces_from": incoming.iter().map(&edge_to_json).collect::<Vec<_>>(),
diff --git a/problemreductions-cli/src/mcp/tools.rs b/problemreductions-cli/src/mcp/tools.rs
index 19f34c571..0b1f90034 100644
--- a/problemreductions-cli/src/mcp/tools.rs
+++ b/problemreductions-cli/src/mcp/tools.rs
@@ -176,9 +176,20 @@ impl McpServer {
         let incoming = graph.incoming_reductions(&spec.name);
         let size_fields = graph.size_field_names(&spec.name);
 
+        let variants_json: Vec<serde_json::Value> = variants
+            .iter()
+            .map(|v| {
+                let complexity = graph.variant_complexity(&spec.name, v).unwrap_or("");
+                serde_json::json!({
+                    "variant": v,
+                    "complexity": complexity,
+                })
+            })
+            .collect();
+
         let mut json = serde_json::json!({
             "name": spec.name,
-            "variants": variants,
+            "variants": variants_json,
             "size_fields": &size_fields,
             "reduces_to": outgoing.iter().map(|e| {
                 serde_json::json!({
From 221eef8d706060f75e161a71276ad756b1d9e871 Mon Sep 17 00:00:00 2001
From: GiggleLiu
Date: Fri, 27 Feb 2026 15:14:50 +0800
Subject: [PATCH 13/28] fix: sort variants_for() deterministically (default
 variant first)

---
 src/rules/graph.rs | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/src/rules/graph.rs b/src/rules/graph.rs
index 72b748550..f3b4f7f16 100644
--- a/src/rules/graph.rs
+++ b/src/rules/graph.rs
@@ -615,9 +615,12 @@ impl ReductionGraph {
     /// Get all
variant maps registered for a problem name. /// - /// Returns an empty `Vec` if the name is not found. + /// Returns variants sorted deterministically: the "default" variant + /// (SimpleGraph, i32, etc.) comes first, then remaining variants + /// in lexicographic order. pub fn variants_for(&self, name: &str) -> Vec> { - self.name_to_nodes + let mut variants: Vec> = self + .name_to_nodes .get(name) .map(|indices| { indices @@ -625,7 +628,20 @@ impl ReductionGraph { .map(|&idx| self.nodes[self.graph[idx]].variant.clone()) .collect() }) - .unwrap_or_default() + .unwrap_or_default(); + // Sort deterministically: default variant values (SimpleGraph, i32, KN) + // sort first so callers can rely on variants[0] being the "base" variant. + variants.sort_by(|a, b| { + fn default_rank(v: &BTreeMap) -> usize { + v.values() + .filter(|val| !["SimpleGraph", "i32", "KN"].contains(&val.as_str())) + .count() + } + default_rank(a) + .cmp(&default_rank(b)) + .then_with(|| a.cmp(b)) + }); + variants } /// Get the complexity expression for a specific variant. From d6876ea57d1b78ac2a8f5dde0b7293a93b7333dd Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 15:20:55 +0800 Subject: [PATCH 14/28] chore: fix formatting, clippy, and doctest issues - Remove unused `mut` from ensure_node closure - Sort variants_for() deterministically (default variant first) - Change declare_variants! 
doctest from `ignore` to `text` Co-Authored-By: Claude Opus 4.6 --- ...27-explicit-variant-declarations-design.md | 102 +++ ...2-27-explicit-variant-declarations-impl.md | 761 ++++++++++++++++++ problemreductions-cli/src/commands/graph.rs | 4 +- src/rules/graph.rs | 18 +- src/unit_tests/rules/graph.rs | 7 +- src/variant.rs | 2 +- 6 files changed, 875 insertions(+), 19 deletions(-) create mode 100644 docs/plans/2026-02-27-explicit-variant-declarations-design.md create mode 100644 docs/plans/2026-02-27-explicit-variant-declarations-impl.md diff --git a/docs/plans/2026-02-27-explicit-variant-declarations-design.md b/docs/plans/2026-02-27-explicit-variant-declarations-design.md new file mode 100644 index 000000000..530ed764d --- /dev/null +++ b/docs/plans/2026-02-27-explicit-variant-declarations-design.md @@ -0,0 +1,102 @@ +# Explicit Variant Declarations with Per-Variant Complexity + +**Date:** 2026-02-27 +**Status:** Approved + +## Problem + +Variants currently emerge implicitly from `#[reduction]` registrations. This means: +- A variant can't exist without a reduction +- There's no place to attach per-variant metadata (e.g., worst-case time complexity) +- No compile-time validation that reductions reference valid variants + +## Design + +### New types + +**`DeclaredVariant` marker trait** (`src/traits.rs`): +```rust +pub trait DeclaredVariant {} +``` + +**`VariantEntry` inventory struct** (new file `src/registry/variant.rs`): +```rust +pub struct VariantEntry { + pub name: &'static str, + pub variant_fn: fn() -> Vec<(&'static str, &'static str)>, + pub complexity: &'static str, // worst-case time complexity, e.g., "2^num_vertices" +} +inventory::collect!(VariantEntry); +``` + +### `declare_variants!` macro + +Declarative macro that generates both `DeclaredVariant` trait impls and `VariantEntry` inventory submissions: + +```rust +macro_rules! declare_variants { + ($($ty:ty => $complexity:expr),+ $(,)?) 
=> { + $( + impl $crate::traits::DeclaredVariant for $ty {} + + inventory::submit! { + $crate::registry::VariantEntry { + name: <$ty as $crate::traits::Problem>::NAME, + variant_fn: || <$ty as $crate::traits::Problem>::variant(), + complexity: $complexity, + } + } + )+ + }; +} +``` + +**Usage** (in each model file, e.g., `maximum_independent_set.rs`): +```rust +declare_variants! { + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", +} +``` + +### Compile-time checking in `#[reduction]` + +The `#[reduction]` proc macro generates a `DeclaredVariant` assertion after the impl block: + +```rust +const _: () = { + fn _assert() {} + _assert::(); + _assert::(); +}; +``` + +This produces a compile error if either source or target variant is not declared via `declare_variants!`. + +### Graph construction change + +`ReductionGraph::new()` changes: +1. **First:** Build nodes from `VariantEntry` inventory (each entry becomes a node with complexity metadata) +2. **Then:** Build edges from `ReductionEntry` inventory (edges connect existing nodes) +3. Edges referencing undeclared variants would be caught at compile time by `#[reduction]` + +### Display changes + +- `pred show `: Shows complexity per variant in the variants list +- Graph JSON export: Adds `complexity` field per node +- `pred show` JSON output: Includes complexity in variant info + +## Decisions + +| Decision | Choice | Rationale | +|----------|--------|-----------| +| Declaration location | Model file | All variants of a problem are visible in one place | +| Macro syntax | `declare_variants!` (macro_rules!) | Good balance of conciseness vs. 
complexity | +| Type specification | Concrete Rust types | Enables compile-time checking via trait bounds | +| Validation | Compile error | Strictest; catches mistakes early via `DeclaredVariant` trait | +| Complexity format | String expression (e.g., `"2^num_vertices"`) | Consistent with overhead expression syntax | + +## Scope + +Every model file that has variants needs a `declare_variants!` call. This touches all files in `src/models/`. diff --git a/docs/plans/2026-02-27-explicit-variant-declarations-impl.md b/docs/plans/2026-02-27-explicit-variant-declarations-impl.md new file mode 100644 index 000000000..3e1e3ff4a --- /dev/null +++ b/docs/plans/2026-02-27-explicit-variant-declarations-impl.md @@ -0,0 +1,761 @@ +# Explicit Variant Declarations Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Make problem variants first-class citizens with explicit declarations and per-variant time complexity metadata. + +**Architecture:** New `VariantEntry` inventory + `declare_variants!` macro in model files. `DeclaredVariant` marker trait enables compile-time checking in `#[reduction]`. `ReductionGraph` builds nodes from `VariantEntry` instead of inferring them from edges. + +**Tech Stack:** Rust, inventory crate, macro_rules!, proc_macro (existing `#[reduction]`), petgraph + +--- + +### Task 1: Add DeclaredVariant trait + +**Files:** +- Modify: `src/traits.rs` + +**Step 1: Add the marker trait** + +At the end of `src/traits.rs` (before the `#[cfg(test)]` block), add: + +```rust +/// Marker trait for explicitly declared problem variants. +/// +/// Implemented automatically by [`declare_variants!`] for each concrete type. +/// The [`#[reduction]`] proc macro checks this trait at compile time to ensure +/// all reduction source/target types have been declared. 
+pub trait DeclaredVariant {} +``` + +**Step 2: Build** + +Run: `cargo build` +Expected: PASS (trait is unused so far) + +**Step 3: Commit** + +```bash +git add src/traits.rs +git commit -m "feat: add DeclaredVariant marker trait" +``` + +--- + +### Task 2: Add VariantEntry struct and inventory + +**Files:** +- Create: `src/registry/variant.rs` +- Modify: `src/registry/mod.rs` + +**Step 1: Create the variant entry module** + +Create `src/registry/variant.rs`: + +```rust +//! Explicit variant registration via inventory. + +/// A registered problem variant entry. +/// +/// Submitted by [`declare_variants!`] for each concrete problem type. +/// The reduction graph uses these entries to build nodes with complexity metadata. +pub struct VariantEntry { + /// Problem name (from `Problem::NAME`). + pub name: &'static str, + /// Function returning variant key-value pairs (from `Problem::variant()`). + pub variant_fn: fn() -> Vec<(&'static str, &'static str)>, + /// Worst-case time complexity expression (e.g., `"2^num_vertices"`). + pub complexity: &'static str, +} + +impl VariantEntry { + /// Get the variant by calling the function. + pub fn variant(&self) -> Vec<(&'static str, &'static str)> { + (self.variant_fn)() + } +} + +impl std::fmt::Debug for VariantEntry { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("VariantEntry") + .field("name", &self.name) + .field("variant", &self.variant()) + .field("complexity", &self.complexity) + .finish() + } +} + +inventory::collect!(VariantEntry); +``` + +**Step 2: Export from registry module** + +In `src/registry/mod.rs`, add the module declaration and re-export: + +```rust +pub mod variant; +pub use variant::VariantEntry; +``` + +**Step 3: Build** + +Run: `cargo build` +Expected: PASS + +**Step 4: Commit** + +```bash +git add src/registry/variant.rs src/registry/mod.rs +git commit -m "feat: add VariantEntry inventory struct" +``` + +--- + +### Task 3: Create declare_variants! 
macro + +**Files:** +- Modify: `src/variant.rs` (where `variant_params!` is defined) + +**Step 1: Add the macro** + +At the end of `src/variant.rs`, add: + +```rust +/// Declare explicit problem variants with per-variant complexity metadata. +/// +/// Each entry generates: +/// 1. A `DeclaredVariant` trait impl for compile-time checking +/// 2. A `VariantEntry` inventory submission for runtime graph building +/// +/// # Example +/// +/// ```ignore +/// declare_variants! { +/// MaximumIndependentSet => "2^num_vertices", +/// MaximumIndependentSet => "2^num_vertices", +/// } +/// ``` +#[macro_export] +macro_rules! declare_variants { + ($($ty:ty => $complexity:expr),+ $(,)?) => { + $( + impl $crate::traits::DeclaredVariant for $ty {} + + $crate::inventory::submit! { + $crate::registry::VariantEntry { + name: <$ty as $crate::traits::Problem>::NAME, + variant_fn: || <$ty as $crate::traits::Problem>::variant(), + complexity: $complexity, + } + } + )+ + }; +} +``` + +**Step 2: Check inventory re-export** + +Verify that `inventory` is re-exported from the main crate. Check `src/lib.rs` for `pub use inventory;` or similar. If not present, add: + +```rust +pub use inventory; +``` + +**Step 3: Build** + +Run: `cargo build` +Expected: PASS + +**Step 4: Commit** + +```bash +git add src/variant.rs src/lib.rs +git commit -m "feat: add declare_variants! macro" +``` + +--- + +### Task 4: Add declare_variants! 
to graph model files + +**Files (9 model files):** +- Modify: `src/models/graph/maximum_independent_set.rs` +- Modify: `src/models/graph/minimum_vertex_cover.rs` +- Modify: `src/models/graph/maximum_clique.rs` +- Modify: `src/models/graph/minimum_dominating_set.rs` +- Modify: `src/models/graph/maximum_matching.rs` +- Modify: `src/models/graph/traveling_salesman.rs` +- Modify: `src/models/graph/max_cut.rs` +- Modify: `src/models/graph/kcoloring.rs` +- Modify: `src/models/graph/maximal_is.rs` (optional — no reductions) + +**Step 1: Add declarations to maximum_independent_set.rs** + +Add at the end of the file (before `#[cfg(test)]`): + +```rust +declare_variants! { + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", +} +``` + +Ensure the geometry graph type imports are present: +```rust +use crate::graphs::{KingsSubgraph, TriangularSubgraph, UnitDiskGraph}; +``` + +**Step 2: Add declarations to minimum_vertex_cover.rs** + +```rust +declare_variants! { + MinimumVertexCover => "2^num_vertices", +} +``` + +**Step 3: Add declarations to maximum_clique.rs** + +```rust +declare_variants! { + MaximumClique => "2^num_vertices", +} +``` + +**Step 4: Add declarations to minimum_dominating_set.rs** + +```rust +declare_variants! { + MinimumDominatingSet => "2^num_vertices", +} +``` + +**Step 5: Add declarations to maximum_matching.rs** + +```rust +declare_variants! { + MaximumMatching => "2^num_vertices", +} +``` + +**Step 6: Add declarations to traveling_salesman.rs** + +```rust +declare_variants! { + TravelingSalesman => "num_vertices!", +} +``` + +**Step 7: Add declarations to max_cut.rs** + +```rust +declare_variants! { + MaxCut => "2^num_vertices", +} +``` + +**Step 8: Add declarations to kcoloring.rs** + +```rust +use crate::graphs::SimpleGraph; +use crate::variant::{KN, K2, K3, K4, K5}; + +declare_variants! 
{ + KColoring => "k^num_vertices", + KColoring => "2^num_vertices", + KColoring => "3^num_vertices", + KColoring => "4^num_vertices", + KColoring => "5^num_vertices", +} +``` + +**Step 9: Build** + +Run: `cargo build` +Expected: PASS + +**Step 10: Commit** + +```bash +git add src/models/graph/ +git commit -m "feat: add declare_variants! to graph model files" +``` + +--- + +### Task 5: Add declare_variants! to optimization, satisfiability, set, and specialized model files + +**Files (9 model files):** +- Modify: `src/models/optimization/qubo.rs` +- Modify: `src/models/optimization/spin_glass.rs` +- Modify: `src/models/optimization/ilp.rs` +- Modify: `src/models/satisfiability/sat.rs` +- Modify: `src/models/satisfiability/ksat.rs` +- Modify: `src/models/set/maximum_set_packing.rs` +- Modify: `src/models/set/minimum_set_covering.rs` +- Modify: `src/models/specialized/circuit.rs` +- Modify: `src/models/specialized/factoring.rs` + +**Step 1: qubo.rs** + +```rust +declare_variants! { + QUBO => "2^num_vars", +} +``` + +**Step 2: spin_glass.rs** + +```rust +use crate::graphs::SimpleGraph; + +declare_variants! { + SpinGlass => "2^num_vertices", + SpinGlass => "2^num_vertices", +} +``` + +**Step 3: ilp.rs** + +```rust +declare_variants! { + ILP => "exp(num_variables)", +} +``` + +**Step 4: sat.rs** + +```rust +declare_variants! { + Satisfiability => "2^num_variables", +} +``` + +**Step 5: ksat.rs** + +```rust +use crate::variant::{KN, K2, K3}; + +declare_variants! { + KSatisfiability => "2^num_variables", + KSatisfiability => "2^num_variables", + KSatisfiability => "2^num_variables", +} +``` + +**Step 6: maximum_set_packing.rs** + +```rust +declare_variants! { + MaximumSetPacking => "2^num_sets", + MaximumSetPacking => "2^num_sets", +} +``` + +**Step 7: minimum_set_covering.rs** + +```rust +declare_variants! { + MinimumSetCovering => "2^num_sets", +} +``` + +**Step 8: circuit.rs** + +```rust +declare_variants! 
{ + CircuitSAT => "2^num_inputs", +} +``` + +**Step 9: factoring.rs** + +```rust +declare_variants! { + Factoring => "exp(sqrt(num_bits))", +} +``` + +**Step 10: Build and test** + +Run: `cargo build && cargo test` +Expected: PASS + +**Step 11: Commit** + +```bash +git add src/models/optimization/ src/models/satisfiability/ src/models/set/ src/models/specialized/ +git commit -m "feat: add declare_variants! to remaining model files" +``` + +--- + +### Task 6: Update #[reduction] proc macro to check DeclaredVariant + +**Files:** +- Modify: `problemreductions-macros/src/lib.rs` + +**Step 1: Add DeclaredVariant assertion to generate_reduction_entry()** + +In the `generate_reduction_entry` function, after the `inventory::submit!` block, add a compile-time assertion. Find the section that builds the final `output` tokens (around line 260-282) and append: + +```rust +// After the inventory::submit! block, add: +let declared_check = quote! { + const _: () = { + fn _assert_declared_variant() {} + _assert_declared_variant::<#source_type>(); + _assert_declared_variant::<#target_type>(); + }; +}; +``` + +Include `declared_check` in the final output token stream. + +**Step 2: Build** + +Run: `cargo build` +Expected: PASS (all variants are already declared from Tasks 4-5) + +**Step 3: Verify enforcement works** + +Temporarily comment out one variant from a `declare_variants!` call (e.g., remove `MaximumIndependentSet` from MIS), then build: + +Run: `cargo build 2>&1 | head -20` +Expected: Compile error mentioning `DeclaredVariant` not implemented for `MaximumIndependentSet` + +Restore the commented-out variant. 
+ +**Step 4: Commit** + +```bash +git add problemreductions-macros/src/lib.rs +git commit -m "feat: #[reduction] now checks DeclaredVariant at compile time" +``` + +--- + +### Task 7: Update ReductionGraph to build nodes from VariantEntry + +**Files:** +- Modify: `src/rules/graph.rs` + +**Step 1: Write a test for variant complexity in the graph** + +In `src/unit_tests/rules/graph.rs`, add: + +```rust +#[test] +fn test_variant_entry_complexity_available() { + // VariantEntry inventory should have entries with complexity info + let entries: Vec<_> = inventory::iter::.into_iter().collect(); + assert!(!entries.is_empty(), "VariantEntry inventory should not be empty"); + + // Check MIS has a variant with complexity + let mis_entry = entries.iter().find(|e| e.name == "MaximumIndependentSet"); + assert!(mis_entry.is_some(), "MIS should have a VariantEntry"); + assert!(!mis_entry.unwrap().complexity.is_empty(), "complexity should not be empty"); +} +``` + +**Step 2: Run test** + +Run: `cargo test test_variant_entry_complexity_available` +Expected: PASS (VariantEntry submissions exist from Tasks 4-5) + +**Step 3: Add complexity field to VariantNode** + +In `src/rules/graph.rs`, update `VariantNode`: + +```rust +#[derive(Debug, Clone)] +struct VariantNode { + name: &'static str, + variant: BTreeMap, + complexity: &'static str, +} +``` + +**Step 4: Update ReductionGraph::new() to build nodes from VariantEntry first** + +Replace the node-building logic in `new()`. The new approach: + +1. First pass: create nodes from `VariantEntry` inventory +2. 
Second pass: create edges from `ReductionEntry` inventory (nodes must already exist) + +```rust +pub fn new() -> Self { + let mut graph = DiGraph::new(); + let mut nodes: Vec = Vec::new(); + let mut node_index: HashMap = HashMap::new(); + let mut name_to_nodes: HashMap<&'static str, Vec> = HashMap::new(); + + // Helper to ensure a variant node exists + let mut ensure_node = |name: &'static str, + variant: BTreeMap, + complexity: &'static str, + nodes: &mut Vec, + graph: &mut DiGraph, + node_index: &mut HashMap, + name_to_nodes: &mut HashMap<&'static str, Vec>| + -> NodeIndex { + let vref = VariantRef { + name: name.to_string(), + variant: variant.clone(), + }; + if let Some(&idx) = node_index.get(&vref) { + idx + } else { + let node_id = nodes.len(); + nodes.push(VariantNode { name, variant, complexity }); + let idx = graph.add_node(node_id); + node_index.insert(vref, idx); + name_to_nodes.entry(name).or_default().push(idx); + idx + } + }; + + // Phase 1: Build nodes from VariantEntry inventory + for entry in inventory::iter:: { + let variant = Self::variant_to_map(&entry.variant()); + ensure_node( + entry.name, + variant, + entry.complexity, + &mut nodes, + &mut graph, + &mut node_index, + &mut name_to_nodes, + ); + } + + // Phase 2: Build edges from ReductionEntry inventory + for entry in inventory::iter:: { + let source_variant = Self::variant_to_map(&entry.source_variant()); + let target_variant = Self::variant_to_map(&entry.target_variant()); + + // Nodes should already exist from Phase 1 (enforced by #[reduction] compile check). + // Fall back to creating them with empty complexity for backwards compatibility. 
+ let src_idx = ensure_node( + entry.source_name, + source_variant, + "", + &mut nodes, + &mut graph, + &mut node_index, + &mut name_to_nodes, + ); + let dst_idx = ensure_node( + entry.target_name, + target_variant, + "", + &mut nodes, + &mut graph, + &mut node_index, + &mut name_to_nodes, + ); + + let overhead = entry.overhead(); + if graph.find_edge(src_idx, dst_idx).is_none() { + graph.add_edge( + src_idx, + dst_idx, + ReductionEdgeData { + overhead, + reduce_fn: entry.reduce_fn, + }, + ); + } + } + + Self { graph, nodes, name_to_nodes } +} +``` + +**Step 5: Add complexity getter to ReductionGraph** + +```rust +/// Get the complexity expression for a specific variant. +pub fn variant_complexity( + &self, + name: &str, + variant: &BTreeMap, +) -> Option<&'static str> { + let idx = self.lookup_node(name, variant)?; + let node = &self.nodes[self.graph[idx]]; + if node.complexity.is_empty() { + None + } else { + Some(node.complexity) + } +} +``` + +**Step 6: Write test for variant_complexity** + +```rust +#[test] +fn test_variant_complexity() { + let graph = ReductionGraph::new(); + let variant = ReductionGraph::variant_to_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let complexity = graph.variant_complexity("MaximumIndependentSet", &variant); + assert!(complexity.is_some()); + assert!(!complexity.unwrap().is_empty()); +} +``` + +**Step 7: Build and test** + +Run: `cargo test` +Expected: PASS + +**Step 8: Commit** + +```bash +git add src/rules/graph.rs src/unit_tests/rules/graph.rs +git commit -m "feat: ReductionGraph builds nodes from VariantEntry with complexity" +``` + +--- + +### Task 8: Update JSON export with complexity field + +**Files:** +- Modify: `src/rules/graph.rs` (NodeJson struct and to_json()) + +**Step 1: Add complexity to NodeJson** + +```rust +#[derive(Debug, Clone, Serialize)] +pub(crate) struct NodeJson { + pub(crate) name: String, + pub(crate) variant: BTreeMap, + pub(crate) category: String, + pub(crate) doc_path: String, + /// 
Worst-case time complexity expression (empty if not declared). + pub(crate) complexity: String, +} +``` + +**Step 2: Update to_json() to populate complexity** + +In the node-building section of `to_json()`, add: + +```rust +let complexity = self.nodes[i].complexity.to_string(); +// ... in NodeJson construction: +NodeJson { + name: node.name.to_string(), + variant: node.variant.clone(), + category, + doc_path, + complexity, +} +``` + +**Step 3: Build and test** + +Run: `cargo test` +Expected: PASS (existing tests may need updating if they assert exact JSON structure) + +**Step 4: Commit** + +```bash +git add src/rules/graph.rs +git commit -m "feat: include complexity in graph JSON export" +``` + +--- + +### Task 9: Update CLI `pred show` to display complexity + +**Files:** +- Modify: `problemreductions-cli/src/commands/graph.rs` + +**Step 1: Add complexity to variant display** + +In the `show_problem_inner` function, update the variants section. For each variant, also show complexity. + +Find where variants are printed (human-readable output) and add complexity: + +``` +Variants: + /SimpleGraph/i32 complexity: 2^num_vertices + /KingsSubgraph/i32 complexity: 2^num_vertices +``` + +**Step 2: Add complexity to JSON output** + +In the JSON output path of `show_problem_inner`, include complexity per variant. + +**Step 3: Update MCP show_problem_inner** + +In `problemreductions-cli/src/mcp/tools.rs`, update the MCP `show_problem` output to include complexity per variant. + +**Step 4: Build and test** + +Run: `cargo build && cargo test` +Expected: PASS + +**Step 5: Smoke test** + +Run: `cargo run -p problemreductions-cli -- show MIS` +Expected: Variants section shows complexity for each variant. 
+ +**Step 6: Commit** + +```bash +git add problemreductions-cli/src/commands/graph.rs problemreductions-cli/src/mcp/tools.rs +git commit -m "feat: display per-variant complexity in pred show" +``` + +--- + +### Task 10: Update graph JSON test data + +**Files:** +- Modify: `tests/data/reduction_graph.json` (if it exists and is checked in tests) +- Modify: Any tests that assert exact JSON structure + +**Step 1: Regenerate graph JSON** + +Run: `make rust-export` + +**Step 2: Update any snapshot tests** + +Check for tests that compare against stored JSON. Update expected values. + +**Step 3: Run full test suite** + +Run: `make check` +Expected: PASS (fmt + clippy + all tests) + +**Step 4: Commit** + +```bash +git add -A +git commit -m "chore: update test data for variant complexity" +``` + +--- + +### Task 11: Final verification + +**Step 1: Run full CI check** + +Run: `make check` +Expected: PASS + +**Step 2: Run CLI demo** + +Run: `make cli-demo` +Expected: PASS + +**Step 3: Test compile-time enforcement** + +Temporarily add a bogus reduction (or comment out a declare_variants entry) and verify the build fails with a clear error about `DeclaredVariant`. + +**Step 4: Verify JSON export** + +Run: `cargo run -p problemreductions-cli -- show MIS --json | python3 -m json.tool | head -30` +Expected: JSON includes complexity per variant. 
diff --git a/problemreductions-cli/src/commands/graph.rs b/problemreductions-cli/src/commands/graph.rs index dc106908d..bb2634276 100644 --- a/problemreductions-cli/src/commands/graph.rs +++ b/problemreductions-cli/src/commands/graph.rs @@ -212,9 +212,7 @@ pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { .overhead .output_size .iter() - .map(|(field, poly)| { - serde_json::json!({"field": field, "formula": poly.to_string()}) - }) + .map(|(field, poly)| serde_json::json!({"field": field, "formula": poly.to_string()})) .collect(); serde_json::json!({ "source": {"name": e.source_name, "variant": e.source_variant}, diff --git a/src/rules/graph.rs b/src/rules/graph.rs index f3b4f7f16..2b5e0f87d 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -277,13 +277,13 @@ impl ReductionGraph { let mut name_to_nodes: HashMap<&'static str, Vec> = HashMap::new(); // Helper to ensure a variant node exists in the graph. - let mut ensure_node = |name: &'static str, - variant: BTreeMap, - complexity: &'static str, - nodes: &mut Vec, - graph: &mut DiGraph, - node_index: &mut HashMap, - name_to_nodes: &mut HashMap<&'static str, Vec>| + let ensure_node = |name: &'static str, + variant: BTreeMap, + complexity: &'static str, + nodes: &mut Vec, + graph: &mut DiGraph, + node_index: &mut HashMap, + name_to_nodes: &mut HashMap<&'static str, Vec>| -> NodeIndex { let vref = VariantRef { name: name.to_string(), @@ -637,9 +637,7 @@ impl ReductionGraph { .filter(|val| !["SimpleGraph", "i32", "KN"].contains(&val.as_str())) .count() } - default_rank(a) - .cmp(&default_rank(b)) - .then_with(|| a.cmp(b)) + default_rank(a).cmp(&default_rank(b)).then_with(|| a.cmp(b)) }); variants } diff --git a/src/unit_tests/rules/graph.rs b/src/unit_tests/rules/graph.rs index be755a4dc..3989379eb 100644 --- a/src/unit_tests/rules/graph.rs +++ b/src/unit_tests/rules/graph.rs @@ -1062,9 +1062,7 @@ fn test_variant_entry_complexity_available() { "VariantEntry inventory should not be empty" ); - let 
mis_entry = entries - .iter() - .find(|e| e.name == "MaximumIndependentSet"); + let mis_entry = entries.iter().find(|e| e.name == "MaximumIndependentSet"); assert!(mis_entry.is_some(), "MIS should have a VariantEntry"); assert!( !mis_entry.unwrap().complexity.is_empty(), @@ -1075,8 +1073,7 @@ fn test_variant_entry_complexity_available() { #[test] fn test_variant_complexity() { let graph = ReductionGraph::new(); - let variant = - ReductionGraph::variant_to_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let variant = ReductionGraph::variant_to_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let complexity = graph.variant_complexity("MaximumIndependentSet", &variant); assert_eq!(complexity, Some("2^num_vertices")); } diff --git a/src/variant.rs b/src/variant.rs index 36e9d3caa..fbf679ee6 100644 --- a/src/variant.rs +++ b/src/variant.rs @@ -154,7 +154,7 @@ impl_variant_param!(K1, "k", parent: KN, cast: |_| KN, k: Some(1)); /// /// # Example /// -/// ```ignore +/// ```text /// declare_variants! 
{ /// MaximumIndependentSet => "2^num_vertices", /// MaximumIndependentSet => "2^num_vertices", From 5b6decae72a147b1fe436b5a4069fed0af864914 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 20:56:16 +0800 Subject: [PATCH 15/28] test: cover VariantEntry Debug and variant_complexity None branch Adds test coverage for the two uncovered patch areas: - VariantEntry Debug formatting - variant_complexity returning None for unknown problems Co-Authored-By: Claude Opus 4.6 --- src/unit_tests/rules/graph.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/unit_tests/rules/graph.rs b/src/unit_tests/rules/graph.rs index 3989379eb..548833f84 100644 --- a/src/unit_tests/rules/graph.rs +++ b/src/unit_tests/rules/graph.rs @@ -1064,10 +1064,17 @@ fn test_variant_entry_complexity_available() { let mis_entry = entries.iter().find(|e| e.name == "MaximumIndependentSet"); assert!(mis_entry.is_some(), "MIS should have a VariantEntry"); + let mis_entry = mis_entry.unwrap(); assert!( - !mis_entry.unwrap().complexity.is_empty(), + !mis_entry.complexity.is_empty(), "complexity should not be empty" ); + + // Exercise Debug impl for VariantEntry + let debug_str = format!("{:?}", mis_entry); + assert!(debug_str.contains("VariantEntry")); + assert!(debug_str.contains("MaximumIndependentSet")); + assert!(debug_str.contains("complexity")); } #[test] @@ -1076,4 +1083,11 @@ fn test_variant_complexity() { let variant = ReductionGraph::variant_to_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let complexity = graph.variant_complexity("MaximumIndependentSet", &variant); assert_eq!(complexity, Some("2^num_vertices")); + + // Unknown problem returns None + let unknown = BTreeMap::new(); + assert_eq!( + graph.variant_complexity("NonExistentProblem", &unknown), + None + ); } From 90163ca4827ceac6c7812f908a127b154684b73d Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 21:12:41 +0800 Subject: [PATCH 16/28] fix: validate K-param 
consistency and extract shared CLI/MCP utils - Add validate_k_param() to detect contradictions between variant suffix (e.g., /K2) and --k flag (e.g., --k 3) for KColoring/KSatisfiability - Extract shared helpers (ser, variant_map, parse_positions, LCG, random graph generation) into util.rs to eliminate CLI/MCP duplication - Apply consistent K-param validation in both CLI create and MCP create_problem paths (including random generation) Co-Authored-By: Claude Opus 4.6 --- problemreductions-cli/src/commands/create.rs | 148 ++---------- problemreductions-cli/src/main.rs | 1 + problemreductions-cli/src/mcp/tools.rs | 191 +++------------- problemreductions-cli/src/util.rs | 229 +++++++++++++++++++ 4 files changed, 273 insertions(+), 296 deletions(-) create mode 100644 problemreductions-cli/src/util.rs diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 0808fd37b..d14db5f76 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -2,13 +2,13 @@ use crate::cli::CreateArgs; use crate::dispatch::ProblemJsonOutput; use crate::output::OutputConfig; use crate::problem_name::{parse_problem_spec, resolve_variant}; +use crate::util; use anyhow::{bail, Context, Result}; use problemreductions::prelude::*; use problemreductions::registry::collect_schemas; use problemreductions::topology::{ Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, }; -use problemreductions::variant::{K2, K3, KN}; use serde::Serialize; use std::collections::BTreeMap; @@ -190,16 +190,9 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { let (graph, _) = parse_graph(args).map_err(|e| { anyhow::anyhow!("{e}\n\nUsage: pred create KColoring --graph 0-1,1-2,2-0 --k 3") })?; - let data = match args.k { - Some(2) => ser(KColoring::::new(graph))?, - Some(3) => ser(KColoring::::new(graph))?, - Some(k) => ser(KColoring::::with_k(graph, k))?, - None => bail!( - 
"KColoring requires --k \n\n\ - Usage: pred create KColoring --graph 0-1,1-2,2-0 --k 3" - ), - }; - (data, resolved_variant.clone()) + let (k, _variant) = + util::validate_k_param(&resolved_variant, args.k, None, "KColoring")?; + util::ser_kcoloring(graph, k)? } // SAT @@ -224,12 +217,9 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) })?; let clauses = parse_clauses(args)?; - let data = match args.k { - Some(2) => ser(KSatisfiability::::new(num_vars, clauses))?, - Some(3) => ser(KSatisfiability::::new(num_vars, clauses))?, - _ => ser(KSatisfiability::::new(num_vars, clauses))?, - }; - (data, resolved_variant.clone()) + let (k, _variant) = + util::validate_k_param(&resolved_variant, args.k, Some(3), "KSatisfiability")?; + util::ser_ksat(num_vars, clauses, k)? } // QUBO @@ -367,14 +357,11 @@ fn ser_vertex_weight_problem_with( } fn ser(problem: T) -> Result { - Ok(serde_json::to_value(problem)?) + util::ser(problem) } fn variant_map(pairs: &[(&str, &str)]) -> BTreeMap { - pairs - .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect() + util::variant_map(pairs) } /// Parse `--graph` into a SimpleGraph, inferring num_vertices from max index. @@ -407,40 +394,12 @@ fn parse_graph(args: &CreateArgs) -> Result<(SimpleGraph, usize)> { Ok((SimpleGraph::new(num_vertices, edges), num_vertices)) } -/// Parse semicolon-separated x,y pairs from a string. 
-fn parse_positions(pos_str: &str, example: &str) -> Result> -where - T::Err: std::fmt::Display, -{ - pos_str - .split(';') - .map(|pair| { - let parts: Vec<&str> = pair.trim().split(',').collect(); - if parts.len() != 2 { - bail!( - "Invalid position '{}': expected format x,y (e.g., {example})", - pair.trim() - ); - } - let x: T = parts[0] - .trim() - .parse() - .map_err(|e| anyhow::anyhow!("Invalid x in '{}': {e}", pair.trim()))?; - let y: T = parts[1] - .trim() - .parse() - .map_err(|e| anyhow::anyhow!("Invalid y in '{}': {e}", pair.trim()))?; - Ok((x, y)) - }) - .collect() -} - /// Parse `--positions` as integer grid positions. fn parse_int_positions(args: &CreateArgs) -> Result> { let pos_str = args.positions.as_deref().ok_or_else(|| { anyhow::anyhow!("This variant requires --positions (e.g., \"0,0;1,0;1,1\")") })?; - parse_positions(pos_str, "0,0") + util::parse_positions(pos_str, "0,0") } /// Parse `--positions` as float positions. @@ -448,7 +407,7 @@ fn parse_float_positions(args: &CreateArgs) -> Result> { let pos_str = args.positions.as_deref().ok_or_else(|| { anyhow::anyhow!("This variant requires --positions (e.g., \"0.0,0.0;1.0,0.0;0.5,0.87\")") })?; - parse_positions(pos_str, "0.0,0.0") + util::parse_positions(pos_str, "0.0,0.0") } /// Parse `--weights` as vertex weights (i32), defaulting to all 1s. @@ -571,74 +530,16 @@ fn parse_matrix(args: &CreateArgs) -> Result>> { .collect() } -/// Generate a random Erdos-Renyi graph using a simple LCG PRNG (no external dependency). 
fn create_random_graph(num_vertices: usize, edge_prob: f64, seed: Option) -> SimpleGraph { - let mut state: u64 = seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() as u64 - }); - - let mut edges = Vec::new(); - for i in 0..num_vertices { - for j in (i + 1)..num_vertices { - // LCG step - state = state - .wrapping_mul(6364136223846793005) - .wrapping_add(1442695040888963407); - let rand_val = (state >> 33) as f64 / (1u64 << 31) as f64; - if rand_val < edge_prob { - edges.push((i, j)); - } - } - } - - SimpleGraph::new(num_vertices, edges) + util::create_random_graph(num_vertices, edge_prob, seed) } -/// LCG PRNG step — returns next state and a uniform f64 in [0, 1). -fn lcg_step(state: &mut u64) -> f64 { - *state = state - .wrapping_mul(6364136223846793005) - .wrapping_add(1442695040888963407); - (*state >> 33) as f64 / (1u64 << 31) as f64 -} - -/// Initialize LCG state from seed or system time. -fn lcg_init(seed: Option) -> u64 { - seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() as u64 - }) -} - -/// Generate random unique integer positions on a grid for KingsSubgraph/TriangularSubgraph. fn create_random_int_positions(num_vertices: usize, seed: Option) -> Vec<(i32, i32)> { - let mut state = lcg_init(seed); - let grid_size = (num_vertices as f64).sqrt().ceil() as i32 + 1; - let mut positions = std::collections::BTreeSet::new(); - while positions.len() < num_vertices { - let x = (lcg_step(&mut state) * grid_size as f64) as i32; - let y = (lcg_step(&mut state) * grid_size as f64) as i32; - positions.insert((x, y)); - } - positions.into_iter().collect() + util::create_random_int_positions(num_vertices, seed) } -/// Generate random float positions in [0, sqrt(N)] x [0, sqrt(N)] for UnitDiskGraph. 
fn create_random_float_positions(num_vertices: usize, seed: Option) -> Vec<(f64, f64)> { - let mut state = lcg_init(seed); - let side = (num_vertices as f64).sqrt(); - (0..num_vertices) - .map(|_| { - let x = lcg_step(&mut state) * side; - let y = lcg_step(&mut state) * side; - (x, y) - }) - .collect() + util::create_random_float_positions(num_vertices, seed) } /// Handle `pred create --random ...` @@ -753,24 +654,9 @@ fn create_random( bail!("--edge-prob must be between 0.0 and 1.0"); } let graph = create_random_graph(num_vertices, edge_prob, args.seed); - let k = args.k.unwrap_or(3); - let variant; - let data; - match k { - 2 => { - variant = variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - 3 => { - variant = variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::new(graph))?; - } - _ => { - variant = variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]); - data = ser(KColoring::::with_k(graph, k))?; - } - } - (data, variant) + let (k, _variant) = + util::validate_k_param(resolved_variant, args.k, Some(3), "KColoring")?; + util::ser_kcoloring(graph, k)? 
} _ => bail!( diff --git a/problemreductions-cli/src/main.rs b/problemreductions-cli/src/main.rs index e2386c829..dfddcf2de 100644 --- a/problemreductions-cli/src/main.rs +++ b/problemreductions-cli/src/main.rs @@ -5,6 +5,7 @@ mod dispatch; mod mcp; mod output; mod problem_name; +mod util; use clap::{CommandFactory, Parser}; use cli::{Cli, Commands}; diff --git a/problemreductions-cli/src/mcp/tools.rs b/problemreductions-cli/src/mcp/tools.rs index 0b1f90034..21ca323ba 100644 --- a/problemreductions-cli/src/mcp/tools.rs +++ b/problemreductions-cli/src/mcp/tools.rs @@ -1,9 +1,10 @@ +use crate::util; use problemreductions::models::graph::{ - KColoring, MaxCut, MaximumClique, MaximumIndependentSet, MaximumMatching, MinimumDominatingSet, + MaxCut, MaximumClique, MaximumIndependentSet, MaximumMatching, MinimumDominatingSet, MinimumVertexCover, TravelingSalesman, }; use problemreductions::models::optimization::{SpinGlass, QUBO}; -use problemreductions::models::satisfiability::{CNFClause, KSatisfiability, Satisfiability}; +use problemreductions::models::satisfiability::{CNFClause, Satisfiability}; use problemreductions::models::specialized::Factoring; use problemreductions::registry::collect_schemas; use problemreductions::rules::{ @@ -13,7 +14,6 @@ use problemreductions::topology::{ Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, }; use problemreductions::types::ProblemSize; -use problemreductions::variant::{K2, K3, KN}; use rmcp::handler::server::router::tool::ToolRouter; use rmcp::handler::server::wrapper::Parameters; use rmcp::tool; @@ -437,14 +437,10 @@ impl McpServer { "KColoring" => { let (graph, _) = parse_graph_from_params(params)?; - let k = params - .get("k") - .and_then(|v| v.as_u64()) - .map(|v| v as usize) - .ok_or_else(|| { - anyhow::anyhow!("KColoring requires 'k' parameter (number of colors)") - })?; - ser_kcoloring(graph, k)? 
+ let k_flag = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); + let (k, _variant) = + util::validate_k_param(&resolved_variant, k_flag, None, "KColoring")?; + util::ser_kcoloring(graph, k)? } // SAT @@ -465,24 +461,10 @@ impl McpServer { .map(|v| v as usize) .ok_or_else(|| anyhow::anyhow!("KSatisfiability requires 'num_vars'"))?; let clauses = parse_clauses_from_params(params)?; - let k = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); - let variant; - let data; - match k { - Some(2) => { - variant = variant_map(&[("k", "K2")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - Some(3) => { - variant = variant_map(&[("k", "K3")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - _ => { - variant = variant_map(&[("k", "KN")]); - data = ser(KSatisfiability::::new(num_vars, clauses))?; - } - } - (data, variant) + let k_flag = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); + let (k, _variant) = + util::validate_k_param(&resolved_variant, k_flag, Some(3), "KSatisfiability")?; + util::ser_ksat(num_vars, clauses, k)? 
} // QUBO @@ -562,7 +544,7 @@ impl McpServer { let weights = vec![1i32; num_vertices]; match graph_type { "KingsSubgraph" => { - let positions = create_random_int_positions(num_vertices, seed); + let positions = util::create_random_int_positions(num_vertices, seed); let graph = KingsSubgraph::new(positions); ( ser_vertex_weight_problem_generic(canonical, graph, weights)?, @@ -570,7 +552,7 @@ impl McpServer { ) } "TriangularSubgraph" => { - let positions = create_random_int_positions(num_vertices, seed); + let positions = util::create_random_int_positions(num_vertices, seed); let graph = TriangularSubgraph::new(positions); ( ser_vertex_weight_problem_generic(canonical, graph, weights)?, @@ -579,7 +561,7 @@ impl McpServer { } "UnitDiskGraph" => { let radius = params.get("radius").and_then(|v| v.as_f64()).unwrap_or(1.0); - let positions = create_random_float_positions(num_vertices, seed); + let positions = util::create_random_float_positions(num_vertices, seed); let graph = UnitDiskGraph::new(positions, radius); ( ser_vertex_weight_problem_generic(canonical, graph, weights)?, @@ -594,7 +576,7 @@ impl McpServer { if !(0.0..=1.0).contains(&edge_prob) { anyhow::bail!("edge_prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, seed); + let graph = util::create_random_graph(num_vertices, edge_prob, seed); ser_vertex_weight_problem(canonical, graph, weights)? } } @@ -607,7 +589,7 @@ impl McpServer { if !(0.0..=1.0).contains(&edge_prob) { anyhow::bail!("edge_prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, seed); + let graph = util::create_random_graph(num_vertices, edge_prob, seed); let num_edges = graph.num_edges(); let edge_weights = vec![1i32; num_edges]; ser_edge_weight_problem(canonical, graph, edge_weights)? 
@@ -620,7 +602,7 @@ impl McpServer { if !(0.0..=1.0).contains(&edge_prob) { anyhow::bail!("edge_prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, seed); + let graph = util::create_random_graph(num_vertices, edge_prob, seed); let num_edges = graph.num_edges(); let couplings = vec![1i32; num_edges]; let fields = vec![0i32; num_vertices]; @@ -638,13 +620,11 @@ impl McpServer { if !(0.0..=1.0).contains(&edge_prob) { anyhow::bail!("edge_prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, seed); - let k = params - .get("k") - .and_then(|v| v.as_u64()) - .map(|v| v as usize) - .unwrap_or(3); - ser_kcoloring(graph, k)? + let graph = util::create_random_graph(num_vertices, edge_prob, seed); + let k_flag = params.get("k").and_then(|v| v.as_u64()).map(|v| v as usize); + let (k, _variant) = + util::validate_k_param(resolved_variant, k_flag, Some(3), "KColoring")?; + util::ser_kcoloring(graph, k)? } _ => anyhow::bail!( "Random generation is not supported for {}. \ @@ -1090,14 +1070,11 @@ fn format_path_json( // --------------------------------------------------------------------------- fn ser(problem: T) -> anyhow::Result { - Ok(serde_json::to_value(problem)?) + util::ser(problem) } fn variant_map(pairs: &[(&str, &str)]) -> BTreeMap { - pairs - .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect() + util::variant_map(pairs) } /// Serialize a vertex-weight graph problem (MIS, MVC, MaxClique, MinDomSet). @@ -1133,27 +1110,6 @@ fn ser_edge_weight_problem( Ok((data, variant)) } -/// Serialize a KColoring problem with the appropriate K variant. 
-fn ser_kcoloring( - graph: SimpleGraph, - k: usize, -) -> anyhow::Result<(serde_json::Value, BTreeMap)> { - match k { - 2 => Ok(( - ser(KColoring::::new(graph))?, - variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]), - )), - 3 => Ok(( - ser(KColoring::::new(graph))?, - variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]), - )), - _ => Ok(( - ser(KColoring::::with_k(graph, k))?, - variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]), - )), - } -} - /// Serialize a vertex-weight problem with a generic graph type. fn ser_vertex_weight_problem_generic( canonical: &str, @@ -1216,31 +1172,6 @@ fn create_vertex_weight_from_params( } } -/// Parse semicolon-separated x,y pairs from a string. -fn parse_positions(pos_str: &str) -> anyhow::Result> -where - T::Err: std::fmt::Display, -{ - pos_str - .split(';') - .map(|pair| { - let parts: Vec<&str> = pair.trim().split(',').collect(); - if parts.len() != 2 { - anyhow::bail!("Invalid position '{}': expected format x,y", pair.trim()); - } - let x: T = parts[0] - .trim() - .parse() - .map_err(|e| anyhow::anyhow!("Invalid x in '{}': {e}", pair.trim()))?; - let y: T = parts[1] - .trim() - .parse() - .map_err(|e| anyhow::anyhow!("Invalid y in '{}': {e}", pair.trim()))?; - Ok((x, y)) - }) - .collect() -} - /// Extract and parse 'positions' param as integer grid positions. fn parse_int_positions_from_params(params: &serde_json::Value) -> anyhow::Result> { let pos_str = params @@ -1249,7 +1180,7 @@ fn parse_int_positions_from_params(params: &serde_json::Value) -> anyhow::Result .ok_or_else(|| { anyhow::anyhow!("This variant requires 'positions' parameter (e.g., \"0,0;1,0;1,1\")") })?; - parse_positions(pos_str) + util::parse_positions(pos_str, "0,0;1,0;1,1") } /// Extract and parse 'positions' param as float positions. 
@@ -1264,51 +1195,7 @@ fn parse_float_positions_from_params( "This variant requires 'positions' parameter (e.g., \"0.0,0.0;1.0,0.0\")" ) })?; - parse_positions(pos_str) -} - -/// Generate random unique integer positions on a grid. -fn create_random_int_positions(num_vertices: usize, seed: Option) -> Vec<(i32, i32)> { - let mut state = lcg_init(seed); - let grid_size = (num_vertices as f64).sqrt().ceil() as i32 + 1; - let mut positions = std::collections::BTreeSet::new(); - while positions.len() < num_vertices { - let x = (lcg_step(&mut state) * grid_size as f64) as i32; - let y = (lcg_step(&mut state) * grid_size as f64) as i32; - positions.insert((x, y)); - } - positions.into_iter().collect() -} - -/// Generate random float positions in [0, sqrt(N)] x [0, sqrt(N)]. -fn create_random_float_positions(num_vertices: usize, seed: Option) -> Vec<(f64, f64)> { - let mut state = lcg_init(seed); - let side = (num_vertices as f64).sqrt(); - (0..num_vertices) - .map(|_| { - let x = lcg_step(&mut state) * side; - let y = lcg_step(&mut state) * side; - (x, y) - }) - .collect() -} - -/// LCG PRNG step. -fn lcg_step(state: &mut u64) -> f64 { - *state = state - .wrapping_mul(6364136223846793005) - .wrapping_add(1442695040888963407); - (*state >> 33) as f64 / (1u64 << 31) as f64 -} - -/// Initialize LCG state. -fn lcg_init(seed: Option) -> u64 { - seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() as u64 - }) + util::parse_positions(pos_str, "0.0,0.0;1.0,0.0") } /// Parse `edges` field from JSON params into a SimpleGraph. 
@@ -1437,32 +1324,6 @@ fn parse_matrix_from_params(params: &serde_json::Value) -> anyhow::Result) -> SimpleGraph { - let mut state: u64 = seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() as u64 - }); - - let mut edges = Vec::new(); - for i in 0..num_vertices { - for j in (i + 1)..num_vertices { - // LCG step - state = state - .wrapping_mul(6364136223846793005) - .wrapping_add(1442695040888963407); - let rand_val = (state >> 33) as f64 / (1u64 << 31) as f64; - if rand_val < edge_prob { - edges.push((i, j)); - } - } - } - - SimpleGraph::new(num_vertices, edges) -} - /// Solve a plain problem and return JSON string. fn solve_problem_inner( problem_type: &str, diff --git a/problemreductions-cli/src/util.rs b/problemreductions-cli/src/util.rs new file mode 100644 index 000000000..73e43742f --- /dev/null +++ b/problemreductions-cli/src/util.rs @@ -0,0 +1,229 @@ +//! Shared utilities for CLI and MCP: parsing helpers and random generation. + +use anyhow::{bail, Result}; +use problemreductions::prelude::*; +use problemreductions::topology::SimpleGraph; +use problemreductions::variant::{K2, K3, KN}; +use serde::Serialize; +use std::collections::BTreeMap; + +// --------------------------------------------------------------------------- +// K-parameter validation +// --------------------------------------------------------------------------- + +/// Derive the k variant string from a numeric k value. +fn k_variant_str(k: usize) -> &'static str { + match k { + 1 => "K1", + 2 => "K2", + 3 => "K3", + 4 => "K4", + 5 => "K5", + _ => "KN", + } +} + +/// Validate that `--k` (or `params.k`) is consistent with a variant suffix +/// (e.g., `/K2`). Returns the effective k value and variant map. +/// +/// Rules: +/// - If the resolved variant has a specific k (e.g., K2), `k_flag` must +/// either be `None` or match. A mismatch is an error. +/// - If the resolved variant has k=KN (or no k), any `k_flag` is accepted. 
+/// - If `k_flag` is `None`, k is inferred from the variant (K2→2, K3→3, etc.),
+///   or defaults to `default_k`.
+pub fn validate_k_param(
+    resolved_variant: &BTreeMap<String, String>,
+    k_flag: Option<usize>,
+    default_k: Option<usize>,
+    problem_name: &str,
+) -> Result<(usize, BTreeMap<String, String>)> {
+    let variant_k_str = resolved_variant.get("k").map(|s| s.as_str());
+    let variant_k_num: Option<usize> = match variant_k_str {
+        Some("K1") => Some(1),
+        Some("K2") => Some(2),
+        Some("K3") => Some(3),
+        Some("K4") => Some(4),
+        Some("K5") => Some(5),
+        _ => None, // KN or absent
+    };
+
+    let effective_k = match (k_flag, variant_k_num) {
+        (Some(flag), Some(from_variant)) if flag != from_variant => {
+            bail!(
+                "{problem_name}: --k {flag} conflicts with variant /{} (k={from_variant}). \
+                 Either omit the suffix or match the --k value.",
+                variant_k_str.unwrap()
+            );
+        }
+        (Some(flag), _) => flag,
+        (None, Some(from_variant)) => from_variant,
+        (None, None) => match default_k {
+            Some(d) => d,
+            None => bail!("{problem_name} requires --k <value>"),
+        },
+    };
+
+    // Build the variant map with the effective k
+    let mut variant = resolved_variant.clone();
+    variant.insert("k".to_string(), k_variant_str(effective_k).to_string());
+
+    Ok((effective_k, variant))
+}
+
+// ---------------------------------------------------------------------------
+// K-problem serialization
+// ---------------------------------------------------------------------------
+
+/// Serialize a KColoring instance given a graph and validated k.
+pub fn ser_kcoloring(
+    graph: SimpleGraph,
+    k: usize,
+) -> Result<(serde_json::Value, BTreeMap<String, String>)> {
+    match k {
+        2 => Ok((
+            ser(KColoring::<K2>::new(graph))?,
+            variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]),
+        )),
+        3 => Ok((
+            ser(KColoring::<K3>::new(graph))?,
+            variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]),
+        )),
+        _ => Ok((
+            ser(KColoring::<KN>::with_k(graph, k))?,
+            variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]),
+        )),
+    }
+}
+
+/// Serialize a KSatisfiability instance given clauses and validated k.
+pub fn ser_ksat(
+    num_vars: usize,
+    clauses: Vec<CNFClause>,
+    k: usize,
+) -> Result<(serde_json::Value, BTreeMap<String, String>)> {
+    match k {
+        2 => Ok((
+            ser(KSatisfiability::<K2>::new(num_vars, clauses))?,
+            variant_map(&[("k", "K2")]),
+        )),
+        3 => Ok((
+            ser(KSatisfiability::<K3>::new(num_vars, clauses))?,
+            variant_map(&[("k", "K3")]),
+        )),
+        _ => Ok((
+            ser(KSatisfiability::<KN>::new(num_vars, clauses))?,
+            variant_map(&[("k", "KN")]),
+        )),
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Parsing helpers
+// ---------------------------------------------------------------------------
+
+/// Parse semicolon-separated x,y pairs from a string.
+pub fn parse_positions<T: std::str::FromStr>(pos_str: &str, example: &str) -> Result<Vec<(T, T)>>
+where
+    T::Err: std::fmt::Display,
+{
+    pos_str
+        .split(';')
+        .map(|pair| {
+            let parts: Vec<&str> = pair.trim().split(',').collect();
+            if parts.len() != 2 {
+                bail!(
+                    "Invalid position '{}': expected format x,y (e.g., {example})",
+                    pair.trim()
+                );
+            }
+            let x: T = parts[0]
+                .trim()
+                .parse()
+                .map_err(|e| anyhow::anyhow!("Invalid x in '{}': {e}", pair.trim()))?;
+            let y: T = parts[1]
+                .trim()
+                .parse()
+                .map_err(|e| anyhow::anyhow!("Invalid y in '{}': {e}", pair.trim()))?;
+            Ok((x, y))
+        })
+        .collect()
+}
+
+// ---------------------------------------------------------------------------
+// Random generation (LCG-based)
+// ---------------------------------------------------------------------------
+
+/// LCG PRNG step — returns next state and a uniform f64 in [0, 1).
+pub fn lcg_step(state: &mut u64) -> f64 {
+    *state = state
+        .wrapping_mul(6364136223846793005)
+        .wrapping_add(1442695040888963407);
+    (*state >> 33) as f64 / (1u64 << 31) as f64
+}
+
+/// Initialize LCG state from seed or system time.
+pub fn lcg_init(seed: Option<u64>) -> u64 {
+    seed.unwrap_or_else(|| {
+        std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_nanos() as u64
+    })
+}
+
+/// Generate a random Erdos-Renyi graph using a simple LCG PRNG.
+pub fn create_random_graph(num_vertices: usize, edge_prob: f64, seed: Option<u64>) -> SimpleGraph {
+    let mut state = lcg_init(seed);
+    let mut edges = Vec::new();
+    for i in 0..num_vertices {
+        for j in (i + 1)..num_vertices {
+            let rand_val = lcg_step(&mut state);
+            if rand_val < edge_prob {
+                edges.push((i, j));
+            }
+        }
+    }
+    SimpleGraph::new(num_vertices, edges)
+}
+
+/// Generate random unique integer positions on a grid for KingsSubgraph/TriangularSubgraph.
+pub fn create_random_int_positions(num_vertices: usize, seed: Option<u64>) -> Vec<(i32, i32)> {
+    let mut state = lcg_init(seed);
+    let grid_size = (num_vertices as f64).sqrt().ceil() as i32 + 1;
+    let mut positions = std::collections::BTreeSet::new();
+    while positions.len() < num_vertices {
+        let x = (lcg_step(&mut state) * grid_size as f64) as i32;
+        let y = (lcg_step(&mut state) * grid_size as f64) as i32;
+        positions.insert((x, y));
+    }
+    positions.into_iter().collect()
+}
+
+/// Generate random float positions in [0, sqrt(N)] x [0, sqrt(N)] for UnitDiskGraph.
+pub fn create_random_float_positions(num_vertices: usize, seed: Option<u64>) -> Vec<(f64, f64)> {
+    let mut state = lcg_init(seed);
+    let side = (num_vertices as f64).sqrt();
+    (0..num_vertices)
+        .map(|_| {
+            let x = lcg_step(&mut state) * side;
+            let y = lcg_step(&mut state) * side;
+            (x, y)
+        })
+        .collect()
+}
+
+// ---------------------------------------------------------------------------
+// Small shared helpers
+// ---------------------------------------------------------------------------
+
+pub fn ser<T: Serialize>(problem: T) -> Result<serde_json::Value> {
+    Ok(serde_json::to_value(problem)?)
+}
+
+pub fn variant_map(pairs: &[(&str, &str)]) -> BTreeMap<String, String> {
+    pairs
+        .iter()
+        .map(|(k, v)| (k.to_string(), v.to_string()))
+        .collect()
+}

From 02c37f6d5ac534a21befa25f7183fbc107d37a14 Mon Sep 17 00:00:00 2001
From: GiggleLiu
Date: Fri, 27 Feb 2026 22:43:35 +0800
Subject: [PATCH 17/28] docs: add design for review-implementation as parallel
 subagents

Split review-implementation into two parallel subagents (structural +
quality) dispatched with fresh context. Integrates with executing-plans
after each batch and standalone /review-implementation.
Co-Authored-By: Claude Opus 4.6 --- .../2026-02-27-review-subagent-design.md | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 docs/plans/2026-02-27-review-subagent-design.md diff --git a/docs/plans/2026-02-27-review-subagent-design.md b/docs/plans/2026-02-27-review-subagent-design.md new file mode 100644 index 000000000..f457e41aa --- /dev/null +++ b/docs/plans/2026-02-27-review-subagent-design.md @@ -0,0 +1,100 @@ +# Design: Review-Implementation as Parallel Subagents + +**Date:** 2026-02-27 +**Status:** Approved + +## Problem + +The `review-implementation` skill runs inline in the main agent's context after implementation. This causes: +1. **Context bias** — the agent that just wrote the code is reviewing it, anchored to its own decisions +2. **No fresh perspective** — all implementation history pollutes the review +3. **No automatic trigger** — executing-plans has no review step after batches + +## Design + +### Split into Two Subagent Prompts + +One skill (`review-implementation`) dispatches two parallel subagents via `superpowers:code-reviewer`: + +| Subagent | Prompt file | Scope | When | +|----------|------------|-------|------| +| Structural reviewer | `structural-reviewer-prompt.md` | Model/rule checklists (16/14 items) + semantic review | If `src/models/` or `src/rules/` in diff | +| Quality reviewer | `quality-reviewer-prompt.md` | DRY, KISS, HC/LC, HCI, test quality | Always | + +### File Structure + +``` +.claude/skills/review-implementation/ +├── SKILL.md # Orchestrator: how main agent dispatches +├── structural-reviewer-prompt.md # Self-contained checklist for subagent +└── quality-reviewer-prompt.md # Self-contained quality review for subagent +``` + +### Integration with executing-plans + +New steps after each batch: + +``` +Step 2: Execute Batch (3 tasks) + ↓ +Step 2.5: Dispatch Review Subagents (parallel) + ├── structural-reviewer (if model/rule files in diff) + └── quality-reviewer (always) + ↓ +Step 2.6: Main Agent 
Addresses Findings + - Fix FAIL items automatically + - Report unfixable/ambiguous items to user + ↓ +Step 3: Report (implementation + review results + fixes) +``` + +Main agent determines diff via `git diff --name-only` against batch start SHA. + +### Standalone / add-model / add-rule Integration + +`/review-implementation` invocation: +1. Auto-detect what changed (git diff) +2. Dispatch structural + quality subagents in parallel +3. Collect results +4. Fix what it can automatically +5. Present consolidated report to user + +### Prompt Template Design + +**structural-reviewer-prompt.md** (self-contained): +- Full model checklist table (16 items) with Grep/Glob verification methods +- Full rule checklist table (14 items) +- Semantic review: evaluate() correctness, dims(), overhead accuracy, extract_solution +- `make test clippy` build check +- Placeholders: `{REVIEW_TYPE}`, `{PROBLEM_NAME}`, `{CATEGORY}`, `{FILE_STEM}`, `{SOURCE}`, `{TARGET}`, `{RULE_STEM}` +- Output: structured table with PASS/FAIL per item + +**quality-reviewer-prompt.md** (self-contained): +- DRY, KISS, HC/LC design principles with detection criteria +- HCI checks (error messages, discoverability, consistency, least surprise, feedback) +- Naive test detection (types-only, mirrors-impl, no-adversarial, trivial-only, etc.) 
+- Placeholders: `{DIFF_SUMMARY}`, `{CHANGED_FILES}`, `{PLAN_STEP}` +- Output: structured findings with severity (Critical/Important/Minor) + +### Main Agent Fix Strategy + +| Finding type | Action | +|-------------|--------| +| Missing file/registration (structural FAIL) | Fix automatically | +| Missing test case | Fix automatically | +| Semantic correctness issue (clear) | Fix automatically | +| Semantic correctness issue (ambiguous) | Report to user | +| Code quality (Important+) | Fix automatically | +| Code quality (Minor) | Report to user | + +### CLAUDE.md Changes + +- Update `review-implementation` skill description to mention subagent dispatch +- Add note about parallel subagent dispatch to executing-plans integration section +- Keep single `/review-implementation` entry point + +### What Doesn't Change + +- SDD (subagent-driven-development) keeps its own two-stage review (spec + generic code quality) +- The review-implementation invocation syntax stays the same +- The structured output format stays the same (tables with PASS/FAIL) From 624f7e5fd4c9b4eeceb42001ee2c20905dda2519 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 22:55:04 +0800 Subject: [PATCH 18/28] feat: add structural and quality reviewer prompt templates Self-contained prompt templates for review subagents: - structural-reviewer-prompt.md: model/rule checklists + semantic review - quality-reviewer-prompt.md: DRY, KISS, HC/LC, HCI, test quality Co-Authored-By: Claude Opus 4.6 --- .../quality-reviewer-prompt.md | 100 ++++++++++++++++ .../structural-reviewer-prompt.md | 111 ++++++++++++++++++ 2 files changed, 211 insertions(+) create mode 100644 .claude/skills/review-implementation/quality-reviewer-prompt.md create mode 100644 .claude/skills/review-implementation/structural-reviewer-prompt.md diff --git a/.claude/skills/review-implementation/quality-reviewer-prompt.md b/.claude/skills/review-implementation/quality-reviewer-prompt.md new file mode 100644 index 
000000000..803637652 --- /dev/null +++ b/.claude/skills/review-implementation/quality-reviewer-prompt.md @@ -0,0 +1,100 @@ +# Code Quality Review Agent + +You are reviewing code changes for quality in the `problemreductions` Rust codebase. You have NO context about prior implementation work -- review the code fresh. + +## What Changed + +{DIFF_SUMMARY} + +## Changed Files + +{CHANGED_FILES} + +## Plan Step Context (if applicable) + +{PLAN_STEP} + +## Git Range + +**Base:** {BASE_SHA} +**Head:** {HEAD_SHA} + +Start by running: +```bash +git diff --stat {BASE_SHA}..{HEAD_SHA} +git diff {BASE_SHA}..{HEAD_SHA} +``` + +Then read the changed files in full. + +## Review Criteria + +### Design Principles + +1. **DRY (Don't Repeat Yourself)** -- Is there duplicated logic that should be extracted into a shared helper? Check for copy-pasted code blocks across files (similar graph construction, weight handling, or solution extraction patterns). + +2. **KISS (Keep It Simple, Stupid)** -- Is the implementation unnecessarily complex? Look for: over-engineered abstractions, convoluted control flow, premature generalization, layers of indirection that add no value. + +3. **High Cohesion, Low Coupling (HC/LC)** -- Does each module/function/struct have a single, well-defined responsibility? + - **Low cohesion**: Function doing unrelated things. Each unit should have one reason to change. + - **High coupling**: Modules depending on each other's internals. + - **Mixed concerns**: A single file containing both problem logic and CLI/serialization logic. + - **God functions**: Functions longer than ~50 lines doing multiple conceptually distinct things. + +### HCI (if CLI/MCP files changed) + +Only check these if the diff touches `problemreductions-cli/`: + +4. **Error messages** -- Are they actionable? Bad: `"invalid parameter"`. Good: `"KColoring requires --k (e.g., --k 3)"`. +5. **Discoverability** -- Missing `--help` examples? Undocumented flags? 
Silent failures that should suggest alternatives? +6. **Consistency** -- Similar operations expressed similarly? Parameter names, output formats, error styles uniform? +7. **Least surprise** -- Output matches expectations? No contradictory output or silent data loss? +8. **Feedback** -- Tool confirms what it did? Echoes interpreted parameters for ambiguous operations? + +### Test Quality + +9. **Naive Test Detection** -- Flag tests that: + - **Only check types/shapes, not values**: e.g., `assert!(result.is_some())` without checking the solution is correct. + - **Mirror the implementation**: Tests recomputing the same formula as the code prove nothing. + - **Lack adversarial cases**: Only happy path. Tests must include infeasible configs and boundary cases. + - **Use trivial instances only**: Single-edge or 2-node tests may pass with bugs. Need 5+ vertex instances. + - **Closed-loop without verification**: Must verify extracted solution is **optimal** (compare brute-force on both source and target). + - **Assert count too low**: 1-2 asserts for non-trivial code is insufficient. 
+ +## Output Format + +You MUST output in this exact format: + +``` +## Code Quality Review + +### Design Principles +- DRY: OK / ISSUE -- [description with file:line] +- KISS: OK / ISSUE -- [description with file:line] +- HC/LC: OK / ISSUE -- [description with file:line] + +### HCI (if CLI/MCP changed) +- Error messages: OK / ISSUE -- [description] +- Discoverability: OK / ISSUE -- [description] +- Consistency: OK / ISSUE -- [description] +- Least surprise: OK / ISSUE -- [description] +- Feedback: OK / ISSUE -- [description] + +### Test Quality +- Naive test detection: OK / ISSUE + - [specific tests flagged with reason and file:line] + +### Issues + +#### Critical (Must Fix) +[Bugs, correctness issues, data loss risks] + +#### Important (Should Fix) +[Architecture problems, missing tests, poor error handling] + +#### Minor (Nice to Have) +[Code style, optimization opportunities] + +### Summary +- [list of action items with severity] +``` diff --git a/.claude/skills/review-implementation/structural-reviewer-prompt.md b/.claude/skills/review-implementation/structural-reviewer-prompt.md new file mode 100644 index 000000000..b24c7a3a6 --- /dev/null +++ b/.claude/skills/review-implementation/structural-reviewer-prompt.md @@ -0,0 +1,111 @@ +# Structural & Semantic Review Agent + +You are reviewing a new model or rule implementation for structural completeness and semantic correctness in the `problemreductions` Rust codebase. + +## Review Type: {REVIEW_TYPE} + +{REVIEW_PARAMS} + +## Instructions + +1. Run the structural checklist below using Grep and Glob tools +2. Run `make test clippy` to verify build +3. Read the implementation files and perform semantic review +4. Output results in the structured format at the end + +## Model Checklist + +Only run this section if REVIEW_TYPE includes "model". + +Given: problem name `P` = `{PROBLEM_NAME}`, category `C` = `{CATEGORY}`, file stem `F` = `{FILE_STEM}`. 
+ +| # | Check | How to verify | +|---|-------|--------------| +| 1 | Model file exists | `Glob("src/models/{C}/{F}.rs")` | +| 2 | `inventory::submit!` present | `Grep("inventory::submit", file)` | +| 3 | `#[derive(...Serialize, Deserialize)]` on struct | `Grep("Serialize.*Deserialize", file)` | +| 4 | `Problem` trait impl | `Grep("impl.*Problem for.*{P}", file)` | +| 5 | `OptimizationProblem` or `SatisfactionProblem` impl | `Grep("(OptimizationProblem\|SatisfactionProblem).*for.*{P}", file)` | +| 6 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | +| 7 | Test file exists | `Glob("src/unit_tests/models/{C}/{F}.rs")` | +| 8 | Test has creation test | `Grep("fn test_.*creation\|fn test_{F}.*basic", test_file)` | +| 9 | Test has evaluation test | `Grep("fn test_.*evaluat", test_file)` | +| 10 | Registered in `{C}/mod.rs` | `Grep("mod {F}", "src/models/{C}/mod.rs")` | +| 11 | Re-exported in `models/mod.rs` | `Grep("{P}", "src/models/mod.rs")` | +| 12 | CLI `load_problem` arm | `Grep('"{P}"', "problemreductions-cli/src/dispatch.rs")` | +| 13 | CLI `serialize_any_problem` arm | `Grep('"{P}".*try_ser', "problemreductions-cli/src/dispatch.rs")` | +| 14 | CLI `resolve_alias` entry | `Grep("{P}", "problemreductions-cli/src/problem_name.rs")` | +| 15 | Paper `display-name` entry | `Grep('"{P}"', "docs/paper/reductions.typ")` | +| 16 | Paper `problem-def` block | `Grep('problem-def.*"{P}"', "docs/paper/reductions.typ")` | + +## Rule Checklist + +Only run this section if REVIEW_TYPE includes "rule". + +Given: source `S` = `{SOURCE}`, target `T` = `{TARGET}`, rule file stem `R` = `{RULE_STEM}`, example stem `E` = `{EXAMPLE_STEM}`. 
+ +| # | Check | How to verify | +|---|-------|--------------| +| 1 | Rule file exists | `Glob("src/rules/{R}.rs")` | +| 2 | `#[reduction(...)]` macro present | `Grep("#\\[reduction", file)` | +| 3 | `ReductionResult` impl present | `Grep("impl.*ReductionResult", file)` | +| 4 | `ReduceTo` impl present | `Grep("impl.*ReduceTo", file)` | +| 5 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | +| 6 | Test file exists | `Glob("src/unit_tests/rules/{R}.rs")` | +| 7 | Closed-loop test present | `Grep("fn test_.*closed_loop\|fn test_.*to_.*basic", test_file)` | +| 8 | Registered in `rules/mod.rs` | `Grep("mod {R}", "src/rules/mod.rs")` | +| 9 | Example file exists | `Glob("examples/{E}.rs")` | +| 10 | Example has `pub fn run()` | `Grep("pub fn run", example_file)` | +| 11 | Example has `fn main()` | `Grep("fn main", example_file)` | +| 12 | `example_test!` registered | `Grep("example_test!\\({E}\\)", "tests/suites/examples.rs")` | +| 13 | `example_fn!` registered | `Grep("example_fn!.*{E}", "tests/suites/examples.rs")` | +| 14 | Paper `reduction-rule` entry | `Grep('reduction-rule.*"{S}".*"{T}"', "docs/paper/reductions.typ")` | + +## Build Check + +Run: +```bash +make test clippy +``` + +Report pass/fail. If tests fail, identify which tests. + +## Semantic Review + +### For Models: +1. **`evaluate()` correctness** -- Does it check feasibility before computing the objective? Does it return `SolutionSize::Invalid` / `false` for infeasible configs? +2. **`dims()` correctness** -- Does it return the actual configuration space? (e.g., `vec![2; n]` for binary) +3. **Size getter consistency** -- Do inherent getter methods (e.g., `num_vertices()`, `num_edges()`) match names used in overhead expressions? +4. **Weight handling** -- Are weights managed via inherent methods, not traits? + +### For Rules: +1. **`extract_solution` correctness** -- Does it correctly invert the reduction? Does the returned solution have the right length (source dimensions)? 
+2. **Overhead accuracy** -- Does `overhead = { field = "expr" }` reflect the actual size relationship? +3. **Example quality** -- Is it tutorial-style? Does the JSON export include both source and target data? +4. **Paper quality** -- Is the reduction-rule statement precise? Is the proof sketch sound? + +## Output Format + +You MUST output in this exact format: + +``` +## Review: {REVIEW_TYPE} {PROBLEM_NAME} + +### Structural Completeness +| # | Check | Status | +|---|-------|--------| +| 1 | ... | PASS / FAIL -- reason | + +### Build Status +- `make test`: PASS / FAIL +- `make clippy`: PASS / FAIL + +### Semantic Review +- evaluate()/extract_solution correctness: OK / ISSUE -- description +- dims() correctness: OK / ISSUE -- description +- [other checks]: OK / ISSUE -- description + +### Summary +- X/Y structural checks passed +- [list of action items for any failures] +``` From ed38d3399427c45202c33619ba4ec6e93c2640a3 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 22:56:00 +0800 Subject: [PATCH 19/28] refactor: rewrite review-implementation as subagent orchestrator SKILL.md now dispatches two parallel subagents (structural + quality) with fresh context instead of running checks inline. Detection uses git diff --diff-filter=A to find new model/rule files. 
Co-Authored-By: Claude Opus 4.6 --- .claude/skills/review-implementation/SKILL.md | 212 +++++++++--------- 1 file changed, 108 insertions(+), 104 deletions(-) diff --git a/.claude/skills/review-implementation/SKILL.md b/.claude/skills/review-implementation/SKILL.md index 886116961..082bc2bb0 100644 --- a/.claude/skills/review-implementation/SKILL.md +++ b/.claude/skills/review-implementation/SKILL.md @@ -1,141 +1,145 @@ --- name: review-implementation -description: Use after implementing a model or rule to verify completeness and correctness before committing +description: Use after implementing a model, rule, or any code change to verify completeness and correctness before committing --- # Review Implementation -Automated review checklist for verifying that a new model or rule implementation is complete. Run this after finishing `add-model` or `add-rule`, before committing. +Dispatches two parallel review subagents with fresh context (no implementation history bias): +- **Structural reviewer** -- model/rule checklists + semantic correctness (only for new models/rules) +- **Quality reviewer** -- DRY, KISS, HC/LC, HCI, test quality (always) ## Invocation -Auto-detects the implementation type from changed files. 
Can also be invoked with an explicit argument: -- `/review-implementation` -- auto-detect from `git diff` +- `/review-implementation` -- auto-detect from git diff - `/review-implementation model MaximumClique` -- review a specific model - `/review-implementation rule mis_qubo` -- review a specific rule +- `/review-implementation generic` -- code quality only (no structural checklist) ## Step 1: Detect What Changed -Use `git diff --name-only` (against main branch or last commit) to identify: -- Files in `src/models/` -> model review -- Files in `src/rules/` (not `mod.rs`, `traits.rs`, `cost.rs`, `graph.rs`, `registry.rs`) -> rule review -- Both -> run both reviews +Determine whether new model/rule files were added: + +```bash +# Check for NEW files (not just modifications) +git diff --name-only --diff-filter=A HEAD~1..HEAD +# Also check against main for branch-level changes +git diff --name-only --diff-filter=A main..HEAD +``` + +Detection rules: +- New file in `src/models/` (not `mod.rs`) -> **model review** (structural + quality) +- New file in `src/rules/` (not `mod.rs`, `traits.rs`, `cost.rs`, `graph.rs`, `registry.rs`) -> **rule review** (structural + quality) +- Only modified files (no new model/rule) -> **quality review only** +- Both new model and rule files -> dispatch structural for both + quality +- Explicit argument overrides auto-detection Extract the problem name(s) and rule source/target from the file paths. -## Step 2: Run Structural Checks - -For each detected change, run the appropriate checklist below. Report results as a table with pass/fail per item. - -### Model Checklist - -Given: problem name `P`, category `C`, file stem `F` (snake_case). 
- -| # | Check | Verification method | -|---|-------|-------------------| -| 1 | Model file exists | `Glob("src/models/{C}/{F}.rs")` | -| 2 | `inventory::submit!` present | `Grep("inventory::submit", file)` | -| 3 | `#[derive(...Serialize, Deserialize)]` on struct | `Grep("Serialize.*Deserialize", file)` | -| 4 | `Problem` trait impl | `Grep("impl.*Problem for.*{P}", file)` | -| 5 | `OptimizationProblem` or `SatisfactionProblem` impl | `Grep("(OptimizationProblem\|SatisfactionProblem).*for.*{P}", file)` | -| 6 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | -| 7 | Test file exists | `Glob("src/unit_tests/models/{C}/{F}.rs")` | -| 8 | Test has creation test | `Grep("fn test_.*creation\|fn test_{F}.*basic", test_file)` | -| 9 | Test has evaluation test | `Grep("fn test_.*evaluat", test_file)` | -| 10 | Registered in `{C}/mod.rs` | `Grep("mod {F}", "src/models/{C}/mod.rs")` | -| 11 | Re-exported in `models/mod.rs` | `Grep("{P}", "src/models/mod.rs")` | -| 12 | CLI `load_problem` arm | `Grep('"{P}"', "problemreductions-cli/src/dispatch.rs")` | -| 13 | CLI `serialize_any_problem` arm | `Grep('"{P}".*try_ser', "problemreductions-cli/src/dispatch.rs")` | -| 14 | CLI `resolve_alias` entry | `Grep("{P}", "problemreductions-cli/src/problem_name.rs")` | -| 15 | Paper `display-name` entry | `Grep('"{P}"', "docs/paper/reductions.typ")` | -| 16 | Paper `problem-def` block | `Grep('problem-def.*"{P}"', "docs/paper/reductions.typ")` | - -### Rule Checklist - -Given: source `S`, target `T`, rule file stem `R` = `{s}_{t}` (lowercase), example stem `E` = `reduction_{s}_to_{t}`. 
- -| # | Check | Verification method | -|---|-------|-------------------| -| 1 | Rule file exists | `Glob("src/rules/{R}.rs")` | -| 2 | `#[reduction(...)]` macro present | `Grep("#\\[reduction", file)` | -| 3 | `ReductionResult` impl present | `Grep("impl.*ReductionResult", file)` | -| 4 | `ReduceTo` impl present | `Grep("impl.*ReduceTo", file)` | -| 5 | `#[cfg(test)]` + `#[path = "..."]` test link | `Grep("#\\[path =", file)` | -| 6 | Test file exists | `Glob("src/unit_tests/rules/{R}.rs")` | -| 7 | Closed-loop test present | `Grep("fn test_.*closed_loop\|fn test_.*to_.*basic", test_file)` | -| 8 | Registered in `rules/mod.rs` | `Grep("mod {R}", "src/rules/mod.rs")` | -| 9 | Example file exists | `Glob("examples/{E}.rs")` | -| 10 | Example has `pub fn run()` | `Grep("pub fn run", example_file)` | -| 11 | Example has `fn main()` | `Grep("fn main", example_file)` | -| 12 | `example_test!` registered | `Grep("example_test!\\({E}\\)", "tests/suites/examples.rs")` | -| 13 | `example_fn!` registered | `Grep("example_fn!.*{E}", "tests/suites/examples.rs")` | -| 14 | Paper `reduction-rule` entry | `Grep('reduction-rule.*"{S}".*"{T}"', "docs/paper/reductions.typ")` | - -## Step 3: Run Build Checks - -After structural checks, run: +## Step 2: Prepare Subagent Context + +Get the git SHAs for the review range: ```bash -make test clippy +BASE_SHA=$(git merge-base main HEAD) # or HEAD~N for batch reviews +HEAD_SHA=$(git rev-parse HEAD) ``` -Report pass/fail. If tests fail, identify which tests and suggest fixes. 
+Get the diff summary and changed file list: + +```bash +git diff --stat $BASE_SHA..$HEAD_SHA +git diff --name-only $BASE_SHA..$HEAD_SHA +``` + +## Step 3: Dispatch Subagents in Parallel + +### Structural Reviewer (if new model/rule detected) + +Dispatch using `Task` tool with `subagent_type="superpowers:code-reviewer"`: + +- Read `structural-reviewer-prompt.md` from this skill directory +- Fill placeholders: + - `{REVIEW_TYPE}` -> "model", "rule", or "model + rule" + - `{REVIEW_PARAMS}` -> summary of what's being reviewed + - `{PROBLEM_NAME}`, `{CATEGORY}`, `{FILE_STEM}` -> for model reviews + - `{SOURCE}`, `{TARGET}`, `{RULE_STEM}`, `{EXAMPLE_STEM}` -> for rule reviews +- Prompt = filled template + +### Quality Reviewer (always) -## Step 4: Semantic Review (AI Judgment) +Dispatch using `Task` tool with `subagent_type="superpowers:code-reviewer"`: -Read the implementation files and assess: +- Read `quality-reviewer-prompt.md` from this skill directory +- Fill placeholders: + - `{DIFF_SUMMARY}` -> output of `git diff --stat` + - `{CHANGED_FILES}` -> list of changed files + - `{PLAN_STEP}` -> description of what was implemented (or "standalone review") + - `{BASE_SHA}`, `{HEAD_SHA}` -> git range +- Prompt = filled template -### For Models: -1. **`evaluate()` correctness** -- Does it check feasibility before computing the objective? Does it return `SolutionSize::Invalid` / `false` for infeasible configs? -2. **`dims()` correctness** -- Does it return the actual configuration space? (e.g., `vec![2; n]` for binary) -3. **Size getter consistency** -- Do the inherent getter methods (e.g., `num_vertices()`, `num_edges()`) match names used in overhead expressions? -4. **Weight handling** -- Are weights managed via inherent methods, not traits? +**Both subagents must be dispatched in parallel** (single message, two Task tool calls). -### For Rules: -1. **`extract_solution` correctness** -- Does it correctly invert the reduction? 
Does the returned solution have the right length (source dimensions)? -2. **Overhead accuracy** -- Does the `overhead = { field = "expr" }` reflect the actual size relationship? -3. **Example quality** -- Is it tutorial-style? Does it use the instance from the issue? Does the JSON export include both source and target data? -4. **Paper quality** -- Is the reduction-rule statement precise? Is the proof sketch sound? Is the example figure clear? +## Step 4: Collect and Address Findings -### Code Quality Principles (applies to both Models and Rules): -1. **DRY (Don't Repeat Yourself)** -- Is there duplicated logic that should be extracted into a shared helper, utility function, or common module? Check for copy-pasted code blocks across files (e.g., similar graph construction, weight handling, or solution extraction patterns). If duplication is found, suggest extracting shared logic. -2. **KISS (Keep It Simple, Stupid)** -- Is the implementation unnecessarily complex? Look for: over-engineered abstractions, convoluted control flow, premature generalization, or layers of indirection that add no value. The implementation should be as simple as possible while remaining correct and maintainable. +When both subagents return: -## Output Format +1. **Parse results** -- identify FAIL/ISSUE items from both reports +2. **Fix automatically** -- structural FAILs (missing registration, missing file), clear semantic issues, Important+ quality issues +3. **Report to user** -- ambiguous semantic issues, Minor quality items, anything you're unsure about +4. **Present consolidated report** combining both reviews -Present results as: +## Step 5: Present Consolidated Report + +Merge both subagent outputs into a single report: ``` -## Review: [Model/Rule] [Name] +## Review: [Model/Rule/Generic] [Name] -### Structural Completeness +### Structural Completeness (from structural reviewer) | # | Check | Status | |---|-------|--------| -| 1 | Model file exists | PASS | -| 2 | inventory::submit! 
| PASS | -| ... | ... | ... | -| N | Paper entry | FAIL -- missing display-name | - -### Build Status -- `make test`: PASS -- `make clippy`: PASS - -### Semantic Review -- evaluate() correctness: OK -- dims() correctness: OK -- DRY compliance: OK / [duplicated logic found in ...] -- KISS compliance: OK / [unnecessary complexity found in ...] -- [any other issues found] - -### Summary -- X/Y structural checks passed -- [list of action items for any failures] +... + +### Build Status (from structural reviewer) +- `make test`: PASS / FAIL +- `make clippy`: PASS / FAIL + +### Semantic Review (from structural reviewer) +... + +### Code Quality (from quality reviewer) +- DRY: OK / ... +- KISS: OK / ... +- HC/LC: OK / ... + +### HCI (from quality reviewer, if CLI/MCP changed) +... + +### Test Quality (from quality reviewer) +... + +### Fixes Applied +- [list of issues automatically fixed by main agent] + +### Remaining Items (needs user decision) +- [list of issues that need user input] ``` -## Integration with Other Skills +## Integration + +### With executing-plans + +After each batch in the executing-plans flow, the main agent should: +1. Record `BASE_SHA` before the batch starts +2. After batch completes, follow Steps 1-5 above +3. Fix findings before reporting to user +4. Include review results in the batch report + +### With add-model / add-rule + +At the end of these skills (after their verify step), invoke `/review-implementation` which dispatches subagents as described above. -This skill is called automatically at the end of: -- `add-model` (after Step 7: Verify) -- `add-rule` (after Step 6: Verify) +### Standalone -It can also be invoked standalone via `/review-implementation`. +Invoke directly via `/review-implementation` for any code change. 
From e01ac1e25990da2fb21179595c7067d508b6f558 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 22:56:33 +0800 Subject: [PATCH 20/28] docs: update review-implementation description in CLAUDE.md Reflects new subagent-based dispatch and executing-plans integration. Co-Authored-By: Claude Opus 4.6 --- .claude/CLAUDE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index e2fcd652e..f56410a89 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -7,7 +7,7 @@ Rust library for NP-hard problem reductions. Implements computational problems w - [issue-to-pr](skills/issue-to-pr/SKILL.md) -- Convert a GitHub issue into a PR with an implementation plan. Validates the issue against the appropriate checklist, then dispatches to `add-model` or `add-rule`. - [add-model](skills/add-model/SKILL.md) -- Add a new problem model. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. - [add-rule](skills/add-rule/SKILL.md) -- Add a new reduction rule. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. -- [review-implementation](skills/review-implementation/SKILL.md) -- Review a model or rule implementation for completeness. Auto-detects type from changed files. Called automatically at the end of `add-model`/`add-rule`, or standalone via `/review-implementation`. +- [review-implementation](skills/review-implementation/SKILL.md) -- Review implementation completeness by dispatching parallel subagents (structural + quality) with fresh context. Auto-detects new models/rules from git diff. Called automatically at the end of `add-model`/`add-rule`, after each `executing-plans` batch, or standalone via `/review-implementation`. - [release](skills/release/SKILL.md) -- Create a new crate release. Determines version bump from diff, verifies tests/clippy, then runs `make release`. 
## Commands From d9b38c40c1fed04da933c9c7a7785410eb222927 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 22:57:37 +0800 Subject: [PATCH 21/28] fix: resolve PR review comments (dedup wrappers, MCP overhead, doc fix) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Remove thin wrapper functions in create.rs, use util:: directly - Add overhead info to MCP show_problem edge JSON (matching CLI output) - Fix UnitDiskGraph variant type in design doc (f64 → i32) Co-Authored-By: Claude Opus 4.6 --- ...27-explicit-variant-declarations-design.md | 2 +- problemreductions-cli/src/commands/create.rs | 26 +++++-------------- problemreductions-cli/src/mcp/tools.rs | 8 ++++++ 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/docs/plans/2026-02-27-explicit-variant-declarations-design.md b/docs/plans/2026-02-27-explicit-variant-declarations-design.md index 530ed764d..ed86ca4ca 100644 --- a/docs/plans/2026-02-27-explicit-variant-declarations-design.md +++ b/docs/plans/2026-02-27-explicit-variant-declarations-design.md @@ -56,7 +56,7 @@ macro_rules! declare_variants { declare_variants! 
{ MaximumIndependentSet<SimpleGraph, i32> => "2^num_vertices", MaximumIndependentSet<KingsSubgraph, i32> => "2^num_vertices", - MaximumIndependentSet<UnitDiskGraph, f64> => "2^num_vertices", + MaximumIndependentSet<UnitDiskGraph, i32> => "2^num_vertices", } ``` diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index d14db5f76..350983eb6 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -530,18 +530,6 @@ fn parse_matrix(args: &CreateArgs) -> Result>> { .collect() } -fn create_random_graph(num_vertices: usize, edge_prob: f64, seed: Option<u64>) -> SimpleGraph { - util::create_random_graph(num_vertices, edge_prob, seed) -} - -fn create_random_int_positions(num_vertices: usize, seed: Option<u64>) -> Vec<(i32, i32)> { - util::create_random_int_positions(num_vertices, seed) -} - -fn create_random_float_positions(num_vertices: usize, seed: Option<u64>) -> Vec<(f64, f64)> { - util::create_random_float_positions(num_vertices, seed) -} - /// Handle `pred create --random ...` fn create_random( args: &CreateArgs, @@ -568,7 +556,7 @@ fn create_random( let weights = vec![1i32; num_vertices]; match graph_type { "KingsSubgraph" => { - let positions = create_random_int_positions(num_vertices, args.seed); + let positions = util::create_random_int_positions(num_vertices, args.seed); let graph = KingsSubgraph::new(positions); ( ser_vertex_weight_problem_with(canonical, graph, weights)?, ) } "TriangularSubgraph" => { - let positions = create_random_int_positions(num_vertices, args.seed); + let positions = util::create_random_int_positions(num_vertices, args.seed); let graph = TriangularSubgraph::new(positions); ( ser_vertex_weight_problem_with(canonical, graph, weights)?, ) } "UnitDiskGraph" => { let radius = args.radius.unwrap_or(1.0); - let positions = create_random_float_positions(num_vertices, args.seed); + let positions = util::create_random_float_positions(num_vertices, args.seed);
let graph = UnitDiskGraph::new(positions, radius); ( ser_vertex_weight_problem_with(canonical, graph, weights)?, @@ -597,7 +585,7 @@ fn create_random( if !(0.0..=1.0).contains(&edge_prob) { bail!("--edge-prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, args.seed); + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); let data = match canonical { "MaximumIndependentSet" => ser(MaximumIndependentSet::new(graph, weights))?, @@ -617,7 +605,7 @@ fn create_random( if !(0.0..=1.0).contains(&edge_prob) { bail!("--edge-prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, args.seed); + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); let num_edges = graph.num_edges(); let edge_weights = vec![1i32; num_edges]; let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); @@ -636,7 +624,7 @@ fn create_random( if !(0.0..=1.0).contains(&edge_prob) { bail!("--edge-prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, args.seed); + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); let num_edges = graph.num_edges(); let couplings = vec![1i32; num_edges]; let fields = vec![0i32; num_vertices]; @@ -653,7 +641,7 @@ fn create_random( if !(0.0..=1.0).contains(&edge_prob) { bail!("--edge-prob must be between 0.0 and 1.0"); } - let graph = create_random_graph(num_vertices, edge_prob, args.seed); + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); let (k, _variant) = util::validate_k_param(resolved_variant, args.k, Some(3), "KColoring")?; util::ser_kcoloring(graph, k)? 
diff --git a/problemreductions-cli/src/mcp/tools.rs b/problemreductions-cli/src/mcp/tools.rs index 21ca323ba..89d456279 100644 --- a/problemreductions-cli/src/mcp/tools.rs +++ b/problemreductions-cli/src/mcp/tools.rs @@ -192,15 +192,23 @@ impl McpServer { "variants": variants_json, "size_fields": &size_fields, "reduces_to": outgoing.iter().map(|e| { + let overhead: Vec<serde_json::Value> = e.overhead.output_size.iter() + .map(|(field, poly)| serde_json::json!({"field": field, "formula": poly.to_string()})) + .collect(); serde_json::json!({ "source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}, + "overhead": overhead, }) }).collect::<Vec<_>>(), "reduces_from": incoming.iter().map(|e| { + let overhead: Vec<serde_json::Value> = e.overhead.output_size.iter() + .map(|(field, poly)| serde_json::json!({"field": field, "formula": poly.to_string()})) + .collect(); serde_json::json!({ "source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}, + "overhead": overhead, }) }).collect::<Vec<_>>(), }); From 2b613dd22b8dbe56cace0b6416d6b9c73b45d718 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 22:59:08 +0800 Subject: [PATCH 22/28] chore: remove working plan files from branch These were implementation plans consumed during development and are no longer needed in the tree.
Co-Authored-By: Claude Opus 4.6 --- ...27-explicit-variant-declarations-design.md | 102 --- ...2-27-explicit-variant-declarations-impl.md | 761 ------------------ .../2026-02-27-review-subagent-design.md | 100 --- 3 files changed, 963 deletions(-) delete mode 100644 docs/plans/2026-02-27-explicit-variant-declarations-design.md delete mode 100644 docs/plans/2026-02-27-explicit-variant-declarations-impl.md delete mode 100644 docs/plans/2026-02-27-review-subagent-design.md diff --git a/docs/plans/2026-02-27-explicit-variant-declarations-design.md b/docs/plans/2026-02-27-explicit-variant-declarations-design.md deleted file mode 100644 index ed86ca4ca..000000000 --- a/docs/plans/2026-02-27-explicit-variant-declarations-design.md +++ /dev/null @@ -1,102 +0,0 @@ -# Explicit Variant Declarations with Per-Variant Complexity - -**Date:** 2026-02-27 -**Status:** Approved - -## Problem - -Variants currently emerge implicitly from `#[reduction]` registrations. This means: -- A variant can't exist without a reduction -- There's no place to attach per-variant metadata (e.g., worst-case time complexity) -- No compile-time validation that reductions reference valid variants - -## Design - -### New types - -**`DeclaredVariant` marker trait** (`src/traits.rs`): -```rust -pub trait DeclaredVariant {} -``` - -**`VariantEntry` inventory struct** (new file `src/registry/variant.rs`): -```rust -pub struct VariantEntry { - pub name: &'static str, - pub variant_fn: fn() -> Vec<(&'static str, &'static str)>, - pub complexity: &'static str, // worst-case time complexity, e.g., "2^num_vertices" -} -inventory::collect!(VariantEntry); -``` - -### `declare_variants!` macro - -Declarative macro that generates both `DeclaredVariant` trait impls and `VariantEntry` inventory submissions: - -```rust -macro_rules! declare_variants { - ($($ty:ty => $complexity:expr),+ $(,)?) => { - $( - impl $crate::traits::DeclaredVariant for $ty {} - - inventory::submit! 
{ - $crate::registry::VariantEntry { - name: <$ty as $crate::traits::Problem>::NAME, - variant_fn: || <$ty as $crate::traits::Problem>::variant(), - complexity: $complexity, - } - } - )+ - }; -} -``` - -**Usage** (in each model file, e.g., `maximum_independent_set.rs`): -```rust -declare_variants! { - MaximumIndependentSet => "2^num_vertices", - MaximumIndependentSet => "2^num_vertices", - MaximumIndependentSet => "2^num_vertices", -} -``` - -### Compile-time checking in `#[reduction]` - -The `#[reduction]` proc macro generates a `DeclaredVariant` assertion after the impl block: - -```rust -const _: () = { - fn _assert() {} - _assert::(); - _assert::(); -}; -``` - -This produces a compile error if either source or target variant is not declared via `declare_variants!`. - -### Graph construction change - -`ReductionGraph::new()` changes: -1. **First:** Build nodes from `VariantEntry` inventory (each entry becomes a node with complexity metadata) -2. **Then:** Build edges from `ReductionEntry` inventory (edges connect existing nodes) -3. Edges referencing undeclared variants would be caught at compile time by `#[reduction]` - -### Display changes - -- `pred show `: Shows complexity per variant in the variants list -- Graph JSON export: Adds `complexity` field per node -- `pred show` JSON output: Includes complexity in variant info - -## Decisions - -| Decision | Choice | Rationale | -|----------|--------|-----------| -| Declaration location | Model file | All variants of a problem are visible in one place | -| Macro syntax | `declare_variants!` (macro_rules!) | Good balance of conciseness vs. 
complexity | -| Type specification | Concrete Rust types | Enables compile-time checking via trait bounds | -| Validation | Compile error | Strictest; catches mistakes early via `DeclaredVariant` trait | -| Complexity format | String expression (e.g., `"2^num_vertices"`) | Consistent with overhead expression syntax | - -## Scope - -Every model file that has variants needs a `declare_variants!` call. This touches all files in `src/models/`. diff --git a/docs/plans/2026-02-27-explicit-variant-declarations-impl.md b/docs/plans/2026-02-27-explicit-variant-declarations-impl.md deleted file mode 100644 index 3e1e3ff4a..000000000 --- a/docs/plans/2026-02-27-explicit-variant-declarations-impl.md +++ /dev/null @@ -1,761 +0,0 @@ -# Explicit Variant Declarations Implementation Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Make problem variants first-class citizens with explicit declarations and per-variant time complexity metadata. - -**Architecture:** New `VariantEntry` inventory + `declare_variants!` macro in model files. `DeclaredVariant` marker trait enables compile-time checking in `#[reduction]`. `ReductionGraph` builds nodes from `VariantEntry` instead of inferring them from edges. - -**Tech Stack:** Rust, inventory crate, macro_rules!, proc_macro (existing `#[reduction]`), petgraph - ---- - -### Task 1: Add DeclaredVariant trait - -**Files:** -- Modify: `src/traits.rs` - -**Step 1: Add the marker trait** - -At the end of `src/traits.rs` (before the `#[cfg(test)]` block), add: - -```rust -/// Marker trait for explicitly declared problem variants. -/// -/// Implemented automatically by [`declare_variants!`] for each concrete type. -/// The [`#[reduction]`] proc macro checks this trait at compile time to ensure -/// all reduction source/target types have been declared. 
-pub trait DeclaredVariant {} -``` - -**Step 2: Build** - -Run: `cargo build` -Expected: PASS (trait is unused so far) - -**Step 3: Commit** - -```bash -git add src/traits.rs -git commit -m "feat: add DeclaredVariant marker trait" -``` - ---- - -### Task 2: Add VariantEntry struct and inventory - -**Files:** -- Create: `src/registry/variant.rs` -- Modify: `src/registry/mod.rs` - -**Step 1: Create the variant entry module** - -Create `src/registry/variant.rs`: - -```rust -//! Explicit variant registration via inventory. - -/// A registered problem variant entry. -/// -/// Submitted by [`declare_variants!`] for each concrete problem type. -/// The reduction graph uses these entries to build nodes with complexity metadata. -pub struct VariantEntry { - /// Problem name (from `Problem::NAME`). - pub name: &'static str, - /// Function returning variant key-value pairs (from `Problem::variant()`). - pub variant_fn: fn() -> Vec<(&'static str, &'static str)>, - /// Worst-case time complexity expression (e.g., `"2^num_vertices"`). - pub complexity: &'static str, -} - -impl VariantEntry { - /// Get the variant by calling the function. - pub fn variant(&self) -> Vec<(&'static str, &'static str)> { - (self.variant_fn)() - } -} - -impl std::fmt::Debug for VariantEntry { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("VariantEntry") - .field("name", &self.name) - .field("variant", &self.variant()) - .field("complexity", &self.complexity) - .finish() - } -} - -inventory::collect!(VariantEntry); -``` - -**Step 2: Export from registry module** - -In `src/registry/mod.rs`, add the module declaration and re-export: - -```rust -pub mod variant; -pub use variant::VariantEntry; -``` - -**Step 3: Build** - -Run: `cargo build` -Expected: PASS - -**Step 4: Commit** - -```bash -git add src/registry/variant.rs src/registry/mod.rs -git commit -m "feat: add VariantEntry inventory struct" -``` - ---- - -### Task 3: Create declare_variants! 
macro - -**Files:** -- Modify: `src/variant.rs` (where `variant_params!` is defined) - -**Step 1: Add the macro** - -At the end of `src/variant.rs`, add: - -```rust -/// Declare explicit problem variants with per-variant complexity metadata. -/// -/// Each entry generates: -/// 1. A `DeclaredVariant` trait impl for compile-time checking -/// 2. A `VariantEntry` inventory submission for runtime graph building -/// -/// # Example -/// -/// ```ignore -/// declare_variants! { -/// MaximumIndependentSet => "2^num_vertices", -/// MaximumIndependentSet => "2^num_vertices", -/// } -/// ``` -#[macro_export] -macro_rules! declare_variants { - ($($ty:ty => $complexity:expr),+ $(,)?) => { - $( - impl $crate::traits::DeclaredVariant for $ty {} - - $crate::inventory::submit! { - $crate::registry::VariantEntry { - name: <$ty as $crate::traits::Problem>::NAME, - variant_fn: || <$ty as $crate::traits::Problem>::variant(), - complexity: $complexity, - } - } - )+ - }; -} -``` - -**Step 2: Check inventory re-export** - -Verify that `inventory` is re-exported from the main crate. Check `src/lib.rs` for `pub use inventory;` or similar. If not present, add: - -```rust -pub use inventory; -``` - -**Step 3: Build** - -Run: `cargo build` -Expected: PASS - -**Step 4: Commit** - -```bash -git add src/variant.rs src/lib.rs -git commit -m "feat: add declare_variants! macro" -``` - ---- - -### Task 4: Add declare_variants! 
to graph model files - -**Files (9 model files):** -- Modify: `src/models/graph/maximum_independent_set.rs` -- Modify: `src/models/graph/minimum_vertex_cover.rs` -- Modify: `src/models/graph/maximum_clique.rs` -- Modify: `src/models/graph/minimum_dominating_set.rs` -- Modify: `src/models/graph/maximum_matching.rs` -- Modify: `src/models/graph/traveling_salesman.rs` -- Modify: `src/models/graph/max_cut.rs` -- Modify: `src/models/graph/kcoloring.rs` -- Modify: `src/models/graph/maximal_is.rs` (optional — no reductions) - -**Step 1: Add declarations to maximum_independent_set.rs** - -Add at the end of the file (before `#[cfg(test)]`): - -```rust -declare_variants! { - MaximumIndependentSet => "2^num_vertices", - MaximumIndependentSet => "2^num_vertices", - MaximumIndependentSet => "2^num_vertices", - MaximumIndependentSet => "2^num_vertices", -} -``` - -Ensure the geometry graph type imports are present: -```rust -use crate::graphs::{KingsSubgraph, TriangularSubgraph, UnitDiskGraph}; -``` - -**Step 2: Add declarations to minimum_vertex_cover.rs** - -```rust -declare_variants! { - MinimumVertexCover => "2^num_vertices", -} -``` - -**Step 3: Add declarations to maximum_clique.rs** - -```rust -declare_variants! { - MaximumClique => "2^num_vertices", -} -``` - -**Step 4: Add declarations to minimum_dominating_set.rs** - -```rust -declare_variants! { - MinimumDominatingSet => "2^num_vertices", -} -``` - -**Step 5: Add declarations to maximum_matching.rs** - -```rust -declare_variants! { - MaximumMatching => "2^num_vertices", -} -``` - -**Step 6: Add declarations to traveling_salesman.rs** - -```rust -declare_variants! { - TravelingSalesman => "num_vertices!", -} -``` - -**Step 7: Add declarations to max_cut.rs** - -```rust -declare_variants! { - MaxCut => "2^num_vertices", -} -``` - -**Step 8: Add declarations to kcoloring.rs** - -```rust -use crate::graphs::SimpleGraph; -use crate::variant::{KN, K2, K3, K4, K5}; - -declare_variants! 
{ - KColoring => "k^num_vertices", - KColoring => "2^num_vertices", - KColoring => "3^num_vertices", - KColoring => "4^num_vertices", - KColoring => "5^num_vertices", -} -``` - -**Step 9: Build** - -Run: `cargo build` -Expected: PASS - -**Step 10: Commit** - -```bash -git add src/models/graph/ -git commit -m "feat: add declare_variants! to graph model files" -``` - ---- - -### Task 5: Add declare_variants! to optimization, satisfiability, set, and specialized model files - -**Files (9 model files):** -- Modify: `src/models/optimization/qubo.rs` -- Modify: `src/models/optimization/spin_glass.rs` -- Modify: `src/models/optimization/ilp.rs` -- Modify: `src/models/satisfiability/sat.rs` -- Modify: `src/models/satisfiability/ksat.rs` -- Modify: `src/models/set/maximum_set_packing.rs` -- Modify: `src/models/set/minimum_set_covering.rs` -- Modify: `src/models/specialized/circuit.rs` -- Modify: `src/models/specialized/factoring.rs` - -**Step 1: qubo.rs** - -```rust -declare_variants! { - QUBO => "2^num_vars", -} -``` - -**Step 2: spin_glass.rs** - -```rust -use crate::graphs::SimpleGraph; - -declare_variants! { - SpinGlass => "2^num_vertices", - SpinGlass => "2^num_vertices", -} -``` - -**Step 3: ilp.rs** - -```rust -declare_variants! { - ILP => "exp(num_variables)", -} -``` - -**Step 4: sat.rs** - -```rust -declare_variants! { - Satisfiability => "2^num_variables", -} -``` - -**Step 5: ksat.rs** - -```rust -use crate::variant::{KN, K2, K3}; - -declare_variants! { - KSatisfiability => "2^num_variables", - KSatisfiability => "2^num_variables", - KSatisfiability => "2^num_variables", -} -``` - -**Step 6: maximum_set_packing.rs** - -```rust -declare_variants! { - MaximumSetPacking => "2^num_sets", - MaximumSetPacking => "2^num_sets", -} -``` - -**Step 7: minimum_set_covering.rs** - -```rust -declare_variants! { - MinimumSetCovering => "2^num_sets", -} -``` - -**Step 8: circuit.rs** - -```rust -declare_variants! 
{ - CircuitSAT => "2^num_inputs", -} -``` - -**Step 9: factoring.rs** - -```rust -declare_variants! { - Factoring => "exp(sqrt(num_bits))", -} -``` - -**Step 10: Build and test** - -Run: `cargo build && cargo test` -Expected: PASS - -**Step 11: Commit** - -```bash -git add src/models/optimization/ src/models/satisfiability/ src/models/set/ src/models/specialized/ -git commit -m "feat: add declare_variants! to remaining model files" -``` - ---- - -### Task 6: Update #[reduction] proc macro to check DeclaredVariant - -**Files:** -- Modify: `problemreductions-macros/src/lib.rs` - -**Step 1: Add DeclaredVariant assertion to generate_reduction_entry()** - -In the `generate_reduction_entry` function, after the `inventory::submit!` block, add a compile-time assertion. Find the section that builds the final `output` tokens (around line 260-282) and append: - -```rust -// After the inventory::submit! block, add: -let declared_check = quote! { - const _: () = { - fn _assert_declared_variant() {} - _assert_declared_variant::<#source_type>(); - _assert_declared_variant::<#target_type>(); - }; -}; -``` - -Include `declared_check` in the final output token stream. - -**Step 2: Build** - -Run: `cargo build` -Expected: PASS (all variants are already declared from Tasks 4-5) - -**Step 3: Verify enforcement works** - -Temporarily comment out one variant from a `declare_variants!` call (e.g., remove `MaximumIndependentSet` from MIS), then build: - -Run: `cargo build 2>&1 | head -20` -Expected: Compile error mentioning `DeclaredVariant` not implemented for `MaximumIndependentSet` - -Restore the commented-out variant. 
- -**Step 4: Commit** - -```bash -git add problemreductions-macros/src/lib.rs -git commit -m "feat: #[reduction] now checks DeclaredVariant at compile time" -``` - ---- - -### Task 7: Update ReductionGraph to build nodes from VariantEntry - -**Files:** -- Modify: `src/rules/graph.rs` - -**Step 1: Write a test for variant complexity in the graph** - -In `src/unit_tests/rules/graph.rs`, add: - -```rust -#[test] -fn test_variant_entry_complexity_available() { - // VariantEntry inventory should have entries with complexity info - let entries: Vec<_> = inventory::iter::<VariantEntry>.into_iter().collect(); - assert!(!entries.is_empty(), "VariantEntry inventory should not be empty"); - - // Check MIS has a variant with complexity - let mis_entry = entries.iter().find(|e| e.name == "MaximumIndependentSet"); - assert!(mis_entry.is_some(), "MIS should have a VariantEntry"); - assert!(!mis_entry.unwrap().complexity.is_empty(), "complexity should not be empty"); -} -``` - -**Step 2: Run test** - -Run: `cargo test test_variant_entry_complexity_available` -Expected: PASS (VariantEntry submissions exist from Tasks 4-5) - -**Step 3: Add complexity field to VariantNode** - -In `src/rules/graph.rs`, update `VariantNode`: - -```rust -#[derive(Debug, Clone)] -struct VariantNode { - name: &'static str, - variant: BTreeMap<String, String>, - complexity: &'static str, -} -``` - -**Step 4: Update ReductionGraph::new() to build nodes from VariantEntry first** - -Replace the node-building logic in `new()`. The new approach: - -1. First pass: create nodes from `VariantEntry` inventory -2.
Second pass: create edges from `ReductionEntry` inventory (nodes must already exist) - -```rust -pub fn new() -> Self { - let mut graph = DiGraph::new(); - let mut nodes: Vec = Vec::new(); - let mut node_index: HashMap = HashMap::new(); - let mut name_to_nodes: HashMap<&'static str, Vec> = HashMap::new(); - - // Helper to ensure a variant node exists - let mut ensure_node = |name: &'static str, - variant: BTreeMap, - complexity: &'static str, - nodes: &mut Vec, - graph: &mut DiGraph, - node_index: &mut HashMap, - name_to_nodes: &mut HashMap<&'static str, Vec>| - -> NodeIndex { - let vref = VariantRef { - name: name.to_string(), - variant: variant.clone(), - }; - if let Some(&idx) = node_index.get(&vref) { - idx - } else { - let node_id = nodes.len(); - nodes.push(VariantNode { name, variant, complexity }); - let idx = graph.add_node(node_id); - node_index.insert(vref, idx); - name_to_nodes.entry(name).or_default().push(idx); - idx - } - }; - - // Phase 1: Build nodes from VariantEntry inventory - for entry in inventory::iter:: { - let variant = Self::variant_to_map(&entry.variant()); - ensure_node( - entry.name, - variant, - entry.complexity, - &mut nodes, - &mut graph, - &mut node_index, - &mut name_to_nodes, - ); - } - - // Phase 2: Build edges from ReductionEntry inventory - for entry in inventory::iter:: { - let source_variant = Self::variant_to_map(&entry.source_variant()); - let target_variant = Self::variant_to_map(&entry.target_variant()); - - // Nodes should already exist from Phase 1 (enforced by #[reduction] compile check). - // Fall back to creating them with empty complexity for backwards compatibility. 
- let src_idx = ensure_node( - entry.source_name, - source_variant, - "", - &mut nodes, - &mut graph, - &mut node_index, - &mut name_to_nodes, - ); - let dst_idx = ensure_node( - entry.target_name, - target_variant, - "", - &mut nodes, - &mut graph, - &mut node_index, - &mut name_to_nodes, - ); - - let overhead = entry.overhead(); - if graph.find_edge(src_idx, dst_idx).is_none() { - graph.add_edge( - src_idx, - dst_idx, - ReductionEdgeData { - overhead, - reduce_fn: entry.reduce_fn, - }, - ); - } - } - - Self { graph, nodes, name_to_nodes } -} -``` - -**Step 5: Add complexity getter to ReductionGraph** - -```rust -/// Get the complexity expression for a specific variant. -pub fn variant_complexity( - &self, - name: &str, - variant: &BTreeMap, -) -> Option<&'static str> { - let idx = self.lookup_node(name, variant)?; - let node = &self.nodes[self.graph[idx]]; - if node.complexity.is_empty() { - None - } else { - Some(node.complexity) - } -} -``` - -**Step 6: Write test for variant_complexity** - -```rust -#[test] -fn test_variant_complexity() { - let graph = ReductionGraph::new(); - let variant = ReductionGraph::variant_to_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); - let complexity = graph.variant_complexity("MaximumIndependentSet", &variant); - assert!(complexity.is_some()); - assert!(!complexity.unwrap().is_empty()); -} -``` - -**Step 7: Build and test** - -Run: `cargo test` -Expected: PASS - -**Step 8: Commit** - -```bash -git add src/rules/graph.rs src/unit_tests/rules/graph.rs -git commit -m "feat: ReductionGraph builds nodes from VariantEntry with complexity" -``` - ---- - -### Task 8: Update JSON export with complexity field - -**Files:** -- Modify: `src/rules/graph.rs` (NodeJson struct and to_json()) - -**Step 1: Add complexity to NodeJson** - -```rust -#[derive(Debug, Clone, Serialize)] -pub(crate) struct NodeJson { - pub(crate) name: String, - pub(crate) variant: BTreeMap, - pub(crate) category: String, - pub(crate) doc_path: String, - /// 
Worst-case time complexity expression (empty if not declared). - pub(crate) complexity: String, -} -``` - -**Step 2: Update to_json() to populate complexity** - -In the node-building section of `to_json()`, add: - -```rust -let complexity = self.nodes[i].complexity.to_string(); -// ... in NodeJson construction: -NodeJson { - name: node.name.to_string(), - variant: node.variant.clone(), - category, - doc_path, - complexity, -} -``` - -**Step 3: Build and test** - -Run: `cargo test` -Expected: PASS (existing tests may need updating if they assert exact JSON structure) - -**Step 4: Commit** - -```bash -git add src/rules/graph.rs -git commit -m "feat: include complexity in graph JSON export" -``` - ---- - -### Task 9: Update CLI `pred show` to display complexity - -**Files:** -- Modify: `problemreductions-cli/src/commands/graph.rs` - -**Step 1: Add complexity to variant display** - -In the `show_problem_inner` function, update the variants section. For each variant, also show complexity. - -Find where variants are printed (human-readable output) and add complexity: - -``` -Variants: - /SimpleGraph/i32 complexity: 2^num_vertices - /KingsSubgraph/i32 complexity: 2^num_vertices -``` - -**Step 2: Add complexity to JSON output** - -In the JSON output path of `show_problem_inner`, include complexity per variant. - -**Step 3: Update MCP show_problem_inner** - -In `problemreductions-cli/src/mcp/tools.rs`, update the MCP `show_problem` output to include complexity per variant. - -**Step 4: Build and test** - -Run: `cargo build && cargo test` -Expected: PASS - -**Step 5: Smoke test** - -Run: `cargo run -p problemreductions-cli -- show MIS` -Expected: Variants section shows complexity for each variant. 
- -**Step 6: Commit** - -```bash -git add problemreductions-cli/src/commands/graph.rs problemreductions-cli/src/mcp/tools.rs -git commit -m "feat: display per-variant complexity in pred show" -``` - ---- - -### Task 10: Update graph JSON test data - -**Files:** -- Modify: `tests/data/reduction_graph.json` (if it exists and is checked in tests) -- Modify: Any tests that assert exact JSON structure - -**Step 1: Regenerate graph JSON** - -Run: `make rust-export` - -**Step 2: Update any snapshot tests** - -Check for tests that compare against stored JSON. Update expected values. - -**Step 3: Run full test suite** - -Run: `make check` -Expected: PASS (fmt + clippy + all tests) - -**Step 4: Commit** - -```bash -git add -A -git commit -m "chore: update test data for variant complexity" -``` - ---- - -### Task 11: Final verification - -**Step 1: Run full CI check** - -Run: `make check` -Expected: PASS - -**Step 2: Run CLI demo** - -Run: `make cli-demo` -Expected: PASS - -**Step 3: Test compile-time enforcement** - -Temporarily add a bogus reduction (or comment out a declare_variants entry) and verify the build fails with a clear error about `DeclaredVariant`. - -**Step 4: Verify JSON export** - -Run: `cargo run -p problemreductions-cli -- show MIS --json | python3 -m json.tool | head -30` -Expected: JSON includes complexity per variant. diff --git a/docs/plans/2026-02-27-review-subagent-design.md b/docs/plans/2026-02-27-review-subagent-design.md deleted file mode 100644 index f457e41aa..000000000 --- a/docs/plans/2026-02-27-review-subagent-design.md +++ /dev/null @@ -1,100 +0,0 @@ -# Design: Review-Implementation as Parallel Subagents - -**Date:** 2026-02-27 -**Status:** Approved - -## Problem - -The `review-implementation` skill runs inline in the main agent's context after implementation. This causes: -1. **Context bias** — the agent that just wrote the code is reviewing it, anchored to its own decisions -2. 
**No fresh perspective** — all implementation history pollutes the review -3. **No automatic trigger** — executing-plans has no review step after batches - -## Design - -### Split into Two Subagent Prompts - -One skill (`review-implementation`) dispatches two parallel subagents via `superpowers:code-reviewer`: - -| Subagent | Prompt file | Scope | When | -|----------|------------|-------|------| -| Structural reviewer | `structural-reviewer-prompt.md` | Model/rule checklists (16/14 items) + semantic review | If `src/models/` or `src/rules/` in diff | -| Quality reviewer | `quality-reviewer-prompt.md` | DRY, KISS, HC/LC, HCI, test quality | Always | - -### File Structure - -``` -.claude/skills/review-implementation/ -├── SKILL.md # Orchestrator: how main agent dispatches -├── structural-reviewer-prompt.md # Self-contained checklist for subagent -└── quality-reviewer-prompt.md # Self-contained quality review for subagent -``` - -### Integration with executing-plans - -New steps after each batch: - -``` -Step 2: Execute Batch (3 tasks) - ↓ -Step 2.5: Dispatch Review Subagents (parallel) - ├── structural-reviewer (if model/rule files in diff) - └── quality-reviewer (always) - ↓ -Step 2.6: Main Agent Addresses Findings - - Fix FAIL items automatically - - Report unfixable/ambiguous items to user - ↓ -Step 3: Report (implementation + review results + fixes) -``` - -Main agent determines diff via `git diff --name-only` against batch start SHA. - -### Standalone / add-model / add-rule Integration - -`/review-implementation` invocation: -1. Auto-detect what changed (git diff) -2. Dispatch structural + quality subagents in parallel -3. Collect results -4. Fix what it can automatically -5. 
Present consolidated report to user - -### Prompt Template Design - -**structural-reviewer-prompt.md** (self-contained): -- Full model checklist table (16 items) with Grep/Glob verification methods -- Full rule checklist table (14 items) -- Semantic review: evaluate() correctness, dims(), overhead accuracy, extract_solution -- `make test clippy` build check -- Placeholders: `{REVIEW_TYPE}`, `{PROBLEM_NAME}`, `{CATEGORY}`, `{FILE_STEM}`, `{SOURCE}`, `{TARGET}`, `{RULE_STEM}` -- Output: structured table with PASS/FAIL per item - -**quality-reviewer-prompt.md** (self-contained): -- DRY, KISS, HC/LC design principles with detection criteria -- HCI checks (error messages, discoverability, consistency, least surprise, feedback) -- Naive test detection (types-only, mirrors-impl, no-adversarial, trivial-only, etc.) -- Placeholders: `{DIFF_SUMMARY}`, `{CHANGED_FILES}`, `{PLAN_STEP}` -- Output: structured findings with severity (Critical/Important/Minor) - -### Main Agent Fix Strategy - -| Finding type | Action | -|-------------|--------| -| Missing file/registration (structural FAIL) | Fix automatically | -| Missing test case | Fix automatically | -| Semantic correctness issue (clear) | Fix automatically | -| Semantic correctness issue (ambiguous) | Report to user | -| Code quality (Important+) | Fix automatically | -| Code quality (Minor) | Report to user | - -### CLAUDE.md Changes - -- Update `review-implementation` skill description to mention subagent dispatch -- Add note about parallel subagent dispatch to executing-plans integration section -- Keep single `/review-implementation` entry point - -### What Doesn't Change - -- SDD (subagent-driven-development) keeps its own two-stage review (spec + generic code quality) -- The review-implementation invocation syntax stays the same -- The structured output format stays the same (tables with PASS/FAIL) From ff7687c268c7632ac23444c9192b38dbf33f2c41 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 23:03:33 
+0800 Subject: [PATCH 23/28] feat: add copilot-review step after PR creation Instructs the agent to run `make copilot-review` after creating a PR to get GitHub Copilot code review feedback. Co-Authored-By: Claude Opus 4.6 --- .claude/skills/review-implementation/SKILL.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.claude/skills/review-implementation/SKILL.md b/.claude/skills/review-implementation/SKILL.md index 082bc2bb0..9b7e0acdb 100644 --- a/.claude/skills/review-implementation/SKILL.md +++ b/.claude/skills/review-implementation/SKILL.md @@ -136,6 +136,10 @@ After each batch in the executing-plans flow, the main agent should: 3. Fix findings before reporting to user 4. Include review results in the batch report +### Copilot Review (after PR creation) + +After creating a PR (from any flow), run `make copilot-review` to request GitHub Copilot code review on the PR. + ### With add-model / add-rule At the end of these skills (after their verify step), invoke `/review-implementation` which dispatches subagents as described above. 
From e8f08ad822704fe9a3a7c12948c8e00099e02ac8 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 23:08:25 +0800 Subject: [PATCH 24/28] copilot review --- .claude/CLAUDE.md | 1 + Makefile | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index f56410a89..baa7a5793 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -35,6 +35,7 @@ make cli # Build the pred CLI tool (release mode) make cli-demo # Run closed-loop CLI demo (exercises all commands) make mcp-test # Run MCP server tests (unit + integration) make run-plan # Execute a plan with Claude autorun +make copilot-review # Request Copilot code review on current PR make release V=x.y.z # Tag and push a new release (CI publishes to crates.io) ``` diff --git a/Makefile b/Makefile index a41d69bd9..68317c949 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Makefile for problemreductions -.PHONY: help build test mcp-test fmt clippy doc mdbook paper examples clean coverage rust-export compare qubo-testdata export-schemas release run-plan diagrams jl-testdata cli cli-demo +.PHONY: help build test mcp-test fmt clippy doc mdbook paper examples clean coverage rust-export compare qubo-testdata export-schemas release run-plan diagrams jl-testdata cli cli-demo copilot-review # Default target help: @@ -28,6 +28,7 @@ help: @echo " cli - Build the pred CLI tool" @echo " cli-demo - Run closed-loop CLI demo (build + exercise all commands)" @echo " run-plan - Execute a plan with Claude autorun (latest plan in docs/plans/)" + @echo " copilot-review - Request Copilot code review on current PR" # Build the project build: @@ -341,3 +342,10 @@ cli-demo: cli echo ""; \ echo "=== Demo complete: $$(ls $(CLI_DEMO_DIR)/*.json | wc -l | tr -d ' ') JSON files in $(CLI_DEMO_DIR) ===" @echo "=== All 20 steps passed ✅ ===" + +# Request Copilot code review on the current PR +# Requires: gh extension install ChrisCarini/gh-copilot-review +copilot-review: + 
@PR=$$(gh pr view --json number --jq .number 2>/dev/null) || { echo "No PR found for current branch"; exit 1; }; \ + echo "Requesting Copilot review on PR #$$PR..."; \ + gh copilot-review $$PR From c32544269ad7e9872117addf85e8c709ae7fd70c Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Fri, 27 Feb 2026 23:12:14 +0800 Subject: [PATCH 25/28] feat: add fix-pr skill for resolving PR feedback New skill that handles: - Fetching and triaging PR review comments (user + Copilot) - Checking CI status via gh api - Fixing codecov coverage gaps using gh api (not local cargo-llvm-cov) - Structured workflow: gather -> triage -> fix -> verify -> report Co-Authored-By: Claude Opus 4.6 --- .claude/CLAUDE.md | 1 + .claude/skills/fix-pr/SKILL.md | 155 +++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) create mode 100644 .claude/skills/fix-pr/SKILL.md diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index baa7a5793..f48aeb819 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -8,6 +8,7 @@ Rust library for NP-hard problem reductions. Implements computational problems w - [add-model](skills/add-model/SKILL.md) -- Add a new problem model. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. - [add-rule](skills/add-rule/SKILL.md) -- Add a new reduction rule. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. - [review-implementation](skills/review-implementation/SKILL.md) -- Review implementation completeness by dispatching parallel subagents (structural + quality) with fresh context. Auto-detects new models/rules from git diff. Called automatically at the end of `add-model`/`add-rule`, after each `executing-plans` batch, or standalone via `/review-implementation`. +- [fix-pr](skills/fix-pr/SKILL.md) -- Resolve PR review comments (user + Copilot), fix CI failures, and address codecov coverage gaps. Uses `gh api` for codecov (not local `cargo-llvm-cov`). 
- [release](skills/release/SKILL.md) -- Create a new crate release. Determines version bump from diff, verifies tests/clippy, then runs `make release`. ## Commands diff --git a/.claude/skills/fix-pr/SKILL.md b/.claude/skills/fix-pr/SKILL.md new file mode 100644 index 000000000..f42d7ae96 --- /dev/null +++ b/.claude/skills/fix-pr/SKILL.md @@ -0,0 +1,155 @@ +--- +name: fix-pr +description: Use when a PR has review comments to address, CI failures to fix, or codecov coverage gaps to resolve +--- + +# Fix PR + +Resolve PR review comments, fix CI failures, and address codecov coverage gaps for the current branch's PR. + +## Step 1: Gather PR State + +```bash +# Get PR number +PR=$(gh pr view --json number --jq .number) + +# Get PR head SHA (on remote) +HEAD_SHA=$(gh api repos/{owner}/{repo}/pulls/$PR --jq '.head.sha') +``` + +### 1a. Fetch Review Comments + +Three sources of feedback to check: + +```bash +# Copilot and user inline review comments (on code lines) +gh api repos/{owner}/{repo}/pulls/$PR/comments --jq '.[] | "[\(.user.login)] \(.path):\(.line // .original_line) — \(.body)"' + +# Review-level comments (top-level review body) +gh api repos/{owner}/{repo}/pulls/$PR/reviews --jq '.[] | select(.body != "") | "[\(.user.login)] \(.state): \(.body)"' + +# Issue-level comments (general discussion) +gh api repos/{owner}/{repo}/issues/$PR/comments --jq '.[] | select(.user.login | test("codecov|copilot") | not) | "[\(.user.login)] \(.body)"' +``` + +### 1b. Check CI Status + +```bash +# All check runs on the PR head +gh api repos/{owner}/{repo}/commits/$HEAD_SHA/check-runs \ + --jq '.check_runs[] | "\(.name): \(.conclusion // .status)"' +``` + +### 1c. 
Check Codecov Report + +```bash +# Codecov bot comment with coverage diff +gh api repos/{owner}/{repo}/issues/$PR/comments \ + --jq '.[] | select(.user.login == "codecov[bot]") | .body' +``` + +## Step 2: Triage and Prioritize + +Categorize all findings: + +| Priority | Type | Action | +|----------|------|--------| +| 1 | CI failures (test/clippy/build) | Fix immediately -- blocks merge | +| 2 | User review comments | Address each one -- respond on PR | +| 3 | Copilot review comments | Evaluate validity, fix if correct | +| 4 | Codecov coverage gaps | Add tests for uncovered lines | + +## Step 3: Fix CI Failures + +For each failing check: + +1. **Clippy**: Run `make clippy` locally, fix warnings +2. **Test**: Run `make test` locally, fix failures +3. **Build**: Run `make build` locally, fix errors +4. **Coverage**: See Step 5 (codecov-specific flow) + +## Step 4: Address Review Comments + +For each review comment: + +1. Read the comment and the code it references +2. Evaluate if the suggestion is correct +3. If valid: make the fix, commit +4. If debatable: fix it anyway unless technically wrong +5. If wrong: prepare a response explaining why + +**Do NOT respond on the PR** -- just fix and commit. The user will push and respond. + +### Handling Copilot Suggestions + +Copilot suggestions with `suggestion` blocks contain exact code. Evaluate each: +- **Correct**: Apply the suggestion +- **Partially correct**: Apply the spirit, adjust details +- **Wrong**: Skip, note why in commit message + +## Step 5: Fix Codecov Coverage Gaps + +**IMPORTANT: Do NOT run `cargo-llvm-cov` locally.** Use the `gh api` to read the codecov report instead. + +### 5a. 
Identify Uncovered Lines + +From the codecov bot comment (fetched in Step 1c), extract: +- Files with missing coverage +- Patch coverage percentage +- Specific uncovered lines (linked in the report) + +For detailed line-by-line coverage, use the Codecov API: + +```bash +# Get file-level coverage for the PR +gh api repos/{owner}/{repo}/pulls/$PR/comments \ + --jq '.[] | select(.user.login == "codecov[bot]") | .body' \ + | grep -oP 'filepath=\K[^&]+' +``` + +Then read the source files and identify which new/changed lines lack test coverage. + +### 5b. Add Tests for Uncovered Lines + +1. Read the uncovered file and identify the untested code paths +2. Write tests targeting those specific paths (error branches, edge cases, etc.) +3. Run `make test` to verify tests pass +4. Commit the new tests + +### 5c. Verify Coverage Improvement + +After pushing, CI will re-run coverage. Check the updated codecov comment on the PR. + +## Step 6: Commit and Report + +After all fixes: + +```bash +# Verify everything passes locally +make check # fmt + clippy + test +``` + +Commit with a descriptive message referencing the PR: + +```bash +git commit -m "fix: address PR #$PR review comments + +- [summary of fixes applied] +" +``` + +Report to user: +- List of review comments addressed (with what was done) +- CI fixes applied +- Coverage gaps filled +- Any comments left unresolved (with reasoning) + +## Integration + +### With review-implementation + +Run `/review-implementation` first to catch issues before push. Then `/fix-pr` after push to address CI and reviewer feedback. + +### With executing-plans / finishing-a-development-branch + +After creating a PR and running `make copilot-review`, use `/fix-pr` to address the resulting feedback. 
From a8e74af326eff06f10a3d64343a2dd35f778ed5c Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Sat, 28 Feb 2026 00:18:37 +0800 Subject: [PATCH 26/28] fix MIS reduction --- docs/plans/2026-02-27-binpacking-model.md | 96 +++++ docs/src/reductions/reduction_graph.json | 402 +++++++++++++----- ...satisfiability_to_maximumindependentset.rs | 2 +- problemreductions-cli/src/commands/graph.rs | 5 +- src/models/graph/maximum_independent_set.rs | 11 +- src/rules/maximumindependentset_casts.rs | 43 ++ src/rules/maximumindependentset_gridgraph.rs | 45 +- src/rules/maximumindependentset_triangular.rs | 11 +- src/rules/sat_maximumindependentset.rs | 9 +- src/unit_tests/reduction_graph.rs | 12 +- src/unit_tests/rules/graph.rs | 4 +- .../rules/maximumindependentset_gridgraph.rs | 54 +-- .../rules/maximumindependentset_triangular.rs | 9 +- .../rules/sat_maximumindependentset.rs | 20 +- 14 files changed, 504 insertions(+), 219 deletions(-) create mode 100644 docs/plans/2026-02-27-binpacking-model.md diff --git a/docs/plans/2026-02-27-binpacking-model.md b/docs/plans/2026-02-27-binpacking-model.md new file mode 100644 index 000000000..b576b45e2 --- /dev/null +++ b/docs/plans/2026-02-27-binpacking-model.md @@ -0,0 +1,96 @@ +# Plan: Add BinPacking Model + +**Issue:** #95 — [Model] BinPacking +**Skill:** add-model (Steps 1–7) + +## Overview + +Add a `BinPacking` optimization model: given items with sizes and a bin capacity, minimize the number of bins used to pack all items such that no bin exceeds capacity. + +## Design Decisions + +- **Category:** `specialized` — BinPacking is a domain-specific packing/scheduling problem. It doesn't fit `graph/` (no graph), `set/` (not subset selection), `optimization/` (reserved for generic formulations like QUBO/ILP), or `satisfiability/`. +- **Struct:** `BinPacking` with fields `sizes: Vec` and `capacity: W`. Generic over weight type W for integer or real-valued sizes. +- **dims():** `vec![n; n]` where n = number of items. 
Each variable is a bin index in {0, ..., n−1}. This is the first non-binary configuration space in the codebase. +- **Objective:** Minimize the count of distinct bin indices used (always `i32`, regardless of W). So `Metric = SolutionSize`, `Value = i32`. +- **Feasibility:** For each bin j, the sum of sizes of items assigned to j must not exceed capacity. Uses `WeightElement::to_sum()` for size summation and capacity comparison. +- **variant():** `variant_params![W]` — exposes weight type (i32, f64). +- **Solver:** BruteForce (existing) — enumerates all n^n assignments. No ILP reduction in this PR. + +## Steps + +### Step 1: Determine category +Category: `specialized/` + +### Step 2: Implement the model +Create `src/models/specialized/bin_packing.rs`: + +```rust +// Structure: +// 1. inventory::submit! for ProblemSchemaEntry +// 2. BinPacking struct with sizes: Vec, capacity: W +// 3. Constructor: new(sizes, capacity), with_unit_sizes(sizes, capacity) if W: From +// 4. Accessors: sizes(), capacity(), num_items() +// 5. Problem impl: NAME="BinPacking", Metric=SolutionSize, dims()=vec![n;n] +// 6. evaluate(): check bin capacities, count distinct bins +// 7. OptimizationProblem impl: Value=i32, direction=Minimize +// 8. #[cfg(test)] #[path] link +``` + +Key implementation details for `evaluate()`: +``` +1. Group items by assigned bin index +2. For each bin, sum sizes via to_sum() and compare with capacity.to_sum() +3. If any bin exceeds capacity → SolutionSize::Invalid +4. Otherwise → SolutionSize::Valid(num_distinct_bins as i32) +``` + +### Step 3: Register the model +1. `src/models/specialized/mod.rs` — add `pub(crate) mod bin_packing;` and `pub use bin_packing::BinPacking;` +2. `src/models/mod.rs` — add `BinPacking` to the `specialized` re-export line + +### Step 4: Register in CLI +1. `problemreductions-cli/src/dispatch.rs`: + - `load_problem()`: add `"BinPacking" => deser_opt::>(data)` + - `serialize_any_problem()`: add `"BinPacking" => try_ser::>(any)` +2. 
`problemreductions-cli/src/problem_name.rs`: + - `resolve_alias()`: add `"binpacking" => "BinPacking".to_string()` + - Optionally add `("BP", "BinPacking")` to `ALIASES` + +### Step 5: Write unit tests +Create `src/unit_tests/models/specialized/bin_packing.rs`: + +Tests: +- `test_binpacking_creation` — construct instance, verify num_items, dims +- `test_binpacking_evaluation_valid` — valid packing returns SolutionSize::Valid(num_bins) +- `test_binpacking_evaluation_invalid` — overloaded bin returns SolutionSize::Invalid +- `test_binpacking_direction` — verify Direction::Minimize +- `test_binpacking_solver` — BruteForce finds optimal 3-bin solution for the example instance (6 items, sizes [6,6,5,5,4,4], capacity 10) +- `test_binpacking_serialization` — round-trip serde test + +Example instance from issue: +- 6 items, capacity C = 10, sizes = [6, 6, 5, 5, 4, 4] +- Optimal: 3 bins, e.g., x = (0, 1, 2, 2, 0, 1) + +### Step 6: Document in paper +Update `docs/paper/reductions.typ`: +1. Add to `display-name` dictionary: `"BinPacking": [Bin Packing]` +2. Add `#problem-def("BinPacking")[...]` block with mathematical definition + +### Step 7: Verify +```bash +make check # fmt + clippy + test +``` +Then run `/review-implementation` to verify completeness. 
+ +## Files Changed + +| File | Action | +|------|--------| +| `src/models/specialized/bin_packing.rs` | **Create** — model implementation | +| `src/unit_tests/models/specialized/bin_packing.rs` | **Create** — unit tests | +| `src/models/specialized/mod.rs` | **Edit** — register module | +| `src/models/mod.rs` | **Edit** — add re-export | +| `problemreductions-cli/src/dispatch.rs` | **Edit** — CLI dispatch | +| `problemreductions-cli/src/problem_name.rs` | **Edit** — alias | +| `docs/paper/reductions.typ` | **Edit** — paper definition | diff --git a/docs/src/reductions/reduction_graph.json b/docs/src/reductions/reduction_graph.json index acf4bd9eb..e25f33c65 100644 --- a/docs/src/reductions/reduction_graph.json +++ b/docs/src/reductions/reduction_graph.json @@ -4,19 +4,32 @@ "name": "CircuitSAT", "variant": {}, "category": "specialized", - "doc_path": "models/specialized/struct.CircuitSAT.html" + "doc_path": "models/specialized/struct.CircuitSAT.html", + "complexity": "2^num_inputs" }, { "name": "Factoring", "variant": {}, "category": "specialized", - "doc_path": "models/specialized/struct.Factoring.html" + "doc_path": "models/specialized/struct.Factoring.html", + "complexity": "exp(sqrt(num_bits))" }, { "name": "ILP", "variant": {}, "category": "optimization", - "doc_path": "models/optimization/struct.ILP.html" + "doc_path": "models/optimization/struct.ILP.html", + "complexity": "exp(num_variables)" + }, + { + "name": "KColoring", + "variant": { + "graph": "SimpleGraph", + "k": "K2" + }, + "category": "graph", + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "2^num_vertices" }, { "name": "KColoring", @@ -25,7 +38,28 @@ "k": "K3" }, "category": "graph", - "doc_path": "models/graph/struct.KColoring.html" + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "3^num_vertices" + }, + { + "name": "KColoring", + "variant": { + "graph": "SimpleGraph", + "k": "K4" + }, + "category": "graph", + "doc_path": 
"models/graph/struct.KColoring.html", + "complexity": "4^num_vertices" + }, + { + "name": "KColoring", + "variant": { + "graph": "SimpleGraph", + "k": "K5" + }, + "category": "graph", + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "5^num_vertices" }, { "name": "KColoring", @@ -34,7 +68,8 @@ "k": "KN" }, "category": "graph", - "doc_path": "models/graph/struct.KColoring.html" + "doc_path": "models/graph/struct.KColoring.html", + "complexity": "k^num_vertices" }, { "name": "KSatisfiability", @@ -42,7 +77,8 @@ "k": "K2" }, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.KSatisfiability.html" + "doc_path": "models/satisfiability/struct.KSatisfiability.html", + "complexity": "2^num_variables" }, { "name": "KSatisfiability", @@ -50,7 +86,8 @@ "k": "K3" }, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.KSatisfiability.html" + "doc_path": "models/satisfiability/struct.KSatisfiability.html", + "complexity": "2^num_variables" }, { "name": "KSatisfiability", @@ -58,7 +95,8 @@ "k": "KN" }, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.KSatisfiability.html" + "doc_path": "models/satisfiability/struct.KSatisfiability.html", + "complexity": "2^num_variables" }, { "name": "MaxCut", @@ -67,7 +105,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaxCut.html" + "doc_path": "models/graph/struct.MaxCut.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximalIS", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximalIS.html", + "complexity": "2^num_vertices" }, { "name": "MaximumClique", @@ -76,7 +125,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumClique.html" + "doc_path": "models/graph/struct.MaximumClique.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximumIndependentSet", + "variant": { + "graph": 
"KingsSubgraph", + "weight": "One" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -85,7 +145,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximumIndependentSet", + "variant": { + "graph": "SimpleGraph", + "weight": "One" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -94,7 +165,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -103,7 +175,18 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" + }, + { + "name": "MaximumIndependentSet", + "variant": { + "graph": "UnitDiskGraph", + "weight": "One" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumIndependentSet", @@ -112,7 +195,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumIndependentSet.html" + "doc_path": "models/graph/struct.MaximumIndependentSet.html", + "complexity": "2^num_vertices" }, { "name": "MaximumMatching", @@ -121,7 +205,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MaximumMatching.html" + "doc_path": "models/graph/struct.MaximumMatching.html", + "complexity": "2^num_vertices" }, { "name": "MaximumSetPacking", @@ -129,7 +214,8 @@ "weight": "f64" }, "category": "set", - "doc_path": 
"models/set/struct.MaximumSetPacking.html" + "doc_path": "models/set/struct.MaximumSetPacking.html", + "complexity": "2^num_sets" }, { "name": "MaximumSetPacking", @@ -137,7 +223,8 @@ "weight": "i32" }, "category": "set", - "doc_path": "models/set/struct.MaximumSetPacking.html" + "doc_path": "models/set/struct.MaximumSetPacking.html", + "complexity": "2^num_sets" }, { "name": "MinimumDominatingSet", @@ -146,7 +233,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MinimumDominatingSet.html" + "doc_path": "models/graph/struct.MinimumDominatingSet.html", + "complexity": "2^num_vertices" }, { "name": "MinimumSetCovering", @@ -154,7 +242,8 @@ "weight": "i32" }, "category": "set", - "doc_path": "models/set/struct.MinimumSetCovering.html" + "doc_path": "models/set/struct.MinimumSetCovering.html", + "complexity": "2^num_sets" }, { "name": "MinimumVertexCover", @@ -163,7 +252,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.MinimumVertexCover.html" + "doc_path": "models/graph/struct.MinimumVertexCover.html", + "complexity": "2^num_vertices" }, { "name": "QUBO", @@ -171,13 +261,15 @@ "weight": "f64" }, "category": "optimization", - "doc_path": "models/optimization/struct.QUBO.html" + "doc_path": "models/optimization/struct.QUBO.html", + "complexity": "2^num_vars" }, { "name": "Satisfiability", "variant": {}, "category": "satisfiability", - "doc_path": "models/satisfiability/struct.Satisfiability.html" + "doc_path": "models/satisfiability/struct.Satisfiability.html", + "complexity": "2^num_variables" }, { "name": "SpinGlass", @@ -186,7 +278,8 @@ "weight": "f64" }, "category": "optimization", - "doc_path": "models/optimization/struct.SpinGlass.html" + "doc_path": "models/optimization/struct.SpinGlass.html", + "complexity": "2^num_vertices" }, { "name": "SpinGlass", @@ -195,7 +288,8 @@ "weight": "i32" }, "category": "optimization", - "doc_path": "models/optimization/struct.SpinGlass.html" + "doc_path": 
"models/optimization/struct.SpinGlass.html", + "complexity": "2^num_vertices" }, { "name": "TravelingSalesman", @@ -204,7 +298,8 @@ "weight": "i32" }, "category": "graph", - "doc_path": "models/graph/struct.TravelingSalesman.html" + "doc_path": "models/graph/struct.TravelingSalesman.html", + "complexity": "num_vertices!" } ], "edges": [ @@ -225,7 +320,7 @@ }, { "source": 0, - "target": 23, + "target": 30, "overhead": [ { "field": "num_spins", @@ -270,7 +365,7 @@ }, { "source": 2, - "target": 20, + "target": 27, "overhead": [ { "field": "num_vars", @@ -280,8 +375,8 @@ "doc_path": "rules/ilp_qubo/index.html" }, { - "source": 3, - "target": 4, + "source": 4, + "target": 7, "overhead": [ { "field": "num_vertices", @@ -295,7 +390,7 @@ "doc_path": "rules/kcoloring_casts/index.html" }, { - "source": 4, + "source": 7, "target": 2, "overhead": [ { @@ -310,8 +405,8 @@ "doc_path": "rules/coloring_ilp/index.html" }, { - "source": 4, - "target": 20, + "source": 7, + "target": 27, "overhead": [ { "field": "num_vars", @@ -321,8 +416,8 @@ "doc_path": "rules/coloring_qubo/index.html" }, { - "source": 5, - "target": 7, + "source": 8, + "target": 10, "overhead": [ { "field": "num_vars", @@ -336,8 +431,8 @@ "doc_path": "rules/ksatisfiability_casts/index.html" }, { - "source": 5, - "target": 20, + "source": 8, + "target": 27, "overhead": [ { "field": "num_vars", @@ -347,8 +442,8 @@ "doc_path": "rules/ksatisfiability_qubo/index.html" }, { - "source": 5, - "target": 21, + "source": 8, + "target": 28, "overhead": [ { "field": "num_clauses", @@ -366,8 +461,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 6, - "target": 7, + "source": 9, + "target": 10, "overhead": [ { "field": "num_vars", @@ -381,8 +476,8 @@ "doc_path": "rules/ksatisfiability_casts/index.html" }, { - "source": 6, - "target": 20, + "source": 9, + "target": 27, "overhead": [ { "field": "num_vars", @@ -392,8 +487,8 @@ "doc_path": "rules/ksatisfiability_qubo/index.html" }, { - "source": 6, - "target": 21, + 
"source": 9, + "target": 28, "overhead": [ { "field": "num_clauses", @@ -411,8 +506,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 7, - "target": 21, + "source": 10, + "target": 28, "overhead": [ { "field": "num_clauses", @@ -430,8 +525,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 8, - "target": 23, + "source": 11, + "target": 30, "overhead": [ { "field": "num_spins", @@ -445,7 +540,7 @@ "doc_path": "rules/spinglass_maxcut/index.html" }, { - "source": 9, + "source": 13, "target": 2, "overhead": [ { @@ -460,8 +555,8 @@ "doc_path": "rules/maximumclique_ilp/index.html" }, { - "source": 10, - "target": 13, + "source": 14, + "target": 15, "overhead": [ { "field": "num_vertices", @@ -475,23 +570,38 @@ "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 11, - "target": 2, + "source": 14, + "target": 19, "overhead": [ { - "field": "num_vars", + "field": "num_vertices", "formula": "num_vertices" }, { - "field": "num_constraints", + "field": "num_edges", "formula": "num_edges" } ], - "doc_path": "rules/maximumindependentset_ilp/index.html" + "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 11, - "target": 10, + "source": 15, + "target": 20, + "overhead": [ + { + "field": "num_vertices", + "formula": "num_vertices" + }, + { + "field": "num_edges", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_casts/index.html" + }, + { + "source": 16, + "target": 14, "overhead": [ { "field": "num_vertices", @@ -505,8 +615,38 @@ "doc_path": "rules/maximumindependentset_gridgraph/index.html" }, { - "source": 11, - "target": 12, + "source": 16, + "target": 15, + "overhead": [ + { + "field": "num_vertices", + "formula": "num_vertices * num_vertices" + }, + { + "field": "num_edges", + "formula": "num_vertices * num_vertices" + } + ], + "doc_path": "rules/maximumindependentset_gridgraph/index.html" + }, + { + "source": 16, + "target": 17, + "overhead": [ + { + "field": "num_vertices", + 
"formula": "num_vertices" + }, + { + "field": "num_edges", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_casts/index.html" + }, + { + "source": 16, + "target": 18, "overhead": [ { "field": "num_vertices", @@ -520,8 +660,23 @@ "doc_path": "rules/maximumindependentset_triangular/index.html" }, { - "source": 11, - "target": 16, + "source": 17, + "target": 2, + "overhead": [ + { + "field": "num_vars", + "formula": "num_vertices" + }, + { + "field": "num_constraints", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_ilp/index.html" + }, + { + "source": 17, + "target": 23, "overhead": [ { "field": "num_sets", @@ -535,8 +690,8 @@ "doc_path": "rules/maximumindependentset_maximumsetpacking/index.html" }, { - "source": 11, - "target": 19, + "source": 17, + "target": 26, "overhead": [ { "field": "num_vertices", @@ -550,8 +705,8 @@ "doc_path": "rules/minimumvertexcover_maximumindependentset/index.html" }, { - "source": 11, - "target": 20, + "source": 17, + "target": 27, "overhead": [ { "field": "num_vars", @@ -561,8 +716,8 @@ "doc_path": "rules/maximumindependentset_qubo/index.html" }, { - "source": 12, - "target": 13, + "source": 18, + "target": 20, "overhead": [ { "field": "num_vertices", @@ -576,23 +731,23 @@ "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 13, - "target": 10, + "source": 19, + "target": 16, "overhead": [ { "field": "num_vertices", - "formula": "num_vertices * num_vertices" + "formula": "num_vertices" }, { "field": "num_edges", - "formula": "num_vertices * num_vertices" + "formula": "num_edges" } ], - "doc_path": "rules/maximumindependentset_gridgraph/index.html" + "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 13, - "target": 11, + "source": 19, + "target": 20, "overhead": [ { "field": "num_vertices", @@ -606,7 +761,22 @@ "doc_path": "rules/maximumindependentset_casts/index.html" }, { - "source": 14, + "source": 20, + "target": 17, + 
"overhead": [ + { + "field": "num_vertices", + "formula": "num_vertices" + }, + { + "field": "num_edges", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_casts/index.html" + }, + { + "source": 21, "target": 2, "overhead": [ { @@ -621,8 +791,8 @@ "doc_path": "rules/maximummatching_ilp/index.html" }, { - "source": 14, - "target": 16, + "source": 21, + "target": 23, "overhead": [ { "field": "num_sets", @@ -636,8 +806,8 @@ "doc_path": "rules/maximummatching_maximumsetpacking/index.html" }, { - "source": 15, - "target": 20, + "source": 22, + "target": 27, "overhead": [ { "field": "num_vars", @@ -647,7 +817,7 @@ "doc_path": "rules/maximumsetpacking_qubo/index.html" }, { - "source": 16, + "source": 23, "target": 2, "overhead": [ { @@ -662,8 +832,8 @@ "doc_path": "rules/maximumsetpacking_ilp/index.html" }, { - "source": 16, - "target": 11, + "source": 23, + "target": 17, "overhead": [ { "field": "num_vertices", @@ -677,8 +847,8 @@ "doc_path": "rules/maximumindependentset_maximumsetpacking/index.html" }, { - "source": 16, - "target": 15, + "source": 23, + "target": 22, "overhead": [ { "field": "num_sets", @@ -692,7 +862,7 @@ "doc_path": "rules/maximumsetpacking_casts/index.html" }, { - "source": 17, + "source": 24, "target": 2, "overhead": [ { @@ -707,7 +877,7 @@ "doc_path": "rules/minimumdominatingset_ilp/index.html" }, { - "source": 18, + "source": 25, "target": 2, "overhead": [ { @@ -722,7 +892,7 @@ "doc_path": "rules/minimumsetcovering_ilp/index.html" }, { - "source": 19, + "source": 26, "target": 2, "overhead": [ { @@ -737,8 +907,8 @@ "doc_path": "rules/minimumvertexcover_ilp/index.html" }, { - "source": 19, - "target": 11, + "source": 26, + "target": 17, "overhead": [ { "field": "num_vertices", @@ -752,8 +922,8 @@ "doc_path": "rules/minimumvertexcover_maximumindependentset/index.html" }, { - "source": 19, - "target": 18, + "source": 26, + "target": 25, "overhead": [ { "field": "num_sets", @@ -767,8 +937,8 @@ "doc_path": 
"rules/minimumvertexcover_minimumsetcovering/index.html" }, { - "source": 19, - "target": 20, + "source": 26, + "target": 27, "overhead": [ { "field": "num_vars", @@ -778,7 +948,7 @@ "doc_path": "rules/minimumvertexcover_qubo/index.html" }, { - "source": 20, + "source": 27, "target": 2, "overhead": [ { @@ -793,8 +963,8 @@ "doc_path": "rules/qubo_ilp/index.html" }, { - "source": 20, - "target": 22, + "source": 27, + "target": 29, "overhead": [ { "field": "num_spins", @@ -804,7 +974,7 @@ "doc_path": "rules/spinglass_qubo/index.html" }, { - "source": 21, + "source": 28, "target": 0, "overhead": [ { @@ -819,23 +989,23 @@ "doc_path": "rules/sat_circuitsat/index.html" }, { - "source": 21, - "target": 3, + "source": 28, + "target": 4, "overhead": [ { "field": "num_vertices", - "formula": "2 * num_vars + 5 * num_literals - 5 * num_clauses + 3" + "formula": "2 * num_vars + 5 * num_literals + -1 * 5 * num_clauses + 3" }, { "field": "num_edges", - "formula": "3 * num_vars + 11 * num_literals - 9 * num_clauses + 3" + "formula": "3 * num_vars + 11 * num_literals + -1 * 9 * num_clauses + 3" } ], "doc_path": "rules/sat_coloring/index.html" }, { - "source": 21, - "target": 6, + "source": 28, + "target": 9, "overhead": [ { "field": "num_clauses", @@ -849,8 +1019,8 @@ "doc_path": "rules/sat_ksat/index.html" }, { - "source": 21, - "target": 11, + "source": 28, + "target": 16, "overhead": [ { "field": "num_vertices", @@ -864,8 +1034,8 @@ "doc_path": "rules/sat_maximumindependentset/index.html" }, { - "source": 21, - "target": 17, + "source": 28, + "target": 24, "overhead": [ { "field": "num_vertices", @@ -879,8 +1049,8 @@ "doc_path": "rules/sat_minimumdominatingset/index.html" }, { - "source": 22, - "target": 20, + "source": 29, + "target": 27, "overhead": [ { "field": "num_vars", @@ -890,8 +1060,8 @@ "doc_path": "rules/spinglass_qubo/index.html" }, { - "source": 23, - "target": 8, + "source": 30, + "target": 11, "overhead": [ { "field": "num_vertices", @@ -905,8 +1075,8 @@ 
"doc_path": "rules/spinglass_maxcut/index.html" }, { - "source": 23, - "target": 22, + "source": 30, + "target": 29, "overhead": [ { "field": "num_spins", @@ -920,7 +1090,7 @@ "doc_path": "rules/spinglass_casts/index.html" }, { - "source": 24, + "source": 31, "target": 2, "overhead": [ { @@ -929,7 +1099,7 @@ }, { "field": "num_constraints", - "formula": "num_vertices^3 - num_vertices^2 + 2 * num_vertices + 4 * num_vertices * num_edges" + "formula": "num_vertices^3 + -1 * 1 * num_vertices^2 + 2 * num_vertices + 4 * num_vertices * num_edges" } ], "doc_path": "rules/travelingsalesman_ilp/index.html" diff --git a/examples/reduction_satisfiability_to_maximumindependentset.rs b/examples/reduction_satisfiability_to_maximumindependentset.rs index 6b99d0777..6047033ab 100644 --- a/examples/reduction_satisfiability_to_maximumindependentset.rs +++ b/examples/reduction_satisfiability_to_maximumindependentset.rs @@ -43,7 +43,7 @@ pub fn run() { ); // 2. Reduce to Independent Set - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is = reduction.target_problem(); println!("\n=== Problem Transformation ==="); diff --git a/problemreductions-cli/src/commands/graph.rs b/problemreductions-cli/src/commands/graph.rs index bb2634276..fc314d170 100644 --- a/problemreductions-cli/src/commands/graph.rs +++ b/problemreductions-cli/src/commands/graph.rs @@ -614,11 +614,10 @@ pub fn neighbors( let root_label = fmt_node(&graph, &spec.name, &variant); + let header_label = fmt_node(&graph, &spec.name, &variant); let mut text = format!( "{} — {}-hop neighbors ({})\n\n", - crate::output::fmt_problem_name(&spec.name), - max_hops, - dir_label, + header_label, max_hops, dir_label, ); text.push_str(&root_label); diff --git a/src/models/graph/maximum_independent_set.rs b/src/models/graph/maximum_independent_set.rs index ef70cf6ae..36aaa8ae4 100644 --- a/src/models/graph/maximum_independent_set.rs +++ b/src/models/graph/maximum_independent_set.rs @@ -6,7 
+6,7 @@ use crate::registry::{FieldInfo, ProblemSchemaEntry}; use crate::topology::{Graph, KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph}; use crate::traits::{OptimizationProblem, Problem}; -use crate::types::{Direction, SolutionSize, WeightElement}; +use crate::types::{Direction, One, SolutionSize, WeightElement}; use num_traits::Zero; use serde::{Deserialize, Serialize}; @@ -160,10 +160,13 @@ fn is_independent_set_config(graph: &G, config: &[usize]) -> bool { } crate::declare_variants! { - MaximumIndependentSet => "2^num_vertices", - MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", MaximumIndependentSet => "2^num_vertices", - MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", + MaximumIndependentSet => "2^num_vertices", } /// Check if a set of vertices forms an independent set. 
diff --git a/src/rules/maximumindependentset_casts.rs b/src/rules/maximumindependentset_casts.rs index 9027cd0de..c293f0019 100644 --- a/src/rules/maximumindependentset_casts.rs +++ b/src/rules/maximumindependentset_casts.rs @@ -6,6 +6,7 @@ use crate::impl_variant_reduction; use crate::models::graph::MaximumIndependentSet; use crate::topology::{KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph}; +use crate::types::One; use crate::variant::CastToParent; impl_variant_reduction!( @@ -31,3 +32,45 @@ impl_variant_reduction!( |src| MaximumIndependentSet::new( src.graph().cast_to_parent(), src.weights().to_vec()) ); + +// Graph-hierarchy casts (same weight One) +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().cast_to_parent(), src.weights().to_vec()) +); + +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().cast_to_parent(), src.weights().to_vec()) +); + +// Weight-hierarchy casts (One → i32) +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().clone(), src.weights().iter().map(|w| w.cast_to_parent()).collect()) +); + +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().clone(), src.weights().iter().map(|w| w.cast_to_parent()).collect()) +); + +impl_variant_reduction!( + MaximumIndependentSet, + => , + fields: [num_vertices, num_edges], + |src| MaximumIndependentSet::new( + src.graph().clone(), src.weights().iter().map(|w| w.cast_to_parent()).collect()) +); diff --git a/src/rules/maximumindependentset_gridgraph.rs b/src/rules/maximumindependentset_gridgraph.rs index 0ce20fe4d..e336f033a 100644 --- a/src/rules/maximumindependentset_gridgraph.rs +++ b/src/rules/maximumindependentset_gridgraph.rs 
@@ -1,24 +1,25 @@ -//! Reduction from MaximumIndependentSet on SimpleGraph/UnitDiskGraph to KingsSubgraph +//! Reduction from unweighted MaximumIndependentSet on SimpleGraph to KingsSubgraph //! using the King's Subgraph (KSG) unit disk mapping. //! -//! Maps an arbitrary graph's MIS problem to an equivalent weighted MIS on a grid graph. +//! Maps an arbitrary graph's MIS problem to an equivalent MIS on a grid graph. use crate::models::graph::MaximumIndependentSet; use crate::reduction; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::rules::unitdiskmapping::ksg; -use crate::topology::{Graph, KingsSubgraph, SimpleGraph, UnitDiskGraph}; +use crate::topology::{Graph, KingsSubgraph, SimpleGraph}; +use crate::types::One; -/// Result of reducing MIS on SimpleGraph to MIS on KingsSubgraph. +/// Result of reducing MIS to MIS. #[derive(Debug, Clone)] -pub struct ReductionISSimpleToGrid { - target: MaximumIndependentSet, +pub struct ReductionISSimpleOneToGridOne { + target: MaximumIndependentSet, mapping_result: ksg::MappingResult, } -impl ReductionResult for ReductionISSimpleToGrid { - type Source = MaximumIndependentSet; - type Target = MaximumIndependentSet; +impl ReductionResult for ReductionISSimpleOneToGridOne { + type Source = MaximumIndependentSet; + type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { &self.target @@ -35,34 +36,34 @@ impl ReductionResult for ReductionISSimpleToGrid { num_edges = "num_vertices * num_vertices", } )] -impl ReduceTo> - for MaximumIndependentSet +impl ReduceTo> + for MaximumIndependentSet { - type Result = ReductionISSimpleToGrid; + type Result = ReductionISSimpleOneToGridOne; fn reduce_to(&self) -> Self::Result { let n = self.graph().num_vertices(); let edges = self.graph().edges(); let result = ksg::map_unweighted(n, &edges); - let weights = result.node_weights.clone(); let grid = result.to_kings_subgraph(); + let weights = vec![One; grid.num_vertices()]; let target = 
MaximumIndependentSet::new(grid, weights); - ReductionISSimpleToGrid { + ReductionISSimpleOneToGridOne { target, mapping_result: result, } } } -/// Result of reducing MIS on UnitDiskGraph to MIS on KingsSubgraph. +/// Result of reducing MIS to MIS. #[derive(Debug, Clone)] -pub struct ReductionISUnitDiskToGrid { +pub struct ReductionISSimpleOneToGridWeighted { target: MaximumIndependentSet, mapping_result: ksg::MappingResult, } -impl ReductionResult for ReductionISUnitDiskToGrid { - type Source = MaximumIndependentSet; +impl ReductionResult for ReductionISSimpleOneToGridWeighted { + type Source = MaximumIndependentSet; type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { @@ -81,18 +82,18 @@ impl ReductionResult for ReductionISUnitDiskToGrid { } )] impl ReduceTo> - for MaximumIndependentSet + for MaximumIndependentSet { - type Result = ReductionISUnitDiskToGrid; + type Result = ReductionISSimpleOneToGridWeighted; fn reduce_to(&self) -> Self::Result { let n = self.graph().num_vertices(); - let edges = Graph::edges(self.graph()); + let edges = self.graph().edges(); let result = ksg::map_unweighted(n, &edges); let weights = result.node_weights.clone(); let grid = result.to_kings_subgraph(); let target = MaximumIndependentSet::new(grid, weights); - ReductionISUnitDiskToGrid { + ReductionISSimpleOneToGridWeighted { target, mapping_result: result, } diff --git a/src/rules/maximumindependentset_triangular.rs b/src/rules/maximumindependentset_triangular.rs index 60e9338b9..0f57af8e2 100644 --- a/src/rules/maximumindependentset_triangular.rs +++ b/src/rules/maximumindependentset_triangular.rs @@ -1,5 +1,5 @@ -//! Reduction from MaximumIndependentSet on SimpleGraph to TriangularSubgraph -//! using the weighted triangular unit disk mapping. +//! Reduction from unweighted MaximumIndependentSet on SimpleGraph to TriangularSubgraph +//! using the triangular unit disk mapping. //! //! 
Maps an arbitrary graph's MIS problem to an equivalent weighted MIS on a //! triangular lattice grid graph. @@ -10,8 +10,9 @@ use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::rules::unitdiskmapping::ksg; use crate::rules::unitdiskmapping::triangular; use crate::topology::{Graph, SimpleGraph, TriangularSubgraph}; +use crate::types::One; -/// Result of reducing MIS on SimpleGraph to MIS on TriangularSubgraph. +/// Result of reducing MIS to MIS. #[derive(Debug, Clone)] pub struct ReductionISSimpleToTriangular { target: MaximumIndependentSet, @@ -19,7 +20,7 @@ pub struct ReductionISSimpleToTriangular { } impl ReductionResult for ReductionISSimpleToTriangular { - type Source = MaximumIndependentSet; + type Source = MaximumIndependentSet; type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { @@ -38,7 +39,7 @@ impl ReductionResult for ReductionISSimpleToTriangular { } )] impl ReduceTo> - for MaximumIndependentSet + for MaximumIndependentSet { type Result = ReductionISSimpleToTriangular; diff --git a/src/rules/sat_maximumindependentset.rs b/src/rules/sat_maximumindependentset.rs index 89978b9be..f39afe728 100644 --- a/src/rules/sat_maximumindependentset.rs +++ b/src/rules/sat_maximumindependentset.rs @@ -13,6 +13,7 @@ use crate::models::satisfiability::Satisfiability; use crate::reduction; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::topology::SimpleGraph; +use crate::types::One; /// A literal in the SAT problem, representing a variable or its negation. #[derive(Debug, Clone, PartialEq, Eq)] @@ -53,7 +54,7 @@ impl BoolVar { #[derive(Debug, Clone)] pub struct ReductionSATToIS { /// The target MaximumIndependentSet problem. - target: MaximumIndependentSet, + target: MaximumIndependentSet, /// Mapping from vertex index to the literal it represents. literals: Vec, /// The number of variables in the source SAT problem. 
@@ -64,7 +65,7 @@ pub struct ReductionSATToIS { impl ReductionResult for ReductionSATToIS { type Source = Satisfiability; - type Target = MaximumIndependentSet; + type Target = MaximumIndependentSet; fn target_problem(&self) -> &Self::Target { &self.target @@ -113,7 +114,7 @@ impl ReductionSATToIS { num_edges = "num_literals^2", } )] -impl ReduceTo> for Satisfiability { +impl ReduceTo> for Satisfiability { type Result = ReductionSATToIS; fn reduce_to(&self) -> Self::Result { @@ -153,7 +154,7 @@ impl ReduceTo> for Satisfiability { let target = MaximumIndependentSet::new( SimpleGraph::new(vertex_count, edges), - vec![1i32; vertex_count], + vec![One; vertex_count], ); ReductionSATToIS { diff --git a/src/unit_tests/reduction_graph.rs b/src/unit_tests/reduction_graph.rs index 5dab95623..6bb979ac0 100644 --- a/src/unit_tests/reduction_graph.rs +++ b/src/unit_tests/reduction_graph.rs @@ -314,7 +314,7 @@ fn test_3sat_to_mis_triangular_overhead() { ) .expect("Should find path from 3-SAT to MIS on triangular lattice"); - // Path: K3SAT → SAT → MIS{SimpleGraph,i32} → MIS{TriangularSubgraph,i32} + // Path: K3SAT → SAT → MIS{SimpleGraph,One} → MIS{TriangularSubgraph,i32} assert_eq!( path.type_names(), vec!["KSatisfiability", "Satisfiability", "MaximumIndependentSet"] @@ -339,12 +339,12 @@ fn test_3sat_to_mis_triangular_overhead() { assert_eq!(edges[0].get("num_clauses").unwrap().eval(&test_size), 2.0); assert_eq!(edges[0].get("num_literals").unwrap().eval(&test_size), 6.0); - // Edge 1: SAT → MIS{SimpleGraph,i32} + // Edge 1: SAT → MIS{SimpleGraph,One} // num_vertices = num_literals, num_edges = num_literals^2 assert_eq!(edges[1].get("num_vertices").unwrap().eval(&test_size), 6.0); assert_eq!(edges[1].get("num_edges").unwrap().eval(&test_size), 36.0); - // Edge 2: MIS{SimpleGraph,i32} → MIS{TriangularSubgraph,i32} + // Edge 2: MIS{SimpleGraph,One} → MIS{TriangularSubgraph,i32} // num_vertices = num_vertices^2, num_edges = num_vertices^2 assert_eq!( 
edges[2].get("num_vertices").unwrap().eval(&test_size), @@ -355,9 +355,9 @@ fn test_3sat_to_mis_triangular_overhead() { // Compose overheads symbolically along the path. // The composed overhead maps 3-SAT input variables to final MIS{Triangular} output. // - // K3SAT → SAT: {num_clauses: C, num_vars: V, num_literals: L} (identity) - // SAT → MIS: {num_vertices: L, num_edges: L²} - // MIS → MIS{Tri}: {num_vertices: num_vertices², num_edges: num_vertices²} + // K3SAT → SAT: {num_clauses: C, num_vars: V, num_literals: L} (identity) + // SAT → MIS{SG,One}: {num_vertices: L, num_edges: L²} + // MIS{SG,One→Tri}: {num_vertices: V², num_edges: V²} // // Composed: num_vertices = L², num_edges = L² let composed = graph.compose_path_overhead(&path); diff --git a/src/unit_tests/rules/graph.rs b/src/unit_tests/rules/graph.rs index 548833f84..345b2c19f 100644 --- a/src/unit_tests/rules/graph.rs +++ b/src/unit_tests/rules/graph.rs @@ -7,7 +7,7 @@ use crate::rules::graph::{classify_problem_category, ReductionStep}; use crate::rules::registry::ReductionEntry; use crate::topology::SimpleGraph; use crate::traits::Problem; -use crate::types::ProblemSize; +use crate::types::{One, ProblemSize}; use std::collections::BTreeMap; #[test] @@ -316,7 +316,7 @@ fn test_sat_based_reductions() { let graph = ReductionGraph::new(); // SAT -> IS - assert!(graph.has_direct_reduction::>()); + assert!(graph.has_direct_reduction::>()); // SAT -> KColoring assert!(graph.has_direct_reduction::>()); diff --git a/src/unit_tests/rules/maximumindependentset_gridgraph.rs b/src/unit_tests/rules/maximumindependentset_gridgraph.rs index 5adef63ff..8f326c96a 100644 --- a/src/unit_tests/rules/maximumindependentset_gridgraph.rs +++ b/src/unit_tests/rules/maximumindependentset_gridgraph.rs @@ -1,66 +1,38 @@ use super::*; use crate::models::graph::MaximumIndependentSet; use crate::solvers::BruteForce; -use crate::topology::{Graph, KingsSubgraph, SimpleGraph, UnitDiskGraph}; +use crate::topology::{Graph, 
KingsSubgraph, SimpleGraph}; +use crate::types::One; #[test] -fn test_mis_simple_to_grid_closed_loop() { - // Triangle graph: 3 vertices, 3 edges +fn test_mis_simple_one_to_kings_one_closed_loop() { let problem = MaximumIndependentSet::new( SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), - vec![1i32; 3], + vec![One; 3], ); - let result = ReduceTo::>::reduce_to(&problem); + let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); - - // The grid graph should have more vertices than the original assert!(target.graph().num_vertices() > 3); - // Find best solution on the grid graph using brute force let solver = BruteForce::new(); let grid_solutions = solver.find_all_best(target); assert!(!grid_solutions.is_empty()); - // Map solution back let original_solution = result.extract_solution(&grid_solutions[0]); assert_eq!(original_solution.len(), 3); - - // For a triangle, MIS size is 1 let size: usize = original_solution.iter().sum(); assert_eq!(size, 1, "Max IS in triangle should be 1"); } #[test] -fn test_mis_simple_to_grid_path_graph() { - // Path graph: 0-1-2 - let problem = - MaximumIndependentSet::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![1i32; 3]); - let result = ReduceTo::>::reduce_to(&problem); - let target = result.target_problem(); - - let solver = BruteForce::new(); - let grid_solutions = solver.find_all_best(target); - assert!(!grid_solutions.is_empty()); - - let original_solution = result.extract_solution(&grid_solutions[0]); - - // Path of 3 vertices has MIS size 2 (vertices 0 and 2) - let size: usize = original_solution.iter().sum(); - assert_eq!(size, 2, "Max IS in path should be 2"); -} - -#[test] -fn test_mis_unitdisk_to_grid_closed_loop() { - // Create a UnitDiskGraph: 3 points where 0-1 are close, 2 is far - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (0.5, 0.0), (3.0, 0.0)], 1.0); - // Only edge is 0-1 (distance 0.5 <= 1.0), vertex 2 is isolated - assert_eq!(udg.num_edges(), 1); - - let problem = 
MaximumIndependentSet::new(udg, vec![1i32, 1, 1]); +fn test_mis_simple_one_to_kings_weighted_closed_loop() { + let problem = MaximumIndependentSet::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![One; 3], + ); let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); - - assert!(target.graph().num_vertices() >= 3); + assert!(target.graph().num_vertices() > 3); let solver = BruteForce::new(); let grid_solutions = solver.find_all_best(target); @@ -68,8 +40,6 @@ fn test_mis_unitdisk_to_grid_closed_loop() { let original_solution = result.extract_solution(&grid_solutions[0]); assert_eq!(original_solution.len(), 3); - - // MIS should be size 2 (one from {0,1} + vertex 2) let size: usize = original_solution.iter().sum(); - assert_eq!(size, 2, "Max IS should be 2"); + assert_eq!(size, 1, "Max IS in triangle should be 1"); } diff --git a/src/unit_tests/rules/maximumindependentset_triangular.rs b/src/unit_tests/rules/maximumindependentset_triangular.rs index cfd5303cd..62502717b 100644 --- a/src/unit_tests/rules/maximumindependentset_triangular.rs +++ b/src/unit_tests/rules/maximumindependentset_triangular.rs @@ -1,12 +1,13 @@ use super::*; use crate::models::graph::MaximumIndependentSet; use crate::topology::{Graph, SimpleGraph, TriangularSubgraph}; +use crate::types::One; #[test] -fn test_mis_simple_to_triangular_closed_loop() { +fn test_mis_simple_one_to_triangular_closed_loop() { // Path graph: 0-1-2 let problem = - MaximumIndependentSet::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![1i32; 3]); + MaximumIndependentSet::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![One; 3]); let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); @@ -20,9 +21,9 @@ fn test_mis_simple_to_triangular_closed_loop() { } #[test] -fn test_mis_simple_to_triangular_graph_methods() { +fn test_mis_simple_one_to_triangular_graph_methods() { // Single edge graph: 0-1 - let problem = 
MaximumIndependentSet::new(SimpleGraph::new(2, vec![(0, 1)]), vec![1i32; 2]); + let problem = MaximumIndependentSet::new(SimpleGraph::new(2, vec![(0, 1)]), vec![One; 2]); let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); let graph = target.graph(); diff --git a/src/unit_tests/rules/sat_maximumindependentset.rs b/src/unit_tests/rules/sat_maximumindependentset.rs index c34a467e6..b60f40a54 100644 --- a/src/unit_tests/rules/sat_maximumindependentset.rs +++ b/src/unit_tests/rules/sat_maximumindependentset.rs @@ -45,7 +45,7 @@ fn test_boolvar_complement() { fn test_sat_to_maximumindependentset_closed_loop() { // Simple SAT: (x1) - one clause with one literal let sat = Satisfiability::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // Should have 1 vertex (one literal) @@ -59,7 +59,7 @@ fn test_two_clause_sat_to_is() { // SAT: (x1) AND (NOT x1) // This is unsatisfiable let sat = Satisfiability::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // Should have 2 vertices @@ -79,7 +79,7 @@ fn test_two_clause_sat_to_is() { fn test_extract_solution_basic() { // Simple case: (x1 OR x2) let sat = Satisfiability::new(2, vec![CNFClause::new(vec![1, 2])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); // Select vertex 0 (literal x1) let is_sol = vec![1, 0]; @@ -96,7 +96,7 @@ fn test_extract_solution_basic() { fn test_extract_solution_with_negation() { // (NOT x1) - selecting NOT x1 means x1 should be false let sat = Satisfiability::new(1, vec![CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_sol = vec![1]; let sat_sol = 
reduction.extract_solution(&is_sol); @@ -107,7 +107,7 @@ fn test_extract_solution_with_negation() { fn test_clique_edges_in_clause() { // A clause with 3 literals should form a clique (3 edges) let sat = Satisfiability::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // 3 vertices, 3 edges (complete graph K3) @@ -128,7 +128,7 @@ fn test_complement_edges_across_clauses() { CNFClause::new(vec![2]), ], ); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); assert_eq!(is_problem.graph().num_vertices(), 3); @@ -141,7 +141,7 @@ fn test_is_structure() { 3, vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], ); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); // IS should have vertices for literals in clauses @@ -152,7 +152,7 @@ fn test_is_structure() { fn test_empty_sat() { // Empty SAT (trivially satisfiable) let sat = Satisfiability::new(0, vec![]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let is_problem = reduction.target_problem(); assert_eq!(is_problem.graph().num_vertices(), 0); @@ -163,7 +163,7 @@ fn test_empty_sat() { #[test] fn test_literals_accessor() { let sat = Satisfiability::new(2, vec![CNFClause::new(vec![1, -2])]); - let reduction = ReduceTo::>::reduce_to(&sat); + let reduction = ReduceTo::>::reduce_to(&sat); let literals = reduction.literals(); assert_eq!(literals.len(), 2); @@ -206,7 +206,7 @@ fn test_jl_parity_sat_to_independentset() { let inst = &jl_find_instance_by_label(&sat_data, label)["instance"]; let (num_vars, clauses) = jl_parse_sat_clauses(inst); let source = Satisfiability::new(num_vars, clauses); - let result = ReduceTo::>::reduce_to(&source); + let result = 
ReduceTo::>::reduce_to(&source); let solver = BruteForce::new(); let best_target = solver.find_all_best(result.target_problem()); let extracted: HashSet> = best_target From 70ca34a1664ad520092627c9a2c7f5f0111f7859 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Sat, 28 Feb 2026 01:11:41 +0800 Subject: [PATCH 27/28] fix MIS to UDG issue --- .../static/petersen_square_unweighted.json | 2 +- ...satisfiability_to_maximumindependentset.rs | 2 +- src/rules/unitdiskmapping/grid.rs | 14 ++++++ src/rules/unitdiskmapping/ksg/gadgets.rs | 2 +- src/rules/unitdiskmapping/ksg/mapping.rs | 26 ++++++++-- src/rules/unitdiskmapping/traits.rs | 4 +- .../rules/maximumindependentset_gridgraph.rs | 50 +++++++++++++++---- 7 files changed, 80 insertions(+), 20 deletions(-) diff --git a/docs/paper/static/petersen_square_unweighted.json b/docs/paper/static/petersen_square_unweighted.json index 10f0f52f6..083ab57a6 100644 --- a/docs/paper/static/petersen_square_unweighted.json +++ b/docs/paper/static/petersen_square_unweighted.json @@ -1 +1 @@ 
-{"nodes":[{"row":2,"col":6,"weight":1},{"row":2,"col":18,"weight":1},{"row":2,"col":34,"weight":1},{"row":3,"col":5,"weight":1},{"row":3,"col":7,"weight":1},{"row":3,"col":8,"weight":2},{"row":3,"col":9,"weight":2},{"row":3,"col":10,"weight":2},{"row":3,"col":11,"weight":2},{"row":3,"col":12,"weight":2},{"row":3,"col":13,"weight":2},{"row":3,"col":14,"weight":2},{"row":3,"col":15,"weight":2},{"row":3,"col":16,"weight":2},{"row":3,"col":17,"weight":1},{"row":3,"col":19,"weight":1},{"row":3,"col":20,"weight":2},{"row":3,"col":21,"weight":1},{"row":3,"col":28,"weight":1},{"row":3,"col":29,"weight":2},{"row":3,"col":30,"weight":2},{"row":3,"col":31,"weight":2},{"row":3,"col":32,"weight":2},{"row":3,"col":33,"weight":1},{"row":3,"col":35,"weight":1},{"row":3,"col":36,"weight":2},{"row":3,"col":37,"weight":1},{"row":4,"col":6,"weight":1},{"row":4,"col":18,"weight":1},{"row":4,"col":22,"weight":1},{"row":4,"col":27,"weight":1},{"row":4,"col":34,"weight":1},{"row":4,"col":38,"weight":1},{"row":5,"col":6,"weight":1},{"row":5,"col":18,"weight":2},{"row":5,"col":22,"weight":2},{"row":5,"col":26,"weight":1},{"row":5,"col":34,"weight":2},{"row":5,"col":38,"weight":2},{"row":6,"col":7,"weight":1},{"row":6,"col":10,"weight":1},{"row":6,"col":18,"weight":1},{"row":6,"col":22,"weight":1},{"row":6,"col":26,"weight":1},{"row":6,"col":34,"weight":1},{"row":6,"col":38,"weight":1},{"row":7,"col":8,"weight":1},{"row":7,"col":9,"weight":1},{"row":7,"col":11,"weight":1},{"row":7,"col":12,"weight":2},{"row":7,"col":13,"weight":2},{"row":7,"col":14,"weight":2},{"row":7,"col":15,"weight":2},{"row":7,"col":16,"weight":1},{"row":7,"col":17,"weight":1},{"row":7,"col":18,"weight":1},{"row":7,"col":19,"weight":1},{"row":7,"col":20,"weight":1},{"row":7,"col":21,"weight":1},{"row":7,"col":22,"weight":1},{"row":7,"col":23,"weight":1},{"row":7,"col":24,"weight":1},{"row":7,"col":25,"weight":1},{"row":7,"col":32,"weight":1},{"row":7,"col":33,"weight":1},{"row":7,"col":34,"weight":1},{"row":7,"col":35,"
weight":1},{"row":7,"col":36,"weight":1},{"row":7,"col":37,"weight":1},{"row":7,"col":39,"weight":1},{"row":8,"col":10,"weight":1},{"row":8,"col":17,"weight":1},{"row":8,"col":18,"weight":1},{"row":8,"col":19,"weight":1},{"row":8,"col":21,"weight":1},{"row":8,"col":22,"weight":1},{"row":8,"col":23,"weight":1},{"row":8,"col":31,"weight":1},{"row":8,"col":33,"weight":1},{"row":8,"col":34,"weight":1},{"row":8,"col":35,"weight":1},{"row":8,"col":38,"weight":1},{"row":9,"col":10,"weight":1},{"row":9,"col":18,"weight":1},{"row":9,"col":22,"weight":1},{"row":9,"col":30,"weight":1},{"row":9,"col":34,"weight":1},{"row":9,"col":38,"weight":2},{"row":10,"col":11,"weight":1},{"row":10,"col":14,"weight":1},{"row":10,"col":18,"weight":1},{"row":10,"col":22,"weight":1},{"row":10,"col":30,"weight":1},{"row":10,"col":34,"weight":1},{"row":10,"col":38,"weight":1},{"row":11,"col":12,"weight":1},{"row":11,"col":13,"weight":1},{"row":11,"col":15,"weight":1},{"row":11,"col":16,"weight":1},{"row":11,"col":17,"weight":1},{"row":11,"col":18,"weight":1},{"row":11,"col":19,"weight":1},{"row":11,"col":20,"weight":1},{"row":11,"col":21,"weight":1},{"row":11,"col":22,"weight":1},{"row":11,"col":23,"weight":1},{"row":11,"col":24,"weight":1},{"row":11,"col":25,"weight":2},{"row":11,"col":26,"weight":2},{"row":11,"col":27,"weight":2},{"row":11,"col":28,"weight":2},{"row":11,"col":29,"weight":1},{"row":11,"col":31,"weight":1},{"row":11,"col":34,"weight":1},{"row":11,"col":38,"weight":1},{"row":12,"col":14,"weight":1},{"row":12,"col":17,"weight":1},{"row":12,"col":18,"weight":1},{"row":12,"col":19,"weight":1},{"row":12,"col":21,"weight":1},{"row":12,"col":22,"weight":1},{"row":12,"col":23,"weight":1},{"row":12,"col":30,"weight":1},{"row":12,"col":34,"weight":1},{"row":12,"col":38,"weight":1},{"row":13,"col":14,"weight":1},{"row":13,"col":18,"weight":1},{"row":13,"col":22,"weight":1},{"row":13,"col":30,"weight":2},{"row":13,"col":34,"weight":1},{"row":13,"col":38,"weight":1},{"row":14,"col":15,"weight
":1},{"row":14,"col":18,"weight":1},{"row":14,"col":22,"weight":1},{"row":14,"col":30,"weight":1},{"row":14,"col":34,"weight":1},{"row":14,"col":38,"weight":2},{"row":15,"col":16,"weight":1},{"row":15,"col":17,"weight":1},{"row":15,"col":18,"weight":1},{"row":15,"col":19,"weight":1},{"row":15,"col":20,"weight":1},{"row":15,"col":21,"weight":1},{"row":15,"col":22,"weight":1},{"row":15,"col":23,"weight":1},{"row":15,"col":24,"weight":1},{"row":15,"col":25,"weight":2},{"row":15,"col":26,"weight":2},{"row":15,"col":27,"weight":2},{"row":15,"col":28,"weight":1},{"row":15,"col":29,"weight":1},{"row":15,"col":30,"weight":1},{"row":15,"col":31,"weight":1},{"row":15,"col":32,"weight":1},{"row":15,"col":33,"weight":1},{"row":15,"col":35,"weight":1},{"row":15,"col":38,"weight":2},{"row":16,"col":18,"weight":1},{"row":16,"col":21,"weight":1},{"row":16,"col":22,"weight":1},{"row":16,"col":23,"weight":1},{"row":16,"col":29,"weight":1},{"row":16,"col":30,"weight":1},{"row":16,"col":31,"weight":1},{"row":16,"col":34,"weight":1},{"row":16,"col":38,"weight":2},{"row":17,"col":18,"weight":1},{"row":17,"col":22,"weight":1},{"row":17,"col":30,"weight":1},{"row":17,"col":34,"weight":2},{"row":17,"col":38,"weight":2},{"row":18,"col":19,"weight":1},{"row":18,"col":22,"weight":1},{"row":18,"col":30,"weight":1},{"row":18,"col":34,"weight":1},{"row":18,"col":38,"weight":1},{"row":19,"col":20,"weight":1},{"row":19,"col":21,"weight":1},{"row":19,"col":22,"weight":1},{"row":19,"col":23,"weight":1},{"row":19,"col":24,"weight":1},{"row":19,"col":25,"weight":2},{"row":19,"col":26,"weight":2},{"row":19,"col":27,"weight":2},{"row":19,"col":28,"weight":1},{"row":19,"col":29,"weight":1},{"row":19,"col":30,"weight":1},{"row":19,"col":31,"weight":1},{"row":19,"col":32,"weight":1},{"row":19,"col":33,"weight":1},{"row":19,"col":34,"weight":1},{"row":19,"col":35,"weight":1},{"row":19,"col":36,"weight":1},{"row":19,"col":37,"weight":1},{"row":20,"col":21,"weight":1},{"row":20,"col":22,"weight":1},{"row":20,"
col":23,"weight":1},{"row":20,"col":29,"weight":1},{"row":20,"col":30,"weight":1},{"row":20,"col":31,"weight":1},{"row":20,"col":33,"weight":1},{"row":20,"col":34,"weight":1},{"row":20,"col":35,"weight":1},{"row":21,"col":22,"weight":1},{"row":21,"col":30,"weight":1},{"row":21,"col":34,"weight":1},{"row":22,"col":23,"weight":1},{"row":22,"col":30,"weight":1},{"row":22,"col":34,"weight":1},{"row":23,"col":24,"weight":1},{"row":23,"col":25,"weight":2},{"row":23,"col":26,"weight":2},{"row":23,"col":27,"weight":2},{"row":23,"col":28,"weight":2},{"row":23,"col":29,"weight":1},{"row":23,"col":31,"weight":1},{"row":23,"col":32,"weight":2},{"row":23,"col":33,"weight":1},{"row":24,"col":30,"weight":1}],"edges":[[0,3],[0,4],[1,14],[1,15],[2,23],[2,24],[3,27],[4,5],[4,27],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,28],[15,16],[15,28],[16,17],[17,29],[18,19],[18,30],[19,20],[20,21],[21,22],[22,23],[23,31],[24,25],[24,31],[25,26],[26,32],[27,33],[28,34],[29,35],[30,36],[31,37],[32,38],[33,39],[34,41],[35,42],[36,43],[37,44],[38,45],[39,46],[40,47],[40,48],[41,54],[41,55],[41,56],[42,58],[42,59],[42,60],[43,62],[44,64],[44,65],[44,66],[45,68],[45,69],[46,47],[47,70],[48,49],[48,70],[49,50],[50,51],[51,52],[52,53],[53,54],[53,71],[54,55],[54,71],[54,72],[55,56],[55,71],[55,72],[55,73],[56,57],[56,72],[56,73],[57,58],[57,73],[57,74],[58,59],[58,74],[58,75],[59,60],[59,74],[59,75],[59,76],[60,61],[60,75],[60,76],[61,62],[61,76],[63,64],[63,77],[63,78],[64,65],[64,78],[64,79],[65,66],[65,78],[65,79],[65,80],[66,67],[66,79],[66,80],[67,68],[67,80],[68,81],[69,81],[70,82],[71,72],[71,83],[72,73],[72,83],[73,83],[74,75],[74,84],[75,76],[75,84],[76,84],[77,85],[78,79],[78,86],[79,80],[79,86],[80,86],[81,87],[82,88],[83,90],[84,91],[85,92],[86,93],[87,94],[88,95],[89,96],[89,97],[90,99],[90,100],[90,101],[91,103],[91,104],[91,105],[92,111],[92,112],[93,113],[94,114],[95,96],[96,115],[97,98],[97,115],[98,99],[98,116],[99,100],[99,116],[99,117],[100,101],[100,116],[1
00,117],[100,118],[101,102],[101,117],[101,118],[102,103],[102,118],[102,119],[103,104],[103,119],[103,120],[104,105],[104,119],[104,120],[104,121],[105,106],[105,120],[105,121],[106,107],[106,121],[107,108],[108,109],[109,110],[110,111],[111,122],[112,122],[113,123],[114,124],[115,125],[116,117],[116,126],[117,118],[117,126],[118,126],[119,120],[119,127],[120,121],[120,127],[121,127],[122,128],[123,129],[124,130],[125,131],[126,132],[127,133],[128,134],[129,135],[130,136],[131,137],[132,138],[132,139],[132,140],[133,142],[133,143],[133,144],[134,150],[134,151],[134,152],[135,154],[135,155],[136,156],[137,138],[138,139],[138,157],[139,140],[139,157],[140,141],[140,157],[141,142],[141,158],[142,143],[142,158],[142,159],[143,144],[143,158],[143,159],[143,160],[144,145],[144,159],[144,160],[145,146],[145,160],[146,147],[147,148],[148,149],[149,150],[149,161],[150,151],[150,161],[150,162],[151,152],[151,161],[151,162],[151,163],[152,153],[152,162],[152,163],[153,154],[153,163],[154,164],[155,164],[156,165],[157,166],[158,159],[158,167],[159,160],[159,167],[160,167],[161,162],[161,168],[162,163],[162,168],[163,168],[164,169],[165,170],[166,171],[167,172],[168,173],[169,174],[170,175],[171,176],[172,177],[172,178],[172,179],[173,185],[173,186],[173,187],[174,189],[174,190],[174,191],[175,193],[176,177],[176,194],[177,178],[177,194],[177,195],[178,179],[178,194],[178,195],[178,196],[179,180],[179,195],[179,196],[180,181],[180,196],[181,182],[182,183],[183,184],[184,185],[184,197],[185,186],[185,197],[185,198],[186,187],[186,197],[186,198],[186,199],[187,188],[187,198],[187,199],[188,189],[188,199],[188,200],[189,190],[189,200],[189,201],[190,191],[190,200],[190,201],[190,202],[191,192],[191,201],[191,202],[192,193],[192,202],[194,195],[194,203],[195,196],[195,203],[196,203],[197,198],[197,204],[198,199],[198,204],[199,204],[200,201],[200,205],[201,202],[201,205],[202,205],[203,206],[204,207],[205,208],[206,209],[207,214],[207,215],[208,217],[209,210],[210,211],[211,212],[2
12,213],[213,214],[214,218],[215,216],[215,218],[216,217]],"mis_overhead":89,"padding":2,"spacing":4,"weighted":false} \ No newline at end of file +{"nodes":[{"row":2,"col":6,"weight":1},{"row":2,"col":18,"weight":1},{"row":2,"col":34,"weight":1},{"row":3,"col":5,"weight":1},{"row":3,"col":7,"weight":1},{"row":3,"col":8,"weight":1},{"row":3,"col":9,"weight":1},{"row":3,"col":10,"weight":1},{"row":3,"col":11,"weight":1},{"row":3,"col":12,"weight":1},{"row":3,"col":13,"weight":1},{"row":3,"col":14,"weight":1},{"row":3,"col":15,"weight":1},{"row":3,"col":16,"weight":1},{"row":3,"col":17,"weight":1},{"row":3,"col":19,"weight":1},{"row":3,"col":20,"weight":1},{"row":3,"col":21,"weight":1},{"row":3,"col":28,"weight":1},{"row":3,"col":29,"weight":1},{"row":3,"col":30,"weight":1},{"row":3,"col":31,"weight":1},{"row":3,"col":32,"weight":1},{"row":3,"col":33,"weight":1},{"row":3,"col":35,"weight":1},{"row":3,"col":36,"weight":1},{"row":3,"col":37,"weight":1},{"row":4,"col":6,"weight":1},{"row":4,"col":18,"weight":1},{"row":4,"col":22,"weight":1},{"row":4,"col":27,"weight":1},{"row":4,"col":34,"weight":1},{"row":4,"col":38,"weight":1},{"row":5,"col":6,"weight":1},{"row":5,"col":18,"weight":1},{"row":5,"col":22,"weight":1},{"row":5,"col":26,"weight":1},{"row":5,"col":34,"weight":1},{"row":5,"col":38,"weight":1},{"row":6,"col":7,"weight":1},{"row":6,"col":10,"weight":1},{"row":6,"col":18,"weight":1},{"row":6,"col":22,"weight":1},{"row":6,"col":26,"weight":1},{"row":6,"col":34,"weight":1},{"row":6,"col":38,"weight":1},{"row":7,"col":8,"weight":1},{"row":7,"col":9,"weight":1},{"row":7,"col":11,"weight":1},{"row":7,"col":12,"weight":1},{"row":7,"col":13,"weight":1},{"row":7,"col":14,"weight":1},{"row":7,"col":15,"weight":1},{"row":7,"col":16,"weight":1},{"row":7,"col":17,"weight":1},{"row":7,"col":18,"weight":1},{"row":7,"col":19,"weight":1},{"row":7,"col":20,"weight":1},{"row":7,"col":21,"weight":1},{"row":7,"col":22,"weight":1},{"row":7,"col":23,"weight":1},{"row":7,"col":24,"wei
ght":1},{"row":7,"col":25,"weight":1},{"row":7,"col":32,"weight":1},{"row":7,"col":33,"weight":1},{"row":7,"col":34,"weight":1},{"row":7,"col":35,"weight":1},{"row":7,"col":36,"weight":1},{"row":7,"col":37,"weight":1},{"row":7,"col":39,"weight":1},{"row":8,"col":10,"weight":1},{"row":8,"col":17,"weight":1},{"row":8,"col":18,"weight":1},{"row":8,"col":19,"weight":1},{"row":8,"col":21,"weight":1},{"row":8,"col":22,"weight":1},{"row":8,"col":23,"weight":1},{"row":8,"col":31,"weight":1},{"row":8,"col":33,"weight":1},{"row":8,"col":34,"weight":1},{"row":8,"col":35,"weight":1},{"row":8,"col":38,"weight":1},{"row":9,"col":10,"weight":1},{"row":9,"col":18,"weight":1},{"row":9,"col":22,"weight":1},{"row":9,"col":30,"weight":1},{"row":9,"col":34,"weight":1},{"row":9,"col":38,"weight":1},{"row":10,"col":11,"weight":1},{"row":10,"col":14,"weight":1},{"row":10,"col":18,"weight":1},{"row":10,"col":22,"weight":1},{"row":10,"col":30,"weight":1},{"row":10,"col":34,"weight":1},{"row":10,"col":38,"weight":1},{"row":11,"col":12,"weight":1},{"row":11,"col":13,"weight":1},{"row":11,"col":15,"weight":1},{"row":11,"col":16,"weight":1},{"row":11,"col":17,"weight":1},{"row":11,"col":18,"weight":1},{"row":11,"col":19,"weight":1},{"row":11,"col":20,"weight":1},{"row":11,"col":21,"weight":1},{"row":11,"col":22,"weight":1},{"row":11,"col":23,"weight":1},{"row":11,"col":24,"weight":1},{"row":11,"col":25,"weight":1},{"row":11,"col":26,"weight":1},{"row":11,"col":27,"weight":1},{"row":11,"col":28,"weight":1},{"row":11,"col":29,"weight":1},{"row":11,"col":31,"weight":1},{"row":11,"col":34,"weight":1},{"row":11,"col":38,"weight":1},{"row":12,"col":14,"weight":1},{"row":12,"col":17,"weight":1},{"row":12,"col":18,"weight":1},{"row":12,"col":19,"weight":1},{"row":12,"col":21,"weight":1},{"row":12,"col":22,"weight":1},{"row":12,"col":23,"weight":1},{"row":12,"col":30,"weight":1},{"row":12,"col":34,"weight":1},{"row":12,"col":38,"weight":1},{"row":13,"col":14,"weight":1},{"row":13,"col":18,"weight":1},{"r
ow":13,"col":22,"weight":1},{"row":13,"col":30,"weight":1},{"row":13,"col":34,"weight":1},{"row":13,"col":38,"weight":1},{"row":14,"col":15,"weight":1},{"row":14,"col":18,"weight":1},{"row":14,"col":22,"weight":1},{"row":14,"col":30,"weight":1},{"row":14,"col":34,"weight":1},{"row":14,"col":38,"weight":1},{"row":15,"col":16,"weight":1},{"row":15,"col":17,"weight":1},{"row":15,"col":18,"weight":1},{"row":15,"col":19,"weight":1},{"row":15,"col":20,"weight":1},{"row":15,"col":21,"weight":1},{"row":15,"col":22,"weight":1},{"row":15,"col":23,"weight":1},{"row":15,"col":24,"weight":1},{"row":15,"col":25,"weight":1},{"row":15,"col":26,"weight":1},{"row":15,"col":27,"weight":1},{"row":15,"col":28,"weight":1},{"row":15,"col":29,"weight":1},{"row":15,"col":30,"weight":1},{"row":15,"col":31,"weight":1},{"row":15,"col":32,"weight":1},{"row":15,"col":33,"weight":1},{"row":15,"col":35,"weight":1},{"row":15,"col":38,"weight":1},{"row":16,"col":18,"weight":1},{"row":16,"col":21,"weight":1},{"row":16,"col":22,"weight":1},{"row":16,"col":23,"weight":1},{"row":16,"col":29,"weight":1},{"row":16,"col":30,"weight":1},{"row":16,"col":31,"weight":1},{"row":16,"col":34,"weight":1},{"row":16,"col":38,"weight":1},{"row":17,"col":18,"weight":1},{"row":17,"col":22,"weight":1},{"row":17,"col":30,"weight":1},{"row":17,"col":34,"weight":1},{"row":17,"col":38,"weight":1},{"row":18,"col":19,"weight":1},{"row":18,"col":22,"weight":1},{"row":18,"col":30,"weight":1},{"row":18,"col":34,"weight":1},{"row":18,"col":38,"weight":1},{"row":19,"col":20,"weight":1},{"row":19,"col":21,"weight":1},{"row":19,"col":22,"weight":1},{"row":19,"col":23,"weight":1},{"row":19,"col":24,"weight":1},{"row":19,"col":25,"weight":1},{"row":19,"col":26,"weight":1},{"row":19,"col":27,"weight":1},{"row":19,"col":28,"weight":1},{"row":19,"col":29,"weight":1},{"row":19,"col":30,"weight":1},{"row":19,"col":31,"weight":1},{"row":19,"col":32,"weight":1},{"row":19,"col":33,"weight":1},{"row":19,"col":34,"weight":1},{"row":19,"col":35,
"weight":1},{"row":19,"col":36,"weight":1},{"row":19,"col":37,"weight":1},{"row":20,"col":21,"weight":1},{"row":20,"col":22,"weight":1},{"row":20,"col":23,"weight":1},{"row":20,"col":29,"weight":1},{"row":20,"col":30,"weight":1},{"row":20,"col":31,"weight":1},{"row":20,"col":33,"weight":1},{"row":20,"col":34,"weight":1},{"row":20,"col":35,"weight":1},{"row":21,"col":22,"weight":1},{"row":21,"col":30,"weight":1},{"row":21,"col":34,"weight":1},{"row":22,"col":23,"weight":1},{"row":22,"col":30,"weight":1},{"row":22,"col":34,"weight":1},{"row":23,"col":24,"weight":1},{"row":23,"col":25,"weight":1},{"row":23,"col":26,"weight":1},{"row":23,"col":27,"weight":1},{"row":23,"col":28,"weight":1},{"row":23,"col":29,"weight":1},{"row":23,"col":31,"weight":1},{"row":23,"col":32,"weight":1},{"row":23,"col":33,"weight":1},{"row":24,"col":30,"weight":1}],"edges":[[0,3],[0,4],[1,14],[1,15],[2,23],[2,24],[3,27],[4,5],[4,27],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,28],[15,16],[15,28],[16,17],[17,29],[18,19],[18,30],[19,20],[20,21],[21,22],[22,23],[23,31],[24,25],[24,31],[25,26],[26,32],[27,33],[28,34],[29,35],[30,36],[31,37],[32,38],[33,39],[34,41],[35,42],[36,43],[37,44],[38,45],[39,46],[40,47],[40,48],[41,54],[41,55],[41,56],[42,58],[42,59],[42,60],[43,62],[44,64],[44,65],[44,66],[45,68],[45,69],[46,47],[47,70],[48,49],[48,70],[49,50],[50,51],[51,52],[52,53],[53,54],[53,71],[54,55],[54,71],[54,72],[55,56],[55,71],[55,72],[55,73],[56,57],[56,72],[56,73],[57,58],[57,73],[57,74],[58,59],[58,74],[58,75],[59,60],[59,74],[59,75],[59,76],[60,61],[60,75],[60,76],[61,62],[61,76],[63,64],[63,77],[63,78],[64,65],[64,78],[64,79],[65,66],[65,78],[65,79],[65,80],[66,67],[66,79],[66,80],[67,68],[67,80],[68,81],[69,81],[70,82],[71,72],[71,83],[72,73],[72,83],[73,83],[74,75],[74,84],[75,76],[75,84],[76,84],[77,85],[78,79],[78,86],[79,80],[79,86],[80,86],[81,87],[82,88],[83,90],[84,91],[85,92],[86,93],[87,94],[88,95],[89,96],[89,97],[90,99],[90,100],[90,101],[91,103],[91,104
],[91,105],[92,111],[92,112],[93,113],[94,114],[95,96],[96,115],[97,98],[97,115],[98,99],[98,116],[99,100],[99,116],[99,117],[100,101],[100,116],[100,117],[100,118],[101,102],[101,117],[101,118],[102,103],[102,118],[102,119],[103,104],[103,119],[103,120],[104,105],[104,119],[104,120],[104,121],[105,106],[105,120],[105,121],[106,107],[106,121],[107,108],[108,109],[109,110],[110,111],[111,122],[112,122],[113,123],[114,124],[115,125],[116,117],[116,126],[117,118],[117,126],[118,126],[119,120],[119,127],[120,121],[120,127],[121,127],[122,128],[123,129],[124,130],[125,131],[126,132],[127,133],[128,134],[129,135],[130,136],[131,137],[132,138],[132,139],[132,140],[133,142],[133,143],[133,144],[134,150],[134,151],[134,152],[135,154],[135,155],[136,156],[137,138],[138,139],[138,157],[139,140],[139,157],[140,141],[140,157],[141,142],[141,158],[142,143],[142,158],[142,159],[143,144],[143,158],[143,159],[143,160],[144,145],[144,159],[144,160],[145,146],[145,160],[146,147],[147,148],[148,149],[149,150],[149,161],[150,151],[150,161],[150,162],[151,152],[151,161],[151,162],[151,163],[152,153],[152,162],[152,163],[153,154],[153,163],[154,164],[155,164],[156,165],[157,166],[158,159],[158,167],[159,160],[159,167],[160,167],[161,162],[161,168],[162,163],[162,168],[163,168],[164,169],[165,170],[166,171],[167,172],[168,173],[169,174],[170,175],[171,176],[172,177],[172,178],[172,179],[173,185],[173,186],[173,187],[174,189],[174,190],[174,191],[175,193],[176,177],[176,194],[177,178],[177,194],[177,195],[178,179],[178,194],[178,195],[178,196],[179,180],[179,195],[179,196],[180,181],[180,196],[181,182],[182,183],[183,184],[184,185],[184,197],[185,186],[185,197],[185,198],[186,187],[186,197],[186,198],[186,199],[187,188],[187,198],[187,199],[188,189],[188,199],[188,200],[189,190],[189,200],[189,201],[190,191],[190,200],[190,201],[190,202],[191,192],[191,201],[191,202],[192,193],[192,202],[194,195],[194,203],[195,196],[195,203],[196,203],[197,198],[197,204],[198,199],[198,204],[199,204],[200,
201],[200,205],[201,202],[201,205],[202,205],[203,206],[204,207],[205,208],[206,209],[207,214],[207,215],[208,217],[209,210],[210,211],[211,212],[212,213],[213,214],[214,218],[215,216],[215,218],[216,217]],"mis_overhead":89,"padding":2,"spacing":4,"weighted":false} \ No newline at end of file diff --git a/examples/reduction_satisfiability_to_maximumindependentset.rs b/examples/reduction_satisfiability_to_maximumindependentset.rs index 6047033ab..40027025f 100644 --- a/examples/reduction_satisfiability_to_maximumindependentset.rs +++ b/examples/reduction_satisfiability_to_maximumindependentset.rs @@ -105,7 +105,7 @@ pub fn run() { // 5. Export JSON let source_variant = variant_to_map(Satisfiability::variant()); - let target_variant = variant_to_map(MaximumIndependentSet::::variant()); + let target_variant = variant_to_map(MaximumIndependentSet::::variant()); let overhead = lookup_overhead( "Satisfiability", &source_variant, diff --git a/src/rules/unitdiskmapping/grid.rs b/src/rules/unitdiskmapping/grid.rs index 27ae7aacf..16edfb5b6 100644 --- a/src/rules/unitdiskmapping/grid.rs +++ b/src/rules/unitdiskmapping/grid.rs @@ -181,6 +181,20 @@ impl MappingGrid { coords } + /// Check if any doubled or connected cells remain in the grid. + /// Returns true if the mapping is not fully resolved. + /// Matches Julia's `GridGraph()` assertion. + pub fn has_unresolved_cells(&self) -> bool { + self.content.iter().any(|row| { + row.iter().any(|cell| { + matches!( + cell, + CellState::Doubled { .. } | CellState::Connected { .. } + ) + }) + }) + } + /// Get all doubled cell coordinates. /// Returns a set of (row, col) for cells in the Doubled state. 
pub fn doubled_cells(&self) -> std::collections::HashSet<(usize, usize)> { diff --git a/src/rules/unitdiskmapping/ksg/gadgets.rs b/src/rules/unitdiskmapping/ksg/gadgets.rs index a14345564..3bf84ca9f 100644 --- a/src/rules/unitdiskmapping/ksg/gadgets.rs +++ b/src/rules/unitdiskmapping/ksg/gadgets.rs @@ -1635,7 +1635,7 @@ fn apply_gadget_boxed(pattern: &dyn KsgPatternBoxed, grid: &mut MappingGrid, i: let state = match cell { PatternCell::Empty => CellState::Empty, PatternCell::Occupied => CellState::Occupied { weight: 1 }, - PatternCell::Doubled => CellState::Doubled { weight: 2 }, + PatternCell::Doubled => CellState::Doubled { weight: 1 }, PatternCell::Connected => CellState::Connected { weight: 1 }, }; grid.set(grid_r, grid_c, state); diff --git a/src/rules/unitdiskmapping/ksg/mapping.rs b/src/rules/unitdiskmapping/ksg/mapping.rs index 5a5bd4f8b..af2527f68 100644 --- a/src/rules/unitdiskmapping/ksg/mapping.rs +++ b/src/rules/unitdiskmapping/ksg/mapping.rs @@ -593,16 +593,26 @@ pub fn map_unweighted_with_order( let gadget_overhead: i32 = tape.iter().map(tape_entry_mis_overhead).sum(); let mis_overhead = copyline_overhead + gadget_overhead; - // Extract positions and weights from occupied cells - let (positions, node_weights): (Vec<(i32, i32)>, Vec) = grid + // Assert all doubled/connected cells have been resolved by gadgets. + // Matches Julia's `GridGraph()` check: "This mapping is not done yet!" + debug_assert!( + !grid.has_unresolved_cells(), + "Mapping is not done: doubled or connected cells remain after gadget application" + ); + + // Extract positions from occupied cells. + // In unweighted mode, all node weights are 1 — matching Julia's behavior where + // `node(::Type{<:UnWeightedNode}, i, j, w) = Node(i, j)` ignores the weight parameter. 
+ let positions: Vec<(i32, i32)> = grid .occupied_coords() .into_iter() .filter_map(|(row, col)| { grid.get(row, col) - .map(|cell| ((row as i32, col as i32), cell.weight())) + .filter(|cell| cell.weight() > 0) + .map(|_| (row as i32, col as i32)) }) - .filter(|&(_, w)| w > 0) - .unzip(); + .collect(); + let node_weights = vec![1i32; positions.len()]; MappingResult { positions, @@ -685,6 +695,12 @@ pub fn map_weighted_with_order( let gadget_overhead: i32 = tape.iter().map(weighted_tape_entry_mis_overhead).sum(); let mis_overhead = copyline_overhead + gadget_overhead; + // Assert all doubled/connected cells have been resolved by gadgets. + debug_assert!( + !grid.has_unresolved_cells(), + "Mapping is not done: doubled or connected cells remain after gadget application" + ); + // Extract positions and weights from occupied cells let (positions, node_weights): (Vec<(i32, i32)>, Vec) = grid .occupied_coords() diff --git a/src/rules/unitdiskmapping/traits.rs b/src/rules/unitdiskmapping/traits.rs index e89910fb0..9bfba019c 100644 --- a/src/rules/unitdiskmapping/traits.rs +++ b/src/rules/unitdiskmapping/traits.rs @@ -178,7 +178,7 @@ pub fn apply_gadget(pattern: &P, grid: &mut MappingGrid, i: usize, j let state = match cell { PatternCell::Empty => CellState::Empty, PatternCell::Occupied => CellState::Occupied { weight: 1 }, - PatternCell::Doubled => CellState::Doubled { weight: 2 }, + PatternCell::Doubled => CellState::Doubled { weight: 1 }, PatternCell::Connected => CellState::Connected { weight: 1 }, }; grid.set(grid_r, grid_c, state); @@ -202,7 +202,7 @@ pub fn unapply_gadget(pattern: &P, grid: &mut MappingGrid, i: usize, let state = match cell { PatternCell::Empty => CellState::Empty, PatternCell::Occupied => CellState::Occupied { weight: 1 }, - PatternCell::Doubled => CellState::Doubled { weight: 2 }, + PatternCell::Doubled => CellState::Doubled { weight: 1 }, PatternCell::Connected => CellState::Connected { weight: 1 }, }; grid.set(grid_r, grid_c, state); diff --git 
a/src/unit_tests/rules/maximumindependentset_gridgraph.rs b/src/unit_tests/rules/maximumindependentset_gridgraph.rs index 8f326c96a..734149f5e 100644 --- a/src/unit_tests/rules/maximumindependentset_gridgraph.rs +++ b/src/unit_tests/rules/maximumindependentset_gridgraph.rs @@ -1,45 +1,75 @@ use super::*; use crate::models::graph::MaximumIndependentSet; +use crate::rules::unitdiskmapping::ksg; use crate::solvers::BruteForce; use crate::topology::{Graph, KingsSubgraph, SimpleGraph}; use crate::types::One; +#[test] +fn test_map_unweighted_produces_uniform_weights() { + // Triangle graph + let result = ksg::map_unweighted(3, &[(0, 1), (1, 2), (0, 2)]); + assert!( + result.node_weights.iter().all(|&w| w == 1), + "map_unweighted triangle should produce uniform weights, got: {:?}", + result.node_weights + ); + + // Path graph + let result2 = ksg::map_unweighted(3, &[(0, 1), (1, 2)]); + assert!( + result2.node_weights.iter().all(|&w| w == 1), + "map_unweighted path should produce uniform weights, got: {:?}", + result2.node_weights + ); + + // Cycle-5 + let result3 = ksg::map_unweighted(5, &[(0, 1), (1, 2), (2, 3), (3, 4), (0, 4)]); + assert!( + result3.node_weights.iter().all(|&w| w == 1), + "map_unweighted cycle5 should produce uniform weights, got: {:?}", + result3.node_weights + ); +} + #[test] fn test_mis_simple_one_to_kings_one_closed_loop() { + // Path graph: 0-1-2-3-4 (MIS = 3: select vertices 0, 2, 4) let problem = MaximumIndependentSet::new( - SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), - vec![One; 3], + SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]), + vec![One; 5], ); let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); - assert!(target.graph().num_vertices() > 3); + assert!(target.graph().num_vertices() > 5); let solver = BruteForce::new(); let grid_solutions = solver.find_all_best(target); assert!(!grid_solutions.is_empty()); let original_solution = result.extract_solution(&grid_solutions[0]); - 
assert_eq!(original_solution.len(), 3); + assert_eq!(original_solution.len(), 5); let size: usize = original_solution.iter().sum(); - assert_eq!(size, 1, "Max IS in triangle should be 1"); + assert_eq!(size, 3, "Max IS in path of 5 should be 3"); } #[test] fn test_mis_simple_one_to_kings_weighted_closed_loop() { + // Path graph: 0-1-2-3-4 (MIS = 3: select vertices 0, 2, 4) let problem = MaximumIndependentSet::new( - SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), - vec![One; 3], + SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]), + vec![One; 5], ); let result = ReduceTo::>::reduce_to(&problem); let target = result.target_problem(); - assert!(target.graph().num_vertices() > 3); + assert!(target.graph().num_vertices() > 5); let solver = BruteForce::new(); let grid_solutions = solver.find_all_best(target); assert!(!grid_solutions.is_empty()); let original_solution = result.extract_solution(&grid_solutions[0]); - assert_eq!(original_solution.len(), 3); + assert_eq!(original_solution.len(), 5); let size: usize = original_solution.iter().sum(); - assert_eq!(size, 1, "Max IS in triangle should be 1"); + assert_eq!(size, 3, "Max IS in path of 5 should be 3"); } From 16ec31649f32c6c0d898f4526df720b8b9fc34e0 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Sat, 28 Feb 2026 01:46:09 +0800 Subject: [PATCH 28/28] fix One default --- .claude/CLAUDE.md | 3 ++- problemreductions-cli/src/problem_name.rs | 33 ++++++++++++++++++----- src/rules/graph.rs | 4 +-- 3 files changed, 31 insertions(+), 9 deletions(-) diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index f48aeb819..5df7811a1 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -121,8 +121,9 @@ Problem types use explicit optimization prefixes: ### Problem Variant IDs Reduction graph nodes use variant key-value pairs from `Problem::variant()`: - Base: `MaximumIndependentSet` (empty variant = defaults) -- Graph variant: `MaximumIndependentSet {graph: "GridGraph", weight: "i32"}` +- Graph variant: 
`MaximumIndependentSet {graph: "KingsSubgraph", weight: "One"}` - Weight variant: `MaximumIndependentSet {graph: "SimpleGraph", weight: "f64"}` +- Default variant ranking: `SimpleGraph`, `One`, `KN` are considered default values; variants with the most default values sort first - Nodes come exclusively from `#[reduction]` registrations; natural edges between same-name variants are inferred from the graph/weight subtype partial order ## Conventions diff --git a/problemreductions-cli/src/problem_name.rs b/problemreductions-cli/src/problem_name.rs index ad64e7bba..f7fa533b5 100644 --- a/problemreductions-cli/src/problem_name.rs +++ b/problemreductions-cli/src/problem_name.rs @@ -109,12 +109,33 @@ pub fn resolve_variant( spec.variant_values, known_variants ), - _ => anyhow::bail!( - "Ambiguous variant for {} with values {:?}. Matches: {:?}", - spec.name, - spec.variant_values, - matches - ), + _ => { + // When ambiguous, use the same default ranking as the reduction graph: + // variants whose remaining (unmatched) fields are closest to defaults + // (SimpleGraph, One, KN) win. This matches variants_for() sort order. + let default_rank = |v: &BTreeMap| -> usize { + v.values() + .filter(|val| { + !spec.variant_values.contains(val) + && !["SimpleGraph", "One", "KN"].contains(&val.as_str()) + }) + .count() + }; + let min_rank = matches.iter().map(|v| default_rank(v)).min().unwrap(); + let best: Vec<_> = matches + .iter() + .filter(|v| default_rank(v) == min_rank) + .collect(); + if best.len() == 1 { + return Ok((*best[0]).clone()); + } + anyhow::bail!( + "Ambiguous variant for {} with values {:?}. 
Matches: {:?}", + spec.name, + spec.variant_values, + matches + ) + } } } diff --git a/src/rules/graph.rs b/src/rules/graph.rs index 2b5e0f87d..722d75d82 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -629,12 +629,12 @@ impl ReductionGraph { .collect() }) .unwrap_or_default(); - // Sort deterministically: default variant values (SimpleGraph, i32, KN) + // Sort deterministically: default variant values (SimpleGraph, One, KN) // sort first so callers can rely on variants[0] being the "base" variant. variants.sort_by(|a, b| { fn default_rank(v: &BTreeMap) -> usize { v.values() - .filter(|val| !["SimpleGraph", "i32", "KN"].contains(&val.as_str())) + .filter(|val| !["SimpleGraph", "One", "KN"].contains(&val.as_str())) .count() } default_rank(a).cmp(&default_rank(b)).then_with(|| a.cmp(b))