diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index c60593ad..3ff1ba9d 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -82,6 +82,7 @@ "CyclicOrdering": [Cyclic Ordering], "AcyclicPartition": [Acyclic Partition], "MaximumIndependentSet": [Maximum Independent Set], + "MaximumLeafSpanningTree": [Maximum Leaf Spanning Tree], "MinimumVertexCover": [Minimum Vertex Cover], "MaxCut": [Max-Cut], "GeneralizedHex": [Generalized Hex], @@ -131,6 +132,7 @@ "Satisfiability": [SAT], "NAESatisfiability": [NAE-SAT], "KSatisfiability": [$k$-SAT], + "Maximum2Satisfiability": [Maximum 2-Satisfiability], "NonTautology": [Non-Tautology], "OneInThreeSatisfiability": [1-in-3 SAT], "Planar3Satisfiability": [Planar 3-SAT], @@ -148,6 +150,7 @@ "BoundedComponentSpanningForest": [Bounded Component Spanning Forest], "BinPacking": [Bin Packing], "BoyceCoddNormalFormViolation": [Boyce-Codd Normal Form Violation], + "Clustering": [Clustering], "CapacityAssignment": [Capacity Assignment], "ConsistencyOfDatabaseFrequencyTables": [Consistency of Database Frequency Tables], "ClosestVectorProblem": [Closest Vector Problem], @@ -162,6 +165,7 @@ "LongestCommonSubsequence": [Longest Common Subsequence], "ExactCoverBy3Sets": [Exact Cover by 3-Sets], "ThreeDimensionalMatching": [Three-Dimensional Matching], + "ThreeMatroidIntersection": [Three-Matroid Intersection], "SubsetProduct": [Subset Product], "SubsetSum": [Subset Sum], "CosineProductIntegration": [Cosine Product Integration], @@ -169,6 +173,7 @@ "ThreePartition": [3-Partition], "DynamicStorageAllocation": [Dynamic Storage Allocation], "Numerical3DimensionalMatching": [Numerical 3-Dimensional Matching], + "NumericalMatchingWithTargetSums": [Numerical Matching with Target Sums], "PartialFeedbackEdgeSet": [Partial Feedback Edge Set], "MinimumFeedbackArcSet": [Minimum Feedback Arc Set], "MinimumFeedbackVertexSet": [Minimum Feedback Vertex Set], @@ -178,9 +183,12 @@ "ConsecutiveOnesSubmatrix": [Consecutive 
Ones Submatrix], "FeasibleBasisExtension": [Feasible Basis Extension], "SparseMatrixCompression": [Sparse Matrix Compression], + "MinimumMatrixCover": [Minimum Matrix Cover], "MinimumMatrixDomination": [Minimum Matrix Domination], + "MinimumWeightDecoding": [Minimum Weight Decoding], "MinimumWeightSolutionToLinearEquations": [Minimum Weight Solution to Linear Equations], "DirectedTwoCommodityIntegralFlow": [Directed Two-Commodity Integral Flow], + "MinimumEdgeCostFlow": [Minimum Edge-Cost Flow], "IntegralFlowHomologousArcs": [Integral Flow with Homologous Arcs], "IntegralFlowWithMultipliers": [Integral Flow With Multipliers], "MinMaxMulticenter": [Min-Max Multicenter], @@ -216,6 +224,17 @@ "QuantifiedBooleanFormulas": [Quantified Boolean Formulas (QBF)], "RectilinearPictureCompression": [Rectilinear Picture Compression], "FeasibleRegisterAssignment": [Feasible Register Assignment], + "MinimumRegisterSufficiencyForLoops": [Minimum Register Sufficiency for Loops], + "MinimumCodeGenerationOneRegister": [Minimum Code Generation (One Register)], + "MinimumCodeGenerationParallelAssignments": [Minimum Code Generation (Parallel Assignments)], + "MaximumDomaticNumber": [Maximum Domatic Number], + "MinimumCapacitatedSpanningTree": [Minimum Capacitated Spanning Tree], + "MinimumDecisionTree": [Minimum Decision Tree], + "MinimumDisjunctiveNormalForm": [Minimum Disjunctive Normal Form], + "MinimumGraphBandwidth": [Minimum Graph Bandwidth], + "MinimumMetricDimension": [Minimum Metric Dimension], + "VertexCover": [Vertex Cover], + "MinimumCodeGenerationUnlimitedRegisters": [Minimum Code Generation (Unlimited Registers)], "RegisterSufficiency": [Register Sufficiency], "ResourceConstrainedScheduling": [Resource Constrained Scheduling], "RootedTreeStorageAssignment": [Rooted Tree Storage Assignment], @@ -232,8 +251,11 @@ "StaffScheduling": [Staff Scheduling], "SteinerTree": [Steiner Tree], "SteinerTreeInGraphs": [Steiner Tree in Graphs], + "MinimumAxiomSet": [Minimum Axiom Set], 
"MinimumExternalMacroDataCompression": [Minimum External Macro Data Compression], "MinimumInternalMacroDataCompression": [Minimum Internal Macro Data Compression], + "MinimumFaultDetectionTestSet": [Minimum Fault Detection Test Set], + "MinimumWeightAndOrGraph": [Minimum Weight AND/OR Graph], "StringToStringCorrection": [String-to-String Correction], "StrongConnectivityAugmentation": [Strong Connectivity Augmentation], "SubgraphIsomorphism": [Subgraph Isomorphism], @@ -241,6 +263,9 @@ "TimetableDesign": [Timetable Design], "TwoDimensionalConsecutiveSets": [2-Dimensional Consecutive Sets], "KthLargestMTuple": [$K$th Largest $m$-Tuple], + "MaximumLikelihoodRanking": [Maximum Likelihood Ranking], + "OptimumCommunicationSpanningTree": [Optimum Communication Spanning Tree], + "SquareTiling": [Square Tiling], ) // Definition label: "def:" — each definition block must have a matching label @@ -623,7 +648,31 @@ In all graph problems below, $G = (V, E)$ denotes an undirected graph with $|V| } #{ - let x = load-model-example("MaxCut") + let x = load-model-example("VertexCover") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let k = x.instance.k + let sol = x.optimal_config + let cover = sol.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + [ + #problem-def("VertexCover")[ + Given an undirected graph $G = (V, E)$ and a positive integer $k <= |V|$, determine whether there exists a vertex cover of size at most $k$: a subset $V' subset.eq V$ with $|V'| <= k$ such that for each edge ${u, v} in E$, at least one of $u, v$ belongs to $V'$. + ][ + Vertex Cover is one of Karp's 21 NP-complete problems @karp1972 and the decision version of Minimum Vertex Cover @garey1979. The best known exact algorithm runs in $O^*(1.1996^n)$ time (Chen, Kanj, and Xia, 2010). + + *Example.* Consider a graph on $n = #nv$ vertices and $|E| = #ne$ edges with threshold $k = #k$. 
The cover $V' = {#cover.map(i => $v_#i$).join(", ")}$ with $|V'| = #cover.len() <= k$ is a valid vertex cover. + + #pred-commands( + "pred create --example VertexCover -o vc.json", + "pred solve vc.json", + "pred evaluate vc.json --config " + sol.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MaxCut", variant: (graph: "SimpleGraph", weight: "i32")) let nv = graph-num-vertices(x.instance) let ne = graph-num-edges(x.instance) let edges = x.instance.graph.edges @@ -3002,6 +3051,38 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let tmi = load-model-example("ThreeMatroidIntersection") + let n = tmi.instance.ground_set_size + let parts = tmi.instance.partitions + let K = tmi.instance.bound + let sol = tmi.optimal_config + let selected = sol.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + let fmt-set(items) = if items.len() == 0 { + $emptyset$ + } else { + "${" + items.map(e => str(e)).join(", ") + "}$" + } + let fmt-group(g) = "${" + g.map(e => str(e)).join(", ") + "}$" + [ + #problem-def("ThreeMatroidIntersection")[ + Given three partition matroids $(E, cal(F)_1)$, $(E, cal(F)_2)$, $(E, cal(F)_3)$ on a common ground set $E$ with $|E| = n$, and a positive integer $K <= n$, does there exist a subset $E' subset.eq E$ with $|E'| = K$ that is independent in all three matroids? A partition matroid partitions $E$ into groups; a set $S$ is independent if $|S sect G| <= 1$ for every group $G$. + ][ + Three-Matroid Intersection is problem SP11 in Garey & Johnson @garey1979 (section A3). While 2-matroid intersection is solvable in polynomial time (Edmonds, 1970) @edmonds1970, the jump to three matroids captures NP-hardness. NP-completeness is established by transformation from Three-Dimensional Matching, where each dimension induces a partition matroid. The restriction to partition matroids suffices for NP-completeness. 
+ + Doron-Arad, Kulik, and Shachnai (2024) @doron2024 showed that brute force essentially cannot be beaten: any algorithm requires $Omega(2^(n - 5 sqrt(n) log n))$ oracle queries. A marginal improvement to $2^(n - Omega(log^2 n))$ exists via Monotone Local Search @fomin2019. The direct brute-force algorithm runs in $O^*(2^n)$ time where $n = |E|$. + + *Example.* Let $E = {0, 1, dots, #(n - 1)}$ with $K = #K$. The three partition matroids have groups: $cal(F)_1$: #parts.at(0).map(fmt-group).join(", "); $cal(F)_2$: #parts.at(1).map(fmt-group).join(", "); $cal(F)_3$: #parts.at(2).map(fmt-group).join(", "). The subset $E' = #fmt-set(selected)$ is a valid common independent set of size $#K$: each matroid has at most one selected element per group. + + #pred-commands( + "pred create --example ThreeMatroidIntersection -o three-matroid-intersection.json", + "pred solve three-matroid-intersection.json", + "pred evaluate three-matroid-intersection.json --config " + tmi.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("ComparativeContainment") let n = x.instance.universe_size @@ -4042,6 +4123,70 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("Maximum2Satisfiability") + let n = x.instance.num_vars + let m = x.instance.clauses.len() + let clauses = x.instance.clauses + let sol = (config: x.optimal_config, metric: x.optimal_value) + let assign = sol.config + let fmt-lit(l) = if l > 0 { $x_#l$ } else { $not x_#(-l)$ } + let fmt-clause(c) = $paren.l #c.literals.map(fmt-lit).join($or$) paren.r$ + let eval-lit(l) = if l > 0 { assign.at(l - 1) } else { 1 - assign.at(-l - 1) } + let clause-sat(c) = c.literals.map(eval-lit).any(v => v == 1) + let sat-count = clauses.filter(clause-sat).len() + [ + #problem-def("Maximum2Satisfiability")[ + Given a set $U$ of $n$ Boolean variables and a collection $C = {C_1, dots, C_m}$ of $m$ clauses over $U$ with $|C_j| = 2$ for each $j$, find a 
truth assignment $bold(x) in {0,1}^n$ that maximizes the number of simultaneously satisfied clauses. + ][ + Maximum 2-Satisfiability (MAX-2-SAT) is one of the fundamental NP-hard optimization problems. While the decision version of 2-SAT is solvable in linear time by implication-graph analysis, the optimization variant---maximizing the number of satisfied clauses---is NP-hard @garey1979. The best known exact algorithm by Williams @williams2005 runs in $O^*(2^(0.7905n))$ time by reducing to a maximum-weight triangle problem and applying fast matrix multiplication. + + *Example.* Consider $m = #m$ clauses over $n = #n$ variables: $#clauses.map(fmt-clause).join($and$)$. The assignment $(#range(n).map(i => $x_#(i + 1)$).join(",")) = (#assign.map(v => str(v)).join(", "))$ satisfies #sat-count out of #m clauses. + + #pred-commands( + "pred create --example Maximum2Satisfiability -o max2sat.json", + "pred solve max2sat.json", + "pred evaluate max2sat.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#let max2sat_ilp = load-example("Maximum2Satisfiability", "ILP") +#let max2sat_ilp_sol = max2sat_ilp.solutions.at(0) +#reduction-rule("Maximum2Satisfiability", "ILP", + example: true, + example-caption: [$n = #max2sat_ilp.source.instance.num_vars$ variables, $m = #max2sat_ilp.source.instance.clauses.len()$ clauses], + extra: [ + #pred-commands( + "pred create --example Maximum2Satisfiability -o max2sat.json", + "pred reduce max2sat.json --to " + target-spec(max2sat_ilp) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate max2sat.json --config " + max2sat_ilp_sol.source_config.map(str).join(","), + ) + *Step 1 -- Source instance.* The canonical MAX-2-SAT instance has $n = #max2sat_ilp.source.instance.num_vars$ Boolean variables and $m = #max2sat_ilp.source.instance.clauses.len()$ clauses. 
+ + *Step 2 -- Build the binary ILP.* Introduce $n$ binary truth variables $y_0, dots, y_(n-1) in {0,1}$ and $m$ binary clause-indicator variables $z_0, dots, z_(m-1) in {0,1}$. The objective is $ max sum_(j=0)^(m-1) z_j $ subject to one constraint per clause $j$: $z_j <= l_1' + l_2'$ where $l_i'$ denotes $y_v$ for a positive literal on variable $v$ and $1 - y_v$ for a negated one. The resulting ILP has $n + m = #(max2sat_ilp.source.instance.num_vars + max2sat_ilp.source.instance.clauses.len())$ variables and $m = #max2sat_ilp.source.instance.clauses.len()$ constraints. + + *Step 3 -- Verify a solution.* The ILP optimum extracts the first $n$ variables as the truth assignment $bold(y)^* = (#max2sat_ilp_sol.source_config.map(str).join(", "))$, recovering all #max2sat_ilp_sol.source_config.len() source variables #sym.checkmark. + ], +)[ + A MAX-2-SAT instance maps directly to a binary ILP @garey1979: each Boolean variable becomes a binary decision variable, each clause gets a binary indicator variable, and a single linear inequality per clause links the indicator to its literals. The objective maximizes the sum of clause indicators, so the ILP optimum equals the maximum number of satisfiable clauses. +][ + _Construction._ Given $n$ Boolean variables and $m$ clauses, introduce binary variables $y_0, dots, y_(n-1) in {0,1}$ (truth assignment) and $z_0, dots, z_(m-1) in {0,1}$ (clause indicators). For each clause $C_j$ with literals $ell_1, ell_2$, where $v_i$ is the variable underlying $ell_i$, define $ell_i' = y_(v_i)$ if $ell_i$ is positive and $ell_i' = 1 - y_(v_i)$ if negated. Add the constraint $z_j <= ell_1' + ell_2'$, ensuring $z_j = 1$ only when the clause is satisfied. The ILP is: + $ + max quad & sum_(j=0)^(m-1) z_j \ + "subject to" quad & z_j <= ell_1' + ell_2' quad forall j in {0, dots, m - 1} \ + & y_i in {0, 1} quad forall i in {0, dots, n - 1} \ + & z_j in {0, 1} quad forall j in {0, dots, m - 1} + $. + The target has $n + m$ variables and $m$ constraints. 
+ + _Correctness._ ($arrow.r.double$) Any truth assignment $bold(y)$ satisfying $k$ clauses yields a feasible ILP solution by setting $z_j = 1$ iff clause $j$ is satisfied, achieving objective $k$. ($arrow.l.double$) Any feasible ILP solution with $z_j = 1$ has clause $j$ satisfied by the constraint, so the truth assignment satisfies at least $sum z_j$ clauses. Thus optimal values coincide. + + _Solution extraction._ Return the first $n$ components $(y_0, dots, y_(n-1))$ as the truth assignment. +] + #{ let x = load-model-example("NonTautology") let n = x.instance.num_vars @@ -4678,6 +4823,53 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MaximumLeafSpanningTree") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let sol = (config: x.optimal_config, metric: x.optimal_value) + let tree-edges = sol.config.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + let leaf-count = sol.metric + // compute degrees in the tree + let degrees = range(nv).map(v => tree-edges.map(i => edges.at(i)).filter(((u, w)) => u == v or w == v).len()) + let leaves = range(nv).filter(v => degrees.at(v) == 1) + [ + #problem-def("MaximumLeafSpanningTree")[ + Given a connected undirected graph $G = (V, E)$, find a spanning tree $T$ of $G$ that maximizes the number of leaves (degree-1 vertices) in $T$. + ][ + Maximum Leaf Spanning Tree is NP-hard @garey1979[ND2]. The problem has applications in network design and broadcasting, where maximizing the number of leaf nodes reduces the set of internal (relay) nodes. The best known exact algorithm runs in $O^*(1.8966^n)$ time, where $n = |V|$.#footnote[H. Fernau, J. Kneis, D. Kratsch, A. Langer, M. Liedloff, D. Raible, and P. Rossmanith, An exact algorithm for the Maximum Leaf Spanning Tree problem, _Theoretical Computer Science_, 412(45):6290--6302, 2011.] 
+ + *Example.* Consider $G$ with $n = #nv$ vertices and $m = #ne$ edges #edges.map(((u, v)) => [${#u, #v}$]).join(", "). The spanning tree using edges #tree-edges.map(i => [${#edges.at(i).at(0), #edges.at(i).at(1)}$]).join(", ") has #leaf-count leaves (vertices #leaves.map(v => [$#v$]).join(", ")), each with degree 1 in the tree. + + #pred-commands( + "pred create --example MaximumLeafSpanningTree -o mlst.json", + "pred solve mlst.json", + "pred evaluate mlst.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#reduction-rule("MaximumLeafSpanningTree", "ILP")[ + An MLST instance reduces to an integer linear program with $3m + n$ variables and $3n + 2m + 1$ constraints, using a single-commodity flow formulation to enforce spanning-tree connectivity and binary leaf indicators to encode the objective @garey1979. +][ + _Construction._ Given $G = (V, E)$ with $n = |V|$, $m = |E|$, root the flow at vertex $0$. Introduce binary edge selectors $y_e in {0, 1}$ for each $e in E$ ($m$ variables), binary leaf indicators $z_v in {0, 1}$ for each $v in V$ ($n$ variables), and directed flow variables $f_(u v), f_(v u) >= 0$ for each undirected edge ${u, v}$ ($2m$ variables). + + _Constraints:_ + + *Tree cardinality:* $sum_e y_e = n - 1$. + + *Flow conservation:* for the root, net outflow $= n - 1$; for each non-root vertex $v$, net inflow $= 1$. + + *Flow--edge linking:* $f_(u v) + f_(v u) <= (n - 1) y_e$ for each edge $e = {u, v}$. + + *Leaf detection:* $sum_(e in.rev v) y_e + (n - 2) z_v <= n - 1$ for each vertex $v$. + + *Binary bounds:* $y_e <= 1$, $z_v <= 1$. + + The objective is $max sum_(v in V) z_v$. 
+ + _Correctness._ ($arrow.r.double$) A spanning tree $T$ of $G$ with $ell$ leaves induces a feasible ILP solution: route one unit of flow from the root to every other vertex along the unique tree path, set $y_e = 1$ for tree edges, and set $z_v = 1$ for degree-1 vertices; constraint (4) is tight when $deg_T(v) = 1$ and slack otherwise, achieving objective $ell$. ($arrow.l.double$) Any feasible ILP solution with $sum y_e = n - 1$ and connectivity enforced by the flow yields a spanning tree; constraint (4) forces $z_v = 0$ whenever $deg_T(v) > 1$, and maximization ensures $z_v = 1$ for all leaves. + + _Solution extraction._ Return the edge-selector prefix $(y_0, dots, y_(m-1))$ as the source configuration. +] + #{ let x = load-model-example("MonochromaticTriangle") let nv = graph-num-vertices(x.instance) @@ -4958,6 +5150,30 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MinimumRegisterSufficiencyForLoops") + let N = x.instance.loop_length + let vars = x.instance.variables + let nv = vars.len() + let config = x.optimal_config + let num-regs = config.dedup().len() + [ + #problem-def("MinimumRegisterSufficiencyForLoops")[ + Given a loop of length $N$ (representing $N$ timesteps arranged in a circle) and a set of $n$ variables, each active during a contiguous circular arc of timesteps specified by $(s_i, l_i)$ covering timesteps ${s_i, s_i + 1, dots, s_i + l_i - 1} mod N$, assign a register $r_i in {0, dots, n-1}$ to each variable minimizing the number of distinct registers used, such that no two variables with overlapping arcs share the same register. + ][ + Minimum Register Sufficiency for Loops is problem SS20 in Garey & Johnson @garey1979. It is equivalent to minimum coloring of circular arc graphs. NP-complete via reduction from Chromatic Number. No algorithm improving on brute-force $O(n^n)$ enumeration is known for arbitrary circular arc instances. 
+ + *Example.* Let $N = #N$ timesteps and $n = #nv$ variables with arcs: #vars.enumerate().map(((i, v)) => $x_#i: [#(v.at(0)), #(v.at(0)) + #(v.at(1)))$).join(", ") mod $#N$. All pairs of arcs overlap (each arc covers half the circle and any two arcs share at least one timestep), forming a complete conflict graph $K_#nv$. The assignment $(#config.map(str).join(", "))$ uses #num-regs distinct registers, which is optimal. + + #pred-commands( + "pred create --example MinimumRegisterSufficiencyForLoops -o mrsfl.json", + "pred solve mrsfl.json --solver brute-force", + "pred evaluate mrsfl.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("RegisterSufficiency") let n = x.instance.num_vertices @@ -4985,6 +5201,148 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MinimumCodeGenerationOneRegister") + let n = x.instance.num_vertices + let edges = x.instance.edges + let num-leaves = x.instance.num_leaves + let num-internal = n - num-leaves + let config = x.optimal_config + // Build evaluation order: position -> internal vertex + // internal vertices are those with out-degree > 0 + let out-deg = range(n).map(v => edges.filter(e => e.at(0) == v).len()) + let internal = range(n).filter(v => out-deg.at(v) > 0) + let order = range(num-internal).map(pos => + range(num-internal).find(i => config.at(i) == pos) + ).map(i => internal.at(i)) + [ + #problem-def("MinimumCodeGenerationOneRegister")[ + Given a directed acyclic graph $G = (V, A)$ with maximum out-degree 2 representing an expression DAG, where leaf vertices (out-degree 0) are input values stored in memory, internal vertices are operations, and root vertices (in-degree 0) are the values to compute, find a program of minimum number of instructions for a one-register machine (supporting LOAD, STORE, and OP instructions) that computes all root vertices. 
+ ][ + Minimum Code Generation on a One-Register Machine is problem A11 PO4 in Garey & Johnson @garey1979. NP-complete via transformation from 3-Satisfiability @brunoSethi1976. Remains NP-complete even when the only vertices with in-degree greater than 1 have arcs only to leaves. For directed forests (expression trees), the Sethi--Ullman algorithm finds an optimal instruction sequence in $O(n)$ time @sethiUllman1970. For general DAGs, brute-force enumeration of all valid evaluation orderings runs in $O^*(2^n)$ time. + #footnote[No algorithm improving on brute-force is known for general expression DAGs.] + + *Example.* Consider $n = #n$ vertices with arcs: #{edges.map(a => $v_#(a.at(0)) arrow.r v_#(a.at(1))$).join(", ")}. Leaves (out-degree 0): $\{#(range(n).filter(v => out-deg.at(v) == 0).map(v => $v_#v$).join($, $))\}$. The evaluation order $(#order.map(v => $v_#v$).join(", "))$ yields an optimal program of #x.optimal_value instructions. + + #pred-commands( + "pred create --example MinimumCodeGenerationOneRegister -o mcgor.json", + "pred solve mcgor.json --solver brute-force", + "pred evaluate mcgor.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MinimumCodeGenerationUnlimitedRegisters") + let n = x.instance.num_vertices + let left-arcs = x.instance.left_arcs + let right-arcs = x.instance.right_arcs + // Build out-degree to identify leaves vs internal + let out-deg = range(n).map(v => + left-arcs.filter(e => e.at(0) == v).len() + right-arcs.filter(e => e.at(0) == v).len() + ) + let internal = range(n).filter(v => out-deg.at(v) > 0) + let num-internal = internal.len() + let config = x.optimal_config + let order = range(num-internal).map(pos => + range(num-internal).find(i => config.at(i) == pos) + ).map(i => internal.at(i)) + [ + #problem-def("MinimumCodeGenerationUnlimitedRegisters")[ + Given a directed acyclic graph $G = (V, A)$ with maximum out-degree 2 representing an expression DAG, and a partition of 
arcs into left ($L$) and right ($R$) operand sets, find a program of minimum number of instructions for an unlimited-register machine using 2-address instructions (OP and LOAD/copy) that computes all root vertices. The OP instruction computes a vertex and overwrites the left operand's register; a LOAD instruction copies a register to preserve a value before destruction. + ][ + Minimum Code Generation with Unlimited Registers is problem A11 PO5 in Garey & Johnson @garey1979. NP-complete via transformation from Feedback Vertex Set @ahoJohnsonUllman1977. Remains NP-complete even if the only vertices with in-degree greater than 1 are leaves. Polynomial for forests and when 3-address instructions are allowed. For general DAGs, brute-force enumeration runs in $O^*(2^n)$ time. + #footnote[No algorithm improving on brute-force is known for general expression DAGs with 2-address instructions.] + + *Example.* Consider $n = #n$ vertices with left arcs $L$: #{left-arcs.map(a => $v_#(a.at(0)) arrow.r v_#(a.at(1))$).join(", ")} and right arcs $R$: #{right-arcs.map(a => $v_#(a.at(0)) arrow.r v_#(a.at(1))$).join(", ")}. Leaves (out-degree 0): $\{#(range(n).filter(v => out-deg.at(v) == 0).map(v => $v_#v$).join($, $))\}$. The evaluation order $(#order.map(v => $v_#v$).join(", "))$ yields an optimal program of #x.optimal_value instructions. 
+ + #pred-commands( + "pred create --example MinimumCodeGenerationUnlimitedRegisters -o mcgur.json", + "pred solve mcgur.json --solver brute-force", + "pred evaluate mcgur.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MinimumCodeGenerationParallelAssignments") + let nv = x.instance.num_variables + let assigns = x.instance.assignments + let m = assigns.len() + let config = x.optimal_config + // Build execution order: order[pos] = assignment index + let order = range(m).map(pos => + range(m).find(i => config.at(i) == pos) + ) + // Count backward deps for the optimal ordering + [ + #problem-def("MinimumCodeGenerationParallelAssignments")[ + Given a set $V$ of variables and a collection of simultaneous assignments $A_i: v_i arrow.l op(B_i)$ where $v_i in V$ is the target variable and $B_i subset.eq V$ is the set of variables read, find a permutation $pi$ of the assignments that minimizes the number of backward dependencies. A backward dependency occurs when $v_(pi(i)) in B_(pi(j))$ for some $j > i$, i.e., assignment $pi(i)$ overwrites a variable that a later assignment $pi(j)$ still needs to read. + ][ + Minimum Code Generation for Parallel Assignments is problem A11 PO6 in Garey & Johnson @garey1979. NP-complete via transformation from Feedback Vertex Set @sethi1975. Remains NP-complete even when $|B_i| lt.eq 2$ for all assignments. For general instances, brute-force enumeration of all permutations runs in $O^*(2^m)$ time via dynamic programming over subsets. + #footnote[No algorithm improving on brute-force is known for general parallel assignment instances.] + + *Example.* Consider $#nv$ variables and $#m$ assignments: #{assigns.enumerate().map(((i, a)) => { + let target = a.at(0) + let reads = a.at(1) + $A_#i: v_#target arrow.l op({#reads.map(r => $v_#r$).join($, $)})$ + }).join(", ")}. The execution order $(#order.map(i => $A_#i$).join(", "))$ yields #x.optimal_value backward dependencies. 
+ + #pred-commands( + "pred create --example MinimumCodeGenerationParallelAssignments -o mcgpa.json", + "pred solve mcgpa.json --solver brute-force", + "pred evaluate mcgpa.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MinimumDecisionTree") + let n = x.instance.num_objects + let m = x.instance.num_tests + let sol = x.optimal_config + let tepl = x.optimal_value + [ + #problem-def("MinimumDecisionTree")[ + Given a set $S$ of $n$ objects and $m$ binary tests $T_1, dots, T_m$ where each test maps $S$ to ${0,1}$ and every pair of objects is distinguished by at least one test, find a decision tree using tests from $T$ that identifies each object and minimizes the total external path length (sum of leaf depths). + ][ + Minimum Decision Tree (MS15) models optimal test sequencing for object identification @garey1979. NP-hard even when each test has at most 3 positive objects (Hyafil and Rivest 1976, via reduction from Exact Cover by 3-Sets). The Sethi--Ullman algorithm solves the tree (forest) case in polynomial time. The brute-force bound is $O^*(m^n)$. + + *Example.* Consider $n = #n$ objects and $m = #m$ tests. The optimal decision tree has total external path length #tepl, achieved by a balanced split at the root. + + #pred-commands( + "pred create --example MinimumDecisionTree -o mdt.json", + "pred solve mdt.json", + "pred evaluate mdt.json --config " + sol.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MinimumDisjunctiveNormalForm") + let n = x.instance.num_variables + let num_pi = x.instance.prime_implicants.len() + let num_mt = x.instance.minterms.len() + [ + #problem-def("MinimumDisjunctiveNormalForm")[ + Given $n$ Boolean variables and a Boolean function $f: {0,1}^n -> {0,1}$ specified by its truth table, find a disjunctive normal form (DNF) formula with the minimum number of terms (disjuncts) that is equivalent to $f$. 
+ ][ + Minimum Disjunctive Normal Form (LO9) is the classic two-level logic minimization problem @garey1979. NP-hard (Masek 1979, via reduction from Minimum Cover). The Quine--McCluskey algorithm enumerates all prime implicants, reducing the problem to minimum set cover. The worst-case number of prime implicants is $Theta(3^n slash sqrt(n))$ (Chandra and Markowsky 1978). + + *Example.* A function on $n = #n$ variables with #num_mt minterms has #num_pi prime implicants. The minimum cover requires #x.optimal_value terms. + + #pred-commands( + "pred create --example MinimumDisjunctiveNormalForm -o mdnf.json", + "pred solve mdnf.json", + ) + ] + ] +} + #{ + let x = load-model-example("RuralPostman") + let nv = x.instance.graph.num_vertices @@ -5574,6 +5932,37 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] ] } +#{ + let x = load-model-example("NumericalMatchingWithTargetSums") + let m = x.instance.sizes_x.len() + let sx = x.instance.sizes_x + let sy = x.instance.sizes_y + let targets = x.instance.targets + let config = x.optimal_config + [ + #problem-def("NumericalMatchingWithTargetSums")[ + Given two disjoint sets $X$ and $Y$ each with $m$ elements, integer sizes $s(x_i)$ for $x_i in X$ and $s(y_j)$ for $y_j in Y$, and a multiset of $m$ target values $B_1, dots, B_m$, determine whether $X union Y$ can be partitioned into $m$ pairs, each containing one element from $X$ and one from $Y$, such that the multiset of pair sums ${s(x_i) + s(y_(pi(i)))}$ equals the target multiset. + ][ + Numerical Matching with Target Sums is NP-complete in the strong sense (SP17 in Garey and Johnson @garey1979). It generalizes bipartite perfect matching by imposing sum constraints on each pair. Brute-force enumeration of all $m!$ pairings runs in $O^*(m!)$ time; dynamic programming over subsets of $Y$ improves this to $O^*(2^m)$. + + *Example.* Let $m = #m$, $X = (#sx.map(str).join(", "))$, $Y = (#sy.map(str).join(", "))$, targets $= (#targets.map(str).join(", "))$. 
The matching $pi = (#config.map(str).join(", "))$ yields sums #range(m).map(i => [$#(sx.at(i)) + #(sy.at(config.at(i))) = #(sx.at(i) + sy.at(config.at(i)))$]).join(", "), which as a multiset equals the targets. + + #pred-commands( + "pred create --example NumericalMatchingWithTargetSums -o nmts.json", + "pred solve nmts.json", + "pred evaluate nmts.json --config " + config.map(str).join(","), + ) + ] + ] +} +#reduction-rule("NumericalMatchingWithTargetSums", "ILP", + example: true, + example-caption: [Numerical Matching with Target Sums to ILP via compatible-triple assignment variables.], +)[ + Introduce a binary variable $z_(i,j,k) in {0,1}$ for each _compatible triple_ $(i,j,k)$ where $s(x_i) + s(y_j) = B_k$. The constraints ensure a perfect matching: $sum_(j,k) z_(i,j,k) = 1$ for each $i$ (every $x_i$ matched once), $sum_(i,k) z_(i,j,k) = 1$ for each $j$ (every $y_j$ matched once), $sum_(i,j) z_(i,j,k) = 1$ for each $k$ (every target used once). The objective is trivial (minimize 0), since this is a feasibility problem. +][ + _Correctness._ By construction, variables are only created for triples satisfying $s(x_i) + s(y_j) = B_k$. The three families of equality constraints enforce that the assignment is a bijection on $X$, $Y$, and the target indices. Any feasible ILP solution therefore defines a permutation $pi$ with $s(x_i) + s(y_(pi(i)))$ matching a distinct target, and conversely any valid matching maps to a feasible binary assignment. The number of variables is at most $m^3$ (all triples compatible), and the number of constraints is $3m$. 
+] #{ let x = load-model-example("NonLivenessFreePetriNet") let np = x.instance.num_places @@ -5596,6 +5985,37 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MinimumAxiomSet") + let ns = x.instance.num_sentences + let ts = x.instance.true_sentences + let nt = ts.len() + let imps = x.instance.implications + let config = x.optimal_config + let selected = range(nt).filter(i => config.at(i) == 1) + let sel-labels = selected.map(i => str(ts.at(i))) + [ + #problem-def("MinimumAxiomSet")[ + Given a finite set of sentences $S = {s_0, dots, s_(n-1)}$, a subset $T subset.eq S$ of true sentences, and a set of implications ${(A_j, c_j)}$ where each $A_j subset.eq S$ and $c_j in S$, find a smallest subset $S_0 subset.eq T$ such that the deductive closure of $S_0$ under the implications equals $T$. That is, starting from $S_0$, repeatedly applying every rule "if all sentences in $A_j$ hold then $c_j$ holds" until no new sentences are added must yield exactly $T$. + ][ + Minimum Axiom Set is problem LO6 in Garey and Johnson @garey1979. The problem models finding a minimal set of assumptions (axioms) from which all truths in a theory can be derived. It generalises set cover: when every implication has a single antecedent, the problem reduces to finding a minimum dominating set in the implication graph. No algorithm improving on brute-force ($O(2^(|T|))$) is known for the general case.#footnote[No algorithm improving on brute-force is known for the general Minimum Axiom Set problem.] + + *Example.* Let $S = {s_0, dots, s_7}$ ($n = #ns$) with $T = S$ (all sentences true) and implications + #imps.map(imp => { + let ante = imp.at(0).map(str).join(", ") + let cons = str(imp.at(1)) + [$({#ante} arrow.r #cons)$] + }).join(", "). 
Selecting axioms $S_0 = {#sel-labels.join(", ")}$ generates the full deductive closure $T$ in three rounds: first ${0,1} arrow.r {2,3,4,5}$, then ${2,4},{3,5} arrow.r {6,7}$, then ${6,7} arrow.r {0,1}$ (already present). The optimal value is $#x.optimal_value$. + + #pred-commands( + "pred create --example MinimumAxiomSet -o axiom.json", + "pred solve axiom.json --solver brute-force", + "pred evaluate axiom.json --config " + config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("Betweenness") let n = x.instance.num_elements @@ -5640,6 +6060,29 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("Clustering") + let n = x.instance.distances.len() + let K = x.instance.num_clusters + let B = x.instance.diameter_bound + let config = x.optimal_config + [ + #problem-def("Clustering")[ + Given a set $A$ of $n$ elements with a symmetric distance function $d: binom(A,2) -> NN$ (represented as a matrix with zero diagonal), a cluster count bound $K$, and a diameter bound $B$, determine whether there exists a partition of the elements into at most $K$ non-empty clusters such that for every cluster $C$, all pairwise distances within $C$ satisfy $d(i,j) <= B$. + ][ + Clustering is a fundamental problem in unsupervised learning and data analysis. The variant considered here is the diameter-bounded formulation, which is NP-complete. No algorithm improving on brute-force ($K^n$ enumeration) is known for the general case.#footnote[No algorithm improving on brute-force is known for general diameter-bounded clustering.] + + *Example.* Consider $n = #n$ elements with $K = #K$ clusters and diameter bound $B = #B$. The distance matrix has two tight groups ${0,1,2}$ and ${3,4,5}$ with intra-group distance 1 and inter-group distance 3.
The witness assignment $(#config.map(str).join(", "))$ partitions elements into clusters ${0,1,2}$ and ${3,4,5}$; each cluster has maximum pairwise distance $1 <= #B$. + + #pred-commands( + "pred create --example Clustering -o clustering.json", + "pred solve clustering.json", + "pred evaluate clustering.json --config " + config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("DynamicStorageAllocation") let items = x.instance.items @@ -6077,6 +6520,64 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], _Solution extraction._ Walk through the active segments (via $ell_i$ and $p_(i,lambda,r)$) to reconstruct $C$, mapping source reference positions to compressed-string positions. ] +#{ + let x = load-model-example("MinimumWeightAndOrGraph") + let n = x.instance.num_vertices + let arcs = x.instance.arcs + let src = x.instance.source + let gates = x.instance.gate_types + let ws = x.instance.arc_weights + let cfg = x.optimal_config + let sel-arcs = range(arcs.len()).filter(i => cfg.at(i) == 1) + let total = sel-arcs.map(i => ws.at(i)).sum() + [ + #problem-def("MinimumWeightAndOrGraph")[ + Given a directed acyclic graph $G = (V, A)$ with $n = |V|$ vertices, a source vertex $s in V$, a gate-type function $g: V arrow {"AND", "OR", "leaf"}$, and arc weights $w: A arrow ZZ$, find a solution subgraph $S subset.eq A$ of minimum total weight $sum_(a in S) w(a)$. A solution subgraph is valid when: (1) the source is solved, (2) for each solved AND-gate vertex $v$, all outgoing arcs from $v$ are in $S$, (3) for each solved OR-gate vertex $v$, at least one outgoing arc from $v$ is in $S$, and (4) leaf vertices are trivially solved. A vertex $v != s$ is solved if there exists an arc $(u, v) in S$ with $u$ solved. + ][ + AND/OR graphs generalize search trees and game trees and arise in AI planning, logic programming, and design-space exploration. 
Dynamic-programming algorithms on tree-structured AND/OR graphs run in linear time, but the general DAG case requires exponential enumeration.#footnote[No algorithm improving on brute-force enumeration of all $2^(|A|)$ arc subsets is known for general AND/OR DAGs.] + + *Example.* Consider $n = #n$ vertices with source $v_#src$ (AND gate). Vertices $v_1, v_2$ are OR gates; $v_3, v_4, v_5, v_6$ are leaves. Arcs with weights: #{range(arcs.len()).map(i => { + let a = arcs.at(i) + $v_#(a.at(0)) arrow.r v_#(a.at(1)) (#(ws.at(i)))$ + }).join(", ")}. Since $v_0$ is AND, both outgoing arcs must be selected (cost $#(ws.at(0)) + #(ws.at(1)) = #(ws.at(0) + ws.at(1))$). For OR gates, pick the cheapest outgoing arc: $v_1 arrow.r v_4$ (cost #(ws.at(3))) and $v_2 arrow.r v_6$ (cost #(ws.at(5))). Total weight: $#total$. + + #pred-commands( + "pred create --example MinimumWeightAndOrGraph -o mwaog.json", + "pred solve mwaog.json --solver brute-force", + "pred evaluate mwaog.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MinimumFaultDetectionTestSet") + let n = x.instance.num_vertices + let arcs = x.instance.arcs + let inputs = x.instance.inputs + let outputs = x.instance.outputs + let ni = inputs.len() + let no = outputs.len() + let cfg = x.optimal_config + let sel-pairs = range(cfg.len()).filter(i => cfg.at(i) == 1) + let count = sel-pairs.len() + [ + #problem-def("MinimumFaultDetectionTestSet")[ + Given a directed acyclic graph $G = (V, A)$ with $n = |V|$ vertices, designated input vertices $I subset.eq V$, and designated output vertices $O subset.eq V$, find the minimum number of input-output pairs $(i, o) in I times O$ such that the union of their coverage sets covers all vertices $V$. For a pair $(i, o)$, the coverage set is the set of vertices reachable from $i$ that can also reach $o$. 
+ ][ + Fault detection test sets arise in hardware testing: each input-output path through a circuit's DAG representation can detect faults at the vertices it traverses, and the goal is to find the fewest test paths that collectively exercise every component. The problem generalises Set Cover over a structured family of subsets induced by DAG reachability.#footnote[No algorithm improving on brute-force enumeration of all $2^(|I| dot |O|)$ input-output pair subsets is known for the general case.] + + *Example.* Consider $n = #n$ vertices with inputs $I = {#inputs.map(str).join(", ")}$ and outputs $O = {#outputs.map(str).join(", ")}$. Arcs: #{arcs.map(a => $#(a.at(0)) arrow.r #(a.at(1))$).join(", ")}. Selecting pair $(#(inputs.at(0)), #(outputs.at(0)))$ covers ${0, 2, 3, 5}$, and pair $(#(inputs.at(1)), #(outputs.at(1)))$ covers ${1, 3, 4, 6}$. Their union is all $#n$ vertices, giving an optimal count of $#count$. + + #pred-commands( + "pred create --example MinimumFaultDetectionTestSet -o mfdts.json", + "pred solve mfdts.json --solver brute-force", + "pred evaluate mfdts.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("MinimumFeedbackArcSet") let nv = x.instance.graph.num_vertices @@ -7722,6 +8223,54 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ) ] +#problem-def("MinimumEdgeCostFlow")[ + Given a directed graph $G = (V, A)$ with arc capacities $c: A -> ZZ^+$, arc prices $p: A -> ZZ$, a source vertex $s$, a sink vertex $t$, and a flow requirement $R in ZZ^+$, find an integral flow $f: A -> ZZ_(>= 0)$ of value at least $R$ that minimizes the total edge cost $sum_(a in A: f(a) > 0) p(a)$ — the sum of prices of arcs carrying nonzero flow. +][ + Minimum Edge-Cost Flow is an NP-hard network design problem that arises in telecommunications and logistics, where there is a fixed cost (price) for activating each link, independent of the actual traffic volume. 
Unlike the classical minimum-cost flow problem (where cost is proportional to flow), the edge-cost variant introduces a combinatorial selection aspect: choosing which arcs to activate. The problem is closely related to fixed-charge network flow problems @garey1979. + + The brute-force bound $(C + 1)^(|A|)$ arises from enumerating all possible integral flow vectors, where $C = max_(a in A) c(a)$.#footnote[No sub-exponential exact algorithm is known for Minimum Edge-Cost Flow.] + + *Example.* Consider a directed graph with 5 vertices, source $s = 0$, sink $t = 4$, requirement $R = 3$, and 6 arcs with capacities $c(a) = 2$ for all arcs. The prices are $p(0,1) = 3$, $p(0,2) = 1$, $p(0,3) = 2$, and all arcs entering the sink have price 0. The optimal flow routes 1 unit via vertex 2 and 2 units via vertex 3, activating 4 arcs for a total edge cost of $1 + 2 + 0 + 0 = 3$. + + #figure( + canvas(length: 1cm, { + import draw: * + let positions = ( + (0, 0), // 0 = s + (2, 1.5), // 1 + (2, 0), // 2 + (2, -1.5), // 3 + (4, 0), // 4 = t + ) + let labels = ($s$, $1$, $2$, $3$, $t$) + let arcs = ((0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)) + let prices = (3, 1, 2, 0, 0, 0) + // Optimal arcs: (0,2), (0,3), (2,4), (3,4) — indices 1, 2, 4, 5 + let opt-arcs = (1, 2, 4, 5) + + // Draw arcs + for (idx, (u, v)) in arcs.enumerate() { + let from = positions.at(u) + let to = positions.at(v) + let is-opt = opt-arcs.contains(idx) + let color = if is-opt { blue } else { gray.darken(20%) } + let thickness = if is-opt { 1.2pt } else { 0.6pt } + line(from, to, stroke: (paint: color, thickness: thickness), mark: (end: "straight", scale: 0.5), name: "arc" + str(idx)) + // Price label + content("arc" + str(idx) + ".mid", text(7pt, $#prices.at(idx)$), fill: white, frame: "rect", padding: 0.06, stroke: none) + } + + // Draw vertices + for (k, pos) in positions.enumerate() { + let fill = if k == 0 or k == 4 { blue.lighten(70%) } else { white } + circle(pos, radius: 0.3, fill: fill, stroke: 0.6pt, 
name: str(k)) + content(pos, text(8pt, labels.at(k))) + } + }), + caption: [Minimum Edge-Cost Flow: optimal flow (blue) routes via vertices 2 and 3, total edge cost 3. Arc labels show prices.], + ) +] + #{ let x = load-model-example("IntegralFlowBundles") let source = x.instance.source @@ -8160,6 +8709,29 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MinimumMatrixCover") + let A = x.instance.matrix + let n = A.len() + let cfg = x.optimal_config + let signs = cfg.map(v => if v == 0 { $-1$ } else { $+1$ }) + [ + #problem-def("MinimumMatrixCover")[ + Given an $n times n$ nonnegative integer matrix $A$, find a function $f: \{1, dots, n\} -> \{-1, +1\}$ minimizing $sum_(i,j) a_(i j) dot f(i) dot f(j)$. + ][ + Minimum Matrix Cover asks for a sign assignment to rows (equivalently columns) of a square matrix that minimizes the resulting quadratic form. Each binary variable $x_i in \{0, 1\}$ encodes a sign $f(i) = 2 x_i - 1$. Since $f(i)^2 = 1$, diagonal entries contribute a constant $sum_i a_(i i)$; the optimization depends only on off-diagonal structure. The brute-force complexity is $O(2^n)$ where $n$ is the matrix dimension.#footnote[No algorithm improving on brute-force enumeration of all $2^n$ sign assignments is known for the general case.] + + *Example.* Let $A$ be the #(n)$times$#(n) symmetric matrix with zero diagonal shown below. The optimal config $(#cfg.map(str).join(", "))$ assigns signs $(#signs.join(", "))$, yielding value $= #x.optimal_value$. 
+ + #pred-commands( + "pred create --example " + problem-spec(x) + " -o mmc.json", + "pred solve mmc.json", + "pred evaluate mmc.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("MinimumMatrixDomination") let M = x.instance.matrix @@ -8185,6 +8757,53 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MinimumWeightDecoding") + let H = x.instance.matrix + let s = x.instance.target + let n = H.len() + let m = if n > 0 { H.at(0).len() } else { 0 } + let cfg = x.optimal_config + let wt = x.optimal_value + [ + #problem-def("MinimumWeightDecoding")[ + Given an $n times m$ binary parity-check matrix $H$ and a binary syndrome vector $s in {0,1}^n$, find a binary vector $x in {0,1}^m$ minimizing the Hamming weight $|x| = sum_(j=0)^(m-1) x_j$ subject to $H x equiv s (mod space 2)$. + ][ + Minimum Weight Decoding is a fundamental problem in coding theory. Given a linear code with parity-check matrix $H$, the task is to find the minimum-weight error pattern consistent with a received syndrome. The problem is equivalent to finding the closest codeword to a received word and is central to the hardness of decoding random linear codes. + + The best known algorithms for general instances use information set decoding techniques, achieving $O(2^(0.0494 n))$ where $n$ is the block length. + + *Example.* Let $H$ be the #(n)$times$#(m) binary matrix and $s = (#s.map(v => if v { "1" } else { "0" }).join(", "))$. The optimal config $(#cfg.map(str).join(", "))$ has Hamming weight $#wt$. 
+ + #pred-commands( + "pred create --example " + problem-spec(x) + " -o mwd.json", + "pred solve mwd.json", + "pred evaluate mwd.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-example("MinimumWeightDecoding", "ILP") + let src = x.source + let tgt = x.target + [ + #reduction-rule("MinimumWeightDecoding", "ILP", + example: true, + example-caption: [Minimum Weight Decoding to ILP ($#src.instance.matrix.len()$ rows, $#src.instance.matrix.at(0).len()$ columns)], + )[ + The GF(2) constraint $H x equiv s (mod space 2)$ is linearized by introducing integer slack variables: for each row $i$, $sum_j H_(i j) x_j - 2 k_i = s_i$ where $k_i >= 0$ is an integer. Binary bounds $x_j <= 1$ are added, and the objective minimizes $sum x_j$. + ][ + _Construction._ Given $H in {0,1}^(n times m)$ and $s in {0,1}^n$, create an ILP with $m + n$ variables: $x_0, dots, x_(m-1)$ (binary) and $k_0, dots, k_(n-1)$ (non-negative integer). Add $n$ equality constraints $sum_j H_(i j) x_j - 2 k_i = s_i$ and $m$ binary bounds $x_j <= 1$. The objective is $min sum_(j=0)^(m-1) x_j$. + + _Correctness._ ($arrow.r.double$) If $x^*$ is feasible for the source, then $H x^* equiv s (mod space 2)$, so $sum_j H_(i j) x^*_j = s_i + 2 k_i$ for some $k_i >= 0$. Setting these $k_i$ values gives a feasible ILP solution with the same objective. ($arrow.l.double$) If $(x^*, k^*)$ is feasible for the ILP, then $sum_j H_(i j) x^*_j = s_i + 2 k^*_i$ implies $sum_j H_(i j) x^*_j equiv s_i (mod space 2)$ for all $i$, and $x^*_j in {0, 1}$ by the binary bounds. + + _Solution extraction._ Take the first $m$ variables as the source configuration. 
+ ] + ] +} + #{ let x = load-model-example("MinimumWeightSolutionToLinearEquations") let A = x.instance.matrix @@ -8368,6 +8987,136 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MaximumLikelihoodRanking") + let A = x.instance.matrix + let n = A.len() + let config = x.optimal_config + let opt-val = metric-value(x.optimal_value) + // Compute disagreement pairs for the optimal solution + let disagreement-pairs = () + for a in range(n) { + for b in range(n) { + if a != b and config.at(a) > config.at(b) { + disagreement-pairs.push((a, b, A.at(a).at(b))) + } + } + } + let total-cost = disagreement-pairs.map(p => p.at(2)).sum() + [ + #problem-def("MaximumLikelihoodRanking")[ + Given an $n times n$ comparison matrix $A$ with $a_(i i) = 0$ for all $i$, find a permutation $pi$ of ${0, dots, n - 1}$ minimizing the _disagreement cost_ $sum_(i > j) a_(pi(i), pi(j))$, where $pi$ maps rank positions to items. + ][ + The Maximum Likelihood Ranking problem arises in voting theory and tournament aggregation: given pairwise comparison counts between $n$ alternatives, find a linear ordering that best explains the observed data @garey1979. The problem is equivalent to finding a minimum-weight linear extension of a tournament. When $a_(i j) + a_(j i) = c$ for all $i != j$, the problem is also known as the _Kemeny ranking_ or _minimum feedback arc set in tournaments_. The best known exact algorithm uses dynamic programming over subsets (Held--Karp style), running in $O(n^2 dot 2^n)$ time.#footnote[No algorithm with a significantly better exponential base than $2^n$ is known for the general Maximum Likelihood Ranking problem.] + + *Example.* Consider $n = #n$ items with comparison matrix $A$, where rows and columns index items $0, dots, #(n - 1)$. Ranking all items by identity ($pi(i) = i$) yields disagreement cost $#disagreement-pairs.map(p => str(p.at(2))).join(" + ") = #total-cost$, which is optimal. 
+ + #pred-commands( + "pred create --example " + problem-spec(x) + " -o mlr.json", + "pred solve mlr.json --solver brute-force", + "pred evaluate mlr.json --config " + x.optimal_config.map(str).join(","), + ) + + #figure( + canvas(length: 0.7cm, { + import draw: * + let cell = 0.9 + let gap = 0.12 + for i in range(n) { + for j in range(n) { + let val = A.at(i).at(j) + let is-disagree = i != j and config.at(i) > config.at(j) + let f = if i == j { luma(230) } else if is-disagree { graph-colors.at(0).transparentize(40%) } else { white } + rect( + (j * cell, -i * cell), + (j * cell + cell - gap, -i * cell - cell + gap), + fill: f, + stroke: 0.3pt + luma(180), + ) + content( + (j * cell + (cell - gap) / 2, -i * cell - (cell - gap) / 2), + text(8pt, str(val)), + ) + } + } + // Column labels + for j in range(n) { + content( + (j * cell + (cell - gap) / 2, 0.35), + text(7pt)[$#j$], + ) + } + // Row labels + for i in range(n) { + content( + (-0.35, -i * cell - (cell - gap) / 2), + text(7pt)[$#i$], + ) + } + }), + caption: [Comparison matrix $A$ ($n = #n$). 
Highlighted cells are disagreement entries under the identity ranking.], + ) + ] + ] +} + +#{ + let x = load-model-example("OptimumCommunicationSpanningTree") + let n = x.instance.num_vertices + let W = x.instance.edge_weights + let R = x.instance.requirements + let config = x.optimal_config + let opt-val = metric-value(x.optimal_value) + // Build edge list and selected edges + let edges = () + let edge-idx = 0 + for i in range(n) { + for j in range(i + 1, n) { + edges.push((i, j, config.at(edge-idx))) + edge-idx += 1 + } + } + let tree-edges = edges.filter(e => e.at(2) == 1) + [ + #problem-def("OptimumCommunicationSpanningTree")[ + Given a complete graph $K_n$ with edge weights $w(e) >= 0$ and communication requirements $r(u, v) >= 0$ for each vertex pair, find a spanning tree $T$ minimizing the total communication cost $sum_(u < v) r(u, v) dot W_T (u, v)$, where $W_T (u, v)$ is the sum of edge weights on the unique path from $u$ to $v$ in $T$. + ][ + The Optimum Communication Spanning Tree problem (ND7 in @garey1979) models communication network design where edge weights represent link costs and requirements represent traffic demands between vertex pairs. NP-hard even when all requirements are equal (reducing to the Minimum Routing Cost Spanning Tree). Polynomial when all edge weights are equal, solved by the Gomory-Hu tree. The best known exact approach enumerates all $n^(n-2)$ labeled spanning trees. + + *Example.* Consider $K_#n$ with edge weight matrix $W$ and requirement matrix $R$. The optimal spanning tree uses edges ${#tree-edges.map(e => $(#e.at(0), #e.at(1))$).join(", ")}$ with total communication cost $#opt-val$. 
+ + #pred-commands( + "pred create --example " + problem-spec(x) + " -o ocst.json", + "pred solve ocst.json --solver brute-force", + "pred evaluate ocst.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("SquareTiling") + let nc = x.instance.num_colors + let tiles = x.instance.tiles + let n = x.instance.grid_size + let config = x.optimal_config + [ + #problem-def("SquareTiling")[ + Given a set $C$ of colors, a collection $T subset.eq C^4$ of tile types (where $angle.l a, b, c, d angle.r$ denotes a tile whose top, right, bottom, and left sides are colored $a, b, c, d$ respectively), and a positive integer $N$, determine whether there exists an assignment of a tile $f(i,j) in T$ to each grid cell $(i,j)$, $0 <= i,j < N$, such that (1) if $f(i,j) = angle.l a,b,c,d angle.r$ and $f(i+1,j) = angle.l a',b',c',d' angle.r$ then $c = a'$ (bottom of upper tile matches top of lower tile), and (2) if $f(i,j) = angle.l a,b,c,d angle.r$ and $f(i,j+1) = angle.l a',b',c',d' angle.r$ then $b = d'$ (right of left tile matches left of right tile). Tiles may be reused but not rotated or reflected. + ][ + Square Tiling (also known as Bounded Wang Tiling) is problem GP13 in Garey and Johnson @garey1979. It was shown NP-complete via transformation from Directed Hamiltonian Path. The infinite variant (tiling the entire plane) is famously undecidable (Berger, 1966). The best known exact approach enumerates all $|T|^(N^2)$ assignments. + + *Example.* Consider $|C| = #nc$ colors, $|T| = #tiles.len()$ tiles, and grid size $N = #n$. The tiles are #tiles.enumerate().map(((i, t)) => [$t_#i = angle.l #t.at(0), #t.at(1), #t.at(2), #t.at(3) angle.r$]).join(", "). The witness assignment $(#config.map(str).join(", "))$ places $t_#config.at(0), t_#config.at(1)$ in row 0 and $t_#config.at(2), t_#config.at(3)$ in row 1, satisfying all edge-color constraints. 
+ + #pred-commands( + "pred create --example SquareTiling -o square_tiling.json", + "pred solve square_tiling.json", + "pred evaluate square_tiling.json --config " + config.map(str).join(","), + ) + ] + ] +} + // Completeness check: warn about problem types in JSON but missing from paper #{ let json-models = { @@ -11938,6 +12687,112 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ For each vertex $v$, output its unique parent $u$ with $p_(v,u) = 1$. ] +#reduction-rule("MinimumEdgeCostFlow", "ILP")[ + Introduce integer flow variables and binary arc-activation indicators, link them so that an indicator is forced to 1 whenever the corresponding arc carries positive flow, and minimize the total price of activated arcs. +][ + _Construction._ Let $m = |A|$ and $n = |V|$. Use `ILP` with $2m$ variables: integer flow variables $f_a in {0, dots, c(a)}$ for $a in {0, dots, m - 1}$ and binary activation indicators $y_a in {0, 1}$ for $a in {m, dots, 2m - 1}$. + + Constraints: + - _Linking:_ $f_a - c(a) dot y_a <= 0$ for each arc $a$ — forces $y_a = 1$ when $f_a > 0$ ($m$ constraints). + - _Binary bound:_ $y_a <= 1$ for each arc $a$ ($m$ constraints). + - _Flow conservation:_ $sum_(a "entering" v) f_a - sum_(a "leaving" v) f_a = 0$ for each non-terminal vertex $v in V backslash {s, t}$ ($n - 2$ constraints). + - _Flow requirement:_ $sum_(a "entering" t) f_a - sum_(a "leaving" t) f_a >= R$ (1 constraint). + + Objective: $min sum_(a=0)^(m-1) p(a) dot y_a$. + + Total: $2m + n - 1$ constraints, $2m$ variables. + + _Correctness._ ($arrow.r.double$) Any feasible flow of value $>= R$ determines the flow variables directly, and the linking constraints force $y_a = 1$ for every arc with positive flow, so the ILP objective equals the edge cost. ($arrow.l.double$) Any feasible ILP solution has $f_a <= c(a) y_a <= c(a)$ and satisfies conservation and the flow requirement. 
The objective $sum p(a) y_a$ is at least the edge cost of the flow because $y_a >= 1$ whenever $f_a > 0$. + + _Solution extraction._ Output the first $m$ variables $(f_0, dots, f_(m-1))$ as the flow assignment. +] + +#{ + let mlr_ilp = load-example("MaximumLikelihoodRanking", "ILP") + let mlr_ilp_sol = mlr_ilp.solutions.at(0) + let mlr_n = mlr_ilp.source.instance.matrix.len() + let mlr_nv = mlr_ilp.target.instance.num_vars + let mlr_nc = mlr_ilp.target.instance.constraints.len() + [ + #reduction-rule("MaximumLikelihoodRanking", "ILP", + example: true, + example-caption: [$n = #mlr_n$ items, $#mlr_nv$ pairwise variables, $#mlr_nc$ transitivity constraints], + extra: [ + #pred-commands( + "pred create --example MaximumLikelihoodRanking -o mlr.json", + "pred reduce mlr.json --to " + target-spec(mlr_ilp) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate mlr.json --config " + mlr_ilp_sol.source_config.map(str).join(","), + ) + *Step 1 -- Source instance.* A ranking instance with $n = #mlr_n$ items and comparison matrix $A$. + + *Step 2 -- Build the ILP.* Introduce $binom(#str(mlr_n), 2) = #mlr_nv$ binary variables $x_(i j)$ for each pair $i < j$. Add $#mlr_nc$ transitivity constraints from all $binom(#str(mlr_n), 3)$ triples. The ILP has #mlr_nv variables and #mlr_nc constraints. + + *Step 3 -- Verify.* The ILP optimum extracts to ranking $(#mlr_ilp_sol.source_config.map(str).join(", "))$, which matches the source optimum #sym.checkmark. + ], + )[ + Each pair of items $(i, j)$ with $i < j$ gets a binary variable $x_(i j)$ indicating whether $i$ is ranked before $j$. Transitivity constraints enforce a valid linear order, and the objective minimizes the total disagreement cost. + ][ + _Construction._ Given $n$ items and comparison matrix $A$, introduce $n(n-1)/2$ binary variables $x_(i j)$ for $i < j$. 
For each triple ${a, b, c}$ with $a < b < c$, add two transitivity constraints: + $ + x_(a b) + x_(b c) - x_(a c) &<= 1 \ + -x_(a b) - x_(b c) + x_(a c) &<= 0 + $ + The objective is: + $ + min sum_(i < j) (a_(j i) - a_(i j)) dot x_(i j) + $ + This yields $n(n-1)/2$ variables and $n(n-1)(n-2)/3$ constraints. + + _Correctness._ ($arrow.r.double$) Any permutation $pi$ defines a consistent tournament: set $x_(i j) = 1$ iff $i$ appears before $j$ in $pi$. The transitivity constraints are satisfied because a linear order has no directed cycles. The ILP objective equals the disagreement cost of $pi$. ($arrow.l.double$) Any feasible binary solution defines a transitive tournament (an acyclic tournament), which corresponds to a unique linear order. The objective equals the disagreement cost of that order. + + _Solution extraction._ For each item $i$, count the number of items ranked before it: $"rank"(i) = sum_(j < i) x_(j i) + sum_(j > i) (1 - x_(i j))$. The resulting rank vector is the permutation. + ] + ] +} + +#{ + let ocst_ilp = load-example("OptimumCommunicationSpanningTree", "ILP") + let ocst_ilp_sol = ocst_ilp.solutions.at(0) + let ocst_n = ocst_ilp.source.instance.num_vertices + let ocst_nv = ocst_ilp.target.instance.num_vars + let ocst_nc = ocst_ilp.target.instance.constraints.len() + [ + #reduction-rule("OptimumCommunicationSpanningTree", "ILP", + example: true, + example-caption: [$K_#ocst_n$, #ocst_nv variables, #ocst_nc constraints], + extra: [ + #pred-commands( + "pred create --example OptimumCommunicationSpanningTree -o ocst.json", + "pred reduce ocst.json --to " + target-spec(ocst_ilp) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate ocst.json --config " + ocst_ilp_sol.source_config.map(str).join(","), + ) + *Step 1 -- Source instance.* $K_#ocst_n$ with edge weight and requirement matrices. + + *Step 2 -- Build the ILP.* Introduce edge selectors and multi-commodity flow variables. The ILP has #ocst_nv variables and #ocst_nc constraints. 
+ + *Step 3 -- Verify.* The ILP optimum extracts to edge selection $(#ocst_ilp_sol.source_config.map(str).join(", "))$, which matches the source optimum #sym.checkmark. + ], + )[ + Binary edge selectors determine the spanning tree. Multi-commodity flow variables route one unit of flow for each vertex pair with positive requirement, and the objective minimizes the total weighted communication cost. + ][ + _Construction._ Given $K_n$ with weights $w(e)$ and requirements $r(u, v)$, let $m = n(n-1)/2$. Introduce binary edge variables $x_e$ for each edge. For each pair $(s, t)$ with $r(s, t) > 0$, add directed flow variables $f^(s t)_(e, "fwd")$ and $f^(s t)_(e, "bwd")$ for each edge $e$. + + Constraints: + - Tree size: $sum_e x_e = n - 1$ + - Flow conservation: for each commodity $(s, t)$ and each vertex $v$, net inflow equals $-1$ at source $s$, $+1$ at sink $t$, and $0$ elsewhere. + - Capacity linking: $f^(s t)_(e, "dir") <= x_e$ for each commodity and direction. + + Objective: $min sum_((s,t): r > 0) r(s,t) dot sum_e w(e) dot (f^(s t)_(e, "fwd") + f^(s t)_(e, "bwd"))$ + + _Correctness._ ($arrow.r.double$) Any spanning tree defines edge selectors and unique path flows. The objective equals the communication cost. ($arrow.l.double$) Any feasible ILP solution with $n - 1$ selected edges and valid flows corresponds to a connected spanning subgraph (tree), and the flow cost equals the path-weighted communication cost. + + _Solution extraction._ Output the first $m$ variables $(x_0, dots, x_(m-1))$ as the edge selection.
+ ] + ] +} + == Unit Disk Mapping #reduction-rule("MaximumIndependentSet", "KingsSubgraph")[ @@ -12100,5 +12955,86 @@ The following table shows concrete variable overhead for example instances, take (name: example-name(entry.source, entry.target), data: d) }) +// === Missing problem-defs and ILP reduction-rules === + +#problem-def("MaximumDomaticNumber")[ + Given an undirected graph $G = (V, E)$, find the maximum $k$ such that $V$ can be partitioned into $k$ disjoint dominating sets $V_1, dots, V_k$ where each $V_i$ dominates all of $V$. +][ + Maximum Domatic Number (GT3) @garey1979. NP-complete for any fixed $k >= 3$ (Garey, Johnson, Tarjan 1976). Polynomial for $k = 2$. + + The best known exact algorithm runs in $O^*(2.695^n)$ (Riege, Rothe, Spakowski, Yamamoto 2007). +] + +#reduction-rule("MaximumDomaticNumber", "ILP")[ + Binary assignment variables $x_(v,i) in {0,1}$ for each vertex $v$ and set index $i in {1, dots, n}$, plus binary usage indicators $y_i$. Partition constraints, domination constraints, and linking constraints enforce a valid domatic partition. Maximize $sum_i y_i$. +][ + _Construction._ Introduce $n^2 + n$ binary variables. For each vertex $v$, $sum_i x_(v,i) = 1$ (partition). For each $v$ and $i$, $x_(v,i) + sum_(u in N(v)) x_(u,i) >= y_i$ (domination). For each $i$, $y_i <= sum_v x_(v,i)$ (linking). Maximize $sum_i y_i$. + + _Correctness._ ($arrow.r.double$) Any valid domatic partition gives a feasible ILP assignment with the same number of non-empty sets. ($arrow.l.double$) Any feasible ILP solution encodes a valid domatic partition. + + _Solution extraction._ For each vertex $v$, find $i$ with $x_(v,i) = 1$; set $"config"[v] = i$. +] + +#problem-def("MinimumMetricDimension")[ + Given an undirected graph $G = (V, E)$, find a minimum-size resolving set $V' subset.eq V$ such that for every pair of distinct vertices $u, v in V$, there exists $w in V'$ with $d(u, w) != d(v, w)$. +][ + Minimum Metric Dimension (GT61) @garey1979. 
NP-complete via reduction from 3-Dimensional Matching. Polynomial for trees. + + The best known exact algorithm is brute-force $O^*(2^n)$. +] + +#reduction-rule("MinimumMetricDimension", "ILP")[ + Binary variables $z_v in {0,1}$ for each vertex. Precompute all-pairs shortest-path distances. For each pair $(u, v)$ with $u < v$, add constraint $sum_(w: d(u,w) != d(v,w)) z_w >= 1$. Minimize $sum_v z_v$. +][ + _Construction._ $n$ binary variables and $n(n-1)/2$ pair-distinguishing constraints from BFS distances. + + _Correctness._ ($arrow.r.double$) A resolving set satisfies all pair constraints. ($arrow.l.double$) Any feasible solution is a resolving set since every pair is distinguished. + + _Solution extraction._ $z_v = 1 arrow.r "config"[v] = 1$. +] + +#problem-def("MinimumGraphBandwidth")[ + Given an undirected graph $G = (V, E)$, find a one-to-one mapping $f: V -> {0, 1, dots, |V|-1}$ that minimizes $max_({u,v} in E) |f(u) - f(v)|$. +][ + Graph Bandwidth (GT40) @garey1979. NP-complete even for trees with maximum degree 3. The brute-force bound is $O^*(n!)$ over all permutations. +] + +#reduction-rule("MinimumGraphBandwidth", "ILP")[ + Assignment variables $x_(v,p) in {0,1}$ for vertex $v$ and position $p$, integer position variables, and bandwidth variable $B$. Bijection constraints enforce a valid permutation; edge-stretch constraints enforce $|"pos"(u) - "pos"(v)| <= B$. Minimize $B$. +][ + _Construction._ $n^2 + n + 1$ variables (assignment + position + bandwidth). Bijection: $sum_p x_(v,p) = 1$ and $sum_v x_(v,p) = 1$. Position linking and edge-stretch constraints bound the bandwidth. + + _Correctness._ ($arrow.r.double$) Any valid permutation with bandwidth $B$ gives a feasible ILP with objective $B$. ($arrow.l.double$) Any feasible ILP solution encodes a valid permutation with bandwidth $<= B$. + + _Solution extraction._ For each vertex $v$, find $p$ with $x_(v,p) = 1$; set $"config"[v] = p$. 
+] + +#problem-def("MinimumCapacitatedSpanningTree")[ + Given a weighted graph $G = (V, E)$ with root $v_0$, vertex requirements $r(v)$, and edge capacity $c$, find a spanning tree $T$ rooted at $v_0$ minimizing $sum_(e in T) w(e)$ subject to: for each edge $e$ in $T$, the sum of requirements in the subtree on the non-root side is at most $c$. +][ + Minimum Capacitated Spanning Tree (ND5) @garey1979. NP-hard in the strong sense, even with unit requirements and capacity 3. +] + +#reduction-rule("MinimumCapacitatedSpanningTree", "ILP")[ + Binary edge selectors $y_e$ plus directed requirement-flow variables. Flow conservation routes each vertex's requirement to the root; capacity constraints bound flow per edge. +][ + _Construction._ $3m$ variables: $m$ edge selectors + $2m$ directed flow. Tree cardinality, binary bounds, flow conservation (each non-root vertex generates $r(v)$ flow toward root), flow-edge linking, and per-edge capacity bounds ($<= c$). Minimize $sum_e w(e) dot y_e$. + + _Correctness._ ($arrow.r.double$) A feasible capacitated spanning tree induces a valid flow. ($arrow.l.double$) A feasible ILP solution encodes a spanning tree satisfying capacity constraints. + + _Solution extraction._ First $m$ variables (edge selectors) give the source configuration. +] + +// MinimumMatrixCover → ILP (problem-def already exists above, just need reduction-rule) +#reduction-rule("MinimumMatrixCover", "ILP")[ + McCormick linearization of the quadratic form: $n$ sign variables $x_i$ plus $n(n-1)/2$ auxiliary variables $y_(i j)$ linearizing products $x_i x_j$. Three McCormick constraints per pair. Minimize the linearized objective. +][ + _Construction._ $n + n(n-1)/2$ binary variables and $3 dot n(n-1)/2$ constraints. For each pair $i < j$: $y_(i j) <= x_i$, $y_(i j) <= x_j$, $y_(i j) >= x_i + x_j - 1$. The objective encodes $sum_(i,j) a_(i j) dot f(i) dot f(j)$ via $f(i) = 2x_i - 1$. 
+ + _Correctness._ ($arrow.r.double$) Any sign assignment $f$ maps to $x_i = (f(i)+1)/2$ with matching quadratic form value. ($arrow.l.double$) Since all variables are binary, the McCormick constraints force $y_(i j) = x_i x_j$ at every feasible point. + + _Solution extraction._ First $n$ variables (sign variables) give the source configuration. +] + #pagebreak() #bibliography("references.bib", style: "ieee") diff --git a/docs/paper/references.bib b/docs/paper/references.bib index aa3c5f81..1fe47dd0 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -10,6 +10,27 @@ @inproceedings{bouchez2006 doi = {10.1007/978-3-540-72521-3_21} } +@inproceedings{ahoJohnsonUllman1977, + author = {Alfred V. Aho and Steven C. Johnson and Jeffrey D. Ullman}, + title = {Code Generation for Machines with Multiregister Operations}, + booktitle = {Proceedings of the 4th ACM SIGACT-SIGPLAN Symposium on Principles of Programming Languages}, + pages = {21--28}, + year = {1977}, + publisher = {ACM}, + doi = {10.1145/512950.512953} +} + +@article{brunoSethi1976, + author = {John Bruno and Ravi Sethi}, + title = {Code Generation for a One-Register Machine}, + journal = {Journal of the ACM}, + volume = {23}, + number = {3}, + pages = {502--510}, + year = {1976}, + doi = {10.1145/321958.321974} +} + @article{sethi1975, author = {Ravi Sethi}, title = {Complete Register Allocation Problems}, @@ -1615,3 +1636,30 @@ @article{yannakakis1980 year = {1980}, doi = {10.1137/0138030} } + +@inproceedings{edmonds1970, + author = {Jack Edmonds}, + title = {Submodular functions, matroids, and certain polyhedra}, + booktitle = {Combinatorial Structures and Their Applications}, + pages = {69--87}, + year = {1970}, + publisher = {Gordon and Breach} +} + +@article{doron2024, + author = {Ilan Doron-Arad and Ariel Kulik and Hadas Shachnai}, + title = {You (Almost) Can't Beat Brute Force for 3-Matroid Intersection}, + journal = {arXiv preprint arXiv:2412.02217}, + year = {2024} +} + +@article{fomin2019, + author = {Fedor V. 
Fomin and Serge Gaspers and Daniel Lokshtanov and Saket Saurabh}, + title = {Exact Algorithms via Monotone Local Search}, + journal = {Journal of the ACM}, + volume = {66}, + number = {2}, + pages = {1--23}, + year = {2019}, + doi = {10.1145/3277568} +} diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 14bbfdef..d36a5e61 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -227,13 +227,16 @@ Flags by problem type: SpinGlass --graph, --couplings, --fields KColoring --graph, --k KClique --graph, --k + VertexCover (VC) --graph, --k MinimumMultiwayCut --graph, --terminals, --edge-weights MonochromaticTriangle --graph PartitionIntoTriangles --graph GeneralizedHex --graph, --source, --sink IntegralFlowWithMultipliers --arcs, --capacities, --source, --sink, --multipliers, --requirement + MinimumEdgeCostFlow --arcs, --edge-weights (prices), --capacities, --source, --sink, --requirement MinimumCutIntoBoundedSets --graph, --edge-weights, --source, --sink, --size-bound HamiltonianCircuit, HC --graph + MaximumLeafSpanningTree --graph LongestCircuit --graph, --edge-weights BoundedComponentSpanningForest --graph, --weights, --k, --bound UndirectedFlowLowerBounds --graph, --capacities, --lower-bounds, --source, --sink, --requirement @@ -247,10 +250,12 @@ Flags by problem type: PathConstrainedNetworkFlow --arcs, --capacities, --source, --sink, --paths, --requirement Factoring --target, --m, --n BinPacking --sizes, --capacity + Clustering --distance-matrix, --k, --diameter-bound CapacityAssignment --capacities, --cost-matrix, --delay-matrix, --cost-budget, --delay-budget ProductionPlanning --num-periods, --demands, --capacities, --setup-costs, --production-costs, --inventory-costs, --cost-bound SubsetProduct --sizes, --target SubsetSum --sizes, --target + MinimumAxiomSet --n, --true-sentences, --implications Numerical3DimensionalMatching --w-sizes, --x-sizes, --y-sizes, --bound Betweenness --n, --sets (triples 
a,b,c) CyclicOrdering --n, --sets (triples a,b,c) @@ -270,6 +275,7 @@ Flags by problem type: ComparativeContainment --universe, --r-sets, --s-sets [--r-weights] [--s-weights] X3C (ExactCoverBy3Sets) --universe, --sets (3 elements each) 3DM (ThreeDimensionalMatching) --universe, --sets (triples w,x,y) + ThreeMatroidIntersection --universe, --partitions, --bound SetBasis --universe, --sets, --k MinimumCardinalityKey --num-attributes, --dependencies PrimeAttributeName --universe, --deps, --query @@ -284,6 +290,9 @@ Flags by problem type: ConsecutiveOnesMatrixAugmentation --matrix (0/1), --bound ConsecutiveOnesSubmatrix --matrix (0/1), --k SparseMatrixCompression --matrix (0/1), --bound + MaximumLikelihoodRanking --matrix (i32 rows, semicolon-separated) + MinimumMatrixCover --matrix (i64 rows, semicolon-separated) + MinimumWeightDecoding --matrix (JSON 2D bool), --rhs (comma-separated booleans) FeasibleBasisExtension --matrix (JSON 2D i64), --rhs, --required-columns SteinerTree --graph, --edge-weights, --terminals MultipleCopyFileAllocation --graph, --usage, --storage @@ -333,10 +342,19 @@ Flags by problem type: D2CIF --arcs, --capacities, --source-1, --sink-1, --source-2, --sink-2, --requirement-1, --requirement-2 MinimumDummyActivitiesPert --arcs [--num-vertices] FeasibleRegisterAssignment --arcs, --assignment, --k [--num-vertices] + MinimumFaultDetectionTestSet --arcs, --inputs, --outputs [--num-vertices] + MinimumWeightAndOrGraph --arcs, --source, --gate-types, --weights [--num-vertices] + MinimumCodeGenerationOneRegister --arcs [--num-vertices] + MinimumCodeGenerationParallelAssignments --num-variables, --assignments + MinimumCodeGenerationUnlimitedRegisters --left-arcs, --right-arcs [--num-vertices] + MinimumRegisterSufficiencyForLoops --loop-length, --loop-variables RegisterSufficiency --arcs, --bound [--num-vertices] CBQ --domain-size, --relations, --conjuncts-spec IntegerExpressionMembership --expression (JSON), --target MinimumGeometricConnectedDominatingSet 
--positions (float x,y pairs), --radius + MinimumDecisionTree --test-matrix (JSON 2D bool), --num-objects, --num-tests + MinimumDisjunctiveNormalForm (MinDNF) --num-vars, --truth-table + SquareTiling (WangTiling) --num-colors, --tiles, --grid-size ILP, CircuitSAT (via reduction only) Geometry graph variants (use slash notation, e.g., MIS/KingsSubgraph): @@ -547,6 +565,9 @@ pub struct CreateArgs { /// Partition groups for arc-index partitions (semicolon-separated, e.g., "0,1;2,3") #[arg(long)] pub partition: Option, + /// Three partition matroids for ThreeMatroidIntersection (pipe-separated matroids, semicolon-separated groups, e.g., "0,1,2;3,4,5|0,3;1,4;2,5|0,4;1,5;2,3") + #[arg(long)] + pub partitions: Option, /// Arc bundles for IntegralFlowBundles (semicolon-separated groups of arc indices, e.g., "0,1;2,5;3,4") #[arg(long)] pub bundles: Option, @@ -631,6 +652,12 @@ pub struct CreateArgs { /// Directed arcs for directed graph problems (e.g., 0>1,1>2,2>0) #[arg(long)] pub arcs: Option, + /// Left operand arcs for MinimumCodeGenerationUnlimitedRegisters (e.g., 1>3,2>3,0>1) + #[arg(long)] + pub left_arcs: Option, + /// Right operand arcs for MinimumCodeGenerationUnlimitedRegisters (e.g., 1>4,2>4,0>2) + #[arg(long)] + pub right_arcs: Option, /// Arc-index equality constraints for IntegralFlowHomologousArcs (semicolon-separated, e.g., "2=5;4=3") #[arg(long)] pub homologous_pairs: Option, @@ -824,6 +851,51 @@ pub struct CreateArgs { /// Output arcs (transition-to-place) for NonLivenessFreePetriNet (e.g., "0>1,1>2,2>3") #[arg(long)] pub output_arcs: Option, + /// Gate types for MinimumWeightAndOrGraph (comma-separated: AND, OR, or L for leaf, e.g., "AND,OR,OR,L,L,L,L") + #[arg(long)] + pub gate_types: Option, + /// Input vertex indices (comma-separated, e.g., "0,1") + #[arg(long)] + pub inputs: Option, + /// Output vertex indices (comma-separated, e.g., "5,6") + #[arg(long)] + pub outputs: Option, + /// True sentence indices for MinimumAxiomSet (comma-separated, e.g., 
"0,1,2,3,4,5,6,7") + #[arg(long)] + pub true_sentences: Option, + /// Implications for MinimumAxiomSet (semicolon-separated "antecedents>consequent", e.g., "0>2;0>3;1>4;2,4>6") + #[arg(long)] + pub implications: Option, + /// Loop length N for MinimumRegisterSufficiencyForLoops + #[arg(long)] + pub loop_length: Option, + /// Variables as semicolon-separated start,duration pairs for MinimumRegisterSufficiencyForLoops (e.g., "0,3;2,3;4,3") + #[arg(long)] + pub loop_variables: Option, + /// Parallel assignments for MinimumCodeGenerationParallelAssignments (semicolon-separated "target:read1,read2" entries, e.g., "0:1,2;1:0;2:3;3:1,2") + #[arg(long)] + pub assignments: Option, + /// Number of variables for MinimumCodeGenerationParallelAssignments + #[arg(long)] + pub num_variables: Option, + /// Truth table for MinimumDisjunctiveNormalForm (comma-separated 0/1, e.g., "0,1,1,1,1,1,1,0") + #[arg(long)] + pub truth_table: Option, + /// Test matrix for MinimumDecisionTree (JSON 2D bool array, e.g., '[[true,true,false],[true,false,false]]') + #[arg(long)] + pub test_matrix: Option, + /// Number of tests for MinimumDecisionTree + #[arg(long)] + pub num_tests: Option, + /// Tiles for SquareTiling (semicolon-separated top,right,bottom,left tuples, e.g., "0,1,2,0;0,0,2,1;2,1,0,0;2,0,0,1") + #[arg(long)] + pub tiles: Option, + /// Grid size N for SquareTiling (N x N grid) + #[arg(long)] + pub grid_size: Option, + /// Number of colors for SquareTiling + #[arg(long)] + pub num_colors: Option, } #[derive(clap::Args)] diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 822479f1..1bfd40ca 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -10,8 +10,9 @@ use problemreductions::export::{ModelExample, ProblemRef, ProblemSide, RuleExamp use problemreductions::models::algebraic::{ AlgebraicEquationsOverGF2, ClosestVectorProblem, ConsecutiveBlockMinimization, 
ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, FeasibleBasisExtension, - MinimumMatrixDomination, MinimumWeightSolutionToLinearEquations, QuadraticCongruences, - QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, + MinimumMatrixCover, MinimumMatrixDomination, MinimumWeightDecoding, + MinimumWeightSolutionToLinearEquations, QuadraticCongruences, QuadraticDiophantineEquations, + SimultaneousIncongruences, SparseMatrixCompression, BMF, }; use problemreductions::models::formula::Quantifier; use problemreductions::models::graph::{ @@ -21,15 +22,20 @@ use problemreductions::models::graph::{ MinimumDummyActivitiesPert, MinimumGeometricConnectedDominatingSet, MinimumMaximalMatching, MinimumMultiwayCut, MixedChinesePostman, MultipleChoiceBranching, PathConstrainedNetworkFlow, RootedTreeArrangement, SteinerTree, SteinerTreeInGraphs, StrongConnectivityAugmentation, + VertexCover, }; use problemreductions::models::misc::{ AdditionalKey, Betweenness, BinPacking, BoyceCoddNormalFormViolation, CapacityAssignment, - CbqRelation, ConjunctiveBooleanQuery, ConsistencyOfDatabaseFrequencyTables, CyclicOrdering, - DynamicStorageAllocation, EnsembleComputation, ExpectedRetrievalCost, + CbqRelation, Clustering, ConjunctiveBooleanQuery, ConsistencyOfDatabaseFrequencyTables, + CyclicOrdering, DynamicStorageAllocation, EnsembleComputation, ExpectedRetrievalCost, FeasibleRegisterAssignment, FlowShopScheduling, FrequencyTable, GroupingBySwapping, IntExpr, IntegerExpressionMembership, JobShopScheduling, KnownValue, KthLargestMTuple, - LongestCommonSubsequence, MinimumExternalMacroDataCompression, - MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, + LongestCommonSubsequence, MaximumLikelihoodRanking, MinimumAxiomSet, + MinimumCodeGenerationOneRegister, MinimumCodeGenerationParallelAssignments, + MinimumCodeGenerationUnlimitedRegisters, MinimumDecisionTree, MinimumDisjunctiveNormalForm, + 
MinimumExternalMacroDataCompression, MinimumFaultDetectionTestSet, + MinimumInternalMacroDataCompression, MinimumRegisterSufficiencyForLoops, + MinimumTardinessSequencing, MinimumWeightAndOrGraph, MultiprocessorScheduling, NonLivenessFreePetriNet, Numerical3DimensionalMatching, OpenShopScheduling, PaintShop, PartiallyOrderedKnapsack, PreemptiveScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, @@ -37,7 +43,7 @@ use problemreductions::models::misc::{ SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, SequencingWithDeadlinesAndSetUpTimes, SequencingWithReleaseTimesAndDeadlines, - SequencingWithinIntervals, ShortestCommonSupersequence, StringToStringCorrection, + SequencingWithinIntervals, ShortestCommonSupersequence, SquareTiling, StringToStringCorrection, SubsetProduct, SubsetSum, SumOfSquaresPartition, ThreePartition, TimetableDesign, }; use problemreductions::models::BiconnectivityAugmentation; @@ -114,6 +120,7 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.r_weights.is_none() && args.s_weights.is_none() && args.partition.is_none() + && args.partitions.is_none() && args.bundles.is_none() && args.universe.is_none() && args.biedges.is_none() @@ -143,6 +150,8 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.costs.is_none() && args.arc_costs.is_none() && args.arcs.is_none() + && args.left_arcs.is_none() + && args.right_arcs.is_none() && args.homologous_pairs.is_none() && args.quantifiers.is_none() && args.usage.is_none() @@ -221,6 +230,21 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.assignment.is_none() && args.initial_marking.is_none() && args.output_arcs.is_none() + && args.gate_types.is_none() + && args.inputs.is_none() + && args.outputs.is_none() + && args.true_sentences.is_none() + && args.implications.is_none() + && 
args.loop_length.is_none() + && args.loop_variables.is_none() + && args.assignments.is_none() + && args.num_variables.is_none() + && args.truth_table.is_none() + && args.test_matrix.is_none() + && args.num_tests.is_none() + && args.tiles.is_none() + && args.grid_size.is_none() + && args.num_colors.is_none() } fn emit_problem_output(output: &ProblemJsonOutput, out: &OutputConfig) -> Result<()> { @@ -615,6 +639,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { _ => "--graph 0-1,1-2,2-3 --weights 1,1,1,1", }, "KClique" => "--graph 0-1,0-2,1-3,2-3,2-4,3-4 --k 3", + "VertexCover" => "--graph 0-1,1-2,0-2,2-3 --k 2", "GeneralizedHex" => "--graph 0-1,0-2,0-3,1-4,2-4,3-4,4-5 --source 0 --sink 5", "IntegralFlowBundles" => { "--arcs \"0>1,0>2,1>3,2>3,1>2,2>1\" --bundles \"0,1;2,5;3,4\" --bundle-capacities 1,1,1 --source 0 --sink 3 --requirement 1 --num-vertices 4" @@ -680,6 +705,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--num-vars 3 --clauses \"1,2;-1,3\" --quantifiers \"E,A,E\"" } "KSatisfiability" => "--num-vars 3 --clauses \"1,2,3;-1,2,-3\" --k 3", + "Maximum2Satisfiability" => "--num-vars 4 --clauses \"1,2;1,-2;-1,3;-1,-3;2,4;-3,-4;3,4\"", "NonTautology" => { "--num-vars 3 --clauses \"1,2,3;-1,-2,-3\"" } @@ -694,6 +720,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "SpinGlass" => "--graph 0-1,1-2 --couplings 1,1", "KColoring" => "--graph 0-1,1-2,2-0 --k 3", "HamiltonianCircuit" => "--graph 0-1,1-2,2-3,3-0", + "MaximumLeafSpanningTree" => "--graph 0-1,0-2,0-3,1-4,2-4,2-5,3-5,4-5,1-3", "EnsembleComputation" => "--universe 4 --sets \"0,1,2;0,1,3\"", "RootedTreeStorageAssignment" => "--universe 5 --sets \"0,2;1,3;0,4;2,4\" --bound 1", "MinMaxMulticenter" => { @@ -706,6 +733,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--left 4 --right 4 --biedges 0-0,0-1,0-2,1-0,1-1,1-2,2-0,2-1,2-2,3-0,3-1,3-3 --k 3" } "MaximumAchromaticNumber" => 
"--graph 0-1,1-2,2-3,3-4,4-5,5-0", + "MaximumDomaticNumber" => "--graph 0-1,1-2,0-2", "MinimumCoveringByCliques" => "--graph 0-1,1-2,0-2,2-3", "MinimumIntersectionGraphBasis" => "--graph 0-1,1-2", "MinimumMaximalMatching" => "--graph 0-1,1-2,2-3,3-4,4-5", @@ -753,6 +781,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "DirectedTwoCommodityIntegralFlow" => { "--arcs \"0>2,0>3,1>2,1>3,2>4,2>5,3>4,3>5\" --capacities 1,1,1,1,1,1,1,1 --source-1 0 --sink-1 4 --source-2 1 --sink-2 5 --requirement-1 1 --requirement-2 1" } + "MinimumEdgeCostFlow" => { + "--arcs \"0>1,0>2,0>3,1>4,2>4,3>4\" --edge-weights 3,1,2,0,0,0 --capacities 2,2,2,2,2,2 --source 0 --sink 4 --requirement 3" + } "MinimumFeedbackArcSet" => "--arcs \"0>1,1>2,2>0\"", "DirectedHamiltonianPath" => { "--arcs \"0>1,0>3,1>3,1>4,2>0,2>4,3>2,3>5,4>5,5>1\" --num-vertices 6" @@ -765,6 +796,15 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "FeasibleRegisterAssignment" => { "--arcs \"0>1,0>2,1>3\" --assignment 0,1,0,0 --k 2 --num-vertices 4" } + "MinimumFaultDetectionTestSet" => { + "--arcs \"0>2,0>3,1>3,1>4,2>5,3>5,3>6,4>6\" --inputs 0,1 --outputs 5,6 --num-vertices 7" + } + "MinimumWeightAndOrGraph" => { + "--arcs \"0>1,0>2,1>3,1>4,2>5,2>6\" --source 0 --gate-types \"AND,OR,OR,L,L,L,L\" --weights 1,2,3,1,4,2 --num-vertices 7" + } + "MinimumRegisterSufficiencyForLoops" => { + "--loop-length 6 --loop-variables \"0,3;2,3;4,3\"" + } "RegisterSufficiency" => { "--arcs \"2>0,2>1,3>1,4>2,4>3,5>0,6>4,6>5\" --bound 3 --num-vertices 7" } @@ -797,6 +837,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "IntegerKnapsack" => "--sizes 3,4,5,2,7 --values 4,5,7,3,9 --capacity 15", "SubsetProduct" => "--sizes 2,3,5,7,6,10 --target 210", "SubsetSum" => "--sizes 3,7,1,8,2,4 --target 11", + "MinimumAxiomSet" => { + "--n 8 --true-sentences 0,1,2,3,4,5,6,7 --implications \"0>2;0>3;1>4;1>5;2,4>6;3,5>7;6,7>0;6,7>1\"" + } 
"IntegerExpressionMembership" => { "--expression '{\"Sum\":[{\"Sum\":[{\"Union\":[{\"Atom\":1},{\"Atom\":4}]},{\"Union\":[{\"Atom\":3},{\"Atom\":6}]}]},{\"Union\":[{\"Atom\":2},{\"Atom\":5}]}]}' --target 12" } @@ -816,6 +859,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "BoyceCoddNormalFormViolation" => { "--n 6 --sets \"0,1:2;2:3;3,4:5\" --target 0,1,2,3,4,5" } + "Clustering" => { + "--distance-matrix \"0,1,1,3;1,0,1,3;1,1,0,3;3,3,3,0\" --k 2 --diameter-bound 1" + } "SumOfSquaresPartition" => "--sizes 5,3,8,2,7,1 --num-groups 3", "ComparativeContainment" => { "--universe 4 --r-sets \"0,1,2,3;0,1\" --s-sets \"0,1,2,3;2,3\" --r-weights 2,5 --s-weights 3,6" @@ -848,7 +894,12 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "SparseMatrixCompression" => { "--matrix \"1,0,0,1;0,1,0,0;0,0,1,0;1,0,0,0\" --bound 2" } + "MaximumLikelihoodRanking" => "--matrix \"0,4,3,5;1,0,4,3;2,1,0,4;0,2,1,0\"", + "MinimumMatrixCover" => "--matrix \"0,3,1,0;3,0,0,2;1,0,0,4;0,2,4,0\"", "MinimumMatrixDomination" => "--matrix \"0,1,0;1,0,1;0,1,0\"", + "MinimumWeightDecoding" => { + "--matrix '[[true,false,true,true],[false,true,true,false],[true,true,false,true]]' --rhs 'true,true,false'" + } "MinimumWeightSolutionToLinearEquations" => { "--matrix '[[1,2,3,1],[2,1,1,3]]' --rhs '5,4'" } @@ -866,6 +917,18 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "FeasibleBasisExtension" => { "--matrix '[[1,0,1,2,-1,0],[0,1,0,1,1,2],[0,0,1,1,0,1]]' --rhs '7,5,3' --required-columns '0,1'" } + "MinimumCodeGenerationParallelAssignments" => { + "--num-variables 4 --assignments \"0:1,2;1:0;2:3;3:1,2\"" + } + "MinimumDecisionTree" => { + "--test-matrix '[[true,true,false,false],[true,false,false,false],[false,true,false,true]]' --num-objects 4 --num-tests 3" + } + "MinimumDisjunctiveNormalForm" => { + "--num-vars 3 --truth-table 0,1,1,1,1,1,1,0" + } + "SquareTiling" => { + "--num-colors 3 --tiles 
\"0,1,2,0;0,0,2,1;2,1,0,0;2,0,0,1\" --grid-size 2" + } _ => "", } } @@ -906,6 +969,12 @@ fn help_flag_name(canonical: &str, field_name: &str) -> String { ("ConsecutiveOnesMatrixAugmentation", "bound") => return "bound".to_string(), ("ConsecutiveOnesSubmatrix", "bound") => return "bound".to_string(), ("SparseMatrixCompression", "bound_k") => return "bound".to_string(), + ("MinimumCodeGenerationParallelAssignments", "num_variables") => { + return "num-variables".to_string(); + } + ("MinimumCodeGenerationParallelAssignments", "assignments") => { + return "assignments".to_string(); + } ("StackerCrane", "edges") => return "graph".to_string(), ("StackerCrane", "arc_lengths") => return "arc-costs".to_string(), ("StackerCrane", "edge_lengths") => return "edge-lengths".to_string(), @@ -979,6 +1048,9 @@ fn help_flag_hint( | ("MinimumInternalMacroDataCompression", "string") => "symbol list: \"0,1,0,1\"", ("MinimumExternalMacroDataCompression", "pointer_cost") | ("MinimumInternalMacroDataCompression", "pointer_cost") => "positive integer: 2", + ("MinimumAxiomSet", "num_sentences") => "total number of sentences: 8", + ("MinimumAxiomSet", "true_sentences") => "comma-separated indices: 0,1,2,3,4,5,6,7", + ("MinimumAxiomSet", "implications") => "semicolon-separated rules: \"0>2;0>3;1>4;2,4>6\"", ("ShortestCommonSupersequence", "strings") => "symbol lists: \"0,1,2;1,2,0\"", ("MultipleChoiceBranching", "partition") => "semicolon-separated groups: \"0,1;2,3\"", ("IntegralFlowHomologousArcs", "homologous_pairs") => { @@ -1003,7 +1075,13 @@ fn help_flag_hint( } ("ConsecutiveOnesSubmatrix", "matrix") => "semicolon-separated 0/1 rows: \"1,0;0,1\"", ("SparseMatrixCompression", "matrix") => "semicolon-separated 0/1 rows: \"1,0;0,1\"", + ("MaximumLikelihoodRanking", "matrix") => { + "semicolon-separated i32 rows: \"0,4,3,5;1,0,4,3;2,1,0,4;0,2,1,0\"" + } + ("MinimumMatrixCover", "matrix") => "semicolon-separated i64 rows: \"0,3,1;3,0,2;1,2,0\"", ("MinimumMatrixDomination", "matrix") => 
"semicolon-separated 0/1 rows: \"1,0;0,1\"", + ("MinimumWeightDecoding", "matrix") => "JSON 2D bool array: '[[true,false],[false,true]]'", + ("MinimumWeightDecoding", "target") => "comma-separated booleans: \"true,true,false\"", ("MinimumWeightSolutionToLinearEquations", "matrix") => { "JSON 2D integer array: '[[1,2,3],[4,5,6]]'" } @@ -1011,6 +1089,9 @@ fn help_flag_hint( ("FeasibleBasisExtension", "matrix") => "JSON 2D integer array: '[[1,0,1],[0,1,0]]'", ("FeasibleBasisExtension", "rhs") => "comma-separated integers: \"7,5,3\"", ("FeasibleBasisExtension", "required_columns") => "comma-separated column indices: \"0,1\"", + ("MinimumCodeGenerationParallelAssignments", "assignments") => { + "semicolon-separated target:reads entries: \"0:1,2;1:0;2:3;3:1,2\"" + } ("TimetableDesign", "craftsman_avail") | ("TimetableDesign", "task_avail") => { "semicolon-separated 0/1 rows: \"1,1,0;0,1,1\"" } @@ -1478,6 +1559,21 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MaximumDomaticNumber (graph only, no weights) + "MaximumDomaticNumber" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create MaximumDomaticNumber --graph 0-1,1-2,0-2" + ) + })?; + ( + ser(problemreductions::models::graph::MaximumDomaticNumber::new( + graph, + ))?, + variant_map(&[("graph", "SimpleGraph")]), + ) + } + // MinimumCoveringByCliques (graph only, no weights) "MinimumCoveringByCliques" => { let (graph, _) = parse_graph(args).map_err(|e| { @@ -1530,6 +1626,19 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // Maximum Leaf Spanning Tree (graph only, no weights) + "MaximumLeafSpanningTree" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create MaximumLeafSpanningTree --graph 0-1,0-2,0-3,1-4,2-4,2-5,3-5,4-5,1-3" + ) + })?; + ( + ser(problemreductions::models::graph::MaximumLeafSpanningTree::new(graph))?, + resolved_variant.clone(), + ) + } + // 
Biconnectivity augmentation "BiconnectivityAugmentation" => { let (graph, _) = parse_graph(args).map_err(|e| { @@ -2215,6 +2324,21 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { (ser(KClique::new(graph, k))?, resolved_variant.clone()) } + "VertexCover" => { + let usage = "Usage: pred create VertexCover --graph 0-1,1-2,0-2,2-3 --k 2"; + let (graph, _) = parse_graph(args).map_err(|e| anyhow::anyhow!("{e}\n\n{usage}"))?; + let k = args + .k + .ok_or_else(|| anyhow::anyhow!("VertexCover requires --k\n\n{usage}"))?; + if k == 0 { + bail!("VertexCover: --k must be positive"); + } + if k > graph.num_vertices() { + bail!("VertexCover: k must be <= graph num_vertices"); + } + (ser(VertexCover::new(graph, k))?, resolved_variant.clone()) + } + // SAT "Satisfiability" => { let num_vars = args.num_vars.ok_or_else(|| { @@ -2255,6 +2379,20 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { util::ser_ksat(num_vars, clauses, k)? } + "Maximum2Satisfiability" => { + let num_vars = args.num_vars.ok_or_else(|| { + anyhow::anyhow!( + "Maximum2Satisfiability requires --num-vars\n\n\ + Usage: pred create MAX2SAT --num-vars 4 --clauses \"1,2;1,-2;-1,3;-1,-3\"" + ) + })?; + let clauses = parse_clauses(args)?; + ( + ser(Maximum2Satisfiability::new(num_vars, clauses))?, + resolved_variant.clone(), + ) + } + "NonTautology" => { let num_vars = args.num_vars.ok_or_else(|| { anyhow::anyhow!( @@ -2678,6 +2816,38 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumAxiomSet + "MinimumAxiomSet" => { + let usage = "Usage: pred create MinimumAxiomSet --n 8 --true-sentences 0,1,2,3,4,5,6,7 --implications \"0>2;0>3;1>4;1>5;2,4>6;3,5>7;6,7>0;6,7>1\""; + let num_sentences = args.n.ok_or_else(|| { + anyhow::anyhow!( + "MinimumAxiomSet requires --n, --true-sentences, and --implications\n\n{usage}" + ) + })?; + let ts_str = args.true_sentences.as_deref().ok_or_else(|| { + anyhow::anyhow!("MinimumAxiomSet requires 
--true-sentences\n\n{usage}") + })?; + let imp_str = args.implications.as_deref().ok_or_else(|| { + anyhow::anyhow!("MinimumAxiomSet requires --implications\n\n{usage}") + })?; + let true_sentences: Vec = ts_str + .split(',') + .map(|s| s.trim().parse::()) + .collect::>() + .context("--true-sentences must be comma-separated usize values")?; + let implications = parse_implications(imp_str).context( + "--implications must be semicolon-separated \"antecedents>consequent\" pairs", + )?; + ( + ser(MinimumAxiomSet::new( + num_sentences, + true_sentences, + implications, + ))?, + resolved_variant.clone(), + ) + } + // IntegerExpressionMembership "IntegerExpressionMembership" => { let usage = "Usage: pred create IntegerExpressionMembership --expression '{\"Sum\":[{\"Atom\":1},{\"Atom\":2}]}' --target 3"; @@ -3398,6 +3568,65 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // ThreeMatroidIntersection + "ThreeMatroidIntersection" => { + let universe = args.universe.ok_or_else(|| { + anyhow::anyhow!( + "ThreeMatroidIntersection requires --universe, --partitions, and --bound\n\n\ + Usage: pred create ThreeMatroidIntersection --universe 6 --partitions \"0,1,2;3,4,5|0,3;1,4;2,5|0,4;1,5;2,3\" --bound 2" + ) + })?; + let bound_val = args.bound.ok_or_else(|| { + anyhow::anyhow!( + "ThreeMatroidIntersection requires --bound\n\n\ + Usage: pred create ThreeMatroidIntersection --universe 6 --partitions \"0,1,2;3,4,5|0,3;1,4;2,5|0,4;1,5;2,3\" --bound 2" + ) + })?; + let bound = usize::try_from(bound_val) + .map_err(|_| anyhow::anyhow!("--bound must be non-negative, got {}", bound_val))?; + let partitions_str = args.partitions.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "ThreeMatroidIntersection requires --partitions\n\n\ + Usage: pred create ThreeMatroidIntersection --universe 6 --partitions \"0,1,2;3,4,5|0,3;1,4;2,5|0,4;1,5;2,3\" --bound 2" + ) + })?; + let matroids: Vec>> = partitions_str + .split('|') + .map(|matroid_str| { + matroid_str + .split(';') 
+ .map(|group_str| { + group_str + .split(',') + .map(|s| { + s.trim().parse::().map_err(|_| { + anyhow::anyhow!( + "Invalid element in partitions: '{}'", + s.trim() + ) + }) + }) + .collect::>>() + }) + .collect::>>() + }) + .collect::>>()?; + if matroids.len() != 3 { + bail!( + "Expected exactly 3 partition matroids separated by '|', got {}", + matroids.len() + ); + } + ( + ser( + problemreductions::models::set::ThreeMatroidIntersection::new( + universe, matroids, bound, + ), + )?, + resolved_variant.clone(), + ) + } + // SetBasis "SetBasis" => { let universe = args.universe.ok_or_else(|| { @@ -3623,6 +3852,47 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MaximumLikelihoodRanking + "MaximumLikelihoodRanking" => { + let usage = "Usage: pred create MaximumLikelihoodRanking --matrix \"0,4,3,5;1,0,4,3;2,1,0,4;0,2,1,0\""; + let matrix_str = args.matrix.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MaximumLikelihoodRanking requires --matrix (semicolon-separated i32 rows)\n\n{usage}" + ) + })?; + let matrix_i64 = parse_i64_matrix(matrix_str).context("Invalid matrix")?; + let matrix: Vec> = matrix_i64 + .into_iter() + .map(|row| { + row.into_iter() + .map(|v| { + i32::try_from(v) + .map_err(|_| anyhow::anyhow!("matrix value {v} out of i32 range")) + }) + .collect::>>() + }) + .collect::>>()?; + ( + ser(MaximumLikelihoodRanking::new(matrix))?, + resolved_variant.clone(), + ) + } + + // MinimumMatrixCover + "MinimumMatrixCover" => { + let usage = "Usage: pred create MinimumMatrixCover --matrix \"0,3,1,0;3,0,0,2;1,0,0,4;0,2,4,0\""; + let matrix_str = args.matrix.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumMatrixCover requires --matrix (semicolon-separated i64 rows)\n\n{usage}" + ) + })?; + let matrix = parse_i64_matrix(matrix_str).context("Invalid matrix")?; + ( + ser(MinimumMatrixCover::new(matrix))?, + resolved_variant.clone(), + ) + } + // MinimumMatrixDomination "MinimumMatrixDomination" => { let matrix = 
parse_bool_matrix(args)?; @@ -3632,6 +3902,43 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumWeightDecoding + "MinimumWeightDecoding" => { + let usage = "Usage: pred create MinimumWeightDecoding --matrix '[[true,false,true],[false,true,true]]' --rhs 'true,true'"; + let matrix_str = args.matrix.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumWeightDecoding requires --matrix (JSON 2D bool array) and --rhs\n\n{usage}" + ) + })?; + let matrix: Vec> = serde_json::from_str(matrix_str).map_err(|err| { + anyhow::anyhow!( + "MinimumWeightDecoding requires --matrix as a JSON 2D bool array (e.g., '[[true,false],[false,true]]')\n\n{usage}\n\nFailed to parse --matrix: {err}" + ) + })?; + let rhs_str = args.rhs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumWeightDecoding requires --rhs (comma-separated booleans)\n\n{usage}" + ) + })?; + let target: Vec = rhs_str + .split(',') + .map(|s| match s.trim() { + "true" | "1" => Ok(true), + "false" | "0" => Ok(false), + other => Err(anyhow::anyhow!("invalid boolean value: {other}")), + }) + .collect::, _>>() + .map_err(|err| { + anyhow::anyhow!( + "Failed to parse --rhs as comma-separated booleans: {err}\n\n{usage}" + ) + })?; + ( + ser(MinimumWeightDecoding::new(matrix, target))?, + resolved_variant.clone(), + ) + } + // MinimumWeightSolutionToLinearEquations "MinimumWeightSolutionToLinearEquations" => { let usage = "Usage: pred create MinimumWeightSolutionToLinearEquations --matrix '[[1,2,3,1],[2,1,1,3]]' --rhs '5,4'"; @@ -5304,6 +5611,58 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumEdgeCostFlow + "MinimumEdgeCostFlow" => { + let usage = "Usage: pred create MinimumEdgeCostFlow --arcs \"0>1,0>2,0>3,1>4,2>4,3>4\" --edge-weights 3,1,2,0,0,0 --capacities 2,2,2,2,2,2 --source 0 --sink 4 --requirement 3"; + let arcs_str = args + .arcs + .as_deref() + .ok_or_else(|| anyhow::anyhow!("MinimumEdgeCostFlow requires --arcs\n\n{usage}"))?; + let 
(graph, num_arcs) = parse_directed_graph(arcs_str, args.num_vertices)?; + let prices: Vec = if let Some(ref s) = args.edge_weights { + util::parse_comma_list(s)? + } else { + bail!("MinimumEdgeCostFlow requires --edge-weights (prices)\n\n{usage}"); + }; + anyhow::ensure!( + prices.len() == num_arcs, + "--edge-weights length ({}) must match number of arcs ({num_arcs})", + prices.len() + ); + let capacities: Vec = if let Some(ref s) = args.capacities { + util::parse_comma_list(s)? + } else { + bail!("MinimumEdgeCostFlow requires --capacities\n\n{usage}"); + }; + anyhow::ensure!( + capacities.len() == num_arcs, + "--capacities length ({}) must match number of arcs ({num_arcs})", + capacities.len() + ); + let n = graph.num_vertices(); + let source = args.source.ok_or_else(|| { + anyhow::anyhow!("MinimumEdgeCostFlow requires --source\n\n{usage}") + })?; + let sink = args + .sink + .ok_or_else(|| anyhow::anyhow!("MinimumEdgeCostFlow requires --sink\n\n{usage}"))?; + anyhow::ensure!(source < n, "--source ({source}) >= num_vertices ({n})"); + anyhow::ensure!(sink < n, "--sink ({sink}) >= num_vertices ({n})"); + anyhow::ensure!(source != sink, "--source and --sink must be distinct"); + let requirement = args.requirement.unwrap_or(1) as i64; + ( + ser(problemreductions::models::graph::MinimumEdgeCostFlow::new( + graph, + prices, + capacities, + source, + sink, + requirement, + ))?, + resolved_variant.clone(), + ) + } + // MinimumDummyActivitiesPert "MinimumDummyActivitiesPert" => { let usage = "Usage: pred create MinimumDummyActivitiesPert --arcs \"0>2,0>3,1>3,1>4,2>5\" [--num-vertices N]"; @@ -5393,6 +5752,257 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumCodeGenerationOneRegister + "MinimumCodeGenerationOneRegister" => { + let usage = "Usage: pred create MinimumCodeGenerationOneRegister --arcs \"0>1,0>2,1>3,1>4,2>3,2>5,3>5,3>6\" [--num-vertices N]"; + let arcs_str = args.arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + 
"MinimumCodeGenerationOneRegister requires --arcs\n\n\ + {usage}" + ) + })?; + let (graph, _) = parse_directed_graph(arcs_str, args.num_vertices)?; + let n = graph.num_vertices(); + let arcs = graph.arcs(); + // Compute num_leaves: vertices with out-degree 0 + let mut out_degree = vec![0usize; n]; + for &(parent, _child) in &arcs { + out_degree[parent] += 1; + } + let num_leaves = out_degree.iter().filter(|&&d| d == 0).count(); + ( + ser(MinimumCodeGenerationOneRegister::new(n, arcs, num_leaves))?, + resolved_variant.clone(), + ) + } + + // MinimumCodeGenerationUnlimitedRegisters + "MinimumCodeGenerationUnlimitedRegisters" => { + let usage = "Usage: pred create MinimumCodeGenerationUnlimitedRegisters --left-arcs \"1>3,2>3,0>1\" --right-arcs \"1>4,2>4,0>2\" [--num-vertices N]"; + let left_str = args.left_arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumCodeGenerationUnlimitedRegisters requires --left-arcs\n\n\ + {usage}" + ) + })?; + let right_str = args.right_arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumCodeGenerationUnlimitedRegisters requires --right-arcs\n\n\ + {usage}" + ) + })?; + let (left_graph, _) = parse_directed_graph(left_str, args.num_vertices)?; + let (right_graph, _) = parse_directed_graph(right_str, args.num_vertices)?; + let n = if let Some(nv) = args.num_vertices { + nv + } else { + left_graph.num_vertices().max(right_graph.num_vertices()) + }; + let left_arcs = left_graph.arcs(); + let right_arcs = right_graph.arcs(); + ( + ser(MinimumCodeGenerationUnlimitedRegisters::new( + n, left_arcs, right_arcs, + ))?, + resolved_variant.clone(), + ) + } + + // MinimumCodeGenerationParallelAssignments + "MinimumCodeGenerationParallelAssignments" => { + let usage = "Usage: pred create MinimumCodeGenerationParallelAssignments --num-variables 4 --assignments \"0:1,2;1:0;2:3;3:1,2\""; + let nv = args.num_variables.ok_or_else(|| { + anyhow::anyhow!( + "MinimumCodeGenerationParallelAssignments requires --num-variables and 
--assignments\n\n\ + {usage}" + ) + })?; + let assign_str = args.assignments.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumCodeGenerationParallelAssignments requires --assignments\n\n\ + {usage}" + ) + })?; + let assignments: Vec<(usize, Vec)> = assign_str + .split(';') + .map(|entry| { + let parts: Vec<&str> = entry.split(':').collect(); + anyhow::ensure!( + parts.len() == 2, + "each assignment must be 'target:read1,read2,...'; got '{entry}'" + ); + let target: usize = parts[0] + .trim() + .parse() + .context("invalid target variable index")?; + let reads: Vec = if parts[1].trim().is_empty() { + Vec::new() + } else { + parts[1] + .split(',') + .map(|s| { + s.trim() + .parse::() + .context("invalid read variable index") + }) + .collect::>>()? + }; + Ok((target, reads)) + }) + .collect::>>()?; + ( + ser(MinimumCodeGenerationParallelAssignments::new( + nv, + assignments, + ))?, + resolved_variant.clone(), + ) + } + + // MinimumRegisterSufficiencyForLoops + "MinimumRegisterSufficiencyForLoops" => { + let usage = "Usage: pred create MinimumRegisterSufficiencyForLoops --loop-length 6 --loop-variables \"0,3;2,3;4,3\""; + let loop_length = args.loop_length.ok_or_else(|| { + anyhow::anyhow!( + "MinimumRegisterSufficiencyForLoops requires --loop-length and --loop-variables\n\n\ + {usage}" + ) + })?; + let vars_str = args.loop_variables.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumRegisterSufficiencyForLoops requires --loop-variables\n\n\ + {usage}" + ) + })?; + let variables: Vec<(usize, usize)> = vars_str + .split(';') + .map(|pair| { + let parts: Vec<&str> = pair.split(',').collect(); + if parts.len() != 2 { + bail!("Each variable must be start,duration (got '{pair}')\n\n{usage}"); + } + let start: usize = parts[0] + .trim() + .parse() + .context(format!("Invalid start_time in '{pair}'\n\n{usage}"))?; + let dur: usize = parts[1] + .trim() + .parse() + .context(format!("Invalid duration in '{pair}'\n\n{usage}"))?; + Ok((start, dur)) + }) + 
.collect::>>()?; + ( + ser(MinimumRegisterSufficiencyForLoops::new( + loop_length, + variables, + ))?, + resolved_variant.clone(), + ) + } + + // MinimumFaultDetectionTestSet + "MinimumFaultDetectionTestSet" => { + let usage = "Usage: pred create MinimumFaultDetectionTestSet --arcs \"0>2,0>3,1>3,1>4,2>5,3>5,3>6,4>6\" --inputs 0,1 --outputs 5,6 [--num-vertices N]"; + let arcs_str = args.arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumFaultDetectionTestSet requires --arcs, --inputs, and --outputs\n\n\ + {usage}" + ) + })?; + let inputs_str = args.inputs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumFaultDetectionTestSet requires --inputs\n\n\ + {usage}" + ) + })?; + let outputs_str = args.outputs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumFaultDetectionTestSet requires --outputs\n\n\ + {usage}" + ) + })?; + let (graph, _num_arcs) = parse_directed_graph(arcs_str, args.num_vertices)?; + let n = graph.num_vertices(); + let arcs = graph.arcs(); + let inputs: Vec = inputs_str + .split(',') + .map(|s| { + s.trim() + .parse::() + .map_err(|e| anyhow::anyhow!("Invalid input vertex '{}': {}", s.trim(), e)) + }) + .collect::>()?; + let outputs: Vec = outputs_str + .split(',') + .map(|s| { + s.trim() + .parse::() + .map_err(|e| anyhow::anyhow!("Invalid output vertex '{}': {}", s.trim(), e)) + }) + .collect::>()?; + ( + ser(MinimumFaultDetectionTestSet::new(n, arcs, inputs, outputs))?, + resolved_variant.clone(), + ) + } + + // MinimumWeightAndOrGraph + "MinimumWeightAndOrGraph" => { + let usage = "Usage: pred create MinimumWeightAndOrGraph --arcs \"0>1,0>2,1>3,1>4,2>5,2>6\" --source 0 --gate-types \"AND,OR,OR,L,L,L,L\" --weights 1,2,3,1,4,2 [--num-vertices N]"; + let arcs_str = args.arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumWeightAndOrGraph requires --arcs, --source, --gate-types, and --weights\n\n\ + {usage}" + ) + })?; + let source = args.source.ok_or_else(|| { + anyhow::anyhow!( + "MinimumWeightAndOrGraph requires 
--source\n\n\ + {usage}" + ) + })?; + let gate_types_str = args.gate_types.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumWeightAndOrGraph requires --gate-types (e.g., \"AND,OR,OR,L,L,L,L\")\n\n\ + {usage}" + ) + })?; + let (graph, num_arcs) = parse_directed_graph(arcs_str, args.num_vertices)?; + let n = graph.num_vertices(); + let arcs = graph.arcs(); + let arc_weights = parse_arc_weights(args, num_arcs)?; + let gate_types: Vec> = gate_types_str + .split(',') + .map(|s| match s.trim() { + "AND" | "and" => Ok(Some(true)), + "OR" | "or" => Ok(Some(false)), + "L" | "l" | "LEAF" | "leaf" => Ok(None), + other => Err(anyhow::anyhow!( + "Invalid gate type '{}': expected AND, OR, or L (leaf)\n\n{usage}", + other + )), + }) + .collect::>()?; + if gate_types.len() != n { + bail!( + "Gate types length {} does not match vertex count {}\n\n{usage}", + gate_types.len(), + n + ); + } + ( + ser(MinimumWeightAndOrGraph::new( + n, + arcs, + source, + gate_types, + arc_weights, + ))?, + resolved_variant.clone(), + ) + } + // MixedChinesePostman "MixedChinesePostman" => { let usage = "Usage: pred create MixedChinesePostman --graph 0-2,1-3,0-4,4-2 --arcs \"0>1,1>2,2>3,3>0\" --edge-weights 2,3,1,2 --arc-costs 2,3,1,4 [--num-vertices N]"; @@ -6027,6 +6637,114 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // Clustering + "Clustering" => { + let usage = "Usage: pred create Clustering --distance-matrix \"0,1,1,3;1,0,1,3;1,1,0,3;3,3,3,0\" --k 2 --diameter-bound 1"; + let dist_str = args.distance_matrix.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "Clustering requires --distance-matrix, --k, and --diameter-bound\n\n{usage}" + ) + })?; + let distance_matrix = parse_u64_matrix_rows(dist_str, "distance matrix")?; + let k = args.k.ok_or_else(|| { + anyhow::anyhow!("Clustering requires --k (number of clusters)\n\n{usage}") + })?; + let diameter_bound = args + .diameter_bound + .ok_or_else(|| anyhow::anyhow!("Clustering requires 
--diameter-bound\n\n{usage}"))? + as u64; + ( + ser(Clustering::new(distance_matrix, k, diameter_bound))?, + resolved_variant.clone(), + ) + } + + // MinimumDecisionTree + "MinimumDecisionTree" => { + let usage = "Usage: pred create MinimumDecisionTree --test-matrix '[[true,true,false,false],[true,false,false,false],[false,true,false,true]]' --num-objects 4 --num-tests 3"; + let matrix_str = args.test_matrix.as_deref().ok_or_else(|| { + anyhow::anyhow!("MinimumDecisionTree requires --test-matrix\n\n{usage}") + })?; + let test_matrix: Vec> = serde_json::from_str(matrix_str) + .context("Failed to parse --test-matrix as JSON 2D bool array")?; + let num_objects = args.num_objects.ok_or_else(|| { + anyhow::anyhow!("MinimumDecisionTree requires --num-objects\n\n{usage}") + })?; + let num_tests = args.num_tests.ok_or_else(|| { + anyhow::anyhow!("MinimumDecisionTree requires --num-tests\n\n{usage}") + })?; + ( + ser(MinimumDecisionTree::new( + test_matrix, + num_objects, + num_tests, + ))?, + resolved_variant.clone(), + ) + } + + // MinimumDisjunctiveNormalForm + "MinimumDisjunctiveNormalForm" => { + let usage = "Usage: pred create MinDNF --num-vars 3 --truth-table 0,1,1,1,1,1,1,0"; + let num_vars = args.num_vars.ok_or_else(|| { + anyhow::anyhow!("MinimumDisjunctiveNormalForm requires --num-vars\n\n{usage}") + })?; + let tt_str = args.truth_table.as_deref().ok_or_else(|| { + anyhow::anyhow!("MinimumDisjunctiveNormalForm requires --truth-table\n\n{usage}") + })?; + let truth_table: Vec = tt_str + .split(',') + .map(|s| match s.trim() { + "1" | "true" => Ok(true), + "0" | "false" => Ok(false), + other => bail!("Invalid truth table entry '{}': expected 0 or 1", other), + }) + .collect::>>()?; + ( + ser(MinimumDisjunctiveNormalForm::new(num_vars, truth_table))?, + resolved_variant.clone(), + ) + } + + // SquareTiling + "SquareTiling" => { + let usage = "Usage: pred create SquareTiling --num-colors 3 --tiles \"0,1,2,0;0,0,2,1;2,1,0,0;2,0,0,1\" --grid-size 2"; + let num_colors = 
args + .num_colors + .ok_or_else(|| anyhow::anyhow!("SquareTiling requires --num-colors\n\n{usage}"))?; + let tiles_str = args + .tiles + .as_deref() + .ok_or_else(|| anyhow::anyhow!("SquareTiling requires --tiles\n\n{usage}"))?; + let tiles: Vec<(usize, usize, usize, usize)> = tiles_str + .split(';') + .map(|tile_s| { + let parts: Vec = tile_s + .split(',') + .map(|v| { + v.trim() + .parse::() + .context("invalid tile color index") + }) + .collect::>>()?; + if parts.len() != 4 { + bail!( + "Each tile must have exactly 4 values (top,right,bottom,left), got {}", + parts.len() + ); + } + Ok((parts[0], parts[1], parts[2], parts[3])) + }) + .collect::>>()?; + let grid_size = args + .grid_size + .ok_or_else(|| anyhow::anyhow!("SquareTiling requires --grid-size\n\n{usage}"))?; + ( + ser(SquareTiling::new(num_colors, tiles, grid_size))?, + resolved_variant.clone(), + ) + } + _ => bail!("{}", crate::problem_name::unknown_problem_error(canonical)), }; @@ -7595,6 +8313,29 @@ fn create_random( ) } + "VertexCover" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let usage = + "Usage: pred create VertexCover --random --num-vertices 5 [--edge-prob 0.5] [--seed 42] --k 3"; + let k = args + .k + .ok_or_else(|| anyhow::anyhow!("VertexCover requires --k\n\n{usage}"))?; + if k == 0 { + bail!("VertexCover: --k must be positive"); + } + if k > graph.num_vertices() { + bail!("VertexCover: k must be <= graph num_vertices"); + } + ( + ser(VertexCover::new(graph, k))?, + variant_map(&[("graph", "SimpleGraph")]), + ) + } + // MinimumCutIntoBoundedSets (graph + edge weights + s/t/B/K) "MinimumCutIntoBoundedSets" => { let edge_prob = args.edge_prob.unwrap_or(0.5); @@ -7634,6 +8375,20 @@ fn create_random( ) } + // MaximumDomaticNumber (graph only, no weights) + "MaximumDomaticNumber" => { + let edge_prob = 
args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph")]); + ( + ser(problemreductions::models::graph::MaximumDomaticNumber::new(graph))?, + variant, + ) + } + // MinimumCoveringByCliques (graph only, no weights) "MinimumCoveringByCliques" => { let edge_prob = args.edge_prob.unwrap_or(0.5); @@ -7684,6 +8439,21 @@ fn create_random( (ser(HamiltonianCircuit::new(graph))?, variant) } + // Maximum Leaf Spanning Tree (graph only, no weights) + "MaximumLeafSpanningTree" => { + let num_vertices = num_vertices.max(2); + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph")]); + ( + ser(problemreductions::models::graph::MaximumLeafSpanningTree::new(graph))?, + variant, + ) + } + // HamiltonianPath (graph only, no weights) "HamiltonianPath" => { let edge_prob = args.edge_prob.unwrap_or(0.5); @@ -7927,8 +8697,8 @@ fn create_random( _ => bail!( "Random generation is not supported for {canonical}. 
\ Supported: graph-based problems (MIS, MVC, MaxCut, MaxClique, \ - MaximumMatching, MinimumDominatingSet, SpinGlass, KColoring, KClique, TravelingSalesman, \ - BottleneckTravelingSalesman, SteinerTreeInGraphs, HamiltonianCircuit, SteinerTree, \ + MaximumMatching, MinimumDominatingSet, SpinGlass, KColoring, KClique, VertexCover, TravelingSalesman, \ + BottleneckTravelingSalesman, SteinerTreeInGraphs, HamiltonianCircuit, MaximumLeafSpanningTree, SteinerTree, \ OptimalLinearArrangement, RootedTreeArrangement, HamiltonianPath, LongestCircuit, GeneralizedHex)" ), }; @@ -7942,6 +8712,34 @@ fn create_random( emit_problem_output(&output, out) } +/// Parse implication rules from semicolon-separated "antecedents>consequent" strings. +/// +/// Format: "0,1>2;3>4;5,6,7>0" where antecedents are comma-separated indices +/// before the `>` and the consequent is the single index after. +fn parse_implications(s: &str) -> Result, usize)>> { + let mut implications = Vec::new(); + for part in s.split(';') { + let part = part.trim(); + if part.is_empty() { + continue; + } + let (lhs, rhs) = part.split_once('>').ok_or_else(|| { + anyhow::anyhow!("Each implication must contain '>' separator: {part}") + })?; + let antecedents: Vec = lhs + .split(',') + .map(|x| x.trim().parse::()) + .collect::>() + .context(format!("Invalid antecedent index in implication: {part}"))?; + let consequent: usize = rhs + .trim() + .parse() + .context(format!("Invalid consequent index in implication: {part}"))?; + implications.push((antecedents, consequent)); + } + Ok(implications) +} + #[cfg(test)] mod tests { use std::fs; @@ -9133,6 +9931,7 @@ mod tests { r_weights: None, s_weights: None, partition: None, + partitions: None, bundles: None, universe: None, biedges: None, @@ -9160,6 +9959,8 @@ mod tests { string: None, arc_costs: None, arcs: None, + left_arcs: None, + right_arcs: None, values: None, precedences: None, distance_matrix: None, @@ -9225,6 +10026,21 @@ mod tests { assignment: None, initial_marking: 
None, output_arcs: None, + gate_types: None, + true_sentences: None, + implications: None, + loop_length: None, + loop_variables: None, + inputs: None, + outputs: None, + assignments: None, + num_variables: None, + truth_table: None, + test_matrix: None, + num_tests: None, + tiles: None, + grid_size: None, + num_colors: None, } } diff --git a/problemreductions-cli/src/problem_name.rs b/problemreductions-cli/src/problem_name.rs index c449dac4..835cc7a0 100644 --- a/problemreductions-cli/src/problem_name.rs +++ b/problemreductions-cli/src/problem_name.rs @@ -20,6 +20,21 @@ pub fn resolve_alias(input: &str) -> String { if input.eq_ignore_ascii_case("GroupingBySwapping") { return "GroupingBySwapping".to_string(); } + if input.eq_ignore_ascii_case("MinimumWeightAndOrGraph") { + return "MinimumWeightAndOrGraph".to_string(); + } + if input.eq_ignore_ascii_case("MinimumFaultDetectionTestSet") { + return "MinimumFaultDetectionTestSet".to_string(); + } + if input.eq_ignore_ascii_case("MinimumCodeGenerationUnlimitedRegisters") { + return "MinimumCodeGenerationUnlimitedRegisters".to_string(); + } + if input.eq_ignore_ascii_case("MinimumCodeGenerationParallelAssignments") { + return "MinimumCodeGenerationParallelAssignments".to_string(); + } + if input.eq_ignore_ascii_case("ThreeMatroidIntersection") { + return "ThreeMatroidIntersection".to_string(); + } if let Some(pt) = problemreductions::registry::find_problem_type_by_alias(input) { return pt.canonical_name.to_string(); } @@ -291,6 +306,9 @@ mod tests { resolve_alias("biconnectivityaugmentation"), "BiconnectivityAugmentation" ); + // VertexCover alias + assert_eq!(resolve_alias("VC"), "VertexCover"); + assert_eq!(resolve_alias("VertexCover"), "VertexCover"); // Pass-through for full names assert_eq!( resolve_alias("MaximumIndependentSet"), diff --git a/src/lib.rs b/src/lib.rs index e30da8f0..3e4791a7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -47,8 +47,9 @@ pub mod prelude { SparseMatrixCompression, BMF, QUBO, }; pub use 
crate::models::formula::{ - CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, NonTautology, - OneInThreeSatisfiability, Planar3Satisfiability, QuantifiedBooleanFormulas, Satisfiability, + CNFClause, CircuitSAT, KSatisfiability, Maximum2Satisfiability, NAESatisfiability, + NonTautology, OneInThreeSatisfiability, Planar3Satisfiability, QuantifiedBooleanFormulas, + Satisfiability, }; pub use crate::models::graph::{ AcyclicPartition, BalancedCompleteBipartiteSubgraph, BicliqueCover, @@ -62,14 +63,15 @@ pub mod prelude { }; pub use crate::models::graph::{ KColoring, LongestCircuit, MaxCut, MaximalIS, MaximumClique, MaximumIndependentSet, - MaximumMatching, MinMaxMulticenter, MinimumCutIntoBoundedSets, MinimumDominatingSet, - MinimumDummyActivitiesPert, MinimumFeedbackArcSet, MinimumFeedbackVertexSet, - MinimumGeometricConnectedDominatingSet, MinimumMultiwayCut, MinimumSumMulticenter, - MinimumVertexCover, MonochromaticTriangle, MultipleChoiceBranching, - MultipleCopyFileAllocation, OptimalLinearArrangement, PartialFeedbackEdgeSet, - PartitionIntoPathsOfLength2, PartitionIntoTriangles, PathConstrainedNetworkFlow, - RootedTreeArrangement, RuralPostman, ShortestWeightConstrainedPath, SteinerTreeInGraphs, - TravelingSalesman, UndirectedFlowLowerBounds, UndirectedTwoCommodityIntegralFlow, + MaximumLeafSpanningTree, MaximumMatching, MinMaxMulticenter, MinimumCutIntoBoundedSets, + MinimumDominatingSet, MinimumDummyActivitiesPert, MinimumFeedbackArcSet, + MinimumFeedbackVertexSet, MinimumGeometricConnectedDominatingSet, MinimumGraphBandwidth, + MinimumMultiwayCut, MinimumSumMulticenter, MinimumVertexCover, MonochromaticTriangle, + MultipleChoiceBranching, MultipleCopyFileAllocation, OptimalLinearArrangement, + PartialFeedbackEdgeSet, PartitionIntoPathsOfLength2, PartitionIntoTriangles, + PathConstrainedNetworkFlow, RootedTreeArrangement, RuralPostman, + ShortestWeightConstrainedPath, SteinerTreeInGraphs, TravelingSalesman, + UndirectedFlowLowerBounds, 
UndirectedTwoCommodityIntegralFlow, }; pub use crate::models::misc::{ AdditionalKey, BinPacking, BoyceCoddNormalFormViolation, CapacityAssignment, CbqRelation, @@ -90,6 +92,7 @@ pub mod prelude { ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, IntegerKnapsack, MaximumSetPacking, MinimumCardinalityKey, MinimumHittingSet, MinimumSetCovering, PrimeAttributeName, RootedTreeStorageAssignment, SetBasis, SetSplitting, + ThreeMatroidIntersection, }; // Core traits diff --git a/src/models/algebraic/minimum_matrix_cover.rs b/src/models/algebraic/minimum_matrix_cover.rs new file mode 100644 index 00000000..7aada4ce --- /dev/null +++ b/src/models/algebraic/minimum_matrix_cover.rs @@ -0,0 +1,147 @@ +//! Minimum Matrix Cover problem implementation. +//! +//! Given an n×n nonnegative integer matrix A, find a sign assignment +//! f: {1,...,n} → {-1,+1} minimizing Σ a_ij · f(i) · f(j). + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumMatrixCover", + display_name: "Minimum Matrix Cover", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find sign assignment minimizing quadratic form over nonnegative integer matrix", + fields: &[ + FieldInfo { name: "matrix", type_name: "Vec>", description: "n×n nonnegative integer matrix" }, + ], + } +} + +/// Minimum Matrix Cover. +/// +/// Given an n×n nonnegative integer matrix A, find a function +/// f: {1,...,n} → {-1,+1} that minimizes the quadratic form: +/// +/// Σ_{i,j} a_ij · f(i) · f(j) +/// +/// Each binary variable x_i ∈ {0,1} maps to a sign: f(i) = 2·x_i - 1 +/// (0 → -1, 1 → +1). 
+/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::MinimumMatrixCover; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let problem = MinimumMatrixCover::new(vec![ +/// vec![0, 3, 1, 0], +/// vec![3, 0, 0, 2], +/// vec![1, 0, 0, 4], +/// vec![0, 2, 4, 0], +/// ]); +/// +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumMatrixCover { + /// The n×n nonnegative integer matrix. + matrix: Vec>, +} + +impl MinimumMatrixCover { + /// Create a new MinimumMatrixCover instance. + /// + /// # Panics + /// + /// Panics if the matrix is not square or has inconsistent row lengths. + pub fn new(matrix: Vec>) -> Self { + let n = matrix.len(); + for (i, row) in matrix.iter().enumerate() { + assert_eq!( + row.len(), + n, + "Matrix must be square: row {i} has {} columns, expected {n}", + row.len() + ); + } + Self { matrix } + } + + /// Returns the number of rows (= columns) of the matrix. + pub fn num_rows(&self) -> usize { + self.matrix.len() + } + + /// Returns a reference to the matrix. 
+ pub fn matrix(&self) -> &[Vec] { + &self.matrix + } +} + +impl Problem for MinimumMatrixCover { + const NAME: &'static str = "MinimumMatrixCover"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_rows()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.num_rows(); + if config.len() != n { + return Min(None); + } + if config.iter().any(|&v| v >= 2) { + return Min(None); + } + + // Map config to signs: 0 → -1, 1 → +1 + let signs: Vec = config.iter().map(|&x| 2 * x as i64 - 1).collect(); + + // Compute Σ_{i,j} a_ij * f(i) * f(j) + let mut value: i64 = 0; + for i in 0..n { + for j in 0..n { + value += self.matrix[i][j] * signs[i] * signs[j]; + } + } + + Min(Some(value)) + } +} + +crate::declare_variants! { + default MinimumMatrixCover => "2^num_rows", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 4×4 symmetric matrix with zero diagonal + // Config [0,1,1,0] → f=(-1,+1,+1,-1) → value = -20 + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_matrix_cover", + instance: Box::new(MinimumMatrixCover::new(vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ])), + optimal_config: vec![0, 1, 1, 0], + optimal_value: serde_json::json!(-20), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/minimum_matrix_cover.rs"] +mod tests; diff --git a/src/models/algebraic/minimum_weight_decoding.rs b/src/models/algebraic/minimum_weight_decoding.rs new file mode 100644 index 00000000..a4c42420 --- /dev/null +++ b/src/models/algebraic/minimum_weight_decoding.rs @@ -0,0 +1,170 @@ +//! Minimum Weight Decoding problem implementation. +//! +//! Given an n x m binary matrix H (parity-check matrix) and a binary syndrome +//! vector s of length n, find a binary vector x of length m minimizing the +//! Hamming weight |x| subject to Hx ≡ s (mod 2). 
+ +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumWeightDecoding", + display_name: "Minimum Weight Decoding", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find minimum Hamming weight binary vector x such that Hx ≡ s (mod 2)", + fields: &[ + FieldInfo { name: "matrix", type_name: "Vec>", description: "n×m binary parity-check matrix H" }, + FieldInfo { name: "target", type_name: "Vec", description: "binary syndrome vector s of length n" }, + ], + } +} + +/// Minimum Weight Decoding. +/// +/// Given an n×m binary matrix H and a binary syndrome vector s, find a binary +/// vector x of length m that minimizes the Hamming weight |x| (number of 1s) +/// subject to Hx ≡ s (mod 2). +/// +/// # Representation +/// +/// Each of the m columns corresponds to a binary variable x_j ∈ {0, 1}. +/// The evaluator checks whether the GF(2) linear system Hx = s is satisfied, +/// and returns the Hamming weight of x if feasible. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::MinimumWeightDecoding; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let matrix = vec![ +/// vec![true, false, true, true], +/// vec![false, true, true, false], +/// vec![true, true, false, true], +/// ]; +/// let target = vec![true, true, false]; +/// let problem = MinimumWeightDecoding::new(matrix, target); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumWeightDecoding { + /// The n×m binary parity-check matrix H. + matrix: Vec>, + /// The binary syndrome vector s of length n. + target: Vec, +} + +impl MinimumWeightDecoding { + /// Create a new MinimumWeightDecoding instance. 
+ /// + /// # Panics + /// + /// Panics if the matrix is empty, rows have inconsistent lengths, + /// target length does not match the number of rows, or there are no columns. + pub fn new(matrix: Vec>, target: Vec) -> Self { + assert!(!matrix.is_empty(), "Matrix must have at least one row"); + let num_cols = matrix[0].len(); + assert!(num_cols > 0, "Matrix must have at least one column"); + for row in &matrix { + assert_eq!(row.len(), num_cols, "All rows must have the same length"); + } + assert_eq!( + target.len(), + matrix.len(), + "Target length must equal number of rows" + ); + Self { matrix, target } + } + + /// Returns a reference to the parity-check matrix H. + pub fn matrix(&self) -> &[Vec] { + &self.matrix + } + + /// Returns a reference to the syndrome vector s. + pub fn target(&self) -> &[bool] { + &self.target + } + + /// Returns the number of rows of H. + pub fn num_rows(&self) -> usize { + self.matrix.len() + } + + /// Returns the number of columns of H. + pub fn num_cols(&self) -> usize { + self.matrix[0].len() + } +} + +impl Problem for MinimumWeightDecoding { + const NAME: &'static str = "MinimumWeightDecoding"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_cols()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.num_cols() { + return Min(None); + } + if config.iter().any(|&v| v >= 2) { + return Min(None); + } + + // Check Hx ≡ s (mod 2) for each row + for (i, row) in self.matrix.iter().enumerate() { + let dot: usize = row + .iter() + .zip(config.iter()) + .filter(|(&h, &x)| h && x == 1) + .count(); + let syndrome_bit = dot % 2 == 1; + if syndrome_bit != self.target[i] { + return Min(None); + } + } + + // Feasible: return Hamming weight + let weight: usize = config.iter().filter(|&&v| v == 1).count(); + Min(Some(weight)) + } +} + +crate::declare_variants! 
{ + default MinimumWeightDecoding => "2^(0.0494 * num_cols)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // H (3×4): [[1,0,1,1],[0,1,1,0],[1,1,0,1]], s = [1,1,0] + // Config [0,0,1,0] → weight 1, Hx = [1,1,0] ≡ s → Min(1) + let matrix = vec![ + vec![true, false, true, true], + vec![false, true, true, false], + vec![true, true, false, true], + ]; + let target = vec![true, true, false]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_weight_decoding", + instance: Box::new(MinimumWeightDecoding::new(matrix, target)), + optimal_config: vec![0, 0, 1, 0], + optimal_value: serde_json::json!(1), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/minimum_weight_decoding.rs"] +mod tests; diff --git a/src/models/algebraic/mod.rs b/src/models/algebraic/mod.rs index e6fc86f6..f9c7c79a 100644 --- a/src/models/algebraic/mod.rs +++ b/src/models/algebraic/mod.rs @@ -14,6 +14,7 @@ //! - [`QuadraticDiophantineEquations`]: Decide ax² + by = c in positive integers //! - [`SimultaneousIncongruences`]: Decide whether x ≢ aᵢ (mod bᵢ) for all i simultaneously //! - [`MinimumMatrixDomination`]: Minimum Matrix Domination (minimum dominating set of 1-entries) +//! - [`MinimumWeightDecoding`]: Minimum Weight Decoding (minimize Hamming weight of Hx≡s mod 2) //! - [`MinimumWeightSolutionToLinearEquations`]: Minimum Weight Solution to Linear Equations (minimize Hamming weight of Ay=b solution) //! 
- [`SparseMatrixCompression`]: Sparse Matrix Compression by row overlay @@ -26,7 +27,9 @@ pub(crate) mod consecutive_ones_submatrix; pub(crate) mod equilibrium_point; pub(crate) mod feasible_basis_extension; pub(crate) mod ilp; +pub(crate) mod minimum_matrix_cover; pub(crate) mod minimum_matrix_domination; +pub(crate) mod minimum_weight_decoding; pub(crate) mod minimum_weight_solution_to_linear_equations; pub(crate) mod quadratic_assignment; pub(crate) mod quadratic_congruences; @@ -44,7 +47,9 @@ pub use consecutive_ones_submatrix::ConsecutiveOnesSubmatrix; pub use equilibrium_point::EquilibriumPoint; pub use feasible_basis_extension::FeasibleBasisExtension; pub use ilp::{Comparison, LinearConstraint, ObjectiveSense, VariableDomain, ILP}; +pub use minimum_matrix_cover::MinimumMatrixCover; pub use minimum_matrix_domination::MinimumMatrixDomination; +pub use minimum_weight_decoding::MinimumWeightDecoding; pub use minimum_weight_solution_to_linear_equations::MinimumWeightSolutionToLinearEquations; pub use quadratic_assignment::QuadraticAssignment; pub use quadratic_congruences::QuadraticCongruences; @@ -65,7 +70,9 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Collection of 2-literal clauses" }, + ], + } +} + +/// Maximum 2-Satisfiability problem where each clause has exactly 2 literals. +/// +/// Given a set of Boolean variables and a collection of clauses, each containing +/// exactly 2 literals, find a truth assignment that maximizes the number of +/// simultaneously satisfied clauses. 
+/// +/// # Example +/// +/// ``` +/// use problemreductions::models::formula::{Maximum2Satisfiability, CNFClause}; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let problem = Maximum2Satisfiability::new( +/// 3, +/// vec![ +/// CNFClause::new(vec![1, 2]), // x1 OR x2 +/// CNFClause::new(vec![-1, -2]), // NOT x1 OR NOT x2 +/// CNFClause::new(vec![1, 3]), // x1 OR x3 +/// ], +/// ); +/// +/// let solver = BruteForce::new(); +/// let value = solver.solve(&problem); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Maximum2Satisfiability { + /// Number of Boolean variables. + num_vars: usize, + /// Clauses in CNF, each with exactly 2 literals. + clauses: Vec, +} + +impl Maximum2Satisfiability { + /// Create a new MAX-2-SAT problem. + /// + /// # Panics + /// Panics if any clause does not have exactly 2 literals. + pub fn new(num_vars: usize, clauses: Vec) -> Self { + for (i, clause) in clauses.iter().enumerate() { + assert!( + clause.len() == 2, + "Clause {} has {} literals, expected 2", + i, + clause.len() + ); + } + Self { num_vars, clauses } + } + + /// Get the number of variables. + pub fn num_vars(&self) -> usize { + self.num_vars + } + + /// Get the number of clauses. + pub fn num_clauses(&self) -> usize { + self.clauses.len() + } + + /// Get the clauses. + pub fn clauses(&self) -> &[CNFClause] { + &self.clauses + } + + /// Count satisfied clauses for an assignment. 
+ pub fn count_satisfied(&self, assignment: &[bool]) -> usize { + self.clauses + .iter() + .filter(|c| c.is_satisfied(assignment)) + .count() + } +} + +impl Problem for Maximum2Satisfiability { + const NAME: &'static str = "Maximum2Satisfiability"; + type Value = Max; + + fn dims(&self) -> Vec { + vec![2; self.num_vars] + } + + fn evaluate(&self, config: &[usize]) -> Max { + let assignment = super::config_to_assignment(config); + Max(Some(self.count_satisfied(&assignment))) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default Maximum2Satisfiability => "2^(0.7905 * num_variables)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "maximum_2_satisfiability", + instance: Box::new(Maximum2Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2]), + CNFClause::new(vec![1, -2]), + CNFClause::new(vec![-1, 3]), + CNFClause::new(vec![-1, -3]), + CNFClause::new(vec![2, 4]), + CNFClause::new(vec![-3, -4]), + CNFClause::new(vec![3, 4]), + ], + )), + optimal_config: vec![1, 1, 0, 1], + optimal_value: serde_json::json!(6), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/formula/maximum_2_satisfiability.rs"] +mod tests; diff --git a/src/models/formula/mod.rs b/src/models/formula/mod.rs index f78f2d7e..e2c09d8d 100644 --- a/src/models/formula/mod.rs +++ b/src/models/formula/mod.rs @@ -4,6 +4,7 @@ //! - [`Satisfiability`]: Boolean satisfiability (SAT) with CNF clauses //! - [`NAESatisfiability`]: Not-All-Equal satisfiability with CNF clauses //! - [`KSatisfiability`]: K-SAT where each clause has exactly K literals +//! - [`Maximum2Satisfiability`]: MAX-2-SAT — maximize satisfied 2-literal clauses //! - [`NonTautology`]: Find a falsifying assignment for a DNF formula //! - [`OneInThreeSatisfiability`]: Exactly one literal true per clause (1-in-3 SAT) //! 
- [`Planar3Satisfiability`]: 3-SAT restricted to planar variable-clause incidence graphs @@ -12,6 +13,7 @@ pub(crate) mod circuit; pub(crate) mod ksat; +pub(crate) mod maximum_2_satisfiability; pub(crate) mod nae_satisfiability; pub(crate) mod non_tautology; pub(crate) mod one_in_three_satisfiability; @@ -21,6 +23,7 @@ pub(crate) mod sat; pub use circuit::{Assignment, BooleanExpr, BooleanOp, Circuit, CircuitSAT}; pub use ksat::KSatisfiability; +pub use maximum_2_satisfiability::Maximum2Satisfiability; pub use nae_satisfiability::NAESatisfiability; pub use non_tautology::NonTautology; pub use one_in_three_satisfiability::OneInThreeSatisfiability; @@ -39,6 +42,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec => "2^(2.372 * num_vertices / 3)", + MaxCut => "2^(0.7907 * num_vertices)", } #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { - vec![crate::example_db::specs::ModelExampleSpec { - id: "max_cut_simplegraph_i32", - instance: Box::new(MaxCut::<_, i32>::unweighted(SimpleGraph::new( - 5, - vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)], - ))), - optimal_config: vec![1, 0, 0, 1, 0], - optimal_value: serde_json::json!(5), - }] + vec![ + crate::example_db::specs::ModelExampleSpec { + id: "max_cut_simplegraph_i32", + instance: Box::new(MaxCut::<_, i32>::unweighted(SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)], + ))), + optimal_config: vec![1, 0, 0, 1, 0], + optimal_value: serde_json::json!(5), + }, + crate::example_db::specs::ModelExampleSpec { + id: "max_cut_simplegraph_one", + instance: Box::new(MaxCut::new( + SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 3), (3, 4)], + ), + vec![One; 7], + )), + optimal_config: vec![0, 1, 0, 1, 0], + optimal_value: serde_json::json!(6), + }, + ] } #[cfg(test)] diff --git a/src/models/graph/maximum_domatic_number.rs b/src/models/graph/maximum_domatic_number.rs new file mode 100644 index 00000000..1052f616 --- /dev/null +++ 
b/src/models/graph/maximum_domatic_number.rs @@ -0,0 +1,185 @@ +//! Maximum Domatic Number problem implementation. +//! +//! The Maximum Domatic Number problem asks for the maximum number k such that the +//! vertex set V of a graph G=(V,E) can be partitioned into k disjoint dominating sets. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Max; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MaximumDomaticNumber", + display_name: "Maximum Domatic Number", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find maximum number of disjoint dominating sets partitioning V", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + ], + } +} + +/// The Maximum Domatic Number problem. +/// +/// Given a graph G = (V, E), find the maximum k such that V can be partitioned +/// into k disjoint dominating sets. A dominating set D ⊆ V is a set such that +/// every vertex is either in D or adjacent to a vertex in D. +/// +/// The configuration assigns each vertex to a set index (0..n-1). The value is +/// `Max(Some(k))` where k is the number of non-empty sets if all non-empty sets +/// are dominating, or `Max(None)` if any non-empty set fails domination. 
+/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MaximumDomaticNumber; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Path graph P3: 0-1-2 +/// let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); +/// let problem = MaximumDomaticNumber::new(graph); +/// +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem).unwrap(); +/// let value = problem.evaluate(&witness); +/// // Domatic number of P3 is 2 +/// assert_eq!(value, problemreductions::types::Max(Some(2))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MaximumDomaticNumber { + /// The underlying graph. + graph: G, +} + +impl MaximumDomaticNumber { + /// Create a Maximum Domatic Number problem from a graph. + pub fn new(graph: G) -> Self { + Self { graph } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check whether a partition is valid (all non-empty sets are dominating). + /// + /// Returns `Some(k)` where k is the number of non-empty dominating sets, + /// or `None` if any non-empty set fails the domination property. + fn evaluate_partition(&self, config: &[usize]) -> Option { + let n = self.graph.num_vertices(); + + // Configuration must assign each vertex to exactly one set. + if config.len() != n { + return None; + } + + // Collect which vertices belong to each set + let mut sets: Vec> = vec![vec![]; n]; + for (v, &set_idx) in config.iter().enumerate() { + // Each set index must be within bounds of the available sets. 
+ if set_idx >= n { + return None; + } + sets[set_idx].push(v); + } + + // Check each non-empty set is a dominating set + let mut count = 0; + for set in &sets { + if set.is_empty() { + continue; + } + count += 1; + + // Build membership lookup + let mut in_set = vec![false; n]; + for &v in set { + in_set[v] = true; + } + + // Every vertex must be in the set or adjacent to someone in the set + for v in 0..n { + if in_set[v] { + continue; + } + if !self.graph.neighbors(v).iter().any(|&u| in_set[u]) { + return None; + } + } + } + + Some(count) + } +} + +impl Problem for MaximumDomaticNumber +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "MaximumDomaticNumber"; + type Value = Max; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + let n = self.graph.num_vertices(); + vec![n; n] + } + + fn evaluate(&self, config: &[usize]) -> Max { + match self.evaluate_partition(config) { + Some(k) => Max(Some(k)), + None => Max(None), + } + } +} + +crate::declare_variants! { + default MaximumDomaticNumber => "2.695^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "maximum_domatic_number_simplegraph", + instance: Box::new(MaximumDomaticNumber::new(SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 4), + (2, 5), + (3, 4), + (3, 5), + (4, 5), + ], + ))), + optimal_config: vec![0, 1, 2, 0, 2, 1], + optimal_value: serde_json::json!(3), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/maximum_domatic_number.rs"] +mod tests; diff --git a/src/models/graph/maximum_leaf_spanning_tree.rs b/src/models/graph/maximum_leaf_spanning_tree.rs new file mode 100644 index 00000000..7fbf46b8 --- /dev/null +++ b/src/models/graph/maximum_leaf_spanning_tree.rs @@ -0,0 +1,198 @@ +//! Maximum Leaf Spanning Tree problem implementation. +//! +//! 
Given a connected graph G, find a spanning tree T of G that maximizes +//! the number of leaves (degree-1 vertices) in T. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Max; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MaximumLeafSpanningTree", + display_name: "Maximum Leaf Spanning Tree", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find spanning tree maximizing the number of leaves", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + ], + } +} + +/// The Maximum Leaf Spanning Tree problem. +/// +/// Given a connected graph G = (V, E), find a spanning tree T of G such that +/// the number of leaves (vertices with degree 1 in T) is maximized. +/// +/// # Representation +/// +/// Each edge is assigned a binary variable: +/// - 0: edge is not in the spanning tree +/// - 1: edge is in the spanning tree +/// +/// A valid spanning tree requires exactly n-1 selected edges that form a +/// connected, acyclic subgraph spanning all vertices. +/// +/// # Type Parameters +/// +/// * `G` - The graph type (e.g., `SimpleGraph`) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MaximumLeafSpanningTree { + /// The underlying graph. + graph: G, +} + +impl MaximumLeafSpanningTree { + /// Create a MaximumLeafSpanningTree problem from a graph. + /// + /// The graph must have at least 2 vertices. + pub fn new(graph: G) -> Self { + assert!( + graph.num_vertices() >= 2, + "graph must have at least 2 vertices" + ); + Self { graph } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. 
+ pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check if a configuration is a valid spanning tree. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + is_valid_spanning_tree(&self.graph, config) + } +} + +/// Check if a configuration forms a valid spanning tree: +/// 1. Exactly n-1 edges selected +/// 2. Selected edges form a connected subgraph (which, combined with n-1 edges, implies a tree) +fn is_valid_spanning_tree(graph: &G, config: &[usize]) -> bool { + let n = graph.num_vertices(); + let edges = graph.edges(); + if config.len() != edges.len() { + return false; + } + + // Count selected edges + let selected_count: usize = config.iter().sum(); + if selected_count != n - 1 { + return false; + } + + // Build adjacency from selected edges and check connectivity via BFS + let mut adj: Vec> = vec![vec![]; n]; + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + let (u, v) = edges[idx]; + adj[u].push(v); + adj[v].push(u); + } + } + + // BFS from vertex 0 + let mut visited = vec![false; n]; + let mut queue = std::collections::VecDeque::new(); + visited[0] = true; + queue.push_back(0); + while let Some(v) = queue.pop_front() { + for &u in &adj[v] { + if !visited[u] { + visited[u] = true; + queue.push_back(u); + } + } + } + + // All vertices must be reachable + visited.iter().all(|&v| v) +} + +/// Count the number of leaves (degree-1 vertices) in the tree defined by the config. 
+fn count_leaves(graph: &G, config: &[usize]) -> usize { + let n = graph.num_vertices(); + let edges = graph.edges(); + let mut degree = vec![0usize; n]; + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + let (u, v) = edges[idx]; + degree[u] += 1; + degree[v] += 1; + } + } + degree.iter().filter(|&&d| d == 1).count() +} + +impl Problem for MaximumLeafSpanningTree +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "MaximumLeafSpanningTree"; + type Value = Max; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![2; self.graph.num_edges()] + } + + fn evaluate(&self, config: &[usize]) -> Max { + if !is_valid_spanning_tree(&self.graph, config) { + return Max(None); + } + Max(Some(count_leaves(&self.graph, config))) + } +} + +crate::declare_variants! { + default MaximumLeafSpanningTree => "1.8966^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "maximum_leaf_spanning_tree_simplegraph", + instance: Box::new(MaximumLeafSpanningTree::new(SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 4), + (2, 4), + (2, 5), + (3, 5), + (4, 5), + (1, 3), + ], + ))), + // Edges: 0:(0,1), 1:(0,2), 2:(0,3), 3:(1,4), 4:(2,4), 5:(2,5), 6:(3,5), 7:(4,5), 8:(1,3) + // Tree: {(0,1),(0,2),(0,3),(2,4),(2,5)} = indices 0,1,2,4,5 + // Leaves: 1,3,4,5 (degree 1 each), Internal: 0 (deg 3), 2 (deg 3) + optimal_config: vec![1, 1, 1, 0, 1, 1, 0, 0, 0], + optimal_value: serde_json::json!(4), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/maximum_leaf_spanning_tree.rs"] +mod tests; diff --git a/src/models/graph/minimum_capacitated_spanning_tree.rs b/src/models/graph/minimum_capacitated_spanning_tree.rs new file mode 100644 index 00000000..04762cf3 --- /dev/null +++ b/src/models/graph/minimum_capacitated_spanning_tree.rs @@ -0,0 +1,363 @@ +//! 
Minimum Capacitated Spanning Tree problem implementation. +//! +//! Given a weighted graph with a designated root vertex, vertex requirements, +//! and a capacity bound, find a minimum-weight spanning tree rooted at the root +//! such that for each edge, the sum of requirements in its subtree (on the +//! non-root side) does not exceed the capacity. + +use num_traits::Zero; +use serde::{Deserialize, Serialize}; + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::{Min, WeightElement}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumCapacitatedSpanningTree", + display_name: "Minimum Capacitated Spanning Tree", + aliases: &["MCST"], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + VariantDimension::new("weight", "i32", &["i32"]), + ], + module_path: module_path!(), + description: "Find minimum weight spanning tree with subtree capacity constraints", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + FieldInfo { name: "weights", type_name: "Vec", description: "Edge weights w: E -> R" }, + FieldInfo { name: "root", type_name: "usize", description: "Root vertex" }, + FieldInfo { name: "requirements", type_name: "Vec", description: "Vertex requirements r: V -> R (root has 0)" }, + FieldInfo { name: "capacity", type_name: "W::Sum", description: "Subtree capacity bound" }, + ], + } +} + +/// The Minimum Capacitated Spanning Tree problem. +/// +/// Given a weighted graph G = (V, E), edge weights w_e, a root vertex v0, +/// vertex requirements r_v (with r_{v0} = 0), and a capacity C, find a +/// spanning tree T rooted at v0 such that: +/// - For each edge e in T, the sum of requirements of all vertices in the +/// subtree on the non-root side of e is at most C. +/// - The total weight of T is minimized. 
+/// +/// # Representation +/// +/// Each edge is assigned a binary variable: +/// - 0: edge is not in the spanning tree +/// - 1: edge is in the spanning tree +/// +/// # Type Parameters +/// +/// * `G` - The graph type (e.g., `SimpleGraph`) +/// * `W` - The weight type for edges and requirements (e.g., `i32`) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumCapacitatedSpanningTree { + /// The underlying graph. + graph: G, + /// Weights for each edge (in edge index order). + weights: Vec, + /// Root vertex index. + root: usize, + /// Vertex requirements (root has requirement 0). + requirements: Vec, + /// Subtree capacity bound. + capacity: W::Sum, +} + +impl MinimumCapacitatedSpanningTree { + /// Create a MinimumCapacitatedSpanningTree problem. + /// + /// # Panics + /// - If `weights.len() != graph.num_edges()` + /// - If `requirements.len() != graph.num_vertices()` + /// - If `root >= graph.num_vertices()` + /// - If `graph.num_vertices() < 2` + pub fn new( + graph: G, + weights: Vec, + root: usize, + requirements: Vec, + capacity: W::Sum, + ) -> Self { + assert_eq!( + weights.len(), + graph.num_edges(), + "weights length must match num_edges" + ); + assert_eq!( + requirements.len(), + graph.num_vertices(), + "requirements length must match num_vertices" + ); + assert!( + root < graph.num_vertices(), + "root {root} out of range (num_vertices = {})", + graph.num_vertices() + ); + assert!( + graph.num_vertices() >= 2, + "graph must have at least 2 vertices" + ); + Self { + graph, + weights, + root, + requirements, + capacity, + } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the edge weights. + pub fn weights(&self) -> &[W] { + &self.weights + } + + /// Set new edge weights. + pub fn set_weights(&mut self, weights: Vec) { + assert_eq!(weights.len(), self.graph.num_edges()); + self.weights = weights; + } + + /// Check if the problem uses a non-unit weight type. 
+ pub fn is_weighted(&self) -> bool { + !W::IS_UNIT + } + + /// Get the root vertex. + pub fn root(&self) -> usize { + self.root + } + + /// Get the vertex requirements. + pub fn requirements(&self) -> &[W] { + &self.requirements + } + + /// Get the capacity bound. + pub fn capacity(&self) -> &W::Sum { + &self.capacity + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check if a configuration is a valid capacitated spanning tree. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + is_valid_capacitated_spanning_tree( + &self.graph, + &self.requirements, + self.root, + &self.capacity, + config, + ) + } +} + +/// Check if a configuration forms a valid spanning tree: +/// 1. Exactly n-1 edges selected +/// 2. Selected edges form a connected subgraph +fn is_spanning_tree(graph: &G, config: &[usize]) -> bool { + let n = graph.num_vertices(); + let edges = graph.edges(); + if config.len() != edges.len() { + return false; + } + + let selected_count: usize = config.iter().sum(); + if selected_count != n - 1 { + return false; + } + + // Build adjacency and BFS from vertex 0 + let mut adj: Vec> = vec![vec![]; n]; + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + let (u, v) = edges[idx]; + adj[u].push(v); + adj[v].push(u); + } + } + + let mut visited = vec![false; n]; + let mut queue = std::collections::VecDeque::new(); + visited[0] = true; + queue.push_back(0); + while let Some(v) = queue.pop_front() { + for &u in &adj[v] { + if !visited[u] { + visited[u] = true; + queue.push_back(u); + } + } + } + + visited.iter().all(|&v| v) +} + +/// Compute the subtree requirement sum for each edge in the tree rooted at `root`. +/// Returns None if the tree is invalid, otherwise returns the max subtree sum. 
+fn check_capacity( + graph: &G, + requirements: &[W], + root: usize, + capacity: &W::Sum, + config: &[usize], +) -> bool { + let n = graph.num_vertices(); + let edges = graph.edges(); + + // Build adjacency list with edge indices + let mut adj: Vec> = vec![vec![]; n]; // (neighbor, edge_idx) + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + let (u, v) = edges[idx]; + adj[u].push((v, idx)); + adj[v].push((u, idx)); + } + } + + // Root the tree using BFS from root + let mut parent = vec![usize::MAX; n]; + let mut order = Vec::with_capacity(n); + let mut visited = vec![false; n]; + let mut queue = std::collections::VecDeque::new(); + visited[root] = true; + parent[root] = root; + queue.push_back(root); + while let Some(v) = queue.pop_front() { + order.push(v); + for &(u, _) in &adj[v] { + if !visited[u] { + visited[u] = true; + parent[u] = v; + queue.push_back(u); + } + } + } + + // Compute subtree sums bottom-up + let mut subtree_sum: Vec = requirements.iter().map(|r| r.to_sum()).collect(); + for &v in order.iter().rev() { + if v != root { + let p = parent[v]; + let sv = subtree_sum[v].clone(); + subtree_sum[p] += sv; + } + } + + // Check capacity for each non-root vertex (its subtree sum is the flow on its parent edge) + for (v, sum) in subtree_sum.iter().enumerate() { + if v != root && *sum > *capacity { + return false; + } + } + + true +} + +/// Check if a configuration forms a valid capacitated spanning tree. 
+fn is_valid_capacitated_spanning_tree( + graph: &G, + requirements: &[W], + root: usize, + capacity: &W::Sum, + config: &[usize], +) -> bool { + if !is_spanning_tree(graph, config) { + return false; + } + check_capacity(graph, requirements, root, capacity, config) +} + +impl Problem for MinimumCapacitatedSpanningTree +where + G: Graph + crate::variant::VariantParam, + W: WeightElement + crate::variant::VariantParam, +{ + const NAME: &'static str = "MinimumCapacitatedSpanningTree"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G, W] + } + + fn dims(&self) -> Vec { + vec![2; self.graph.num_edges()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if !is_valid_capacitated_spanning_tree( + &self.graph, + &self.requirements, + self.root, + &self.capacity, + config, + ) { + return Min(None); + } + let mut total = W::Sum::zero(); + for (idx, &selected) in config.iter().enumerate() { + if selected == 1 { + if let Some(w) = self.weights.get(idx) { + total += w.to_sum(); + } + } + } + Min(Some(total)) + } +} + +crate::declare_variants! 
{ + default MinimumCapacitatedSpanningTree => "2^num_edges", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_capacitated_spanning_tree_simplegraph_i32", + instance: Box::new(MinimumCapacitatedSpanningTree::new( + SimpleGraph::new( + 5, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 2), + (1, 4), + (2, 3), + (2, 4), + (3, 4), + ], + ), + vec![2, 1, 4, 3, 1, 2, 3, 1], // edge weights + 0, // root + vec![0, 1, 1, 1, 1], // requirements (root=0) + 3, // capacity + )), + // Optimal: edges {(0,1),(0,2),(1,4),(3,4)} = indices {0,1,4,7} + // Weight = 2+1+1+1 = 5 + // Subtree sums: subtree(1)={1,4}->req=2<=3, subtree(2)={2}->req=1<=3, + // subtree(4)={4}->req=1<=3, subtree(3)={3}->req=1<=3 + optimal_config: vec![1, 1, 0, 0, 1, 0, 0, 1], + optimal_value: serde_json::json!(5), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_capacitated_spanning_tree.rs"] +mod tests; diff --git a/src/models/graph/minimum_edge_cost_flow.rs b/src/models/graph/minimum_edge_cost_flow.rs new file mode 100644 index 00000000..86edf998 --- /dev/null +++ b/src/models/graph/minimum_edge_cost_flow.rs @@ -0,0 +1,296 @@ +//! Minimum Edge-Cost Flow problem implementation. +//! +//! Given a directed graph G = (V, A) with arc capacities c(a) and prices p(a), +//! a source s, a sink t, and a flow requirement R, find an integral flow of +//! value at least R that minimizes the total edge cost — the sum of prices of +//! arcs carrying nonzero flow. +//! +//! This is NP-hard: it generalizes Minimum-Weight Satisfiability via reduction +//! from Minimum Edge-Cost Flow on DAGs (Amaldi et al., 2011). + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; +use serde::{Deserialize, Serialize}; + +inventory::submit! 
{ + ProblemSchemaEntry { + name: "MinimumEdgeCostFlow", + display_name: "Minimum Edge-Cost Flow", + aliases: &["MECF"], + dimensions: &[], + module_path: module_path!(), + description: "Integral flow minimizing the number of arcs with nonzero flow (weighted by price)", + fields: &[ + FieldInfo { name: "graph", type_name: "DirectedGraph", description: "Directed graph G = (V, A)" }, + FieldInfo { name: "prices", type_name: "Vec", description: "Price p(a) for each arc" }, + FieldInfo { name: "capacities", type_name: "Vec", description: "Capacity c(a) for each arc" }, + FieldInfo { name: "source", type_name: "usize", description: "Source vertex s" }, + FieldInfo { name: "sink", type_name: "usize", description: "Sink vertex t" }, + FieldInfo { name: "required_flow", type_name: "i64", description: "Flow requirement R" }, + ], + } +} + +/// Minimum Edge-Cost Flow problem. +/// +/// Given a directed graph G = (V, A) with arc capacities c(a) and prices p(a), +/// source s, sink t, and flow requirement R, find an integral flow f: A -> Z_0^+ +/// of value at least R minimizing the total edge cost sum_{a: f(a)>0} p(a). +/// +/// # Variables +/// +/// |A| variables: variable a ranges over {0, ..., c(a)} representing the flow +/// on arc a. 
+/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MinimumEdgeCostFlow; +/// use problemreductions::topology::DirectedGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 5-vertex network: s=0, t=4, R=3 +/// let graph = DirectedGraph::new(5, vec![ +/// (0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4), +/// ]); +/// let problem = MinimumEdgeCostFlow::new( +/// graph, +/// vec![3, 1, 2, 0, 0, 0], // prices +/// vec![2, 2, 2, 2, 2, 2], // capacities +/// 0, 4, 3, +/// ); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem).unwrap(); +/// assert_eq!(problem.evaluate(&witness), problemreductions::types::Min(Some(3))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumEdgeCostFlow { + /// The directed graph G = (V, A). + graph: DirectedGraph, + /// Price p(a) for each arc. + prices: Vec, + /// Capacity c(a) for each arc. + capacities: Vec, + /// Source vertex s. + source: usize, + /// Sink vertex t. + sink: usize, + /// Flow requirement R. + required_flow: i64, +} + +impl MinimumEdgeCostFlow { + /// Create a new Minimum Edge-Cost Flow problem. 
+ /// + /// # Arguments + /// + /// * `graph` - Directed graph G = (V, A) + /// * `prices` - Price p(a) for each arc (one per arc) + /// * `capacities` - Capacity c(a) for each arc (one per arc, all non-negative) + /// * `source` - Source vertex index + /// * `sink` - Sink vertex index + /// * `required_flow` - Minimum flow requirement R + /// + /// # Panics + /// + /// Panics if: + /// - `prices.len() != graph.num_arcs()` + /// - `capacities.len() != graph.num_arcs()` + /// - `source >= graph.num_vertices()` + /// - `sink >= graph.num_vertices()` + /// - `source == sink` + /// - Any capacity is negative + pub fn new( + graph: DirectedGraph, + prices: Vec, + capacities: Vec, + source: usize, + sink: usize, + required_flow: i64, + ) -> Self { + let n = graph.num_vertices(); + let m = graph.num_arcs(); + assert_eq!( + prices.len(), + m, + "prices length ({}) must match num_arcs ({m})", + prices.len() + ); + assert_eq!( + capacities.len(), + m, + "capacities length ({}) must match num_arcs ({m})", + capacities.len() + ); + assert!(source < n, "source ({source}) >= num_vertices ({n})"); + assert!(sink < n, "sink ({sink}) >= num_vertices ({n})"); + assert_ne!(source, sink, "source and sink must be distinct"); + for (i, &c) in capacities.iter().enumerate() { + assert!(c >= 0, "capacity[{i}] = {c} is negative"); + } + Self { + graph, + prices, + capacities, + source, + sink, + required_flow, + } + } + + /// Get a reference to the underlying directed graph. + pub fn graph(&self) -> &DirectedGraph { + &self.graph + } + + /// Get a reference to the prices. + pub fn prices(&self) -> &[i64] { + &self.prices + } + + /// Get a reference to the capacities. + pub fn capacities(&self) -> &[i64] { + &self.capacities + } + + /// Get the source vertex. + pub fn source(&self) -> usize { + self.source + } + + /// Get the sink vertex. + pub fn sink(&self) -> usize { + self.sink + } + + /// Get the flow requirement. 
+ pub fn required_flow(&self) -> i64 { + self.required_flow + } + + /// Get the number of vertices. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges (arcs). + pub fn num_edges(&self) -> usize { + self.graph.num_arcs() + } + + /// Get the maximum capacity across all arcs (0 if empty). + pub fn max_capacity(&self) -> i64 { + self.capacities.iter().copied().max().unwrap_or(0) + } + + /// Get a reference to the edges (arcs). + pub fn edges(&self) -> Vec<(usize, usize)> { + self.graph.arcs() + } + + /// Check whether a flow assignment is feasible. + /// + /// A flow is feasible if: + /// 1. Each arc's flow does not exceed its capacity + /// 2. Flow is conserved at every non-terminal vertex + /// 3. Net flow into the sink is at least the required flow + pub fn is_feasible(&self, config: &[usize]) -> bool { + let m = self.graph.num_arcs(); + if config.len() != m { + return false; + } + let arcs = self.graph.arcs(); + + // (1) Capacity constraints + for (flow, cap) in config.iter().zip(self.capacities.iter()) { + if (*flow as i64) > *cap { + return false; + } + } + + // (2) Flow conservation at non-terminal vertices + let n = self.graph.num_vertices(); + let mut balance = vec![0_i64; n]; + for (a, &(u, v)) in arcs.iter().enumerate() { + let flow = config[a] as i64; + balance[u] -= flow; + balance[v] += flow; + } + + for (v, &bal) in balance.iter().enumerate() { + if v != self.source && v != self.sink && bal != 0 { + return false; + } + } + + // (3) Flow requirement: net flow into sink >= R + if balance[self.sink] < self.required_flow { + return false; + } + + true + } + + /// Compute the edge cost for a feasible flow: sum of prices of arcs with + /// nonzero flow. 
+ pub fn edge_cost(&self, config: &[usize]) -> i64 { + config + .iter() + .enumerate() + .filter(|(_, &f)| f > 0) + .map(|(a, _)| self.prices[a]) + .sum() + } +} + +impl Problem for MinimumEdgeCostFlow { + const NAME: &'static str = "MinimumEdgeCostFlow"; + type Value = crate::types::Min; + + fn dims(&self) -> Vec { + self.capacities.iter().map(|&c| (c as usize) + 1).collect() + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Min { + if self.is_feasible(config) { + crate::types::Min(Some(self.edge_cost(config))) + } else { + crate::types::Min(None) + } + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default MinimumEdgeCostFlow => "(max_capacity + 1)^num_edges", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_edge_cost_flow", + instance: Box::new(MinimumEdgeCostFlow::new( + crate::topology::DirectedGraph::new( + 5, + vec![(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)], + ), + vec![3, 1, 2, 0, 0, 0], // prices + vec![2, 2, 2, 2, 2, 2], // capacities + 0, + 4, + 3, + )), + // Optimal: route 1 unit via v2 and 2 units via v3 → cost = 1 + 2 = 3 + // config = [0, 1, 2, 0, 1, 2] + optimal_config: vec![0, 1, 2, 0, 1, 2], + optimal_value: serde_json::json!(3), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_edge_cost_flow.rs"] +mod tests; diff --git a/src/models/graph/minimum_graph_bandwidth.rs b/src/models/graph/minimum_graph_bandwidth.rs new file mode 100644 index 00000000..aac0cbce --- /dev/null +++ b/src/models/graph/minimum_graph_bandwidth.rs @@ -0,0 +1,172 @@ +//! Minimum Graph Bandwidth problem implementation. +//! +//! The Minimum Graph Bandwidth problem asks for a bijection +//! f: V -> {0, 1, ..., |V|-1} that minimizes the maximum edge stretch +//! max_{(u,v) in E} |f(u) - f(v)|. 
+ +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumGraphBandwidth", + display_name: "Minimum Graph Bandwidth", + aliases: &["MGB"], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find a vertex ordering minimizing the maximum edge stretch", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The undirected graph G=(V,E)" }, + ], + } +} + +/// The Minimum Graph Bandwidth problem. +/// +/// Given an undirected graph G = (V, E), find a bijection f: V -> {0, 1, ..., |V|-1} +/// that minimizes the bandwidth max_{(u,v) in E} |f(u) - f(v)|. +/// +/// # Representation +/// +/// Each vertex is assigned a variable representing its position in the arrangement. +/// Variable i takes a value in {0, 1, ..., n-1}, and a valid configuration must be +/// a permutation (all positions are distinct). The objective is to minimize the +/// maximum edge stretch. +/// +/// # Type Parameters +/// +/// * `G` - The graph type (e.g., `SimpleGraph`) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MinimumGraphBandwidth; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Star graph S4: center 0 connected to 1, 2, 3 +/// let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); +/// let problem = MinimumGraphBandwidth::new(graph); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(deserialize = "G: serde::Deserialize<'de>"))] +pub struct MinimumGraphBandwidth { + /// The underlying graph. 
+ graph: G, +} + +impl MinimumGraphBandwidth { + /// Create a new Minimum Graph Bandwidth problem. + /// + /// # Arguments + /// * `graph` - The undirected graph G = (V, E) + pub fn new(graph: G) -> Self { + Self { graph } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check if a configuration forms a valid permutation of {0, ..., n-1}. + fn is_valid_permutation(&self, config: &[usize]) -> bool { + let n = self.graph.num_vertices(); + if config.len() != n { + return false; + } + let mut seen = vec![false; n]; + for &pos in config { + if pos >= n || seen[pos] { + return false; + } + seen[pos] = true; + } + true + } + + /// Compute the bandwidth (maximum edge stretch) for a given arrangement. + /// + /// Returns `None` if the configuration is not a valid permutation. + pub fn bandwidth(&self, config: &[usize]) -> Option { + if !self.is_valid_permutation(config) { + return None; + } + let mut max_stretch = 0usize; + for (u, v) in self.graph.edges() { + let stretch = config[u].abs_diff(config[v]); + max_stretch = max_stretch.max(stretch); + } + Some(max_stretch) + } +} + +impl Problem for MinimumGraphBandwidth +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "MinimumGraphBandwidth"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + let n = self.graph.num_vertices(); + vec![n; n] + } + + fn evaluate(&self, config: &[usize]) -> Min { + match self.bandwidth(config) { + Some(bw) => Min(Some(bw)), + None => Min(None), + } + } +} + +crate::declare_variants! 
{ + default MinimumGraphBandwidth => "factorial(num_vertices)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + use crate::topology::SimpleGraph; + // Star graph S4: center 0 connected to 1, 2, 3 + // Config [1,0,2,3]: f(0)=1, f(1)=0, f(2)=2, f(3)=3 + // Bandwidth = max(|1-0|, |1-2|, |1-3|) = max(1, 1, 2) = 2 + // Optimal bandwidth for S4 is 2 (center must be adjacent to all leaves, + // placing center at position 1 achieves max stretch 2). + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_graph_bandwidth", + instance: Box::new(MinimumGraphBandwidth::new(SimpleGraph::new( + 4, + vec![(0, 1), (0, 2), (0, 3)], + ))), + optimal_config: vec![1, 0, 2, 3], + optimal_value: serde_json::json!(2), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_graph_bandwidth.rs"] +mod tests; diff --git a/src/models/graph/minimum_metric_dimension.rs b/src/models/graph/minimum_metric_dimension.rs new file mode 100644 index 00000000..21299860 --- /dev/null +++ b/src/models/graph/minimum_metric_dimension.rs @@ -0,0 +1,194 @@ +//! Minimum Metric Dimension problem implementation. +//! +//! Given a graph G = (V, E), find a minimum resolving set — a smallest subset +//! V' ⊆ V such that for all distinct u, v ∈ V, there exists w ∈ V' with +//! d(u, w) ≠ d(v, w), where d denotes shortest-path distance. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; +use std::collections::VecDeque; + +inventory::submit! 
{
    ProblemSchemaEntry {
        name: "MinimumMetricDimension",
        display_name: "Minimum Metric Dimension",
        aliases: &[],
        dimensions: &[
            VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]),
        ],
        module_path: module_path!(),
        description: "Find minimum resolving set of a graph",
        fields: &[
            FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" },
        ],
    }
}

/// Compute BFS shortest-path distances from a single source vertex.
///
/// Returns a vector where `dist[v]` is the shortest-path distance from
/// `source` to `v`, or `usize::MAX` if `v` is unreachable.
pub fn bfs_distances<G: Graph>(graph: &G, source: usize) -> Vec<usize> {
    let n = graph.num_vertices();
    let mut dist = vec![usize::MAX; n];
    dist[source] = 0;
    let mut queue = VecDeque::new();
    queue.push_back(source);
    while let Some(u) = queue.pop_front() {
        for v in graph.neighbors(u) {
            if dist[v] == usize::MAX {
                dist[v] = dist[u] + 1;
                queue.push_back(v);
            }
        }
    }
    dist
}

/// The Minimum Metric Dimension problem.
///
/// Given a graph G = (V, E), find a minimum-size resolving set V' ⊆ V such
/// that for every pair of distinct vertices u, v ∈ V, there exists at least
/// one vertex w ∈ V' with d(u, w) ≠ d(v, w).
///
/// # Type Parameters
///
/// * `G` - The graph type (e.g., `SimpleGraph`)
///
/// # Example
///
/// ```
/// use problemreductions::models::graph::MinimumMetricDimension;
/// use problemreductions::topology::SimpleGraph;
/// use problemreductions::{Problem, Solver, BruteForce};
///
/// // House graph: vertices 0–4
/// let graph = SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]);
/// let problem = MinimumMetricDimension::new(graph);
///
/// let solver = BruteForce::new();
/// let solution = solver.find_witness(&problem).unwrap();
/// let value = problem.evaluate(&solution);
/// assert!(value.is_valid());
/// ```
// NOTE(review): the `<G>` parameter and the generics inside the manual
// Deserialize impl were stripped from the pasted source; reconstructed from
// the surviving bounds (`G: Graph + Deserialize<'de>`, `D: serde::Deserializer<'de>`).
#[derive(Debug, Clone, Serialize)]
pub struct MinimumMetricDimension<G> {
    /// The underlying graph.
    graph: G,
    /// Precomputed all-pairs shortest-path distances.
    #[serde(skip)]
    dist_matrix: Vec<Vec<usize>>,
}

// Manual Deserialize: only the graph is serialized; the distance matrix is
// recomputed on deserialization via `Self::new`.
impl<'de, G: Graph + Deserialize<'de>> Deserialize<'de> for MinimumMetricDimension<G> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        #[derive(Deserialize)]
        struct Helper<G> {
            graph: G,
        }
        let helper = Helper::<G>::deserialize(deserializer)?;
        Ok(Self::new(helper.graph))
    }
}

impl<G: Graph> MinimumMetricDimension<G> {
    /// Create a MinimumMetricDimension problem from a graph.
    pub fn new(graph: G) -> Self {
        let n = graph.num_vertices();
        let dist_matrix = (0..n).map(|v| bfs_distances(&graph, v)).collect();
        Self { graph, dist_matrix }
    }

    /// Get a reference to the underlying graph.
    pub fn graph(&self) -> &G {
        &self.graph
    }

    /// Get the number of vertices in the underlying graph.
    pub fn num_vertices(&self) -> usize {
        self.graph.num_vertices()
    }

    /// Get the number of edges in the underlying graph.
    pub fn num_edges(&self) -> usize {
        self.graph.num_edges()
    }

    /// Check whether a configuration (binary vertex selection) forms a resolving set.
    ///
    /// A set S ⊆ V is resolving if for every pair of distinct vertices u, v ∈ V,
    /// there exists some w ∈ S such that d(u, w) ≠ d(v, w).
    pub fn is_resolving(&self, config: &[usize]) -> bool {
        let n = self.graph.num_vertices();
        let selected: Vec<usize> = (0..n).filter(|&i| config[i] == 1).collect();
        if selected.is_empty() {
            return false;
        }

        // Check that all pairs of distinct vertices have different distance vectors
        // using precomputed all-pairs distances
        for u in 0..n {
            for v in (u + 1)..n {
                let all_same = selected
                    .iter()
                    .all(|&w| self.dist_matrix[w][u] == self.dist_matrix[w][v]);
                if all_same {
                    return false;
                }
            }
        }

        true
    }
}

impl<G> Problem for MinimumMetricDimension<G>
where
    G: Graph + crate::variant::VariantParam,
{
    const NAME: &'static str = "MinimumMetricDimension";
    type Value = Min<usize>;

    fn variant() -> Vec<(&'static str, &'static str)> {
        crate::variant_params![G]
    }

    fn dims(&self) -> Vec<usize> {
        vec![2; self.graph.num_vertices()]
    }

    fn evaluate(&self, config: &[usize]) -> Min<usize> {
        if !self.is_resolving(config) {
            return Min(None);
        }
        let count = config.iter().filter(|&&x| x == 1).count();
        Min(Some(count))
    }
}

crate::declare_variants!
{ + default MinimumMetricDimension => "2^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_metric_dimension_simplegraph", + instance: Box::new(MinimumMetricDimension::new(SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)], + ))), + optimal_config: vec![1, 1, 0, 0, 0], + optimal_value: serde_json::json!(2), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_metric_dimension.rs"] +mod tests; diff --git a/src/models/graph/minimum_vertex_cover.rs b/src/models/graph/minimum_vertex_cover.rs index db484b40..d10a9eac 100644 --- a/src/models/graph/minimum_vertex_cover.rs +++ b/src/models/graph/minimum_vertex_cover.rs @@ -139,7 +139,7 @@ where } /// Check if a configuration forms a valid vertex cover. -fn is_vertex_cover_config(graph: &G, config: &[usize]) -> bool { +pub(crate) fn is_vertex_cover_config(graph: &G, config: &[usize]) -> bool { for (u, v) in graph.edges() { let u_covered = config.get(u).copied().unwrap_or(0) == 1; let v_covered = config.get(v).copied().unwrap_or(0) == 1; diff --git a/src/models/graph/mod.rs b/src/models/graph/mod.rs index dcc6b2a8..4bb2e365 100644 --- a/src/models/graph/mod.rs +++ b/src/models/graph/mod.rs @@ -6,16 +6,21 @@ //! - [`DegreeConstrainedSpanningTree`]: Spanning tree with maximum vertex degree at most K //! - [`DirectedHamiltonianPath`]: Directed Hamiltonian path (decision problem) //! - [`MaximumIndependentSet`]: Maximum weight independent set +//! - [`MaximumLeafSpanningTree`]: Spanning tree maximizing number of leaves //! - [`MaximalIS`]: Maximal independent set //! - [`MinimumVertexCover`]: Minimum weight vertex cover //! - [`MinimumCoveringByCliques`]: Minimum number of cliques covering all edges //! - [`MonochromaticTriangle`]: 2-color edges so that no triangle is monochromatic //! 
- [`MinimumIntersectionGraphBasis`]: Minimum universe size for intersection graph representation +//! - [`MinimumCapacitatedSpanningTree`]: Minimum weight spanning tree with subtree capacity constraints //! - [`MinimumDominatingSet`]: Minimum dominating set +//! - [`MinimumMetricDimension`]: Minimum resolving set (metric dimension) +//! - [`MinimumEdgeCostFlow`]: Minimum edge-cost integral flow //! - [`MinimumGeometricConnectedDominatingSet`]: Minimum connected dominating set in a geometric point set //! - [`MinimumFeedbackVertexSet`]: Minimum weight feedback vertex set in a directed graph //! - [`MaximumClique`]: Maximum weight clique //! - [`MaximumAchromaticNumber`]: Maximum number of colors in a complete proper coloring +//! - [`MaximumDomaticNumber`]: Maximum partition into disjoint dominating sets //! - [`MaxCut`]: Maximum cut on weighted graphs //! - [`MinimumCutIntoBoundedSets`]: Minimum cut into bounded sets (Garey & Johnson ND17) //! - [`MinimumDummyActivitiesPert`]: Minimum dummy activities in activity-on-arc PERT networks @@ -65,8 +70,10 @@ //! - [`IntegralFlowWithMultipliers`]: Integral flow with vertex multipliers on a directed graph //! - [`UndirectedFlowLowerBounds`]: Feasible s-t flow in an undirected graph with lower/upper bounds //! - [`UndirectedTwoCommodityIntegralFlow`]: Two-commodity integral flow on undirected graphs +//! - [`VertexCover`]: Decision version of Minimum Vertex Cover (Karp's 21) //! - [`StrongConnectivityAugmentation`]: Strong connectivity augmentation with weighted candidate arcs //! - [`DisjointConnectingPaths`]: Vertex-disjoint paths connecting prescribed terminal pairs +//! 
- [`MinimumGraphBandwidth`]: Minimum graph bandwidth (minimize maximum edge stretch) pub(crate) mod acyclic_partition; pub(crate) mod balanced_complete_bipartite_subgraph; @@ -98,18 +105,24 @@ pub(crate) mod max_cut; pub(crate) mod maximal_is; pub(crate) mod maximum_achromatic_number; pub(crate) mod maximum_clique; +pub(crate) mod maximum_domatic_number; pub(crate) mod maximum_independent_set; +pub(crate) mod maximum_leaf_spanning_tree; pub(crate) mod maximum_matching; pub(crate) mod min_max_multicenter; +pub(crate) mod minimum_capacitated_spanning_tree; pub(crate) mod minimum_covering_by_cliques; pub(crate) mod minimum_cut_into_bounded_sets; pub(crate) mod minimum_dominating_set; pub(crate) mod minimum_dummy_activities_pert; +pub(crate) mod minimum_edge_cost_flow; pub(crate) mod minimum_feedback_arc_set; pub(crate) mod minimum_feedback_vertex_set; pub(crate) mod minimum_geometric_connected_dominating_set; +pub(crate) mod minimum_graph_bandwidth; pub(crate) mod minimum_intersection_graph_basis; pub(crate) mod minimum_maximal_matching; +pub(crate) mod minimum_metric_dimension; pub(crate) mod minimum_multiway_cut; pub(crate) mod minimum_sum_multicenter; pub(crate) mod minimum_vertex_cover; @@ -136,6 +149,7 @@ pub(crate) mod subgraph_isomorphism; pub(crate) mod traveling_salesman; pub(crate) mod undirected_flow_lower_bounds; pub(crate) mod undirected_two_commodity_integral_flow; +pub(crate) mod vertex_cover; pub use acyclic_partition::AcyclicPartition; pub use balanced_complete_bipartite_subgraph::BalancedCompleteBipartiteSubgraph; @@ -167,18 +181,24 @@ pub use max_cut::MaxCut; pub use maximal_is::MaximalIS; pub use maximum_achromatic_number::MaximumAchromaticNumber; pub use maximum_clique::MaximumClique; +pub use maximum_domatic_number::MaximumDomaticNumber; pub use maximum_independent_set::MaximumIndependentSet; +pub use maximum_leaf_spanning_tree::MaximumLeafSpanningTree; pub use maximum_matching::MaximumMatching; pub use min_max_multicenter::MinMaxMulticenter; 
+pub use minimum_capacitated_spanning_tree::MinimumCapacitatedSpanningTree; pub use minimum_covering_by_cliques::MinimumCoveringByCliques; pub use minimum_cut_into_bounded_sets::MinimumCutIntoBoundedSets; pub use minimum_dominating_set::MinimumDominatingSet; pub use minimum_dummy_activities_pert::MinimumDummyActivitiesPert; +pub use minimum_edge_cost_flow::MinimumEdgeCostFlow; pub use minimum_feedback_arc_set::MinimumFeedbackArcSet; pub use minimum_feedback_vertex_set::MinimumFeedbackVertexSet; pub use minimum_geometric_connected_dominating_set::MinimumGeometricConnectedDominatingSet; +pub use minimum_graph_bandwidth::MinimumGraphBandwidth; pub use minimum_intersection_graph_basis::MinimumIntersectionGraphBasis; pub use minimum_maximal_matching::MinimumMaximalMatching; +pub use minimum_metric_dimension::MinimumMetricDimension; pub use minimum_multiway_cut::MinimumMultiwayCut; pub use minimum_sum_multicenter::MinimumSumMulticenter; pub use minimum_vertex_cover::MinimumVertexCover; @@ -205,6 +225,7 @@ pub use subgraph_isomorphism::SubgraphIsomorphism; pub use traveling_salesman::TravelingSalesman; pub use undirected_flow_lower_bounds::UndirectedFlowLowerBounds; pub use undirected_two_commodity_integral_flow::UndirectedTwoCommodityIntegralFlow; +pub use vertex_cover::VertexCover; #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { @@ -214,6 +235,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec Vec Vec { + /// The underlying graph. + graph: G, + /// Maximum cover size threshold. + k: usize, +} + +impl VertexCover { + /// Create a new VertexCover problem. + pub fn new(graph: G, k: usize) -> Self { + assert!(k > 0, "k must be positive"); + assert!(k <= graph.num_vertices(), "k must be at most num_vertices"); + Self { graph, k } + } + + /// Get a reference to the graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the cover size threshold. 
+ pub fn k(&self) -> usize { + self.k + } + + /// Get the number of vertices. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check if a configuration is a valid vertex cover of size ≤ k. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + if config.len() != self.graph.num_vertices() { + return false; + } + let count: usize = config.iter().filter(|&&v| v == 1).count(); + if count > self.k { + return false; + } + is_vertex_cover_config(&self.graph, config) + } +} + +impl Problem for VertexCover +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "VertexCover"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![2; self.graph.num_vertices()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(self.is_valid_solution(config)) + } +} + +crate::declare_variants! { + default VertexCover => "1.1996^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "vertex_cover_simplegraph", + instance: Box::new(VertexCover::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (0, 2), (2, 3)]), + 2, + )), + optimal_config: vec![1, 0, 1, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/vertex_cover.rs"] +mod tests; diff --git a/src/models/misc/clustering.rs b/src/models/misc/clustering.rs new file mode 100644 index 00000000..3bb34008 --- /dev/null +++ b/src/models/misc/clustering.rs @@ -0,0 +1,205 @@ +//! Clustering problem implementation. +//! +//! Given a distance matrix over n elements, a cluster count bound K, +//! and a diameter bound B, determine whether the elements can be partitioned +//! 
into at most K non-empty clusters such that all intra-cluster pairwise +//! distances are at most B. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "Clustering", + display_name: "Clustering", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Partition elements into at most K clusters where all intra-cluster distances are at most B", + fields: &[ + FieldInfo { name: "distances", type_name: "Vec>", description: "Symmetric distance matrix with zero diagonal" }, + FieldInfo { name: "num_clusters", type_name: "usize", description: "Maximum number of clusters K" }, + FieldInfo { name: "diameter_bound", type_name: "u64", description: "Maximum allowed intra-cluster pairwise distance B" }, + ], + } +} + +/// The Clustering problem. +/// +/// Given a set of `n` elements with pairwise distances, a cluster count +/// bound `K`, and a diameter bound `B`, determine whether there exists +/// a partition of the elements into at most `K` non-empty clusters such +/// that for every cluster, all pairwise distances within that cluster +/// are at most `B`. +/// +/// # Representation +/// +/// Each element `i` is assigned a cluster index `config[i] ∈ {0, ..., K-1}`. +/// The problem is satisfiable iff every non-empty cluster has all pairwise +/// distances ≤ B. 
+/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::Clustering; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 4 elements, 2 clusters, diameter bound 1 +/// let distances = vec![ +/// vec![0, 1, 3, 3], +/// vec![1, 0, 3, 3], +/// vec![3, 3, 0, 1], +/// vec![3, 3, 1, 0], +/// ]; +/// let problem = Clustering::new(distances, 2, 1); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Clustering { + /// Symmetric distance matrix with zero diagonal. + distances: Vec>, + /// Maximum number of clusters K. + num_clusters: usize, + /// Maximum allowed intra-cluster pairwise distance B. + diameter_bound: u64, +} + +impl Clustering { + /// Create a new Clustering instance. + /// + /// # Panics + /// + /// Panics if: + /// - `distances` is empty + /// - `distances` is not square + /// - `distances` is not symmetric + /// - diagonal entries are not zero + /// - `num_clusters` is zero + pub fn new(distances: Vec>, num_clusters: usize, diameter_bound: u64) -> Self { + let n = distances.len(); + assert!(n > 0, "Clustering requires at least one element"); + assert!(num_clusters > 0, "num_clusters must be at least 1"); + for (i, row) in distances.iter().enumerate() { + assert_eq!( + row.len(), + n, + "Distance matrix must be square: row {i} has {} columns, expected {n}", + row.len() + ); + assert_eq!( + distances[i][i], 0, + "Diagonal entry distances[{i}][{i}] must be 0" + ); + } + for (i, row_i) in distances.iter().enumerate() { + for j in (i + 1)..n { + assert_eq!( + row_i[j], distances[j][i], + "Distance matrix must be symmetric: distances[{i}][{j}] = {} != distances[{j}][{i}] = {}", + row_i[j], distances[j][i] + ); + } + } + Self { + distances, + num_clusters, + diameter_bound, + } + } + + /// Returns the distance matrix. 
+ pub fn distances(&self) -> &[Vec] { + &self.distances + } + + /// Returns the number of elements. + pub fn num_elements(&self) -> usize { + self.distances.len() + } + + /// Returns the maximum number of clusters K. + pub fn num_clusters(&self) -> usize { + self.num_clusters + } + + /// Returns the diameter bound B. + pub fn diameter_bound(&self) -> u64 { + self.diameter_bound + } + + /// Check if a configuration is a valid clustering. + fn is_valid_partition(&self, config: &[usize]) -> bool { + let n = self.num_elements(); + if config.len() != n { + return false; + } + if config.iter().any(|&c| c >= self.num_clusters) { + return false; + } + // Group elements by cluster in a single pass + let mut clusters: Vec> = vec![vec![]; self.num_clusters]; + for (i, &c) in config.iter().enumerate() { + clusters[c].push(i); + } + // Check all intra-cluster pairwise distances ≤ B + for members in &clusters { + for a in 0..members.len() { + for b in (a + 1)..members.len() { + if self.distances[members[a]][members[b]] > self.diameter_bound { + return false; + } + } + } + } + true + } +} + +impl Problem for Clustering { + const NAME: &'static str = "Clustering"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![self.num_clusters; self.num_elements()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(self.is_valid_partition(config)) + } +} + +crate::declare_variants! 
{ + default Clustering => "num_clusters^num_elements", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 6 elements in two tight groups {0,1,2} and {3,4,5} + // Intra-group distance = 1, inter-group distance = 3 + // K=2, B=1 + let distances = vec![ + vec![0, 1, 1, 3, 3, 3], + vec![1, 0, 1, 3, 3, 3], + vec![1, 1, 0, 3, 3, 3], + vec![3, 3, 3, 0, 1, 1], + vec![3, 3, 3, 1, 0, 1], + vec![3, 3, 3, 1, 1, 0], + ]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "clustering", + instance: Box::new(Clustering::new(distances, 2, 1)), + optimal_config: vec![0, 0, 0, 1, 1, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/clustering.rs"] +mod tests; diff --git a/src/models/misc/maximum_likelihood_ranking.rs b/src/models/misc/maximum_likelihood_ranking.rs new file mode 100644 index 00000000..a49aee98 --- /dev/null +++ b/src/models/misc/maximum_likelihood_ranking.rs @@ -0,0 +1,166 @@ +//! Maximum Likelihood Ranking problem implementation. +//! +//! Given an n x n comparison matrix A where a_ij + a_ji = c and a_ii = 0, +//! find a permutation pi minimizing the total disagreement cost: +//! sum over all position pairs (i > j) of a_{pi(i), pi(j)}. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MaximumLikelihoodRanking", + display_name: "Maximum Likelihood Ranking", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find a ranking minimizing total pairwise disagreement cost", + fields: &[ + FieldInfo { name: "matrix", type_name: "Vec>", description: "Comparison matrix A (a_ij + a_ji = c, a_ii = 0)" }, + ], + } +} + +/// The Maximum Likelihood Ranking problem. 
+/// +/// Given an n x n comparison matrix A where a_ij + a_ji = c (constant) +/// and a_ii = 0, find a permutation pi that minimizes the total +/// disagreement cost: sum_{i > j} a_{pi(i), pi(j)}. +/// +/// Each item is assigned a rank position (0-indexed). The configuration +/// maps item -> rank: `config[item] = rank`. The permutation pi maps +/// rank -> item (the inverse of config). +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::MaximumLikelihoodRanking; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let matrix = vec![ +/// vec![0, 4, 3, 5], +/// vec![1, 0, 4, 3], +/// vec![2, 1, 0, 4], +/// vec![0, 2, 1, 0], +/// ]; +/// let problem = MaximumLikelihoodRanking::new(matrix); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MaximumLikelihoodRanking { + matrix: Vec>, +} + +impl MaximumLikelihoodRanking { + /// Create a new MaximumLikelihoodRanking instance. + /// + /// # Panics + /// Panics if the matrix is not square, or if any diagonal element is nonzero. + pub fn new(matrix: Vec>) -> Self { + let n = matrix.len(); + for (i, row) in matrix.iter().enumerate() { + assert_eq!( + row.len(), + n, + "matrix must be square: row {i} has length {} but expected {n}", + row.len() + ); + assert_eq!( + row[i], 0, + "diagonal entries must be zero: matrix[{i}][{i}] = {}", + row[i] + ); + } + Self { matrix } + } + + /// Returns the comparison matrix. + pub fn matrix(&self) -> &Vec> { + &self.matrix + } + + /// Returns the number of items to rank. 
+ pub fn num_items(&self) -> usize { + self.matrix.len() + } +} + +impl Problem for MaximumLikelihoodRanking { + const NAME: &'static str = "MaximumLikelihoodRanking"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n = self.num_items(); + vec![n; n] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.num_items(); + + // Validate config length + if config.len() != n { + return Min(None); + } + + // Validate permutation: all values must be distinct and in 0..n + let mut seen = vec![false; n]; + for &rank in config { + if rank >= n || seen[rank] { + return Min(None); + } + seen[rank] = true; + } + + // config[item] = rank position of item + // Disagreement cost: for all pairs of items (a, b) where a is + // ranked AFTER b (config[a] > config[b]), add matrix[a][b]. + let mut cost: i64 = 0; + for a in 0..n { + for b in 0..n { + if a != b && config[a] > config[b] { + cost += self.matrix[a][b] as i64; + } + } + } + + Min(Some(cost)) + } +} + +crate::declare_variants! { + default MaximumLikelihoodRanking => "num_items * num_items * 2^num_items", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 4 items with comparison matrix. + // Optimal ranking: [0, 1, 2, 3] (identity) gives cost 7. + // Let's verify: items ranked in order 0,1,2,3. 
+ // Disagreement = sum over (a,b) where config[a] > config[b] of matrix[a][b] + // = matrix[1][0] + matrix[2][0] + matrix[2][1] + matrix[3][0] + matrix[3][1] + matrix[3][2] + // = 1 + 2 + 1 + 0 + 2 + 1 = 7 + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "maximum_likelihood_ranking", + instance: Box::new(MaximumLikelihoodRanking::new(matrix)), + optimal_config: vec![0, 1, 2, 3], + optimal_value: serde_json::json!(7), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/maximum_likelihood_ranking.rs"] +mod tests; diff --git a/src/models/misc/minimum_axiom_set.rs b/src/models/misc/minimum_axiom_set.rs new file mode 100644 index 00000000..705e155c --- /dev/null +++ b/src/models/misc/minimum_axiom_set.rs @@ -0,0 +1,239 @@ +//! Minimum Axiom Set problem implementation. +//! +//! Given a finite set of sentences S, a subset T ⊆ S of true sentences, and a set +//! of implications (where each implication has a set of antecedent sentences and a +//! single consequent sentence), find a smallest subset S₀ ⊆ T such that the +//! deductive closure of S₀ under the implications equals T. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! 
{ + ProblemSchemaEntry { + name: "MinimumAxiomSet", + display_name: "Minimum Axiom Set", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find smallest axiom subset whose deductive closure equals the true sentences", + fields: &[ + FieldInfo { name: "num_sentences", type_name: "usize", description: "Total number of sentences |S|" }, + FieldInfo { name: "true_sentences", type_name: "Vec", description: "Indices of true sentences T ⊆ S" }, + FieldInfo { name: "implications", type_name: "Vec<(Vec, usize)>", description: "Implication rules (antecedents, consequent)" }, + ], + } +} + +/// The Minimum Axiom Set problem. +/// +/// Given a set of sentences `S = {0, ..., num_sentences - 1}`, a subset +/// `T ⊆ S` of true sentences, and a list of implications where each +/// implication `(A, c)` means "if all sentences in A hold, then c holds", +/// find a smallest subset `S₀ ⊆ T` whose deductive closure under the +/// implications equals `T`. +/// +/// # Representation +/// +/// Each true sentence has a binary variable: `config[i] = 1` if +/// `true_sentences[i]` is selected as an axiom, `0` otherwise. +/// The configuration space is `vec![2; |T|]`. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::MinimumAxiomSet; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 8 sentences, all true, with implications forming a cycle +/// let problem = MinimumAxiomSet::new( +/// 8, +/// vec![0, 1, 2, 3, 4, 5, 6, 7], +/// vec![ +/// (vec![0], 2), (vec![0], 3), +/// (vec![1], 4), (vec![1], 5), +/// (vec![2, 4], 6), (vec![3, 5], 7), +/// (vec![6, 7], 0), (vec![6, 7], 1), +/// ], +/// ); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumAxiomSet { + /// Total number of sentences |S|. + num_sentences: usize, + /// Indices of true sentences T ⊆ S. 
+    true_sentences: Vec<usize>,
+    /// Implication rules: each (antecedents, consequent).
+    implications: Vec<(Vec<usize>, usize)>,
+}
+
+impl MinimumAxiomSet {
+    /// Create a new Minimum Axiom Set instance.
+    ///
+    /// # Panics
+    ///
+    /// Panics if any true sentence index is out of range,
+    /// if true sentences contain duplicates,
+    /// or if any implication references a sentence outside S.
+    pub fn new(
+        num_sentences: usize,
+        true_sentences: Vec<usize>,
+        implications: Vec<(Vec<usize>, usize)>,
+    ) -> Self {
+        // Validate true sentences
+        for &s in &true_sentences {
+            assert!(
+                s < num_sentences,
+                "True sentence index {s} out of range [0, {num_sentences})"
+            );
+        }
+        // Check no duplicates
+        let mut seen = vec![false; num_sentences];
+        for &s in &true_sentences {
+            assert!(!seen[s], "Duplicate true sentence index {s}");
+            seen[s] = true;
+        }
+        // Validate implications
+        for (antecedents, consequent) in &implications {
+            for &a in antecedents {
+                assert!(
+                    a < num_sentences,
+                    "Implication antecedent {a} out of range [0, {num_sentences})"
+                );
+            }
+            assert!(
+                *consequent < num_sentences,
+                "Implication consequent {consequent} out of range [0, {num_sentences})"
+            );
+        }
+        Self {
+            num_sentences,
+            true_sentences,
+            implications,
+        }
+    }
+
+    /// Returns the total number of sentences |S|.
+    pub fn num_sentences(&self) -> usize {
+        self.num_sentences
+    }
+
+    /// Returns the number of true sentences |T|.
+    pub fn num_true_sentences(&self) -> usize {
+        self.true_sentences.len()
+    }
+
+    /// Returns the number of implications.
+    pub fn num_implications(&self) -> usize {
+        self.implications.len()
+    }
+
+    /// Returns the true sentence indices.
+    pub fn true_sentences(&self) -> &[usize] {
+        &self.true_sentences
+    }
+
+    /// Returns the implications.
+    pub fn implications(&self) -> &[(Vec<usize>, usize)] {
+        &self.implications
+    }
+}
+
+/// Compute the deductive closure of a set of sentences under the given implications.
+///
+/// Starting from `current`, repeatedly applies implications until a fixpoint.
+fn deductive_closure(current: &mut [bool], implications: &[(Vec<usize>, usize)]) {
+    loop {
+        let mut changed = false;
+        for (antecedents, consequent) in implications {
+            if !current[*consequent] && antecedents.iter().all(|&a| current[a]) {
+                current[*consequent] = true;
+                changed = true;
+            }
+        }
+        if !changed {
+            break;
+        }
+    }
+}
+
+impl Problem for MinimumAxiomSet {
+    const NAME: &'static str = "MinimumAxiomSet";
+    type Value = Min<usize>;
+
+    fn variant() -> Vec<(&'static str, &'static str)> {
+        crate::variant_params![]
+    }
+
+    fn dims(&self) -> Vec<usize> {
+        vec![2; self.num_true_sentences()]
+    }
+
+    fn evaluate(&self, config: &[usize]) -> Min<usize> {
+        if config.len() != self.num_true_sentences() {
+            return Min(None);
+        }
+        if config.iter().any(|&v| v >= 2) {
+            return Min(None);
+        }
+
+        // Build the initial set of selected axioms
+        let mut current = vec![false; self.num_sentences];
+        let mut count = 0usize;
+        for (i, &v) in config.iter().enumerate() {
+            if v == 1 {
+                current[self.true_sentences[i]] = true;
+                count += 1;
+            }
+        }
+
+        // Compute deductive closure
+        deductive_closure(&mut current, &self.implications);
+
+        // Check if closure equals T
+        let closure_equals_t = self.true_sentences.iter().all(|&s| current[s]);
+
+        if closure_equals_t {
+            Min(Some(count))
+        } else {
+            Min(None)
+        }
+    }
+}
+
+crate::declare_variants!
{ + default MinimumAxiomSet => "2^num_true_sentences", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 8 sentences, all true, with implications forming a cycle + // Optimal: select {a, b} (indices 0, 1) → closure = all 8 + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_axiom_set", + instance: Box::new(MinimumAxiomSet::new( + 8, + vec![0, 1, 2, 3, 4, 5, 6, 7], + vec![ + (vec![0], 2), + (vec![0], 3), + (vec![1], 4), + (vec![1], 5), + (vec![2, 4], 6), + (vec![3, 5], 7), + (vec![6, 7], 0), + (vec![6, 7], 1), + ], + )), + optimal_config: vec![1, 1, 0, 0, 0, 0, 0, 0], + optimal_value: serde_json::json!(2), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/minimum_axiom_set.rs"] +mod tests; diff --git a/src/models/misc/minimum_code_generation_one_register.rs b/src/models/misc/minimum_code_generation_one_register.rs new file mode 100644 index 00000000..58fe83a2 --- /dev/null +++ b/src/models/misc/minimum_code_generation_one_register.rs @@ -0,0 +1,338 @@ +//! Minimum Code Generation on a One-Register Machine. +//! +//! Given a directed acyclic graph G = (V, A) with maximum out-degree 2 +//! (an expression DAG), find a program of minimum number of instructions +//! for a one-register machine (LOAD, STORE, OP) that computes all root +//! vertices. NP-complete [Bruno and Sethi, 1976]. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! 
{ + ProblemSchemaEntry { + name: "MinimumCodeGenerationOneRegister", + display_name: "Minimum Code Generation (One Register)", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find minimum-length instruction sequence for a one-register machine to evaluate an expression DAG", + fields: &[ + FieldInfo { name: "num_vertices", type_name: "usize", description: "Number of vertices n = |V|" }, + FieldInfo { name: "edges", type_name: "Vec<(usize, usize)>", description: "Directed arcs (parent, child) in the expression DAG" }, + FieldInfo { name: "num_leaves", type_name: "usize", description: "Number of leaf vertices (out-degree 0)" }, + ], + } +} + +/// Minimum Code Generation on a One-Register Machine. +/// +/// Given a directed acyclic graph G = (V, A) with maximum out-degree 2, +/// where leaves (out-degree 0) are input values in memory, internal vertices +/// are operations, and roots (in-degree 0) are values to compute, find a +/// program of minimum instructions using LOAD, STORE, and OP. +/// +/// # Representation +/// +/// The configuration is a permutation of internal (non-leaf) vertices +/// giving their evaluation order. `config[i]` is the evaluation position +/// for internal vertex `i` (0-indexed among internal vertices). +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::MinimumCodeGenerationOneRegister; +/// use problemreductions::{Problem, Solver, BruteForce, Min}; +/// +/// // 7 vertices: leaves {4,5,6}, internal {0,1,2,3} +/// // v3 = op(v5, v6), v1 = op(v3, v4), v2 = op(v3, v5), v0 = op(v1, v2) +/// let problem = MinimumCodeGenerationOneRegister::new( +/// 7, +/// vec![(0,1),(0,2),(1,3),(1,4),(2,3),(2,5),(3,5),(3,6)], +/// 3, +/// ); +/// let result = BruteForce::new().solve(&problem); +/// assert_eq!(result, Min(Some(8))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumCodeGenerationOneRegister { + /// Number of vertices |V|. 
+    num_vertices: usize,
+    /// Directed arcs (parent, child) in the expression DAG.
+    edges: Vec<(usize, usize)>,
+    /// Number of leaf vertices (out-degree 0).
+    num_leaves: usize,
+}
+
+impl MinimumCodeGenerationOneRegister {
+    /// Create a new instance.
+    ///
+    /// # Arguments
+    ///
+    /// * `num_vertices` - Total number of vertices
+    /// * `edges` - Directed arcs (parent, child); parent depends on child
+    /// * `num_leaves` - Number of leaf vertices (out-degree 0)
+    ///
+    /// # Panics
+    ///
+    /// Panics if any edge index is out of bounds, if any vertex has
+    /// out-degree > 2, or if `num_leaves > num_vertices`.
+    pub fn new(num_vertices: usize, edges: Vec<(usize, usize)>, num_leaves: usize) -> Self {
+        assert!(
+            num_leaves <= num_vertices,
+            "num_leaves ({num_leaves}) exceeds num_vertices ({num_vertices})"
+        );
+        let mut out_degree = vec![0usize; num_vertices];
+        for &(parent, child) in &edges {
+            assert!(
+                parent < num_vertices && child < num_vertices,
+                "Edge ({parent}, {child}) out of bounds for {num_vertices} vertices"
+            );
+            assert!(
+                parent != child,
+                "Self-loop ({parent}, {parent}) not allowed"
+            );
+            out_degree[parent] += 1;
+        }
+        for (v, &deg) in out_degree.iter().enumerate() {
+            assert!(deg <= 2, "Vertex {v} has out-degree {deg} > 2");
+        }
+        // Verify leaf count: leaves are vertices with out-degree 0
+        let actual_leaves = out_degree.iter().filter(|&&d| d == 0).count();
+        assert_eq!(
+            actual_leaves, num_leaves,
+            "Declared num_leaves ({num_leaves}) != actual leaf count ({actual_leaves})"
+        );
+        Self {
+            num_vertices,
+            edges,
+            num_leaves,
+        }
+    }
+
+    /// Get the number of vertices.
+    pub fn num_vertices(&self) -> usize {
+        self.num_vertices
+    }
+
+    /// Get the number of edges.
+    pub fn num_edges(&self) -> usize {
+        self.edges.len()
+    }
+
+    /// Get the number of leaf vertices.
+    pub fn num_leaves(&self) -> usize {
+        self.num_leaves
+    }
+
+    /// Get the number of internal (non-leaf) vertices.
+ pub fn num_internal(&self) -> usize { + self.num_vertices - self.num_leaves + } + + /// Get the edges. + pub fn edges(&self) -> &[(usize, usize)] { + &self.edges + } + + /// Compute the children (operands) of each vertex from the edge list. + fn children(&self) -> Vec> { + let mut ch = vec![vec![]; self.num_vertices]; + for &(parent, child) in &self.edges { + ch[parent].push(child); + } + ch + } + + /// Determine which vertices are internal (non-leaf, i.e. out-degree > 0). + fn internal_vertices(&self) -> Vec { + let children = self.children(); + (0..self.num_vertices) + .filter(|&v| !children[v].is_empty()) + .collect() + } + + /// Determine which vertices are leaves (out-degree 0). + fn leaf_set(&self) -> Vec { + let children = self.children(); + (0..self.num_vertices) + .map(|v| children[v].is_empty()) + .collect() + } + + /// Simulate the one-register machine for a given evaluation order of + /// internal vertices and return the instruction count, or `None` if the + /// ordering is invalid (not a permutation or violates dependencies). 
+ pub fn simulate(&self, config: &[usize]) -> Option { + let internal = self.internal_vertices(); + let n_internal = internal.len(); + if config.len() != n_internal { + return None; + } + + // config[i] = evaluation position for internal vertex index i + // (i indexes into the `internal` array) + // Build order: order[pos] = index into `internal` + let mut order = vec![0usize; n_internal]; + let mut used = vec![false; n_internal]; + for (i, &pos) in config.iter().enumerate() { + if pos >= n_internal { + return None; + } + if used[pos] { + return None; + } + used[pos] = true; + order[pos] = i; + } + + let children = self.children(); + let is_leaf = self.leaf_set(); + + // Track which internal vertices have been computed + let mut computed = vec![false; self.num_vertices]; + // All leaves are "computed" (available in memory) + for v in 0..self.num_vertices { + if is_leaf[v] { + computed[v] = true; + } + } + + // Build: for each vertex, which future internal vertices need it? + // We'll track this dynamically. 
+ let mut future_uses = vec![0usize; self.num_vertices]; + for &idx in &order { + let v = internal[idx]; + for &c in &children[v] { + future_uses[c] += 1; + } + } + + let mut register: Option = None; // which vertex value is in register + let mut in_memory = vec![false; self.num_vertices]; + // Leaves start in memory + for v in 0..self.num_vertices { + if is_leaf[v] { + in_memory[v] = true; + } + } + + let mut instructions = 0usize; + + for step in 0..n_internal { + let v = internal[order[step]]; + + // Check dependencies: all children must be available + for &c in &children[v] { + let available = in_memory[c] || register == Some(c); + if !available { + return None; // child was computed but lost (not stored, overwritten) + } + } + + // Decrement future uses for children of v + for &c in &children[v] { + future_uses[c] -= 1; + } + + let operands = &children[v]; + + // Before computing v, check if we need to STORE the current register value + // We need to store if: + // 1. Register holds a value + // 2. That value is still needed in the future + // 3. 
That value is not already in memory + if let Some(r) = register { + if !in_memory[r] && future_uses[r] > 0 { + instructions += 1; // STORE + in_memory[r] = true; + } + } + + // Now compute v + if operands.len() == 2 { + let c0 = operands[0]; + let c1 = operands[1]; + let one_in_register = (register == Some(c0) && in_memory[c1]) + || (register == Some(c1) && in_memory[c0]); + if one_in_register { + instructions += 1; // OP v (one operand in register, other in memory) + } else { + // Need to LOAD one operand, OP with the other from memory + instructions += 1; // LOAD + instructions += 1; // OP + } + } else if operands.len() == 1 { + let c0 = operands[0]; + if register == Some(c0) { + instructions += 1; // OP v (unary) + } else { + instructions += 1; // LOAD + instructions += 1; // OP + } + } + + register = Some(v); + } + + Some(instructions) + } +} + +impl Problem for MinimumCodeGenerationOneRegister { + const NAME: &'static str = "MinimumCodeGenerationOneRegister"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n_internal = self.num_internal(); + vec![n_internal; n_internal] + } + + fn evaluate(&self, config: &[usize]) -> Min { + Min(self.simulate(config)) + } +} + +crate::declare_variants! { + default MinimumCodeGenerationOneRegister => "2 ^ num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_code_generation_one_register", + // Issue #900 example: 7 vertices, leaves {4,5,6}, internal {0,1,2,3} + // Edges: (0,1),(0,2),(1,3),(1,4),(2,3),(2,5),(3,5),(3,6) + // Optimal order: v3, v2, v1, v0 with positions [3, 2, 1, 0] + // Wait — config[i] = position for internal vertex i. 
+ // Internal vertices sorted: [0, 1, 2, 3] + // Optimal evaluation order: v3, v2, v1, v0 + // v3 at position 0, v2 at position 1, v1 at position 2, v0 at position 3 + // So config = [3, 2, 1, 0] (internal idx 0=v0 -> pos 3, idx 1=v1 -> pos 2, ...) + instance: Box::new(MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + )), + optimal_config: vec![3, 2, 1, 0], + optimal_value: serde_json::json!(8), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/minimum_code_generation_one_register.rs"] +mod tests; diff --git a/src/models/misc/minimum_code_generation_parallel_assignments.rs b/src/models/misc/minimum_code_generation_parallel_assignments.rs new file mode 100644 index 00000000..09f4595f --- /dev/null +++ b/src/models/misc/minimum_code_generation_parallel_assignments.rs @@ -0,0 +1,190 @@ +//! Minimum Code Generation for Parallel Assignments problem implementation. +//! +//! Given a set of simultaneous variable assignments, find an execution ordering +//! (permutation) that minimizes the number of backward dependencies -- cases where +//! a variable is overwritten before a later assignment reads its old value. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! 
{ + ProblemSchemaEntry { + name: "MinimumCodeGenerationParallelAssignments", + display_name: "Minimum Code Generation (Parallel Assignments)", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find an ordering of parallel assignments minimizing backward dependencies", + fields: &[ + FieldInfo { name: "num_variables", type_name: "usize", description: "Number of variables" }, + FieldInfo { name: "assignments", type_name: "Vec<(usize, Vec)>", description: "Each assignment (target_var, read_vars)" }, + ], + } +} + +/// The Minimum Code Generation for Parallel Assignments problem. +/// +/// Given a set V of variables and a collection of assignments A_i: "v_i <- op(B_i)" +/// where v_i is the target variable and B_i is the set of variables read, +/// find a permutation of the assignments that minimizes the number of backward +/// dependencies. A backward dependency occurs when assignment pi(i) writes +/// variable v and assignment pi(j) (j > i) reads v. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::MinimumCodeGenerationParallelAssignments; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 4 variables, 4 assignments: +/// // A_0: a <- op(b, c) -> (0, [1, 2]) +/// // A_1: b <- op(a) -> (1, [0]) +/// // A_2: c <- op(d) -> (2, [3]) +/// // A_3: d <- op(b, c) -> (3, [1, 2]) +/// let assignments = vec![ +/// (0, vec![1, 2]), +/// (1, vec![0]), +/// (2, vec![3]), +/// (3, vec![1, 2]), +/// ]; +/// let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumCodeGenerationParallelAssignments { + num_variables: usize, + assignments: Vec<(usize, Vec)>, +} + +impl MinimumCodeGenerationParallelAssignments { + /// Create a new MinimumCodeGenerationParallelAssignments instance. 
+ /// + /// # Panics + /// Panics if any target variable or read variable index is >= num_variables. + pub fn new(num_variables: usize, assignments: Vec<(usize, Vec)>) -> Self { + for (i, (target, reads)) in assignments.iter().enumerate() { + assert!( + *target < num_variables, + "assignment {i}: target variable {target} >= num_variables {num_variables}" + ); + for &r in reads { + assert!( + r < num_variables, + "assignment {i}: read variable {r} >= num_variables {num_variables}" + ); + } + } + Self { + num_variables, + assignments, + } + } + + /// Returns the number of variables. + pub fn num_variables(&self) -> usize { + self.num_variables + } + + /// Returns the number of assignments. + pub fn num_assignments(&self) -> usize { + self.assignments.len() + } + + /// Returns the assignments. + pub fn assignments(&self) -> &[(usize, Vec)] { + &self.assignments + } +} + +impl Problem for MinimumCodeGenerationParallelAssignments { + const NAME: &'static str = "MinimumCodeGenerationParallelAssignments"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let m = self.num_assignments(); + vec![m; m] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let m = self.num_assignments(); + + // Validate config length + if config.len() != m { + return Min(None); + } + + // Validate permutation: all values must be distinct and in 0..m + let mut seen = vec![false; m]; + for &pos in config { + if pos >= m || seen[pos] { + return Min(None); + } + seen[pos] = true; + } + + // config[i] = position of assignment i in execution order + // Build execution order: order[pos] = assignment index + let mut order = vec![0usize; m]; + for (assignment_idx, &pos) in config.iter().enumerate() { + order[pos] = assignment_idx; + } + + // Count backward dependencies: for each pair (i, j) where i < j + // (i executes before j), check if the target variable of order[i] + // is in the read set of order[j] + let mut 
count = 0usize; + for (i, &earlier) in order.iter().enumerate() { + let (target_var, _) = &self.assignments[earlier]; + for &later in &order[(i + 1)..] { + let (_, read_vars) = &self.assignments[later]; + if read_vars.contains(target_var) { + count += 1; + } + } + } + + Min(Some(count)) + } +} + +crate::declare_variants! { + default MinimumCodeGenerationParallelAssignments => "2^num_assignments", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 4 variables, 4 assignments: + // A_0: a <- op(b, c) -> (0, [1, 2]) + // A_1: b <- op(a) -> (1, [0]) + // A_2: c <- op(d) -> (2, [3]) + // A_3: d <- op(b, c) -> (3, [1, 2]) + // + // Optimal ordering: config [0, 3, 1, 2] means + // A_0 at position 0, A_1 at position 3, A_2 at position 1, A_3 at position 2 + // Order: (A_0, A_2, A_3, A_1) + // Backward deps: A_0 writes a, A_1 reads a (later) -> 1 + // A_2 writes c, A_3 reads c (later) -> 1 + // Total: 2 + let assignments = vec![(0, vec![1, 2]), (1, vec![0]), (2, vec![3]), (3, vec![1, 2])]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_code_generation_parallel_assignments", + instance: Box::new(MinimumCodeGenerationParallelAssignments::new( + 4, + assignments, + )), + optimal_config: vec![0, 3, 1, 2], + optimal_value: serde_json::json!(2), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/minimum_code_generation_parallel_assignments.rs"] +mod tests; diff --git a/src/models/misc/minimum_code_generation_unlimited_registers.rs b/src/models/misc/minimum_code_generation_unlimited_registers.rs new file mode 100644 index 00000000..e4144d60 --- /dev/null +++ b/src/models/misc/minimum_code_generation_unlimited_registers.rs @@ -0,0 +1,369 @@ +//! Minimum Code Generation with Unlimited Registers. +//! +//! Given a directed acyclic graph G = (V, A) with maximum out-degree 2 +//! (an expression DAG) and a partition of arcs into left (L) and right (R) +//! 
operand sets, find a program of minimum number of instructions for an +//! unlimited-register machine using 2-address instructions. The left operand's +//! register is destroyed (overwritten by the result); a LOAD (copy) instruction +//! is needed to preserve values before destruction. NP-complete +//! [Aho, Johnson, and Ullman, 1977]. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumCodeGenerationUnlimitedRegisters", + display_name: "Minimum Code Generation (Unlimited Registers)", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find minimum-length instruction sequence for an unlimited-register machine with 2-address instructions to evaluate an expression DAG", + fields: &[ + FieldInfo { name: "num_vertices", type_name: "usize", description: "Number of vertices n = |V|" }, + FieldInfo { name: "left_arcs", type_name: "Vec<(usize, usize)>", description: "Left operand arcs L: (parent, child) — child's register is destroyed" }, + FieldInfo { name: "right_arcs", type_name: "Vec<(usize, usize)>", description: "Right operand arcs R: (parent, child) — child's register is preserved" }, + ], + } +} + +/// Minimum Code Generation with Unlimited Registers. +/// +/// Given a directed acyclic graph G = (V, A) with maximum out-degree 2, +/// where arcs are partitioned into left (L) and right (R) operand sets, +/// leaves (out-degree 0) are input values each in its own register, +/// internal vertices are 2-address operations (the left operand's register +/// is overwritten by the result), and roots (in-degree 0) are values to +/// compute, find a program of minimum instructions using OP and LOAD (copy). +/// +/// # Representation +/// +/// The configuration is a permutation of internal (non-leaf) vertices +/// giving their evaluation order. 
`config[i]` is the evaluation position +/// for internal vertex `i` (0-indexed among internal vertices). +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::MinimumCodeGenerationUnlimitedRegisters; +/// use problemreductions::{Problem, Solver, BruteForce, Min}; +/// +/// // 5 vertices: leaves {3,4}, internal {0,1,2} +/// // v1 = op(v3, v4), v2 = op(v3, v4), v0 = op(v1, v2) +/// let problem = MinimumCodeGenerationUnlimitedRegisters::new( +/// 5, +/// vec![(1,3),(2,3),(0,1)], // left arcs (child destroyed) +/// vec![(1,4),(2,4),(0,2)], // right arcs (child preserved) +/// ); +/// let result = BruteForce::new().solve(&problem); +/// assert_eq!(result, Min(Some(4))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumCodeGenerationUnlimitedRegisters { + /// Number of vertices |V|. + num_vertices: usize, + /// Left operand arcs (parent, child) — child's register is destroyed. + left_arcs: Vec<(usize, usize)>, + /// Right operand arcs (parent, child) — child's register is preserved. + right_arcs: Vec<(usize, usize)>, +} + +impl MinimumCodeGenerationUnlimitedRegisters { + /// Create a new instance. + /// + /// # Arguments + /// + /// * `num_vertices` - Total number of vertices + /// * `left_arcs` - Left operand arcs (parent, child); child register is destroyed by OP + /// * `right_arcs` - Right operand arcs (parent, child); child register is preserved + /// + /// # Panics + /// + /// Panics if any arc index is out of bounds, if any vertex has out-degree > 2, + /// if left and right arcs for binary vertices are inconsistent, or if a vertex + /// has a self-loop. 
+ pub fn new( + num_vertices: usize, + left_arcs: Vec<(usize, usize)>, + right_arcs: Vec<(usize, usize)>, + ) -> Self { + let mut left_count = vec![0usize; num_vertices]; + let mut right_count = vec![0usize; num_vertices]; + + for &(parent, child) in &left_arcs { + assert!( + parent < num_vertices && child < num_vertices, + "Left arc ({parent}, {child}) out of bounds for {num_vertices} vertices" + ); + assert!( + parent != child, + "Self-loop ({parent}, {parent}) not allowed" + ); + left_count[parent] += 1; + } + for &(parent, child) in &right_arcs { + assert!( + parent < num_vertices && child < num_vertices, + "Right arc ({parent}, {child}) out of bounds for {num_vertices} vertices" + ); + assert!( + parent != child, + "Self-loop ({parent}, {parent}) not allowed" + ); + right_count[parent] += 1; + } + + for v in 0..num_vertices { + let out = left_count[v] + right_count[v]; + assert!(out <= 2, "Vertex {v} has out-degree {out} > 2"); + // Binary vertex: exactly one left and one right + if out == 2 { + assert!( + left_count[v] == 1 && right_count[v] == 1, + "Binary vertex {v} must have exactly 1 left and 1 right arc" + ); + } + // Unary vertex: one left arc (result overwrites operand register) + if out == 1 { + assert!( + left_count[v] == 1 && right_count[v] == 0, + "Unary vertex {v} must have exactly 1 left arc and 0 right arcs" + ); + } + } + + Self { + num_vertices, + left_arcs, + right_arcs, + } + } + + /// Get the number of vertices. + pub fn num_vertices(&self) -> usize { + self.num_vertices + } + + /// Get the left operand arcs. + pub fn left_arcs(&self) -> &[(usize, usize)] { + &self.left_arcs + } + + /// Get the right operand arcs. + pub fn right_arcs(&self) -> &[(usize, usize)] { + &self.right_arcs + } + + /// Get the number of leaf vertices (out-degree 0). + pub fn num_leaves(&self) -> usize { + self.num_vertices - self.num_internal() + } + + /// Get the number of internal (non-leaf) vertices. 
+ pub fn num_internal(&self) -> usize { + let mut has_children = vec![false; self.num_vertices]; + for &(parent, _) in &self.left_arcs { + has_children[parent] = true; + } + for &(parent, _) in &self.right_arcs { + has_children[parent] = true; + } + has_children.iter().filter(|&&b| b).count() + } + + /// Determine which vertices are internal (non-leaf, i.e. out-degree > 0). + fn internal_vertices(&self) -> Vec { + let mut has_children = vec![false; self.num_vertices]; + for &(parent, _) in &self.left_arcs { + has_children[parent] = true; + } + for &(parent, _) in &self.right_arcs { + has_children[parent] = true; + } + (0..self.num_vertices) + .filter(|&v| has_children[v]) + .collect() + } + + /// Get the left child of a vertex, if any. + fn left_child(&self, v: usize) -> Option { + self.left_arcs + .iter() + .find(|&&(parent, _)| parent == v) + .map(|&(_, child)| child) + } + + /// Get the right child of a vertex, if any. + fn right_child(&self, v: usize) -> Option { + self.right_arcs + .iter() + .find(|&&(parent, _)| parent == v) + .map(|&(_, child)| child) + } + + /// Simulate the unlimited-register machine for a given evaluation order + /// of internal vertices and return the instruction count, or `None` if + /// the ordering is invalid (not a permutation or violates dependencies). 
+ /// + /// With unlimited registers: + /// - Each leaf starts in its own register + /// - OP v: computes v, result overwrites the left operand's register + /// - LOAD: copies a register value (needed when a left operand is still + /// needed later and would be destroyed) + /// - Cost = num_OPs + num_LOADs + pub fn simulate(&self, config: &[usize]) -> Option { + let internal = self.internal_vertices(); + let n_internal = internal.len(); + if config.len() != n_internal { + return None; + } + + // config[i] = evaluation position for internal vertex index i + // Build order: order[pos] = index into `internal` + let mut order = vec![0usize; n_internal]; + let mut used = vec![false; n_internal]; + for (i, &pos) in config.iter().enumerate() { + if pos >= n_internal { + return None; + } + if used[pos] { + return None; + } + used[pos] = true; + order[pos] = i; + } + + // Track which vertices have been computed + let mut computed = vec![false; self.num_vertices]; + // All leaves are "computed" (available in registers from the start) + let has_children: Vec = { + let mut hc = vec![false; self.num_vertices]; + for &(parent, _) in &self.left_arcs { + hc[parent] = true; + } + for &(parent, _) in &self.right_arcs { + hc[parent] = true; + } + hc + }; + for v in 0..self.num_vertices { + if !has_children[v] { + computed[v] = true; + } + } + + // For each value, count how many future operations still need it + // as a LEFT operand. Only left operands get destroyed. + // But we also need to know total future uses (left + right) to know + // if a value is still needed at all. 
+ let mut future_left_uses = vec![0usize; self.num_vertices]; + let mut future_right_uses = vec![0usize; self.num_vertices]; + for &idx in &order { + let v = internal[idx]; + if let Some(lc) = self.left_child(v) { + future_left_uses[lc] += 1; + } + if let Some(rc) = self.right_child(v) { + future_right_uses[rc] += 1; + } + } + + let mut instructions = 0usize; + + // With unlimited registers, each value has its own register. + // When OP v executes: result goes into left_child's register. + // If left_child's value is still needed by a future operation, + // we must LOAD (copy) it first. + + for step in 0..n_internal { + let v = internal[order[step]]; + let lc = self.left_child(v); + let rc = self.right_child(v); + + // Check dependencies + if let Some(l) = lc { + if !computed[l] { + return None; + } + } + if let Some(r) = rc { + if !computed[r] { + return None; + } + } + + // Decrement future use counts + if let Some(l) = lc { + future_left_uses[l] -= 1; + } + if let Some(r) = rc { + future_right_uses[r] -= 1; + } + + // Check if left operand needs to be copied before destruction + if let Some(l) = lc { + let still_needed = future_left_uses[l] + future_right_uses[l] > 0; + if still_needed { + instructions += 1; // LOAD (copy) + } + } + + // OP v + instructions += 1; + + // Mark v as computed + computed[v] = true; + } + + Some(instructions) + } +} + +impl Problem for MinimumCodeGenerationUnlimitedRegisters { + const NAME: &'static str = "MinimumCodeGenerationUnlimitedRegisters"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n_internal = self.num_internal(); + vec![n_internal; n_internal] + } + + fn evaluate(&self, config: &[usize]) -> Min { + Min(self.simulate(config)) + } +} + +crate::declare_variants! 
{
    default MinimumCodeGenerationUnlimitedRegisters => "2 ^ num_vertices",
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_model_example_specs() -> Vec<crate::example_db::specs::ModelExampleSpec> {
    vec![crate::example_db::specs::ModelExampleSpec {
        id: "minimum_code_generation_unlimited_registers",
        // Issue #902 example: 5 vertices, leaves {3,4}, internal {0,1,2}
        // left_arcs: (1,3),(2,3),(0,1)
        // right_arcs: (1,4),(2,4),(0,2)
        // Optimal order: v1,v2,v0 with 1 copy of v3 = 4 instructions
        // Internal vertices sorted: [0, 1, 2]
        // Order v1(pos 0), v2(pos 1), v0(pos 2)
        // config[0]=2 (v0 at pos 2), config[1]=0 (v1 at pos 0), config[2]=1 (v2 at pos 1)
        instance: Box::new(MinimumCodeGenerationUnlimitedRegisters::new(
            5,
            vec![(1, 3), (2, 3), (0, 1)],
            vec![(1, 4), (2, 4), (0, 2)],
        )),
        optimal_config: vec![2, 0, 1],
        optimal_value: serde_json::json!(4),
    }]
}

#[cfg(test)]
#[path = "../../unit_tests/models/misc/minimum_code_generation_unlimited_registers.rs"]
mod tests;

// ---- file: src/models/misc/minimum_decision_tree.rs (new) ----

//! Minimum Decision Tree problem implementation.
//!
//! Given a set of objects distinguished by binary tests, find a decision tree
//! that identifies each object with minimum total external path length
//! (sum of depths of all leaves).

use crate::registry::{FieldInfo, ProblemSchemaEntry};
use crate::traits::Problem;
use crate::types::Min;
use serde::{Deserialize, Serialize};

inventory::submit! {
    ProblemSchemaEntry {
        name: "MinimumDecisionTree",
        display_name: "Minimum Decision Tree",
        aliases: &[],
        dimensions: &[],
        module_path: module_path!(),
        description: "Find decision tree identifying objects with minimum total path length",
        fields: &[
            FieldInfo { name: "test_matrix", type_name: "Vec<Vec<bool>>", description: "Binary matrix: test_matrix[j][i] = object i passes test j" },
            FieldInfo { name: "num_objects", type_name: "usize", description: "Number of objects to identify" },
            FieldInfo { name: "num_tests", type_name: "usize", description: "Number of available binary tests" },
        ],
    }
}

/// Minimum Decision Tree problem.
///
/// Given objects distinguished by binary tests, find a decision tree
/// minimizing the total external path length (sum of leaf depths).
///
/// The configuration encodes a flattened complete binary tree of depth
/// `num_objects - 1`. Each internal node stores either a test index
/// (0..num_tests-1) or a sentinel value `num_tests` meaning "leaf".
///
/// # Example
///
/// ```
/// use problemreductions::models::misc::MinimumDecisionTree;
/// use problemreductions::{Problem, Solver, BruteForce};
///
/// let problem = MinimumDecisionTree::new(
///     vec![
///         vec![true, true, false, false],  // T0
///         vec![true, false, false, false], // T1
///         vec![false, true, false, true],  // T2
///     ],
///     4,
///     3,
/// );
/// let solver = BruteForce::new();
/// let value = solver.solve(&problem);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MinimumDecisionTree {
    /// Binary matrix: test_matrix[j][i] = true iff object i passes test j.
    test_matrix: Vec<Vec<bool>>,
    /// Number of objects.
    num_objects: usize,
    /// Number of tests.
    num_tests: usize,
}

impl MinimumDecisionTree {
    /// Create a new MinimumDecisionTree problem.
+ /// + /// # Panics + /// - If num_objects < 2 or num_tests < 1 + /// - If test_matrix dimensions don't match + /// - If tests don't distinguish all object pairs + pub fn new(test_matrix: Vec>, num_objects: usize, num_tests: usize) -> Self { + assert!(num_objects >= 2, "Need at least 2 objects"); + assert!(num_tests >= 1, "Need at least 1 test"); + assert_eq!( + test_matrix.len(), + num_tests, + "test_matrix must have num_tests rows" + ); + for (j, row) in test_matrix.iter().enumerate() { + assert_eq!( + row.len(), + num_objects, + "test_matrix[{j}] must have num_objects columns" + ); + } + // Check that every pair of objects is distinguished by at least one test + for a in 0..num_objects { + for b in (a + 1)..num_objects { + let distinguished = (0..num_tests).any(|j| test_matrix[j][a] != test_matrix[j][b]); + assert!( + distinguished, + "Objects {a} and {b} are not distinguished by any test" + ); + } + } + Self { + test_matrix, + num_objects, + num_tests, + } + } + + /// Get the number of objects. + pub fn num_objects(&self) -> usize { + self.num_objects + } + + /// Get the number of tests. + pub fn num_tests(&self) -> usize { + self.num_tests + } + + /// Get the test matrix. + pub fn test_matrix(&self) -> &[Vec] { + &self.test_matrix + } + + /// Number of internal node slots in the flattened complete binary tree. + fn num_tree_slots(&self) -> usize { + (1usize << (self.num_objects - 1)) - 1 + } + + /// Sentinel value meaning "this node is a leaf". + fn leaf_sentinel(&self) -> usize { + self.num_tests + } + + /// Simulate the decision tree for all objects and return total external path length, + /// or None if the tree is invalid (doesn't identify all objects uniquely). 
+ fn simulate(&self, config: &[usize]) -> Option { + let sentinel = self.leaf_sentinel(); + let max_slots = self.num_tree_slots(); + let mut seen_leaves = std::collections::HashSet::new(); + let mut total_depth = 0usize; + + for obj in 0..self.num_objects { + let mut node = 0usize; + let mut depth = 0usize; + + loop { + if node >= max_slots || config[node] == sentinel { + // Two objects at same leaf — invalid + if !seen_leaves.insert(node) { + return None; + } + total_depth += depth; + break; + } + + let test_idx = config[node]; + debug_assert!(test_idx < self.num_tests); + + let result = self.test_matrix[test_idx][obj]; + node = if result { 2 * node + 2 } else { 2 * node + 1 }; + depth += 1; + + if depth > self.num_objects { + return None; + } + } + } + + Some(total_depth) + } +} + +impl Problem for MinimumDecisionTree { + const NAME: &'static str = "MinimumDecisionTree"; + type Value = Min; + + fn dims(&self) -> Vec { + // Each internal node can hold test 0..num_tests-1 or sentinel (leaf) + vec![self.num_tests + 1; self.num_tree_slots()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.num_tree_slots() { + return Min(None); + } + Min(self.simulate(config)) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! 
{
    default MinimumDecisionTree => "num_tests^num_objects",
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_model_example_specs() -> Vec<crate::example_db::specs::ModelExampleSpec> {
    vec![crate::example_db::specs::ModelExampleSpec {
        id: "minimum_decision_tree",
        instance: Box::new(MinimumDecisionTree::new(
            vec![
                vec![true, true, false, false],
                vec![true, false, false, false],
                vec![false, true, false, true],
            ],
            4,
            3,
        )),
        // T0 at root, T2 left, T1 right, rest are leaves (sentinel=3)
        optimal_config: vec![0, 2, 1, 3, 3, 3, 3],
        optimal_value: serde_json::json!(8),
    }]
}

#[cfg(test)]
#[path = "../../unit_tests/models/misc/minimum_decision_tree.rs"]
mod tests;

// ---- file: src/models/misc/minimum_disjunctive_normal_form.rs (new) ----

//! Minimum Disjunctive Normal Form (DNF) problem implementation.
//!
//! Given a Boolean function specified by its truth table, find a DNF formula
//! with the minimum number of terms (prime implicants) equivalent to the function.
//! NP-hard (Masek 1979, via reduction from Minimum Cover).

use crate::registry::{FieldInfo, ProblemSchemaEntry};
use crate::traits::Problem;
use crate::types::Min;
use serde::{Deserialize, Serialize};

inventory::submit! {
    ProblemSchemaEntry {
        name: "MinimumDisjunctiveNormalForm",
        display_name: "Minimum Disjunctive Normal Form",
        aliases: &["MinDNF"],
        dimensions: &[],
        module_path: module_path!(),
        description: "Find minimum-term DNF formula equivalent to a Boolean function",
        fields: &[
            FieldInfo { name: "num_variables", type_name: "usize", description: "Number of Boolean variables" },
            FieldInfo { name: "truth_table", type_name: "Vec<bool>", description: "Truth table of length 2^n" },
        ],
    }
}

/// A prime implicant, represented as a pattern over n variables.
/// Each entry is `Some(true)` (positive literal), `Some(false)` (negative literal),
/// or `None` (don't care).
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct PrimeImplicant {
    /// Pattern: one entry per variable.
    pub pattern: Vec<Option<bool>>,
}

impl PrimeImplicant {
    /// Check if this prime implicant covers a given minterm (as a bit pattern).
    ///
    /// Bit 0 of `minterm` corresponds to the LAST variable of `pattern`
    /// (big-endian variable order); `None` positions match either value.
    pub fn covers(&self, minterm: usize) -> bool {
        for (i, &p) in self.pattern.iter().enumerate() {
            if let Some(val) = p {
                let bit = ((minterm >> (self.pattern.len() - 1 - i)) & 1) == 1;
                if bit != val {
                    return false;
                }
            }
        }
        true
    }
}

/// Minimum Disjunctive Normal Form problem.
///
/// Given a Boolean function by its truth table, find the minimum number of
/// prime implicants whose disjunction (OR) is equivalent to the function.
///
/// The constructor computes all prime implicants via Quine-McCluskey.
/// The configuration is a binary selection over prime implicants.
///
/// # Example
///
/// ```
/// use problemreductions::models::misc::MinimumDisjunctiveNormalForm;
/// use problemreductions::{Problem, Solver, BruteForce};
///
/// // f(x1,x2,x3) = 1 when exactly 1 or 2 variables are true
/// let truth_table = vec![false, true, true, true, true, true, true, false];
/// let problem = MinimumDisjunctiveNormalForm::new(3, truth_table);
/// let solver = BruteForce::new();
/// let value = solver.solve(&problem);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MinimumDisjunctiveNormalForm {
    /// Number of Boolean variables.
    num_variables: usize,
    /// Truth table of length 2^n.
    truth_table: Vec<bool>,
    /// Precomputed prime implicants.
    prime_implicants: Vec<PrimeImplicant>,
    /// Minterms (indices where truth table is true).
    minterms: Vec<usize>,
}

impl MinimumDisjunctiveNormalForm {
    /// Create a new MinimumDisjunctiveNormalForm problem.
+ /// + /// # Panics + /// - If truth_table length != 2^num_variables + /// - If the function is identically false (no minterms) + pub fn new(num_variables: usize, truth_table: Vec) -> Self { + assert!(num_variables >= 1, "Need at least 1 variable"); + assert_eq!( + truth_table.len(), + 1 << num_variables, + "Truth table must have 2^n entries" + ); + + let minterms: Vec = truth_table + .iter() + .enumerate() + .filter_map(|(i, &v)| if v { Some(i) } else { None }) + .collect(); + assert!( + !minterms.is_empty(), + "Function must have at least one minterm" + ); + + let prime_implicants = compute_prime_implicants(num_variables, &minterms); + + Self { + num_variables, + truth_table, + prime_implicants, + minterms, + } + } + + /// Get the number of variables. + pub fn num_variables(&self) -> usize { + self.num_variables + } + + /// Get the truth table. + pub fn truth_table(&self) -> &[bool] { + &self.truth_table + } + + /// Get the prime implicants. + pub fn prime_implicants(&self) -> &[PrimeImplicant] { + &self.prime_implicants + } + + /// Get the number of prime implicants. + pub fn num_prime_implicants(&self) -> usize { + self.prime_implicants.len() + } + + /// Get the minterms. 
+ pub fn minterms(&self) -> &[usize] { + &self.minterms + } +} + +impl Problem for MinimumDisjunctiveNormalForm { + const NAME: &'static str = "MinimumDisjunctiveNormalForm"; + type Value = Min; + + fn dims(&self) -> Vec { + vec![2; self.prime_implicants.len()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.prime_implicants.len() { + return Min(None); + } + + // Collect selected prime implicants + let selected: Vec = config + .iter() + .enumerate() + .filter_map(|(i, &v)| if v == 1 { Some(i) } else { None }) + .collect(); + + if selected.is_empty() { + return Min(None); + } + + // Check that all minterms are covered + for &mt in &self.minterms { + let covered = selected + .iter() + .any(|&pi_idx| self.prime_implicants[pi_idx].covers(mt)); + if !covered { + return Min(None); + } + } + + Min(Some(selected.len())) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default MinimumDisjunctiveNormalForm => "2^(3^num_variables)", +} + +/// Compute all prime implicants of a Boolean function using Quine-McCluskey. +/// +/// Each implicant is represented as a Vec> of length num_variables. 
+fn compute_prime_implicants(num_vars: usize, minterms: &[usize]) -> Vec { + use std::collections::HashSet; + + if minterms.is_empty() { + return vec![]; + } + + type Pattern = Vec>; + + let mut current: Vec = minterms + .iter() + .map(|&mt| { + (0..num_vars) + .map(|i| Some(((mt >> (num_vars - 1 - i)) & 1) == 1)) + .collect() + }) + .collect(); + + let mut all_prime: HashSet = HashSet::new(); + + loop { + let mut next_set: HashSet = HashSet::new(); + let mut used = vec![false; current.len()]; + + for i in 0..current.len() { + for j in (i + 1)..current.len() { + if let Some(merged) = try_merge(¤t[i], ¤t[j]) { + next_set.insert(merged); + used[i] = true; + used[j] = true; + } + } + } + + for (i, &was_used) in used.iter().enumerate() { + if !was_used { + all_prime.insert(current[i].clone()); + } + } + + if next_set.is_empty() { + break; + } + current = next_set.into_iter().collect(); + } + + let mut result: Vec = all_prime + .into_iter() + .map(|pattern| PrimeImplicant { pattern }) + .collect(); + // Sort for deterministic output (HashSet iteration order is non-deterministic) + result.sort_by(|a, b| a.pattern.cmp(&b.pattern)); + result +} + +/// Try to merge two implicant patterns that differ in exactly one position. +/// Returns the merged pattern (with that position set to None) or None if they can't merge. 
+fn try_merge(a: &[Option], b: &[Option]) -> Option>> { + if a.len() != b.len() { + return None; + } + + let mut diff_count = 0; + let mut diff_pos = 0; + + for (i, (va, vb)) in a.iter().zip(b.iter()).enumerate() { + if va != vb { + diff_count += 1; + diff_pos = i; + if diff_count > 1 { + return None; + } + } + } + + if diff_count == 1 { + let mut merged = a.to_vec(); + merged[diff_pos] = None; + Some(merged) + } else { + None + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_disjunctive_normal_form", + instance: Box::new(MinimumDisjunctiveNormalForm::new( + 3, + vec![false, true, true, true, true, true, true, false], + )), + // Select prime implicants: p1(¬x1∧x2), p4(x1∧¬x3), p5(¬x2∧x3) + // The order of PIs depends on the QMC algorithm output. + // We'll verify this in tests. + optimal_config: vec![1, 0, 0, 1, 1, 0], + optimal_value: serde_json::json!(3), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/minimum_disjunctive_normal_form.rs"] +mod tests; diff --git a/src/models/misc/minimum_fault_detection_test_set.rs b/src/models/misc/minimum_fault_detection_test_set.rs new file mode 100644 index 00000000..cc62b223 --- /dev/null +++ b/src/models/misc/minimum_fault_detection_test_set.rs @@ -0,0 +1,342 @@ +//! Minimum Fault Detection Test Set problem implementation. +//! +//! Given a directed acyclic graph with designated input and output vertices, +//! find the minimum set of input-output pairs whose coverage sets cover all vertices. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Deserializer, Serialize}; +use std::collections::{HashSet, VecDeque}; + +inventory::submit! 
{ + ProblemSchemaEntry { + name: "MinimumFaultDetectionTestSet", + display_name: "Minimum Fault Detection Test Set", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find minimum set of input-output paths covering all DAG vertices", + fields: &[ + FieldInfo { name: "num_vertices", type_name: "usize", description: "Number of vertices in the DAG" }, + FieldInfo { name: "arcs", type_name: "Vec<(usize, usize)>", description: "Directed arcs (u, v)" }, + FieldInfo { name: "inputs", type_name: "Vec", description: "Input vertex indices" }, + FieldInfo { name: "outputs", type_name: "Vec", description: "Output vertex indices" }, + ], + } +} + +/// The Minimum Fault Detection Test Set problem. +/// +/// Given a directed acyclic graph G = (V, A) with designated input vertices +/// I ⊆ V and output vertices O ⊆ V, find the minimum number of input-output +/// pairs (i, o) ∈ I × O such that the union of their coverage sets covers +/// all vertices V. +/// +/// For a pair (i, o), the coverage set is the set of vertices reachable from i +/// that can also reach o (i.e., vertices on some i-to-o path). +/// +/// The configuration space is binary over all |I| × |O| pairs. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::MinimumFaultDetectionTestSet; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let problem = MinimumFaultDetectionTestSet::new( +/// 7, +/// vec![(0,2),(0,3),(1,3),(1,4),(2,5),(3,5),(3,6),(4,6)], +/// vec![0, 1], +/// vec![5, 6], +/// ); +/// let solver = BruteForce::new(); +/// use problemreductions::solvers::Solver as _; +/// let optimal = solver.solve(&problem); +/// assert_eq!(optimal, problemreductions::types::Min(Some(2))); +/// ``` +#[derive(Debug, Clone, Serialize)] +pub struct MinimumFaultDetectionTestSet { + /// Number of vertices. + num_vertices: usize, + /// Directed arcs (u, v). + arcs: Vec<(usize, usize)>, + /// Input vertex indices. + inputs: Vec, + /// Output vertex indices. 
+ outputs: Vec, + /// Precomputed coverage sets for each (input_idx, output_idx) pair. + /// Indexed as coverage[i_idx * num_outputs + o_idx]. + #[serde(skip)] + coverage: Vec>, +} + +#[derive(Deserialize)] +struct MinimumFaultDetectionTestSetData { + num_vertices: usize, + arcs: Vec<(usize, usize)>, + inputs: Vec, + outputs: Vec, +} + +impl<'de> Deserialize<'de> for MinimumFaultDetectionTestSet { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = MinimumFaultDetectionTestSetData::deserialize(deserializer)?; + let coverage = + Self::build_coverage(data.num_vertices, &data.arcs, &data.inputs, &data.outputs); + Ok(Self { + num_vertices: data.num_vertices, + arcs: data.arcs, + inputs: data.inputs, + outputs: data.outputs, + coverage, + }) + } +} + +impl MinimumFaultDetectionTestSet { + /// Create a new Minimum Fault Detection Test Set instance. + /// + /// # Panics + /// + /// Panics if any arc index is out of bounds, if any input or output index + /// is out of bounds, or if inputs or outputs are empty. + pub fn new( + num_vertices: usize, + arcs: Vec<(usize, usize)>, + inputs: Vec, + outputs: Vec, + ) -> Self { + assert!(!inputs.is_empty(), "Inputs must not be empty"); + assert!(!outputs.is_empty(), "Outputs must not be empty"); + for (i, &(u, v)) in arcs.iter().enumerate() { + assert!( + u < num_vertices && v < num_vertices, + "Arc {} ({}, {}) out of bounds for {} vertices", + i, + u, + v, + num_vertices + ); + } + for &inp in &inputs { + assert!( + inp < num_vertices, + "Input vertex {} out of bounds for {} vertices", + inp, + num_vertices + ); + } + for &out in &outputs { + assert!( + out < num_vertices, + "Output vertex {} out of bounds for {} vertices", + out, + num_vertices + ); + } + let coverage = Self::build_coverage(num_vertices, &arcs, &inputs, &outputs); + Self { + num_vertices, + arcs, + inputs, + outputs, + coverage, + } + } + + /// Compute forward reachability from a given vertex using BFS on the DAG. 
+ fn forward_reachable(num_vertices: usize, adj: &[Vec], start: usize) -> HashSet { + let mut visited = HashSet::new(); + let mut queue = VecDeque::new(); + visited.insert(start); + queue.push_back(start); + while let Some(v) = queue.pop_front() { + if v < adj.len() { + for &w in &adj[v] { + if visited.insert(w) { + queue.push_back(w); + } + } + } + } + let _ = num_vertices; // used only to clarify signature + visited + } + + /// Compute backward reachability from a given vertex using BFS on the reverse DAG. + fn backward_reachable( + num_vertices: usize, + rev_adj: &[Vec], + start: usize, + ) -> HashSet { + let mut visited = HashSet::new(); + let mut queue = VecDeque::new(); + visited.insert(start); + queue.push_back(start); + while let Some(v) = queue.pop_front() { + if v < rev_adj.len() { + for &w in &rev_adj[v] { + if visited.insert(w) { + queue.push_back(w); + } + } + } + } + let _ = num_vertices; + visited + } + + /// Build coverage sets for all input-output pairs. + fn build_coverage( + num_vertices: usize, + arcs: &[(usize, usize)], + inputs: &[usize], + outputs: &[usize], + ) -> Vec> { + // Build adjacency lists + let mut adj = vec![vec![]; num_vertices]; + let mut rev_adj = vec![vec![]; num_vertices]; + for &(u, v) in arcs { + adj[u].push(v); + rev_adj[v].push(u); + } + + // Precompute forward reachability from each input + let fwd: Vec> = inputs + .iter() + .map(|&inp| Self::forward_reachable(num_vertices, &adj, inp)) + .collect(); + + // Precompute backward reachability from each output + let bwd: Vec> = outputs + .iter() + .map(|&out| Self::backward_reachable(num_vertices, &rev_adj, out)) + .collect(); + + let num_outputs = outputs.len(); + let mut coverage = Vec::with_capacity(inputs.len() * num_outputs); + for (i_idx, _) in inputs.iter().enumerate() { + for (o_idx, _) in outputs.iter().enumerate() { + // Coverage = vertices reachable from input i AND reachable backwards from output o + let cov: HashSet = 
fwd[i_idx].intersection(&bwd[o_idx]).copied().collect(); + coverage.push(cov); + } + } + coverage + } + + /// Get the number of vertices. + pub fn num_vertices(&self) -> usize { + self.num_vertices + } + + /// Get the number of arcs. + pub fn num_arcs(&self) -> usize { + self.arcs.len() + } + + /// Get the arcs. + pub fn arcs(&self) -> &[(usize, usize)] { + &self.arcs + } + + /// Get the input vertices. + pub fn inputs(&self) -> &[usize] { + &self.inputs + } + + /// Get the output vertices. + pub fn outputs(&self) -> &[usize] { + &self.outputs + } + + /// Get the number of input vertices. + pub fn num_inputs(&self) -> usize { + self.inputs.len() + } + + /// Get the number of output vertices. + pub fn num_outputs(&self) -> usize { + self.outputs.len() + } +} + +impl Problem for MinimumFaultDetectionTestSet { + const NAME: &'static str = "MinimumFaultDetectionTestSet"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.inputs.len() * self.outputs.len()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let num_pairs = self.inputs.len() * self.outputs.len(); + if config.len() != num_pairs { + return Min(None); + } + if config.iter().any(|&c| c > 1) { + return Min(None); + } + + // Collect union of coverage sets for selected pairs + let mut covered: HashSet = HashSet::new(); + let mut count = 0usize; + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + count += 1; + covered.extend(&self.coverage[idx]); + } + } + + // Check all vertices are covered + if covered.len() == self.num_vertices { + Min(Some(count)) + } else { + Min(None) + } + } +} + +crate::declare_variants! 
{
    default MinimumFaultDetectionTestSet => "2^(num_inputs * num_outputs)",
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_model_example_specs() -> Vec<crate::example_db::specs::ModelExampleSpec> {
    // 7 vertices, inputs={0,1}, outputs={5,6}
    // Arcs: (0,2),(0,3),(1,3),(1,4),(2,5),(3,5),(3,6),(4,6)
    // Pairs: (0,5)->{0,2,3,5}, (0,6)->{0,3,6}, (1,5)->{1,3,5}, (1,6)->{1,3,4,6}
    // Config [1,0,0,1]: select pairs (0,5) and (1,6) -> covers all 7 -> Min(2)
    vec![crate::example_db::specs::ModelExampleSpec {
        id: "minimum_fault_detection_test_set",
        instance: Box::new(MinimumFaultDetectionTestSet::new(
            7,
            vec![
                (0, 2),
                (0, 3),
                (1, 3),
                (1, 4),
                (2, 5),
                (3, 5),
                (3, 6),
                (4, 6),
            ],
            vec![0, 1],
            vec![5, 6],
        )),
        optimal_config: vec![1, 0, 0, 1],
        optimal_value: serde_json::json!(2),
    }]
}

#[cfg(test)]
#[path = "../../unit_tests/models/misc/minimum_fault_detection_test_set.rs"]
mod tests;

// ---- file: src/models/misc/minimum_register_sufficiency_for_loops.rs (new) ----

//! Minimum Register Sufficiency for Loops problem implementation.
//!
//! Given a loop of length N and a set of variables, each active during a
//! contiguous circular arc of timesteps, assign registers to variables
//! minimizing the number of distinct registers used, such that no two
//! conflicting (overlapping) variables share the same register.

use crate::registry::{FieldInfo, ProblemSchemaEntry};
use crate::traits::Problem;
use crate::types::Min;
use serde::{Deserialize, Serialize};

inventory::submit! {
    ProblemSchemaEntry {
        name: "MinimumRegisterSufficiencyForLoops",
        display_name: "Minimum Register Sufficiency for Loops",
        aliases: &[],
        dimensions: &[],
        module_path: module_path!(),
        description: "Assign registers to loop variables minimizing register count, no two conflicting variables share a register",
        fields: &[
            FieldInfo { name: "loop_length", type_name: "usize", description: "Loop length N (number of timesteps)" },
            FieldInfo { name: "variables", type_name: "Vec<(usize, usize)>", description: "Variables as (start_time, duration) circular arcs" },
        ],
    }
}

/// The Minimum Register Sufficiency for Loops problem.
///
/// Given a loop of length N (representing N timesteps arranged in a circle)
/// and a set of variables, each active during a contiguous circular arc of
/// timesteps specified by (start_time, duration), assign a register index
/// to each variable such that:
/// - No two variables with overlapping circular arcs share the same register
/// - The number of distinct registers used is minimized
///
/// This is equivalent to the circular arc graph coloring problem, where each
/// variable corresponds to a circular arc and registers correspond to colors.
///
/// # Representation
///
/// Each variable is assigned a register index from `{0, ..., n-1}` where n is
/// the number of variables. The configuration `config[i]` gives the register
/// assigned to variable i.
///
/// # Example
///
/// ```
/// use problemreductions::models::misc::MinimumRegisterSufficiencyForLoops;
/// use problemreductions::{Problem, Solver, BruteForce, Min};
///
/// // 3 variables on a loop of length 6, all pairs conflict
/// let problem = MinimumRegisterSufficiencyForLoops::new(
///     6,
///     vec![(0, 3), (2, 3), (4, 3)],
/// );
/// let solver = BruteForce::new();
/// let solution = solver.find_witness(&problem);
/// assert!(solution.is_some());
/// let val = problem.evaluate(&solution.unwrap());
/// assert_eq!(val, Min(Some(3)));
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MinimumRegisterSufficiencyForLoops {
    /// Loop length N (number of timesteps in the circular loop).
    loop_length: usize,
    /// Variables as (start_time, duration) pairs representing circular arcs.
    variables: Vec<(usize, usize)>,
}

impl MinimumRegisterSufficiencyForLoops {
    /// Create a new Minimum Register Sufficiency for Loops instance.
    ///
    /// # Panics
    ///
    /// Panics if `loop_length` is zero, if any duration is zero or exceeds
    /// `loop_length`, or if any `start_time >= loop_length`.
    pub fn new(loop_length: usize, variables: Vec<(usize, usize)>) -> Self {
        assert!(loop_length > 0, "loop_length must be positive");
        for (i, &(start, dur)) in variables.iter().enumerate() {
            assert!(
                start < loop_length,
                "Variable {i} start_time {start} >= loop_length {loop_length}"
            );
            assert!(
                dur > 0 && dur <= loop_length,
                "Variable {i} duration {dur} must be in [1, {loop_length}]"
            );
        }
        Self {
            loop_length,
            variables,
        }
    }

    /// Get the loop length N.
    pub fn loop_length(&self) -> usize {
        self.loop_length
    }

    /// Get the number of variables.
    pub fn num_variables(&self) -> usize {
        self.variables.len()
    }

    /// Get the variables as (start_time, duration) pairs.
    pub fn variables(&self) -> &[(usize, usize)] {
        &self.variables
    }

    /// Check if two circular arcs overlap.
+ /// + /// Arc [s, s+l) mod N covers timesteps {s, s+1, ..., s+l-1} mod N. + /// Two arcs overlap iff their covered timestep sets intersect. + fn arcs_overlap(s1: usize, l1: usize, s2: usize, l2: usize, n: usize) -> bool { + // Use the modular distance check: + // Timestep t is in arc [s, s+l) mod N iff (t - s) mod N < l + // Two arcs are disjoint iff arc2 fits entirely in the gap of arc1 + // or arc1 fits entirely in the gap of arc2. + // Gap of arc [s, s+l) is [(s+l) mod N, s) with length N-l. + + // If either arc covers the entire loop, they always overlap (if both non-empty) + if l1 == n || l2 == n { + return true; + } + + // Check if arc2 is entirely in the gap of arc1. + // Gap of arc1 starts at (s1+l1) % n and has length n-l1. + // Arc2 fits in this gap if the "gap distance" of s2 from gap_start + // plus l2 <= n - l1. + let gap1_start = (s1 + l1) % n; + let dist_s2_in_gap1 = (s2 + n - gap1_start) % n; + if dist_s2_in_gap1 + l2 <= n - l1 { + return false; + } + + // Check if arc1 is entirely in the gap of arc2. 
+ let gap2_start = (s2 + l2) % n; + let dist_s1_in_gap2 = (s1 + n - gap2_start) % n; + if dist_s1_in_gap2 + l1 <= n - l2 { + return false; + } + + true + } +} + +impl Problem for MinimumRegisterSufficiencyForLoops { + const NAME: &'static str = "MinimumRegisterSufficiencyForLoops"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n = self.variables.len(); + vec![n; n] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.variables.len(); + if config.len() != n { + return Min(None); + } + // Check all register indices are in valid range + if config.iter().any(|&r| r >= n) { + return Min(None); + } + + // Check for conflicts: no two overlapping variables share a register + for i in 0..n { + for j in (i + 1)..n { + if config[i] == config[j] { + let (s1, l1) = self.variables[i]; + let (s2, l2) = self.variables[j]; + if Self::arcs_overlap(s1, l1, s2, l2, self.loop_length) { + return Min(None); + } + } + } + } + + // Count distinct registers used + let mut used = vec![false; n]; + for &r in config { + used[r] = true; + } + let count = used.iter().filter(|&&u| u).count(); + Min(Some(count)) + } +} + +crate::declare_variants! 
{
    default MinimumRegisterSufficiencyForLoops => "num_variables ^ num_variables",
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_model_example_specs() -> Vec<crate::example_db::specs::ModelExampleSpec> {
    vec![crate::example_db::specs::ModelExampleSpec {
        id: "minimum_register_sufficiency_for_loops",
        // 3 variables on a loop of length 6, all pairs conflict (K3)
        // Optimal: 3 registers (chromatic number of K3)
        instance: Box::new(MinimumRegisterSufficiencyForLoops::new(
            6,
            vec![(0, 3), (2, 3), (4, 3)],
        )),
        optimal_config: vec![0, 1, 2],
        optimal_value: serde_json::json!(3),
    }]
}

#[cfg(test)]
#[path = "../../unit_tests/models/misc/minimum_register_sufficiency_for_loops.rs"]
mod tests;

// ---- file: src/models/misc/minimum_weight_and_or_graph.rs (new) ----

//! Minimum Weight AND/OR Graph problem implementation.
//!
//! Given a directed acyclic graph with AND/OR gates, find the minimum-weight
//! solution subgraph from a designated source vertex.

use crate::registry::{FieldInfo, ProblemSchemaEntry};
use crate::traits::Problem;
use crate::types::Min;
use serde::{Deserialize, Deserializer, Serialize};

inventory::submit! {
    ProblemSchemaEntry {
        name: "MinimumWeightAndOrGraph",
        display_name: "Minimum Weight AND/OR Graph",
        aliases: &[],
        dimensions: &[],
        module_path: module_path!(),
        description: "Find the minimum-weight solution subgraph from a source in a DAG with AND/OR gates",
        fields: &[
            FieldInfo { name: "num_vertices", type_name: "usize", description: "Number of vertices in the DAG" },
            FieldInfo { name: "arcs", type_name: "Vec<(usize, usize)>", description: "Directed arcs (u, v)" },
            FieldInfo { name: "source", type_name: "usize", description: "Source vertex index" },
            FieldInfo { name: "gate_types", type_name: "Vec<Option<bool>>", description: "Gate type per vertex: Some(true)=AND, Some(false)=OR, None=leaf" },
            FieldInfo { name: "arc_weights", type_name: "Vec<i64>", description: "Weight of each arc" },
        ],
    }
}

/// The Minimum Weight AND/OR Graph problem.
///
/// Given a directed acyclic graph G = (V, A) where each non-leaf vertex is
/// either an AND gate or an OR gate, a source vertex s, and arc weights
/// w: A -> Z, find a solution subgraph of minimum total arc weight.
///
/// A solution subgraph is a subset of arcs S such that:
/// - The source vertex is "solved"
/// - For each solved AND-gate vertex v: all outgoing arcs from v are in S
/// - For each solved OR-gate vertex v: at least one outgoing arc from v is in S
/// - For each arc (u,v) in S: the target vertex v is also solved (recursively)
/// - Leaf vertices are trivially solved (no outgoing arcs needed)
///
/// The configuration space is binary over arcs: each arc is either selected (1)
/// or not (0).
///
/// # Example
///
/// ```
/// use problemreductions::models::misc::MinimumWeightAndOrGraph;
/// use problemreductions::{Problem, Solver, BruteForce};
///
/// // 7 vertices: AND at 0, OR at 1 and 2, leaves 3-6
/// let problem = MinimumWeightAndOrGraph::new(
///     7,
///     vec![(0,1), (0,2), (1,3), (1,4), (2,5), (2,6)],
///     0,
///     vec![Some(true), Some(false), Some(false), None, None, None, None],
///     vec![1, 2, 3, 1, 4, 2],
/// );
/// let solver = BruteForce::new();
/// use problemreductions::solvers::Solver as _;
/// let optimal = solver.solve(&problem);
/// assert_eq!(optimal, problemreductions::types::Min(Some(6)));
/// ```
#[derive(Debug, Clone, Serialize)]
pub struct MinimumWeightAndOrGraph {
    /// Number of vertices.
    num_vertices: usize,
    /// Directed arcs (u, v).
    arcs: Vec<(usize, usize)>,
    /// Source vertex index.
    source: usize,
    /// Gate type per vertex: Some(true)=AND, Some(false)=OR, None=leaf.
    gate_types: Vec<Option<bool>>,
    /// Weight of each arc.
    // NOTE(review): element type reconstructed as i64 (doc says weights map
    // into Z and the doctest uses integer literals) — confirm against the
    // original definition before relying on signedness.
    arc_weights: Vec<i64>,
    /// Precomputed: outgoing arcs for each vertex (arc indices).
    #[serde(skip)]
    outgoing: Vec<Vec<usize>>,
}

/// Serde mirror of the stored fields; `outgoing` is derived, not serialized.
#[derive(Deserialize)]
struct MinimumWeightAndOrGraphData {
    num_vertices: usize,
    arcs: Vec<(usize, usize)>,
    source: usize,
    gate_types: Vec<Option<bool>>,
    arc_weights: Vec<i64>,
}

impl<'de> Deserialize<'de> for MinimumWeightAndOrGraph {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let data = MinimumWeightAndOrGraphData::deserialize(deserializer)?;
        // Rebuild the derived per-vertex outgoing-arc index after reading
        // the raw fields.
        let outgoing = Self::build_outgoing(data.num_vertices, &data.arcs);
        Ok(Self {
            num_vertices: data.num_vertices,
            arcs: data.arcs,
            source: data.source,
            gate_types: data.gate_types,
            arc_weights: data.arc_weights,
            outgoing,
        })
    }
}

impl MinimumWeightAndOrGraph {
    /// Create a new Minimum Weight AND/OR Graph instance.
+ /// + /// # Panics + /// + /// Panics if any arc index is out of bounds, if the source is out of bounds, + /// if gate_types length does not match num_vertices, if arc_weights length + /// does not match the number of arcs, or if the source is a leaf. + pub fn new( + num_vertices: usize, + arcs: Vec<(usize, usize)>, + source: usize, + gate_types: Vec>, + arc_weights: Vec, + ) -> Self { + assert!( + source < num_vertices, + "Source vertex {} out of bounds for {} vertices", + source, + num_vertices + ); + assert_eq!( + gate_types.len(), + num_vertices, + "gate_types length {} does not match num_vertices {}", + gate_types.len(), + num_vertices + ); + assert_eq!( + arc_weights.len(), + arcs.len(), + "arc_weights length {} does not match number of arcs {}", + arc_weights.len(), + arcs.len() + ); + for (i, &(u, v)) in arcs.iter().enumerate() { + assert!( + u < num_vertices && v < num_vertices, + "Arc {} ({}, {}) out of bounds for {} vertices", + i, + u, + v, + num_vertices + ); + } + assert!( + gate_types[source].is_some(), + "Source vertex must be an AND or OR gate, not a leaf" + ); + let outgoing = Self::build_outgoing(num_vertices, &arcs); + Self { + num_vertices, + arcs, + source, + gate_types, + arc_weights, + outgoing, + } + } + + /// Build outgoing arc index lists for each vertex. + fn build_outgoing(num_vertices: usize, arcs: &[(usize, usize)]) -> Vec> { + let mut outgoing = vec![vec![]; num_vertices]; + for (i, &(u, _v)) in arcs.iter().enumerate() { + outgoing[u].push(i); + } + outgoing + } + + /// Get the number of vertices. + pub fn num_vertices(&self) -> usize { + self.num_vertices + } + + /// Get the number of arcs. + pub fn num_arcs(&self) -> usize { + self.arcs.len() + } + + /// Get the arcs. + pub fn arcs(&self) -> &[(usize, usize)] { + &self.arcs + } + + /// Get the source vertex. + pub fn source(&self) -> usize { + self.source + } + + /// Get the gate types. + pub fn gate_types(&self) -> &[Option] { + &self.gate_types + } + + /// Get the arc weights. 
+ pub fn arc_weights(&self) -> &[i32] { + &self.arc_weights + } +} + +impl Problem for MinimumWeightAndOrGraph { + const NAME: &'static str = "MinimumWeightAndOrGraph"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.arcs.len()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.arcs.len() { + return Min(None); + } + + // Check all config values are 0 or 1 + if config.iter().any(|&c| c > 1) { + return Min(None); + } + + // Determine which arcs are selected + let selected: Vec = config.iter().map(|&c| c == 1).collect(); + + // Propagate "solved" status top-down from source + let mut solved = vec![false; self.num_vertices]; + let mut stack = vec![self.source]; + solved[self.source] = true; + + while let Some(v) = stack.pop() { + match self.gate_types[v] { + None => { + // Leaf vertex: trivially solved, no outgoing arcs needed + } + Some(is_and) => { + let out_arcs = &self.outgoing[v]; + let selected_out: Vec = out_arcs + .iter() + .copied() + .filter(|&ai| selected[ai]) + .collect(); + + if is_and { + // AND gate: all outgoing arcs must be selected + if selected_out.len() != out_arcs.len() { + return Min(None); + } + } else { + // OR gate: at least one outgoing arc must be selected + if selected_out.is_empty() { + return Min(None); + } + } + + // Mark children of selected arcs as solved + for &ai in &selected_out { + let (_u, child) = self.arcs[ai]; + if !solved[child] { + solved[child] = true; + stack.push(child); + } + } + } + } + } + + // Check no selected arcs come from non-solved vertices (no dangling arcs) + for (ai, &sel) in selected.iter().enumerate() { + if sel { + let (u, _v) = self.arcs[ai]; + if !solved[u] { + return Min(None); + } + } + } + + // Compute total weight of selected arcs + let total_weight: i32 = selected + .iter() + .enumerate() + .filter(|(_, &sel)| sel) + .map(|(i, _)| self.arc_weights[i]) + .sum(); + + 
Min(Some(total_weight))
+    }
+}
+
+crate::declare_variants! {
+    default MinimumWeightAndOrGraph => "2^num_arcs",
+}
+
+#[cfg(feature = "example-db")]
+pub(crate) fn canonical_model_example_specs() -> Vec<crate::example_db::specs::ModelExampleSpec> {
+    // 7 vertices: source = 0 (AND), v1 (OR), v2 (OR), v3-v6 (leaves).
+    // Arcs (u -> v, weight): (0,1,1), (0,2,2), (1,3,3), (1,4,1), (2,5,4), (2,6,2).
+    // The AND source forces both arcs 0 and 1 into every solution (cost 1+2 = 3);
+    // each OR gate then contributes its cheapest outgoing arc: arc 3 (w=1) at v1
+    // and arc 5 (w=2) at v2. No cheaper solution subgraph exists, so the optimum
+    // value is 3 + 1 + 2 = 6.
+ // Optimal config: [1,1,0,1,0,1] + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_weight_and_or_graph", + instance: Box::new(MinimumWeightAndOrGraph::new( + 7, + vec![(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)], + 0, + vec![Some(true), Some(false), Some(false), None, None, None, None], + vec![1, 2, 3, 1, 4, 2], + )), + optimal_config: vec![1, 1, 0, 1, 0, 1], + optimal_value: serde_json::json!(6), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/minimum_weight_and_or_graph.rs"] +mod tests; diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index 0ebc9365..ca2c7c7a 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -4,6 +4,7 @@ //! - [`AdditionalKey`]: Determine whether a relational schema has an additional candidate key //! - [`Betweenness`]: Find a linear ordering satisfying betweenness constraints on triples //! - [`BinPacking`]: Bin Packing (minimize bins) +//! - [`Clustering`]: Partition elements into bounded-diameter clusters //! - [`CyclicOrdering`]: Find a permutation satisfying cyclic ordering constraints on triples //! - [`BoyceCoddNormalFormViolation`]: Boyce-Codd Normal Form Violation (BCNF) //! - [`ConsistencyOfDatabaseFrequencyTables`]: Pairwise frequency-table consistency @@ -20,11 +21,21 @@ //! - [`Knapsack`]: 0-1 Knapsack (maximize value subject to weight capacity) //! - [`MultiprocessorScheduling`]: Schedule tasks on processors to meet a deadline //! - [`Numerical3DimensionalMatching`]: Partition W∪X∪Y into m triples each summing to B +//! - [`NumericalMatchingWithTargetSums`]: Partition X∪Y into m pairs with pair sums matching targets //! - [`OpenShopScheduling`]: Open Shop Scheduling (minimize makespan, free task order per job) //! - [`LongestCommonSubsequence`]: Longest Common Subsequence +//! - [`MaximumLikelihoodRanking`]: Find a ranking minimizing total pairwise disagreement +//! 
- [`MinimumAxiomSet`]: Find smallest axiom subset whose deductive closure covers all true sentences +//! - [`MinimumCodeGenerationOneRegister`]: Minimize instruction count for a one-register machine +//! - [`MinimumCodeGenerationParallelAssignments`]: Minimize backward dependencies when ordering parallel assignments +//! - [`MinimumCodeGenerationUnlimitedRegisters`]: Minimize instruction count for an unlimited-register machine with 2-address instructions //! - [`MinimumExternalMacroDataCompression`]: Minimize compression cost using external dictionary +//! - [`MinimumFaultDetectionTestSet`]: Find minimum set of input-output paths covering all DAG vertices //! - [`MinimumInternalMacroDataCompression`]: Minimize self-referencing compression cost +//! - [`MinimumRegisterSufficiencyForLoops`]: Minimize registers for loop variable allocation (circular arc coloring) +//! - [`MinimumWeightAndOrGraph`]: Find minimum-weight solution subgraph in a DAG with AND/OR gates //! - [`MinimumTardinessSequencing`]: Minimize tardy tasks in single-machine scheduling +//! - [`OptimumCommunicationSpanningTree`]: Find spanning tree minimizing total weighted communication cost //! - [`PaintShop`]: Minimize color switches in paint shop scheduling //! - [`CosineProductIntegration`]: Balanced sign assignment for integer frequencies //! - [`NonLivenessFreePetriNet`]: Determine whether a free-choice Petri net is not live @@ -46,6 +57,7 @@ //! - [`SequencingWithReleaseTimesAndDeadlines`]: Single-machine scheduling feasibility //! - [`SequencingWithinIntervals`]: Schedule tasks within time windows //! - [`ShortestCommonSupersequence`]: Find a common supersequence of bounded length +//! - [`SquareTiling`]: Place colored square tiles on an N x N grid with matching edge colors //! - [`TimetableDesign`]: Schedule craftsmen on tasks across work periods //! - [`StringToStringCorrection`]: String-to-String Correction (derive target via deletions and swaps) //! 
- [`SubsetProduct`]: Find a subset whose product equals exactly a target value @@ -102,6 +114,7 @@ pub(crate) fn lehmer_dims(n: usize) -> Vec { mod bin_packing; mod boyce_codd_normal_form_violation; mod capacity_assignment; +pub(crate) mod clustering; pub(crate) mod conjunctive_boolean_query; pub(crate) mod conjunctive_query_foldability; mod consistency_of_database_frequency_tables; @@ -118,13 +131,25 @@ mod job_shop_scheduling; mod knapsack; mod kth_largest_m_tuple; mod longest_common_subsequence; +pub(crate) mod maximum_likelihood_ranking; +mod minimum_axiom_set; +mod minimum_code_generation_one_register; +pub(crate) mod minimum_code_generation_parallel_assignments; +mod minimum_code_generation_unlimited_registers; +pub(crate) mod minimum_decision_tree; +pub(crate) mod minimum_disjunctive_normal_form; mod minimum_external_macro_data_compression; +mod minimum_fault_detection_test_set; mod minimum_internal_macro_data_compression; +mod minimum_register_sufficiency_for_loops; mod minimum_tardiness_sequencing; +mod minimum_weight_and_or_graph; mod multiprocessor_scheduling; mod non_liveness_free_petri_net; mod numerical_3_dimensional_matching; +mod numerical_matching_with_target_sums; mod open_shop_scheduling; +pub(crate) mod optimum_communication_spanning_tree; pub(crate) mod paintshop; pub(crate) mod partially_ordered_knapsack; pub(crate) mod partition; @@ -144,6 +169,7 @@ mod sequencing_with_deadlines_and_set_up_times; mod sequencing_with_release_times_and_deadlines; mod sequencing_within_intervals; pub(crate) mod shortest_common_supersequence; +mod square_tiling; mod stacker_crane; mod staff_scheduling; pub(crate) mod string_to_string_correction; @@ -158,6 +184,7 @@ pub use betweenness::Betweenness; pub use bin_packing::BinPacking; pub use boyce_codd_normal_form_violation::BoyceCoddNormalFormViolation; pub use capacity_assignment::CapacityAssignment; +pub use clustering::Clustering; pub use conjunctive_boolean_query::{ConjunctiveBooleanQuery, QueryArg, Relation as 
CbqRelation}; pub use conjunctive_query_foldability::{ConjunctiveQueryFoldability, Term}; pub use consistency_of_database_frequency_tables::{ @@ -177,13 +204,25 @@ pub use job_shop_scheduling::JobShopScheduling; pub use knapsack::Knapsack; pub use kth_largest_m_tuple::KthLargestMTuple; pub use longest_common_subsequence::LongestCommonSubsequence; +pub use maximum_likelihood_ranking::MaximumLikelihoodRanking; +pub use minimum_axiom_set::MinimumAxiomSet; +pub use minimum_code_generation_one_register::MinimumCodeGenerationOneRegister; +pub use minimum_code_generation_parallel_assignments::MinimumCodeGenerationParallelAssignments; +pub use minimum_code_generation_unlimited_registers::MinimumCodeGenerationUnlimitedRegisters; +pub use minimum_decision_tree::MinimumDecisionTree; +pub use minimum_disjunctive_normal_form::MinimumDisjunctiveNormalForm; pub use minimum_external_macro_data_compression::MinimumExternalMacroDataCompression; +pub use minimum_fault_detection_test_set::MinimumFaultDetectionTestSet; pub use minimum_internal_macro_data_compression::MinimumInternalMacroDataCompression; +pub use minimum_register_sufficiency_for_loops::MinimumRegisterSufficiencyForLoops; pub use minimum_tardiness_sequencing::MinimumTardinessSequencing; +pub use minimum_weight_and_or_graph::MinimumWeightAndOrGraph; pub use multiprocessor_scheduling::MultiprocessorScheduling; pub use non_liveness_free_petri_net::NonLivenessFreePetriNet; pub use numerical_3_dimensional_matching::Numerical3DimensionalMatching; +pub use numerical_matching_with_target_sums::NumericalMatchingWithTargetSums; pub use open_shop_scheduling::OpenShopScheduling; +pub use optimum_communication_spanning_tree::OptimumCommunicationSpanningTree; pub use paintshop::PaintShop; pub use partially_ordered_knapsack::PartiallyOrderedKnapsack; pub use partition::Partition; @@ -203,6 +242,7 @@ pub use sequencing_with_deadlines_and_set_up_times::SequencingWithDeadlinesAndSe pub use 
sequencing_with_release_times_and_deadlines::SequencingWithReleaseTimesAndDeadlines; pub use sequencing_within_intervals::SequencingWithinIntervals; pub use shortest_common_supersequence::ShortestCommonSupersequence; +pub use square_tiling::SquareTiling; pub use stacker_crane::StackerCrane; pub use staff_scheduling::StaffScheduling; pub use string_to_string_correction::StringToStringCorrection; @@ -259,11 +299,18 @@ pub(crate) fn canonical_model_example_specs() -> Vec Vec", description: "Integer sizes for each element of X" }, + FieldInfo { name: "sizes_y", type_name: "Vec", description: "Integer sizes for each element of Y" }, + FieldInfo { name: "targets", type_name: "Vec", description: "Target sums for each pair" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "NumericalMatchingWithTargetSums", + fields: &["num_pairs"], + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct NumericalMatchingWithTargetSums { + sizes_x: Vec, + sizes_y: Vec, + targets: Vec, +} + +impl NumericalMatchingWithTargetSums { + fn validate_inputs(sizes_x: &[i64], sizes_y: &[i64], targets: &[i64]) -> Result<(), String> { + let m = sizes_x.len(); + if m == 0 { + return Err( + "NumericalMatchingWithTargetSums requires at least one element per set".to_string(), + ); + } + if sizes_y.len() != m { + return Err( + "NumericalMatchingWithTargetSums requires sizes_x and sizes_y to have the same length" + .to_string(), + ); + } + if targets.len() != m { + return Err( + "NumericalMatchingWithTargetSums requires targets to have the same length as sizes_x" + .to_string(), + ); + } + Ok(()) + } + + pub fn try_new( + sizes_x: Vec, + sizes_y: Vec, + targets: Vec, + ) -> Result { + Self::validate_inputs(&sizes_x, &sizes_y, &targets)?; + Ok(Self { + sizes_x, + sizes_y, + targets, + }) + } + + /// Create a new Numerical Matching with Target Sums instance. + /// + /// # Panics + /// + /// Panics if the input violates the NMTS invariants. 
+ pub fn new(sizes_x: Vec, sizes_y: Vec, targets: Vec) -> Self { + Self::try_new(sizes_x, sizes_y, targets).unwrap_or_else(|message| panic!("{message}")) + } + + /// Number of pairs (m). + pub fn num_pairs(&self) -> usize { + self.sizes_x.len() + } + + /// Integer sizes for each element of X. + pub fn sizes_x(&self) -> &[i64] { + &self.sizes_x + } + + /// Integer sizes for each element of Y. + pub fn sizes_y(&self) -> &[i64] { + &self.sizes_y + } + + /// Target sums for each pair. + pub fn targets(&self) -> &[i64] { + &self.targets + } +} + +#[derive(Deserialize)] +struct NumericalMatchingWithTargetSumsData { + sizes_x: Vec, + sizes_y: Vec, + targets: Vec, +} + +impl<'de> Deserialize<'de> for NumericalMatchingWithTargetSums { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = NumericalMatchingWithTargetSumsData::deserialize(deserializer)?; + Self::try_new(data.sizes_x, data.sizes_y, data.targets).map_err(D::Error::custom) + } +} + +impl Problem for NumericalMatchingWithTargetSums { + const NAME: &'static str = "NumericalMatchingWithTargetSums"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let m = self.num_pairs(); + vec![m; m] + } + + fn evaluate(&self, config: &[usize]) -> Or { + Or({ + let m = self.num_pairs(); + if config.len() != m { + return Or(false); + } + + // Check config is valid permutation of 0..m + let mut used = vec![false; m]; + for &idx in config { + if idx >= m || used[idx] { + return Or(false); + } + used[idx] = true; + } + + // Compute pair sums and compare multisets + let mut pair_sums: Vec = (0..m) + .map(|i| self.sizes_x[i] + self.sizes_y[config[i]]) + .collect(); + let mut sorted_targets = self.targets.clone(); + pair_sums.sort(); + sorted_targets.sort(); + pair_sums == sorted_targets + }) + } +} + +crate::declare_variants! 
{
+    default NumericalMatchingWithTargetSums => "num_pairs ^ num_pairs",
+}
+
+#[cfg(feature = "example-db")]
+pub(crate) fn canonical_model_example_specs() -> Vec<crate::example_db::specs::ModelExampleSpec> {
+    vec![crate::example_db::specs::ModelExampleSpec {
+        id: "numerical_matching_with_target_sums",
+        instance: Box::new(NumericalMatchingWithTargetSums::new(
+            vec![1, 4, 7],
+            vec![2, 5, 3],
+            vec![3, 7, 12],
+        )),
+        optimal_config: vec![0, 2, 1],
+        optimal_value: serde_json::json!(true),
+    }]
+}
+
+#[cfg(test)]
+#[path = "../../unit_tests/models/misc/numerical_matching_with_target_sums.rs"]
+mod tests;
diff --git a/src/models/misc/optimum_communication_spanning_tree.rs b/src/models/misc/optimum_communication_spanning_tree.rs
new file mode 100644
index 00000000..6f2729b9
--- /dev/null
+++ b/src/models/misc/optimum_communication_spanning_tree.rs
@@ -0,0 +1,354 @@
+//! Optimum Communication Spanning Tree problem implementation.
+//!
+//! Given a complete graph K_n with edge weights w(e) and communication
+//! requirements r(u,v) for each vertex pair, find a spanning tree T that
+//! minimizes the total communication cost: sum_{u<v} r(u,v) * W_T(u,v),
+//! where W_T(u,v) is the weight of the path between u and v in T.
+
+use crate::registry::{FieldInfo, ProblemSchemaEntry};
+use crate::traits::Problem;
+use crate::types::Min;
+use serde::{Deserialize, Serialize};
+use std::collections::VecDeque;
+
+inventory::submit! {
+    ProblemSchemaEntry {
+        name: "OptimumCommunicationSpanningTree",
+        display_name: "Optimum Communication Spanning Tree",
+        aliases: &[],
+        dimensions: &[],
+        module_path: module_path!(),
+        description: "Find a spanning tree minimizing total weighted communication cost",
+        fields: &[
+            FieldInfo { name: "edge_weights", type_name: "Vec<Vec<i64>>", description: "Symmetric weight matrix w(i,j)" },
+            FieldInfo { name: "requirements", type_name: "Vec<Vec<i64>>", description: "Symmetric requirement matrix r(i,j)" },
+        ],
+    }
+}
+
+/// The Optimum Communication Spanning Tree problem.
+///
+/// Given a complete graph K_n with edge weights w(e) >= 0 and communication
+/// requirements r(u,v) >= 0 for each vertex pair, find a spanning tree T
+/// minimizing the total communication cost:
+///
+/// sum_{u < v} r(u,v) * W_T(u,v)
+///
+/// where W_T(u,v) is the weight of the unique path between u and v in T.
+///
+/// # Representation
+///
+/// Each edge of K_n is assigned a binary variable (0 = not in tree, 1 = in tree).
+/// Edges are ordered lexicographically: (0,1), (0,2), ..., (0,n-1), (1,2), ..., (n-2,n-1).
+/// A valid spanning tree has exactly n-1 selected edges forming a connected subgraph.
+/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::OptimumCommunicationSpanningTree; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let problem = OptimumCommunicationSpanningTree::new( +/// vec![ +/// vec![0, 1, 2], +/// vec![1, 0, 3], +/// vec![2, 3, 0], +/// ], +/// vec![ +/// vec![0, 1, 1], +/// vec![1, 0, 1], +/// vec![1, 1, 0], +/// ], +/// ); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OptimumCommunicationSpanningTree { + num_vertices: usize, + edge_weights: Vec>, + requirements: Vec>, +} + +impl OptimumCommunicationSpanningTree { + /// Create a new OptimumCommunicationSpanningTree instance. + /// + /// # Arguments + /// + /// * `edge_weights` - Symmetric n x n matrix with w(i,i) = 0 and w(i,j) >= 0. + /// * `requirements` - Symmetric n x n matrix with r(i,i) = 0 and r(i,j) >= 0. + /// + /// # Panics + /// + /// Panics if the matrices are not square, not the same size, have nonzero + /// diagonals, are not symmetric, or contain negative entries. 
+ pub fn new(edge_weights: Vec>, requirements: Vec>) -> Self { + let n = edge_weights.len(); + assert!(n >= 2, "must have at least 2 vertices"); + assert_eq!( + requirements.len(), + n, + "requirements matrix must have same size as edge_weights" + ); + + for (i, row) in edge_weights.iter().enumerate() { + assert_eq!( + row.len(), + n, + "edge_weights must be square: row {i} has length {} but expected {n}", + row.len() + ); + assert_eq!( + row[i], 0, + "diagonal of edge_weights must be zero: edge_weights[{i}][{i}] = {}", + row[i] + ); + } + + for (i, row) in requirements.iter().enumerate() { + assert_eq!( + row.len(), + n, + "requirements must be square: row {i} has length {} but expected {n}", + row.len() + ); + assert_eq!( + row[i], 0, + "diagonal of requirements must be zero: requirements[{i}][{i}] = {}", + row[i] + ); + } + + // Check symmetry and non-negativity + for i in 0..n { + for j in (i + 1)..n { + assert_eq!( + edge_weights[i][j], edge_weights[j][i], + "edge_weights must be symmetric: w[{i}][{j}]={} != w[{j}][{i}]={}", + edge_weights[i][j], edge_weights[j][i] + ); + assert!( + edge_weights[i][j] >= 0, + "edge_weights must be non-negative: w[{i}][{j}]={}", + edge_weights[i][j] + ); + assert_eq!( + requirements[i][j], requirements[j][i], + "requirements must be symmetric: r[{i}][{j}]={} != r[{j}][{i}]={}", + requirements[i][j], requirements[j][i] + ); + assert!( + requirements[i][j] >= 0, + "requirements must be non-negative: r[{i}][{j}]={}", + requirements[i][j] + ); + } + } + + Self { + num_vertices: n, + edge_weights, + requirements, + } + } + + /// Returns the number of vertices. + pub fn num_vertices(&self) -> usize { + self.num_vertices + } + + /// Returns the number of edges in the complete graph K_n. + pub fn num_edges(&self) -> usize { + self.num_vertices * (self.num_vertices - 1) / 2 + } + + /// Returns the edge weight matrix. + pub fn edge_weights(&self) -> &Vec> { + &self.edge_weights + } + + /// Returns the requirements matrix. 
+ pub fn requirements(&self) -> &Vec> { + &self.requirements + } + + /// Returns the list of edges in lexicographic order: (0,1), (0,2), ..., (n-2,n-1). + pub fn edges(&self) -> Vec<(usize, usize)> { + let n = self.num_vertices; + let mut edges = Vec::with_capacity(self.num_edges()); + for i in 0..n { + for j in (i + 1)..n { + edges.push((i, j)); + } + } + edges + } + + /// Map a pair (i, j) with i < j to its edge index. + pub fn edge_index(i: usize, j: usize, n: usize) -> usize { + debug_assert!(i < j && j < n); + i * n - i * (i + 1) / 2 + (j - i - 1) + } +} + +/// Check if a configuration forms a valid spanning tree of K_n. +fn is_valid_spanning_tree(n: usize, edges: &[(usize, usize)], config: &[usize]) -> bool { + if config.len() != edges.len() { + return false; + } + + // Check all values are 0 or 1 + if config.iter().any(|&v| v > 1) { + return false; + } + + // Count selected edges: must be exactly n-1 + let selected_count: usize = config.iter().sum(); + if selected_count != n - 1 { + return false; + } + + // Build adjacency and check connectivity via BFS + let mut adj: Vec> = vec![vec![]; n]; + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + let (u, v) = edges[idx]; + adj[u].push(v); + adj[v].push(u); + } + } + + let mut visited = vec![false; n]; + let mut queue = VecDeque::new(); + visited[0] = true; + queue.push_back(0); + while let Some(v) = queue.pop_front() { + for &u in &adj[v] { + if !visited[u] { + visited[u] = true; + queue.push_back(u); + } + } + } + + visited.iter().all(|&v| v) +} + +/// Compute the communication cost of a spanning tree. +/// +/// For each pair (u, v) with u < v, compute W_T(u,v) via BFS in the tree, +/// then accumulate r(u,v) * W_T(u,v). 
+fn communication_cost( + n: usize, + edges: &[(usize, usize)], + config: &[usize], + edge_weights: &[Vec], + requirements: &[Vec], +) -> i64 { + // Build weighted adjacency list for the tree + let mut adj: Vec> = vec![vec![]; n]; + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + let (u, v) = edges[idx]; + let w = edge_weights[u][v]; + adj[u].push((v, w)); + adj[v].push((u, w)); + } + } + + let mut total_cost: i64 = 0; + + // For each source vertex, BFS to find path weights to all other vertices + for src in 0..n { + let mut dist = vec![-1i64; n]; + dist[src] = 0; + let mut queue = VecDeque::new(); + queue.push_back(src); + while let Some(u) = queue.pop_front() { + for &(v, w) in &adj[u] { + if dist[v] < 0 { + dist[v] = dist[u] + w as i64; + queue.push_back(v); + } + } + } + + // Accumulate r(src, dst) * W_T(src, dst) for dst > src + for (dst, &d) in dist.iter().enumerate().skip(src + 1) { + total_cost += requirements[src][dst] as i64 * d; + } + } + + total_cost +} + +impl Problem for OptimumCommunicationSpanningTree { + const NAME: &'static str = "OptimumCommunicationSpanningTree"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_edges()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let edges = self.edges(); + if !is_valid_spanning_tree(self.num_vertices, &edges, config) { + return Min(None); + } + Min(Some(communication_cost( + self.num_vertices, + &edges, + config, + &self.edge_weights, + &self.requirements, + ))) + } +} + +crate::declare_variants! 
{ + default OptimumCommunicationSpanningTree => "2^num_edges", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // K4 example from issue #906 + // Edge weights: + // w(0,1)=1, w(0,2)=3, w(0,3)=2, w(1,2)=2, w(1,3)=4, w(2,3)=1 + // Requirements: + // r(0,1)=2, r(0,2)=1, r(0,3)=3, r(1,2)=1, r(1,3)=1, r(2,3)=2 + // Optimal tree: {(0,1), (0,3), (2,3)} = edges at indices 0, 2, 5 + // Optimal cost: 20 + let edge_weights = vec![ + vec![0, 1, 3, 2], + vec![1, 0, 2, 4], + vec![3, 2, 0, 1], + vec![2, 4, 1, 0], + ]; + let requirements = vec![ + vec![0, 2, 1, 3], + vec![2, 0, 1, 1], + vec![1, 1, 0, 2], + vec![3, 1, 2, 0], + ]; + // Edges in lex order: (0,1)=idx0, (0,2)=idx1, (0,3)=idx2, (1,2)=idx3, (1,3)=idx4, (2,3)=idx5 + // Optimal tree: {(0,1), (0,3), (2,3)} -> config = [1, 0, 1, 0, 0, 1] + vec![crate::example_db::specs::ModelExampleSpec { + id: "optimum_communication_spanning_tree", + instance: Box::new(OptimumCommunicationSpanningTree::new( + edge_weights, + requirements, + )), + optimal_config: vec![1, 0, 1, 0, 0, 1], + optimal_value: serde_json::json!(20), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/optimum_communication_spanning_tree.rs"] +mod tests; diff --git a/src/models/misc/square_tiling.rs b/src/models/misc/square_tiling.rs new file mode 100644 index 00000000..fe27d4a3 --- /dev/null +++ b/src/models/misc/square_tiling.rs @@ -0,0 +1,212 @@ +//! Square Tiling (Wang Tiling) problem implementation. +//! +//! Given a set C of colors, a collection T of tiles (each with four colored edges: +//! top, right, bottom, left), and a positive integer N, determine whether there +//! exists a tiling of an N x N grid using tiles from T such that adjacent tiles +//! have matching edge colors. Tiles may be reused but not rotated or reflected. 
+ +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "SquareTiling", + display_name: "Square Tiling", + aliases: &["WangTiling"], + dimensions: &[], + module_path: module_path!(), + description: "Place colored square tiles on an N x N grid with matching edge colors", + fields: &[ + FieldInfo { name: "num_colors", type_name: "usize", description: "Number of colors" }, + FieldInfo { name: "tiles", type_name: "Vec<(usize, usize, usize, usize)>", description: "Collection of tile types (top, right, bottom, left)" }, + FieldInfo { name: "grid_size", type_name: "usize", description: "Grid dimension N for N x N tiling" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "SquareTiling", + fields: &["num_colors", "num_tiles", "grid_size"], + } +} + +/// A tile with four colored edges: (top, right, bottom, left). +/// Each color is an index in `0..num_colors`. 
+pub type Tile = (usize, usize, usize, usize); + +#[derive(Debug, Clone, Serialize)] +pub struct SquareTiling { + num_colors: usize, + tiles: Vec, + grid_size: usize, +} + +impl SquareTiling { + fn validate_inputs(num_colors: usize, tiles: &[Tile], grid_size: usize) -> Result<(), String> { + if num_colors == 0 { + return Err("SquareTiling requires at least one color".to_string()); + } + if tiles.is_empty() { + return Err("SquareTiling requires at least one tile".to_string()); + } + if grid_size == 0 { + return Err("SquareTiling requires grid_size >= 1".to_string()); + } + for (i, &(top, right, bottom, left)) in tiles.iter().enumerate() { + if top >= num_colors + || right >= num_colors + || bottom >= num_colors + || left >= num_colors + { + return Err(format!( + "Tile {} has color(s) out of range 0..{}", + i, num_colors + )); + } + } + Ok(()) + } + + /// Create a new `SquareTiling` instance, returning an error if inputs are invalid. + pub fn try_new(num_colors: usize, tiles: Vec, grid_size: usize) -> Result { + Self::validate_inputs(num_colors, &tiles, grid_size)?; + Ok(Self { + num_colors, + tiles, + grid_size, + }) + } + + /// Create a new `SquareTiling` instance. + /// + /// # Panics + /// + /// Panics if `num_colors` is 0, `tiles` is empty, `grid_size` is 0, + /// or any tile color is out of range. + pub fn new(num_colors: usize, tiles: Vec, grid_size: usize) -> Self { + Self::try_new(num_colors, tiles, grid_size).unwrap_or_else(|message| panic!("{message}")) + } + + /// Number of colors. + pub fn num_colors(&self) -> usize { + self.num_colors + } + + /// Number of tile types. + pub fn num_tiles(&self) -> usize { + self.tiles.len() + } + + /// Grid dimension N (for N x N grid). + pub fn grid_size(&self) -> usize { + self.grid_size + } + + /// The collection of tile types. + pub fn tiles(&self) -> &[Tile] { + &self.tiles + } + + /// Check whether a configuration represents a valid tiling. 
+ /// + /// The configuration is a flat array of tile indices of length `grid_size^2`, + /// laid out in row-major order: position `i * grid_size + j` corresponds + /// to grid cell `(i, j)` (row i, column j). + fn is_valid_tiling(&self, config: &[usize]) -> bool { + let n = self.grid_size; + if config.len() != n * n { + return false; + } + + // Check all tile indices are valid + for &tile_idx in config { + if tile_idx >= self.tiles.len() { + return false; + } + } + + // Check horizontal adjacency: right of (i,j) must match left of (i,j+1) + for i in 0..n { + for j in 0..n - 1 { + let left_tile = self.tiles[config[i * n + j]]; + let right_tile = self.tiles[config[i * n + j + 1]]; + if left_tile.1 != right_tile.3 { + return false; + } + } + } + + // Check vertical adjacency: bottom of (i,j) must match top of (i+1,j) + for i in 0..n - 1 { + for j in 0..n { + let upper_tile = self.tiles[config[i * n + j]]; + let lower_tile = self.tiles[config[(i + 1) * n + j]]; + if upper_tile.2 != lower_tile.0 { + return false; + } + } + } + + true + } +} + +#[derive(Deserialize)] +struct SquareTilingData { + num_colors: usize, + tiles: Vec, + grid_size: usize, +} + +impl<'de> Deserialize<'de> for SquareTiling { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = SquareTilingData::deserialize(deserializer)?; + Self::try_new(data.num_colors, data.tiles, data.grid_size).map_err(D::Error::custom) + } +} + +impl Problem for SquareTiling { + const NAME: &'static str = "SquareTiling"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![self.tiles.len(); self.grid_size * self.grid_size] + } + + fn evaluate(&self, config: &[usize]) -> Or { + Or(self.is_valid_tiling(config)) + } +} + +crate::declare_variants! 
{ + default SquareTiling => "num_tiles^(grid_size^2)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "square_tiling", + instance: Box::new(SquareTiling::new( + 3, + vec![(0, 1, 2, 0), (0, 0, 2, 1), (2, 1, 0, 0), (2, 0, 0, 1)], + 2, + )), + optimal_config: vec![0, 1, 2, 3], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/square_tiling.rs"] +mod tests; diff --git a/src/models/mod.rs b/src/models/mod.rs index e920df18..ebc4e8f7 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -12,14 +12,15 @@ pub mod set; pub use algebraic::{ AlgebraicEquationsOverGF2, ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, EquilibriumPoint, - FeasibleBasisExtension, MinimumMatrixDomination, MinimumWeightSolutionToLinearEquations, - QuadraticAssignment, QuadraticCongruences, QuadraticDiophantineEquations, - SimultaneousIncongruences, SparseMatrixCompression, BMF, ILP, QUBO, + FeasibleBasisExtension, MinimumMatrixCover, MinimumMatrixDomination, MinimumWeightDecoding, + MinimumWeightSolutionToLinearEquations, QuadraticAssignment, QuadraticCongruences, + QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, ILP, + QUBO, }; pub use formula::{ - CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, NonTautology, - OneInThreeSatisfiability, Planar3Satisfiability, QuantifiedBooleanFormulas, Quantifier, - Satisfiability, + CNFClause, CircuitSAT, KSatisfiability, Maximum2Satisfiability, NAESatisfiability, + NonTautology, OneInThreeSatisfiability, Planar3Satisfiability, QuantifiedBooleanFormulas, + Quantifier, Satisfiability, }; pub use graph::{ AcyclicPartition, BalancedCompleteBipartiteSubgraph, BicliqueCover, BiconnectivityAugmentation, @@ -29,42 +30,48 @@ pub use graph::{ HamiltonianPathBetweenTwoVertices, IntegralFlowBundles, 
IntegralFlowHomologousArcs, IntegralFlowWithMultipliers, IsomorphicSpanningTree, KClique, KColoring, Kernel, KthBestSpanningTree, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, MaxCut, - MaximalIS, MaximumAchromaticNumber, MaximumClique, MaximumIndependentSet, MaximumMatching, - MinMaxMulticenter, MinimumCoveringByCliques, MinimumCutIntoBoundedSets, MinimumDominatingSet, - MinimumDummyActivitiesPert, MinimumFeedbackArcSet, MinimumFeedbackVertexSet, - MinimumGeometricConnectedDominatingSet, MinimumIntersectionGraphBasis, MinimumMaximalMatching, - MinimumMultiwayCut, MinimumSumMulticenter, MinimumVertexCover, MixedChinesePostman, - MonochromaticTriangle, MultipleChoiceBranching, MultipleCopyFileAllocation, - OptimalLinearArrangement, PartialFeedbackEdgeSet, PartitionIntoCliques, PartitionIntoForests, - PartitionIntoPathsOfLength2, PartitionIntoPerfectMatchings, PartitionIntoTriangles, - PathConstrainedNetworkFlow, RootedTreeArrangement, RuralPostman, ShortestWeightConstrainedPath, - SpinGlass, SteinerTree, SteinerTreeInGraphs, StrongConnectivityAugmentation, - SubgraphIsomorphism, TravelingSalesman, UndirectedFlowLowerBounds, - UndirectedTwoCommodityIntegralFlow, + MaximalIS, MaximumAchromaticNumber, MaximumClique, MaximumDomaticNumber, MaximumIndependentSet, + MaximumLeafSpanningTree, MaximumMatching, MinMaxMulticenter, MinimumCoveringByCliques, + MinimumCutIntoBoundedSets, MinimumDominatingSet, MinimumDummyActivitiesPert, + MinimumEdgeCostFlow, MinimumFeedbackArcSet, MinimumFeedbackVertexSet, + MinimumGeometricConnectedDominatingSet, MinimumGraphBandwidth, MinimumIntersectionGraphBasis, + MinimumMaximalMatching, MinimumMultiwayCut, MinimumSumMulticenter, MinimumVertexCover, + MixedChinesePostman, MonochromaticTriangle, MultipleChoiceBranching, + MultipleCopyFileAllocation, OptimalLinearArrangement, PartialFeedbackEdgeSet, + PartitionIntoCliques, PartitionIntoForests, PartitionIntoPathsOfLength2, + PartitionIntoPerfectMatchings, PartitionIntoTriangles, 
PathConstrainedNetworkFlow, + RootedTreeArrangement, RuralPostman, ShortestWeightConstrainedPath, SpinGlass, SteinerTree, + SteinerTreeInGraphs, StrongConnectivityAugmentation, SubgraphIsomorphism, TravelingSalesman, + UndirectedFlowLowerBounds, UndirectedTwoCommodityIntegralFlow, VertexCover, }; pub use misc::PartiallyOrderedKnapsack; pub use misc::{ - AdditionalKey, Betweenness, BinPacking, CapacityAssignment, CbqRelation, + AdditionalKey, Betweenness, BinPacking, CapacityAssignment, CbqRelation, Clustering, ConjunctiveBooleanQuery, ConjunctiveQueryFoldability, ConsistencyOfDatabaseFrequencyTables, CosineProductIntegration, CyclicOrdering, DynamicStorageAllocation, EnsembleComputation, ExpectedRetrievalCost, Factoring, FeasibleRegisterAssignment, FlowShopScheduling, GroupingBySwapping, IntExpr, IntegerExpressionMembership, JobShopScheduling, Knapsack, - KthLargestMTuple, LongestCommonSubsequence, MinimumExternalMacroDataCompression, - MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, - NonLivenessFreePetriNet, Numerical3DimensionalMatching, OpenShopScheduling, PaintShop, - Partition, PrecedenceConstrainedScheduling, PreemptiveScheduling, ProductionPlanning, QueryArg, + KthLargestMTuple, LongestCommonSubsequence, MaximumLikelihoodRanking, MinimumAxiomSet, + MinimumCodeGenerationOneRegister, MinimumCodeGenerationParallelAssignments, + MinimumCodeGenerationUnlimitedRegisters, MinimumDecisionTree, MinimumDisjunctiveNormalForm, + MinimumExternalMacroDataCompression, MinimumFaultDetectionTestSet, + MinimumInternalMacroDataCompression, MinimumRegisterSufficiencyForLoops, + MinimumTardinessSequencing, MinimumWeightAndOrGraph, MultiprocessorScheduling, + NonLivenessFreePetriNet, Numerical3DimensionalMatching, NumericalMatchingWithTargetSums, + OpenShopScheduling, OptimumCommunicationSpanningTree, PaintShop, Partition, + PrecedenceConstrainedScheduling, PreemptiveScheduling, ProductionPlanning, QueryArg, 
RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, SequencingWithDeadlinesAndSetUpTimes, SequencingWithReleaseTimesAndDeadlines, - SequencingWithinIntervals, ShortestCommonSupersequence, StackerCrane, StaffScheduling, - StringToStringCorrection, SubsetProduct, SubsetSum, SumOfSquaresPartition, Term, - ThreePartition, TimetableDesign, + SequencingWithinIntervals, ShortestCommonSupersequence, SquareTiling, StackerCrane, + StaffScheduling, StringToStringCorrection, SubsetProduct, SubsetSum, SumOfSquaresPartition, + Term, ThreePartition, TimetableDesign, }; pub use set::{ ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, IntegerKnapsack, MaximumSetPacking, MinimumCardinalityKey, MinimumHittingSet, MinimumSetCovering, PrimeAttributeName, RootedTreeStorageAssignment, SetBasis, SetSplitting, ThreeDimensionalMatching, - TwoDimensionalConsecutiveSets, + ThreeMatroidIntersection, TwoDimensionalConsecutiveSets, }; diff --git a/src/models/set/mod.rs b/src/models/set/mod.rs index dd8f5115..970cb314 100644 --- a/src/models/set/mod.rs +++ b/src/models/set/mod.rs @@ -13,6 +13,7 @@ //! - [`SetBasis`]: Minimum-cardinality basis generating all sets by union //! - [`SetSplitting`]: 2-color universe so every specified subset is non-monochromatic //! - [`ThreeDimensionalMatching`]: Perfect matching in a tripartite 3-uniform hypergraph +//! - [`ThreeMatroidIntersection`]: Common independent set of size K in three partition matroids //! - [`TwoDimensionalConsecutiveSets`]: 2D consecutive arrangement of subset elements //! 
- [`MinimumCardinalityKey`]: Smallest attribute set that uniquely identifies tuples @@ -29,6 +30,7 @@ pub(crate) mod rooted_tree_storage_assignment; pub(crate) mod set_basis; pub(crate) mod set_splitting; pub(crate) mod three_dimensional_matching; +pub(crate) mod three_matroid_intersection; pub(crate) mod two_dimensional_consecutive_sets; pub use comparative_containment::ComparativeContainment; @@ -44,6 +46,7 @@ pub use rooted_tree_storage_assignment::RootedTreeStorageAssignment; pub use set_basis::SetBasis; pub use set_splitting::SetSplitting; pub use three_dimensional_matching::ThreeDimensionalMatching; +pub use three_matroid_intersection::ThreeMatroidIntersection; pub use two_dimensional_consecutive_sets::TwoDimensionalConsecutiveSets; #[cfg(feature = "example-db")] @@ -62,6 +65,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec>>", description: "Three partition matroids, each as a list of groups" }, + FieldInfo { name: "bound", type_name: "usize", description: "Required size K of the common independent set" }, + ], + } +} + +/// Three-Matroid Intersection problem. +/// +/// Given three partition matroids on a common ground set E = {0, ..., n-1} and a +/// positive integer K ≤ |E|, determine whether there exists a subset E' ⊆ E such +/// that |E'| = K and E' is independent in all three matroids. +/// +/// A partition matroid is defined by a partition of E into groups. A set S is +/// independent if |S ∩ G| ≤ 1 for every group G. +/// +/// While 2-matroid intersection is solvable in polynomial time (Edmonds, 1970), +/// the jump to three matroids captures NP-hardness. 
+/// +/// # Example +/// +/// ``` +/// use problemreductions::models::set::ThreeMatroidIntersection; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Ground set E = {0, 1, 2, 3, 4, 5}, K = 2 +/// let problem = ThreeMatroidIntersection::new( +/// 6, +/// vec![ +/// vec![vec![0, 1, 2], vec![3, 4, 5]], // M1 +/// vec![vec![0, 3], vec![1, 4], vec![2, 5]], // M2 +/// vec![vec![0, 4], vec![1, 5], vec![2, 3]], // M3 +/// ], +/// 2, +/// ); +/// +/// let solver = BruteForce::new(); +/// let solutions = solver.find_all_witnesses(&problem); +/// assert!(!solutions.is_empty()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ThreeMatroidIntersection { + /// Number of elements in the ground set E (elements are 0..ground_set_size). + ground_set_size: usize, + /// Three partition matroids. Each matroid is a list of groups, where each + /// group is a list of element indices. A set is independent in a partition + /// matroid if it contains at most one element from each group. + partitions: Vec>>, + /// Required size K of the common independent set. + bound: usize, +} + +impl ThreeMatroidIntersection { + /// Create a new Three-Matroid Intersection problem. 
+ /// + /// # Panics + /// + /// Panics if: + /// - `partitions` does not contain exactly 3 matroids + /// - Any element index is outside `0..ground_set_size` + /// - `bound` exceeds `ground_set_size` + pub fn new(ground_set_size: usize, partitions: Vec>>, bound: usize) -> Self { + assert_eq!( + partitions.len(), + 3, + "Expected exactly 3 partition matroids, got {}", + partitions.len() + ); + assert!( + bound <= ground_set_size, + "Bound {} exceeds ground set size {}", + bound, + ground_set_size + ); + for (m_idx, matroid) in partitions.iter().enumerate() { + for (g_idx, group) in matroid.iter().enumerate() { + for &elem in group { + assert!( + elem < ground_set_size, + "Matroid {} group {} contains element {} which is outside 0..{}", + m_idx, + g_idx, + elem, + ground_set_size + ); + } + } + } + Self { + ground_set_size, + partitions, + bound, + } + } + + /// Get the ground set size. + pub fn ground_set_size(&self) -> usize { + self.ground_set_size + } + + /// Get the three partition matroids. + pub fn partitions(&self) -> &[Vec>] { + &self.partitions + } + + /// Get the bound K. + pub fn bound(&self) -> usize { + self.bound + } + + /// Get the total number of groups across all three matroids. 
+ pub fn num_groups(&self) -> usize { + self.partitions.iter().map(|m| m.len()).sum() + } +} + +impl Problem for ThreeMatroidIntersection { + const NAME: &'static str = "ThreeMatroidIntersection"; + type Value = crate::types::Or; + + fn dims(&self) -> Vec { + vec![2; self.ground_set_size] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + if config.len() != self.ground_set_size || config.iter().any(|&v| v > 1) { + return crate::types::Or(false); + } + + // Check selected set has exactly K elements + let selected_count: usize = config.iter().filter(|&&v| v == 1).sum(); + if selected_count != self.bound { + return crate::types::Or(false); + } + + // Check independence in each of the three partition matroids + for matroid in &self.partitions { + for group in matroid { + let count = group.iter().filter(|&&e| config[e] == 1).count(); + if count > 1 { + return crate::types::Or(false); + } + } + } + + true + }) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default ThreeMatroidIntersection => "2^ground_set_size", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "three_matroid_intersection", + instance: Box::new(ThreeMatroidIntersection::new( + 6, + vec![ + vec![vec![0, 1, 2], vec![3, 4, 5]], + vec![vec![0, 3], vec![1, 4], vec![2, 5]], + vec![vec![0, 4], vec![1, 5], vec![2, 3]], + ], + 2, + )), + optimal_config: vec![1, 0, 0, 0, 0, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/set/three_matroid_intersection.rs"] +mod tests; diff --git a/src/rules/maximum2satisfiability_ilp.rs b/src/rules/maximum2satisfiability_ilp.rs new file mode 100644 index 00000000..1631aff9 --- /dev/null +++ b/src/rules/maximum2satisfiability_ilp.rs @@ -0,0 +1,143 @@ +//! 
Reduction from Maximum 2-Satisfiability (MAX-2-SAT) to ILP. +//! +//! The standard MAX-2-SAT formulation maps directly to a binary ILP: +//! - Variables: one binary variable per Boolean variable (truth assignment) +//! plus one binary indicator per clause (satisfaction indicator) +//! - Constraints: for each clause, the indicator is at most the sum of its +//! literal expressions, ensuring z_j = 1 only if the clause is satisfied +//! - Objective: maximize the sum of clause indicators + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::formula::Maximum2Satisfiability; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing Maximum2Satisfiability to ILP. +#[derive(Debug, Clone)] +pub struct ReductionMaximum2SatisfiabilityToILP { + target: ILP, + num_vars: usize, +} + +impl ReductionResult for ReductionMaximum2SatisfiabilityToILP { + type Source = Maximum2Satisfiability; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_vars].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_vars + num_clauses", + num_constraints = "num_clauses", + } +)] +impl ReduceTo> for Maximum2Satisfiability { + type Result = ReductionMaximum2SatisfiabilityToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vars(); + let m = self.num_clauses(); + let num_ilp_vars = n + m; + + // Build one constraint per clause: + // For clause j with literals l_1, l_2: + // z_{n+j} <= l_1' + l_2' + // where l_i' = y_{var-1} if positive, or (1 - y_{var-1}) if negative. + // + // Rearranged: z_{n+j} - sum(y_i for positive lit i) + sum(y_i for negative lit i) <= k + // where k = number of negated literals in the clause. 
+ let constraints: Vec = self + .clauses() + .iter() + .enumerate() + .map(|(j, clause)| { + let mut terms: Vec<(usize, f64)> = Vec::new(); + let mut neg_count = 0i32; + + // z_{n+j} has coefficient +1 + terms.push((n + j, 1.0)); + + for &lit in &clause.literals { + let var_idx = lit.unsigned_abs() as usize - 1; + if lit > 0 { + // positive literal: subtract y_i + terms.push((var_idx, -1.0)); + } else { + // negative literal: add y_i + terms.push((var_idx, 1.0)); + neg_count += 1; + } + } + + LinearConstraint::le(terms, neg_count as f64) + }) + .collect(); + + // Objective: maximize sum of z_j indicators + let objective: Vec<(usize, f64)> = (0..m).map(|j| (n + j, 1.0)).collect(); + + let target = ILP::new( + num_ilp_vars, + constraints, + objective, + ObjectiveSense::Maximize, + ); + + ReductionMaximum2SatisfiabilityToILP { + target, + num_vars: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::models::formula::CNFClause; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "maximum2satisfiability_to_ilp", + build: || { + let source = Maximum2Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2]), + CNFClause::new(vec![1, -2]), + CNFClause::new(vec![-1, 3]), + CNFClause::new(vec![-1, -3]), + CNFClause::new(vec![2, 4]), + CNFClause::new(vec![-3, -4]), + CNFClause::new(vec![3, 4]), + ], + ); + // Optimal source config: [1,1,0,1] satisfies 6 of 7 clauses. + // ILP target config: first 4 are truth vars, next 7 are clause indicators. 
+ // Clause satisfaction with [1,1,0,1] (x1=T, x2=T, x3=F, x4=T): + // C0: (x1 OR x2) = T -> z4=1 + // C1: (x1 OR ~x2) = T -> z5=1 + // C2: (~x1 OR x3) = F -> z6=0 + // C3: (~x1 OR ~x3) = T -> z7=1 + // C4: (x2 OR x4) = T -> z8=1 + // C5: (~x3 OR ~x4) = T -> z9=1 + // C6: (x3 OR x4) = T -> z10=1 + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![1, 1, 0, 1], + target_config: vec![1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/maximum2satisfiability_ilp.rs"] +mod tests; diff --git a/src/rules/maximumdomaticnumber_ilp.rs b/src/rules/maximumdomaticnumber_ilp.rs new file mode 100644 index 00000000..ebf77215 --- /dev/null +++ b/src/rules/maximumdomaticnumber_ilp.rs @@ -0,0 +1,124 @@ +//! Reduction from MaximumDomaticNumber to ILP (Integer Linear Programming). +//! +//! The Maximum Domatic Number problem can be formulated as a binary ILP: +//! - Variables: x_{v,i} for each vertex v and set index i (binary: vertex v in set i), +//! plus y_i for each set index i (binary: set i is used). +//! - Partition constraints: for each v, Σ_i x_{v,i} = 1 +//! - Domination constraints: for each v and i, x_{v,i} + Σ_{u ∈ N(v)} x_{u,i} ≥ y_i +//! - Linking constraints: x_{v,i} ≤ y_i for each v, i +//! - Objective: maximize Σ y_i + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MaximumDomaticNumber; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing MaximumDomaticNumber to ILP. 
+/// +/// Variable layout: +/// - x_{v,i} at index v*n + i (vertex v assigned to set i) +/// - y_i at index n*n + i (set i is used) +#[derive(Debug, Clone)] +pub struct ReductionDomaticNumberToILP { + target: ILP, + n: usize, +} + +impl ReductionResult for ReductionDomaticNumberToILP { + type Source = MaximumDomaticNumber; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution from ILP back to MaximumDomaticNumber. + /// + /// For each vertex v, find the set index i where x_{v,i} = 1. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.n; + let mut config = vec![0; n]; + for v in 0..n { + for i in 0..n { + if target_solution[v * n + i] == 1 { + config[v] = i; + break; + } + } + } + config + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices * num_vertices + num_vertices", + num_constraints = "num_vertices + num_vertices * num_vertices + num_vertices * num_vertices", + } +)] +impl ReduceTo> for MaximumDomaticNumber { + type Result = ReductionDomaticNumberToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.graph().num_vertices(); + let num_vars = n * n + n; + let mut constraints = Vec::new(); + + // Partition constraints: for each vertex v, Σ_i x_{v,i} = 1 + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|i| (v * n + i, 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Domination constraints: for each v, i: x_{v,i} + Σ_{u ∈ N(v)} x_{u,i} >= y_i + // Rewritten as: x_{v,i} + Σ_{u ∈ N(v)} x_{u,i} - y_i >= 0 + for v in 0..n { + let neighbors = self.graph().neighbors(v); + for i in 0..n { + let mut terms: Vec<(usize, f64)> = vec![(v * n + i, 1.0)]; + for &u in &neighbors { + terms.push((u * n + i, 1.0)); + } + // -y_i + terms.push((n * n + i, -1.0)); + constraints.push(LinearConstraint::ge(terms, 0.0)); + } + } + + // Linking constraints: x_{v,i} <= y_i for each v, i + // Forces y_i = 1 whenever any vertex is assigned to set i, 
+ // ensuring extract_solution always yields a valid partition. + for v in 0..n { + for i in 0..n { + constraints.push(LinearConstraint::le( + vec![(v * n + i, 1.0), (n * n + i, -1.0)], + 0.0, + )); + } + } + + // Objective: maximize Σ y_i + let objective: Vec<(usize, f64)> = (0..n).map(|i| (n * n + i, 1.0)).collect(); + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Maximize); + + ReductionDomaticNumberToILP { target, n } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "maximumdomaticnumber_to_ilp", + build: || { + // Use small P3 graph (3 vertices, domatic number = 2) + let source = MaximumDomaticNumber::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/maximumdomaticnumber_ilp.rs"] +mod tests; diff --git a/src/rules/maximumleafspanningtree_ilp.rs b/src/rules/maximumleafspanningtree_ilp.rs new file mode 100644 index 00000000..c6bdcb78 --- /dev/null +++ b/src/rules/maximumleafspanningtree_ilp.rs @@ -0,0 +1,170 @@ +//! Reduction from MaximumLeafSpanningTree to ILP (Integer Linear Programming). +//! +//! Uses a single-commodity flow formulation for spanning tree connectivity +//! (rooted at vertex 0) combined with leaf-indicator variables. +//! +//! Variable layout (all non-negative integers, bounded by explicit constraints): +//! - `y_e` for each undirected edge `e` (indices `0..m`): edge selector (binary) +//! - `z_v` for each vertex `v` (indices `m..m+n`): leaf indicator (binary) +//! - `f_{2e}`, `f_{2e+1}` for each edge `e=(u,v)` (indices `m+n..m+n+2m`): +//! directed flow from u to v and v to u respectively +//! +//! Constraints: +//! 1. Tree cardinality: sum(y_e) = n-1 +//! 2. Flow conservation: net inflow = 1 for each non-root vertex; net outflow = n-1 for root +//! 3. 
Flow-edge linking: f_{uv} + f_{vu} <= (n-1) * y_e +//! 4. Leaf detection: degree_v <= 1 + (n-2)*(1 - z_v) for each vertex v +//! 5. Variable bounds: y_e <= 1, z_v <= 1 +//! +//! Objective: maximize sum(z_v) + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MaximumLeafSpanningTree; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing MaximumLeafSpanningTree to ILP. +#[derive(Debug, Clone)] +pub struct ReductionMaximumLeafSpanningTreeToILP { + target: ILP, + num_edges: usize, +} + +impl ReductionResult for ReductionMaximumLeafSpanningTreeToILP { + type Source = MaximumLeafSpanningTree; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // First m variables are edge selectors + target_solution[..self.num_edges].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "3 * num_edges + num_vertices", + num_constraints = "3 * num_vertices + 2 * num_edges + 1", + } +)] +impl ReduceTo> for MaximumLeafSpanningTree { + type Result = ReductionMaximumLeafSpanningTreeToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let m = self.num_edges(); + let edges = self.graph().edges(); + let root = 0usize; + + let num_vars = 3 * m + n; + // Variable indices + let edge_var = |e: usize| e; // y_e: 0..m + let leaf_var = |v: usize| m + v; // z_v: m..m+n + let flow_var = |e: usize, dir: usize| m + n + 2 * e + dir; // f: m+n..m+n+2m + + let mut constraints = Vec::new(); + + // 1. Tree cardinality: sum(y_e) = n - 1 + let terms: Vec<(usize, f64)> = (0..m).map(|e| (edge_var(e), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, (n - 1) as f64)); + + // 2. 
Flow conservation + // Build incidence: for each vertex, which edges are incident and which direction + for vertex in 0..n { + let mut terms = Vec::new(); + for (edge_idx, &(u, v)) in edges.iter().enumerate() { + // flow_var(e, 0) is flow from u to v + // flow_var(e, 1) is flow from v to u + if v == vertex { + // inflow from edge direction u->v + terms.push((flow_var(edge_idx, 0), 1.0)); + // outflow from edge direction v->u + terms.push((flow_var(edge_idx, 1), -1.0)); + } + if u == vertex { + // outflow from edge direction u->v + terms.push((flow_var(edge_idx, 0), -1.0)); + // inflow from edge direction v->u + terms.push((flow_var(edge_idx, 1), 1.0)); + } + } + + let rhs = if vertex == root { + // Root sends n-1 units out => net inflow = -(n-1) + -((n - 1) as f64) + } else { + // Each non-root vertex receives exactly 1 unit + 1.0 + }; + constraints.push(LinearConstraint::eq(terms, rhs)); + } + + // 3. Flow-edge linking: f_{uv} + f_{vu} <= (n-1) * y_e + for edge_idx in 0..m { + constraints.push(LinearConstraint::le( + vec![ + (flow_var(edge_idx, 0), 1.0), + (flow_var(edge_idx, 1), 1.0), + (edge_var(edge_idx), -((n - 1) as f64)), + ], + 0.0, + )); + } + + // 4. Leaf detection: for each vertex v, + // degree_v <= 1 + (n-2)*(1 - z_v) + // i.e. sum_{e incident to v} y_e + (n-2)*z_v <= n-1 + // Build incidence lists + let mut incident: Vec> = vec![vec![]; n]; + for (edge_idx, &(u, v)) in edges.iter().enumerate() { + incident[u].push(edge_idx); + incident[v].push(edge_idx); + } + + for (v, inc) in incident.iter().enumerate() { + let mut terms: Vec<(usize, f64)> = inc.iter().map(|&e| (edge_var(e), 1.0)).collect(); + terms.push((leaf_var(v), (n - 2) as f64)); + constraints.push(LinearConstraint::le(terms, (n - 1) as f64)); + } + + // 5. 
Variable bounds: y_e <= 1, z_v <= 1 + for e in 0..m { + constraints.push(LinearConstraint::le(vec![(edge_var(e), 1.0)], 1.0)); + } + for v in 0..n { + constraints.push(LinearConstraint::le(vec![(leaf_var(v), 1.0)], 1.0)); + } + + // Objective: maximize sum(z_v) + let objective: Vec<(usize, f64)> = (0..n).map(|v| (leaf_var(v), 1.0)).collect(); + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Maximize); + + ReductionMaximumLeafSpanningTreeToILP { + target, + num_edges: m, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "maximumleafspanningtree_to_ilp", + build: || { + let source = MaximumLeafSpanningTree::new(SimpleGraph::new( + 4, + vec![(0, 1), (1, 2), (2, 3), (0, 2)], + )); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/maximumleafspanningtree_ilp.rs"] +mod tests; diff --git a/src/rules/maximumlikelihoodranking_ilp.rs b/src/rules/maximumlikelihoodranking_ilp.rs new file mode 100644 index 00000000..52abbac6 --- /dev/null +++ b/src/rules/maximumlikelihoodranking_ilp.rs @@ -0,0 +1,140 @@ +//! Reduction from MaximumLikelihoodRanking to ILP (Integer Linear Programming). +//! +//! Binary variables x_{ij} for each pair (i, j) with i < j: +//! x_{ij} = 1 means item i is ranked before item j. +//! +//! Transitivity constraints: for each triple {a, b, c} with a < b < c: +//! x_{ab} + x_{bc} - x_{ac} <= 1 +//! -x_{ab} - x_{bc} + x_{ac} <= 0 +//! +//! Objective: minimize sum_{i, + /// Number of items in the original problem. + n: usize, +} + +/// Map a pair (i, j) with i < j to a variable index. +fn pair_index(i: usize, j: usize, n: usize) -> usize { + debug_assert!(i < j && j < n); + // Sum of (n-1) + (n-2) + ... + (n-i) for rows before i, plus offset within row i. 
+ // = i*n - i*(i+1)/2 + (j - i - 1) + i * n - i * (i + 1) / 2 + (j - i - 1) +} + +impl ReductionResult for ReductionMaximumLikelihoodRankingToILP { + type Source = MaximumLikelihoodRanking; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.n; + if n == 0 { + return vec![]; + } + + // Count how many items are ranked before each item i. + // config[i] = number of items ranked before i = rank of item i. + let mut config = vec![0usize; n]; + for i in 0..n { + for j in (i + 1)..n { + let idx = pair_index(i, j, n); + if target_solution[idx] == 1 { + // i is before j -> contributes 1 to config[j] + config[j] += 1; + } else { + // j is before i -> contributes 1 to config[i] + config[i] += 1; + } + } + } + + config + } +} + +#[reduction( + overhead = { + num_vars = "num_items * (num_items - 1) / 2", + num_constraints = "num_items * (num_items - 1) * (num_items - 2) / 3", + } +)] +impl ReduceTo> for MaximumLikelihoodRanking { + type Result = ReductionMaximumLikelihoodRankingToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_items(); + let num_vars = n * (n.saturating_sub(1)) / 2; + let matrix = self.matrix(); + + // Build objective: minimize sum_{i = Vec::new(); + for (i, row_i) in matrix.iter().enumerate() { + for j in (i + 1)..n { + let coeff = (matrix[j][i] - row_i[j]) as f64; + if coeff != 0.0 { + objective.push((pair_index(i, j, n), coeff)); + } + } + } + + // Build transitivity constraints: + // For each triple (a, b, c) with a < b < c: + // x_{ab} + x_{bc} - x_{ac} <= 1 + // -x_{ab} - x_{bc} + x_{ac} <= 0 + let mut constraints = Vec::new(); + for a in 0..n { + for b in (a + 1)..n { + for c in (b + 1)..n { + let ab = pair_index(a, b, n); + let bc = pair_index(b, c, n); + let ac = pair_index(a, c, n); + + // x_{ab} + x_{bc} - x_{ac} <= 1 + constraints.push(LinearConstraint::le( + vec![(ab, 1.0), (bc, 1.0), (ac, -1.0)], + 1.0, + )); + + // 
-x_{ab} - x_{bc} + x_{ac} <= 0 + constraints.push(LinearConstraint::le( + vec![(ab, -1.0), (bc, -1.0), (ac, 1.0)], + 0.0, + )); + } + } + } + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionMaximumLikelihoodRankingToILP { target, n } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "maximum_likelihood_ranking_to_ilp", + build: || { + // Use a 3-item matrix for a small example: C(3,2)=3 variables + let matrix = vec![vec![0, 3, 2], vec![2, 0, 4], vec![3, 1, 0]]; + crate::example_db::specs::rule_example_via_ilp(MaximumLikelihoodRanking::new(matrix)) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/maximumlikelihoodranking_ilp.rs"] +mod tests; diff --git a/src/rules/minimumcapacitatedspanningtree_ilp.rs b/src/rules/minimumcapacitatedspanningtree_ilp.rs new file mode 100644 index 00000000..7208dc43 --- /dev/null +++ b/src/rules/minimumcapacitatedspanningtree_ilp.rs @@ -0,0 +1,181 @@ +//! Reduction from MinimumCapacitatedSpanningTree to ILP (Integer Linear Programming). +//! +//! Uses a requirement-weighted single-commodity flow formulation: +//! - Each non-root vertex generates r(v) units of flow toward the root +//! - Flow on each edge is bounded by the capacity constraint +//! - Flow-edge linking ensures flow only travels on selected edges +//! +//! Variable layout (all non-negative integers, ILP): +//! - `y_e` for each undirected edge `e` (indices `0..m`): edge selector (binary) +//! - `f_{2e}`, `f_{2e+1}` for each edge `e=(u,v)` (indices `m..3m`): +//! directed flow from u to v and v to u respectively +//! +//! Constraints: +//! 1. Tree cardinality: sum(y_e) = n-1 +//! 2. Binary edge bounds: y_e <= 1 +//! 3. Flow conservation: each non-root vertex v generates r(v) units; +//! root absorbs all (total R = sum of requirements) +//! 4. Flow-edge linking: f_{uv} + f_{vu} <= R * y_e +//! 5. 
Capacity: f_{uv} <= c and f_{vu} <= c for each directed edge +//! +//! Objective: minimize sum(w_e * y_e) + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MinimumCapacitatedSpanningTree; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; +use crate::types::WeightElement; + +/// Result of reducing MinimumCapacitatedSpanningTree to ILP. +#[derive(Debug, Clone)] +pub struct ReductionMinimumCapacitatedSpanningTreeToILP { + target: ILP, + num_edges: usize, +} + +impl ReductionResult for ReductionMinimumCapacitatedSpanningTreeToILP { + type Source = MinimumCapacitatedSpanningTree; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // First m variables are edge selectors + target_solution[..self.num_edges].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "3 * num_edges", + num_constraints = "5 * num_edges + num_vertices + 1", + } +)] +impl ReduceTo> for MinimumCapacitatedSpanningTree { + type Result = ReductionMinimumCapacitatedSpanningTreeToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let m = self.num_edges(); + let edges = self.graph().edges(); + let root = self.root(); + let requirements = self.requirements(); + let cap = *self.capacity() as f64; + + let num_vars = 3 * m; + + // Variable indices + let edge_var = |e: usize| e; // y_e: 0..m + let flow_var = |e: usize, dir: usize| m + 2 * e + dir; // f: m..3m + + // Total requirement (flow from all non-root vertices to root) + let total_req: f64 = requirements.iter().map(|r| r.to_sum() as f64).sum(); + + let mut constraints = Vec::new(); + + // 1. Tree cardinality: sum(y_e) = n - 1 + let terms: Vec<(usize, f64)> = (0..m).map(|e| (edge_var(e), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, (n - 1) as f64)); + + // 2. 
Binary edge bounds: y_e <= 1 + for e in 0..m { + constraints.push(LinearConstraint::le(vec![(edge_var(e), 1.0)], 1.0)); + } + + // 3. Flow conservation + // For non-root vertex v: outflow - inflow = r(v) + // For root: inflow - outflow = total_req (i.e., outflow - inflow = -total_req) + for (vertex, req) in requirements.iter().enumerate() { + let mut terms = Vec::new(); + for (edge_idx, &(u, v)) in edges.iter().enumerate() { + // flow_var(e, 0) = flow from u to v + // flow_var(e, 1) = flow from v to u + if v == vertex { + // inflow from u->v direction + terms.push((flow_var(edge_idx, 0), 1.0)); + // outflow from v->u direction + terms.push((flow_var(edge_idx, 1), -1.0)); + } + if u == vertex { + // outflow from u->v direction + terms.push((flow_var(edge_idx, 0), -1.0)); + // inflow from v->u direction + terms.push((flow_var(edge_idx, 1), 1.0)); + } + } + + let rhs = if vertex == root { + // Root absorbs all flow: net inflow = total_req + total_req + } else { + // Non-root vertex generates r(v) units toward root: + // net inflow = -r(v) + -(req.to_sum() as f64) + }; + constraints.push(LinearConstraint::eq(terms, rhs)); + } + + // 4. Flow-edge linking: f_{uv} + f_{vu} <= R * y_e + for edge_idx in 0..m { + constraints.push(LinearConstraint::le( + vec![ + (flow_var(edge_idx, 0), 1.0), + (flow_var(edge_idx, 1), 1.0), + (edge_var(edge_idx), -total_req), + ], + 0.0, + )); + } + + // 5. 
Capacity bounds: f_{uv} <= c, f_{vu} <= c + for edge_idx in 0..m { + constraints.push(LinearConstraint::le( + vec![(flow_var(edge_idx, 0), 1.0)], + cap, + )); + constraints.push(LinearConstraint::le( + vec![(flow_var(edge_idx, 1), 1.0)], + cap, + )); + } + + // Objective: minimize sum(w_e * y_e) + let objective: Vec<(usize, f64)> = self + .weights() + .iter() + .enumerate() + .map(|(edge_idx, w)| (edge_var(edge_idx), w.to_sum() as f64)) + .collect(); + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionMinimumCapacitatedSpanningTreeToILP { + target, + num_edges: m, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumcapacitatedspanningtree_to_ilp", + build: || { + let source = MinimumCapacitatedSpanningTree::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]), + vec![2, 3, 1, 1, 2], // edge weights + 0, // root + vec![0, 1, 1, 1], // requirements + 2, // capacity + ); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumcapacitatedspanningtree_ilp.rs"] +mod tests; diff --git a/src/rules/minimumedgecostflow_ilp.rs b/src/rules/minimumedgecostflow_ilp.rs new file mode 100644 index 00000000..fda1c690 --- /dev/null +++ b/src/rules/minimumedgecostflow_ilp.rs @@ -0,0 +1,152 @@ +//! Reduction from MinimumEdgeCostFlow to ILP. +//! +//! Variables (2m total): +//! f_a (a = 0..m-1) — integer flow on arc a, domain {0, ..., c(a)} +//! y_a (a = m..2m-1) — binary indicator: y_a = 1 iff f_a > 0 +//! +//! Constraints: +//! f_a ≤ c(a) — capacity (m constraints) +//! f_a ≤ c(a) · y_a — linking: forces y_a = 1 when f_a > 0 (m constraints) +//! y_a ≤ 1 — binary bound on indicators (m constraints) +//! conservation at non-terminal vertices (|V|-2 equality constraints) +//! net flow into sink ≥ R (1 constraint) +//! +//! 
Total: 3m + |V| - 1 constraints (but we omit redundant capacity since +//! linking already implies f_a ≤ c(a) when y_a ≤ 1). +//! Actually we keep all for clarity: 2m + |V| - 1 constraints. +//! +//! Objective: minimize Σ p(a) · y_a. +//! Extraction: first m variables are the flow values. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MinimumEdgeCostFlow; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing MinimumEdgeCostFlow to ILP. +/// +/// Variable layout: +/// - `f_a` at index a for a in 0..num_edges (flow on arc a) +/// - `y_a` at index num_edges + a for a in 0..num_edges (binary indicator) +#[derive(Debug, Clone)] +pub struct ReductionMECFToILP { + target: ILP, + num_edges: usize, +} + +impl ReductionResult for ReductionMECFToILP { + type Source = MinimumEdgeCostFlow; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract flow solution: first m variables are the flow values. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_edges].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "2 * num_edges", + num_constraints = "2 * num_edges + num_vertices - 1", + } +)] +impl ReduceTo> for MinimumEdgeCostFlow { + type Result = ReductionMECFToILP; + + fn reduce_to(&self) -> Self::Result { + let arcs = self.graph().arcs(); + let m = arcs.len(); + let n = self.num_vertices(); + let num_vars = 2 * m; + + let f = |a: usize| a; // flow variable index + let y = |a: usize| m + a; // indicator variable index + + let mut constraints = Vec::new(); + + // 1. Linking: f_a - c(a) * y_a ≤ 0 (forces y_a = 1 when f_a > 0) + for a in 0..m { + constraints.push(LinearConstraint::le( + vec![(f(a), 1.0), (y(a), -(self.capacities()[a] as f64))], + 0.0, + )); + } + + // 2. Binary bound: y_a ≤ 1 + for a in 0..m { + constraints.push(LinearConstraint::le(vec![(y(a), 1.0)], 1.0)); + } + + // 3. 
Flow conservation at non-terminal vertices + for vertex in 0..n { + if vertex == self.source() || vertex == self.sink() { + continue; + } + + let mut terms: Vec<(usize, f64)> = Vec::new(); + for (a, &(u, v)) in arcs.iter().enumerate() { + if vertex == u { + terms.push((f(a), -1.0)); // outgoing + } else if vertex == v { + terms.push((f(a), 1.0)); // incoming + } + } + + if !terms.is_empty() { + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + } + + // 4. Flow requirement: net flow into sink ≥ R + let sink = self.sink(); + let mut sink_terms: Vec<(usize, f64)> = Vec::new(); + for (a, &(u, v)) in arcs.iter().enumerate() { + if v == sink { + sink_terms.push((f(a), 1.0)); + } else if u == sink { + sink_terms.push((f(a), -1.0)); + } + } + constraints.push(LinearConstraint::ge( + sink_terms, + self.required_flow() as f64, + )); + + // Objective: minimize Σ p(a) · y_a + let objective: Vec<(usize, f64)> = + (0..m).map(|a| (y(a), self.prices()[a] as f64)).collect(); + + ReductionMECFToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_edges: m, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::topology::DirectedGraph; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumedgecostflow_to_ilp", + build: || { + let source = MinimumEdgeCostFlow::new( + DirectedGraph::new(5, vec![(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)]), + vec![3, 1, 2, 0, 0, 0], + vec![2, 2, 2, 2, 2, 2], + 0, + 4, + 3, + ); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumedgecostflow_ilp.rs"] +mod tests; diff --git a/src/rules/minimumgraphbandwidth_ilp.rs b/src/rules/minimumgraphbandwidth_ilp.rs new file mode 100644 index 00000000..119d657d --- /dev/null +++ b/src/rules/minimumgraphbandwidth_ilp.rs @@ -0,0 +1,151 @@ +//! Reduction from MinimumGraphBandwidth to ILP (Integer Linear Programming). 
+//! +//! Position-assignment formulation with bandwidth variable: +//! - Binary x_{v,p}: vertex v gets position p +//! - Integer position variables pos_v = sum_p p * x_{v,p} +//! - Integer bandwidth variable B +//! - For each edge (u,v): pos_u - pos_v <= B, pos_v - pos_u <= B +//! - Objective: minimize B + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MinimumGraphBandwidth; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing MinimumGraphBandwidth to ILP. +/// +/// Variable layout (ILP, non-negative integers): +/// - `x_{v,p}` at index `v * n + p`, bounded to {0,1} +/// - `pos_v` at index `n^2 + v`, integer position in {0, ..., n-1} +/// - `B` (bandwidth) at index `n^2 + n` +#[derive(Debug, Clone)] +pub struct ReductionMGBToILP { + target: ILP, + num_vertices: usize, +} + +impl ReductionResult for ReductionMGBToILP { + type Source = MinimumGraphBandwidth; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: for each vertex v, output its position p (the unique p with x_{v,p} = 1). 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_vertices; + (0..n) + .map(|v| { + (0..n) + .find(|&p| target_solution[v * n + p] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices^2 + num_vertices + 1", + num_constraints = "2 * num_vertices + num_vertices^2 + num_vertices + num_vertices + 1 + 2 * num_edges", + } +)] +impl ReduceTo> for MinimumGraphBandwidth { + type Result = ReductionMGBToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let graph = self.graph(); + let edges = graph.edges(); + + let num_x = n * n; + let num_vars = num_x + n + 1; + + let x_idx = |v: usize, p: usize| -> usize { v * n + p }; + let pos_idx = |v: usize| -> usize { num_x + v }; + let b_idx = num_x + n; + + let mut constraints = Vec::new(); + + // Assignment: each vertex in exactly one position + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_idx(v, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Assignment: each position has exactly one vertex + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (x_idx(v, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Binary bounds for x variables (ILP) + for v in 0..n { + for p in 0..n { + constraints.push(LinearConstraint::le(vec![(x_idx(v, p), 1.0)], 1.0)); + } + } + + // Position variable linking: pos_v = sum_p p * x_{v,p} + for v in 0..n { + let mut terms: Vec<(usize, f64)> = vec![(pos_idx(v), 1.0)]; + for p in 0..n { + terms.push((x_idx(v, p), -(p as f64))); + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Position bounds: 0 <= pos_v <= n-1 + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(pos_idx(v), 1.0)], + (n - 1) as f64, + )); + } + + // Bandwidth upper bound: B <= n-1 (max possible position difference) + constraints.push(LinearConstraint::le(vec![(b_idx, 1.0)], (n - 1) as f64)); + 
+ // Bandwidth constraints: for each edge (u,v): + // pos_u - pos_v <= B => pos_u - pos_v - B <= 0 + // pos_v - pos_u <= B => pos_v - pos_u - B <= 0 + for &(u, v) in edges.iter() { + constraints.push(LinearConstraint::le( + vec![(pos_idx(u), 1.0), (pos_idx(v), -1.0), (b_idx, -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(pos_idx(v), 1.0), (pos_idx(u), -1.0), (b_idx, -1.0)], + 0.0, + )); + } + + // Objective: minimize B + let objective = vec![(b_idx, 1.0)]; + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionMGBToILP { + target, + num_vertices: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumgraphbandwidth_to_ilp", + build: || { + // Star S4: center 0 connected to 1, 2, 3 + let source = + MinimumGraphBandwidth::new(SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)])); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumgraphbandwidth_ilp.rs"] +mod tests; diff --git a/src/rules/minimummatrixcover_ilp.rs b/src/rules/minimummatrixcover_ilp.rs new file mode 100644 index 00000000..ecbfe96e --- /dev/null +++ b/src/rules/minimummatrixcover_ilp.rs @@ -0,0 +1,157 @@ +//! Reduction from MinimumMatrixCover to ILP (Integer Linear Programming). +//! +//! Uses McCormick linearization to convert the quadratic sign assignment +//! objective into a linear program with binary variables. +//! +//! Binary variables x_i ∈ {0,1} where f(i) = 2x_i - 1. +//! 
For i, + n: usize, +} + +impl ReductionResult for ReductionMinimumMatrixCoverToILP { + type Source = MinimumMatrixCover; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // First n variables are the sign variables x_0,...,x_{n-1} + target_solution[..self.n].to_vec() + } +} + +/// Map pair (i,j) with i usize { + debug_assert!(i < j); + // Index into upper triangle: sum_{k=0}^{i-1} (n-1-k) + (j - i - 1) + let offset: usize = (0..i).map(|k| n - 1 - k).sum(); + n + offset + (j - i - 1) +} + +#[reduction( + overhead = { + num_vars = "num_rows + num_rows * (num_rows - 1) / 2", + num_constraints = "3 * num_rows * (num_rows - 1) / 2", + } +)] +impl ReduceTo> for MinimumMatrixCover { + type Result = ReductionMinimumMatrixCoverToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_rows(); + let num_pairs = n * (n.saturating_sub(1)) / 2; + let num_vars = n + num_pairs; + + // Build constraints: 3 per pair (i,j) with i = obj_coeffs + .into_iter() + .enumerate() + .filter(|&(_, c)| c != 0.0) + .collect(); + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionMinimumMatrixCoverToILP { target, n } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimum_matrix_cover_to_ilp", + build: || { + // Use a small 2×2 instance for the rule example + let source = MinimumMatrixCover::new(vec![vec![0, 3], vec![2, 0]]); + // Config [0,1] → f=(-1,+1) → value = 0·1 + 3·(-1) + 2·(-1) + 0·1 = -5 + // Config [1,0] → f=(+1,-1) → value = 0·1 + 3·(-1) + 2·(-1) + 0·1 = -5 + // Config [0,0] → f=(-1,-1) → value = 0+3+2+0 = 5 + // Config [1,1] → f=(+1,+1) → value = 0+3+2+0 = 5 + // Optimal is [0,1] or [1,0] with value -5 + // Source config [0,1], target config: x_0=0, x_1=1, y_{01}=0 + 
crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![0, 1], + target_config: vec![0, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimummatrixcover_ilp.rs"] +mod tests; diff --git a/src/rules/minimummetricdimension_ilp.rs b/src/rules/minimummetricdimension_ilp.rs new file mode 100644 index 00000000..16516c72 --- /dev/null +++ b/src/rules/minimummetricdimension_ilp.rs @@ -0,0 +1,97 @@ +//! Reduction from MinimumMetricDimension to ILP (Integer Linear Programming). +//! +//! The Metric Dimension problem can be formulated as a binary ILP: +//! - Variables: One binary variable z_v per vertex (0 = not selected, 1 = selected) +//! - Constraints: For each pair (u, v) with u < v: +//! Σ_{w : d(u,w) ≠ d(v,w)} z_w ≥ 1 +//! (at least one resolving vertex distinguishes u from v) +//! - Objective: Minimize Σ z_v + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::minimum_metric_dimension::bfs_distances; +use crate::models::graph::MinimumMetricDimension; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing MinimumMetricDimension to ILP. +/// +/// This reduction creates a binary ILP where: +/// - Each vertex corresponds to a binary variable z_v +/// - For each pair (u, v) with u < v, the constraint +/// Σ_{w : d(u,w) ≠ d(v,w)} z_w ≥ 1 ensures that the pair is resolved +/// - The objective minimizes the total number of selected vertices +#[derive(Debug, Clone)] +pub struct ReductionMDToILP { + target: ILP, +} + +impl ReductionResult for ReductionMDToILP { + type Source = MinimumMetricDimension; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution from ILP back to MinimumMetricDimension. 
+ /// + /// Since the mapping is 1:1 (each vertex maps to one binary variable), + /// the solution extraction is simply copying the configuration. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices", + num_constraints = "num_vertices * (num_vertices - 1) / 2", + } +)] +impl ReduceTo> for MinimumMetricDimension { + type Result = ReductionMDToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.graph().num_vertices(); + + // Precompute all-pairs shortest paths via BFS from each vertex + let all_dists: Vec> = (0..n).map(|v| bfs_distances(self.graph(), v)).collect(); + + // Constraints: For each pair (u, v) with u < v, + // Σ_{w : d(u,w) ≠ d(v,w)} z_w ≥ 1 + let mut constraints = Vec::new(); + for u in 0..n { + for v in (u + 1)..n { + let terms: Vec<(usize, f64)> = (0..n) + .filter(|&w| all_dists[w][u] != all_dists[w][v]) + .map(|w| (w, 1.0)) + .collect(); + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + } + + // Objective: minimize Σ z_v (unit weights) + let objective: Vec<(usize, f64)> = (0..n).map(|v| (v, 1.0)).collect(); + + let target = ILP::new(n, constraints, objective, ObjectiveSense::Minimize); + + ReductionMDToILP { target } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimummetricdimension_to_ilp", + build: || { + // P3 path graph: 3 vertices, metric dimension = 1 + let source = MinimumMetricDimension::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimummetricdimension_ilp.rs"] +mod tests; diff --git a/src/rules/minimumweightdecoding_ilp.rs b/src/rules/minimumweightdecoding_ilp.rs new file mode 100644 index 00000000..2961fac1 --- /dev/null +++ b/src/rules/minimumweightdecoding_ilp.rs @@ -0,0 +1,115 
@@ +//! Reduction from MinimumWeightDecoding to ILP. +//! +//! The GF(2) constraint Hx ≡ s (mod 2) is linearized by introducing integer +//! slack variables k_i for each row: +//! +//! Σ_j H[i][j] * x_j - 2 * k_i = s_i +//! +//! Variables (m + n total): +//! x_0, ..., x_{m-1}: binary decision variables (the codeword) +//! k_0, ..., k_{n-1}: non-negative integer slack variables +//! +//! Constraints: +//! n equality constraints (one per row of H) +//! m upper-bound constraints x_j ≤ 1 (enforce binary) +//! +//! Objective: minimize Σ x_j (Hamming weight). + +use crate::models::algebraic::MinimumWeightDecoding; +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing MinimumWeightDecoding to ILP. +/// +/// Variable layout: +/// - x_j at index j for j in 0..num_cols (binary codeword bits) +/// - k_i at index num_cols + i for i in 0..num_rows (integer slack) +#[derive(Debug, Clone)] +pub struct ReductionMinimumWeightDecodingToILP { + target: ILP, + num_cols: usize, +} + +impl ReductionResult for ReductionMinimumWeightDecodingToILP { + type Source = MinimumWeightDecoding; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract the source solution: first m variables are the binary x_j values. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_cols].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_cols + num_rows", + num_constraints = "num_rows + num_cols", + } +)] +impl ReduceTo> for MinimumWeightDecoding { + type Result = ReductionMinimumWeightDecodingToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_cols(); + let n = self.num_rows(); + let num_vars = m + n; + + let x = |j: usize| j; // binary variable index + let k = |i: usize| m + i; // slack variable index + + let mut constraints = Vec::new(); + + // Equality constraints: Σ_j H[i][j] * x_j - 2 * k_i = s_i + for i in 0..n { + let mut terms: Vec<(usize, f64)> = Vec::new(); + for j in 0..m { + if self.matrix()[i][j] { + terms.push((x(j), 1.0)); + } + } + terms.push((k(i), -2.0)); + let rhs = if self.target()[i] { 1.0 } else { 0.0 }; + constraints.push(LinearConstraint::eq(terms, rhs)); + } + + // Binary bounds: x_j ≤ 1 + for j in 0..m { + constraints.push(LinearConstraint::le(vec![(x(j), 1.0)], 1.0)); + } + + // Objective: minimize Σ x_j + let objective: Vec<(usize, f64)> = (0..m).map(|j| (x(j), 1.0)).collect(); + + ReductionMinimumWeightDecodingToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_cols: m, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumweightdecoding_to_ilp", + build: || { + let source = MinimumWeightDecoding::new( + vec![ + vec![true, false, true, true], + vec![false, true, true, false], + vec![true, true, false, true], + ], + vec![true, true, false], + ); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumweightdecoding_ilp.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index fabfa8c2..6789c92a 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ 
-167,28 +167,46 @@ pub(crate) mod longestpath_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod maximalis_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod maximum2satisfiability_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod maximumclique_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod maximumdomaticnumber_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod maximumleafspanningtree_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod maximumlikelihoodranking_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod maximummatching_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod maximumsetpacking_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimumcapacitatedspanningtree_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimumcutintoboundedsets_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumdominatingset_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimumedgecostflow_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimumexternalmacrodatacompression_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumfeedbackarcset_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumfeedbackvertexset_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimumgraphbandwidth_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimumhittingset_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimuminternalmacrodatacompression_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimummatrixcover_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimummaximalmatching_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimummetricdimension_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimummultiwaycut_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumsetcovering_ilp; @@ -197,6 +215,8 @@ pub(crate) mod minimumsummulticenter_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumtardinesssequencing_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimumweightdecoding_ilp; +#[cfg(feature = "ilp-solver")] 
pub(crate) mod minmaxmulticenter_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod mixedchinesepostman_ilp; @@ -207,10 +227,14 @@ pub(crate) mod multiprocessorscheduling_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod naesatisfiability_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod numericalmatchingwithtargetsums_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod openshopscheduling_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod optimallineararrangement_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod optimumcommunicationspanningtree_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod paintshop_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod partiallyorderedknapsack_ilp; @@ -402,24 +426,34 @@ pub(crate) fn canonical_rule_example_specs() -> Vec Vec, + /// Compatible triples, indexed by variable index. + triples: Vec, + /// Number of pairs (m). + m: usize, +} + +impl ReductionResult for ReductionNMTSToILP { + type Source = NumericalMatchingWithTargetSums; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution: for each x_i find the y_j it is paired with. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let mut assignment = vec![0usize; self.m]; + for (var_idx, triple) in self.triples.iter().enumerate() { + if target_solution[var_idx] == 1 { + assignment[triple.i] = triple.j; + } + } + assignment + } +} + +#[reduction( + overhead = { + num_vars = "num_pairs * num_pairs * num_pairs", + num_constraints = "3 * num_pairs", + } +)] +impl ReduceTo> for NumericalMatchingWithTargetSums { + type Result = ReductionNMTSToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_pairs(); + let sx = self.sizes_x(); + let sy = self.sizes_y(); + let targets = self.targets(); + + // Enumerate compatible triples: (i, j, k) where s(x_i) + s(y_j) = B_k + let mut triples = Vec::new(); + for (i, &sxi) in sx.iter().enumerate() { + for (j, &syj) in sy.iter().enumerate() { + for (k, &tk) in targets.iter().enumerate() { + if sxi + syj == tk { + triples.push(CompatibleTriple { i, j, k }); + } + } + } + } + + let num_vars = triples.len(); + let mut constraints = Vec::with_capacity(3 * m); + + // Each x_i in exactly one pair: Σ_{(i,j,k)} z_{i,j,k} = 1 for each i + for i in 0..m { + let terms: Vec<(usize, f64)> = triples + .iter() + .enumerate() + .filter(|(_, t)| t.i == i) + .map(|(idx, _)| (idx, 1.0)) + .collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Each y_j in exactly one pair: Σ_{(i,j,k)} z_{i,j,k} = 1 for each j + for j in 0..m { + let terms: Vec<(usize, f64)> = triples + .iter() + .enumerate() + .filter(|(_, t)| t.j == j) + .map(|(idx, _)| (idx, 1.0)) + .collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Each target k used exactly once: Σ_{(i,j,k)} z_{i,j,k} = 1 for each k + for k in 0..m { + let terms: Vec<(usize, f64)> = triples + .iter() + .enumerate() + .filter(|(_, t)| t.k == k) + .map(|(idx, _)| (idx, 1.0)) + .collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + let target = ILP::new(num_vars, constraints, vec![], 
ObjectiveSense::Minimize); + + ReductionNMTSToILP { target, triples, m } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "numericalmatchingwithtargetsums_to_ilp", + build: || { + let source = + NumericalMatchingWithTargetSums::new(vec![1, 4, 7], vec![2, 5, 3], vec![3, 7, 12]); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/numericalmatchingwithtargetsums_ilp.rs"] +mod tests; diff --git a/src/rules/optimumcommunicationspanningtree_ilp.rs b/src/rules/optimumcommunicationspanningtree_ilp.rs new file mode 100644 index 00000000..e64fbe98 --- /dev/null +++ b/src/rules/optimumcommunicationspanningtree_ilp.rs @@ -0,0 +1,169 @@ +//! Reduction from OptimumCommunicationSpanningTree to ILP (Integer Linear Programming). +//! +//! Uses a multi-commodity flow formulation: +//! - Binary edge variables x_e for each edge of K_n +//! - For each pair (u,v) with r(u,v) > 0, directed flow variables route 1 unit +//! from u to v through the tree +//! - Tree constraints: sum x_e = n-1, and connectivity via flow conservation +//! - Objective: minimize sum_{(u,v): r>0} r(u,v) * w(e) * (flow_uv(e->dir) + flow_uv(e<-dir)) + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::OptimumCommunicationSpanningTree; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing OptimumCommunicationSpanningTree to ILP. +/// +/// Variable layout (all binary): +/// - `x_e` for each undirected edge `e` (indices `0..m`) +/// - For each commodity `k` (pair (u,v) with u < v and r(u,v) > 0): +/// `f^k_(i,j)` and `f^k_(j,i)` for each edge (i,j), two directed flow variables +/// (indices `m + k * 2m .. 
m + (k+1) * 2m`) +#[derive(Debug, Clone)] +pub struct ReductionOptimumCommunicationSpanningTreeToILP { + target: ILP, + num_edges: usize, +} + +impl ReductionResult for ReductionOptimumCommunicationSpanningTreeToILP { + type Source = OptimumCommunicationSpanningTree; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_edges].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_edges + 2 * num_edges * num_vertices * (num_vertices - 1) / 2", + num_constraints = "1 + num_vertices * num_vertices * (num_vertices - 1) / 2 + 2 * num_edges * num_vertices * (num_vertices - 1) / 2", + } +)] +impl ReduceTo> for OptimumCommunicationSpanningTree { + type Result = ReductionOptimumCommunicationSpanningTreeToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let m = self.num_edges(); + let edges = self.edges(); + let w = self.edge_weights(); + let r = self.requirements(); + + // Enumerate commodities: all pairs (s, t) with s < t and r(s,t) > 0 + let mut commodities: Vec<(usize, usize)> = Vec::new(); + for (s, row) in r.iter().enumerate() { + for (t, &req) in row.iter().enumerate().skip(s + 1) { + if req > 0 { + commodities.push((s, t)); + } + } + } + let num_commodities = commodities.len(); + + let num_vars = m + 2 * m * num_commodities; + let mut constraints = Vec::new(); + + // Edge variable index + let edge_var = |edge_idx: usize| edge_idx; + + // Flow variable index: for commodity k, edge e, direction dir (0 = i->j, 1 = j->i) + let flow_var = + |k: usize, edge_idx: usize, dir: usize| -> usize { m + k * 2 * m + 2 * edge_idx + dir }; + + // Constraint 1: Tree has exactly n-1 edges + // sum x_e = n-1 + let tree_terms: Vec<(usize, f64)> = (0..m).map(|e| (edge_var(e), 1.0)).collect(); + constraints.push(LinearConstraint::eq(tree_terms, (n - 1) as f64)); + + // Constraint 2: Flow conservation for each commodity + for (k, 
&(src, dst)) in commodities.iter().enumerate() { + for vertex in 0..n { + let mut terms = Vec::new(); + for (edge_idx, &(i, j)) in edges.iter().enumerate() { + // Flow into vertex minus flow out of vertex + if j == vertex { + // Edge (i, j): direction 0 = i->j (inflow), direction 1 = j->i (outflow) + terms.push((flow_var(k, edge_idx, 0), 1.0)); + terms.push((flow_var(k, edge_idx, 1), -1.0)); + } + if i == vertex { + // Edge (i, j): direction 1 = j->i (inflow), direction 0 = i->j (outflow) + terms.push((flow_var(k, edge_idx, 1), 1.0)); + terms.push((flow_var(k, edge_idx, 0), -1.0)); + } + } + + let rhs = if vertex == src { + -1.0 // source: net outflow of 1 + } else if vertex == dst { + 1.0 // sink: net inflow of 1 + } else { + 0.0 // transit: balanced + }; + constraints.push(LinearConstraint::eq(terms, rhs)); + } + } + + // Constraint 3: Capacity linking: flow <= edge selector + for k in 0..num_commodities { + for edge_idx in 0..m { + let sel = edge_var(edge_idx); + // f^k_(i->j) <= x_e + constraints.push(LinearConstraint::le( + vec![(flow_var(k, edge_idx, 0), 1.0), (sel, -1.0)], + 0.0, + )); + // f^k_(j->i) <= x_e + constraints.push(LinearConstraint::le( + vec![(flow_var(k, edge_idx, 1), 1.0), (sel, -1.0)], + 0.0, + )); + } + } + + // Objective: minimize sum over commodities k of r(s,t) * sum_e w(e) * (f^k_e_fwd + f^k_e_bwd) + // This equals sum_{s = Vec::new(); + for (k, &(s, t)) in commodities.iter().enumerate() { + let req = r[s][t] as f64; + for (edge_idx, &(i, j)) in edges.iter().enumerate() { + let weight = w[i][j] as f64; + let coeff = req * weight; + if coeff != 0.0 { + objective.push((flow_var(k, edge_idx, 0), coeff)); + objective.push((flow_var(k, edge_idx, 1), coeff)); + } + } + } + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionOptimumCommunicationSpanningTreeToILP { + target, + num_edges: m, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + 
vec![crate::example_db::specs::RuleExampleSpec { + id: "optimum_communication_spanning_tree_to_ilp", + build: || { + // K3 example from issue #967 + let edge_weights = vec![vec![0, 1, 2], vec![1, 0, 3], vec![2, 3, 0]]; + let requirements = vec![vec![0, 1, 1], vec![1, 0, 1], vec![1, 1, 0]]; + let source = OptimumCommunicationSpanningTree::new(edge_weights, requirements); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/optimumcommunicationspanningtree_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/algebraic/minimum_matrix_cover.rs b/src/unit_tests/models/algebraic/minimum_matrix_cover.rs new file mode 100644 index 00000000..e03a063b --- /dev/null +++ b/src/unit_tests/models/algebraic/minimum_matrix_cover.rs @@ -0,0 +1,151 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_minimum_matrix_cover_creation() { + let matrix = vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ]; + let problem = MinimumMatrixCover::new(matrix.clone()); + assert_eq!(problem.num_rows(), 4); + assert_eq!(problem.matrix(), &matrix); + assert_eq!(problem.dims(), vec![2; 4]); + assert_eq!(problem.num_variables(), 4); +} + +#[test] +fn test_minimum_matrix_cover_evaluate_all_minus() { + // All -1: f = (-1,-1,-1,-1) + // value = Σ a_ij * 1 * 1 = sum of all matrix entries + let matrix = vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ]; + let problem = MinimumMatrixCover::new(matrix); + let value = problem.evaluate(&[0, 0, 0, 0]); + // Sum of all entries = 0+3+1+0 + 3+0+0+2 + 1+0+0+4 + 0+2+4+0 = 20 + assert_eq!(value, Min(Some(20))); +} + +#[test] +fn test_minimum_matrix_cover_evaluate_mixed() { + let matrix = vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ]; + let problem = MinimumMatrixCover::new(matrix); + + // 
Config [0,1,1,0] → f=(-1,+1,+1,-1) + // Compute: Σ a_ij * f(i) * f(j) + // For each (i,j): + // (0,1): 3 * (-1)(+1) = -3 + // (0,2): 1 * (-1)(+1) = -1 + // (1,0): 3 * (+1)(-1) = -3 + // (1,3): 2 * (+1)(-1) = -2 + // (2,0): 1 * (+1)(-1) = -1 + // (2,3): 4 * (+1)(-1) = -4 + // (3,1): 2 * (-1)(+1) = -2 + // (3,2): 4 * (-1)(+1) = -4 + // All other terms are 0 (zero matrix entries or diagonal zeros) + // Total = -3 + -1 + -3 + -2 + -1 + -4 + -2 + -4 = -20 + let value = problem.evaluate(&[0, 1, 1, 0]); + assert_eq!(value, Min(Some(-20))); +} + +#[test] +fn test_minimum_matrix_cover_evaluate_invalid() { + let problem = MinimumMatrixCover::new(vec![vec![0, 1], vec![1, 0]]); + + // Wrong length + assert_eq!(problem.evaluate(&[0]), Min(None)); + // Out-of-range value + assert_eq!(problem.evaluate(&[0, 2]), Min(None)); +} + +#[test] +fn test_minimum_matrix_cover_solver() { + let matrix = vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ]; + let problem = MinimumMatrixCover::new(matrix); + let solver = BruteForce::new(); + + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(-20))); + + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + let w = witness.unwrap(); + assert_eq!(problem.evaluate(&w), Min(Some(-20))); +} + +#[test] +fn test_minimum_matrix_cover_serialization() { + let matrix = vec![vec![0, 1], vec![1, 0]]; + let problem = MinimumMatrixCover::new(matrix); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MinimumMatrixCover = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_rows(), 2); + assert_eq!(deserialized.matrix(), problem.matrix()); +} + +#[test] +fn test_minimum_matrix_cover_1x1() { + // 1×1 matrix: only one variable, f(1) = ±1 + // value = a_11 * f(1)^2 = a_11 regardless of sign + let problem = MinimumMatrixCover::new(vec![vec![5]]); + assert_eq!(problem.evaluate(&[0]), Min(Some(5))); + assert_eq!(problem.evaluate(&[1]), Min(Some(5))); + 
+ let solver = BruteForce::new(); + assert_eq!(solver.solve(&problem), Min(Some(5))); +} + +#[test] +fn test_minimum_matrix_cover_paper_example() { + // Same as canonical example: 4×4 symmetric matrix + let matrix = vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ]; + let problem = MinimumMatrixCover::new(matrix); + let solver = BruteForce::new(); + + // Verify the claimed optimal from the issue + let value = problem.evaluate(&[0, 1, 1, 0]); + assert_eq!(value, Min(Some(-20))); + + // Verify it is truly optimal + let optimal_value = solver.solve(&problem); + assert_eq!(optimal_value, Min(Some(-20))); + + // Verify the witness is one of the optimal solutions + let all_witnesses = solver.find_all_witnesses(&problem); + assert!(all_witnesses.contains(&vec![0, 1, 1, 0])); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_minimum_matrix_cover_canonical_example_spec() { + use super::canonical_model_example_specs; + let specs = canonical_model_example_specs(); + assert_eq!(specs.len(), 1); + let spec = &specs[0]; + assert_eq!(spec.id, "minimum_matrix_cover"); + assert_eq!(spec.optimal_value, serde_json::json!(-20)); + assert_eq!(spec.optimal_config, vec![0, 1, 1, 0]); +} diff --git a/src/unit_tests/models/algebraic/minimum_weight_decoding.rs b/src/unit_tests/models/algebraic/minimum_weight_decoding.rs new file mode 100644 index 00000000..57335339 --- /dev/null +++ b/src/unit_tests/models/algebraic/minimum_weight_decoding.rs @@ -0,0 +1,156 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +/// H (3×4): [[1,0,1,1],[0,1,1,0],[1,1,0,1]], s = [1,1,0] +fn example_instance() -> MinimumWeightDecoding { + let matrix = vec![ + vec![true, false, true, true], + vec![false, true, true, false], + vec![true, true, false, true], + ]; + let target = vec![true, true, false]; + MinimumWeightDecoding::new(matrix, target) +} + +#[test] +fn test_minimum_weight_decoding_creation() { + let 
problem = example_instance(); + assert_eq!(problem.num_rows(), 3); + assert_eq!(problem.num_cols(), 4); + assert_eq!(problem.dims(), vec![2; 4]); + assert_eq!( + ::NAME, + "MinimumWeightDecoding" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_minimum_weight_decoding_evaluate_feasible() { + let problem = example_instance(); + // Config [0,0,1,0] → weight 1 + // Row 0: H[0][2]=1, x[2]=1 → dot=1 mod 2 = 1 = s[0]=true ✓ + // Row 1: H[1][2]=1, x[2]=1 → dot=1 mod 2 = 1 = s[1]=true ✓ + // Row 2: H[2][2]=0 → dot=0 mod 2 = 0 = s[2]=false ✓ + let config = vec![0, 0, 1, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(1))); +} + +#[test] +fn test_minimum_weight_decoding_evaluate_infeasible() { + let problem = example_instance(); + // Config [0,0,0,0] → all zeros, Hx = [0,0,0] but s = [1,1,0] → infeasible + let config = vec![0, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_weight_decoding_evaluate_heavier_feasible() { + let problem = example_instance(); + // Config [1,0,0,1] → weight 2 + // Row 0: H[0][0]=1, H[0][3]=1 → dot=2 mod 2=0, s[0]=true → 0≠1 infeasible + let config = vec![1, 0, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(None)); + + // Config [1,1,0,0] → weight 2 + // Row 0: H[0][0]=1 → dot=1, mod 2=1, s[0]=true ✓ + // Row 1: H[1][1]=1 → dot=1, mod 2=1, s[1]=true ✓ + // Row 2: H[2][0]=1,H[2][1]=1 → dot=2, mod 2=0, s[2]=false ✓ + let config2 = vec![1, 1, 0, 0]; + assert_eq!(problem.evaluate(&config2), Min(Some(2))); +} + +#[test] +fn test_minimum_weight_decoding_evaluate_wrong_length() { + let problem = example_instance(); + assert_eq!(problem.evaluate(&[1, 0]), Min(None)); + assert_eq!(problem.evaluate(&[1; 5]), Min(None)); +} + +#[test] +fn test_minimum_weight_decoding_evaluate_invalid_variable() { + let problem = example_instance(); + let config = vec![2, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_weight_decoding_brute_force() { + let problem = 
example_instance(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find optimal"); + let val = problem.evaluate(&witness); + // Optimal is weight 1 with config [0,0,1,0] + assert_eq!(val, Min(Some(1))); +} + +#[test] +fn test_minimum_weight_decoding_all_witnesses() { + let problem = example_instance(); + let solver = BruteForce::new(); + let witnesses = solver.find_all_witnesses(&problem); + // All witnesses should be feasible and have weight 1 + assert!(!witnesses.is_empty()); + for w in &witnesses { + assert_eq!(problem.evaluate(w), Min(Some(1))); + } +} + +#[test] +fn test_minimum_weight_decoding_serialization() { + let problem = example_instance(); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "matrix": [[true, false, true, true], [false, true, true, false], [true, true, false, true]], + "target": [true, true, false], + }) + ); + let restored: MinimumWeightDecoding = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_rows(), 3); + assert_eq!(restored.num_cols(), 4); +} + +#[test] +fn test_minimum_weight_decoding_zero_syndrome() { + // s = [0,0] → x = [0,0,0] is feasible with weight 0 + let matrix = vec![vec![true, false, true], vec![false, true, true]]; + let target = vec![false, false]; + let problem = MinimumWeightDecoding::new(matrix, target); + let config = vec![0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(0))); +} + +#[test] +fn test_minimum_weight_decoding_complexity_metadata() { + use crate::registry::VariantEntry; + + let entry = inventory::iter::() + .find(|entry| entry.name == "MinimumWeightDecoding") + .expect("MinimumWeightDecoding variant entry should exist"); + + assert_eq!(entry.complexity, "2^(0.0494 * num_cols)"); +} + +#[test] +#[should_panic(expected = "at least one row")] +fn test_minimum_weight_decoding_empty_matrix() { + MinimumWeightDecoding::new(vec![], vec![]); +} + +#[test] +#[should_panic(expected = "same 
length")] +fn test_minimum_weight_decoding_inconsistent_rows() { + let matrix = vec![vec![true, false], vec![true]]; + MinimumWeightDecoding::new(matrix, vec![true, false]); +} + +#[test] +#[should_panic(expected = "Target length")] +fn test_minimum_weight_decoding_target_mismatch() { + let matrix = vec![vec![true, false], vec![false, true]]; + MinimumWeightDecoding::new(matrix, vec![true]); +} diff --git a/src/unit_tests/models/formula/maximum_2_satisfiability.rs b/src/unit_tests/models/formula/maximum_2_satisfiability.rs new file mode 100644 index 00000000..32b1dc16 --- /dev/null +++ b/src/unit_tests/models/formula/maximum_2_satisfiability.rs @@ -0,0 +1,91 @@ +use super::*; +use crate::models::formula::CNFClause; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Max; + +fn issue_instance() -> Maximum2Satisfiability { + Maximum2Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2]), + CNFClause::new(vec![1, -2]), + CNFClause::new(vec![-1, 3]), + CNFClause::new(vec![-1, -3]), + CNFClause::new(vec![2, 4]), + CNFClause::new(vec![-3, -4]), + CNFClause::new(vec![3, 4]), + ], + ) +} + +#[test] +fn test_maximum_2_satisfiability_creation() { + let problem = issue_instance(); + assert_eq!(problem.num_vars(), 4); + assert_eq!(problem.num_clauses(), 7); + assert_eq!(problem.dims(), vec![2; 4]); +} + +#[test] +#[should_panic(expected = "Clause 0 has 3 literals, expected 2")] +fn test_maximum_2_satisfiability_wrong_clause_size() { + let _ = Maximum2Satisfiability::new(3, vec![CNFClause::new(vec![1, 2, 3])]); +} + +#[test] +fn test_maximum_2_satisfiability_evaluate_optimal() { + let problem = issue_instance(); + // x1=T, x2=T, x3=F, x4=T → config [1,1,0,1] + assert_eq!(problem.evaluate(&[1, 1, 0, 1]), Max(Some(6))); +} + +#[test] +fn test_maximum_2_satisfiability_evaluate_all_true() { + let problem = issue_instance(); + // All true: [1,1,1,1] + // (1∨2)=T, (1∨¬2)=T, (¬1∨3)=T, (¬1∨¬3)=F, (2∨4)=T, (¬3∨¬4)=F, (3∨4)=T → 5 + 
assert_eq!(problem.evaluate(&[1, 1, 1, 1]), Max(Some(5))); +} + +#[test] +fn test_maximum_2_satisfiability_evaluate_all_false() { + let problem = issue_instance(); + // All false: [0,0,0,0] + // (1∨2)=F, (1∨¬2)=T, (¬1∨3)=T, (¬1∨¬3)=T, (2∨4)=F, (¬3∨¬4)=T, (3∨4)=F → 4 + assert_eq!(problem.evaluate(&[0, 0, 0, 0]), Max(Some(4))); +} + +#[test] +fn test_maximum_2_satisfiability_solver() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let value = solver.solve(&problem); + assert_eq!(value, Max(Some(6))); +} + +#[test] +fn test_maximum_2_satisfiability_witness() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + assert_eq!(problem.evaluate(&witness.unwrap()), Max(Some(6))); +} + +#[test] +fn test_maximum_2_satisfiability_serialization() { + let problem = issue_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let restored: Maximum2Satisfiability = serde_json::from_str(&json).unwrap(); + assert_eq!(restored.num_vars(), 4); + assert_eq!(restored.num_clauses(), 7); + assert_eq!(restored.evaluate(&[1, 1, 0, 1]), Max(Some(6))); +} + +#[test] +fn test_maximum_2_satisfiability_count_satisfied() { + let problem = issue_instance(); + let assignment = vec![true, true, false, true]; + assert_eq!(problem.count_satisfied(&assignment), 6); +} diff --git a/src/unit_tests/models/graph/maximum_domatic_number.rs b/src/unit_tests/models/graph/maximum_domatic_number.rs new file mode 100644 index 00000000..027581b3 --- /dev/null +++ b/src/unit_tests/models/graph/maximum_domatic_number.rs @@ -0,0 +1,111 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Max; + +#[test] +fn test_maximum_domatic_number_creation() { + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = MaximumDomaticNumber::new(graph); + assert_eq!(problem.graph().num_vertices(), 4); 
+ assert_eq!(problem.graph().num_edges(), 3); + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.dims(), vec![4; 4]); +} + +#[test] +fn test_maximum_domatic_number_evaluate_optimal() { + // Graph from issue: 6 vertices + // Edges: (0,1), (0,2), (0,3), (1,4), (2,5), (3,4), (3,5), (4,5) + // Config: [0, 1, 2, 0, 2, 1] → 3 non-empty dominating sets → Max(3) + let graph = SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 4), + (2, 5), + (3, 4), + (3, 5), + (4, 5), + ], + ); + let problem = MaximumDomaticNumber::new(graph); + let config = vec![0, 1, 2, 0, 2, 1]; + let result = problem.evaluate(&config); + assert_eq!(result, Max(Some(3))); +} + +#[test] +fn test_maximum_domatic_number_evaluate_invalid() { + // Path graph P3: 0-1-2 + // Config [0, 1, 2]: set {0} = {v0}, set {1} = {v1}, set {2} = {v2} + // Set {2} = {v2} does NOT dominate v0 (v0 not in set and not adjacent to v2) + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximumDomaticNumber::new(graph); + let config = vec![0, 1, 2]; + let result = problem.evaluate(&config); + assert_eq!(result, Max(None)); +} + +#[test] +fn test_maximum_domatic_number_evaluate_trivial() { + // All vertices in one set → always a dominating set → Max(1) + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = MaximumDomaticNumber::new(graph); + let config = vec![0, 0, 0, 0]; + let result = problem.evaluate(&config); + assert_eq!(result, Max(Some(1))); +} + +#[test] +fn test_maximum_domatic_number_solver_p3() { + // Path graph P3: 0-1-2 + // Domatic number = 2: e.g., {0,2} and {1} are both dominating sets + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximumDomaticNumber::new(graph); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&witness); + assert_eq!(value, Max(Some(2))); +} + +#[test] +fn test_maximum_domatic_number_solver_complete_graph() { + // K4: 
domatic number = 4 (each vertex is its own dominating set in K4) + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let problem = MaximumDomaticNumber::new(graph); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&witness); + assert_eq!(value, Max(Some(4))); +} + +#[test] +fn test_maximum_domatic_number_serialization() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximumDomaticNumber::new(graph); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MaximumDomaticNumber = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.graph().num_vertices(), 3); + assert_eq!(deserialized.graph().num_edges(), 2); +} + +#[test] +fn test_maximum_domatic_number_size_getters() { + let graph = SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let problem = MaximumDomaticNumber::new(graph); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_edges(), 4); +} + +#[test] +fn test_maximum_domatic_number_single_vertex() { + // Single vertex: domatic number = 1 + let graph = SimpleGraph::new(1, vec![]); + let problem = MaximumDomaticNumber::new(graph); + let config = vec![0]; + assert_eq!(problem.evaluate(&config), Max(Some(1))); +} diff --git a/src/unit_tests/models/graph/maximum_leaf_spanning_tree.rs b/src/unit_tests/models/graph/maximum_leaf_spanning_tree.rs new file mode 100644 index 00000000..0b2fe7db --- /dev/null +++ b/src/unit_tests/models/graph/maximum_leaf_spanning_tree.rs @@ -0,0 +1,147 @@ +use super::*; +use crate::{solvers::BruteForce, topology::SimpleGraph, traits::Problem}; + +/// Issue #897 example: 6 vertices, 9 edges. 
+/// Edges: (0,1),(0,2),(0,3),(1,4),(2,4),(2,5),(3,5),(4,5),(1,3) +fn example_instance() -> MaximumLeafSpanningTree { + let graph = SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 4), + (2, 4), + (2, 5), + (3, 5), + (4, 5), + (1, 3), + ], + ); + MaximumLeafSpanningTree::new(graph) +} + +#[test] +fn test_maximum_leaf_spanning_tree_creation() { + let problem = example_instance(); + assert_eq!(problem.graph().num_vertices(), 6); + assert_eq!(problem.graph().num_edges(), 9); + assert_eq!(problem.num_vertices(), 6); + assert_eq!(problem.num_edges(), 9); + assert_eq!(problem.dims().len(), 9); + assert!(problem.dims().iter().all(|&d| d == 2)); +} + +#[test] +#[should_panic(expected = "graph must have at least 2 vertices")] +fn test_maximum_leaf_spanning_tree_rejects_tiny_graph() { + let graph = SimpleGraph::new(1, vec![]); + let _ = MaximumLeafSpanningTree::new(graph); +} + +#[test] +fn test_maximum_leaf_spanning_tree_evaluate_optimal() { + let problem = example_instance(); + // Tree: {(0,1),(0,2),(0,3),(2,4),(2,5)} = indices 0,1,2,4,5 + // Degrees: 0->3, 1->1, 2->3, 3->1, 4->1, 5->1 => 4 leaves + let config = vec![1, 1, 1, 0, 1, 1, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Max(Some(4))); +} + +#[test] +fn test_maximum_leaf_spanning_tree_evaluate_valid_suboptimal() { + let problem = example_instance(); + // A path tree: (0,1),(1,3),(0,2),(2,4),(4,5) = indices 0,8,1,4,7 + // Wait, edge 8 is (1,3). 
So: indices 0,1,4,7,8 = [1,1,0,0,1,0,0,1,1] + // Degrees: 0->2, 1->2, 2->2, 3->1, 4->2, 5->1 => 2 leaves + let config = vec![1, 1, 0, 0, 1, 0, 0, 1, 1]; + assert_eq!(problem.evaluate(&config), Max(Some(2))); +} + +#[test] +fn test_maximum_leaf_spanning_tree_evaluate_invalid_too_few_edges() { + let problem = example_instance(); + // Only 3 edges (need 5 for spanning tree of 6 vertices) + let config = vec![1, 1, 1, 0, 0, 0, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Max(None)); +} + +#[test] +fn test_maximum_leaf_spanning_tree_evaluate_invalid_too_many_edges() { + let problem = example_instance(); + // 6 edges selected = cycle + let config = vec![1, 1, 1, 1, 1, 1, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Max(None)); +} + +#[test] +fn test_maximum_leaf_spanning_tree_evaluate_disconnected() { + let problem = example_instance(); + // 5 edges but disconnected: (0,1),(0,2),(0,3),(4,5),(1,3) = indices 0,1,2,7,8 + // Vertices {0,1,2,3} and {4,5} are separate => not spanning + let config = vec![1, 1, 1, 0, 0, 0, 0, 1, 1]; + assert_eq!(problem.evaluate(&config), Max(None)); +} + +#[test] +fn test_maximum_leaf_spanning_tree_evaluate_empty() { + let problem = example_instance(); + let config = vec![0; 9]; + assert_eq!(problem.evaluate(&config), Max(None)); +} + +#[test] +fn test_maximum_leaf_spanning_tree_brute_force() { + let problem = example_instance(); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + // All optimal solutions should have 4 leaves + for sol in &solutions { + assert_eq!(problem.evaluate(sol), Max(Some(4))); + } +} + +#[test] +fn test_maximum_leaf_spanning_tree_is_valid_solution() { + let problem = example_instance(); + assert!(problem.is_valid_solution(&[1, 1, 1, 0, 1, 1, 0, 0, 0])); + assert!(!problem.is_valid_solution(&[1, 1, 1, 0, 0, 0, 0, 0, 0])); // too few + assert!(!problem.is_valid_solution(&[0; 9])); // empty + assert!(!problem.is_valid_solution(&[1, 1, 1])); 
// wrong length +} + +#[test] +fn test_maximum_leaf_spanning_tree_serialization() { + let problem = example_instance(); + let json = serde_json::to_value(&problem).unwrap(); + let deserialized: MaximumLeafSpanningTree = serde_json::from_value(json).unwrap(); + assert_eq!(deserialized.graph().num_vertices(), 6); + assert_eq!(deserialized.graph().num_edges(), 9); +} + +#[test] +fn test_maximum_leaf_spanning_tree_small_path() { + // Path graph P3: 0-1-2, only spanning tree is the path itself -> 2 leaves + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximumLeafSpanningTree::new(graph); + assert_eq!(problem.dims(), vec![2, 2]); + let config = vec![1, 1]; + assert_eq!(problem.evaluate(&config), Max(Some(2))); +} + +#[test] +fn test_maximum_leaf_spanning_tree_star() { + // Star K1,3: center 0, leaves 1,2,3 + // Edges: (0,1),(0,2),(0,3) + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); + let problem = MaximumLeafSpanningTree::new(graph); + // Only one spanning tree: all 3 edges + let config = vec![1, 1, 1]; + assert_eq!(problem.evaluate(&config), Max(Some(3))); + // This is optimal (3 leaves out of 4 vertices) + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 1, 1]); +} diff --git a/src/unit_tests/models/graph/minimum_capacitated_spanning_tree.rs b/src/unit_tests/models/graph/minimum_capacitated_spanning_tree.rs new file mode 100644 index 00000000..5c74e862 --- /dev/null +++ b/src/unit_tests/models/graph/minimum_capacitated_spanning_tree.rs @@ -0,0 +1,174 @@ +use super::*; +use crate::{solvers::BruteForce, topology::SimpleGraph, traits::Problem}; + +/// 5-vertex instance from issue #901. 
+/// Edges: (0,1,2), (0,2,1), (0,3,4), (1,2,3), (1,4,1), (2,3,2), (2,4,3), (3,4,1) +/// Root=0, capacity=3, all requirements=1 +fn example_instance() -> MinimumCapacitatedSpanningTree { + let graph = SimpleGraph::new( + 5, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 2), + (1, 4), + (2, 3), + (2, 4), + (3, 4), + ], + ); + let weights = vec![2, 1, 4, 3, 1, 2, 3, 1]; + let requirements = vec![0, 1, 1, 1, 1]; + let capacity = 3; + MinimumCapacitatedSpanningTree::new(graph, weights, 0, requirements, capacity) +} + +/// Tight capacity instance: capacity=2, so each subtree can hold at most 2 vertices. +fn tight_capacity_instance() -> MinimumCapacitatedSpanningTree { + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (2, 3)]); + let weights = vec![1, 2, 3, 1, 1]; + let requirements = vec![0, 1, 1, 1]; + let capacity = 2; + MinimumCapacitatedSpanningTree::new(graph, weights, 0, requirements, capacity) +} + +#[test] +fn test_creation() { + let problem = example_instance(); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_edges(), 8); + assert_eq!(problem.root(), 0); + assert_eq!(problem.requirements(), &[0, 1, 1, 1, 1]); + assert_eq!(*problem.capacity(), 3); + assert_eq!(problem.dims().len(), 8); + assert!(problem.is_weighted()); +} + +#[test] +#[should_panic(expected = "weights length must match num_edges")] +fn test_rejects_wrong_weight_count() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let _ = MinimumCapacitatedSpanningTree::new(graph, vec![1, 1, 1], 0, vec![0, 1, 1], 3); +} + +#[test] +#[should_panic(expected = "requirements length must match num_vertices")] +fn test_rejects_wrong_requirements_count() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let _ = MinimumCapacitatedSpanningTree::new(graph, vec![1, 1], 0, vec![0, 1], 3); +} + +#[test] +#[should_panic(expected = "root 5 out of range")] +fn test_rejects_invalid_root() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let _ = 
MinimumCapacitatedSpanningTree::new(graph, vec![1, 1], 5, vec![0, 1, 1], 3); +} + +#[test] +#[should_panic(expected = "graph must have at least 2 vertices")] +fn test_rejects_single_vertex() { + let graph = SimpleGraph::new(1, vec![]); + let _ = MinimumCapacitatedSpanningTree::::new(graph, vec![], 0, vec![0], 3); +} + +#[test] +fn test_evaluate_optimal() { + let problem = example_instance(); + // Optimal: edges {(0,1),(0,2),(1,4),(3,4)} = indices {0,1,4,7} + // Weight = 2+1+1+1 = 5 + let config = vec![1, 1, 0, 0, 1, 0, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(Some(5))); +} + +#[test] +fn test_evaluate_infeasible_not_spanning() { + let problem = example_instance(); + // Only 3 edges selected (not n-1=4) + let config = vec![1, 1, 0, 0, 1, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_evaluate_infeasible_capacity_violated() { + let problem = example_instance(); + // Tree: (0,3),(3,4),(3,2),(2,1) = indices {2,7,5,3} + // Subtree at 3: {3,4,2,1} req = 4 > 3 (capacity) + let config = vec![0, 0, 1, 1, 0, 1, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_evaluate_empty() { + let problem = example_instance(); + let config = vec![0; 8]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_brute_force() { + let problem = example_instance(); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + let optimal_value = problem.evaluate(&solutions[0]); + assert_eq!(optimal_value, Min(Some(5))); + for sol in &solutions { + assert_eq!(problem.evaluate(sol), Min(Some(5))); + } +} + +#[test] +fn test_tight_capacity() { + let problem = tight_capacity_instance(); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + // With capacity=2, star from root is valid: each subtree has 1 vertex + for sol in &solutions { + 
assert!(problem.is_valid_solution(sol)); + } +} + +#[test] +fn test_is_valid_solution() { + let problem = example_instance(); + // Valid + assert!(problem.is_valid_solution(&[1, 1, 0, 0, 1, 0, 0, 1])); + // Invalid: not enough edges + assert!(!problem.is_valid_solution(&[1, 1, 0, 0, 0, 0, 0, 0])); + // Invalid: wrong length + assert!(!problem.is_valid_solution(&[1, 1, 0])); +} + +#[test] +fn test_serialization() { + let problem = example_instance(); + let json = serde_json::to_value(&problem).unwrap(); + let deserialized: MinimumCapacitatedSpanningTree = + serde_json::from_value(json).unwrap(); + assert_eq!(deserialized.num_vertices(), 5); + assert_eq!(deserialized.num_edges(), 8); + assert_eq!(deserialized.root(), 0); + assert_eq!(deserialized.requirements(), &[0, 1, 1, 1, 1]); + assert_eq!(*deserialized.capacity(), 3); +} + +#[test] +fn test_size_getters() { + let problem = example_instance(); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_edges(), 8); +} + +#[test] +fn test_set_weights() { + let mut problem = example_instance(); + assert_eq!(problem.weights(), &[2, 1, 4, 3, 1, 2, 3, 1]); + problem.set_weights(vec![1; 8]); + assert_eq!(problem.weights(), &[1; 8]); + // Same optimal tree now has cost 4 + let config = vec![1, 1, 0, 0, 1, 0, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(Some(4))); +} diff --git a/src/unit_tests/models/graph/minimum_edge_cost_flow.rs b/src/unit_tests/models/graph/minimum_edge_cost_flow.rs new file mode 100644 index 00000000..a530a66d --- /dev/null +++ b/src/unit_tests/models/graph/minimum_edge_cost_flow.rs @@ -0,0 +1,139 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::DirectedGraph; +use crate::traits::Problem; +use crate::types::Min; + +/// 5-vertex network from issue #810. +/// s=0, t=4, R=3. Prices: [3,1,2,0,0,0], capacities all 2. 
+/// Arcs: (0,1),(0,2),(0,3),(1,4),(2,4),(3,4) +/// Optimal: route via v2 (1 unit) and v3 (2 units) → cost = 1 + 2 = 3 +fn issue_instance() -> MinimumEdgeCostFlow { + MinimumEdgeCostFlow::new( + DirectedGraph::new(5, vec![(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)]), + vec![3, 1, 2, 0, 0, 0], + vec![2, 2, 2, 2, 2, 2], + 0, + 4, + 3, + ) +} + +/// Small 3-vertex instance: s=0, t=2, R=2. +/// Arc (0,1) cap=1, (1,2) cap=1 — cannot route 2 units. +fn infeasible_instance() -> MinimumEdgeCostFlow { + MinimumEdgeCostFlow::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2)]), + vec![1, 1], + vec![1, 1], + 0, + 2, + 2, + ) +} + +#[test] +fn test_minimum_edge_cost_flow_creation() { + let problem = issue_instance(); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_edges(), 6); + assert_eq!(problem.source(), 0); + assert_eq!(problem.sink(), 4); + assert_eq!(problem.required_flow(), 3); + assert_eq!(problem.max_capacity(), 2); + assert_eq!(problem.prices(), &[3, 1, 2, 0, 0, 0]); + assert_eq!(problem.capacities(), &[2, 2, 2, 2, 2, 2]); + assert_eq!(problem.dims(), vec![3, 3, 3, 3, 3, 3]); + assert_eq!( + ::NAME, + "MinimumEdgeCostFlow" + ); +} + +#[test] +fn test_minimum_edge_cost_flow_evaluate_optimal() { + let problem = issue_instance(); + // Route 1 unit via v2 and 2 units via v3: config = [0, 1, 2, 0, 1, 2] + let config = vec![0, 1, 2, 0, 1, 2]; + assert_eq!(problem.evaluate(&config), Min(Some(3))); +} + +#[test] +fn test_minimum_edge_cost_flow_evaluate_suboptimal() { + let problem = issue_instance(); + // Route 1 via v1, 1 via v2, 1 via v3: config = [1, 1, 1, 1, 1, 1] + // Cost = p(0)+p(1)+p(2)+p(3)+p(4)+p(5) = 3+1+2+0+0+0 = 6 + let config = vec![1, 1, 1, 1, 1, 1]; + assert_eq!(problem.evaluate(&config), Min(Some(6))); +} + +#[test] +fn test_minimum_edge_cost_flow_evaluate_infeasible_conservation() { + let problem = issue_instance(); + // Flow into vertex 1 but not out: violates conservation + let config = vec![1, 0, 0, 0, 0, 0]; + 
assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_edge_cost_flow_evaluate_infeasible_flow_req() { + let problem = issue_instance(); + // All zeros: no flow → insufficient + let config = vec![0, 0, 0, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_edge_cost_flow_evaluate_wrong_config_length() { + let problem = issue_instance(); + assert_eq!(problem.evaluate(&[0; 5]), Min(None)); // too short + assert_eq!(problem.evaluate(&[0; 7]), Min(None)); // too long + assert_eq!(problem.evaluate(&[]), Min(None)); // empty +} + +#[test] +fn test_minimum_edge_cost_flow_solver() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find optimal"); + let value = problem.evaluate(&witness); + assert_eq!(value, Min(Some(3))); +} + +#[test] +fn test_minimum_edge_cost_flow_infeasible_instance() { + let problem = infeasible_instance(); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_minimum_edge_cost_flow_serialization() { + let problem = issue_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MinimumEdgeCostFlow = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 5); + assert_eq!(deserialized.num_edges(), 6); + assert_eq!(deserialized.source(), 0); + assert_eq!(deserialized.sink(), 4); + assert_eq!(deserialized.required_flow(), 3); + assert_eq!(deserialized.prices(), &[3, 1, 2, 0, 0, 0]); + assert_eq!(deserialized.capacities(), &[2, 2, 2, 2, 2, 2]); +} + +#[test] +fn test_minimum_edge_cost_flow_max_capacity_empty() { + let problem = MinimumEdgeCostFlow::new(DirectedGraph::new(2, vec![]), vec![], vec![], 0, 1, 0); + assert_eq!(problem.max_capacity(), 0); +} + +#[test] +fn test_minimum_edge_cost_flow_all_witnesses_optimal() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let all = 
solver.find_all_witnesses(&problem); + assert!(!all.is_empty()); + for sol in &all { + assert_eq!(problem.evaluate(sol), Min(Some(3))); + } +} diff --git a/src/unit_tests/models/graph/minimum_graph_bandwidth.rs b/src/unit_tests/models/graph/minimum_graph_bandwidth.rs new file mode 100644 index 00000000..a4cea8d1 --- /dev/null +++ b/src/unit_tests/models/graph/minimum_graph_bandwidth.rs @@ -0,0 +1,164 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Min; + +/// Star graph S4: center 0 connected to 1, 2, 3 +fn star_example() -> MinimumGraphBandwidth { + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); + MinimumGraphBandwidth::new(graph) +} + +/// Path graph P4: 0-1-2-3 +fn path_example() -> MinimumGraphBandwidth { + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + MinimumGraphBandwidth::new(graph) +} + +#[test] +fn test_minimumgraphbandwidth_creation() { + let problem = star_example(); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); + assert_eq!(problem.dims(), vec![4, 4, 4, 4]); +} + +#[test] +fn test_minimumgraphbandwidth_evaluate_valid() { + let problem = star_example(); + // Config [1,0,2,3]: f(0)=1, f(1)=0, f(2)=2, f(3)=3 + // Edges: (0,1): |1-0|=1, (0,2): |1-2|=1, (0,3): |1-3|=2 + // Bandwidth = max(1, 1, 2) = 2 + assert_eq!(problem.evaluate(&[1, 0, 2, 3]), Min(Some(2))); + assert_eq!(problem.bandwidth(&[1, 0, 2, 3]), Some(2)); +} + +#[test] +fn test_minimumgraphbandwidth_evaluate_invalid() { + let problem = star_example(); + + // Not a permutation: repeated value + assert_eq!(problem.evaluate(&[0, 0, 1, 2]), Min(None)); + assert_eq!(problem.bandwidth(&[0, 0, 1, 2]), None); + + // Out of range + assert_eq!(problem.evaluate(&[0, 1, 2, 4]), Min(None)); + assert_eq!(problem.bandwidth(&[0, 1, 2, 4]), None); + + // Wrong length + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(None)); + 
assert_eq!(problem.bandwidth(&[0, 1, 2]), None); +} + +#[test] +fn test_minimumgraphbandwidth_evaluate_optimal() { + let problem = star_example(); + // For S4 (star with 4 vertices), optimal bandwidth is 2. + // Center (vertex 0) placed at position 1: [1, 0, 2, 3] + // Edges: (0,1): |1-0|=1, (0,2): |1-2|=1, (0,3): |1-3|=2 → max = 2 + let solver = BruteForce::new(); + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(2))); +} + +#[test] +fn test_minimumgraphbandwidth_solver() { + let problem = path_example(); + // Path graph P4: optimal bandwidth is 1 (identity permutation) + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + let sol = solution.unwrap(); + assert_eq!(problem.evaluate(&sol), Min(Some(1))); + + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(1))); +} + +#[test] +fn test_minimumgraphbandwidth_serialization() { + let problem = star_example(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MinimumGraphBandwidth = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.graph().num_vertices(), 4); + assert_eq!(deserialized.graph().num_edges(), 3); + + // Verify evaluation is consistent after round-trip + let config = vec![1, 0, 2, 3]; + assert_eq!(problem.evaluate(&config), deserialized.evaluate(&config)); +} + +#[test] +fn test_minimumgraphbandwidth_single_vertex() { + let graph = SimpleGraph::new(1, vec![]); + let problem = MinimumGraphBandwidth::new(graph); + assert_eq!(problem.dims(), vec![1]); + assert_eq!(problem.evaluate(&[0]), Min(Some(0))); + assert_eq!(problem.bandwidth(&[0]), Some(0)); +} + +#[test] +fn test_minimumgraphbandwidth_empty_graph() { + // No edges: any permutation has bandwidth 0 + let graph = SimpleGraph::new(3, vec![]); + let problem = MinimumGraphBandwidth::new(graph); + + let solver = BruteForce::new(); + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(0))); + + let all_witnesses = 
solver.find_all_witnesses(&problem); + assert_eq!(all_witnesses.len(), 6); // 3! = 6 + for s in &all_witnesses { + assert_eq!(problem.evaluate(s), Min(Some(0))); + } +} + +#[test] +fn test_minimumgraphbandwidth_complete_graph_k4() { + // K4: bandwidth is always 3 (max position difference in any permutation) + // Actually for K4, bandwidth = n-1 = 3 for any arrangement since edge (first, last) exists. + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let problem = MinimumGraphBandwidth::new(graph); + + let solver = BruteForce::new(); + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(3))); +} + +#[test] +fn test_minimumgraphbandwidth_problem_name() { + assert_eq!( + as Problem>::NAME, + "MinimumGraphBandwidth" + ); +} + +#[test] +fn test_minimumgraphbandwidth_size_getters() { + let problem = star_example(); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); +} + +#[test] +fn test_minimumgraphbandwidth_graph_accessor() { + let problem = star_example(); + let graph = problem.graph(); + assert_eq!(graph.num_vertices(), 4); + assert_eq!(graph.num_edges(), 3); +} + +#[test] +fn test_minimumgraphbandwidth_permutation_matters() { + let problem = star_example(); + + // Center at position 0: [0, 1, 2, 3] + // Edges: (0,1): |0-1|=1, (0,2): |0-2|=2, (0,3): |0-3|=3 → max = 3 + assert_eq!(problem.evaluate(&[0, 1, 2, 3]), Min(Some(3))); + + // Center at position 1: [1, 0, 2, 3] + // Edges: (0,1): |1-0|=1, (0,2): |1-2|=1, (0,3): |1-3|=2 → max = 2 + assert_eq!(problem.evaluate(&[1, 0, 2, 3]), Min(Some(2))); +} diff --git a/src/unit_tests/models/graph/minimum_metric_dimension.rs b/src/unit_tests/models/graph/minimum_metric_dimension.rs new file mode 100644 index 00000000..7650b789 --- /dev/null +++ b/src/unit_tests/models/graph/minimum_metric_dimension.rs @@ -0,0 +1,128 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use 
crate::types::Min; + +#[test] +fn test_minimum_metric_dimension_creation() { + let graph = SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]); + let problem = MinimumMetricDimension::new(graph); + assert_eq!(problem.graph().num_vertices(), 5); + assert_eq!(problem.graph().num_edges(), 6); + assert_eq!(problem.num_variables(), 5); + assert_eq!(problem.dims(), vec![2; 5]); +} + +#[test] +fn test_minimum_metric_dimension_evaluate_optimal() { + // House graph: selecting vertices 0 and 1 forms a resolving set of size 2 + let graph = SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]); + let problem = MinimumMetricDimension::new(graph); + let config = vec![1, 1, 0, 0, 0]; // select v0, v1 + let result = problem.evaluate(&config); + assert!(result.is_valid()); + assert_eq!(result, Min(Some(2))); +} + +#[test] +fn test_minimum_metric_dimension_evaluate_non_resolving() { + // House graph: selecting only v2 should not resolve all pairs + let graph = SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]); + let problem = MinimumMetricDimension::new(graph); + // v2 alone: d(0,2)=1, d(1,2)=2, d(3,2)=1, d(4,2)=1 + // vertices 0 and 3 both have distance 1 to v2 -> not resolving + let config = vec![0, 0, 1, 0, 0]; + let result = problem.evaluate(&config); + assert_eq!(result, Min(None)); +} + +#[test] +fn test_minimum_metric_dimension_evaluate_empty_selection() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumMetricDimension::new(graph); + let config = vec![0, 0, 0]; + let result = problem.evaluate(&config); + assert_eq!(result, Min(None)); +} + +#[test] +fn test_minimum_metric_dimension_evaluate_all_selected() { + // Selecting all vertices is always resolving (trivially) + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumMetricDimension::new(graph); + let config = vec![1, 1, 1]; + let result = problem.evaluate(&config); + assert!(result.is_valid()); + 
assert_eq!(result, Min(Some(3))); +} + +#[test] +fn test_minimum_metric_dimension_solver() { + // House graph: minimum resolving set has size 2 + let graph = SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]); + let problem = MinimumMetricDimension::new(graph); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&solution); + assert!(value.is_valid()); + assert_eq!(value, Min(Some(2))); +} + +#[test] +fn test_minimum_metric_dimension_path_graph() { + // Path graph P3: 0-1-2 + // Metric dimension of a path is 1 (either endpoint resolves) + // d(0,0)=0, d(1,0)=1, d(2,0)=2 -> all distinct -> {0} resolves + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumMetricDimension::new(graph); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&solution); + assert_eq!(value, Min(Some(1))); +} + +#[test] +fn test_minimum_metric_dimension_complete_graph() { + // K4: metric dimension of K_n is n-1 (all distances are 1, so any pair + // at distance 1 from each other needs a resolving vertex that is one of them) + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let problem = MinimumMetricDimension::new(graph); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&solution); + assert_eq!(value, Min(Some(3))); +} + +#[test] +fn test_minimum_metric_dimension_serialization() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumMetricDimension::new(graph); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MinimumMetricDimension = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 3); + assert_eq!(deserialized.num_edges(), 2); + + // Verify evaluation is preserved + let config = vec![1, 0, 0]; + 
assert_eq!(problem.evaluate(&config), deserialized.evaluate(&config)); +} + +#[test] +fn test_minimum_metric_dimension_size_getters() { + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = MinimumMetricDimension::new(graph); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); +} + +#[test] +fn test_minimum_metric_dimension_cycle() { + // C5: metric dimension of a cycle C_n with n >= 3 is 2 + let graph = SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]); + let problem = MinimumMetricDimension::new(graph); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&solution); + assert_eq!(value, Min(Some(2))); +} diff --git a/src/unit_tests/models/graph/vertex_cover.rs b/src/unit_tests/models/graph/vertex_cover.rs new file mode 100644 index 00000000..76de3efa --- /dev/null +++ b/src/unit_tests/models/graph/vertex_cover.rs @@ -0,0 +1,100 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Or; + +fn issue_instance() -> VertexCover { + // Triangle {0,1,2} with pendant edge to 3 + VertexCover::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (0, 2), (2, 3)]), 2) +} + +#[test] +fn test_vertex_cover_creation() { + let problem = issue_instance(); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 4); + assert_eq!(problem.k(), 2); + assert_eq!(problem.dims(), vec![2; 4]); +} + +#[test] +fn test_vertex_cover_evaluate_valid() { + let problem = issue_instance(); + // {0, 2} covers all edges with size 2 ≤ k=2 + assert_eq!(problem.evaluate(&[1, 0, 1, 0]), Or(true)); +} + +#[test] +fn test_vertex_cover_evaluate_too_large() { + let problem = issue_instance(); + // {0, 1, 2} is a valid cover but size 3 > k=2 + assert_eq!(problem.evaluate(&[1, 1, 1, 0]), Or(false)); +} + +#[test] +fn test_vertex_cover_evaluate_not_covering() { + let problem = 
issue_instance(); + // {0} doesn't cover edge (1,2) + assert_eq!(problem.evaluate(&[1, 0, 0, 0]), Or(false)); +} + +#[test] +fn test_vertex_cover_evaluate_k1_impossible() { + // Same graph but k=1 — impossible for triangle + let problem = VertexCover::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (0, 2), (2, 3)]), 1); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem); + assert!(witness.is_none()); +} + +#[test] +fn test_vertex_cover_solver() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + let w = witness.unwrap(); + assert_eq!(problem.evaluate(&w), Or(true)); + // Cover size should be ≤ k=2 + let count: usize = w.iter().filter(|&&v| v == 1).count(); + assert!(count <= 2); +} + +#[test] +fn test_vertex_cover_all_witnesses() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let witnesses = solver.find_all_witnesses(&problem); + // For k=2 on triangle+pendant: covers of size ≤2 that cover all edges + // Valid size-2 covers: {0,2}, {1,2} (vertex 2 covers pendant edge) + // {0,1} doesn't cover (2,3) + assert!(witnesses.len() >= 2); + for w in &witnesses { + assert_eq!(problem.evaluate(w), Or(true)); + } +} + +#[test] +fn test_vertex_cover_serialization() { + let problem = issue_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let restored: VertexCover = serde_json::from_str(&json).unwrap(); + assert_eq!(restored.num_vertices(), 4); + assert_eq!(restored.k(), 2); + assert_eq!(restored.evaluate(&[1, 0, 1, 0]), Or(true)); +} + +#[test] +fn test_vertex_cover_path_graph() { + // Path 0-1-2: minimum cover is {1}, size 1 + let problem = VertexCover::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), 1); + assert_eq!(problem.evaluate(&[0, 1, 0]), Or(true)); + assert_eq!(problem.evaluate(&[1, 0, 0]), Or(false)); // Doesn't cover (1,2) +} + +#[test] +#[should_panic(expected = "k must be positive")] +fn 
test_vertex_cover_k_zero() { + VertexCover::new(SimpleGraph::new(3, vec![(0, 1)]), 0); +} diff --git a/src/unit_tests/models/misc/clustering.rs b/src/unit_tests/models/misc/clustering.rs new file mode 100644 index 00000000..1b0e16ad --- /dev/null +++ b/src/unit_tests/models/misc/clustering.rs @@ -0,0 +1,159 @@ +use crate::models::misc::Clustering; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +/// Helper: build the 6-element two-group instance from the issue. +fn two_group_instance() -> Clustering { + let distances = vec![ + vec![0, 1, 1, 3, 3, 3], + vec![1, 0, 1, 3, 3, 3], + vec![1, 1, 0, 3, 3, 3], + vec![3, 3, 3, 0, 1, 1], + vec![3, 3, 3, 1, 0, 1], + vec![3, 3, 3, 1, 1, 0], + ]; + Clustering::new(distances, 2, 1) +} + +#[test] +fn test_clustering_creation() { + let problem = two_group_instance(); + assert_eq!(problem.num_elements(), 6); + assert_eq!(problem.num_clusters(), 2); + assert_eq!(problem.diameter_bound(), 1); + assert_eq!(problem.distances().len(), 6); + assert_eq!(problem.dims(), vec![2; 6]); +} + +#[test] +fn test_clustering_evaluate_feasible() { + let problem = two_group_instance(); + // Cluster 0 = {0,1,2}, Cluster 1 = {3,4,5} + // All intra-cluster distances = 1 ≤ B=1 + let result = problem.evaluate(&[0, 0, 0, 1, 1, 1]); + assert!(result.0); +} + +#[test] +fn test_clustering_evaluate_infeasible_distance() { + let problem = two_group_instance(); + // Put element 3 (inter-group distance 3) in cluster 0 with {0,1,2} + // distances[0][3] = 3 > B=1 → infeasible + let result = problem.evaluate(&[0, 0, 0, 0, 1, 1]); + assert!(!result.0); +} + +#[test] +fn test_clustering_evaluate_all_same_cluster() { + let problem = two_group_instance(); + // All elements in one cluster → inter-group distance 3 > 1 → infeasible + let result = problem.evaluate(&[0, 0, 0, 0, 0, 0]); + assert!(!result.0); +} + +#[test] +fn test_clustering_evaluate_wrong_length() { + let problem = two_group_instance(); + assert!(!problem.evaluate(&[0, 0, 0]).0); + 
assert!(!problem.evaluate(&[0, 0, 0, 1, 1, 1, 0]).0); +} + +#[test] +fn test_clustering_evaluate_invalid_cluster_index() { + let problem = two_group_instance(); + // Cluster index 2 is invalid (K=2, valid indices are 0,1) + assert!(!problem.evaluate(&[0, 0, 2, 1, 1, 1]).0); +} + +#[test] +fn test_clustering_trivial_k_ge_n() { + // K ≥ n: each element in its own cluster → always feasible + let distances = vec![vec![0, 100, 100], vec![100, 0, 100], vec![100, 100, 0]]; + let problem = Clustering::new(distances, 3, 0); + // Each element in its own cluster: [0, 1, 2] + assert!(problem.evaluate(&[0, 1, 2]).0); +} + +#[test] +fn test_clustering_solver() { + let problem = two_group_instance(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap()).0); +} + +#[test] +fn test_clustering_solver_all_witnesses() { + // Small instance: 4 elements, K=2, B=1 + let distances = vec![ + vec![0, 1, 3, 3], + vec![1, 0, 3, 3], + vec![3, 3, 0, 1], + vec![3, 3, 1, 0], + ]; + let problem = Clustering::new(distances, 2, 1); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.evaluate(sol).0); + } + // Two valid groupings: {0,1} vs {2,3} in either assignment order + // [0,0,1,1] and [1,1,0,0] + assert_eq!(solutions.len(), 2); +} + +#[test] +fn test_clustering_serialization() { + let problem = two_group_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: Clustering = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_elements(), 6); + assert_eq!(deserialized.num_clusters(), 2); + assert_eq!(deserialized.diameter_bound(), 1); + // Check round-trip gives same evaluation + let config = vec![0, 0, 0, 1, 1, 1]; + assert_eq!( + problem.evaluate(&config).0, + deserialized.evaluate(&config).0 + ); +} + +#[test] +fn 
test_clustering_no_solution() { + // 3 elements all pairwise distance 5, K=1, B=2 → infeasible + let distances = vec![vec![0, 5, 5], vec![5, 0, 5], vec![5, 5, 0]]; + let problem = Clustering::new(distances, 1, 2); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +#[should_panic(expected = "symmetric")] +fn test_clustering_asymmetric_panics() { + let distances = vec![vec![0, 1], vec![2, 0]]; + Clustering::new(distances, 1, 1); +} + +#[test] +#[should_panic(expected = "Diagonal")] +fn test_clustering_nonzero_diagonal_panics() { + let distances = vec![vec![1, 1], vec![1, 0]]; + Clustering::new(distances, 1, 1); +} + +#[test] +fn test_clustering_paper_example() { + // Paper example: 6 elements, K=2, B=1 + let problem = two_group_instance(); + let config = vec![0, 0, 0, 1, 1, 1]; + let result = problem.evaluate(&config); + assert!(result.0); + + // Verify this is satisfiable + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + assert!(problem.evaluate(&witness.unwrap()).0); +} diff --git a/src/unit_tests/models/misc/maximum_likelihood_ranking.rs b/src/unit_tests/models/misc/maximum_likelihood_ranking.rs new file mode 100644 index 00000000..0d3f3ebe --- /dev/null +++ b/src/unit_tests/models/misc/maximum_likelihood_ranking.rs @@ -0,0 +1,163 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_maximum_likelihood_ranking_creation() { + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix.clone()); + assert_eq!(problem.num_items(), 4); + assert_eq!(problem.matrix(), &matrix); + assert_eq!(problem.dims(), vec![4; 4]); + assert_eq!( + ::NAME, + "MaximumLikelihoodRanking" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_maximum_likelihood_ranking_evaluate_optimal() { + let matrix = vec![ + vec![0, 4, 3, 5], 
+ vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix); + // Identity ranking: config[i] = i (item i is at position i) + // Disagreement pairs where config[a] > config[b]: + // (1,0): matrix[1][0] = 1 + // (2,0): matrix[2][0] = 2 + // (2,1): matrix[2][1] = 1 + // (3,0): matrix[3][0] = 0 + // (3,1): matrix[3][1] = 2 + // (3,2): matrix[3][2] = 1 + // Total = 1 + 2 + 1 + 0 + 2 + 1 = 7 + assert_eq!(problem.evaluate(&[0, 1, 2, 3]), Min(Some(7))); +} + +#[test] +fn test_maximum_likelihood_ranking_evaluate_non_permutation() { + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix); + // Duplicate rank + assert_eq!(problem.evaluate(&[0, 0, 2, 3]), Min(None)); + // Rank out of range + assert_eq!(problem.evaluate(&[0, 1, 2, 4]), Min(None)); + // Wrong length + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(None)); + assert_eq!(problem.evaluate(&[0, 1, 2, 3, 0]), Min(None)); +} + +#[test] +fn test_maximum_likelihood_ranking_evaluate_suboptimal() { + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix); + // Reversed ranking: config = [3, 2, 1, 0] + // (item 0 at pos 3, item 1 at pos 2, item 2 at pos 1, item 3 at pos 0) + // Pairs where config[a] > config[b]: + // (0,1): config[0]=3 > config[1]=2 -> matrix[0][1] = 4 + // (0,2): config[0]=3 > config[2]=1 -> matrix[0][2] = 3 + // (0,3): config[0]=3 > config[3]=0 -> matrix[0][3] = 5 + // (1,2): config[1]=2 > config[2]=1 -> matrix[1][2] = 4 + // (1,3): config[1]=2 > config[3]=0 -> matrix[1][3] = 3 + // (2,3): config[2]=1 > config[3]=0 -> matrix[2][3] = 4 + // Total = 4 + 3 + 5 + 4 + 3 + 4 = 23 + assert_eq!(problem.evaluate(&[3, 2, 1, 0]), Min(Some(23))); +} + +#[test] +fn test_maximum_likelihood_ranking_solver() { + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 
0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + let value = problem.evaluate(&solution); + assert_eq!(value, Min(Some(7))); +} + +#[test] +fn test_maximum_likelihood_ranking_serialization() { + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix.clone()); + let json = serde_json::to_value(&problem).unwrap(); + let restored: MaximumLikelihoodRanking = serde_json::from_value(json).unwrap(); + assert_eq!(restored.matrix(), &matrix); + assert_eq!(restored.num_items(), 4); +} + +#[test] +fn test_maximum_likelihood_ranking_two_items() { + // 2 items: a_01 = 3, a_10 = 2 + let matrix = vec![vec![0, 3], vec![2, 0]]; + let problem = MaximumLikelihoodRanking::new(matrix); + // config [0,1]: item 0 at pos 0, item 1 at pos 1 + // Only pair where config[a] > config[b]: (1,0) -> matrix[1][0] = 2 + assert_eq!(problem.evaluate(&[0, 1]), Min(Some(2))); + // config [1,0]: item 0 at pos 1, item 1 at pos 0 + // Only pair where config[a] > config[b]: (0,1) -> matrix[0][1] = 3 + assert_eq!(problem.evaluate(&[1, 0]), Min(Some(3))); + // Optimal is [0,1] with cost 2 + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(2))); +} + +#[test] +fn test_maximum_likelihood_ranking_single_item() { + let problem = MaximumLikelihoodRanking::new(vec![vec![0]]); + assert_eq!(problem.num_items(), 1); + assert_eq!(problem.dims(), vec![1]); + assert_eq!(problem.evaluate(&[0]), Min(Some(0))); +} + +#[test] +#[should_panic(expected = "matrix must be square")] +fn test_maximum_likelihood_ranking_non_square_panics() { + MaximumLikelihoodRanking::new(vec![vec![0, 1], vec![2, 0], vec![1, 2]]); +} + +#[test] +#[should_panic(expected = 
"diagonal entries must be zero")] +fn test_maximum_likelihood_ranking_nonzero_diagonal_panics() { + MaximumLikelihoodRanking::new(vec![vec![1, 2], vec![3, 0]]); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_maximum_likelihood_ranking_canonical_example() { + let specs = canonical_model_example_specs(); + assert_eq!(specs.len(), 1); + let spec = &specs[0]; + assert_eq!(spec.id, "maximum_likelihood_ranking"); + assert_eq!(spec.optimal_config, vec![0, 1, 2, 3]); + assert_eq!(spec.optimal_value, serde_json::json!(7)); +} diff --git a/src/unit_tests/models/misc/minimum_axiom_set.rs b/src/unit_tests/models/misc/minimum_axiom_set.rs new file mode 100644 index 00000000..e4e3d08a --- /dev/null +++ b/src/unit_tests/models/misc/minimum_axiom_set.rs @@ -0,0 +1,155 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +/// Helper: build the canonical 8-sentence example from the issue. +fn canonical_instance() -> MinimumAxiomSet { + MinimumAxiomSet::new( + 8, + vec![0, 1, 2, 3, 4, 5, 6, 7], + vec![ + (vec![0], 2), + (vec![0], 3), + (vec![1], 4), + (vec![1], 5), + (vec![2, 4], 6), + (vec![3, 5], 7), + (vec![6, 7], 0), + (vec![6, 7], 1), + ], + ) +} + +#[test] +fn test_minimum_axiom_set_creation() { + let problem = canonical_instance(); + assert_eq!(problem.num_sentences(), 8); + assert_eq!(problem.num_true_sentences(), 8); + assert_eq!(problem.num_implications(), 8); + assert_eq!(problem.true_sentences(), &[0, 1, 2, 3, 4, 5, 6, 7]); + assert_eq!(problem.dims(), vec![2; 8]); + assert_eq!(problem.num_variables(), 8); +} + +#[test] +fn test_minimum_axiom_set_evaluate_optimal() { + let problem = canonical_instance(); + // Select a and b (indices 0, 1): closure = all 8 sentences + let result = problem.evaluate(&[1, 1, 0, 0, 0, 0, 0, 0]); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 2); +} + +#[test] +fn test_minimum_axiom_set_evaluate_insufficient() { + let problem = canonical_instance(); + // Select only a (index 0): closure = {a, c, 
d} — missing b, e, f, g, h + let result = problem.evaluate(&[1, 0, 0, 0, 0, 0, 0, 0]); + assert!(!result.is_valid()); +} + +#[test] +fn test_minimum_axiom_set_evaluate_all_selected() { + let problem = canonical_instance(); + // Select all 8 sentences: closure = all 8 trivially + let result = problem.evaluate(&[1, 1, 1, 1, 1, 1, 1, 1]); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 8); +} + +#[test] +fn test_minimum_axiom_set_evaluate_none_selected() { + let problem = canonical_instance(); + // Select nothing: closure = empty + let result = problem.evaluate(&[0, 0, 0, 0, 0, 0, 0, 0]); + assert!(!result.is_valid()); +} + +#[test] +fn test_minimum_axiom_set_evaluate_wrong_length() { + let problem = canonical_instance(); + let result = problem.evaluate(&[1, 0]); + assert!(!result.is_valid()); +} + +#[test] +fn test_minimum_axiom_set_evaluate_out_of_range() { + let problem = canonical_instance(); + let result = problem.evaluate(&[2, 0, 0, 0, 0, 0, 0, 0]); + assert!(!result.is_valid()); +} + +#[test] +fn test_minimum_axiom_set_solver() { + let problem = canonical_instance(); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + let metric = problem.evaluate(&solution); + assert!(metric.is_valid()); + assert_eq!(metric.unwrap(), 2); +} + +#[test] +fn test_minimum_axiom_set_serialization() { + let problem = canonical_instance(); + let json = serde_json::to_value(&problem).unwrap(); + let restored: MinimumAxiomSet = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_sentences(), problem.num_sentences()); + assert_eq!(restored.true_sentences(), problem.true_sentences()); + assert_eq!(restored.implications(), problem.implications()); +} + +#[test] +fn test_minimum_axiom_set_partial_true_sentences() { + // Only sentences 0,1,2 are true; implications: ({0}, 1), ({1}, 2) + // Optimal: select {0} → closure {0,1,2} = T + let problem = MinimumAxiomSet::new(5, vec![0, 1, 2], 
vec![(vec![0], 1), (vec![1], 2)]); + assert_eq!(problem.num_sentences(), 5); + assert_eq!(problem.num_true_sentences(), 3); + assert_eq!(problem.dims(), vec![2; 3]); + + // Select sentence 0 only + let result = problem.evaluate(&[1, 0, 0]); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 1); + + // Select sentence 2 only — cannot derive 0 or 1 + let result = problem.evaluate(&[0, 0, 1]); + assert!(!result.is_valid()); +} + +#[test] +fn test_minimum_axiom_set_no_implications() { + // 3 sentences, all true, no implications + // Only way to cover T is to select all of them + let problem = MinimumAxiomSet::new(3, vec![0, 1, 2], vec![]); + let result = problem.evaluate(&[1, 1, 1]); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 3); + + // Selecting only 2 leaves one uncovered + let result = problem.evaluate(&[1, 1, 0]); + assert!(!result.is_valid()); +} + +#[test] +fn test_minimum_axiom_set_paper_example() { + // The canonical 8-sentence example + let problem = canonical_instance(); + + // Verify the issue's expected outcome: config [1,1,0,0,0,0,0,0] → Min(2) + let result = problem.evaluate(&[1, 1, 0, 0, 0, 0, 0, 0]); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 2); + + // Confirm with brute force that 2 is optimal + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + let metric = problem.evaluate(&solution); + assert!(metric.is_valid()); + assert_eq!(metric.unwrap(), 2); +} diff --git a/src/unit_tests/models/misc/minimum_code_generation_one_register.rs b/src/unit_tests/models/misc/minimum_code_generation_one_register.rs new file mode 100644 index 00000000..6210ed72 --- /dev/null +++ b/src/unit_tests/models/misc/minimum_code_generation_one_register.rs @@ -0,0 +1,250 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_minimum_code_generation_one_register_creation() { + let problem = 
MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + ); + assert_eq!(problem.num_vertices(), 7); + assert_eq!(problem.num_edges(), 8); + assert_eq!(problem.num_leaves(), 3); + assert_eq!(problem.num_internal(), 4); + assert_eq!(problem.dims(), vec![4; 4]); + assert_eq!( + ::NAME, + "MinimumCodeGenerationOneRegister" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_minimum_code_generation_one_register_evaluate_optimal() { + // Issue #900 example: optimal is 8 instructions + // Evaluation order: v3, v2, v1, v0 + // config[i] = position for internal vertex i + // internal = [0,1,2,3], so config = [3, 2, 1, 0] + let problem = MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + ); + let config = vec![3, 2, 1, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(8))); + assert_eq!(problem.simulate(&config), Some(8)); +} + +#[test] +fn test_minimum_code_generation_one_register_evaluate_suboptimal() { + // Another valid order: v3, v1, v2, v0 (computing v1 before v2). + // With greedy stores this also achieves 8 for this instance, + // showing the optimal value is robust to ordering choices here. 
+ let problem = MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + ); + // Order: v3 (pos 0), v1 (pos 1), v2 (pos 2), v0 (pos 3) + // config: v0->3, v1->1, v2->2, v3->0 + let config = vec![3, 1, 2, 0]; + assert_eq!(problem.simulate(&config), Some(8)); +} + +#[test] +fn test_minimum_code_generation_one_register_invalid_dependency() { + // Try to evaluate v0 before its children + let problem = MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + ); + // v0 first (pos 0) — depends on v1,v2 which haven't been computed + let config = vec![0, 1, 2, 3]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_code_generation_one_register_invalid_permutation() { + let problem = MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + ); + // Not a permutation: position 0 used twice + assert_eq!(problem.evaluate(&[0, 0, 1, 2]), Min(None)); + // Wrong length + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(None)); + // Position out of range + assert_eq!(problem.evaluate(&[0, 1, 2, 5]), Min(None)); +} + +#[test] +fn test_minimum_code_generation_one_register_solver() { + // Small instance: 4 vertices, 2 leaves, 2 internal + // v0 = op(v1, v2), v1 = op(v2, v3) + // Leaves: {2, 3}, Internal: {0, 1} + // Edges: (0,1), (0,2), (1,2), (1,3) + // Note: v2 is a child of both v0 and v1 (a shared operand), yet it is still a + // leaf: v0 has children v1,v2 and v1 has children v2,v3, so only v0 and v1 + // are internal. Leaves: v2 and v3 have out-degree 0. So num_leaves=2.
+ let problem = MinimumCodeGenerationOneRegister::new(4, vec![(0, 1), (0, 2), (1, 2), (1, 3)], 2); + let solver = BruteForce::new(); + let result = solver.solve(&problem); + // Only valid order: v1 first, then v0 + // v1: LOAD v2, OP v1 (using v3 from memory) = 2 instructions (or LOAD v3, OP v1 using v2) + // v0: OP v0 (using v1 from register, v2 from memory) = 1 instruction + // Total: 3 + assert_eq!(result, Min(Some(3))); +} + +#[test] +fn test_minimum_code_generation_one_register_solver_witness() { + let problem = MinimumCodeGenerationOneRegister::new(4, vec![(0, 1), (0, 2), (1, 2), (1, 3)], 2); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find witness"); + assert_eq!(problem.simulate(&witness), Some(3)); +} + +#[test] +fn test_minimum_code_generation_one_register_serialization() { + let problem = MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + ); + let json = serde_json::to_value(&problem).unwrap(); + let restored: MinimumCodeGenerationOneRegister = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_vertices(), problem.num_vertices()); + assert_eq!(restored.num_edges(), problem.num_edges()); + assert_eq!(restored.num_leaves(), problem.num_leaves()); + assert_eq!(restored.edges(), problem.edges()); +} + +#[test] +fn test_minimum_code_generation_one_register_unary_ops() { + // Simple chain: v0 = unary(v1), v1 = unary(v2) + // Leaves: {2}, Internal: {0, 1} + let problem = MinimumCodeGenerationOneRegister::new(3, vec![(0, 1), (1, 2)], 1); + // Order: v1 first, v0 second. 
config = [1, 0] + let config = vec![1, 0]; + // v1: LOAD v2, OP v1 = 2 + // v0: OP v0 (v1 in register) = 1 + // Total = 3 + assert_eq!(problem.simulate(&config), Some(3)); + assert_eq!(problem.evaluate(&config), Min(Some(3))); +} + +#[test] +fn test_minimum_code_generation_one_register_paper_example() { + // Issue #900 example + let problem = MinimumCodeGenerationOneRegister::new( + 7, + vec![ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 3), + (2, 5), + (3, 5), + (3, 6), + ], + 3, + ); + + // Optimal order: v3, v2, v1, v0 => config = [3, 2, 1, 0] + let config = vec![3, 2, 1, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(8))); + + // Verify with brute force + let solver = BruteForce::new(); + let result = solver.solve(&problem); + assert_eq!(result, Min(Some(8))); + + // Verify witness + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.simulate(&witness), Some(8)); +} + +#[test] +fn test_minimum_code_generation_one_register_lost_value() { + // Test that a value computed but not stored and overwritten becomes unavailable + // v0 = op(v1, v2), v1 = op(v3, v4), v2 = op(v3, v5) + // Leaves: {3, 4, 5}, Internal: {0, 1, 2} + // If we evaluate v1, then v2 (overwriting v1 in register without storing), + // then v0 needs v1 which is lost. + let problem = MinimumCodeGenerationOneRegister::new( + 6, + vec![(0, 1), (0, 2), (1, 3), (1, 4), (2, 3), (2, 5)], + 3, + ); + // Order: v1, v2, v0 => config: v0->2, v1->0, v2->1 + let config = vec![2, 0, 1]; + // v1 computed first, but v1 is needed by v0. + // When v2 is computed, we should check if v1 needs to be stored. + // future_uses[1] = 1 (used by v0), so STORE v1 before computing v2. + // So this should NOT be None — the simulation stores v1 automatically. 
+ let result = problem.simulate(&config); + assert!(result.is_some()); +} diff --git a/src/unit_tests/models/misc/minimum_code_generation_parallel_assignments.rs b/src/unit_tests/models/misc/minimum_code_generation_parallel_assignments.rs new file mode 100644 index 00000000..61a335fd --- /dev/null +++ b/src/unit_tests/models/misc/minimum_code_generation_parallel_assignments.rs @@ -0,0 +1,121 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_minimum_code_generation_parallel_assignments_creation() { + let assignments = vec![(0, vec![1, 2]), (1, vec![0]), (2, vec![3]), (3, vec![1, 2])]; + let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments.clone()); + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.num_assignments(), 4); + assert_eq!(problem.assignments(), &assignments); + assert_eq!(problem.dims(), vec![4; 4]); + assert_eq!( + ::NAME, + "MinimumCodeGenerationParallelAssignments" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_minimum_code_generation_parallel_assignments_evaluate_optimal() { + let assignments = vec![(0, vec![1, 2]), (1, vec![0]), (2, vec![3]), (3, vec![1, 2])]; + let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments); + // Config [0, 3, 1, 2]: A_0 at pos 0, A_1 at pos 3, A_2 at pos 1, A_3 at pos 2 + // Order: (A_0, A_2, A_3, A_1) + // A_0 writes a(0): A_1 reads a and is later (pos 3) -> 1 backward dep + // A_2 writes c(2): A_3 reads c and is later (pos 2) -> 1 backward dep + // A_3 writes d(3): A_1 does not read d -> 0 + // Total: 2 + assert_eq!(problem.evaluate(&[0, 3, 1, 2]), Min(Some(2))); +} + +#[test] +fn test_minimum_code_generation_parallel_assignments_evaluate_suboptimal() { + let assignments = vec![(0, vec![1, 2]), (1, vec![0]), (2, vec![3]), (3, vec![1, 2])]; + let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments); + // Config [1, 0, 2, 3]: A_0 at pos 1, A_1 at pos 0, A_2 at pos 2, A_3 at pos 
3 + // Order: (A_1, A_0, A_2, A_3) + // A_1 writes b(1): A_0 reads b (later, pos 1) -> 1; A_3 reads b (later, pos 3) -> 1 + // A_0 writes a(0): A_1 already executed -> 0 + // A_2 writes c(2): A_3 reads c (later, pos 3) -> 1 + // Total: 3 + assert_eq!(problem.evaluate(&[1, 0, 2, 3]), Min(Some(3))); +} + +#[test] +fn test_minimum_code_generation_parallel_assignments_evaluate_invalid() { + let assignments = vec![(0, vec![1, 2]), (1, vec![0]), (2, vec![3]), (3, vec![1, 2])]; + let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments); + // Duplicate position + assert_eq!(problem.evaluate(&[0, 0, 1, 2]), Min(None)); + // Out of range + assert_eq!(problem.evaluate(&[0, 1, 2, 4]), Min(None)); + // Wrong length + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(None)); + assert_eq!(problem.evaluate(&[0, 1, 2, 3, 0]), Min(None)); +} + +#[test] +fn test_minimum_code_generation_parallel_assignments_solver() { + let assignments = vec![(0, vec![1, 2]), (1, vec![0]), (2, vec![3]), (3, vec![1, 2])]; + let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + let value = problem.evaluate(&solution); + assert_eq!(value, Min(Some(2))); +} + +#[test] +fn test_minimum_code_generation_parallel_assignments_serialization() { + let assignments = vec![(0, vec![1, 2]), (1, vec![0]), (2, vec![3]), (3, vec![1, 2])]; + let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments.clone()); + let json = serde_json::to_value(&problem).unwrap(); + let restored: MinimumCodeGenerationParallelAssignments = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_variables(), 4); + assert_eq!(restored.assignments(), &assignments); +} + +#[test] +fn test_minimum_code_generation_parallel_assignments_no_dependencies() { + // No assignment reads the target of another -> 0 backward deps for any ordering + let assignments = vec![ + 
(0, vec![2]), // writes a, reads c + (1, vec![3]), // writes b, reads d + ]; + let problem = MinimumCodeGenerationParallelAssignments::new(4, assignments); + // Neither assignment reads the target of the other + assert_eq!(problem.evaluate(&[0, 1]), Min(Some(0))); + assert_eq!(problem.evaluate(&[1, 0]), Min(Some(0))); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(0))); +} + +#[test] +#[should_panic(expected = "target variable")] +fn test_minimum_code_generation_parallel_assignments_invalid_target_panics() { + MinimumCodeGenerationParallelAssignments::new(2, vec![(2, vec![0])]); +} + +#[test] +#[should_panic(expected = "read variable")] +fn test_minimum_code_generation_parallel_assignments_invalid_read_panics() { + MinimumCodeGenerationParallelAssignments::new(2, vec![(0, vec![3])]); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_minimum_code_generation_parallel_assignments_canonical_example() { + let specs = canonical_model_example_specs(); + assert_eq!(specs.len(), 1); + let spec = &specs[0]; + assert_eq!(spec.id, "minimum_code_generation_parallel_assignments"); + assert_eq!(spec.optimal_config, vec![0, 3, 1, 2]); + assert_eq!(spec.optimal_value, serde_json::json!(2)); +} diff --git a/src/unit_tests/models/misc/minimum_code_generation_unlimited_registers.rs b/src/unit_tests/models/misc/minimum_code_generation_unlimited_registers.rs new file mode 100644 index 00000000..281ab91b --- /dev/null +++ b/src/unit_tests/models/misc/minimum_code_generation_unlimited_registers.rs @@ -0,0 +1,198 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_minimum_code_generation_unlimited_registers_creation() { + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + assert_eq!(problem.num_vertices(), 5); + 
assert_eq!(problem.num_leaves(), 2); + assert_eq!(problem.num_internal(), 3); + assert_eq!(problem.left_arcs(), &[(1, 3), (2, 3), (0, 1)]); + assert_eq!(problem.right_arcs(), &[(1, 4), (2, 4), (0, 2)]); + assert_eq!(problem.dims(), vec![3; 3]); + assert_eq!( + ::NAME, + "MinimumCodeGenerationUnlimitedRegisters" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_evaluate_optimal() { + // Issue #902 example: optimal is 4 instructions (3 OPs + 1 LOAD) + // Evaluation order: v1, v2, v0 + // config[i] = position for internal vertex i + // internal = [0,1,2], so v0->pos 2, v1->pos 0, v2->pos 1 + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + let config = vec![2, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(Some(4))); + assert_eq!(problem.simulate(&config), Some(4)); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_evaluate_suboptimal() { + // Order: v2, v1, v0. Both v1 and v2 consume v3 as their left operand, and an + // OP overwrites the register holding its left operand. Because v3 is still + // needed by v1 when v2 executes, the simulation inserts a LOAD that copies + // v3 into a fresh register; v1 then consumes that copy.
+ // The key insight: when we count future uses, we check if the left child + // value is still needed. If so, we add a LOAD before the OP. + // After the OP, the left child's "register slot" now holds the new value. + // But the copy made by LOAD is in a separate register. + // + // Our simulation correctly handles this: it counts LOADs needed. + // Order v2(pos 0), v1(pos 1), v0(pos 2): + // config: v0->2, v1->1, v2->0 + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + let config = vec![2, 1, 0]; + // Step 0: OP v2, left=v3. future uses of v3 after decrement: left_uses=1 (from v1), right_uses=0. + // Still needed -> LOAD v3. instructions = 2 (1 LOAD + 1 OP). + // Step 1: OP v1, left=v3_copy. future uses of v3 after decrement: 0. + // Not needed -> no LOAD. instructions = 3 (+ 1 OP). + // Step 2: OP v0, left=v1. future uses of v1: 0. No LOAD. instructions = 4 (+ 1 OP). + // Total: 4 (same as optimal for this instance) + assert_eq!(problem.simulate(&config), Some(4)); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_dependency_violation() { + // Try to evaluate v0 before its children v1, v2 + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + // v0 first (pos 0) — depends on v1,v2 which haven't been computed + let config = vec![0, 1, 2]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_invalid_permutation() { + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + // Not a permutation: position 0 used twice + assert_eq!(problem.evaluate(&[0, 0, 1]), Min(None)); + // Wrong length + assert_eq!(problem.evaluate(&[0, 1]), Min(None)); + // Position out of range + assert_eq!(problem.evaluate(&[0, 1, 5]), Min(None)); +} + +#[test] +fn 
test_minimum_code_generation_unlimited_registers_solver() { + // Issue #902 example + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + let solver = BruteForce::new(); + let result = solver.solve(&problem); + assert_eq!(result, Min(Some(4))); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_solver_witness() { + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find witness"); + assert_eq!(problem.simulate(&witness), Some(4)); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_serialization() { + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + let json = serde_json::to_value(&problem).unwrap(); + let restored: MinimumCodeGenerationUnlimitedRegisters = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_vertices(), problem.num_vertices()); + assert_eq!(restored.left_arcs(), problem.left_arcs()); + assert_eq!(restored.right_arcs(), problem.right_arcs()); + assert_eq!(restored.num_leaves(), problem.num_leaves()); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_unary_ops() { + // Simple chain: v0 = unary(v1), v1 = unary(v2) + // Leaves: {2}, Internal: {0, 1} + // Unary ops only have left arcs + let problem = MinimumCodeGenerationUnlimitedRegisters::new(3, vec![(0, 1), (1, 2)], vec![]); + // Order: v1 first, v0 second. config = [1, 0] + let config = vec![1, 0]; + // v1: left=v2, no future uses of v2 -> no LOAD. OP v1 = 1. + // v0: left=v1, no future uses of v1 -> no LOAD. OP v0 = 1. 
+ // Total = 2 (just 2 OPs, no copies needed) + assert_eq!(problem.simulate(&config), Some(2)); + assert_eq!(problem.evaluate(&config), Min(Some(2))); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_no_copy_needed() { + // v0 = op(v1, v2), v1 and v2 are leaves + // No shared operands, so no copies needed + let problem = MinimumCodeGenerationUnlimitedRegisters::new(3, vec![(0, 1)], vec![(0, 2)]); + // Only one internal vertex v0, config = [0] + let config = vec![0]; + // OP v0: left=v1, right=v2. No future uses of v1. No LOAD. 1 OP. + assert_eq!(problem.simulate(&config), Some(1)); +} + +#[test] +fn test_minimum_code_generation_unlimited_registers_paper_example() { + // Issue #902 example + let problem = MinimumCodeGenerationUnlimitedRegisters::new( + 5, + vec![(1, 3), (2, 3), (0, 1)], + vec![(1, 4), (2, 4), (0, 2)], + ); + + // Optimal order: v1, v2, v0 => config = [2, 0, 1] + let config = vec![2, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(Some(4))); + + // Verify with brute force + let solver = BruteForce::new(); + let result = solver.solve(&problem); + assert_eq!(result, Min(Some(4))); + + // Verify witness + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.simulate(&witness), Some(4)); +} diff --git a/src/unit_tests/models/misc/minimum_decision_tree.rs b/src/unit_tests/models/misc/minimum_decision_tree.rs new file mode 100644 index 00000000..6332ff56 --- /dev/null +++ b/src/unit_tests/models/misc/minimum_decision_tree.rs @@ -0,0 +1,114 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_instance() -> MinimumDecisionTree { + MinimumDecisionTree::new( + vec![ + vec![true, true, false, false], // T0 + vec![true, false, false, false], // T1 + vec![false, true, false, true], // T2 + ], + 4, + 3, + ) +} + +#[test] +fn test_minimum_decision_tree_creation() { + let problem = issue_instance(); + assert_eq!(problem.num_objects(), 4); + 
assert_eq!(problem.num_tests(), 3); + assert_eq!(problem.dims().len(), 7); // 2^(4-1) - 1 = 7 + assert_eq!(problem.dims(), vec![4; 7]); // 3 tests + 1 sentinel = 4 choices +} + +#[test] +fn test_minimum_decision_tree_evaluate_optimal() { + let problem = issue_instance(); + // Balanced tree: T0 at root, T2 left, T1 right, rest leaves + let config = vec![0, 2, 1, 3, 3, 3, 3]; + assert_eq!(problem.evaluate(&config), Min(Some(8))); +} + +#[test] +fn test_minimum_decision_tree_evaluate_suboptimal() { + let problem = issue_instance(); + // Unbalanced tree: T1 at root, T0 at left, leaf at right, T2 at left-left, leaf at left-right + // T1 at root: o0 goes right (T1=1→leaf), others go left (T1=0) + // T0 at node 1: o1 goes right (T0=1→leaf at depth 2), o2,o3 go left (T0=0) + // T2 at node 3: o2 goes left (T2=0→leaf at depth 3), o3 goes right (T2=1→leaf at depth 3) + let config = vec![1, 0, 3, 2, 3, 3, 3]; + assert_eq!(problem.evaluate(&config), Min(Some(9))); +} + +#[test] +fn test_minimum_decision_tree_evaluate_invalid_duplicate_leaf() { + let problem = issue_instance(); + // All leaves immediately — no tests applied, all objects reach same leaf + let config = vec![3, 3, 3, 3, 3, 3, 3]; + // Root is a leaf, all objects land at root — duplicates + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_decision_tree_evaluate_wrong_length() { + let problem = issue_instance(); + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(None)); +} + +#[test] +fn test_minimum_decision_tree_solver() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(8))); +} + +#[test] +fn test_minimum_decision_tree_witness() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + assert_eq!(problem.evaluate(&witness.unwrap()), Min(Some(8))); +} + +#[test] +fn test_minimum_decision_tree_serialization() { + 
let problem = issue_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let restored: MinimumDecisionTree = serde_json::from_str(&json).unwrap(); + assert_eq!(restored.num_objects(), 4); + assert_eq!(restored.num_tests(), 3); + let config = vec![0, 2, 1, 3, 3, 3, 3]; + assert_eq!(restored.evaluate(&config), Min(Some(8))); +} + +#[test] +fn test_minimum_decision_tree_two_objects() { + // Simplest case: 2 objects, 1 test + let problem = MinimumDecisionTree::new( + vec![vec![false, true]], // T0 distinguishes o0 (false) from o1 (true) + 2, + 1, + ); + assert_eq!(problem.dims().len(), 1); // 2^(2-1) - 1 = 1 slot + // Test at root, both objects go to leaves at depth 1 + assert_eq!(problem.evaluate(&[0]), Min(Some(2))); // depth 1 + depth 1 + assert_eq!(problem.evaluate(&[1]), Min(None)); // sentinel=1 is leaf at root, both objects at same leaf +} + +#[test] +#[should_panic(expected = "Need at least 2 objects")] +fn test_minimum_decision_tree_too_few_objects() { + MinimumDecisionTree::new(vec![vec![true]], 1, 1); +} + +#[test] +#[should_panic(expected = "not distinguished")] +fn test_minimum_decision_tree_indistinguishable() { + // Two objects with identical test results + MinimumDecisionTree::new(vec![vec![true, true]], 2, 1); +} diff --git a/src/unit_tests/models/misc/minimum_disjunctive_normal_form.rs b/src/unit_tests/models/misc/minimum_disjunctive_normal_form.rs new file mode 100644 index 00000000..39ba6a66 --- /dev/null +++ b/src/unit_tests/models/misc/minimum_disjunctive_normal_form.rs @@ -0,0 +1,130 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_instance() -> MinimumDisjunctiveNormalForm { + // f(x1,x2,x3) = 1 when exactly 1 or 2 variables are true + MinimumDisjunctiveNormalForm::new(3, vec![false, true, true, true, true, true, true, false]) +} + +#[test] +fn test_minimum_dnf_creation() { + let problem = issue_instance(); + assert_eq!(problem.num_variables(), 3); + 
assert_eq!(problem.minterms().len(), 6); + assert_eq!(problem.num_prime_implicants(), 6); + assert_eq!(problem.dims(), vec![2; 6]); +} + +#[test] +fn test_minimum_dnf_prime_implicants() { + let problem = issue_instance(); + let pis = problem.prime_implicants(); + + // Each prime implicant should cover exactly 2 of the 6 minterms + for pi in pis { + let covered: Vec = problem + .minterms() + .iter() + .filter(|&&mt| pi.covers(mt)) + .copied() + .collect(); + assert_eq!(covered.len(), 2, "PI {:?} covers {:?}", pi.pattern, covered); + } +} + +#[test] +fn test_minimum_dnf_evaluate_all_selected() { + let problem = issue_instance(); + // Select all prime implicants — valid but not minimal + let config = vec![1; 6]; + assert_eq!(problem.evaluate(&config), Min(Some(6))); +} + +#[test] +fn test_minimum_dnf_evaluate_none_selected() { + let problem = issue_instance(); + let config = vec![0; 6]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_dnf_evaluate_insufficient() { + let problem = issue_instance(); + // Select only the first prime implicant — covers at most 2 minterms, not all 6 + let config = vec![1, 0, 0, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_dnf_solver() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(3))); +} + +#[test] +fn test_minimum_dnf_all_witnesses() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let witnesses = solver.find_all_witnesses(&problem); + // Should be exactly 2 optimal covers of size 3 + assert_eq!(witnesses.len(), 2); + for w in &witnesses { + assert_eq!(problem.evaluate(w), Min(Some(3))); + } +} + +#[test] +fn test_minimum_dnf_serialization() { + let problem = issue_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let restored: MinimumDisjunctiveNormalForm = serde_json::from_str(&json).unwrap(); + 
assert_eq!(restored.num_variables(), 3); + assert_eq!(restored.num_prime_implicants(), 6); +} + +#[test] +fn test_minimum_dnf_two_variables() { + // f(x1,x2) = x1 XOR x2 = {01, 10} + let problem = MinimumDisjunctiveNormalForm::new(2, vec![false, true, true, false]); + assert_eq!(problem.minterms(), &[1, 2]); + // Prime implicants: ¬x1∧x2 covers {01}, x1∧¬x2 covers {10} + assert_eq!(problem.num_prime_implicants(), 2); + + let solver = BruteForce::new(); + let value = solver.solve(&problem); + assert_eq!(value, Min(Some(2))); // Both PIs needed +} + +#[test] +fn test_minimum_dnf_single_minterm() { + // f(x1,x2) = x1 AND x2 = {11} + let problem = MinimumDisjunctiveNormalForm::new(2, vec![false, false, false, true]); + assert_eq!(problem.minterms(), &[3]); + assert_eq!(problem.num_prime_implicants(), 1); // x1∧x2 + let solver = BruteForce::new(); + assert_eq!(solver.solve(&problem), Min(Some(1))); +} + +#[test] +fn test_minimum_dnf_tautology_minus_one() { + // f = all true except 000 and 111 (same as issue example) + let problem = issue_instance(); + let solver = BruteForce::new(); + assert_eq!(solver.solve(&problem), Min(Some(3))); +} + +#[test] +fn test_minimum_dnf_wrong_config_length() { + let problem = issue_instance(); + assert_eq!(problem.evaluate(&[1, 0, 1]), Min(None)); +} + +#[test] +#[should_panic(expected = "at least one minterm")] +fn test_minimum_dnf_all_false() { + MinimumDisjunctiveNormalForm::new(2, vec![false, false, false, false]); +} diff --git a/src/unit_tests/models/misc/minimum_fault_detection_test_set.rs b/src/unit_tests/models/misc/minimum_fault_detection_test_set.rs new file mode 100644 index 00000000..664f7a98 --- /dev/null +++ b/src/unit_tests/models/misc/minimum_fault_detection_test_set.rs @@ -0,0 +1,142 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_problem() -> MinimumFaultDetectionTestSet { + // 7 vertices, inputs={0,1}, outputs={5,6} + // Arcs: 
(0,2),(0,3),(1,3),(1,4),(2,5),(3,5),(3,6),(4,6) + MinimumFaultDetectionTestSet::new( + 7, + vec![ + (0, 2), + (0, 3), + (1, 3), + (1, 4), + (2, 5), + (3, 5), + (3, 6), + (4, 6), + ], + vec![0, 1], + vec![5, 6], + ) +} + +#[test] +fn test_minimum_fault_detection_test_set_creation() { + let problem = issue_problem(); + + assert_eq!(problem.num_vertices(), 7); + assert_eq!(problem.num_arcs(), 8); + assert_eq!(problem.inputs(), &[0, 1]); + assert_eq!(problem.outputs(), &[5, 6]); + assert_eq!(problem.num_inputs(), 2); + assert_eq!(problem.num_outputs(), 2); + // 2 inputs * 2 outputs = 4 pairs + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.dims(), vec![2; 4]); + assert_eq!( + ::NAME, + "MinimumFaultDetectionTestSet" + ); + assert!(::variant().is_empty()); +} + +#[test] +fn test_minimum_fault_detection_test_set_evaluate_optimal() { + let problem = issue_problem(); + + // Config [1,0,0,1]: select pairs (0,5) and (1,6) + // (0,5) covers {0,2,3,5}, (1,6) covers {1,3,4,6} + // Union = {0,1,2,3,4,5,6} = all 7 vertices -> Min(2) + assert_eq!(problem.evaluate(&[1, 0, 0, 1]), Min(Some(2))); +} + +#[test] +fn test_minimum_fault_detection_test_set_evaluate_insufficient() { + let problem = issue_problem(); + + // Config [1,0,0,0]: select only pair (0,5) + // (0,5) covers {0,2,3,5} -> missing {1,4,6} -> Min(None) + assert_eq!(problem.evaluate(&[1, 0, 0, 0]), Min(None)); + + // Config [0,0,0,1]: select only pair (1,6) + // (1,6) covers {1,3,4,6} -> missing {0,2,5} -> Min(None) + assert_eq!(problem.evaluate(&[0, 0, 0, 1]), Min(None)); +} + +#[test] +fn test_minimum_fault_detection_test_set_evaluate_all_pairs() { + let problem = issue_problem(); + + // Config [1,1,1,1]: select all 4 pairs + // Union covers all vertices -> Min(4) + assert_eq!(problem.evaluate(&[1, 1, 1, 1]), Min(Some(4))); +} + +#[test] +fn test_minimum_fault_detection_test_set_evaluate_no_selection() { + let problem = issue_problem(); + + // No pairs selected -> nothing covered -> Min(None) + 
assert_eq!(problem.evaluate(&[0, 0, 0, 0]), Min(None)); +} + +#[test] +fn test_minimum_fault_detection_test_set_wrong_config_length() { + let problem = issue_problem(); + + assert_eq!(problem.evaluate(&[1, 0]), Min(None)); +} + +#[test] +fn test_minimum_fault_detection_test_set_solver() { + let problem = issue_problem(); + let solver = BruteForce::new(); + + use crate::solvers::Solver; + let optimal = solver.solve(&problem); + assert_eq!(optimal, Min(Some(2))); + + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + let w = witness.unwrap(); + assert_eq!(problem.evaluate(&w), Min(Some(2))); +} + +#[test] +fn test_minimum_fault_detection_test_set_serialization() { + let problem = issue_problem(); + let json = serde_json::to_string(&problem).unwrap(); + let round_trip: MinimumFaultDetectionTestSet = serde_json::from_str(&json).unwrap(); + + assert_eq!(round_trip.num_vertices(), 7); + assert_eq!(round_trip.num_arcs(), 8); + assert_eq!(round_trip.inputs(), &[0, 1]); + assert_eq!(round_trip.outputs(), &[5, 6]); + assert_eq!(round_trip.evaluate(&[1, 0, 0, 1]), Min(Some(2))); +} + +#[test] +fn test_minimum_fault_detection_test_set_paper_example() { + let problem = issue_problem(); + + // Verify the paper example: optimal config [1,0,0,1] with value 2 + assert_eq!(problem.evaluate(&[1, 0, 0, 1]), Min(Some(2))); + + // Confirm optimality via brute force + let solver = BruteForce::new(); + use crate::solvers::Solver; + let optimal = solver.solve(&problem); + assert_eq!(optimal, Min(Some(2))); + + // Verify there is exactly one optimal witness + let all = solver.find_all_witnesses(&problem); + let optimal_witnesses: Vec<_> = all + .into_iter() + .filter(|w| problem.evaluate(w) == Min(Some(2))) + .collect(); + assert_eq!(optimal_witnesses.len(), 1); + assert_eq!(optimal_witnesses[0], vec![1, 0, 0, 1]); +} diff --git a/src/unit_tests/models/misc/minimum_register_sufficiency_for_loops.rs 
b/src/unit_tests/models/misc/minimum_register_sufficiency_for_loops.rs new file mode 100644 index 00000000..fd895168 --- /dev/null +++ b/src/unit_tests/models/misc/minimum_register_sufficiency_for_loops.rs @@ -0,0 +1,155 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_creation() { + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3), (4, 3)]); + assert_eq!(problem.loop_length(), 6); + assert_eq!(problem.num_variables(), 3); + assert_eq!(problem.variables(), &[(0, 3), (2, 3), (4, 3)]); + assert_eq!(problem.dims(), vec![3, 3, 3]); +} + +#[test] +fn test_evaluate_optimal() { + // K3 graph: all 3 vars conflict, need 3 registers + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3), (4, 3)]); + let result = problem.evaluate(&[0, 1, 2]); + assert_eq!(result, Min(Some(3))); +} + +#[test] +fn test_evaluate_conflict() { + // Two overlapping vars assigned same register => conflict + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3), (4, 3)]); + let result = problem.evaluate(&[0, 0, 1]); + // Vars 0 and 1 overlap (arcs [0,3) and [2,5)), same register 0 => invalid + assert_eq!(result, Min(None)); +} + +#[test] +fn test_evaluate_non_overlapping() { + // Two non-overlapping vars can share a register + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 2), (3, 2)]); + // Arcs [0,2) and [3,5) don't overlap + let result = problem.evaluate(&[0, 0]); + assert_eq!(result, Min(Some(1))); +} + +#[test] +fn test_evaluate_all_different() { + // Trivial assignment: all different registers + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3), (4, 3)]); + let result = problem.evaluate(&[0, 1, 2]); + assert_eq!(result, Min(Some(3))); +} + +#[test] +fn test_evaluate_invalid_config_length() { + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3)]); + let result = 
problem.evaluate(&[0]); + assert_eq!(result, Min(None)); +} + +#[test] +fn test_evaluate_out_of_range_register() { + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3)]); + let result = problem.evaluate(&[0, 5]); // 5 >= num_variables (2) + assert_eq!(result, Min(None)); +} + +#[test] +fn test_solver_k3() { + // All pairs conflict: need 3 registers + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3), (4, 3)]); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&witness); + assert_eq!(value, Min(Some(3))); +} + +#[test] +fn test_solver_two_non_overlapping() { + // Two non-overlapping arcs: can share 1 register + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 2), (3, 2)]); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&witness); + assert_eq!(value, Min(Some(1))); +} + +#[test] +fn test_solver_two_overlapping() { + // Two overlapping arcs: need 2 registers + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 4), (3, 4)]); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&witness); + assert_eq!(value, Min(Some(2))); +} + +#[test] +fn test_circular_wrap_around_overlap() { + // Arc (5, 3) on loop length 6 covers timesteps {5, 0, 1} + // Arc (0, 3) covers timesteps {0, 1, 2} + // They overlap at timesteps 0 and 1 + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(5, 3), (0, 3)]); + let result = problem.evaluate(&[0, 0]); + assert_eq!(result, Min(None)); // conflict + let result = problem.evaluate(&[0, 1]); + assert_eq!(result, Min(Some(2))); +} + +#[test] +fn test_single_variable() { + let problem = MinimumRegisterSufficiencyForLoops::new(4, vec![(0, 2)]); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let value = 
problem.evaluate(&witness); + assert_eq!(value, Min(Some(1))); +} + +#[test] +fn test_serialization() { + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3), (4, 3)]); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MinimumRegisterSufficiencyForLoops = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.loop_length(), 6); + assert_eq!(deserialized.num_variables(), 3); + assert_eq!(deserialized.variables(), &[(0, 3), (2, 3), (4, 3)]); +} + +#[test] +fn test_paper_example() { + // Paper example: N=6, vars: (0,3), (2,3), (4,3) - all pairs conflict (K3) + // Config [0,1,2] -> 3 registers -> Min(3) is optimal + let problem = MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 3), (2, 3), (4, 3)]); + let config = vec![0, 1, 2]; + let result = problem.evaluate(&config); + assert_eq!(result, Min(Some(3))); + + // Verify optimality with brute force + let solver = BruteForce::new(); + let best = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&best), Min(Some(3))); +} + +#[test] +#[should_panic(expected = "loop_length must be positive")] +fn test_zero_loop_length_panics() { + MinimumRegisterSufficiencyForLoops::new(0, vec![]); +} + +#[test] +#[should_panic(expected = "duration")] +fn test_zero_duration_panics() { + MinimumRegisterSufficiencyForLoops::new(6, vec![(0, 0)]); +} + +#[test] +#[should_panic(expected = "start_time")] +fn test_invalid_start_time_panics() { + MinimumRegisterSufficiencyForLoops::new(6, vec![(6, 2)]); +} diff --git a/src/unit_tests/models/misc/minimum_weight_and_or_graph.rs b/src/unit_tests/models/misc/minimum_weight_and_or_graph.rs new file mode 100644 index 00000000..1a708e8c --- /dev/null +++ b/src/unit_tests/models/misc/minimum_weight_and_or_graph.rs @@ -0,0 +1,145 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_problem() -> MinimumWeightAndOrGraph { + // 7 vertices: AND at 0, OR at 1 and 2, 
leaves 3-6 + // Arcs: (0,1,1), (0,2,2), (1,3,3), (1,4,1), (2,5,4), (2,6,2) + MinimumWeightAndOrGraph::new( + 7, + vec![(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)], + 0, + vec![Some(true), Some(false), Some(false), None, None, None, None], + vec![1, 2, 3, 1, 4, 2], + ) +} + +#[test] +fn test_minimum_weight_and_or_graph_creation() { + let problem = issue_problem(); + + assert_eq!(problem.num_vertices(), 7); + assert_eq!(problem.num_arcs(), 6); + assert_eq!(problem.source(), 0); + assert_eq!(problem.gate_types().len(), 7); + assert_eq!(problem.arc_weights().len(), 6); + assert_eq!(problem.num_variables(), 6); + assert_eq!(problem.dims(), vec![2; 6]); + assert_eq!( + ::NAME, + "MinimumWeightAndOrGraph" + ); + assert!(::variant().is_empty()); +} + +#[test] +fn test_minimum_weight_and_or_graph_evaluate_optimal() { + let problem = issue_problem(); + + // Config [1,1,0,1,0,1]: arcs 0,1,3,5 selected + // Weights: 1+2+1+2 = 6 + assert_eq!(problem.evaluate(&[1, 1, 0, 1, 0, 1]), Min(Some(6))); +} + +#[test] +fn test_minimum_weight_and_or_graph_evaluate_all_arcs() { + let problem = issue_problem(); + + // Config [1,1,1,1,1,1]: all arcs selected, also valid (AND satisfied, OR satisfied) + // Weights: 1+2+3+1+4+2 = 13 + assert_eq!(problem.evaluate(&[1, 1, 1, 1, 1, 1]), Min(Some(13))); +} + +#[test] +fn test_minimum_weight_and_or_graph_and_violated() { + let problem = issue_problem(); + + // Config [1,0,0,1,0,1]: arc 1 (0->2) not selected, but source is AND + // AND at source requires both arcs 0 and 1 + assert_eq!(problem.evaluate(&[1, 0, 0, 1, 0, 1]), Min(None)); +} + +#[test] +fn test_minimum_weight_and_or_graph_or_violated() { + let problem = issue_problem(); + + // Config [1,1,0,0,0,1]: arcs 0,1,5 selected + // OR at v1 has no selected outgoing arcs (arcs 2,3 both 0) + assert_eq!(problem.evaluate(&[1, 1, 0, 0, 0, 1]), Min(None)); +} + +#[test] +fn test_minimum_weight_and_or_graph_dangling_arc() { + let problem = issue_problem(); + + // Config [0,0,1,0,0,0]: only arc 2 
(1->3) selected + // Arc 2 goes from vertex 1, but vertex 1 is not solved (no arc leads to it from source) + // Source AND requires arcs 0,1 — they are missing, so it's invalid at the source check + assert_eq!(problem.evaluate(&[0, 0, 1, 0, 0, 0]), Min(None)); +} + +#[test] +fn test_minimum_weight_and_or_graph_empty_config() { + let problem = issue_problem(); + + // No arcs selected: AND at source requires all outgoing arcs + assert_eq!(problem.evaluate(&[0, 0, 0, 0, 0, 0]), Min(None)); +} + +#[test] +fn test_minimum_weight_and_or_graph_wrong_config_length() { + let problem = issue_problem(); + + assert_eq!(problem.evaluate(&[1, 1, 0]), Min(None)); +} + +#[test] +fn test_minimum_weight_and_or_graph_solver() { + let problem = issue_problem(); + let solver = BruteForce::new(); + + use crate::solvers::Solver; + let optimal = solver.solve(&problem); + assert_eq!(optimal, Min(Some(6))); + + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + let w = witness.unwrap(); + assert_eq!(problem.evaluate(&w), Min(Some(6))); +} + +#[test] +fn test_minimum_weight_and_or_graph_serialization() { + let problem = issue_problem(); + let json = serde_json::to_string(&problem).unwrap(); + let round_trip: MinimumWeightAndOrGraph = serde_json::from_str(&json).unwrap(); + + assert_eq!(round_trip.num_vertices(), 7); + assert_eq!(round_trip.num_arcs(), 6); + assert_eq!(round_trip.source(), 0); + assert_eq!(round_trip.evaluate(&[1, 1, 0, 1, 0, 1]), Min(Some(6))); +} + +#[test] +fn test_minimum_weight_and_or_graph_paper_example() { + let problem = issue_problem(); + + // Verify the paper example: optimal config [1,1,0,1,0,1] with value 6 + assert_eq!(problem.evaluate(&[1, 1, 0, 1, 0, 1]), Min(Some(6))); + + // Confirm optimality via brute force + let solver = BruteForce::new(); + use crate::solvers::Solver; + let optimal = solver.solve(&problem); + assert_eq!(optimal, Min(Some(6))); + + // Verify there is exactly one optimal witness + let all = 
solver.find_all_witnesses(&problem); + let optimal_witnesses: Vec<_> = all + .into_iter() + .filter(|w| problem.evaluate(w) == Min(Some(6))) + .collect(); + assert_eq!(optimal_witnesses.len(), 1); + assert_eq!(optimal_witnesses[0], vec![1, 1, 0, 1, 0, 1]); +} diff --git a/src/unit_tests/models/misc/numerical_matching_with_target_sums.rs b/src/unit_tests/models/misc/numerical_matching_with_target_sums.rs new file mode 100644 index 00000000..ccde8a8c --- /dev/null +++ b/src/unit_tests/models/misc/numerical_matching_with_target_sums.rs @@ -0,0 +1,143 @@ +use crate::models::misc::NumericalMatchingWithTargetSums; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +fn yes_problem() -> NumericalMatchingWithTargetSums { + // m=3, sizes_x=[1,4,7], sizes_y=[2,5,3], targets=[3,7,12] + // Valid: config [0,2,1] → sums: 1+2=3, 4+3=7, 7+5=12 → multiset {3,7,12} = targets + NumericalMatchingWithTargetSums::new(vec![1, 4, 7], vec![2, 5, 3], vec![3, 7, 12]) +} + +#[test] +fn test_nmts_creation() { + let problem = yes_problem(); + assert_eq!(problem.sizes_x(), &[1, 4, 7]); + assert_eq!(problem.sizes_y(), &[2, 5, 3]); + assert_eq!(problem.targets(), &[3, 7, 12]); + assert_eq!(problem.num_pairs(), 3); + assert_eq!(problem.dims(), vec![3; 3]); + assert_eq!(problem.num_variables(), 3); + assert_eq!( + ::NAME, + "NumericalMatchingWithTargetSums" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_nmts_evaluate_valid() { + let problem = yes_problem(); + // config [0,2,1] → sums: 1+2=3, 4+3=7, 7+5=12 → multiset {3,7,12} = targets + assert_eq!(problem.evaluate(&[0, 2, 1]), Or(true)); +} + +#[test] +fn test_nmts_evaluate_invalid_sums() { + let problem = yes_problem(); + // config [0,1,2] → sums: 1+2=3, 4+5=9, 7+3=10 → multiset {3,9,10} ≠ {3,7,12} + assert_eq!(problem.evaluate(&[0, 1, 2]), Or(false)); + // config [1,0,2] → sums: 1+5=6, 4+2=6, 7+3=10 → multiset {6,6,10} ≠ {3,7,12} + assert_eq!(problem.evaluate(&[1, 0, 2]), Or(false)); +} + 
+#[test] +fn test_nmts_evaluate_invalid_permutation() { + let problem = yes_problem(); + // Duplicate index — not a permutation + assert_eq!(problem.evaluate(&[0, 0, 1]), Or(false)); + // Index out of range + assert_eq!(problem.evaluate(&[0, 1, 3]), Or(false)); +} + +#[test] +fn test_nmts_evaluate_wrong_length() { + let problem = yes_problem(); + assert_eq!(problem.evaluate(&[0, 1]), Or(false)); + assert_eq!(problem.evaluate(&[0, 1, 2, 0]), Or(false)); +} + +#[test] +fn test_nmts_solver_finds_witness() { + let problem = yes_problem(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Or(true)); +} + +#[test] +fn test_nmts_solver_unsatisfiable() { + // m=2, sizes_x=[1,2], sizes_y=[3,4], targets=[10,20] + // Possible sums: {1+3,2+4}={4,6} or {1+4,2+3}={5,5}, neither is {10,20} + let problem = NumericalMatchingWithTargetSums::new(vec![1, 2], vec![3, 4], vec![10, 20]); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_nmts_serialization_round_trip() { + let problem = yes_problem(); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "sizes_x": [1, 4, 7], + "sizes_y": [2, 5, 3], + "targets": [3, 7, 12], + }) + ); + + let restored: NumericalMatchingWithTargetSums = serde_json::from_value(json).unwrap(); + assert_eq!(restored.sizes_x(), problem.sizes_x()); + assert_eq!(restored.sizes_y(), problem.sizes_y()); + assert_eq!(restored.targets(), problem.targets()); +} + +#[test] +fn test_nmts_deserialization_rejects_invalid() { + let invalid_cases = [ + // Empty sets + serde_json::json!({ + "sizes_x": [], + "sizes_y": [], + "targets": [], + }), + // Different set sizes + serde_json::json!({ + "sizes_x": [1, 2], + "sizes_y": [3], + "targets": [4, 5], + }), + // Mismatched target length + serde_json::json!({ + "sizes_x": [1, 2], + "sizes_y": [3, 4], + "targets": [4], + }), + ]; + + for 
invalid in invalid_cases { + assert!(serde_json::from_value::(invalid).is_err()); + } +} + +#[test] +#[should_panic(expected = "at least one element")] +fn test_nmts_empty_sets_panics() { + NumericalMatchingWithTargetSums::new(vec![], vec![], vec![]); +} + +#[test] +#[should_panic(expected = "same length")] +fn test_nmts_mismatched_sizes_panics() { + NumericalMatchingWithTargetSums::new(vec![1, 2], vec![3], vec![4, 5]); +} + +#[test] +#[should_panic(expected = "same length")] +fn test_nmts_mismatched_targets_panics() { + NumericalMatchingWithTargetSums::new(vec![1, 2], vec![3, 4], vec![5]); +} diff --git a/src/unit_tests/models/misc/optimum_communication_spanning_tree.rs b/src/unit_tests/models/misc/optimum_communication_spanning_tree.rs new file mode 100644 index 00000000..a354ec3a --- /dev/null +++ b/src/unit_tests/models/misc/optimum_communication_spanning_tree.rs @@ -0,0 +1,168 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +fn k4_problem() -> OptimumCommunicationSpanningTree { + let edge_weights = vec![ + vec![0, 1, 3, 2], + vec![1, 0, 2, 4], + vec![3, 2, 0, 1], + vec![2, 4, 1, 0], + ]; + let requirements = vec![ + vec![0, 2, 1, 3], + vec![2, 0, 1, 1], + vec![1, 1, 0, 2], + vec![3, 1, 2, 0], + ]; + OptimumCommunicationSpanningTree::new(edge_weights, requirements) +} + +#[test] +fn test_ocst_creation() { + let problem = k4_problem(); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 6); + assert_eq!(problem.dims(), vec![2; 6]); + assert_eq!( + ::NAME, + "OptimumCommunicationSpanningTree" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_ocst_edges() { + let problem = k4_problem(); + let edges = problem.edges(); + assert_eq!(edges, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); +} + +#[test] +fn test_ocst_edge_index() { + let n = 4; + assert_eq!(OptimumCommunicationSpanningTree::edge_index(0, 1, n), 0); + assert_eq!(OptimumCommunicationSpanningTree::edge_index(0, 2, n), 1); 
+ assert_eq!(OptimumCommunicationSpanningTree::edge_index(0, 3, n), 2); + assert_eq!(OptimumCommunicationSpanningTree::edge_index(1, 2, n), 3); + assert_eq!(OptimumCommunicationSpanningTree::edge_index(1, 3, n), 4); + assert_eq!(OptimumCommunicationSpanningTree::edge_index(2, 3, n), 5); +} + +#[test] +fn test_ocst_evaluate_optimal() { + let problem = k4_problem(); + // Optimal tree: {(0,1), (0,3), (2,3)} -> indices 0, 2, 5 + // config = [1, 0, 1, 0, 0, 1] + // Path costs: + // W(0,1) = 1, W(0,2) = 2+1 = 3, W(0,3) = 2 + // W(1,2) = 1+2+1 = 4, W(1,3) = 1+2 = 3, W(2,3) = 1 + // Total = 1*2 + 3*1 + 2*3 + 4*1 + 3*1 + 1*2 = 2+3+6+4+3+2 = 20 + assert_eq!(problem.evaluate(&[1, 0, 1, 0, 0, 1]), Min(Some(20))); +} + +#[test] +fn test_ocst_evaluate_suboptimal() { + let problem = k4_problem(); + // Suboptimal tree: {(0,1), (1,2), (2,3)} -> indices 0, 3, 5 + // config = [1, 0, 0, 1, 0, 1] + // Path costs: + // W(0,1) = 1, W(0,2) = 1+2 = 3, W(0,3) = 1+2+1 = 4 + // W(1,2) = 2, W(1,3) = 2+1 = 3, W(2,3) = 1 + // Total = 1*2 + 3*1 + 4*3 + 2*1 + 3*1 + 1*2 = 2+3+12+2+3+2 = 24 + assert_eq!(problem.evaluate(&[1, 0, 0, 1, 0, 1]), Min(Some(24))); +} + +#[test] +fn test_ocst_evaluate_invalid() { + let problem = k4_problem(); + // Wrong number of edges + assert_eq!(problem.evaluate(&[1, 0, 1]), Min(None)); + // Too many edges (not a tree) + assert_eq!(problem.evaluate(&[1, 1, 1, 1, 0, 1]), Min(None)); + // Not connected (two separate edges) + assert_eq!(problem.evaluate(&[1, 0, 0, 0, 0, 1]), Min(None)); + // Value > 1 + assert_eq!(problem.evaluate(&[2, 0, 1, 0, 0, 0]), Min(None)); +} + +#[test] +fn test_ocst_solver() { + let problem = k4_problem(); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + let value = problem.evaluate(&solution); + assert_eq!(value, Min(Some(20))); +} + +#[test] +fn test_ocst_serialization() { + let problem = k4_problem(); + let json = serde_json::to_value(&problem).unwrap(); + let restored: 
OptimumCommunicationSpanningTree = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_vertices(), 4); + assert_eq!(restored.edge_weights(), problem.edge_weights()); + assert_eq!(restored.requirements(), problem.requirements()); +} + +#[test] +fn test_ocst_k3_equal_requirements() { + // K3 with all requirements equal to 1 + // edge_weights: w(0,1)=1, w(0,2)=2, w(1,2)=3 + let edge_weights = vec![vec![0, 1, 2], vec![1, 0, 3], vec![2, 3, 0]]; + let requirements = vec![vec![0, 1, 1], vec![1, 0, 1], vec![1, 1, 0]]; + let problem = OptimumCommunicationSpanningTree::new(edge_weights, requirements); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.num_edges(), 3); + + // Tree {(0,1), (0,2)}: W(0,1)=1, W(0,2)=2, W(1,2)=1+2=3, cost = 1+2+3 = 6 + assert_eq!(problem.evaluate(&[1, 1, 0]), Min(Some(6))); + // Tree {(0,1), (1,2)}: W(0,1)=1, W(0,2)=1+3=4, W(1,2)=3, cost = 1+4+3 = 8 + assert_eq!(problem.evaluate(&[1, 0, 1]), Min(Some(8))); + // Tree {(0,2), (1,2)}: W(0,1)=2+3=5, W(0,2)=2, W(1,2)=3, cost = 5+2+3 = 10 + assert_eq!(problem.evaluate(&[0, 1, 1]), Min(Some(10))); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(6))); +} + +#[test] +#[should_panic(expected = "must have at least 2 vertices")] +fn test_ocst_single_vertex_panics() { + OptimumCommunicationSpanningTree::new(vec![vec![0]], vec![vec![0]]); +} + +#[test] +#[should_panic(expected = "edge_weights must be symmetric")] +fn test_ocst_asymmetric_weights_panics() { + OptimumCommunicationSpanningTree::new( + vec![vec![0, 1], vec![2, 0]], + vec![vec![0, 1], vec![1, 0]], + ); +} + +#[test] +#[should_panic(expected = "requirements must be symmetric")] +fn test_ocst_asymmetric_requirements_panics() { + OptimumCommunicationSpanningTree::new( + vec![vec![0, 1], vec![1, 0]], + vec![vec![0, 1], vec![2, 0]], + ); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_ocst_canonical_example() { + let specs = 
canonical_model_example_specs(); + assert_eq!(specs.len(), 1); + let spec = &specs[0]; + assert_eq!(spec.id, "optimum_communication_spanning_tree"); + assert_eq!(spec.optimal_config, vec![1, 0, 1, 0, 0, 1]); + assert_eq!(spec.optimal_value, serde_json::json!(20)); +} diff --git a/src/unit_tests/models/misc/square_tiling.rs b/src/unit_tests/models/misc/square_tiling.rs new file mode 100644 index 00000000..20f253f9 --- /dev/null +++ b/src/unit_tests/models/misc/square_tiling.rs @@ -0,0 +1,184 @@ +use crate::models::misc::SquareTiling; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +/// Positive example from the issue: 3 colors, 4 tiles, 2x2 grid. +/// Tiles: t0=(0,1,2,0), t1=(0,0,2,1), t2=(2,1,0,0), t3=(2,0,0,1) +fn example_problem() -> SquareTiling { + SquareTiling::new( + 3, + vec![(0, 1, 2, 0), (0, 0, 2, 1), (2, 1, 0, 0), (2, 0, 0, 1)], + 2, + ) +} + +#[test] +fn test_square_tiling_basic() { + let problem = example_problem(); + assert_eq!(problem.num_colors(), 3); + assert_eq!(problem.num_tiles(), 4); + assert_eq!(problem.grid_size(), 2); + assert_eq!(problem.tiles().len(), 4); + assert_eq!(problem.dims(), vec![4; 4]); + assert_eq!(problem.num_variables(), 4); + assert_eq!(::NAME, "SquareTiling"); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_square_tiling_evaluate_valid() { + let problem = example_problem(); + // Config [0, 1, 2, 3] means: + // (0,0)=t0, (0,1)=t1 + // (1,0)=t2, (1,1)=t3 + // Horizontal: t0.right=1==t1.left=1, t2.right=1==t3.left=1 + // Vertical: t0.bottom=2==t2.top=2, t1.bottom=2==t3.top=2 + assert_eq!(problem.evaluate(&[0, 1, 2, 3]), Or(true)); +} + +#[test] +fn test_square_tiling_evaluate_invalid_horizontal() { + let problem = example_problem(); + // Config [0, 0, 2, 3]: + // (0,0)=t0, (0,1)=t0 + // t0.right=1, t0.left=0 => mismatch + assert_eq!(problem.evaluate(&[0, 0, 2, 3]), Or(false)); +} + +#[test] +fn test_square_tiling_evaluate_invalid_vertical() { + let problem = example_problem(); + 
// Config [0, 1, 0, 3]: + // (0,0)=t0, (0,1)=t1 + // (1,0)=t0, (1,1)=t3 + // Vertical (0,0)-(1,0): t0.bottom=2, t0.top=0 => mismatch + assert_eq!(problem.evaluate(&[0, 1, 0, 3]), Or(false)); +} + +#[test] +fn test_square_tiling_evaluate_wrong_length() { + let problem = example_problem(); + assert_eq!(problem.evaluate(&[0, 1, 2]), Or(false)); + assert_eq!(problem.evaluate(&[0, 1, 2, 3, 0]), Or(false)); +} + +#[test] +fn test_square_tiling_evaluate_tile_index_out_of_range() { + let problem = example_problem(); + assert_eq!(problem.evaluate(&[0, 1, 2, 4]), Or(false)); +} + +#[test] +fn test_square_tiling_solver_finds_witness() { + let problem = example_problem(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&witness), Or(true)); +} + +#[test] +fn test_square_tiling_unsatisfiable_instance() { + // Negative example from issue: only t0=(0,1,2,0) and t2=(2,1,0,0) + // Both have right=1, left=0, so no horizontal match possible. 
+ let problem = SquareTiling::new(3, vec![(0, 1, 2, 0), (2, 1, 0, 0)], 2); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_square_tiling_single_cell() { + // 1x1 grid: any single tile is a valid tiling + let problem = SquareTiling::new(2, vec![(0, 1, 0, 1)], 1); + assert_eq!(problem.evaluate(&[0]), Or(true)); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(witness, vec![0]); +} + +#[test] +fn test_square_tiling_serialization_round_trip() { + let problem = example_problem(); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "num_colors": 3, + "tiles": [[0,1,2,0], [0,0,2,1], [2,1,0,0], [2,0,0,1]], + "grid_size": 2, + }) + ); + + let restored: SquareTiling = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_colors(), problem.num_colors()); + assert_eq!(restored.num_tiles(), problem.num_tiles()); + assert_eq!(restored.grid_size(), problem.grid_size()); + assert_eq!(restored.tiles(), problem.tiles()); +} + +#[test] +fn test_square_tiling_deserialization_rejects_invalid() { + let invalid_cases = [ + // Zero colors + serde_json::json!({ + "num_colors": 0, + "tiles": [[0, 0, 0, 0]], + "grid_size": 1, + }), + // Empty tiles + serde_json::json!({ + "num_colors": 2, + "tiles": [], + "grid_size": 1, + }), + // Zero grid size + serde_json::json!({ + "num_colors": 2, + "tiles": [[0, 0, 0, 0]], + "grid_size": 0, + }), + // Color out of range + serde_json::json!({ + "num_colors": 2, + "tiles": [[0, 0, 0, 5]], + "grid_size": 1, + }), + ]; + + for invalid in invalid_cases { + assert!(serde_json::from_value::(invalid).is_err()); + } +} + +#[test] +#[should_panic(expected = "at least one color")] +fn test_square_tiling_zero_colors_panics() { + SquareTiling::new(0, vec![(0, 0, 0, 0)], 1); +} + +#[test] +#[should_panic(expected = "at least one tile")] +fn test_square_tiling_empty_tiles_panics() { + 
SquareTiling::new(2, vec![], 1); +} + +#[test] +#[should_panic(expected = "grid_size >= 1")] +fn test_square_tiling_zero_grid_size_panics() { + SquareTiling::new(2, vec![(0, 0, 0, 0)], 0); +} + +#[test] +#[should_panic(expected = "out of range")] +fn test_square_tiling_color_out_of_range_panics() { + SquareTiling::new(2, vec![(0, 0, 0, 5)], 1); +} + +#[test] +fn test_square_tiling_count_valid_tilings() { + // Issue states 16 valid tilings out of 256 for the positive example + let problem = example_problem(); + let solver = BruteForce::new(); + let witnesses = solver.find_all_witnesses(&problem); + assert_eq!(witnesses.len(), 16); +} diff --git a/src/unit_tests/models/set/three_matroid_intersection.rs b/src/unit_tests/models/set/three_matroid_intersection.rs new file mode 100644 index 00000000..98a119d3 --- /dev/null +++ b/src/unit_tests/models/set/three_matroid_intersection.rs @@ -0,0 +1,185 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +/// Helper: build the canonical 6-element, K=2 instance from the issue. 
+fn issue_instance() -> ThreeMatroidIntersection { + ThreeMatroidIntersection::new( + 6, + vec![ + vec![vec![0, 1, 2], vec![3, 4, 5]], // M1 + vec![vec![0, 3], vec![1, 4], vec![2, 5]], // M2 + vec![vec![0, 4], vec![1, 5], vec![2, 3]], // M3 + ], + 2, + ) +} + +#[test] +fn test_three_matroid_intersection_creation() { + let problem = issue_instance(); + assert_eq!(problem.ground_set_size(), 6); + assert_eq!(problem.bound(), 2); + assert_eq!(problem.partitions().len(), 3); + assert_eq!(problem.num_groups(), 8); // 2 + 3 + 3 + assert_eq!(problem.num_variables(), 6); + assert_eq!(problem.dims(), vec![2; 6]); +} + +#[test] +fn test_three_matroid_intersection_evaluate_valid() { + let problem = issue_instance(); + // {0, 5} is a valid common independent set of size 2 + // M1: 0 in {0,1,2}, 5 in {3,4,5} -> at most 1 per group + // M2: 0 in {0,3}, 5 in {2,5} -> at most 1 per group + // M3: 0 in {0,4}, 5 in {1,5} -> at most 1 per group + assert!(problem.evaluate(&[1, 0, 0, 0, 0, 1])); + + // {1, 3} is also valid + assert!(problem.evaluate(&[0, 1, 0, 1, 0, 0])); + + // {2, 4} is also valid + assert!(problem.evaluate(&[0, 0, 1, 0, 1, 0])); +} + +#[test] +fn test_three_matroid_intersection_evaluate_invalid() { + let problem = issue_instance(); + + // {0, 3} fails M2: both in group {0, 3} + assert!(!problem.evaluate(&[1, 0, 0, 1, 0, 0])); + + // {0, 4} fails M3: both in group {0, 4} + assert!(!problem.evaluate(&[1, 0, 0, 0, 1, 0])); + + // {1, 2} fails M1: both in group {0, 1, 2} + assert!(!problem.evaluate(&[0, 1, 1, 0, 0, 0])); + + // Wrong size: only 1 element selected + assert!(!problem.evaluate(&[1, 0, 0, 0, 0, 0])); + + // Wrong size: 3 elements selected + assert!(!problem.evaluate(&[1, 0, 0, 0, 1, 1])); + + // All zeros + assert!(!problem.evaluate(&[0, 0, 0, 0, 0, 0])); +} + +#[test] +fn test_three_matroid_intersection_solver() { + let problem = issue_instance(); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + + // Exactly 3 valid 
solutions: {0,5}, {1,3}, {2,4} + assert_eq!(solutions.len(), 3); + for sol in &solutions { + assert!(problem.evaluate(sol)); + } + assert!(solutions.contains(&vec![1, 0, 0, 0, 0, 1])); + assert!(solutions.contains(&vec![0, 1, 0, 1, 0, 0])); + assert!(solutions.contains(&vec![0, 0, 1, 0, 1, 0])); +} + +#[test] +fn test_three_matroid_intersection_no_solution() { + // Same instance but K=3: M1 has only 2 groups, so independent sets have size ≤ 2 + let problem = ThreeMatroidIntersection::new( + 6, + vec![ + vec![vec![0, 1, 2], vec![3, 4, 5]], + vec![vec![0, 3], vec![1, 4], vec![2, 5]], + vec![vec![0, 4], vec![1, 5], vec![2, 3]], + ], + 3, + ); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(solutions.is_empty()); +} + +#[test] +fn test_three_matroid_intersection_serialization() { + let problem = issue_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: ThreeMatroidIntersection = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.ground_set_size(), problem.ground_set_size()); + assert_eq!(deserialized.bound(), problem.bound()); + assert_eq!(deserialized.partitions(), problem.partitions()); +} + +#[test] +fn test_three_matroid_intersection_rejects_wrong_config_length() { + let problem = issue_instance(); + assert!(!problem.evaluate(&[1, 0, 0])); +} + +#[test] +fn test_three_matroid_intersection_rejects_non_binary_config() { + let problem = issue_instance(); + assert!(!problem.evaluate(&[2, 0, 0, 0, 0, 0])); +} + +#[test] +#[should_panic(expected = "Expected exactly 3")] +fn test_three_matroid_intersection_wrong_matroid_count() { + ThreeMatroidIntersection::new(4, vec![vec![vec![0, 1]], vec![vec![2, 3]]], 1); +} + +#[test] +#[should_panic(expected = "outside 0..")] +fn test_three_matroid_intersection_element_out_of_range() { + ThreeMatroidIntersection::new( + 3, + vec![ + vec![vec![0, 1, 2]], + vec![vec![0, 1, 2]], + vec![vec![0, 1, 5]], // 5 >= 3 + ], + 1, + ); +} + 
+#[test] +#[should_panic(expected = "Bound 4 exceeds")] +fn test_three_matroid_intersection_bound_exceeds_ground_set() { + ThreeMatroidIntersection::new( + 3, + vec![ + vec![vec![0, 1, 2]], + vec![vec![0, 1, 2]], + vec![vec![0, 1, 2]], + ], + 4, + ); +} + +#[test] +fn test_three_matroid_intersection_paper_example() { + // Issue's canonical 6-element example, K=2 + let problem = issue_instance(); + + // Valid: {0, 5} + assert!(problem.evaluate(&[1, 0, 0, 0, 0, 1])); + + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + // Exactly 3 valid common independent sets of size 2 + assert_eq!(solutions.len(), 3); + assert!(solutions.contains(&vec![1, 0, 0, 0, 0, 1])); // {0, 5} + assert!(solutions.contains(&vec![0, 1, 0, 1, 0, 0])); // {1, 3} + assert!(solutions.contains(&vec![0, 0, 1, 0, 1, 0])); // {2, 4} + + // Negative modification from issue: K=3 is infeasible (M1 has only 2 groups) + let problem_k3 = ThreeMatroidIntersection::new( + 6, + vec![ + vec![vec![0, 1, 2], vec![3, 4, 5]], + vec![vec![0, 3], vec![1, 4], vec![2, 5]], + vec![vec![0, 4], vec![1, 5], vec![2, 3]], + ], + 3, + ); + let solutions_k3 = solver.find_all_witnesses(&problem_k3); + assert!(solutions_k3.is_empty()); +} diff --git a/src/unit_tests/rules/maximum2satisfiability_ilp.rs b/src/unit_tests/rules/maximum2satisfiability_ilp.rs new file mode 100644 index 00000000..52bdf45a --- /dev/null +++ b/src/unit_tests/rules/maximum2satisfiability_ilp.rs @@ -0,0 +1,142 @@ +use super::*; +use crate::models::algebraic::{Comparison, ObjectiveSense, ILP}; +use crate::models::formula::CNFClause; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; + +fn make_canonical_instance() -> Maximum2Satisfiability { + Maximum2Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2]), + CNFClause::new(vec![1, -2]), + CNFClause::new(vec![-1, 3]), + CNFClause::new(vec![-1, -3]), 
+ CNFClause::new(vec![2, 4]), + CNFClause::new(vec![-3, -4]), + CNFClause::new(vec![3, 4]), + ], + ) +} + +#[test] +fn test_maximum2satisfiability_to_ilp_closed_loop() { + let problem = make_canonical_instance(); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "Maximum2Satisfiability->ILP closed loop", + ); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + // Optimal: 6 satisfied clauses + let value = problem.evaluate(&extracted); + assert_eq!(value, crate::types::Max(Some(6))); +} + +#[test] +fn test_maximum2satisfiability_to_ilp_bf_vs_ilp() { + let problem = make_canonical_instance(); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_solutions = BruteForce::new().find_all_witnesses(&problem); + let bf_value = problem.evaluate(&bf_solutions[0]); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); + assert!(ilp_value.is_valid()); +} + +#[test] +fn test_maximum2satisfiability_to_ilp_structure() { + let problem = make_canonical_instance(); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 4 truth variables + 7 clause indicators = 11 ILP variables + assert_eq!(ilp.num_vars(), 11); + // One constraint per clause + assert_eq!(ilp.num_constraints(), 7); + assert_eq!(ilp.sense, ObjectiveSense::Maximize); + + // Objective: maximize sum of z_4..z_10 + let expected_objective: Vec<(usize, f64)> = (4..11).map(|j| (j, 1.0)).collect(); + assert_eq!(ilp.objective, expected_objective); + + // Check first constraint: clause (x1 OR x2) -> z_4 - y_0 - y_1 <= 0 + let c0 = &ilp.constraints[0]; + 
assert_eq!(c0.cmp, Comparison::Le); + assert_eq!(c0.rhs, 0.0); // 0 negated literals + assert_eq!(c0.terms, vec![(4, 1.0), (0, -1.0), (1, -1.0)]); + + // Check constraint for clause (~x1 OR x3) -> z_6 + y_0 - y_2 <= 1 + let c2 = &ilp.constraints[2]; + assert_eq!(c2.cmp, Comparison::Le); + assert_eq!(c2.rhs, 1.0); // 1 negated literal + assert_eq!(c2.terms, vec![(6, 1.0), (0, 1.0), (2, -1.0)]); + + // Check constraint for clause (~x1 OR ~x3) -> z_7 + y_0 + y_2 <= 2 + let c3 = &ilp.constraints[3]; + assert_eq!(c3.cmp, Comparison::Le); + assert_eq!(c3.rhs, 2.0); // 2 negated literals + assert_eq!(c3.terms, vec![(7, 1.0), (0, 1.0), (2, 1.0)]); +} + +#[test] +fn test_maximum2satisfiability_to_ilp_all_satisfiable() { + // Simple instance where all clauses can be satisfied: (x1 OR x2) AND (x1 OR ~x2) + // x1 = true satisfies both. + let problem = Maximum2Satisfiability::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![1, -2])], + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + // Both clauses should be satisfiable + assert_eq!(value, crate::types::Max(Some(2))); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_maximum2satisfiability_to_ilp_canonical_example_spec() { + let spec = canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "maximum2satisfiability_to_ilp") + .expect("missing canonical Maximum2Satisfiability -> ILP example spec"); + let example = (spec.build)(); + + assert_eq!(example.source.problem, "Maximum2Satisfiability"); + assert_eq!(example.target.problem, "ILP"); + assert_eq!(example.source.instance["num_vars"], 4); + assert_eq!(example.target.instance["num_vars"], 11); + assert_eq!( + example.target.instance["constraints"] + .as_array() + .unwrap() + .len(), + 7 + ); + assert_eq!( + 
example.solutions, + vec![crate::export::SolutionPair { + source_config: vec![1, 1, 0, 1], + target_config: vec![1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1], + }] + ); +} diff --git a/src/unit_tests/rules/maximumdomaticnumber_ilp.rs b/src/unit_tests/rules/maximumdomaticnumber_ilp.rs new file mode 100644 index 00000000..1c8cb7e4 --- /dev/null +++ b/src/unit_tests/rules/maximumdomaticnumber_ilp.rs @@ -0,0 +1,114 @@ +use super::*; +use crate::models::algebraic::ObjectiveSense; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Max; + +#[test] +fn test_maximumdomaticnumber_to_ilp_closed_loop() { + // Path P3: 0-1-2, domatic number = 2 + let problem = MaximumDomaticNumber::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + let reduction: ReductionDomaticNumberToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_witness = bf.find_witness(&problem).unwrap(); + let bf_value = problem.evaluate(&bf_witness); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + // Both should find domatic number = 2 + assert_eq!(bf_value, Max(Some(2))); + assert_eq!(ilp_value, Max(Some(2))); + + // Verify the ILP solution is valid for the original problem + assert!(problem.evaluate(&extracted).is_valid()); +} + +#[test] +fn test_maximumdomaticnumber_to_ilp_structure() { + // P3: 3 vertices + let problem = MaximumDomaticNumber::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + let reduction: ReductionDomaticNumberToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=3: n²+n = 12 variables + assert_eq!(ilp.num_vars, 12); + + // Constraints: n + n² + n² = 3 + 9 + 9 = 21 + assert_eq!(ilp.constraints.len(), 21); 
+ + // Objective should be maximize + assert_eq!(ilp.sense, ObjectiveSense::Maximize); + + // Objective should have 3 terms (y_0, y_1, y_2) + assert_eq!(ilp.objective.len(), 3); + for &(var, coef) in &ilp.objective { + assert!(var >= 9); // y_i at indices 9, 10, 11 + assert!((coef - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_maximumdomaticnumber_to_ilp_bf_vs_ilp() { + // P3: 3 vertices, domatic number = 2 + let problem = MaximumDomaticNumber::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + let reduction: ReductionDomaticNumberToILP = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_maximumdomaticnumber_to_ilp_complete_graph() { + // K3: domatic number = 3 (each vertex is its own dominating set) + let problem = MaximumDomaticNumber::new(SimpleGraph::new(3, vec![(0, 1), (0, 2), (1, 2)])); + let reduction: ReductionDomaticNumberToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + + assert_eq!(value, Max(Some(3))); +} + +#[test] +fn test_maximumdomaticnumber_to_ilp_single_vertex() { + // Single vertex: domatic number = 1 + let problem = MaximumDomaticNumber::new(SimpleGraph::new(1, vec![])); + let reduction: ReductionDomaticNumberToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + + assert_eq!(value, Max(Some(1))); +} + +#[test] +fn test_maximumdomaticnumber_to_ilp_solution_extraction() { + // P3: 0-1-2 + let problem = MaximumDomaticNumber::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + 
let reduction: ReductionDomaticNumberToILP = ReduceTo::>::reduce_to(&problem); + + // Manually construct an ILP solution: vertices 0,2 in set 0, vertex 1 in set 1 + // x_{0,0}=1, x_{0,1}=0, x_{0,2}=0, + // x_{1,0}=0, x_{1,1}=1, x_{1,2}=0, + // x_{2,0}=1, x_{2,1}=0, x_{2,2}=0, + // y_0=1, y_1=1, y_2=0 + let ilp_solution = vec![1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![0, 1, 0]); + + // Verify this is a valid partition with 2 dominating sets + let value = problem.evaluate(&extracted); + assert_eq!(value, Max(Some(2))); +} diff --git a/src/unit_tests/rules/maximumleafspanningtree_ilp.rs b/src/unit_tests/rules/maximumleafspanningtree_ilp.rs new file mode 100644 index 00000000..159ce297 --- /dev/null +++ b/src/unit_tests/rules/maximumleafspanningtree_ilp.rs @@ -0,0 +1,171 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::models::graph::MaximumLeafSpanningTree; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Max; + +/// Small instance: 4 vertices, 4 edges (P4 with a shortcut). +/// Vertices 0-1-2-3 plus edge 0-2. +fn small_instance() -> MaximumLeafSpanningTree { + MaximumLeafSpanningTree::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (0, 2)])) +} + +/// Issue #897 canonical instance: 6 vertices, 9 edges. 
+fn canonical_instance() -> MaximumLeafSpanningTree { + MaximumLeafSpanningTree::new(SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 4), + (2, 4), + (2, 5), + (3, 5), + (4, 5), + (1, 3), + ], + )) +} + +#[test] +fn test_reduction_creates_expected_ilp_shape() { + let problem = small_instance(); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=4, m=4: num_vars = 3*4 + 4 = 16 + assert_eq!(ilp.num_vars, 16); + assert_eq!(ilp.sense, ObjectiveSense::Maximize); + // Objective should be z_0 + z_1 + z_2 + z_3 (indices 4..8) + assert_eq!(ilp.objective, vec![(4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0)]); +} + +#[test] +fn test_maximumleafspanningtree_to_ilp_closed_loop() { + let problem = small_instance(); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + let best_source = bf.find_all_witnesses(&problem); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // All brute-force optimal solutions have the same value + let bf_value = problem.evaluate(&best_source[0]); + let ilp_value = problem.evaluate(&extracted); + assert_eq!(ilp_value, bf_value); + assert!(problem.is_valid_solution(&extracted)); +} + +#[test] +fn test_maximumleafspanningtree_to_ilp_canonical_closed_loop() { + let problem = canonical_instance(); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + let best_source = bf.find_all_witnesses(&problem); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + 
assert_eq!(problem.evaluate(&best_source[0]), Max(Some(4))); + assert_eq!(problem.evaluate(&extracted), Max(Some(4))); + assert!(problem.is_valid_solution(&extracted)); +} + +#[test] +fn test_solution_extraction_reads_edge_selector_prefix() { + let problem = small_instance(); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + + // 16 variables total, first 4 are edge selectors + let mut target_solution = vec![0; 16]; + target_solution[0] = 1; // edge (0,1) + target_solution[1] = 1; // edge (1,2) + target_solution[2] = 1; // edge (2,3) + + assert_eq!( + reduction.extract_solution(&target_solution), + vec![1, 1, 1, 0] + ); +} + +#[test] +fn test_reduce_and_solve_via_ilp() { + let problem = canonical_instance(); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Max(Some(4))); + assert!(problem.is_valid_solution(&extracted)); +} + +#[test] +fn test_maximumleafspanningtree_to_ilp_bf_vs_ilp() { + let problem = canonical_instance(); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_maximumleafspanningtree_to_ilp_path_graph() { + // Path P4: 0-1-2-3, only spanning tree is the path itself => 2 leaves + let problem = MaximumLeafSpanningTree::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), 
Max(Some(2))); +} + +#[test] +fn test_maximumleafspanningtree_to_ilp_star_graph() { + // Star K1,3: center 0, leaves 1,2,3 => 3 leaves + let problem = MaximumLeafSpanningTree::new(SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)])); + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Max(Some(3))); + assert!(problem.is_valid_solution(&extracted)); +} + +#[test] +fn test_maximumleafspanningtree_to_ilp_complete_graph() { + // K4: 4 vertices, 6 edges. Star spanning tree has 3 leaves. + let problem = MaximumLeafSpanningTree::new(SimpleGraph::new( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + )); + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_witnesses(&problem); + let bf_value = problem.evaluate(&bf_solutions[0]); + + let reduction: ReductionMaximumLeafSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(problem.evaluate(&extracted), bf_value); + assert_eq!(bf_value, Max(Some(3))); +} diff --git a/src/unit_tests/rules/maximumlikelihoodranking_ilp.rs b/src/unit_tests/rules/maximumlikelihoodranking_ilp.rs new file mode 100644 index 00000000..f88feec2 --- /dev/null +++ b/src/unit_tests/rules/maximumlikelihoodranking_ilp.rs @@ -0,0 +1,159 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn 
test_maximumlikelihoodranking_to_ilp_closed_loop() { + let matrix = vec![vec![0, 3, 2], vec![2, 0, 4], vec![3, 1, 0]]; + let problem = MaximumLikelihoodRanking::new(matrix); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "MaximumLikelihoodRanking->ILP closed loop", + ); +} + +#[test] +fn test_maximumlikelihoodranking_to_ilp_structure() { + let matrix = vec![vec![0, 3, 2], vec![2, 0, 4], vec![3, 1, 0]]; + let problem = MaximumLikelihoodRanking::new(matrix); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 3 items -> C(3,2) = 3 variables + assert_eq!(ilp.num_vars(), 3); + // C(3,3) = 1 triple -> 2 constraints + assert_eq!(ilp.num_constraints(), 2); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_maximumlikelihoodranking_to_ilp_bf_vs_ilp() { + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_solutions = BruteForce::new().find_all_witnesses(&problem); + let bf_value = problem.evaluate(&bf_solutions[0]); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); + assert!(ilp_value.is_valid()); +} + +#[test] +fn test_maximumlikelihoodranking_to_ilp_extraction() { + // 3 items: simple instance + let matrix = vec![vec![0, 3, 2], vec![2, 0, 4], vec![3, 1, 0]]; + let problem = MaximumLikelihoodRanking::new(matrix); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + 
+ // Verify the extracted config is a valid permutation + let n = problem.num_items(); + assert_eq!(extracted.len(), n); + let mut sorted = extracted.clone(); + sorted.sort(); + assert_eq!(sorted, (0..n).collect::>()); + + // Verify evaluation is valid + let value = problem.evaluate(&extracted); + assert!(value.is_valid()); +} + +#[test] +fn test_maximumlikelihoodranking_to_ilp_two_items() { + let matrix = vec![vec![0, 5], vec![3, 0]]; + let problem = MaximumLikelihoodRanking::new(matrix); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 2 items -> 1 variable, 0 transitivity constraints + assert_eq!(ilp.num_vars(), 1); + assert_eq!(ilp.num_constraints(), 0); + + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + assert!(value.is_valid()); + + // Optimal: item 0 before item 1 costs matrix[1][0]=3 + // item 1 before item 0 costs matrix[0][1]=5 + // So optimal is [0,1] with cost 3 + assert_eq!(value, Min(Some(3))); +} + +#[test] +fn test_maximumlikelihoodranking_to_ilp_single_item() { + let problem = MaximumLikelihoodRanking::new(vec![vec![0]]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars(), 0); + assert_eq!(ilp.num_constraints(), 0); + + let ilp_solution = ILPSolver::new() + .solve(ilp) + .expect("single-item ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![0]); +} + +#[test] +fn test_maximumlikelihoodranking_to_ilp_larger_instance() { + // 4-item instance from the issue + let matrix = vec![ + vec![0, 4, 3, 5], + vec![1, 0, 4, 3], + vec![2, 1, 0, 4], + vec![0, 2, 1, 0], + ]; + let problem = MaximumLikelihoodRanking::new(matrix); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 4 items -> C(4,2) 
= 6 variables + assert_eq!(ilp.num_vars(), 6); + // C(4,3) = 4 triples -> 8 constraints + assert_eq!(ilp.num_constraints(), 8); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "4-item MaximumLikelihoodRanking->ILP", + ); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_maximumlikelihoodranking_to_ilp_canonical_example_spec() { + let spec = canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "maximum_likelihood_ranking_to_ilp") + .expect("missing canonical MaximumLikelihoodRanking -> ILP example spec"); + let example = (spec.build)(); + + assert_eq!(example.source.problem, "MaximumLikelihoodRanking"); + assert_eq!(example.target.problem, "ILP"); + assert_eq!(example.target.instance["num_vars"], 3); + assert!(!example.solutions.is_empty()); +} diff --git a/src/unit_tests/rules/minimumcapacitatedspanningtree_ilp.rs b/src/unit_tests/rules/minimumcapacitatedspanningtree_ilp.rs new file mode 100644 index 00000000..2b03ca50 --- /dev/null +++ b/src/unit_tests/rules/minimumcapacitatedspanningtree_ilp.rs @@ -0,0 +1,157 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::models::graph::MinimumCapacitatedSpanningTree; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Min; + +/// Small instance: 4 vertices, 5 edges. +fn small_instance() -> MinimumCapacitatedSpanningTree { + MinimumCapacitatedSpanningTree::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]), + vec![2, 3, 1, 1, 2], // edge weights + 0, // root + vec![0, 1, 1, 1], // requirements + 2, // capacity + ) +} + +/// Canonical instance from issue #901: 5 vertices, 8 edges. 
+fn canonical_instance() -> MinimumCapacitatedSpanningTree { + MinimumCapacitatedSpanningTree::new( + SimpleGraph::new( + 5, + vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 2), + (1, 4), + (2, 3), + (2, 4), + (3, 4), + ], + ), + vec![2, 1, 4, 3, 1, 2, 3, 1], + 0, + vec![0, 1, 1, 1, 1], + 3, + ) +} + +#[test] +fn test_reduction_creates_expected_ilp_shape() { + let problem = small_instance(); + let reduction: ReductionMinimumCapacitatedSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // m=5: num_vars = 3*5 = 15 + assert_eq!(ilp.num_vars, 15); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_minimumcapacitatedspanningtree_to_ilp_closed_loop() { + let problem = small_instance(); + let reduction: ReductionMinimumCapacitatedSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + let best_source = bf.find_all_witnesses(&problem); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let bf_value = problem.evaluate(&best_source[0]); + let ilp_value = problem.evaluate(&extracted); + assert_eq!(ilp_value, bf_value); + assert!(problem.is_valid_solution(&extracted)); +} + +#[test] +fn test_minimumcapacitatedspanningtree_to_ilp_canonical_closed_loop() { + let problem = canonical_instance(); + let reduction: ReductionMinimumCapacitatedSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + let best_source = bf.find_all_witnesses(&problem); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(problem.evaluate(&best_source[0]), Min(Some(5))); + assert_eq!(problem.evaluate(&extracted), Min(Some(5))); + 
assert!(problem.is_valid_solution(&extracted)); +} + +#[test] +fn test_solution_extraction_reads_edge_selector_prefix() { + let problem = small_instance(); + let reduction: ReductionMinimumCapacitatedSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + + // 15 variables total, first 5 are edge selectors + let mut target_solution = vec![0; 15]; + target_solution[0] = 1; // edge (0,1) + target_solution[1] = 1; // edge (0,2) + target_solution[3] = 1; // edge (1,3) + + assert_eq!( + reduction.extract_solution(&target_solution), + vec![1, 1, 0, 1, 0] + ); +} + +#[test] +fn test_minimumcapacitatedspanningtree_to_ilp_bf_vs_ilp() { + let problem = canonical_instance(); + let reduction: ReductionMinimumCapacitatedSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_minimumcapacitatedspanningtree_to_ilp_star_tree() { + // Star from root 0: all edges directly from root. + // With capacity >= max single requirement, star is always valid. 
+ let problem = MinimumCapacitatedSpanningTree::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]), + vec![1, 1, 1], + 0, + vec![0, 1, 1, 1], + 1, // capacity = 1 forces star tree + ); + let reduction: ReductionMinimumCapacitatedSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Min(Some(3))); + assert!(problem.is_valid_solution(&extracted)); +} + +#[test] +fn test_minimumcapacitatedspanningtree_to_ilp_path_graph() { + // Path 0-1-2-3, root=0, capacity=3, requirements=[0,1,1,1] + // Only spanning tree is the path: subtree(1)={1,2,3}->req=3<=3 OK + let problem = MinimumCapacitatedSpanningTree::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![2, 3, 1], + 0, + vec![0, 1, 1, 1], + 3, + ); + let reduction: ReductionMinimumCapacitatedSpanningTreeToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Min(Some(6))); + assert!(problem.is_valid_solution(&extracted)); +} diff --git a/src/unit_tests/rules/minimumedgecostflow_ilp.rs b/src/unit_tests/rules/minimumedgecostflow_ilp.rs new file mode 100644 index 00000000..08074831 --- /dev/null +++ b/src/unit_tests/rules/minimumedgecostflow_ilp.rs @@ -0,0 +1,140 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_instance() -> MinimumEdgeCostFlow { + MinimumEdgeCostFlow::new( + DirectedGraph::new(5, vec![(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)]), + vec![3, 1, 2, 0, 0, 0], + vec![2, 2, 2, 2, 2, 2], + 
0, + 4, + 3, + ) +} + +fn small_instance() -> MinimumEdgeCostFlow { + // 3-vertex: s=0, t=2, two parallel paths via v1 + // Arc 0: (0,1) cap=2, price=5 + // Arc 1: (1,2) cap=2, price=3 + // R=1 → cost = 5+3 = 8 + MinimumEdgeCostFlow::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2)]), + vec![5, 3], + vec![2, 2], + 0, + 2, + 1, + ) +} + +fn infeasible_instance() -> MinimumEdgeCostFlow { + // Cannot route 2 units through capacity-1 arcs + MinimumEdgeCostFlow::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2)]), + vec![1, 1], + vec![1, 1], + 0, + 2, + 2, + ) +} + +#[test] +fn test_minimumedgecostflow_to_ilp_structure() { + let problem = issue_instance(); + let reduction: ReductionMECFToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 6 arcs → 2*6 = 12 variables + assert_eq!(ilp.num_vars, 12); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // Objective should have 6 terms (one per indicator variable) + assert_eq!(ilp.objective.len(), 6); + + // Constraints: 6 linking + 6 binary + (5-2)=3 conservation + 1 flow req = 16 + // That is 2*6 + 5 - 1 = 16 + assert_eq!(ilp.constraints.len(), 16); +} + +#[test] +fn test_minimumedgecostflow_to_ilp_closed_loop() { + let problem = issue_instance(); + let bf = BruteForce::new(); + let bf_witness = bf + .find_witness(&problem) + .expect("issue instance has optimal"); + let bf_value = problem.evaluate(&bf_witness); + assert_eq!(bf_value, Min(Some(3))); + + let reduction: ReductionMECFToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + let ilp_value = problem.evaluate(&extracted); + assert_eq!(ilp_value, bf_value); +} + +#[test] +fn test_minimumedgecostflow_to_ilp_small_closed_loop() { + let problem = small_instance(); + let bf = BruteForce::new(); + let bf_witness = bf + .find_witness(&problem) + .expect("small instance has 
optimal"); + let bf_value = problem.evaluate(&bf_witness); + assert_eq!(bf_value, Min(Some(8))); + + let reduction: ReductionMECFToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), bf_value); +} + +#[test] +fn test_minimumedgecostflow_to_ilp_infeasible() { + let problem = infeasible_instance(); + let reduction: ReductionMECFToILP = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible instance should produce infeasible ILP" + ); +} + +#[test] +fn test_minimumedgecostflow_to_ilp_bf_vs_ilp() { + let problem = issue_instance(); + let reduction: ReductionMECFToILP = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_minimumedgecostflow_to_ilp_extract_solution() { + let problem = issue_instance(); + let reduction: ReductionMECFToILP = ReduceTo::>::reduce_to(&problem); + + // Manually construct a target solution: route 1 via v2, 2 via v3 + // f = [0, 1, 2, 0, 1, 2], y = [0, 1, 1, 0, 1, 1] + let mut target_solution = vec![0usize; 12]; + target_solution[1] = 1; // f on arc (0,2) + target_solution[2] = 2; // f on arc (0,3) + target_solution[4] = 1; // f on arc (2,4) + target_solution[5] = 2; // f on arc (3,4) + target_solution[7] = 1; // y on arc (0,2) + target_solution[8] = 1; // y on arc (0,3) + target_solution[10] = 1; // y on arc (2,4) + target_solution[11] = 1; // y on arc (3,4) + + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted.len(), 6); + assert_eq!(extracted, vec![0, 1, 2, 0, 1, 2]); + assert_eq!(problem.evaluate(&extracted), Min(Some(3))); +} diff --git a/src/unit_tests/rules/minimumgraphbandwidth_ilp.rs b/src/unit_tests/rules/minimumgraphbandwidth_ilp.rs new file mode 100644 index 
00000000..577295a8 --- /dev/null +++ b/src/unit_tests/rules/minimumgraphbandwidth_ilp.rs @@ -0,0 +1,84 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Star S4: 4 vertices, 3 edges + let problem = MinimumGraphBandwidth::new(SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)])); + let reduction: ReductionMGBToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // num_x=16, pos_v=4, B=1, total=21 + assert_eq!(ilp.num_vars, 21); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_minimumgraphbandwidth_to_ilp_closed_loop() { + // Star S4 + let problem = MinimumGraphBandwidth::new(SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)])); + + // BruteForce on source to verify feasibility + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert!(problem.evaluate(&bf_solution).0.is_some()); + + // Solve via ILP + let reduction: ReductionMGBToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + assert!( + ilp_value.0.is_some(), + "ILP solution should produce a valid arrangement" + ); + + // BF and ILP should agree on optimal value + let bf_value = problem.evaluate(&bf_solution); + assert_eq!( + ilp_value, bf_value, + "ILP and BF should find same optimal bandwidth" + ); +} + +#[test] +fn test_minimumgraphbandwidth_to_ilp_path() { + // Path P4: 0-1-2-3 (optimal bandwidth = 1) + let problem = MinimumGraphBandwidth::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + + let reduction: ReductionMGBToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let 
ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + assert_eq!( + value, + crate::types::Min(Some(1)), + "path P4 optimal bandwidth = 1" + ); +} + +#[test] +fn test_minimumgraphbandwidth_to_ilp_bf_vs_ilp() { + // Star S4 + let problem = MinimumGraphBandwidth::new(SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)])); + let reduction: ReductionMGBToILP = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_minimumgraphbandwidth_to_ilp_cycle() { + // Cycle C4: 0-1-2-3-0 (optimal bandwidth = 2) + let problem = + MinimumGraphBandwidth::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)])); + let reduction: ReductionMGBToILP = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} diff --git a/src/unit_tests/rules/minimummatrixcover_ilp.rs b/src/unit_tests/rules/minimummatrixcover_ilp.rs new file mode 100644 index 00000000..420f2102 --- /dev/null +++ b/src/unit_tests/rules/minimummatrixcover_ilp.rs @@ -0,0 +1,159 @@ +use super::*; +use crate::models::algebraic::MinimumMatrixCover; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_minimum_matrix_cover_to_ilp_closed_loop() { + let problem = MinimumMatrixCover::new(vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ]); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "MinimumMatrixCover->ILP closed loop", + ); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + 
.expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + assert_eq!(value, Min(Some(-20))); +} + +#[test] +fn test_minimum_matrix_cover_to_ilp_structure() { + let problem = MinimumMatrixCover::new(vec![vec![0, 3], vec![2, 0]]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=2: 2 sign vars + 1 auxiliary = 3 vars + assert_eq!(ilp.num_vars, 3); + // 3 constraints per pair, 1 pair + assert_eq!(ilp.constraints.len(), 3); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // y_{01} coefficient: 4*(a_01 + a_10) = 4*(3+2) = 20 + // x_0 coefficient: -2*(a_01+a_10) = -2*(3+2) = -10 + // x_1 coefficient: -2*(a_10+a_01) = -2*(2+3) = -10 + let obj_map: std::collections::HashMap = ilp.objective.iter().copied().collect(); + assert_eq!(*obj_map.get(&0).unwrap_or(&0.0), -10.0); + assert_eq!(*obj_map.get(&1).unwrap_or(&0.0), -10.0); + assert_eq!(*obj_map.get(&2).unwrap_or(&0.0), 20.0); +} + +#[test] +fn test_minimum_matrix_cover_to_ilp_bf_vs_ilp() { + let problem = MinimumMatrixCover::new(vec![ + vec![0, 3, 1, 0], + vec![3, 0, 0, 2], + vec![1, 0, 0, 4], + vec![0, 2, 4, 0], + ]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_value = BruteForce::new().solve(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); +} + +#[test] +fn test_minimum_matrix_cover_to_ilp_2x2() { + let problem = MinimumMatrixCover::new(vec![vec![0, 3], vec![2, 0]]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + // 
Optimal: different signs → value = -(3+2) = -5 + assert_eq!(value, Min(Some(-5))); +} + +#[test] +fn test_minimum_matrix_cover_to_ilp_1x1() { + let problem = MinimumMatrixCover::new(vec![vec![5]]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 1 variable, 0 pairs → 0 auxiliaries + assert_eq!(ilp.num_vars, 1); + assert_eq!(ilp.constraints.len(), 0); + + // For 1×1, f(1)²=1, value = 5 regardless. Objective should be constant (no x terms). + // x_0 coefficient: -2*Σ_{j≠0} (a_0j+a_j0) = 0 (no off-diagonal) + // The ILP finds any assignment; extracted solution gives value 5. + let ilp_solution = ILPSolver::new() + .solve(ilp) + .expect("1x1 ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Min(Some(5))); +} + +#[test] +fn test_minimum_matrix_cover_to_ilp_diagonal_matrix() { + // Diagonal matrix: all off-diagonal entries are 0 + // Value is always Σ a_ii (constant), since f(i)²=1 + let problem = MinimumMatrixCover::new(vec![vec![2, 0, 0], vec![0, 3, 0], vec![0, 0, 1]]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("diagonal ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + // All configs give value 2+3+1 = 6 + assert_eq!(problem.evaluate(&extracted), Min(Some(6))); +} + +#[test] +fn test_minimum_matrix_cover_to_ilp_asymmetric() { + // Non-symmetric matrix + let problem = MinimumMatrixCover::new(vec![vec![0, 5], vec![1, 0]]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_value = BruteForce::new().solve(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); + // Different signs: 
-(5+1) = -6, same signs: +(5+1) = 6 + assert_eq!(ilp_value, Min(Some(-6))); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_minimum_matrix_cover_to_ilp_canonical_example_spec() { + let spec = canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "minimum_matrix_cover_to_ilp") + .expect("missing canonical MinimumMatrixCover -> ILP example spec"); + let example = (spec.build)(); + + assert_eq!(example.source.problem, "MinimumMatrixCover"); + assert_eq!(example.target.problem, "ILP"); + assert_eq!( + example.source.instance["matrix"].as_array().unwrap().len(), + 2 + ); + assert_eq!(example.target.instance["num_vars"], 3); +} diff --git a/src/unit_tests/rules/minimummetricdimension_ilp.rs b/src/unit_tests/rules/minimummetricdimension_ilp.rs new file mode 100644 index 00000000..c19068ea --- /dev/null +++ b/src/unit_tests/rules/minimummetricdimension_ilp.rs @@ -0,0 +1,143 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_minimummetricdimension_to_ilp_closed_loop() { + // House graph: metric dimension = 2 + let problem = MinimumMetricDimension::new(SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)], + )); + let reduction: ReductionMDToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_all_witnesses(&problem); + let bf_size = problem.evaluate(&bf_solutions[0]); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size = problem.evaluate(&extracted); + + // Both should find optimal size = 2 + assert_eq!(bf_size, Min(Some(2))); + assert_eq!(ilp_size, Min(Some(2))); + + // Verify the ILP solution is valid for the original problem + assert!( + 
problem.evaluate(&extracted).is_valid(), + "Extracted solution should be valid" + ); +} + +#[test] +fn test_minimummetricdimension_to_ilp_structure() { + // Path graph P3: 3 vertices + let problem = MinimumMetricDimension::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + let reduction: ReductionMDToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); + // C(3,2) = 3 pairs + assert_eq!( + ilp.constraints.len(), + 3, + "Should have one constraint per vertex pair" + ); + assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); + + // Each constraint should have rhs = 1 + for constraint in &ilp.constraints { + assert!(!constraint.terms.is_empty()); + assert!((constraint.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_minimummetricdimension_to_ilp_bf_vs_ilp() { + // House graph + let problem = MinimumMetricDimension::new(SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)], + )); + let reduction: ReductionMDToILP = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_minimummetricdimension_to_ilp_path_graph() { + // Path P4: 0-1-2-3, metric dimension = 1 (any endpoint resolves) + let problem = MinimumMetricDimension::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + let reduction: ReductionMDToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(problem.evaluate(&extracted).is_valid()); + assert_eq!(problem.evaluate(&extracted), Min(Some(1))); +} + +#[test] +fn test_minimummetricdimension_to_ilp_complete_graph() { + // K4: metric dimension = 3 (n-1) + let problem = MinimumMetricDimension::new(SimpleGraph::new( + 4, 
+ vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + )); + let reduction: ReductionMDToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_all_witnesses(&problem); + let bf_size = problem.evaluate(&bf_solutions[0]); + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size = problem.evaluate(&extracted); + + assert_eq!(bf_size, Min(Some(3))); + assert_eq!(ilp_size, Min(Some(3))); +} + +#[test] +fn test_minimummetricdimension_to_ilp_solution_extraction() { + let problem = MinimumMetricDimension::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + let reduction: ReductionMDToILP = ReduceTo::>::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 0, 0]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 0, 0]); + + // Verify this is a valid resolving set + assert!(problem.evaluate(&extracted).is_valid()); +} + +#[test] +fn test_minimummetricdimension_to_ilp_cycle() { + // C5: metric dimension = 2 + let problem = MinimumMetricDimension::new(SimpleGraph::new( + 5, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)], + )); + let reduction: ReductionMDToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(problem.evaluate(&extracted).is_valid()); + assert_eq!(problem.evaluate(&extracted), Min(Some(2))); +} diff --git a/src/unit_tests/rules/minimumweightdecoding_ilp.rs b/src/unit_tests/rules/minimumweightdecoding_ilp.rs new file mode 100644 index 00000000..5f589bf4 --- /dev/null +++ b/src/unit_tests/rules/minimumweightdecoding_ilp.rs @@ -0,0 
+1,121 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_instance() -> MinimumWeightDecoding { + MinimumWeightDecoding::new( + vec![ + vec![true, false, true, true], + vec![false, true, true, false], + vec![true, true, false, true], + ], + vec![true, true, false], + ) +} + +fn small_instance() -> MinimumWeightDecoding { + // 2×3 matrix, s = [1, 0] + // H = [[1,1,0],[0,1,1]], s = [true, false] + // x=[1,0,0]: row0=1 mod2=1=s[0] ✓, row1=0 mod2=0=s[1] ✓ → weight 1 + MinimumWeightDecoding::new( + vec![vec![true, true, false], vec![false, true, true]], + vec![true, false], + ) +} + +fn infeasible_instance() -> MinimumWeightDecoding { + // H = [[1,1],[1,1]], s = [true, false] + // For any x, row0 and row1 have identical dot products → s[0] ≠ s[1] means infeasible + MinimumWeightDecoding::new(vec![vec![true, true], vec![true, true]], vec![true, false]) +} + +#[test] +fn test_minimumweightdecoding_to_ilp_structure() { + let problem = issue_instance(); + let reduction: ReductionMinimumWeightDecodingToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 4 cols + 3 rows = 7 variables + assert_eq!(ilp.num_vars, 7); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // Objective: 4 terms (one per x_j) + assert_eq!(ilp.objective.len(), 4); + + // Constraints: 3 equality + 4 binary bounds = 7 + assert_eq!(ilp.constraints.len(), 7); +} + +#[test] +fn test_minimumweightdecoding_to_ilp_closed_loop() { + let problem = issue_instance(); + let bf = BruteForce::new(); + let bf_witness = bf + .find_witness(&problem) + .expect("issue instance has optimal"); + let bf_value = problem.evaluate(&bf_witness); + assert_eq!(bf_value, Min(Some(1))); + + let reduction: ReductionMinimumWeightDecodingToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP 
should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + let ilp_value = problem.evaluate(&extracted); + assert_eq!(ilp_value, bf_value); +} + +#[test] +fn test_minimumweightdecoding_to_ilp_small_closed_loop() { + let problem = small_instance(); + let bf = BruteForce::new(); + let bf_witness = bf + .find_witness(&problem) + .expect("small instance has optimal"); + let bf_value = problem.evaluate(&bf_witness); + assert_eq!(bf_value, Min(Some(1))); + + let reduction: ReductionMinimumWeightDecodingToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), bf_value); +} + +#[test] +fn test_minimumweightdecoding_to_ilp_infeasible() { + let problem = infeasible_instance(); + let reduction: ReductionMinimumWeightDecodingToILP = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible instance should produce infeasible ILP" + ); +} + +#[test] +fn test_minimumweightdecoding_to_ilp_bf_vs_ilp() { + let problem = issue_instance(); + let reduction: ReductionMinimumWeightDecodingToILP = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_minimumweightdecoding_to_ilp_extract_solution() { + let problem = issue_instance(); + let reduction: ReductionMinimumWeightDecodingToILP = ReduceTo::>::reduce_to(&problem); + + // Manually construct a valid target solution: x=[0,0,1,0], k=[0,0,0] + // (k_i values are the integer slack from mod-2) + // Row 0: H[0][2]=1 → sum=1, s=1 → 1-1=0 → k_0=0 ✓ + // Row 1: H[1][2]=1 → sum=1, s=1 → 1-1=0 → k_1=0 ✓ + // Row 2: H[2][2]=0 → sum=0, s=0 → 0-0=0 → k_2=0 ✓ + let target_solution = vec![0, 0, 1, 0, 0, 0, 0]; + let extracted = reduction.extract_solution(&target_solution); + 
assert_eq!(extracted.len(), 4); + assert_eq!(extracted, vec![0, 0, 1, 0]); + assert_eq!(problem.evaluate(&extracted), Min(Some(1))); +} diff --git a/src/unit_tests/rules/numericalmatchingwithtargetsums_ilp.rs b/src/unit_tests/rules/numericalmatchingwithtargetsums_ilp.rs new file mode 100644 index 00000000..b6a1dd3e --- /dev/null +++ b/src/unit_tests/rules/numericalmatchingwithtargetsums_ilp.rs @@ -0,0 +1,118 @@ +use super::*; +use crate::models::algebraic::{Comparison, ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_numericalmatchingwithtargetsums_to_ilp_closed_loop() { + let problem = + NumericalMatchingWithTargetSums::new(vec![1, 4, 7], vec![2, 5, 3], vec![3, 7, 12]); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "NMTS->ILP closed loop", + ); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_numericalmatchingwithtargetsums_to_ilp_bf_vs_ilp() { + let problem = + NumericalMatchingWithTargetSums::new(vec![1, 4, 7], vec![2, 5, 3], vec![3, 7, 12]); + let reduction = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_numericalmatchingwithtargetsums_to_ilp_structure() { + let problem = + NumericalMatchingWithTargetSums::new(vec![1, 4, 7], vec![2, 5, 3], vec![3, 7, 12]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Only compatible triples are created as variables + // Check that we have 3m = 9 constraints (3 for x, 3 for y, 3 for targets) + assert_eq!(ilp.num_constraints(), 9); + 
assert_eq!(ilp.sense, ObjectiveSense::Minimize); + // Feasibility: empty objective + assert!(ilp.objective.is_empty()); + + // All constraints should be equality constraints + for c in &ilp.constraints { + assert_eq!(c.cmp, Comparison::Eq); + assert!((c.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_numericalmatchingwithtargetsums_to_ilp_unsatisfiable() { + // m=2, no valid matching: sums {1+3,2+4}={4,6} or {1+4,2+3}={5,5}, neither = {10,20} + let problem = NumericalMatchingWithTargetSums::new(vec![1, 2], vec![3, 4], vec![10, 20]); + let reduction = ReduceTo::>::reduce_to(&problem); + let result = ILPSolver::new().solve(reduction.target_problem()); + assert!( + result.is_none(), + "Unsatisfiable instance should have no ILP solution" + ); +} + +#[test] +fn test_numericalmatchingwithtargetsums_to_ilp_single_pair() { + let problem = NumericalMatchingWithTargetSums::new(vec![5], vec![3], vec![8]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 1 compatible triple: (0,0,0) since 5+3=8 + assert_eq!(ilp.num_vars(), 1); + assert_eq!(ilp.num_constraints(), 3); // 3*1 + + let ilp_solution = ILPSolver::new() + .solve(ilp) + .expect("single-pair ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![0]); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_numericalmatchingwithtargetsums_to_ilp_compatible_triples_only() { + // Verify that only compatible triples generate variables + // m=2, sizes_x=[1,2], sizes_y=[3,4], targets=[4,6] + // Compatible: (0,0,0): 1+3=4, (1,1,1): 2+4=6 + // Also (0,1,1): 1+4=5≠6, (1,0,0): 2+3=5≠4 — NOT compatible + // Wait: (0,1,0): 1+4=5≠4, (1,0,1): 2+3=5≠6 — also not + // So only 2 variables + let problem = NumericalMatchingWithTargetSums::new(vec![1, 2], vec![3, 4], vec![4, 6]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars(), 2); + 
+ let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![0, 1]); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_numericalmatchingwithtargetsums_to_ilp_canonical_example_spec() { + let spec = canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "numericalmatchingwithtargetsums_to_ilp") + .expect("missing canonical NMTS -> ILP example spec"); + let example = (spec.build)(); + + assert_eq!(example.source.problem, "NumericalMatchingWithTargetSums"); + assert_eq!(example.target.problem, "ILP"); + assert!(!example.solutions.is_empty()); +} diff --git a/src/unit_tests/rules/optimumcommunicationspanningtree_ilp.rs b/src/unit_tests/rules/optimumcommunicationspanningtree_ilp.rs new file mode 100644 index 00000000..b0faca24 --- /dev/null +++ b/src/unit_tests/rules/optimumcommunicationspanningtree_ilp.rs @@ -0,0 +1,140 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Min; + +fn k3_problem() -> OptimumCommunicationSpanningTree { + let edge_weights = vec![vec![0, 1, 2], vec![1, 0, 3], vec![2, 3, 0]]; + let requirements = vec![vec![0, 1, 1], vec![1, 0, 1], vec![1, 1, 0]]; + OptimumCommunicationSpanningTree::new(edge_weights, requirements) +} + +fn k4_problem() -> OptimumCommunicationSpanningTree { + let edge_weights = vec![ + vec![0, 1, 3, 2], + vec![1, 0, 2, 4], + vec![3, 2, 0, 1], + vec![2, 4, 1, 0], + ]; + let requirements = vec![ + vec![0, 2, 1, 3], + vec![2, 0, 1, 1], + vec![1, 1, 0, 2], + vec![3, 1, 2, 0], + ]; + OptimumCommunicationSpanningTree::new(edge_weights, requirements) +} + +#[test] +fn test_ocst_to_ilp_closed_loop_k3() { + let problem = k3_problem(); + let reduction = 
ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "OptimumCommunicationSpanningTree->ILP K3 closed loop", + ); +} + +#[test] +fn test_ocst_to_ilp_structure_k4() { + let problem = k4_problem(); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // K4: n=4, m=6, 6 commodities (all pairs have r>0) + // num_vars = 6 + 2*6*6 = 78 + assert_eq!(ilp.num_vars(), 78); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // Constraints: 1 (tree size) + 4*6 (flow conservation) + 2*6*6 (capacity) = 1+24+72 = 97 + assert_eq!(ilp.num_constraints(), 97); +} + +#[test] +fn test_ocst_to_ilp_structure_k3() { + let problem = k3_problem(); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // K3: n=3, m=3, 3 commodities (all pairs have r>0) + // num_vars = 3 + 2*3*3 = 21 + assert_eq!(ilp.num_vars(), 21); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // Constraints: 1 (tree size) + 3*3 (flow conservation) + 2*3*3 (capacity) = 1+9+18 = 28 + assert_eq!(ilp.num_constraints(), 28); +} + +#[test] +fn test_ocst_to_ilp_bf_vs_ilp_k3() { + let problem = k3_problem(); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_solutions = BruteForce::new().find_all_witnesses(&problem); + let bf_value = problem.evaluate(&bf_solutions[0]); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); + assert!(ilp_value.is_valid()); + assert_eq!(ilp_value, Min(Some(6))); +} + +#[test] +fn test_ocst_to_ilp_bf_vs_ilp_k4() { + let problem = k4_problem(); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_solutions = BruteForce::new().find_all_witnesses(&problem); + let bf_value = problem.evaluate(&bf_solutions[0]); + + 
let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); + assert!(ilp_value.is_valid()); + assert_eq!(ilp_value, Min(Some(20))); +} + +#[test] +fn test_ocst_to_ilp_extraction() { + let problem = k3_problem(); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Should be a valid config with m=3 entries + assert_eq!(extracted.len(), 3); + // Should form a valid spanning tree with value 6 + let value = problem.evaluate(&extracted); + assert!(value.is_valid()); + assert_eq!(value, Min(Some(6))); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_ocst_to_ilp_canonical_example_spec() { + let spec = canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "optimum_communication_spanning_tree_to_ilp") + .expect("missing canonical OCST -> ILP example spec"); + let example = (spec.build)(); + + assert_eq!(example.source.problem, "OptimumCommunicationSpanningTree"); + assert_eq!(example.target.problem, "ILP"); + assert!(!example.solutions.is_empty()); +}