diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index 126c83f63..405f6c260 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -3,6 +3,9 @@ ## Project Overview Rust library for NP-hard problem reductions. Implements computational problems with reduction rules for transforming between equivalent formulations. +## Agent Preferences +- For tasks with complex logic (implementing reductions, fixing CI, issue-to-pr, check-issue, etc.), prefer invoking the `codex` CLI (if installed) with the latest model: `codex exec -c model="gpt-5.4" ""`. + ## Skills These repo-local skills live under `.claude/skills/*/SKILL.md`. diff --git a/.claude/skills/add-rule/SKILL.md b/.claude/skills/add-rule/SKILL.md index 185e9184d..13da004ef 100644 --- a/.claude/skills/add-rule/SKILL.md +++ b/.claude/skills/add-rule/SKILL.md @@ -121,7 +121,9 @@ Link via `#[cfg(test)] #[path = "..."] mod tests;` at the bottom of the rule fil Add a builder function in `src/example_db/rule_builders.rs` that constructs a small, canonical instance for this reduction. Follow the existing patterns in that file. Register the builder in `build_rule_examples()`. -## Step 5: Document in paper +## Step 5: Document in paper (MANDATORY — DO NOT SKIP) + +**This step is NOT optional.** Every reduction rule MUST have a corresponding `reduction-rule` entry in the paper. Skipping documentation is a blocking error — the PR will be rejected in review. Do not proceed to Step 6 until the paper entry is written and `make paper` compiles. Write a `reduction-rule` entry in `docs/paper/reductions.typ`. **Reference example:** search for `reduction-rule("KColoring", "QUBO"` to see the gold-standard entry — use it as a template. For a minimal example, see MinimumVertexCover -> MaximumIndependentSet. 
@@ -224,5 +226,6 @@ Aggregate-only reductions currently have a narrower CLI surface: | Missing `extract_solution` mapping state | Store any index maps needed in the ReductionResult struct | | Not adding canonical example to `example_db` | Add builder in `src/example_db/rule_builders.rs` | | Not regenerating reduction graph | Run `cargo run --example export_graph` after adding a rule | +| Skipping Step 5 (paper documentation) | **Every rule MUST have a `reduction-rule` entry in the paper. This is mandatory, not optional. PRs without documentation will be rejected.** | | Source/target model not fully registered | Both problems must already have `declare_variants!`, aliases as needed, and CLI create support -- use `add-model` skill first | | Treating a direct-to-ILP rule as a toy stub | Direct ILP reductions need exact overhead metadata and strong semantic regression tests, just like other production ILP rules | diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index e540f7507..d6244171a 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -3718,11 +3718,11 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], #{ let x = load-model-example("QuadraticCongruences") - let a = x.instance.a - let b = x.instance.b - let c = x.instance.c + let a = int(x.instance.a) + let b = int(x.instance.b) + let c = int(x.instance.c) let config = x.optimal_config - let xval = config.at(0) + 1 + let xval = range(config.len()).map(i => config.at(i) * calc.pow(2, i)).sum() // Collect all x in {1..c-1} and check x² mod b == a let rows = range(1, c).map(xi => { let sq = xi * xi @@ -9175,6 +9175,53 @@ Each reduction is presented as a *Rule* (with linked problem names and overhead _Solution extraction._ For IS solution $S$, return $C = V backslash S$, i.e.\ flip each variable: $c_v = 1 - s_v$. 
] +#let mvc_lcs = load-example("MinimumVertexCover", "LongestCommonSubsequence") +#let mvc_lcs_sol = mvc_lcs.solutions.at(0) +#reduction-rule("MinimumVertexCover", "LongestCommonSubsequence", + example: true, + example-caption: [Path graph $P_4$: VC $arrow.r$ LCS via vertex symbols], + extra: [ + #{ + let source-cover = mvc_lcs_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, x)) => i) + let target-config = mvc_lcs_sol.target_config + let witness = target-config.filter(x => x < mvc_lcs.target.instance.alphabet_size) + let num-strings = mvc_lcs.target.instance.strings.len() + let total-length = mvc_lcs.target.instance.strings.map(s => s.len()).fold(0, (acc, n) => acc + n) + let fmt-seq(xs) = "(" + xs.map(str).join(", ") + ")" + [ + #pred-commands( + "pred create --example " + problem-spec(mvc_lcs.source) + " -o mvc.json", + "pred reduce mvc.json --to " + target-spec(mvc_lcs) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate mvc.json --config " + mvc_lcs_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* Path graph $P_4$ with $n = #graph-num-vertices(mvc_lcs.source.instance)$ vertices and $|E| = #graph-num-edges(mvc_lcs.source.instance)$ edges. The canonical minimum vertex cover is $C = {#source-cover.map(str).join(", ")}$. + + *Step 2 -- Construct the LCS instance.* Alphabet $Sigma = {0, dots, #((mvc_lcs.target.instance.alphabet_size) - 1)}$ and $#num-strings$ strings: $S_0 = #fmt-seq(mvc_lcs.target.instance.strings.at(0))$, $S_1 = #fmt-seq(mvc_lcs.target.instance.strings.at(1))$, $S_2 = #fmt-seq(mvc_lcs.target.instance.strings.at(2))$, $S_3 = #fmt-seq(mvc_lcs.target.instance.strings.at(3))$. The target has `max_length` $#mvc_lcs.target.instance.max_length$ and total input length $#total-length$. 
+ + *Step 3 -- Verify the witness.* The stored target config is #fmt-seq(target-config), so the non-padding common subsequence is $w = #fmt-seq(witness)$ and the corresponding independent set is ${#witness.map(str).join(", ")}$. Taking the complement gives $V backslash w = {#source-cover.map(str).join(", ")} = C$ #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. + ] + } + ], +)[ + This $O(|V| |E|)$ reduction follows Maier's subsequence construction and the Garey--Johnson entry SR10 @garey1979. Each source vertex becomes one alphabet symbol, one template string lists the vertices in sorted order, and each source edge contributes a two-block constraint string. The longest common subsequence length equals the maximum independent-set size, so a size-$K$ vertex cover exists exactly when the target LCS has length at least $|V| - K$. +][ + _Construction._ Given a unit-weight Minimum Vertex Cover instance $(G = (V, E), K)$ with $V = {0, 1, dots, n-1}$, build a Longest Common Subsequence instance as follows. Let $Sigma = V$. Create the template string $S_0 = (0, 1, dots, n-1)$. For each edge $\{u, v\} in E$, rename the endpoints so that $u < v$, then add the edge string + $ + S_(\{u,v\}) = (0, dots, hat(u), dots, n-1) thick || thick (0, dots, hat(v), dots, n-1), + $ + where $hat(u)$ means that symbol $u$ is omitted. The target string family is $R = {S_0} union {S_e : e in E}$. Its alphabet size is $n$, it has $|E| + 1$ strings, `max_length` $= n$, and total input length $n + 2 |E| (n - 1)$. Set the target threshold to $K' = n - K$. + + _Correctness._ ($arrow.r.double$) If $C subset.eq V$ is a vertex cover of size $K$, then $I = V backslash C$ is an independent set of size $n - K$. List the vertices of $I$ in increasing order. This sequence is a subsequence of $S_0$ immediately. For an edge string $S_(\{u,v\})$, at most one endpoint lies in $I$. If neither endpoint lies in $I$, every symbol of $I$ appears in both halves, so the subsequence is immediate. 
If only the larger endpoint $v$ lies in $I$, then every symbol of $I$ still appears in the first half $(V backslash {u})$, so the same ordered list is a subsequence there. If only the smaller endpoint $u$ lies in $I$, then all symbols of $I$ smaller than $u$ appear in the first half, while $u$ and every larger symbol appear in the second half $(V backslash {v})$; concatenating the two halves therefore still contains the sorted list of $I$ as a subsequence. Hence every edge string contains that sequence, so the target LCS has length at least $|I| = n - K = K'$. + + ($arrow.l.double$) Let $w$ be a common subsequence of the target strings with $|w| >= K'$. Because $w$ is a subsequence of $S_0 = (0, 1, dots, n-1)$, its symbols are distinct and already appear in increasing order. Consider any edge $\{u, v\}$ with $u < v$. In $S_(\{u,v\})$, the symbol $v$ appears only in the first half and the symbol $u$ appears only in the second half, so any embedding of both symbols would force $v$ to be matched before $u$. That contradicts the increasing order forced by $S_0$. Therefore $w$ contains at most one endpoint of every edge, so its symbols form an independent set $I$ of size at least $K' = n - K$. The complement $V backslash I$ is then a vertex cover of size at most $K$. + + _Solution extraction._ Given an LCS witness $w$, mark every symbol that appears before the padding symbol as "outside the cover" and return its complement: $c_v = 1$ iff $v in.not w$, and $c_v = 0$ iff $v in w$. +] + #let mvc_fvs = load-example("MinimumVertexCover", "MinimumFeedbackVertexSet") #let mvc_fvs_sol = mvc_fvs.solutions.at(0) #let mvc_fvs_cover = mvc_fvs_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, x)) => i) @@ -9500,6 +9547,55 @@ where $P$ is a penalty weight large enough that any constraint violation costs m _Solution extraction._ Discard auxiliary variables: return $bold(x)[0..n]$. 
] +#let ksat_qc = load-example("KSatisfiability", "QuadraticCongruences") +#let ksat_qc_sol = ksat_qc.solutions.at(0) +#reduction-rule("KSatisfiability", "QuadraticCongruences", + example: true, + example-caption: [3-SAT with $n = #ksat_qc.source.instance.num_vars$ variables and $m = #sat-num-clauses(ksat_qc.source.instance)$ clause mapped to a quadratic congruence with a $#ksat_qc.target.instance.b.len()$-digit modulus], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ksat_qc.source) + " -o ksat.json", + "pred reduce ksat.json --to " + target-spec(ksat_qc) + " -o bundle.json", + "pred evaluate ksat.json --config " + ksat_qc_sol.source_config.map(str).join(","), + ) + + #{ + let a = ksat_qc.target.instance.a + let b = ksat_qc.target.instance.b + let c = ksat_qc.target.instance.c + let witness-bits = ksat_qc_sol.target_config.len() + [ + *Step 1 -- Source instance.* The canonical formula is the single clause $(x_1 or x_2 or x_3)$, witnessed by the satisfying assignment $(#ksat_qc_sol.source_config.map(str).join(", "))$. + + *Step 2 -- Enumerate standard clauses.* With $l = 3$ active variables, the construction lists all $M = 8$ signed 3-clauses on ${x_1, x_2, x_3}$. This yields $N = 2M + l = 19$ lifted coefficients in the doubled knapsack encoding. + + *Step 3 -- Lift by CRT.* Using $N+1 = 20$ odd primes starting at 13, the reduction builds the CRT gadgets $theta_0, dots, theta_N$ and outputs $(a, b, c)$. In the canonical fixture these numbers have $#a.len()$, $#b.len()$, and $#c.len()$ decimal digits respectively, so the paper reports their sizes rather than expanding them inline. + + *Step 4 -- Verify the stored witness.* The example DB keeps the target witness in binary using $#witness-bits$ bits. Evaluating that witness satisfies $x^2 equiv a mod b$ with $1 <= x < c$, and extraction recovers the original source assignment $(#ksat_qc_sol.source_config.map(str).join(", "))$ #sym.checkmark. 
+ ] + } + + *Multiplicity:* The fixture stores one canonical satisfying witness. + ], +)[ + Manders and Adleman's number-theoretic reduction encodes a 3-SAT assignment as a pattern of signs $alpha_j in {-1, +1}$ in a bounded knapsack-style congruence, then uses carefully chosen prime powers and the Chinese Remainder Theorem to realize those signs as divisibility conditions on $H - x$ and $H + x$. Squaring removes the sign ambiguity and yields one quadratic congruence $x^2 equiv a mod b$ together with an explicit bound $x < c$. The bound is essential: without it, the composite-modulus quadratic residuosity problem becomes much easier once the factorization of $b$ is known. +][ + _Construction._ Given a 3-CNF formula $phi$ with $n$ variables, first deduplicate clauses and restrict to the active variables. Enumerate all signed 3-clauses over those variables as $sigma_1, dots, sigma_M$. Define the base-8 clause weight $8^j$ for $sigma_j$, form $tau_phi = -sum_(sigma_j in phi) 8^j$, and for each variable compute the positive and negative occurrence sums $f_i^+$ and $f_i^-$. In doubled form, set $N = 2M + l$ and coefficients + $ d_0 = 2, quad d_(2k-1) = -8^k, quad d_(2k) = -2 dot 8^k, quad d_(2M+i) = f_i^+ - f_i^- $ + together with + $ tau_2 = 2 tau_phi + sum_(j=0)^N d_j + 2 sum_(i=1)^l f_i^- $ + modulo $2 dot 8^(M+1)$. + + Choose distinct odd primes $p_0, dots, p_N >= 13$ and let $K = product_(j=0)^N p_j^(N+1)$. For each $j$, construct $theta_j$ so that + $ theta_j equiv d_j mod 2 dot 8^(M+1), quad theta_j equiv 0 mod product_(i != j) p_i^(N+1) $ + and $p_j$ does not divide $theta_j$. Set $H = sum_j theta_j$, $b = 2 dot 8^(M+1) dot K$, and + $ a = (2 dot 8^(M+1) + K)^(-1) (K tau_2^2 + 2 dot 8^(M+1) H^2) mod b, quad c = H + 1. $ + + _Correctness._ ($arrow.r.double$) A satisfying assignment determines signs $alpha_j in {-1, +1}$ for the lifted knapsack system so that $x = sum_j alpha_j theta_j$ obeys both $x equiv tau_2 mod 2 dot 8^(M+1)$ and $(H+x)(H-x) equiv 0 mod K$. 
These together imply $x^2 equiv a mod b$ with $0 <= x <= H < c$. ($arrow.l.double$) Any witness $x < c$ with $x^2 equiv a mod b$ yields, for each $j$, a unique sign from whether $p_j^(N+1)$ divides $H-x$ or $H+x$. Those signs recover an exact knapsack solution and hence a satisfying assignment of the original 3-SAT instance. + + _Solution extraction._ Recover each sign $alpha_j$ from the divisibility of $H - x$ and $H + x$ by $p_j^(N+1)$. For variable coordinates $j = 2M+i$, interpret $alpha_j = -1$ as $x_i = 1$ and $alpha_j = +1$ as $x_i = 0$. +] + #let ksat_ss = load-example("KSatisfiability", "SubsetSum") #let ksat_ss_sol = ksat_ss.solutions.at(0) #reduction-rule("KSatisfiability", "SubsetSum", @@ -10030,6 +10126,54 @@ where $P$ is a penalty weight large enough that any constraint violation costs m _Solution extraction._ Set $x_i = 1$ if $"pos"_i$ selected; $x_i = 0$ if $"neg"_i$ selected. ] +#let sat_ifha = load-example("Satisfiability", "IntegralFlowHomologousArcs") +#let sat_ifha_sol = sat_ifha.solutions.at(0) +#reduction-rule("Satisfiability", "IntegralFlowHomologousArcs", + example: true, + example-caption: [3-variable 4-clause SAT to equality-constrained integral flow], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(sat_ifha.source) + " -o sat.json", + "pred reduce sat.json --to " + target-spec(sat_ifha) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate sat.json --config " + sat_ifha_sol.source_config.map(str).join(","), + ) + SAT assignment: $(x_1, x_2, x_3) = (#sat_ifha_sol.source_config.map(str).join(", "))$ \ + Target network: $#sat_ifha.target.instance.graph.num_vertices$ vertices, $#sat_ifha.target.instance.graph.arcs.len()$ arcs, #sat_ifha.target.instance.homologous_pairs.len() homologous pairs, and $R = #sat_ifha.target.instance.requirement$ \ + The stored flow witness gives bottleneck loads $1, 1, 1, 0$ across the four clause stages, so every stage respects its unit capacity #sym.checkmark + + *Step 1 -- 
Choose variable channels.* The fixture assignment sends one unit on the $T$ channel of $x_1$, one unit on the $F$ channel of $x_2$, and one unit on the $T$ channel of $x_3$. Because $R = 3$, any feasible witness must route exactly one unit out of each split node. + + *Step 2 -- Process the clauses.* Clause $C_1 = (x_1 or x_2)$ sends the false channels of $x_1$ and $x_2$ through the first bottleneck, so only the chosen $F_2$ path contributes. Clause $C_2 = (not x_1 or x_3)$ sends $T_1$ and $F_3$ through stage 2, giving load $1$. Clause $C_3 = (not x_2 or not x_3)$ sends $T_2$ and $T_3$ through stage 3, again giving load $1$. Clause $C_4 = (x_1 or x_3)$ sends $F_1$ and $F_3$ through stage 4, so the chosen assignment contributes load $0$. + + *Step 3 -- Verify a witness.* The sink receives all three units, and each homologous pair forces the flow entering a collector from variable $x_i$ to leave the matching distributor arc for the same variable/channel. The unsatisfying assignment $(1, 1, 1)$ is rejected because stage 3 would send both $T_2$ and $T_3$ through a unit-capacity bottleneck. + + *Multiplicity:* The fixture stores one canonical satisfying assignment / flow pair. Other satisfying assignments induce different T/F channel choices, but the clause-stage bottleneck test is always determined by which literals of each clause are false. + ], +)[ + This $O(n m + L)$ reduction @sahni1974 @garey1979 constructs a directed network with one unit-capacity variable path per Boolean variable and one capacity-$(|C_j| - 1)$ bottleneck per clause. A feasible integral flow of value $n$ exists if and only if the SAT formula is satisfiable. For $n$ variables, $m$ clauses, and total literal count $L = sum_j |C_j|$, the target has $2 n m + 3 n + 2 m + 2$ vertices, $2 n m + 5 n + m + L$ arcs, and $L$ homologous pairs. +][ + _Construction._ Let $phi = and.big_(j=1)^m C_j$ with variables $x_1, dots, x_n$ and clause widths $k_j = |C_j|$. 
Introduce source $s$, sink $t$, split vertices $"split"_i$ for $i in {1, dots, n}$, true/false pipeline vertices $T_(j,i)$ and $F_(j,i)$ for every boundary $j in {0, dots, m}$, and a collector/distributor pair $(gamma_j, delta_j)$ for each clause stage $j in {1, dots, m}$. + + _Variable stage._ For each variable $x_i$, add $(s, "split"_i)$, $( "split"_i, T_(0,i))$, and $( "split"_i, F_(0,i))$, all with capacity $1$. Any feasible flow of value $n$ must therefore choose exactly one of the two channels for each variable. + + _Clause stage._ For clause $C_j$, add a bottleneck arc $(gamma_j, delta_j)$ of capacity $k_j - 1$. For each variable $x_i$: + - if $x_i in C_j$, route the false channel through the bottleneck via $(F_(j-1,i), gamma_j)$ and $(delta_j, F_(j,i))$, while the true channel bypasses directly from $T_(j-1,i)$ to $T_(j,i)$; + - if $overline(x_i) in C_j$, route the true channel through the bottleneck via $(T_(j-1,i), gamma_j)$ and $(delta_j, T_(j,i))$, while the false channel bypasses directly from $F_(j-1,i)$ to $F_(j,i)$; + - otherwise both channels bypass directly from boundary $j - 1$ to boundary $j$. + Finally add $(T_(m,i), t)$ and $(F_(m,i), t)$ of capacity $1$ for every variable, and set the requirement to $R = n$. + + For every literal occurrence in $C_j$, declare the corresponding collector-entry and distributor-exit arcs homologous: pair $(F_(j-1,i), gamma_j)$ with $(delta_j, F_(j,i))$ when $x_i in C_j$, and pair $(T_(j-1,i), gamma_j)$ with $(delta_j, T_(j,i))$ when $overline(x_i) in C_j$. + + _Variable mapping._ Choosing the T-channel for $x_i$ represents $x_i = 1$; choosing the F-channel represents $x_i = 0$. A literal of $not C_j$ is true exactly when its designated channel is forced through the stage-$j$ bottleneck. + + _Correctness._ ($arrow.r.double$) Let $sigma$ satisfy $phi$. Route one unit from $s$ through $"split"_i$ along the T-channel when $sigma(x_i) = 1$ and along the F-channel otherwise. 
In clause stage $j$, exactly those literals of $not C_j$ that are true under $sigma$ attempt to use $(gamma_j, delta_j)$. Since $sigma$ satisfies $C_j$, at least one literal of $C_j$ is true, so at least one literal of $not C_j$ is false. Hence at most $k_j - 1$ chosen channels use the bottleneck, respecting its capacity. The homologous equalities are satisfied because every chosen collector entry for variable $x_i$ is matched with the corresponding distributor exit on the same variable/channel. + + ($arrow.l.double$) Let $f$ be a feasible integral flow of value at least $n$. The only outgoing arcs of $s$ are the $n$ unit-capacity arcs $(s, "split"_i)$, so each variable contributes exactly one unit. Conservation at $"split"_i$ forces that unit onto exactly one of $( "split"_i, T_(0,i))$ or $( "split"_i, F_(0,i))$, defining a truth assignment $sigma$. In clause stage $j$, the bottleneck allows at most $k_j - 1$ literal channels of $not C_j$ to carry flow. Because homologous pairs prevent flow from entering through one variable's channel and exiting through another's, this load counts genuine literals of $not C_j$ made true by $sigma$. Therefore at least one literal of $not C_j$ is false, so the corresponding literal of $C_j$ is true. Every clause is satisfied. + + _Solution extraction._ Read the base channel choice for each variable: output $x_i = 1$ iff the flow on $( "split"_i, T_(0,i))$ is $1$, and $x_i = 0$ iff the flow on $( "split"_i, F_(0,i))$ is $1$. +] + #reduction-rule("KSatisfiability", "Satisfiability")[ Every $k$-SAT instance is already a SAT instance --- clauses happen to have exactly $k$ literals, but SAT places no restriction on clause width. The embedding is the identity. ][ @@ -10959,6 +11103,54 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ $S_2 = {u_i : x_i = 1}$, $S_1 = U without S_2$. 
] +#let ss_bt = load-example("SetSplitting", "Betweenness") +#let ss_bt_sol = ss_bt.solutions.at(0) +#reduction-rule("SetSplitting", "Betweenness", + example: true, + example-caption: [$|U| = #ss_bt.source.instance.universe_size$, $#ss_bt.source.instance.subsets.len()$ subsets, no normalization auxiliaries needed], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ss_bt.source) + " -o set-splitting.json", + "pred reduce set-splitting.json --to " + target-spec(ss_bt) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate set-splitting.json --config " + ss_bt_sol.source_config.map(str).join(","), + ) + + #{ + let source = ss_bt.source.instance + let target = ss_bt.target.instance + let source_config = ss_bt_sol.source_config + let target_config = ss_bt_sol.target_config + let pole = source.universe_size + [ + *Step 1 -- Source instance.* The canonical Set Splitting fixture has universe $U = {0, 1, 2, 3, 4}$ and subsets $S_1 = {#source.subsets.at(0).map(str).join(", ")}$, $S_2 = {#source.subsets.at(1).map(str).join(", ")}$, $S_3 = {#source.subsets.at(2).map(str).join(", ")}$, and $S_4 = {#source.subsets.at(3).map(str).join(", ")}$. The stored splitting is $(#source_config.map(str).join(", "))$, so colors 0 and 1 both appear in every subset. + + *Step 2 -- Add the pole and clause auxiliaries.* Because every subset already has size 3, normalization adds no universe elements. The target therefore uses pole $p = a_#pole$ together with one auxiliary element for each subset, namely $d_1 = 6$, $d_2 = 7$, $d_3 = 8$, and $d_4 = 9$, for a total of $#target.num_elements$ elements. 
+ + *Step 3 -- Form the betweenness triples.* The four subsets become the triple pairs $(#target.triples.at(0).map(str).join(", "))$, $(#target.triples.at(1).map(str).join(", "))$; $(#target.triples.at(2).map(str).join(", "))$, $(#target.triples.at(3).map(str).join(", "))$; $(#target.triples.at(4).map(str).join(", "))$, $(#target.triples.at(5).map(str).join(", "))$; and $(#target.triples.at(6).map(str).join(", "))$, $(#target.triples.at(7).map(str).join(", "))$. Each pair uses one auxiliary $d_j$ to force the corresponding 3-set to place at least one element on each side of the pole. + + *Step 4 -- Verify the ordering and extraction.* The stored Betweenness witness is $f = (#target_config.map(str).join(", "))$, so the pole $p = 5$ sits at position $f(p) = #target_config.at(pole)$. Original elements $1, 3, 4$ lie to the left of the pole, while $0, 2$ lie to the right, so extraction returns $(#source_config.map(str).join(", "))$ exactly. For example, $(0, 6, 1)$ holds because $f(1) = #target_config.at(1) < f(6) = #target_config.at(6) < f(0) = #target_config.at(0)$, and $(6, 5, 2)$ holds because $f(6) = #target_config.at(6) < f(5) = #target_config.at(5) < f(2) = #target_config.at(2)$ #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. + ] + } + ], +)[ + This $O(n + sum_(S in cal(C)) |S|)$ reduction @garey1979[MS1] first normalizes each large subset to size 2 or 3 with complementarity pairs, then builds a Betweenness instance with one pole element $p$, one element $a_u$ per normalized universe element, and one auxiliary betweenness element for each size-3 subset. If the normalized Set Splitting instance has universe size $n'$ with $s_2$ size-2 subsets and $s_3$ size-3 subsets, the target has $n' + 1 + s_3$ elements and $s_2 + 2 s_3$ triples. +][ + _Construction._ Given Set Splitting instance $(U, cal(C))$, first normalize every subset to size 2 or 3. 
For a subset $S = {s_1, dots, s_k}$ with $k >= 4$, introduce fresh elements $y^+, y^-$, replace $S$ by the size-3 subset ${s_1, s_2, y^+}$ and the complementarity subset ${y^+, y^-}$, and continue recursively on ${y^-, s_3, dots, s_k}$. Repeating this step yields an equivalent normalized instance $(U', cal(C)')$ in which every subset has size 2 or 3. + + Create one Betweenness element $a_u$ for each $u in U'$ and one distinguished pole $p$. For every size-2 subset ${u, v} in cal(C)'$, add triple $(a_u, p, a_v)$. For every size-3 subset ${u, v, w} in cal(C)'$, introduce a fresh auxiliary element $d_(u,v,w)$ and add triples $(a_u, d_(u,v,w), a_v)$ and $(d_(u,v,w), p, a_w)$. + + _Correctness._ The normalization identity preserves splittability: a coloring splits ${s_1, dots, s_k}$ if and only if it can be extended to fresh elements $y^+, y^-$ so that ${s_1, s_2, y^+}$, ${y^+, y^-}$, and ${y^-, s_3, dots, s_k}$ are all non-monochromatic. Thus it suffices to reason about normalized subsets. + + ($arrow.r.double$) Let $chi: U' -> {0, 1}$ split every subset of $cal(C)'$. Place all $a_u$ with $chi(u) = 0$ to the left of $p$ and all $a_u$ with $chi(u) = 1$ to the right. For a size-2 subset ${u, v}$, non-monochromaticity gives $chi(u) != chi(v)$, so $p$ lies between $a_u$ and $a_v$, satisfying $(a_u, p, a_v)$. For a size-3 subset ${u, v, w}$, not all three colors are equal. If $u$ and $v$ lie on the same side of $p$, then $w$ lies on the opposite side; place $d_(u,v,w)$ between $a_u$ and $a_v$ on their shared side. If $u$ and $v$ lie on opposite sides of $p$, place $d_(u,v,w)$ between them on the side opposite $a_w$. In both cases $(a_u, d_(u,v,w), a_v)$ and $(d_(u,v,w), p, a_w)$ hold. + + ($arrow.l.double$) Suppose an ordering satisfies all Betweenness triples. Define $chi(u) = 0$ iff $a_u$ lies left of $p$, and $chi(u) = 1$ otherwise. 
For a size-2 subset ${u, v}$, the triple $(a_u, p, a_v)$ forces $a_u$ and $a_v$ onto opposite sides of $p$, so the subset is non-monochromatic. For a size-3 subset ${u, v, w}$, the triple $(a_u, d_(u,v,w), a_v)$ puts $d_(u,v,w)$ between $a_u$ and $a_v$. If $a_u, a_v, a_w$ were all on the same side of $p$, then $d_(u,v,w)$ would also lie on that side, making $(d_(u,v,w), p, a_w)$ impossible because $p$ cannot lie between two elements on the same side. Hence every size-3 subset is non-monochromatic. By normalization equivalence, the original Set Splitting instance is splittable. + + _Solution extraction._ For each original universe element $i in {0, dots, |U| - 1}$, return color 0 when $f(a_i) < f(p)$ and color 1 otherwise. +] + #reduction-rule("KClique", "ILP")[ A $k$-clique requires at least $k$ selected vertices with no non-edge between any pair. ][ @@ -13349,6 +13541,58 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ For variable $x_i$, set $x_i = 1$ if the cover indicator at position $2i$ is 1. ] +#let ksat_1in3 = load-example("KSatisfiability", "OneInThreeSatisfiability") +#let ksat_1in3_sol = ksat_1in3.solutions.at(0) +#reduction-rule("KSatisfiability", "OneInThreeSatisfiability", + example: true, + example-caption: [Single clause ($n = #ksat_1in3.source.instance.num_vars$, $m = #sat-num-clauses(ksat_1in3.source.instance)$): 3-SAT $arrow.r$ 1-in-3 SAT], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ksat_1in3.source) + " -o ksat.json", + "pred reduce ksat.json --to " + target-spec(ksat_1in3) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate ksat.json --config " + ksat_1in3_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* The source formula has one clause $c_1 = (x_1 or x_2 or x_3)$. The canonical satisfying assignment is $(#ksat_1in3_sol.source_config.map(str).join(", "))$, i.e.\ $x_1 = 0$, $x_2 = 0$, $x_3 = 1$. 
+ + *Step 2 -- Add the global false literal.* Introduce $z_0$ (index 4) and $z_T$ (index 5), then add the forcing clause $R(z_0, z_0, z_T)$, where $R(u, v, w)$ means "exactly one of $u, v, w$ is true." In the stored witness, $(z_0, z_T) = (0, 1)$, which is the unique satisfying pattern of that clause. + + *Step 3 -- Build the clause gadget.* Introduce auxiliaries $a_1, b_1, c_1, d_1, e_1, f_1$ at indices 6 through 11 and replace the source clause by the five 1-in-3 clauses + $ + R(x_1, a_1, d_1), quad + R(x_2, b_1, d_1), quad + R(a_1, b_1, e_1), quad + R(c_1, d_1, f_1), quad + R(x_3, c_1, z_0). + $ + Together with the forcing clause from Step 2, the exported target instance contains exactly the six clauses + $(4, 4, 5)$, $(1, 6, 9)$, $(2, 7, 9)$, $(6, 7, 10)$, $(8, 9, 11)$, $(3, 8, 4)$. + + *Step 4 -- Verify a witness.* The canonical target witness is $(#ksat_1in3_sol.target_config.map(str).join(", "))$. Reading off the auxiliaries gives $(a_1, b_1, c_1, d_1, e_1, f_1) = (0, 0, 0, 1, 1, 0)$. Every target clause has exactly one true literal #sym.checkmark, and restricting to the first three coordinates recovers $(0, 0, 1)$, which satisfies the original clause #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. This single-clause source formula has 7 satisfying assignments, and each satisfying literal pattern extends to at least one target witness by the gadget construction. + ], +)[ + This $O(n + m)$ reduction @schaefer1978 @garey1979[LO4] preserves the original variables, adds two global variables $z_0$ and $z_T$, and introduces six fresh auxiliaries per clause. Each 3-SAT clause $(ell_1 or ell_2 or ell_3)$ is replaced by a five-clause gadget of exact-one constraints. The target therefore has $n + 2 + 6m$ variables and $1 + 5m$ clauses. +][ + _Construction._ Let $phi = and_(j=1)^m (ell_1^j or ell_2^j or ell_3^j)$ be a 3-CNF formula on variables $x_1, dots, x_n$. 
Introduce global variables $z_0$ and $z_T$, and add the clause $R(z_0, z_0, z_T)$, where $R(u, v, w)$ means that exactly one of the three literals is true. This forces $z_0 = 0$ and $z_T = 1$. For every source clause $C_j = (ell_1^j or ell_2^j or ell_3^j)$, introduce six fresh auxiliaries $a_j, b_j, c_j, d_j, e_j, f_j$ and append the five target clauses + $ + R(ell_1^j, a_j, d_j), quad + R(ell_2^j, b_j, d_j), quad + R(a_j, b_j, e_j), quad + R(c_j, d_j, f_j), quad + R(ell_3^j, c_j, z_0). + $ + Negated source literals are copied directly; no complement variables are needed because 1-in-3 SAT clauses in this codebase may contain negated literals. + + _Correctness._ ($arrow.r.double$) Suppose $phi$ is satisfiable. Set the first $n$ target variables according to any satisfying source assignment, and set $(z_0, z_T) = (0, 1)$. Consider one clause gadget. Because $ell_1^j or ell_2^j or ell_3^j = 1$, the truth triple $(ell_1^j, ell_2^j, ell_3^j)$ is one of the seven nonzero 0/1 patterns. For each such pattern there is a choice of $(a_j, b_j, c_j, d_j, e_j, f_j)$ satisfying all five exact-one clauses; the implementation uses the gadget directly and the worked example shows one such extension. Doing this independently for every clause yields a satisfying 1-in-3 assignment. + + ($arrow.l.double$) Suppose the target instance is satisfiable. The global clause forces $z_0 = 0$. Fix any clause gadget, and assume for contradiction that $ell_1^j = ell_2^j = ell_3^j = 0$. Then $R(ell_3^j, c_j, z_0)$ forces $c_j = 1$. Next $R(c_j, d_j, f_j)$ forces $d_j = f_j = 0$. Then $R(ell_1^j, a_j, d_j)$ and $R(ell_2^j, b_j, d_j)$ force $a_j = b_j = 1$. But now $R(a_j, b_j, e_j)$ has two true literals, impossible. Therefore every source clause has at least one true literal, so the restriction to the original variables satisfies $phi$. + + _Solution extraction._ Return the first $n$ target coordinates unchanged, discarding $z_0$, $z_T$, and all clause auxiliaries. 
+] + #let part_swi = load-example("Partition", "SequencingWithinIntervals") #let part_swi_sol = part_swi.solutions.at(0) #reduction-rule("Partition", "SequencingWithinIntervals", @@ -13466,6 +13710,187 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ For selected vertex $v$: clause $j = floor(v / 3)$, position $p = v mod 3$. If literal $ell_(j,p) = x_i$ set $x_i = 1$; if $ell_(j,p) = not x_i$ set $x_i = 0$. ] +#let ksat_co = load-example("KSatisfiability", "CyclicOrdering") +#let ksat_co_sol = ksat_co.solutions.at(0) +#reduction-rule("KSatisfiability", "CyclicOrdering", + example: true, + example-caption: [Single-clause 3-SAT reduced to #ksat_co.target.instance.num_elements cyclic-order elements], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ksat_co.source) + " -o ksat.json", + "pred reduce ksat.json --to " + target-spec(ksat_co) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate ksat.json --config " + ksat_co_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* The canonical formula is $phi = (x_1 or x_2 or x_3)$ with $n = #ksat_co.source.instance.num_vars$ variables and $m = #ksat_co.source.instance.clauses.len()$ clause. The stored satisfying assignment is $(#ksat_co_sol.source_config.map(str).join(", "))$, so every literal in the clause is true. + + *Step 2 -- Create variable and clause elements.* The reduction introduces three variable elements per Boolean variable, giving variable triples $(alpha_1, beta_1, gamma_1) = (0, 1, 2)$, $(alpha_2, beta_2, gamma_2) = (3, 4, 5)$, and $(alpha_3, beta_3, gamma_3) = (6, 7, 8)$. The single clause adds five auxiliary elements $(j, k, l, m, n) = (9, 10, 11, 12, 13)$, so the target has $3n + 5m = #ksat_co.target.instance.num_elements$ elements total. 
+ + *Step 3 -- Emit the ten cyclic-ordering triples.* Because the clause literals are $(x_1, x_2, x_3)$, the literal-orientation triples are the forward variable triples. The target constraints are exactly #{ksat_co.target.instance.triples.map(t => "(" + t.map(str).join(", ") + ")").join(", ")}, so $|Delta| = #ksat_co.target.instance.triples.len() = 10m$. + + *Step 4 -- Verify a solution.* The target witness permutation is $(#ksat_co_sol.target_config.map(str).join(", "))$. It satisfies all ten cyclic-order constraints #sym.checkmark. For each variable triple, the forward orientation $(alpha_i, beta_i, gamma_i)$ is _not_ derived by this permutation, so extraction returns $(#ksat_co_sol.source_config.map(str).join(", "))$, which satisfies $phi$ #sym.checkmark + + *Multiplicity:* The fixture stores one canonical witness. Any cyclic permutation of the target order is also valid, and other satisfying assignments of $phi$ induce additional valid cyclic orders. + ], +)[ + This $O(n + m)$ reduction @garey1979 @galilMegiddo1977 represents each variable by a three-element orientation gadget and each clause by five auxiliary elements linked through ten cyclic-ordering triples. For $n$ variables and $m$ clauses it produces $3n + 5m$ target elements and $10m$ triples. +][ + _Construction._ Let $phi$ be a 3-CNF formula with variables $x_1, dots, x_n$ and clauses $C_1, dots, C_m$. For each variable $x_i$, create three target elements $alpha_i, beta_i, gamma_i$. For a positive literal $x_i$, define its associated cyclically ordered triple as $(alpha_i, beta_i, gamma_i)$; for a negative literal $not x_i$, define it as $(alpha_i, gamma_i, beta_i)$. For each clause $C_nu = (ell_1 or ell_2 or ell_3)$, write the three associated literal triples as $(a, b, c)$, $(d, e, f)$, and $(g, h, i)$. 
Add five fresh auxiliary elements $j_nu, k_nu, l_nu, m_nu, n_nu$ and the ten cyclic-ordering triples + $ + (a, c, j_nu), (b, j_nu, k_nu), (c, k_nu, l_nu), (d, f, j_nu), (e, j_nu, l_nu), + (f, l_nu, m_nu), (g, i, k_nu), (h, k_nu, m_nu), (i, m_nu, n_nu), (n_nu, m_nu, l_nu). + $ + + _Correctness._ ($arrow.r.double$) Let $S$ be a satisfying assignment of $phi$. For each variable, exactly one of the two opposite orientations $(alpha_i, beta_i, gamma_i)$ and $(alpha_i, gamma_i, beta_i)$ is derived by any cyclic order; interpret the literal made true by $S$ as the one whose associated orientation is _not_ derived. In every clause at least one literal is true, and Galil--Megiddo's clause gadget lemma shows that the ten triples above are then consistent with the three literal orientations for that clause @galilMegiddo1977. Because different clauses use disjoint auxiliary element sets, the per-clause cyclic orders combine into one global cyclic order satisfying all target triples. ($arrow.l.double$) Conversely, let a cyclic ordering satisfy every target triple. For each variable $x_i$, put $x_i = 1$ iff $(alpha_i, beta_i, gamma_i)$ is _not_ derived. If some clause had all three literals false under this rule, then all three associated literal orientations would be derived. The same clause gadget lemma implies that the ten triples for that clause would then be inconsistent, contradicting feasibility. Hence every clause contains a true literal, so the extracted assignment satisfies $phi$. + + _Variable mapping._ Positive literal $x_i$ is read through the orientation $(alpha_i, beta_i, gamma_i)$, while negative literal $not x_i$ is read through the reversed orientation $(alpha_i, gamma_i, beta_i)$. + + _Solution extraction._ Given a target permutation $f$, set $x_i = 1$ iff $(f(alpha_i), f(beta_i), f(gamma_i))$ is _not_ in cyclic order; otherwise set $x_i = 0$. 
+] + +#let ksat_ps = load-example("KSatisfiability", "PreemptiveScheduling") +#let ksat_ps_sol = ksat_ps.solutions.at(0) +#reduction-rule("KSatisfiability", "PreemptiveScheduling", + example: true, + example-caption: [3-SAT with $n = #ksat_ps.source.instance.num_vars$ variables and $m = #ksat_ps.source.instance.clauses.len()$ clause $arrow.r$ unit-task preemptive schedule on #ksat_ps.target.instance.lengths.len() jobs and $#ksat_ps.target.instance.num_processors$ processors], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ksat_ps.source) + " -o ksat.json", + "pred reduce ksat.json --to " + target-spec(ksat_ps) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate ksat.json --config " + ksat_ps_sol.source_config.map(str).join(","), + ) + + #{ + let n = ksat_ps.source.instance.num_vars + let m = ksat_ps.source.instance.clauses.len() + let t = n + 3 + let p = ksat_ps.target.instance.num_processors + let num-jobs = ksat_ps.target.instance.lengths.len() + let original-jobs = 2 * n * (n + 1) + 2 * n + 7 * m + let filler-jobs = num-jobs - original-jobs + let sigma = range(num-jobs).map(job => + range(num-jobs).filter(slot => ksat_ps_sol.target_config.at(job * num-jobs + slot) == 1).at(0) + ) + let slot-counts = range(t).map(slot => + range(num-jobs).filter(job => sigma.at(job) == slot).len() + ) + let clause-slots = range(7).map(i => sigma.at(30 + i)) + [ + *Step 1 -- Source instance.* The formula is $phi = (x_1 or x_2 or x_3)$ with satisfying assignment $(x_1, x_2, x_3) = (#ksat_ps_sol.source_config.map(str).join(", "))$. + + *Step 2 -- Build Ullman's unit-task gadgets.* For $n = #n$, the reduction creates $2 n (n + 1) = #(2 * n * (n + 1))$ chain jobs $x_(i,j), overline(x)_(i,j)$, $2n = #(2 * n)$ forcing jobs $y_i, overline(y)_i$, and $7m = #(7 * m)$ clause jobs $D_(r,s)$. The slot capacities are $(#(n), #(2 * n + 1), #(2 * n + 2), #(2 * n + 2), #(m + n + 1), #(6 * m)) = (3, 7, 8, 8, 5, 6)$. 
We realize these capacities with $p = max(2n + 2, 6m) = #p$ processors and $F = #filler-jobs$ filler jobs, giving $#num-jobs$ total unit jobs. In this example the filler counts are $(5, 1, 0, 0, 3, 2)$. + + *Step 3 -- Verify a schedule.* The witness schedule has exactly $p = #p$ jobs in each of the $T = #t$ slots: $(#slot-counts.map(str).join(", "))$. The positive chain starters $x_(1,0), x_(2,0), x_(3,0)$ are jobs $0, 8, 16$, placed at slots $(#sigma.at(0), #sigma.at(8), #sigma.at(16)) = (1, 1, 0)$, so extraction reads $(0, 0, 1)$ back from slot 0. The clause-pattern jobs are indices $30, dots, 36$; their slots are $(#clause-slots.map(str).join(", "))$, so exactly one clause job is promoted to slot $n + 1 = 4$ and the remaining six sit at slot $n + 2 = 5$. + + *Multiplicity:* The fixture stores one canonical witness. Other satisfying assignments induce different slot-0 choices for the variable chains and therefore different valid schedules meeting the same threshold. + ] + } + ], +)[ + Ullman's reduction first builds a variable-capacity unit-task scheduling instance for 3-SAT, then pads each time slot with chained filler jobs so a fixed number of processors simulates the desired capacity profile. Because every task has length $1$, preemption is irrelevant: the resulting instance is already a valid preemptive scheduling instance whose optimal makespan is at most $T = n + 3$ iff the formula is satisfiable @ullman1975 @garey1979. +][ + _Construction._ Let $phi$ be a 3-CNF formula with variables $x_1, dots, x_n$ and clauses $C_1, dots, C_m$. Create unit jobs $x_(i,j)$ and $overline(x)_(i,j)$ for $1 <= i <= n$ and $0 <= j <= n$, plus forcing jobs $y_i, overline(y)_i$, and clause jobs $D_(r,s)$ for $1 <= r <= m$, $1 <= s <= 7$. Add chain precedences $x_(i,j) prec x_(i,j+1)$ and $overline(x)_(i,j) prec overline(x)_(i,j+1)$, and branching precedences $x_(i,i-1) prec y_i$, $overline(x)_(i,i-1) prec overline(y)_i$. 
Set $T = n + 3$ and slot capacities $c_0 = n$, $c_1 = 2n + 1$, $c_t = 2n + 2$ for $2 <= t <= n$, $c_(n+1) = m + n + 1$, and $c_(n+2) = 6m$. + For each clause $C_r = (ell_1 or ell_2 or ell_3)$ and each nonzero bit pattern $b in {1, dots, 7}$, create clause job $D_(r,b)$. Its predecessors are the three chain endpoints chosen according to the bits of $b$: for literal position $k$, use the endpoint of $ell_k$ when bit $k$ is 1 and of $not ell_k$ when bit $k$ is 0. This makes exactly one clause job per clause ready one slot earlier when the clause is satisfied. + + To convert the variable-capacity instance to fixed processors, let $p = max(2n + 2, 6m)$. For every slot $t$, add $p - c_t$ filler jobs and impose complete-bipartite precedences from every filler at slot $t$ to every filler at slot $t+1$. Keep every task length equal to $1$ and use $p$ processors. The total work is exactly $p T$, so any schedule of makespan at most $T$ must saturate every slot and therefore realizes the intended capacities. + + _Correctness._ ($arrow.r.double$) Given a satisfying assignment, place exactly one of $x_(i,0), overline(x)_(i,0)$ at slot $0$ for each variable, propagate the two chains forward one step at a time, schedule the forcing jobs immediately after their branch points, and place the unique matching clause job for each clause at slot $n + 1$ (all other clause jobs at slot $n + 2$). The filler jobs occupy the remaining $p - c_t$ processor positions in slot $t$, so the schedule finishes by time $T = n + 3$. ($arrow.l.double$) Conversely, if the constructed instance has makespan at most $T$, then every slot is full and the filler chains force exactly $p - c_t$ filler jobs into slot $t$, leaving precisely $c_t$ non-filler positions. 
Ullman's capacity argument then applies: at slot $0$ exactly one of $x_(i,0), overline(x)_(i,0)$ is chosen per variable, this choice propagates consistently through the chains, and the availability of one clause job per clause at slot $n + 1$ implies each clause has a satisfied literal. Hence the extracted assignment satisfies $phi$. + + _Solution extraction._ In the binary schedule encoding, inspect the row for each starter job $x_(i,0)$. Set $x_i = 1$ iff that row has its single $1$ in column $0$; otherwise set $x_i = 0$. +] + +#let ksat_td = load-example("KSatisfiability", "TimetableDesign") +#let ksat_td_sol = ksat_td.solutions.at(0) +#let ksat_td_req = ksat_td.target.instance.requirements.flatten().filter(r => r > 0).len() +#let ksat_td_blocker_c = ksat_td.target.instance.craftsman_avail.filter(row => row.filter(v => v == true).len() == 1).len() +#let ksat_td_blocker_t = ksat_td.target.instance.task_avail.filter(row => row.filter(v => v == true).len() == 1).len() +#reduction-rule("KSatisfiability", "TimetableDesign", + example: true, + example-caption: [Two-clause satisfiable formula reduced to a timetable gadget instance], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ksat_td.source) + " -o ksat.json", + "pred reduce ksat.json --to " + target-spec(ksat_td) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate ksat.json --config " + ksat_td_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* The canonical formula has $n = #ksat_td.source.instance.num_vars$ variables and $m = #ksat_td.source.instance.clauses.len()$ clauses: + $ phi = (#ksat_td.source.instance.clauses.map(c => { + c.literals.map(l => if l > 0 { $x_#l$ } else { $overline(x)_#calc.abs(l)$ }).join($or$) + }).join($) and ($)) $ + The stored satisfying assignment is $(x_1, x_2) = (#ksat_td_sol.source_config.map(str).join(", "))$. 
+ + *Step 2 -- Normalize and assign periods.* The source has $L = #ksat_td.source.instance.clauses.map(c => c.literals.len()).sum()$ literal occurrences. Because $x_2$ appears four times, the bounded-occurrence preprocessing replaces it by a cycle of occurrence variables, so the normalized instance uses $#(ksat_td.target.instance.num_periods / 4)$ transformed variables and therefore $4q = #ksat_td.target.instance.num_periods$ timetable periods/colors. + + *Step 3 -- Compile the gadget graph.* The list-edge-coloring gadget becomes a timetable with $#ksat_td.target.instance.num_craftsmen$ craftsmen, $#ksat_td.target.instance.num_tasks$ tasks, and #ksat_td_req binary requirements. Among these rows, #ksat_td_blocker_c craftsmen and #ksat_td_blocker_t tasks are one-period blockers used only to forbid colors on internal gadget vertices. + + *Step 4 -- Verify a solution.* The target witness has #ksat_td_sol.target_config.filter(x => x == 1).len() scheduled pairs, exactly matching the #ksat_td_req nonzero requirements. Hence each required craftsman-task pair is scheduled once, every blocker occupies its designated period, and `pred evaluate` accepts the timetable. Reading back the distinguished variable-gadget periods recovers $(#ksat_td_sol.source_config.map(str).join(", "))$, which satisfies both clauses #sym.checkmark + + *Multiplicity:* The fixture stores one canonical witness. Different satisfying assignments, or different satisfying choices for the clause-edge colors, can induce distinct feasible timetables for the same formula. + ], +)[ + This polynomial-time implementation#footnote[The repository encodes the reduction via an explicit bounded-occurrence list-edge-coloring gadget chain rather than reproducing the original three-period presentation of @evenItaiShamir1976 verbatim. The implementation is still a many-one reduction to the general Timetable Design model.] 
realizes the NP-hardness of Timetable Design @evenItaiShamir1976 by normalizing the formula and compiling a constrained edge-coloring instance into a timetable. It uses $4L$ periods in the worst case, where $L$ is the source literal count, and creates $O(L^2)$ craftsmen and tasks with binary requirements. +][ + _Construction._ Given a 3-CNF formula $phi$, first eliminate pure literals and fix their truth values. For every remaining variable with more than three literal occurrences, apply Tovey's cloning trick: replace each occurrence by a fresh variable and add a cycle of 2-clauses $(y_i or overline(y)_(i+1))$ so that all clones must agree. Let the transformed variables be $y_1, dots, y_q$; by construction each $y_i$ appears at most twice positively and at most twice negatively. + + Reserve four colors for each transformed variable: $N_(i,2), N_(i,1), P_(i,2), P_(i,1)$. Build a bipartite core graph with one central vertex. For every $y_i$, add the six 2-list edges used by the implementation, with allowed color pairs $(P_(i,1), N_(i,2))$, $(P_(i,2), P_(i,1))$, $(P_(i,1), P_(i,2))$, $(P_(i,2), N_(i,1))$, $(N_(i,2), P_(i,2))$, and $(N_(i,1), P_(i,1))$. This variable gadget has exactly two proper colorings, which represent $y_i = 1$ and $y_i = 0$. For every transformed clause, add one clause edge from the center to a fresh clause vertex whose allowed colors are exactly the colors assigned to that clause's literal occurrences. + + Next compile the list-edge-coloring instance to an ordinary edge-coloring instance with blocked colors on vertices. Every 2-list edge is replaced by a three-edge path whose two internal vertices block all colors except the two allowed ones. A 1-list or 3-list clause edge stays direct, and the clause endpoint blocks every disallowed color. Finally encode colors as timetable periods, left-side vertices as craftsmen, right-side vertices as tasks, and every graph edge as one unit requirement between its endpoints. 
Whenever color $h$ is blocked at a vertex $u$, add a one-period blocker craftsman or task that is available only in period $h$ and is forced to match $u$ once. This dummy assignment consumes the unique slot of $u$ in period $h$, so no adjacent core edge can use that color. + + _Correctness._ ($arrow.r.double$) A satisfying assignment extends to the cloned variables and chooses one of the two color patterns in each variable gadget. Because every clause has a satisfied literal, its clause edge can take the corresponding literal color. The path gadgets propagate the chosen colors on every 2-list edge, and the blocker pairs occupy exactly the forbidden periods. Translating each edge color into its period yields a feasible timetable satisfying availability, exclusivity, and exact requirements. ($arrow.l.double$) In any feasible timetable, each required pair is scheduled in exactly one period, so every core edge receives a unique color. The blocker pairs forbid exactly the blocked colors, and every expanded path forces its represented edge to use one of the two colors in its list. Hence each variable gadget collapses to one of the two mirror patterns, defining a Boolean value for each transformed variable. A clause edge can then be colored only by one of its literal colors, so some literal in every clause matches the extracted variable pattern. Projecting clone values back to the original variables and reinstating the fixed pure-literal assignments yields a satisfying assignment of $phi$. + + _Variable mapping._ Pure literals removed during preprocessing are stored as fixed source assignments. Every remaining source variable is represented either by one transformed variable or by a cycle of clone variables introduced by the bounded-occurrence step; the added 2-clauses appear as ordinary clause gadgets and force all clones of one source variable to agree. Periods are indexed by the four colors attached to each transformed variable. 
+ + _Solution extraction._ The implementation reads the period of the distinguished `vb` edge in each variable gadget. Color $N_(i,2)$ is interpreted as truth value $1$, and color $P_(i,2)$ as truth value $0$. The recovered transformed assignment is then projected back to the original variables, with the fixed pure-literal values reinserted. +] + +#let ksat_ap = load-example("KSatisfiability", "AcyclicPartition") +#let ksat_ap_sol = ksat_ap.solutions.at(0) +#reduction-rule("KSatisfiability", "AcyclicPartition", + example: true, + example-caption: [3-SAT with $n = #ksat_ap.source.instance.num_vars$ variables, $m = #ksat_ap.source.instance.clauses.len()$ clause $arrow.r$ acyclic partition on #ksat_ap.target.instance.graph.num_vertices vertices], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ksat_ap.source) + " -o ksat.json", + "pred reduce ksat.json --to " + target-spec(ksat_ap) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate ksat.json --config " + ksat_ap_sol.source_config.map(str).join(","), + ) + + #{ + let n = ksat_ap.source.instance.num_vars + let m = ksat_ap.source.instance.clauses.len() + let tgt = ksat_ap.target.instance + [ + *Step 1 -- Source instance.* The canonical formula has $n = #n$ variable and $m = #m$ clause. The stored satisfying assignment is $(#ksat_ap_sol.source_config.map(str).join(", "))$. + + *Step 2 -- Compose the three-stage chain.* The reduction composes 3-SAT $arrow.r$ Subset Sum $arrow.r$ Partition $arrow.r$ Acyclic Partition. First, the Sipser digit-encoding produces a Subset Sum instance with $2n + 2m$ elements. Second, the Subset Sum $arrow.r$ Partition padding appends at most one element. Third, the Partition $arrow.r$ Acyclic Partition gadget builds a bipartite digraph: for each of the Partition elements, create one item vertex; add a source vertex and a sink vertex, with arcs from source to every item vertex and from every item vertex to sink. 
The resulting digraph has #tgt.graph.num_vertices vertices and #tgt.graph.arcs.len() arcs. Vertex weights are doubled element sizes for items and $(Sigma + 1)$ for the two endpoints; the weight bound is $Sigma + 1 + Sigma - (Sigma mod 2)$ where $Sigma$ is the Partition total, and the arc-cost bound equals the number of items. + + *Step 3 -- Verify a solution.* The target witness $(#ksat_ap_sol.target_config.map(str).join(", "))$ partitions the #tgt.graph.num_vertices vertices into two blocks. The source and sink land in different blocks, ensuring the quotient digraph is acyclic. The item vertices split so that the doubled sizes on each side, together with the endpoint weight, respect the weight cap. Extracting back through Partition and Subset Sum recovers $(#ksat_ap_sol.source_config.map(str).join(", "))$, which satisfies the formula #sym.checkmark + + *Multiplicity:* The fixture stores one canonical witness. Other satisfying assignments of the source formula induce different balanced partitions of the item vertices. + ] + } + ], +)[ + This composed reduction realizes 3-SAT $arrow.r$ Acyclic Partition via the witness-preserving chain 3-SAT $arrow.r$ Subset Sum $arrow.r$ Partition $arrow.r$ Acyclic Partition. The first two stages are the classical Sipser digit encoding @sipser2012 and Garey--Johnson padding @garey1979; the final stage embeds the balanced partition into a bipartite source--sink digraph whose two-block quotient is automatically acyclic. For $n$ variables and $m$ clauses, the target has $2n + 2m + 3$ vertices and $4n + 4m + 2$ arcs. +][ + _Construction._ Given a 3-CNF formula $phi$ with $n$ variables and $m$ clauses: + + (i) Apply the Sipser digit-encoding reduction (see KSatisfiability $arrow.r$ SubsetSum) to obtain a Subset Sum instance with $2n + 2m$ elements and target $T$. 
+ + (ii) Apply the Subset Sum $arrow.r$ Partition reduction (see SubsetSum $arrow.r$ Partition) to obtain a Partition instance with at most $2n + 2m + 1$ elements and total $Sigma$. + + (iii) For each Partition element $a_i$, create one item vertex with weight $2 a_i$. Add a source vertex $s$ and a sink vertex $t$, each with weight $Sigma + 1$. Create arcs $(s, v_i)$ and $(v_i, t)$ for every item vertex $v_i$, all with unit cost. Set the weight bound $B = Sigma + 1 + Sigma - (Sigma mod 2)$ and the arc-cost bound $K =$ number of items. + + _Correctness._ ($arrow.r.double$) A satisfying assignment of $phi$ yields a Subset Sum solution, which yields a balanced Partition, which splits the item vertices into two groups of equal total size. Placing one group with $s$ and the other with $t$ gives a two-block partition. Each block's weight is $Sigma + 1 + Sigma <= B$. The arcs from $s$ to the opposite group and from the opposite group to $t$ form the cut, and the quotient digraph $s$-block $arrow$ $t$-block is acyclic. ($arrow.l.double$) Any feasible acyclic partition must separate $s$ and $t$ (otherwise the digon $s arrow v arrow t$ and $s arrow v' arrow t$ would create a quotient cycle). The weight bound forces the doubled item sizes in each block to be nearly balanced, recovering a balanced Partition. Reversing the Subset Sum and 3-SAT stages then yields a satisfying assignment. + + _Solution extraction._ Identify the block containing $s$ and the block containing $t$. Assign each item vertex to Partition side 0 or 1 according to whether it shares a block with $t$. Reverse the Subset Sum $arrow.r$ Partition extraction, then reverse the 3-SAT $arrow.r$ Subset Sum extraction. 
+] + #let hc_bicon = load-example("HamiltonianCircuit", "BiconnectivityAugmentation") #let hc_bicon_sol = hc_bicon.solutions.at(0) #reduction-rule("HamiltonianCircuit", "BiconnectivityAugmentation", @@ -14038,6 +14463,50 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Set $alpha(x_(i+1)) = chi(2i)$ for $i = 0, dots, n-1$. ] +// 6b. NAESatisfiability → PartitionIntoPerfectMatchings (#845) +#let nae_ppm = load-example("NAESatisfiability", "PartitionIntoPerfectMatchings") +#let nae_ppm_sol = nae_ppm.solutions.at(0) +#reduction-rule("NAESatisfiability", "PartitionIntoPerfectMatchings", + example: true, + example-caption: [$n = #nae_ppm.source.instance.num_vars$ variables, $m = #sat-num-clauses(nae_ppm.source.instance)$ clauses, target $K = #nae_ppm.target.instance.num_matchings$], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(nae_ppm.source) + " -o naesat.json", + "pred reduce naesat.json --to " + target-spec(nae_ppm) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate naesat.json --config " + nae_ppm_sol.source_config.map(str).join(","), + ) + + #{ + let target = nae_ppm.target.instance + let config = nae_ppm_sol.target_config + [ + *Step 1 -- Source instance.* The canonical NAE-SAT fixture has clauses $C_1 = (x_1 or x_2 or x_3)$ and $C_2 = (overline(x_1) or x_2 or overline(x_3))$. The stored source witness is $(#nae_ppm_sol.source_config.map(str).join(", "))$, so the clause truth patterns are $(1, 1, 0)$ and $(0, 1, 1)$, hence both clauses satisfy the NAE condition. + + *Step 2 -- Lay out the gadgets.* Each variable contributes 4 vertices, each clause contributes 6 signal vertices and 4 clause-gadget vertices, and each literal occurrence contributes one 2-vertex equality-chain pair. 
Concretely the target has $#target.graph.num_vertices$ vertices and $#target.graph.edges.len()$ edges: variable gadgets occupy vertices $0, dots, 11$, signal pairs occupy $12, dots, 23$, the two $K_4$ clause gadgets occupy $24, dots, 31$, and the equality-chain pairs occupy $32, dots, 43$. + + *Step 3 -- Propagate the literal values.* Because $x_1 = x_2 = 1$ and $x_3 = 0$, the three signal vertices for clause $C_1$ are in groups $(#config.at(12), #config.at(14), #config.at(16)) = (0, 0, 1)$, while the three signal vertices for clause $C_2$ are in groups $(#config.at(18), #config.at(20), #config.at(22)) = (1, 0, 0)$. The equality-chain pairs at $(32, 33), dots, (42, 43)$ carry the complementary groups needed to keep each copied signal synchronized with the appropriate $t_i$ or $f_i$. + + *Step 4 -- Verify the clause gadgets and extraction.* The first $K_4$ gadget uses groups $(#config.at(24), #config.at(25), #config.at(26), #config.at(27)) = (1, 1, 0, 0)$, and the second uses $(#config.at(28), #config.at(29), #config.at(30), #config.at(31)) = (0, 1, 1, 0)$. Each gadget therefore splits $2 + 2$, so every clause gadget induces a perfect matching inside each group. Reading the truth assignment back from the variable vertices $(0, 4, 8)$ gives groups $(#config.at(0), #config.at(4), #config.at(8)) = (0, 0, 1)$, which extracts to $(#nae_ppm_sol.source_config.map(str).join(", "))$ #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. + ] + } + ], +)[ + This $O(n + m)$ reduction @schaefer1978 @garey1979[GT16] normalizes each 2-literal clause $(ell_1, ell_2)$ to $(ell_1, ell_1, ell_2)$, then builds 4-vertex variable gadgets, 2-vertex signal pairs, 4-vertex $K_4$ clause gadgets, and 2-vertex equality-chain links. For $m$ normalized clauses it produces $4n + 16m$ vertices, $3n + 21m$ edges, and fixes $K = 2$. +][ + _Construction._ Let $phi$ be a NAE-SAT instance on variables $x_1, dots, x_n$ whose clauses have size 2 or 3, matching the implemented rule. 
Replace every 2-literal clause $(ell_1, ell_2)$ by $(ell_1, ell_1, ell_2)$, yielding normalized 3-literal clauses $C_j = (ell_(j,0), ell_(j,1), ell_(j,2))$ for $j = 0, dots, m - 1$. For each variable $x_i$, create vertices $t_i, t'_i, f_i, f'_i$ with edges $(t_i, t'_i)$, $(f_i, f'_i)$, and $(t_i, f_i)$. For each clause position $(j, k)$, create a signal pair $s_(j,k), s'_(j,k)$ with edge $(s_(j,k), s'_(j,k))$. For each clause $C_j$, create vertices $w_(j,0), w_(j,1), w_(j,2), w_(j,3)$ forming a $K_4$, and add connection edges $(s_(j,k), w_(j,k))$ for $k in {0,1,2}$. + + For each variable, chain its positive occurrences starting from $t_i$ and its negative occurrences starting from $f_i$. If $(j, k)$ is the next occurrence in the chosen sign-order and $"src"$ is the current chain source, create fresh vertices $mu, mu'$ with edges $(mu, mu')$, $("src", mu)$, and $(s_(j,k), mu)$, then update $"src" := s_(j,k)$. Output the Partition Into Perfect Matchings instance $(G, 2)$. + + _Correctness._ ($arrow.r.double$) Let $alpha$ be a NAE-satisfying assignment. Put $t_i, t'_i$ in group 0 and $f_i, f'_i$ in group 1 when $alpha(x_i) = 1$; swap the two groups when $alpha(x_i) = 0$. Every equality-chain pair forces its signal vertex to share the group of the current chain source, so positive occurrences inherit the group of $t_i$ and negative occurrences inherit the group of $f_i$. In each normalized clause, the three signals are not all equal because $alpha$ satisfies the NAE condition. Assign $w_(j,k)$ to the opposite group from $s_(j,k)$ for $k = 0, 1, 2$, and assign $w_(j,3)$ to the minority group among $w_(j,0), w_(j,1), w_(j,2)$. Then every variable gadget, signal pair, and equality-chain pair contributes exactly one same-group edge, and each $K_4$ splits $2 + 2$, so every vertex has exactly one same-group neighbor. + + ($arrow.l.double$) Suppose $(G, 2)$ admits a partition into two perfect matchings. 
In each variable gadget, the edges $(t_i, t'_i)$ and $(f_i, f'_i)$ force those pairs to share a group, while the edge $(t_i, f_i)$ forces $t_i$ and $f_i$ to lie in opposite groups. Each equality-chain pair forces its signal vertex to share the group of the chain source, so positive signals copy $t_i$ and negative signals copy $f_i$. In a clause gadget, each signal vertex is opposite its corresponding $w_(j,k)$, and the $K_4$ must split $2 + 2$; therefore $w_(j,0), w_(j,1), w_(j,2)$ cannot all share one group, so neither can the three signal vertices. Defining $alpha(x_i) = 1$ iff $t_i$ lies in group 0 makes every normalized clause NAE-satisfied, hence every original clause is NAE-satisfied as well. + + _Solution extraction._ Read the variable gadgets: set $alpha(x_i) = 1$ iff $t_i$ lies in group 0. +] + // 7. ExactCoverBy3Sets → SubsetProduct (#388) #let x3c_sp = load-example("ExactCoverBy3Sets", "SubsetProduct") #let x3c_sp_sol = x3c_sp.solutions.at(0) @@ -14157,7 +14626,7 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Select subset $j$ iff $y_j != 0$. ] -// 10. KSatisfiability → SimultaneousIncongruences (#554) +// 11. KSatisfiability → SimultaneousIncongruences (#554) #let ksat_si = load-example("KSatisfiability", "SimultaneousIncongruences") #let ksat_si_sol = ksat_si.solutions.at(0) #reduction-rule("KSatisfiability", "SimultaneousIncongruences", @@ -14197,7 +14666,7 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Set $tau(x_i) = "TRUE"$ if $x mod p_i = 1$, FALSE if $x mod p_i = 2$. ] -// 11. Partition → SequencingToMinimizeTardyTaskWeight (#471) +// 12. 
Partition → SequencingToMinimizeTardyTaskWeight (#471) #let part_stw = load-example("Partition", "SequencingToMinimizeTardyTaskWeight") #let part_stw_sol = part_stw.solutions.at(0) #reduction-rule("Partition", "SequencingToMinimizeTardyTaskWeight", @@ -14347,6 +14816,121 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Read the positive literal vertices: $x_i = 1$ iff vertex $2i$ lies on side 1 of the cut. ] +#let tdm_tp = load-example("ThreeDimensionalMatching", "ThreePartition") +#let tdm_tp_sol = tdm_tp.solutions.at(0) +#reduction-rule("ThreeDimensionalMatching", "ThreePartition", + example: true, + example-caption: [$q = #tdm_tp.source.instance.universe_size$, $t = #tdm_tp.source.instance.triples.len()$, target has #tdm_tp.target.instance.sizes.len() numbers], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(tdm_tp.source) + " -o three-dimensional-matching.json", + "pred reduce three-dimensional-matching.json --to " + target-spec(tdm_tp) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate three-dimensional-matching.json --config " + tdm_tp_sol.source_config.map(str).join(","), + ) + + #{ + let q = tdm_tp.source.instance.universe_size + let t = tdm_tp.source.instance.triples.len() + let r = 32 * q + let r4 = calc.pow(r, 4) + let T1 = 40 * r4 + let a0 = 16 * (10 * r4) + 1 + let b0 = 16 * (10 * r4) + 2 + let c0 = 16 * (10 * r4) + 4 + let d0 = 16 * (10 * r4) + 8 + let T2 = 16 * T1 + 15 + let B = 64 * T2 + 4 + let target = tdm_tp.target.instance + + [ + *Step 1 -- Source instance.* The canonical source has $q = #q$ and a single triple $M = {(0, 0, 0)}$, so the witness $(#tdm_tp_sol.source_config.map(str).join(", "))$ selects the only available triple and is therefore a perfect 3-dimensional matching #sym.checkmark. + + *Step 2 -- Encode the ABCD and tagged 4-partition numbers.* Here $r = 32 q = #r$ and $T_1 = 40 r^4 = #T1$. 
Because every coordinate is 0 and occurs once, the ABCD numbers are all $10 r^4 = #(10 * r4)$. After the mod-16 tags, the 4-partition numbers become $(#a0, #b0, #c0, #d0)$ and sum to $T_2 = 16 T_1 + 15 = #T2$. + + *Step 3 -- Build the 3-partition gadget.* From the 4 tagged numbers the construction creates #target.sizes.len() target numbers: 4 regular numbers, 12 pairing numbers, and 5 fillers. The target bound is $B = 64 T_2 + 4 = #B$, matching the exported instance's bound $#target.bound$. The canonical target witness is $(#tdm_tp_sol.target_config.map(str).join(", "))$: groups 0 and 1 are the non-filler triples, and the remaining 5 groups each contain one filler together with one unused pairing pair. + + *Step 4 -- Verify the witness.* The target configuration partitions all #target.sizes.len() numbers into $#(target.sizes.len() / 3)$ triples summing to $B = #B$ #sym.checkmark. Reversing the gadget recovers the unique tagged 4-set, whose $B$, $C$, and $D$ members are all first occurrences, so the extracted source witness is again $(#tdm_tp_sol.source_config.map(str).join(", "))$ #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. In this $q = 1$ instance there is only one source matching, but the target still admits multiple equivalent 3-partition witnesses because any choice of one pairing gadget to split the unique 4-set yields a valid solution. + ] + } + ], +)[ + This $O(t^2)$ reduction @garey1979 first checks whether every coordinate of $W$, $X$, and $Y$ appears in some triple; uncovered coordinates yield a fixed infeasible 3-Partition instance. Otherwise it composes the classical 3DM $arrow.r$ ABCD-Partition, ABCD-Partition $arrow.r$ 4-Partition, and 4-Partition $arrow.r$ 3-Partition constructions, producing $24 t^2 - 3 t$ integers arranged into $8 t^2 - t$ triples. +][ + _Construction._ Let the source instance have universe size $q$ and triples $m_l = (w_(a_l), x_(b_l), y_(c_l))$ for $l = 0, dots, t - 1$. 
If some coordinate of $W union X union Y$ is absent from all triples, the source instance is trivially NO, so the implementation returns a fixed infeasible 3-Partition instance with sizes $(6, 6, 6, 6, 7, 9)$ and bound $20$. + + Otherwise set $r = 32 q$ and $T_1 = 40 r^4$. For each triple create + $ u_l = 10 r^4 - c_l r^3 - b_l r^2 - a_l r $, + one $B$-item $10 r^4 + a_l r$ on the first occurrence of coordinate $a_l$ and $11 r^4 + a_l r$ on later occurrences, one $C$-item $10 r^4 + b_l r^2$ on the first occurrence of $b_l$ and $11 r^4 + b_l r^2$ later, and one $D$-item $10 r^4 + c_l r^3$ on the first occurrence of $c_l$ and $8 r^4 + c_l r^3$ later. This is the ABCD-Partition instance. + + Tag the four classes modulo 16: + $ a'_i = 16 a_i + 1, b'_i = 16 b_i + 2, c'_i = 16 c_i + 4, d'_i = 16 d_i + 8 $ + with target $T_2 = 16 T_1 + 15$. Enumerate the resulting $4 t$ tagged numbers as $a_0, dots, a_(4 t - 1)$. + + For every tagged number create one regular element $w_i = 4(5 T_2 + a_i) + 1$. For every unordered pair $i < j$, create pairing elements + $ u_(i j) = 4(6 T_2 - a_i - a_j) + 2 $ + and + $ u'_(i j) = 4(5 T_2 + a_i + a_j) + 2 $. + Finally add $8 t^2 - 3 t$ filler elements of size $20 T_2$ and set the 3-Partition bound to $B = 64 T_2 + 4$. + + _Correctness._ The fixed preprocessing case is immediate: an uncovered coordinate makes the 3DM instance infeasible, and $(6, 6, 6, 6, 7, 9; 20)$ is a valid infeasible 3-Partition instance. + + Assume now that every coordinate appears at least once. + + ($arrow.r.double$) Given a perfect matching $M'$, form one ABCD group for every source triple. If $m_l in M'$, combine $u_l$ with the unique first-occurrence $B$, $C$, and $D$ items of coordinates $(a_l, b_l, c_l)$; otherwise combine $u_l$ with the corresponding later-occurrence dummy items. Because $r = 32 q$ prevents carries between the $r$, $r^2$, $r^3$, and $r^4$ digits, every such group sums to $T_1$, so the tagged instance has a 4-partition. 
For each tagged 4-set choose any two members $a_i, a_j$ and let the other two be $a_k, a_l$. Then ${w_i, w_j, u_(i j)}$ and ${w_k, w_l, u'_(i j)}$ both sum to $B$. Every pairing gadget not used this way joins one filler in a triple ${u_(i j), u'_(i j), 20 T_2}$. Hence the produced 3-Partition instance is feasible. + + ($arrow.l.double$) In any feasible target solution every number lies strictly between $B / 4$ and $B / 2$, so the partition really is into triples. Modulo 4, regular numbers are congruent to 1, pairing numbers to 2, and fillers to 0. Therefore every triple is either of type $(1, 1, 2)$ or $(0, 2, 2)$. The $(0, 2, 2)$ triples identify the unused pairing gadgets, leaving a family of $(1, 1, 2)$ triples that reconstructs a 4-partition of the tagged numbers. Since $1 + 2 + 4 + 8 equiv 15 mod 16$, every recovered tagged 4-set contains exactly one former $A$-, $B$-, $C$-, and $D$-item. The carry-free base-$r$ encoding then forces each ABCD group to be either a real group (all first occurrences) or a dummy group (all later occurrences). The real groups pick exactly $q$ source triples, one for each coordinate of $W$, $X$, and $Y$, so they form a perfect 3-dimensional matching. + + _Solution extraction._ Reverse the 4-Partition $arrow.r$ 3-Partition gadget by pairing each triple containing some $u_(i j)$ with the unique triple containing the matching $u'_(i j)$. This recovers the tagged 4-set. Undo the mod-16 tags to obtain one ABCD group, discard every dummy group whose $B$, $C$, and $D$ items are not first occurrences, and read the selected source triple from the surviving $A$-item. 
+] + +#let tdm_ilp = load-example("ThreeDimensionalMatching", "ILP") +#let tdm_ilp_sol = tdm_ilp.solutions.at(0) +#reduction-rule("ThreeDimensionalMatching", "ILP", + example: true, + example-caption: [$q = #tdm_ilp.source.instance.universe_size$, $t = #tdm_ilp.source.instance.triples.len()$ triples $arrow.r$ ILP with #tdm_ilp.target.instance.num_vars variables and #tdm_ilp.target.instance.constraints.len() constraints], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(tdm_ilp.source) + " -o three-dimensional-matching.json", + "pred reduce three-dimensional-matching.json --to " + target-spec(tdm_ilp) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate three-dimensional-matching.json --config " + tdm_ilp_sol.source_config.map(str).join(","), + ) + + #{ + let q = tdm_ilp.source.instance.universe_size + let t = tdm_ilp.source.instance.triples.len() + let triples = tdm_ilp.source.instance.triples + [ + *Step 1 -- Source instance.* The canonical source has universe size $q = #q$ and $t = #t$ triples: #triples.map(tr => "(" + tr.map(str).join(", ") + ")").join(", "). The stored witness $(#tdm_ilp_sol.source_config.map(str).join(", "))$ selects triples #tdm_ilp_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, x)) => str(i)).join(", "), which cover every element of $W$, $X$, and $Y$ exactly once #sym.checkmark. + + *Step 2 -- Build the ILP.* One binary variable $x_j$ per triple ($j = 0, dots, #(t - 1)$). For each of the $3q = #(3 * q)$ elements across the three sets $W$, $X$, $Y$, add an equality constraint requiring that the sum of $x_j$ over all triples containing that element equals 1. This yields #tdm_ilp.target.instance.constraints.len() constraints and #tdm_ilp.target.instance.num_vars variables. 
+ + *Step 3 -- Verify a solution.* The target configuration $(#tdm_ilp_sol.target_config.map(str).join(", "))$ is identical to the source configuration because the mapping is one variable per triple with identity extraction. Each element-coverage constraint sums to exactly 1 #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. + ] + } + ], +)[ + Direct ILP formulation: one binary variable per triple, with $3q$ equality constraints ensuring each element of $W$, $X$, and $Y$ is covered exactly once. The ILP has $t$ variables and $3q$ constraints. +][ + _Construction._ Given a 3DM instance with universe size $q$ and triples $m_0, dots, m_(t-1)$ where $m_l = (w_(a_l), x_(b_l), y_(c_l))$. Create binary variables $x_0, dots, x_(t-1)$. For each element $e in {0, dots, q - 1}$ in $W$, add constraint $sum_(l : a_l = e) x_l = 1$. Similarly for each element in $X$ and $Y$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(l : a_l = e) x_l = 1 quad forall e in {0, dots, q - 1} quad "(W-coverage)" \ + & sum_(l : b_l = e) x_l = 1 quad forall e in {0, dots, q - 1} quad "(X-coverage)" \ + & sum_(l : c_l = e) x_l = 1 quad forall e in {0, dots, q - 1} quad "(Y-coverage)" \ + & x_l in {0, 1} quad forall l in {0, dots, t - 1} + $. + + _Correctness._ ($arrow.r.double$) A perfect 3-dimensional matching selects $q$ triples covering every element exactly once. Setting $x_l = 1$ for selected triples satisfies all $3q$ equality constraints. ($arrow.l.double$) Any binary feasible solution selects a set of triples in which every element of $W$, $X$, and $Y$ appears exactly once, which is a perfect 3-dimensional matching. + + _Solution extraction._ Return the ILP solution vector directly: $x_l = 1$ iff triple $m_l$ is selected. 
+] + #let tp_rcs = load-example("ThreePartition", "ResourceConstrainedScheduling") #let tp_rcs_sol = tp_rcs.solutions.at(0) #reduction-rule("ThreePartition", "ResourceConstrainedScheduling", diff --git a/docs/paper/references.bib b/docs/paper/references.bib index 1fe47dd0b..87380e545 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -242,6 +242,17 @@ @book{garey1979 year = {1979} } +@article{galilMegiddo1977, + author = {Zvi Galil and Nimrod Megiddo}, + title = {Cyclic Ordering is NP-Complete}, + journal = {Theoretical Computer Science}, + volume = {5}, + number = {2}, + pages = {179--182}, + year = {1977}, + doi = {10.1016/0304-3975(77)90005-1} +} + @article{florianLenstraRinnooyKan1980, author = {M. Florian and J. K. Lenstra and A. H. G. Rinnooy Kan}, title = {Deterministic Production Planning: Algorithms and Complexity}, diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 542f7f465..0c5aa3674 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -843,13 +843,13 @@ pub struct CreateArgs { pub assignment: Option, /// Coefficient/parameter a for QuadraticCongruences (residue target) or QuadraticDiophantineEquations (coefficient of x²) #[arg(long)] - pub coeff_a: Option, + pub coeff_a: Option, /// Coefficient/parameter b for QuadraticCongruences (modulus) or QuadraticDiophantineEquations (coefficient of y) #[arg(long)] - pub coeff_b: Option, + pub coeff_b: Option, /// Constant c for QuadraticCongruences (search-space bound) or QuadraticDiophantineEquations (right-hand side of ax² + by = c) #[arg(long)] - pub coeff_c: Option, + pub coeff_c: Option, /// Incongruence pairs for SimultaneousIncongruences (semicolon-separated "a,b" pairs, e.g., "2,2;1,3;2,5;3,7") #[arg(long)] pub pairs: Option, @@ -1095,9 +1095,9 @@ impl CreateArgs { insert!("expression", self.expression.as_deref()); insert!("equations", self.equations.as_deref()); insert!("assignment", 
self.assignment.as_deref()); - insert!("coeff-a", self.coeff_a); - insert!("coeff-b", self.coeff_b); - insert!("coeff-c", self.coeff_c); + insert!("coeff-a", self.coeff_a.as_deref()); + insert!("coeff-b", self.coeff_b.as_deref()); + insert!("coeff-c", self.coeff_c.as_deref()); insert!("pairs", self.pairs.as_deref()); insert!("w-sizes", self.w_sizes.as_deref()); insert!("x-sizes", self.x_sizes.as_deref()); diff --git a/src/models/algebraic/quadratic_congruences.rs b/src/models/algebraic/quadratic_congruences.rs index efc69aabf..12ba2fc3c 100644 --- a/src/models/algebraic/quadratic_congruences.rs +++ b/src/models/algebraic/quadratic_congruences.rs @@ -1,11 +1,18 @@ //! Quadratic Congruences problem implementation. //! -//! Given non-negative integers a, b, c with b > 0 and a < b, determine whether -//! there exists a positive integer x with 1 ≤ x < c such that x² ≡ a (mod b). +//! Given non-negative integers `a`, `b`, and `c` with `b > 0` and `a < b`, +//! determine whether there exists a positive integer `x < c` such that +//! `x² ≡ a (mod b)`. +//! +//! The witness integer `x` is encoded as a little-endian binary vector so the +//! model can represent arbitrarily large instances while still fitting the +//! crate's `Vec` configuration interface. use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; use crate::traits::Problem; use crate::types::Or; +use num_bigint::{BigUint, ToBigUint}; +use num_traits::{One, Zero}; use serde::de::Error as _; use serde::{Deserialize, Deserializer, Serialize}; @@ -18,9 +25,9 @@ inventory::submit! 
{ module_path: module_path!(), description: "Decide whether x² ≡ a (mod b) has a solution for x in {1, ..., c-1}", fields: &[ - FieldInfo { name: "a", type_name: "u64", description: "a" }, - FieldInfo { name: "b", type_name: "u64", description: "b" }, - FieldInfo { name: "c", type_name: "u64", description: "c" }, + FieldInfo { name: "a", type_name: "BigUint", description: "a" }, + FieldInfo { name: "b", type_name: "BigUint", description: "b" }, + FieldInfo { name: "c", type_name: "BigUint", description: "c" }, ], } } @@ -28,46 +35,47 @@ inventory::submit! { inventory::submit! { ProblemSizeFieldEntry { name: "QuadraticCongruences", - fields: &["c"], + fields: &["bit_length_a", "bit_length_b", "bit_length_c"], } } /// Quadratic Congruences problem. /// -/// Given non-negative integers a, b, c with b > 0 and a < b, determine whether -/// there exists a positive integer x with 1 ≤ x < c such that x² ≡ a (mod b). -/// -/// The search space is x ∈ {1, …, c−1}. The configuration variable `config[0]` -/// encodes x as `x = config[0] + 1`. -/// -/// # Example +/// Given non-negative integers `a`, `b`, `c` with `b > 0` and `a < b`, +/// determine whether there exists a positive integer `x < c` such that +/// `x² ≡ a (mod b)`. /// -/// ``` -/// use problemreductions::models::algebraic::QuadraticCongruences; -/// use problemreductions::{Problem, Solver, BruteForce}; -/// -/// // a=4, b=15, c=10: x=2 → 4 mod 15 = 4 ✓ -/// let problem = QuadraticCongruences::new(4, 15, 10); -/// let solver = BruteForce::new(); -/// let witness = solver.find_witness(&problem); -/// assert!(witness.is_some()); -/// ``` +/// The configuration encodes `x` in little-endian binary: +/// `config[i] ∈ {0,1}` is the coefficient of `2^i`. #[derive(Debug, Clone, Serialize)] pub struct QuadraticCongruences { /// Quadratic residue target. - a: u64, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + a: BigUint, /// Modulus. 
- b: u64, - /// Search-space bound; x ranges over {1, ..., c-1}. - c: u64, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + b: BigUint, + /// Search-space bound; feasible witnesses satisfy `1 <= x < c`. + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + c: BigUint, +} + +fn bit_length(value: &BigUint) -> usize { + if value.is_zero() { + 0 + } else { + let bytes = value.to_bytes_be(); + let msb = *bytes.first().expect("nonzero BigUint has bytes"); + 8 * (bytes.len() - 1) + (8 - msb.leading_zeros() as usize) + } } impl QuadraticCongruences { - fn validate_inputs(a: u64, b: u64, c: u64) -> Result<(), String> { - if b == 0 { + fn validate_inputs(a: &BigUint, b: &BigUint, c: &BigUint) -> Result<(), String> { + if b.is_zero() { return Err("Modulus b must be positive".to_string()); } - if c == 0 { + if c.is_zero() { return Err("Bound c must be positive".to_string()); } if a >= b { @@ -78,8 +86,22 @@ impl QuadraticCongruences { /// Create a new QuadraticCongruences instance, returning an error instead of /// panicking when the inputs are invalid. - pub fn try_new(a: u64, b: u64, c: u64) -> Result { - Self::validate_inputs(a, b, c)?; + pub fn try_new(a: A, b: B, c: C) -> Result + where + A: ToBigUint, + B: ToBigUint, + C: ToBigUint, + { + let a = a + .to_biguint() + .ok_or_else(|| "Residue a must be nonnegative".to_string())?; + let b = b + .to_biguint() + .ok_or_else(|| "Modulus b must be nonnegative".to_string())?; + let c = c + .to_biguint() + .ok_or_else(|| "Bound c must be nonnegative".to_string())?; + Self::validate_inputs(&a, &b, &c)?; Ok(Self { a, b, c }) } @@ -88,31 +110,105 @@ impl QuadraticCongruences { /// # Panics /// /// Panics if `b == 0`, `c == 0`, or `a >= b`. - pub fn new(a: u64, b: u64, c: u64) -> Self { + pub fn new(a: A, b: B, c: C) -> Self + where + A: ToBigUint, + B: ToBigUint, + C: ToBigUint, + { Self::try_new(a, b, c).unwrap_or_else(|msg| panic!("{msg}")) } - /// Get the quadratic residue target a. 
- pub fn a(&self) -> u64 { - self.a + /// Get the quadratic residue target `a`. + pub fn a(&self) -> &BigUint { + &self.a + } + + /// Get the modulus `b`. + pub fn b(&self) -> &BigUint { + &self.b + } + + /// Get the search-space bound `c`. + pub fn c(&self) -> &BigUint { + &self.c } - /// Get the modulus b. - pub fn b(&self) -> u64 { - self.b + /// Number of bits needed to encode the residue target. + pub fn bit_length_a(&self) -> usize { + bit_length(&self.a) } - /// Get the search-space bound c. - pub fn c(&self) -> u64 { - self.c + /// Number of bits needed to encode the modulus. + pub fn bit_length_b(&self) -> usize { + bit_length(&self.b) + } + + /// Number of bits needed to encode the search bound. + pub fn bit_length_c(&self) -> usize { + bit_length(&self.c) + } + + fn witness_bit_length(&self) -> usize { + if self.c <= BigUint::one() { + 0 + } else { + bit_length(&(&self.c - BigUint::one())) + } + } + + /// Encode a witness integer `x` as a little-endian binary configuration. + pub fn encode_witness(&self, x: &BigUint) -> Option> { + if x.is_zero() || x >= &self.c { + return None; + } + + let num_bits = self.witness_bit_length(); + let mut remaining = x.clone(); + let mut config = Vec::with_capacity(num_bits); + + for _ in 0..num_bits { + config.push(if (&remaining & BigUint::one()).is_zero() { + 0 + } else { + 1 + }); + remaining >>= 1usize; + } + + if remaining.is_zero() { + Some(config) + } else { + None + } + } + + /// Decode a little-endian binary configuration into its witness integer `x`. 
+ pub fn decode_witness(&self, config: &[usize]) -> Option { + if config.len() != self.witness_bit_length() || config.iter().any(|&digit| digit > 1) { + return None; + } + + let mut value = BigUint::zero(); + let mut weight = BigUint::one(); + for &digit in config { + if digit == 1 { + value += &weight; + } + weight <<= 1usize; + } + Some(value) } } #[derive(Deserialize)] struct QuadraticCongruencesData { - a: u64, - b: u64, - c: u64, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + a: BigUint, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + b: BigUint, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + c: BigUint, } impl<'de> Deserialize<'de> for QuadraticCongruences { @@ -134,38 +230,43 @@ impl Problem for QuadraticCongruences { } fn dims(&self) -> Vec { - if self.c <= 1 { - // No x in {1, ..., c-1} exists. - return vec![]; + let num_bits = self.witness_bit_length(); + if num_bits == 0 { + Vec::new() + } else { + vec![2; num_bits] } - // config[0] ∈ {0, ..., c-2} maps to x = config[0] + 1 ∈ {1, ..., c-1}. - vec![self.c as usize - 1] } fn evaluate(&self, config: &[usize]) -> Or { - if self.c <= 1 { + let Some(x) = self.decode_witness(config) else { return Or(false); - } - if config.len() != 1 { + }; + + if x.is_zero() || x >= *self.c() { return Or(false); } - let x = (config[0] as u64) + 1; // 1-indexed - let satisfies = ((x as u128) * (x as u128)) % (self.b as u128) == (self.a as u128); + + let satisfies = (&x * &x) % self.b() == self.a().clone(); Or(satisfies) } } crate::declare_variants! 
{ - default QuadraticCongruences => "c", + default QuadraticCongruences => "2^bit_length_c", } #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { + let instance = QuadraticCongruences::new(4u32, 15u32, 10u32); + let optimal_config = instance + .encode_witness(&BigUint::from(2u32)) + .expect("x=2 should be a valid canonical witness"); + vec![crate::example_db::specs::ModelExampleSpec { id: "quadratic_congruences", - instance: Box::new(QuadraticCongruences::new(4, 15, 10)), - // x=2 (config[0]=1): 2²=4 ≡ 4 (mod 15) ✓ - optimal_config: vec![1], + instance: Box::new(instance), + optimal_config, optimal_value: serde_json::json!(true), }] } diff --git a/src/models/set/set_splitting.rs b/src/models/set/set_splitting.rs index 5da6b3232..e63053aa7 100644 --- a/src/models/set/set_splitting.rs +++ b/src/models/set/set_splitting.rs @@ -57,6 +57,35 @@ pub struct SetSplitting { subsets: Vec>, } +fn normalize_subsets(universe_size: usize, subsets: &[Vec]) -> (usize, Vec>) { + let mut next_element = universe_size; + let total_excess: usize = subsets + .iter() + .map(|subset| subset.len().saturating_sub(3)) + .sum(); + let mut normalized = Vec::with_capacity(subsets.len() + 2 * total_excess); + + for subset in subsets { + let mut remainder = subset.clone(); + while remainder.len() > 3 { + let positive_aux = next_element; + let negative_aux = next_element + 1; + next_element += 2; + + normalized.push(vec![remainder[0], remainder[1], positive_aux]); + normalized.push(vec![positive_aux, negative_aux]); + + let mut next_remainder = Vec::with_capacity(remainder.len() - 1); + next_remainder.push(negative_aux); + next_remainder.extend_from_slice(&remainder[2..]); + remainder = next_remainder; + } + normalized.push(remainder); + } + + (next_element, normalized) +} + impl SetSplitting { /// Create a new Set Splitting problem. 
/// @@ -108,6 +137,32 @@ impl SetSplitting { &self.subsets } + pub(crate) fn normalized_instance(&self) -> (usize, Vec>) { + normalize_subsets(self.universe_size, &self.subsets) + } + + fn normalized_stats(&self) -> (usize, usize, usize) { + let (universe_size, subsets) = self.normalized_instance(); + let size2 = subsets.iter().filter(|s| s.len() == 2).count(); + let size3 = subsets.iter().filter(|s| s.len() == 3).count(); + (universe_size, size2, size3) + } + + /// Universe size after decomposing all subsets to size 2 or 3. + pub fn normalized_universe_size(&self) -> usize { + self.normalized_stats().0 + } + + /// Number of size-2 subsets after decomposition. + pub fn normalized_num_size2_subsets(&self) -> usize { + self.normalized_stats().1 + } + + /// Number of size-3 subsets after decomposition. + pub fn normalized_num_size3_subsets(&self) -> usize { + self.normalized_stats().2 + } + /// Check if a coloring (config) splits all subsets. pub fn is_valid_solution(&self, config: &[usize]) -> bool { self.evaluate(config).0 diff --git a/src/rules/ksatisfiability_acyclicpartition.rs b/src/rules/ksatisfiability_acyclicpartition.rs new file mode 100644 index 000000000..6e747aa60 --- /dev/null +++ b/src/rules/ksatisfiability_acyclicpartition.rs @@ -0,0 +1,198 @@ +//! Reduction from KSatisfiability (3-SAT) to AcyclicPartition. +//! +//! The repository's `AcyclicPartition` model is the weighted quotient-DAG +//! partition problem, not the "partition vertices into two induced acyclic +//! subgraphs" variant described in issue #247. The implemented rule therefore +//! uses a witness-preserving composition: +//! 3-SAT -> SubsetSum -> Partition -> AcyclicPartition. 
+ +use crate::models::formula::KSatisfiability; +use crate::models::graph::AcyclicPartition; +use crate::models::misc::{Partition, SubsetSum}; +use crate::reduction; +use crate::rules::ksatisfiability_subsetsum::Reduction3SATToSubsetSum; +use crate::rules::subsetsum_partition::ReductionSubsetSumToPartition; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::DirectedGraph; +use crate::variant::K3; + +#[derive(Debug, Clone)] +struct ReductionPartitionToAcyclicPartition { + target: AcyclicPartition, + source_num_elements: usize, + source_vertex: usize, + sink_vertex: usize, +} + +impl ReductionPartitionToAcyclicPartition { + fn new(source: &Partition) -> Self { + let num_elements = source.num_elements(); + let source_vertex = num_elements; + let sink_vertex = num_elements + 1; + let total_sum = source.total_sum(); + + let arcs: Vec<(usize, usize)> = (0..num_elements) + .flat_map(|item| [(source_vertex, item), (item, sink_vertex)]) + .collect(); + + let mut vertex_weights: Vec = source + .sizes() + .iter() + .copied() + .map(|size| { + let doubled = size + .checked_mul(2) + .expect("Partition -> AcyclicPartition item weight overflow"); + u64_to_i32( + doubled, + "Partition -> AcyclicPartition requires doubled sizes to fit in i32", + ) + }) + .collect(); + + let endpoint_weight = total_sum + .checked_add(1) + .expect("Partition -> AcyclicPartition endpoint weight overflow"); + let even_prefix = total_sum - (total_sum % 2); + let weight_bound = endpoint_weight + .checked_add(even_prefix) + .expect("Partition -> AcyclicPartition weight bound overflow"); + + vertex_weights.push(u64_to_i32( + endpoint_weight, + "Partition -> AcyclicPartition requires endpoint weight to fit in i32", + )); + vertex_weights.push(u64_to_i32( + endpoint_weight, + "Partition -> AcyclicPartition requires endpoint weight to fit in i32", + )); + + let arc_costs = vec![1; arcs.len()]; + let target = AcyclicPartition::new( + DirectedGraph::new(num_elements + 2, arcs), + 
vertex_weights, + arc_costs, + u64_to_i32( + weight_bound, + "Partition -> AcyclicPartition requires weight bound to fit in i32", + ), + usize_to_i32( + num_elements, + "Partition -> AcyclicPartition requires num_elements to fit in i32", + ), + ); + + Self { + target, + source_num_elements: num_elements, + source_vertex, + sink_vertex, + } + } +} + +impl ReductionResult for ReductionPartitionToAcyclicPartition { + type Source = Partition; + type Target = AcyclicPartition; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + if target_solution.len() != self.source_num_elements + 2 { + return vec![0; self.source_num_elements]; + } + + let source_label = target_solution[self.source_vertex]; + let sink_label = target_solution[self.sink_vertex]; + debug_assert_ne!( + source_label, sink_label, + "valid target witnesses must place source and sink in different blocks" + ); + + (0..self.source_num_elements) + .map(|item| usize::from(target_solution[item] == sink_label)) + .collect() + } +} + +/// Result of reducing KSatisfiability to AcyclicPartition. 
+#[derive(Debug, Clone)] +pub struct Reduction3SATToAcyclicPartition { + sat_to_subset: Reduction3SATToSubsetSum, + subset_to_partition: ReductionSubsetSumToPartition, + partition_to_acyclic: ReductionPartitionToAcyclicPartition, +} + +impl ReductionResult for Reduction3SATToAcyclicPartition { + type Source = KSatisfiability; + type Target = AcyclicPartition; + + fn target_problem(&self) -> &Self::Target { + self.partition_to_acyclic.target_problem() + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let partition_solution = self.partition_to_acyclic.extract_solution(target_solution); + let subset_solution = self + .subset_to_partition + .extract_solution(&partition_solution); + self.sat_to_subset.extract_solution(&subset_solution) + } +} + +fn u64_to_i32(value: u64, context: &str) -> i32 { + i32::try_from(value).expect(context) +} + +fn usize_to_i32(value: usize, context: &str) -> i32 { + i32::try_from(value).expect(context) +} + +#[reduction( + overhead = { + num_vertices = "2 * num_vars + 2 * num_clauses + 3", + num_arcs = "4 * num_vars + 4 * num_clauses + 2", + } +)] +impl ReduceTo> for KSatisfiability { + type Result = Reduction3SATToAcyclicPartition; + + fn reduce_to(&self) -> Self::Result { + let sat_to_subset = as ReduceTo>::reduce_to(self); + let subset_to_partition = + >::reduce_to(sat_to_subset.target_problem()); + let partition_to_acyclic = + ReductionPartitionToAcyclicPartition::new(subset_to_partition.target_problem()); + + Reduction3SATToAcyclicPartition { + sat_to_subset, + subset_to_partition, + partition_to_acyclic, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::models::formula::CNFClause; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_acyclicpartition", + build: || { + crate::example_db::specs::rule_example_with_witness::<_, AcyclicPartition>( + KSatisfiability::::new(1, 
vec![CNFClause::new(vec![1, 1, 1])]), + SolutionPair { + source_config: vec![1], + target_config: vec![1, 0, 1, 1, 0, 0, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_acyclicpartition.rs"] +mod tests; diff --git a/src/rules/ksatisfiability_cyclicordering.rs new file mode 100644 index 000000000..d56c3b49d --- /dev/null +++ b/src/rules/ksatisfiability_cyclicordering.rs @@ -0,0 +1,135 @@ +//! Reduction from KSatisfiability (3-SAT) to CyclicOrdering. +//! +//! Galil and Megiddo's construction associates each variable with three +//! elements `(alpha_i, beta_i, gamma_i)`. A satisfying assignment is encoded by +//! which of the two cyclic orientations `(alpha_i, beta_i, gamma_i)` and +//! `(alpha_i, gamma_i, beta_i)` is induced by the final cyclic order. Each +//! clause contributes five fresh auxiliary elements and ten cyclic-ordering +//! triples enforcing that at least one literal orientation must be the +//! "true" one. +//! +//! Reference: Galil and Megiddo, "Cyclic ordering is NP-complete", 1977.
+ +use crate::models::formula::KSatisfiability; +use crate::models::misc::CyclicOrdering; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::variant::K3; + +#[derive(Debug, Clone)] +pub struct Reduction3SATToCyclicOrdering { + target: CyclicOrdering, + source_num_vars: usize, +} + +impl ReductionResult for Reduction3SATToCyclicOrdering { + type Source = KSatisfiability; + type Target = CyclicOrdering; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + (0..self.source_num_vars) + .map(|var_idx| { + let (alpha, beta, gamma) = variable_triple(var_idx); + usize::from(!is_cyclic_order( + target_solution[alpha], + target_solution[beta], + target_solution[gamma], + )) + }) + .collect() + } +} + +fn variable_triple(var_idx: usize) -> (usize, usize, usize) { + let base = 3 * var_idx; + (base, base + 1, base + 2) +} + +fn literal_triple(literal: i32) -> (usize, usize, usize) { + let (alpha, beta, gamma) = variable_triple((literal.unsigned_abs() as usize) - 1); + if literal > 0 { + (alpha, beta, gamma) + } else { + (alpha, gamma, beta) + } +} + +#[allow(clippy::nonminimal_bool)] +fn is_cyclic_order(a: usize, b: usize, c: usize) -> bool { + (a < b && b < c) || (b < c && c < a) || (c < a && a < b) +} + +#[reduction( + overhead = { + num_elements = "3 * num_vars + 5 * num_clauses", + num_triples = "10 * num_clauses", + } +)] +impl ReduceTo for KSatisfiability { + type Result = Reduction3SATToCyclicOrdering; + + fn reduce_to(&self) -> Self::Result { + let num_vars = self.num_vars(); + let num_clauses = self.num_clauses(); + let num_elements = 3 * num_vars + 5 * num_clauses; + let mut triples = Vec::with_capacity(10 * num_clauses); + + for (clause_idx, clause) in self.clauses().iter().enumerate() { + let (a, b, c) = literal_triple(clause.literals[0]); + let (d, e, f) = literal_triple(clause.literals[1]); + let (g, h, i) = literal_triple(clause.literals[2]); 
+ + let base = 3 * num_vars + 5 * clause_idx; + let j = base; + let k = base + 1; + let l = base + 2; + let m = base + 3; + let n = base + 4; + + triples.extend([ + (a, c, j), + (b, j, k), + (c, k, l), + (d, f, j), + (e, j, l), + (f, l, m), + (g, i, k), + (h, k, m), + (i, m, n), + (n, m, l), + ]); + } + + Reduction3SATToCyclicOrdering { + target: CyclicOrdering::new(num_elements, triples), + source_num_vars: num_vars, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::models::formula::CNFClause; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_cyclicordering", + build: || { + crate::example_db::specs::rule_example_with_witness::<_, CyclicOrdering>( + KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]), + SolutionPair { + source_config: vec![1, 1, 1], + target_config: vec![0, 11, 1, 9, 12, 10, 6, 13, 7, 2, 3, 4, 8, 5], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_cyclicordering.rs"] +mod tests; diff --git a/src/rules/ksatisfiability_oneinthreesatisfiability.rs b/src/rules/ksatisfiability_oneinthreesatisfiability.rs new file mode 100644 index 000000000..7895c8e23 --- /dev/null +++ b/src/rules/ksatisfiability_oneinthreesatisfiability.rs @@ -0,0 +1,92 @@ +//! Reduction from KSatisfiability (3-SAT) to One-In-Three Satisfiability. 
+ +use crate::models::formula::{CNFClause, KSatisfiability, OneInThreeSatisfiability}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::variant::K3; + +#[derive(Debug, Clone)] +pub struct Reduction3SATToOneInThreeSAT { + source_num_vars: usize, + target: OneInThreeSatisfiability, +} + +impl ReductionResult for Reduction3SATToOneInThreeSAT { + type Source = KSatisfiability; + type Target = OneInThreeSatisfiability; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.source_num_vars].to_vec() + } +} + +#[reduction(overhead = { + num_vars = "num_vars + 2 + 6 * num_clauses", + num_clauses = "1 + 5 * num_clauses", +})] +impl ReduceTo for KSatisfiability { + type Result = Reduction3SATToOneInThreeSAT; + + fn reduce_to(&self) -> Self::Result { + let source_num_vars = self.num_vars(); + let z_false = source_num_vars as i32 + 1; + let z_true = source_num_vars as i32 + 2; + let mut next_var = source_num_vars as i32 + 3; + + let mut clauses = Vec::with_capacity(1 + 5 * self.num_clauses()); + clauses.push(CNFClause::new(vec![z_false, z_false, z_true])); + + for clause in self.clauses() { + let [l1, l2, l3] = clause.literals.as_slice() else { + unreachable!("K3 clauses must have exactly three literals"); + }; + let a = next_var; + let b = next_var + 1; + let c = next_var + 2; + let d = next_var + 3; + let e = next_var + 4; + let f = next_var + 5; + next_var += 6; + + clauses.push(CNFClause::new(vec![*l1, a, d])); + clauses.push(CNFClause::new(vec![*l2, b, d])); + clauses.push(CNFClause::new(vec![a, b, e])); + clauses.push(CNFClause::new(vec![c, d, f])); + clauses.push(CNFClause::new(vec![*l3, c, z_false])); + } + + let target = OneInThreeSatisfiability::new((next_var - 1) as usize, clauses); + + Reduction3SATToOneInThreeSAT { + source_num_vars, + target, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn 
canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_oneinthreesatisfiability", + build: || { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + crate::example_db::specs::rule_example_with_witness::<_, OneInThreeSatisfiability>( + source, + SolutionPair { + source_config: vec![0, 0, 1], + target_config: vec![0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_oneinthreesatisfiability.rs"] +mod tests; diff --git a/src/rules/ksatisfiability_preemptivescheduling.rs b/src/rules/ksatisfiability_preemptivescheduling.rs new file mode 100644 index 000000000..ec7ec9bc3 --- /dev/null +++ b/src/rules/ksatisfiability_preemptivescheduling.rs @@ -0,0 +1,407 @@ +//! Reduction from KSatisfiability (3-SAT) to PreemptiveScheduling. +//! +//! This follows Ullman's 1975 construction via a unit-task precedence +//! scheduling instance. Since every task has length 1, preemption is inert: +//! the constructed instance is a valid preemptive scheduling problem whose +//! optimal makespan hits the threshold `T = num_vars + 3` iff the 3-SAT +//! instance is satisfiable. +//! +//! Reference: Jeffrey D. Ullman, "NP-complete scheduling problems", JCSS 10, +//! 1975; Garey & Johnson, Appendix A5.2. 
+ +use crate::models::formula::KSatisfiability; +use crate::models::misc::PreemptiveScheduling; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::variant::K3; + +#[derive(Debug, Clone)] +struct UllmanConstruction { + time_limit: usize, + num_processors: usize, + positive_chains: Vec>, + #[cfg(any(test, feature = "example-db"))] + negative_chains: Vec>, + #[cfg(any(test, feature = "example-db"))] + positive_forcing: Vec, + #[cfg(any(test, feature = "example-db"))] + negative_forcing: Vec, + #[cfg(any(test, feature = "example-db"))] + clause_jobs: Vec>, + #[cfg(any(test, feature = "example-db"))] + filler_jobs_by_slot: Vec>, + precedences: Vec<(usize, usize)>, + num_jobs: usize, +} + +fn time_limit(num_vars: usize) -> usize { + num_vars + 3 +} + +fn processor_upper_bound(num_vars: usize, num_clauses: usize) -> usize { + (2 * num_vars + 2).max(6 * num_clauses) +} + +fn slot_capacities(num_vars: usize, num_clauses: usize) -> Vec { + let mut capacities = vec![0; time_limit(num_vars)]; + capacities[0] = num_vars; + capacities[1] = 2 * num_vars + 1; + for capacity in capacities.iter_mut().take(num_vars + 1).skip(2) { + *capacity = 2 * num_vars + 2; + } + capacities[num_vars + 1] = num_clauses + num_vars + 1; + capacities[num_vars + 2] = 6 * num_clauses; + capacities +} + +fn literal_endpoint( + literal: i32, + pick_literal: bool, + positive_chains: &[Vec], + negative_chains: &[Vec], + endpoint_step: usize, +) -> usize { + let variable = literal.unsigned_abs() as usize - 1; + match (literal > 0, pick_literal) { + (true, true) | (false, false) => positive_chains[variable][endpoint_step], + (true, false) | (false, true) => negative_chains[variable][endpoint_step], + } +} + +fn build_ullman_construction(source: &KSatisfiability) -> UllmanConstruction { + let num_vars = source.num_vars(); + let num_clauses = source.num_clauses(); + let time_limit = time_limit(num_vars); + let num_processors = processor_upper_bound(num_vars, 
num_clauses); + let capacities = slot_capacities(num_vars, num_clauses); + + let mut next_job = 0usize; + + let mut positive_chains = vec![vec![0; num_vars + 1]; num_vars]; + let mut negative_chains = vec![vec![0; num_vars + 1]; num_vars]; + for variable in 0..num_vars { + for step in 0..=num_vars { + positive_chains[variable][step] = next_job; + next_job += 1; + negative_chains[variable][step] = next_job; + next_job += 1; + } + } + + let positive_forcing: Vec = (0..num_vars) + .map(|_| { + let job = next_job; + next_job += 1; + job + }) + .collect(); + let negative_forcing: Vec = (0..num_vars) + .map(|_| { + let job = next_job; + next_job += 1; + job + }) + .collect(); + + let mut clause_jobs = vec![vec![0; 7]; num_clauses]; + for jobs in &mut clause_jobs { + for job in jobs { + *job = next_job; + next_job += 1; + } + } + + let mut filler_jobs_by_slot = vec![Vec::new(); time_limit]; + for slot in 0..time_limit { + let filler_count = num_processors - capacities[slot]; + filler_jobs_by_slot[slot] = (0..filler_count) + .map(|_| { + let job = next_job; + next_job += 1; + job + }) + .collect(); + } + + let mut precedences = Vec::new(); + + for variable in 0..num_vars { + for step in 0..num_vars { + precedences.push(( + positive_chains[variable][step], + positive_chains[variable][step + 1], + )); + precedences.push(( + negative_chains[variable][step], + negative_chains[variable][step + 1], + )); + } + precedences.push(( + positive_chains[variable][variable], + positive_forcing[variable], + )); + precedences.push(( + negative_chains[variable][variable], + negative_forcing[variable], + )); + } + + for (clause_index, clause) in source.clauses().iter().enumerate() { + for (pattern_index, &clause_job) in clause_jobs[clause_index].iter().enumerate() { + let pattern = pattern_index + 1; + for position in 0..3 { + let literal = clause.literals[position]; + let bit_is_one = ((pattern >> (2 - position)) & 1) == 1; + precedences.push(( + literal_endpoint( + literal, + bit_is_one, 
+ &positive_chains, + &negative_chains, + num_vars, + ), + clause_job, + )); + } + } + } + + for slot in 0..time_limit.saturating_sub(1) { + for &pred in &filler_jobs_by_slot[slot] { + for &succ in &filler_jobs_by_slot[slot + 1] { + precedences.push((pred, succ)); + } + } + } + + UllmanConstruction { + time_limit, + num_processors, + positive_chains, + #[cfg(any(test, feature = "example-db"))] + negative_chains, + #[cfg(any(test, feature = "example-db"))] + positive_forcing, + #[cfg(any(test, feature = "example-db"))] + negative_forcing, + #[cfg(any(test, feature = "example-db"))] + clause_jobs, + #[cfg(any(test, feature = "example-db"))] + filler_jobs_by_slot, + precedences, + num_jobs: next_job, + } +} + +fn task_slot(config: &[usize], task: usize, d_max: usize) -> Option { + let start = task.checked_mul(d_max)?; + let end = start.checked_add(d_max)?; + let task_slice = config.get(start..end)?; + task_slice.iter().position(|&value| value == 1) +} + +#[cfg(any(test, feature = "example-db"))] +fn set_task_slot(task_slots: &mut [Option], job: usize, slot: usize) { + task_slots[job] = Some(slot); +} + +#[cfg(any(test, feature = "example-db"))] +fn clause_pattern_for_assignment( + clause: &crate::models::formula::CNFClause, + assignment: &[usize], +) -> usize { + let mut pattern = 0usize; + for (position, &literal) in clause.literals.iter().enumerate() { + let variable = literal.unsigned_abs() as usize - 1; + let value = assignment.get(variable).copied().unwrap_or(0) == 1; + let literal_true = if literal > 0 { value } else { !value }; + if literal_true { + pattern |= 1 << (2 - position); + } + } + pattern +} + +#[cfg(any(test, feature = "example-db"))] +fn construct_schedule_from_assignment( + target: &PreemptiveScheduling, + assignment: &[usize], + source: &KSatisfiability, +) -> Option> { + let construction = build_ullman_construction(source); + if assignment.len() != source.num_vars() || target.num_tasks() != construction.num_jobs { + return None; + } + + let mut 
task_slots = vec![None; construction.num_jobs]; + + for variable in 0..source.num_vars() { + let value_is_true = assignment[variable] == 1; + for step in 0..=source.num_vars() { + if value_is_true { + set_task_slot( + &mut task_slots, + construction.positive_chains[variable][step], + step, + ); + set_task_slot( + &mut task_slots, + construction.negative_chains[variable][step], + step + 1, + ); + } else { + set_task_slot( + &mut task_slots, + construction.negative_chains[variable][step], + step, + ); + set_task_slot( + &mut task_slots, + construction.positive_chains[variable][step], + step + 1, + ); + } + } + + let positive_slot = task_slots[construction.positive_chains[variable][variable]] + .expect("positive chain slot is assigned"); + let negative_slot = task_slots[construction.negative_chains[variable][variable]] + .expect("negative chain slot is assigned"); + set_task_slot( + &mut task_slots, + construction.positive_forcing[variable], + positive_slot + 1, + ); + set_task_slot( + &mut task_slots, + construction.negative_forcing[variable], + negative_slot + 1, + ); + } + + for (clause_index, clause) in source.clauses().iter().enumerate() { + let pattern = clause_pattern_for_assignment(clause, assignment); + if pattern == 0 { + return None; + } + for pattern_index in 0..7 { + let slot = if pattern_index + 1 == pattern { + source.num_vars() + 1 + } else { + source.num_vars() + 2 + }; + set_task_slot( + &mut task_slots, + construction.clause_jobs[clause_index][pattern_index], + slot, + ); + } + } + + for (slot, fillers) in construction.filler_jobs_by_slot.iter().enumerate() { + for &job in fillers { + set_task_slot(&mut task_slots, job, slot); + } + } + + let d_max = target.d_max(); + let mut config = vec![0usize; construction.num_jobs * d_max]; + for (job, slot) in task_slots.into_iter().enumerate() { + let slot = slot?; + config[job * d_max + slot] = 1; + } + Some(config) +} + +/// Result of reducing KSatisfiability to PreemptiveScheduling. 
+#[derive(Debug, Clone)] +pub struct Reduction3SATToPreemptiveScheduling { + target: PreemptiveScheduling, + positive_start_jobs: Vec, + threshold: usize, +} + +impl Reduction3SATToPreemptiveScheduling { + pub fn threshold(&self) -> usize { + self.threshold + } +} + +impl ReductionResult for Reduction3SATToPreemptiveScheduling { + type Source = KSatisfiability; + type Target = PreemptiveScheduling; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let d_max = self.target.d_max(); + self.positive_start_jobs + .iter() + .map(|&job| usize::from(task_slot(target_solution, job, d_max) == Some(0))) + .collect() + } +} + +#[reduction( + overhead = { + num_tasks = "(((2 * num_vars + 2) + 6 * num_clauses + sqrt(((2 * num_vars + 2) - 6 * num_clauses)^2)) / 2) * (num_vars + 3)", + num_processors = "((2 * num_vars + 2) + 6 * num_clauses + sqrt(((2 * num_vars + 2) - 6 * num_clauses)^2)) / 2", + d_max = "(((2 * num_vars + 2) + 6 * num_clauses + sqrt(((2 * num_vars + 2) - 6 * num_clauses)^2)) / 2) * (num_vars + 3)", + } +)] +impl ReduceTo for KSatisfiability { + type Result = Reduction3SATToPreemptiveScheduling; + + fn reduce_to(&self) -> Self::Result { + let construction = build_ullman_construction(self); + let target = PreemptiveScheduling::new( + vec![1; construction.num_jobs], + construction.num_processors, + construction.precedences.clone(), + ); + + Reduction3SATToPreemptiveScheduling { + target, + positive_start_jobs: construction + .positive_chains + .iter() + .map(|chain| chain[0]) + .collect(), + threshold: construction.time_limit, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::models::formula::CNFClause; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_preemptivescheduling", + build: || { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 
2, 3])]); + let reduction = ReduceTo::::reduce_to(&source); + let source_config = vec![0, 0, 1]; + let target_config = construct_schedule_from_assignment( + reduction.target_problem(), + &source_config, + &source, + ) + .expect("canonical example assignment should yield a schedule"); + crate::example_db::specs::rule_example_with_witness::<_, PreemptiveScheduling>( + source, + SolutionPair { + source_config, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_preemptivescheduling.rs"] +mod tests; diff --git a/src/rules/ksatisfiability_quadraticcongruences.rs b/src/rules/ksatisfiability_quadraticcongruences.rs new file mode 100644 index 000000000..e24189349 --- /dev/null +++ b/src/rules/ksatisfiability_quadraticcongruences.rs @@ -0,0 +1,551 @@ +//! Reduction from KSatisfiability (3-SAT) to Quadratic Congruences. +//! +//! This follows the Manders-Adleman construction in its doubled-coefficient +//! form, matching the verified reference vectors for issue #553. 
+ +use std::collections::{BTreeMap, BTreeSet}; + +use crate::models::algebraic::QuadraticCongruences; +use crate::models::formula::KSatisfiability; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::variant::K3; +use num_bigint::{BigInt, BigUint}; +use num_traits::{One, Signed, Zero}; + +#[derive(Debug, Clone)] +pub struct Reduction3SATToQuadraticCongruences { + target: QuadraticCongruences, + source_num_vars: usize, + active_to_source: Vec, + standard_clause_count: usize, + h: BigUint, + prime_powers: Vec, +} + +impl ReductionResult for Reduction3SATToQuadraticCongruences { + type Source = KSatisfiability; + type Target = QuadraticCongruences; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let mut source_assignment = vec![0; self.source_num_vars]; + let Some(x) = self.target.decode_witness(target_solution) else { + return source_assignment; + }; + if x > self.h { + return source_assignment; + } + + let h_minus_x = &self.h - &x; + let h_plus_x = &self.h + &x; + let mut alpha = vec![0i8; self.prime_powers.len()]; + + for (j, prime_power) in self.prime_powers.iter().enumerate() { + if (&h_minus_x % prime_power).is_zero() { + alpha[j] = 1; + } else if (&h_plus_x % prime_power).is_zero() { + alpha[j] = -1; + } + } + + for (active_index, &source_index) in self.active_to_source.iter().enumerate() { + let alpha_index = 2 * self.standard_clause_count + active_index + 1; + source_assignment[source_index] = if alpha.get(alpha_index) == Some(&-1) { + 1 + } else { + 0 + }; + } + + source_assignment + } +} + +#[cfg_attr(not(any(test, feature = "example-db")), allow(dead_code))] +#[derive(Debug, Clone)] +struct MandersAdlemanConstruction { + target: QuadraticCongruences, + source_num_vars: usize, + active_to_source: Vec, + remapped_clause_set: BTreeSet>, + standard_clauses: Vec>, + standard_clause_count: usize, + active_var_count: usize, + 
#[cfg_attr(not(test), allow(dead_code))] + doubled_coefficients: Vec, + #[cfg_attr(not(test), allow(dead_code))] + tau_2: BigInt, + thetas: Vec, + h: BigUint, + prime_powers: Vec, +} + +fn is_prime(candidate: u64) -> bool { + if candidate < 2 { + return false; + } + if candidate == 2 { + return true; + } + if candidate.is_multiple_of(2) { + return false; + } + + let mut divisor = 3u64; + while divisor * divisor <= candidate { + if candidate.is_multiple_of(divisor) { + return false; + } + divisor += 2; + } + true +} + +fn admissible_primes(count: usize) -> Vec { + let mut primes = Vec::with_capacity(count); + let mut candidate = 13u64; + while primes.len() < count { + if is_prime(candidate) { + primes.push(candidate); + } + candidate += 1; + } + primes +} + +fn pow_biguint_u64(base: u64, exp: usize) -> BigUint { + let mut result = BigUint::one(); + let factor = BigUint::from(base); + for _ in 0..exp { + result *= &factor; + } + result +} + +fn bigint_mod_to_biguint(value: &BigInt, modulus: &BigUint) -> BigUint { + let modulus_bigint = BigInt::from(modulus.clone()); + let reduced = ((value % &modulus_bigint) + &modulus_bigint) % &modulus_bigint; + reduced + .to_biguint() + .expect("nonnegative reduced residue must fit BigUint") +} + +fn modular_inverse(value: &BigUint, modulus: &BigUint) -> BigUint { + let mut t = BigInt::zero(); + let mut new_t = BigInt::one(); + let mut r = BigInt::from(modulus.clone()); + let mut new_r = BigInt::from(value.clone() % modulus); + + while !new_r.is_zero() { + let quotient = &r / &new_r; + let next_t = &t - "ient * &new_t; + let next_r = &r - "ient * &new_r; + t = new_t; + new_t = next_t; + r = new_r; + new_r = next_r; + } + + assert_eq!(r, BigInt::one(), "value and modulus must be coprime"); + if t.is_negative() { + t += BigInt::from(modulus.clone()); + } + t.to_biguint().expect("inverse must be nonnegative") +} + +fn normalize_clause(clause: &[i32]) -> Option> { + let mut lits = BTreeSet::new(); + for &lit in clause { + if 
lits.contains(&-lit) { + return None; + } + lits.insert(lit); + } + Some(lits.into_iter().collect()) +} + +fn preprocess_formula(source: &KSatisfiability) -> (Vec>, Vec) { + let mut seen = BTreeSet::new(); + let mut normalized_clauses = Vec::new(); + let mut active_vars = BTreeSet::new(); + + for clause in source.clauses() { + let Some(normalized) = normalize_clause(&clause.literals) else { + continue; + }; + if normalized.len() != 3 { + panic!( + "3-SAT -> QuadraticCongruences requires each non-tautological clause to use three distinct literals; got {:?}", + clause.literals + ); + } + + let distinct_vars: BTreeSet<_> = normalized + .iter() + .map(|lit| lit.unsigned_abs() as usize) + .collect(); + if distinct_vars.len() != 3 { + panic!( + "3-SAT -> QuadraticCongruences requires each non-tautological clause to use three distinct variables; got {:?}", + clause.literals + ); + } + + if seen.insert(normalized.clone()) { + for &lit in &normalized { + active_vars.insert(lit.unsigned_abs() as usize); + } + normalized_clauses.push(normalized); + } + } + + (normalized_clauses, active_vars.into_iter().collect()) +} + +fn build_standard_clauses(num_active_vars: usize) -> (Vec>, BTreeMap, usize>) { + let mut clauses = Vec::new(); + let mut index = BTreeMap::new(); + + if num_active_vars < 3 { + return (clauses, index); + } + + for i in 1..=num_active_vars - 2 { + for j in i + 1..=num_active_vars - 1 { + for k in j + 1..=num_active_vars { + for s1 in [1i32, -1] { + for s2 in [1i32, -1] { + for s3 in [1i32, -1] { + let mut clause = vec![s1 * i as i32, s2 * j as i32, s3 * k as i32]; + clause.sort_unstable(); + index.insert(clause.clone(), clauses.len() + 1); + clauses.push(clause); + } + } + } + } + } + } + + (clauses, index) +} + +fn pow8_table(max_power: usize) -> Vec { + let mut table = Vec::with_capacity(max_power + 1); + table.push(BigUint::one()); + for _ in 0..max_power { + let next = table.last().expect("pow8 table is nonempty") * BigUint::from(8u32); + table.push(next); 
+ } + table +} + +fn build_construction(source: &KSatisfiability) -> MandersAdlemanConstruction { + let (normalized_clauses, active_vars) = preprocess_formula(source); + let active_var_count = active_vars.len(); + let var_map: BTreeMap = active_vars + .iter() + .enumerate() + .map(|(new_index, &old_index)| (old_index, new_index + 1)) + .collect(); + + let remapped_clauses = normalized_clauses + .iter() + .map(|clause| { + let mut remapped = clause + .iter() + .map(|&lit| { + let var = lit.unsigned_abs() as usize; + let new_var = *var_map + .get(&var) + .expect("active variable must be present in the remapping"); + if lit > 0 { + new_var as i32 + } else { + -(new_var as i32) + } + }) + .collect::>(); + remapped.sort_unstable(); + remapped + }) + .collect::>(); + let remapped_clause_set = remapped_clauses.iter().cloned().collect::>(); + + let (standard_clauses, standard_index) = build_standard_clauses(active_var_count); + let standard_clause_count = standard_clauses.len(); + let aux_dimension = 2 * standard_clause_count + active_var_count; + let pow8 = pow8_table(standard_clause_count + 1); + + let mut tau_phi = BigInt::zero(); + for clause in &remapped_clauses { + if let Some(&j) = standard_index.get(clause) { + tau_phi -= BigInt::from(pow8[j].clone()); + } + } + + let mut positive_occurrences = vec![BigInt::zero(); active_var_count + 1]; + let mut negative_occurrences = vec![BigInt::zero(); active_var_count + 1]; + for (j, clause) in standard_clauses.iter().enumerate() { + let weight = BigInt::from(pow8[j + 1].clone()); + for &lit in clause { + let var = lit.unsigned_abs() as usize; + if lit > 0 { + positive_occurrences[var] += &weight; + } else { + negative_occurrences[var] += &weight; + } + } + } + + let mut doubled_coefficients = vec![BigInt::zero(); aux_dimension + 1]; + doubled_coefficients[0] = BigInt::from(2u32); + for k in 1..=standard_clause_count { + let weight = BigInt::from(pow8[k].clone()); + doubled_coefficients[2 * k - 1] = -weight.clone(); + 
doubled_coefficients[2 * k] = -(weight * BigInt::from(2u32)); + } + for i in 1..=active_var_count { + doubled_coefficients[2 * standard_clause_count + i] = + &positive_occurrences[i] - &negative_occurrences[i]; + } + + let sum_coefficients = doubled_coefficients + .iter() + .cloned() + .fold(BigInt::zero(), |acc, value| acc + value); + let sum_negative_occurrences = negative_occurrences + .iter() + .skip(1) + .cloned() + .fold(BigInt::zero(), |acc, value| acc + value); + let tau_2 = BigInt::from(2u32) * tau_phi + + sum_coefficients + + BigInt::from(2u32) * sum_negative_occurrences; + let mod_val = BigUint::from(2u32) * pow8[standard_clause_count + 1].clone(); + + let primes = admissible_primes(aux_dimension + 1); + let prime_powers = primes + .iter() + .map(|&prime| pow_biguint_u64(prime, aux_dimension + 1)) + .collect::>(); + let k = prime_powers + .iter() + .cloned() + .fold(BigUint::one(), |acc, value| acc * value); + + let mut thetas = Vec::with_capacity(aux_dimension + 1); + for j in 0..=aux_dimension { + let other = &k / &prime_powers[j]; + let lcm = &other * &mod_val; + let residue = bigint_mod_to_biguint(&doubled_coefficients[j], &mod_val); + let inverse = modular_inverse(&(other.clone() % &mod_val), &mod_val); + let mut theta = (&other * ((&residue * inverse) % &mod_val)) % &lcm; + if theta.is_zero() { + theta = lcm.clone(); + } + let prime = BigUint::from(primes[j]); + while (&theta % &prime).is_zero() { + theta += &lcm; + } + thetas.push(theta); + } + + let h = thetas + .iter() + .cloned() + .fold(BigUint::zero(), |acc, value| acc + value); + let beta = &mod_val * &k; + let inverse_factor = &mod_val + &k; + let inverse = modular_inverse(&(inverse_factor % &beta), &beta); + let tau_2_squared = (&tau_2 * &tau_2) + .to_biguint() + .expect("squared doubled target must be nonnegative"); + let alpha = (&inverse * ((&k * tau_2_squared) + (&mod_val * (&h * &h)))) % β + let target = QuadraticCongruences::new(alpha, beta, &h + BigUint::one()); + + 
MandersAdlemanConstruction { + target, + source_num_vars: source.num_vars(), + active_to_source: active_vars.into_iter().map(|var| var - 1).collect(), + remapped_clause_set, + standard_clauses, + standard_clause_count, + active_var_count, + doubled_coefficients, + tau_2, + thetas, + h, + prime_powers, + } +} + +#[cfg(any(test, feature = "example-db"))] +fn build_alphas( + construction: &MandersAdlemanConstruction, + assignment: &[usize], +) -> Option> { + if assignment.len() != construction.source_num_vars { + return None; + } + + let mut active_assignment = vec![0i8; construction.active_var_count + 1]; + for (active_index, &source_index) in construction.active_to_source.iter().enumerate() { + active_assignment[active_index + 1] = if assignment[source_index] == 0 { 0 } else { 1 }; + } + + let mut alphas = + vec![0i8; 2 * construction.standard_clause_count + construction.active_var_count + 1]; + alphas[0] = 1; + + for i in 1..=construction.active_var_count { + alphas[2 * construction.standard_clause_count + i] = 1 - 2 * active_assignment[i]; + } + + for k in 1..=construction.standard_clause_count { + let clause = &construction.standard_clauses[k - 1]; + let mut y = 0i32; + for &lit in clause { + let var = lit.unsigned_abs() as usize; + if lit > 0 { + y += i32::from(active_assignment[var]); + } else { + y += 1 - i32::from(active_assignment[var]); + } + } + if construction.remapped_clause_set.contains(clause) { + y -= 1; + } + + match 3 - 2 * y { + 3 => { + alphas[2 * k - 1] = 1; + alphas[2 * k] = 1; + } + 1 => { + alphas[2 * k - 1] = -1; + alphas[2 * k] = 1; + } + -1 => { + alphas[2 * k - 1] = 1; + alphas[2 * k] = -1; + } + -3 => { + alphas[2 * k - 1] = -1; + alphas[2 * k] = -1; + } + _ => return None, + } + } + + Some(alphas) +} + +#[cfg(any(test, feature = "example-db"))] +fn witness_value_from_alphas(alphas: &[i8], thetas: &[BigUint]) -> BigUint { + let signed_sum = alphas + .iter() + .zip(thetas) + .fold(BigInt::zero(), |acc, (&alpha, theta)| { + if alpha == 1 { 
+ acc + BigInt::from(theta.clone()) + } else { + acc - BigInt::from(theta.clone()) + } + }); + signed_sum + .abs() + .to_biguint() + .expect("absolute witness must be nonnegative") +} + +#[cfg(any(test, feature = "example-db"))] +fn witness_config_for_assignment( + source: &KSatisfiability, + assignment: &[usize], +) -> Option> { + let construction = build_construction(source); + let alphas = build_alphas(&construction, assignment)?; + let witness = witness_value_from_alphas(&alphas, &construction.thetas); + construction.target.encode_witness(&witness) +} + +#[cfg(test)] +fn exhaustive_alpha_solution(source: &KSatisfiability) -> Option> { + let construction = build_construction(source); + let n = construction.doubled_coefficients.len(); + if n >= usize::BITS as usize { + return None; + } + + for bits in 0usize..(1usize << n) { + let alphas = (0..n) + .map(|j| if (bits >> j) & 1 == 1 { 1i8 } else { -1i8 }) + .collect::>(); + let sum = construction + .doubled_coefficients + .iter() + .zip(&alphas) + .fold(BigInt::zero(), |acc, (coefficient, &alpha)| { + acc + coefficient * BigInt::from(alpha) + }); + if sum == construction.tau_2 { + return Some(alphas); + } + } + + None +} + +#[reduction(overhead = { + bit_length_a = "(num_vars + num_clauses)^2 * log(num_vars + num_clauses + 1)", + bit_length_b = "(num_vars + num_clauses)^2 * log(num_vars + num_clauses + 1)", + bit_length_c = "(num_vars + num_clauses)^2 * log(num_vars + num_clauses + 1)", +})] +impl ReduceTo for KSatisfiability { + type Result = Reduction3SATToQuadraticCongruences; + + fn reduce_to(&self) -> Self::Result { + let construction = build_construction(self); + Reduction3SATToQuadraticCongruences { + target: construction.target, + source_num_vars: construction.source_num_vars, + active_to_source: construction.active_to_source, + standard_clause_count: construction.standard_clause_count, + h: construction.h, + prime_powers: construction.prime_powers, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) 
fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_quadraticcongruences", + build: || { + let source = KSatisfiability::::new( + 3, + vec![crate::models::formula::CNFClause::new(vec![1, 2, 3])], + ); + let target_config = witness_config_for_assignment(&source, &[1, 0, 0]) + .expect("canonical satisfying assignment should lift to a QC witness"); + crate::example_db::specs::rule_example_with_witness::<_, QuadraticCongruences>( + source, + SolutionPair { + source_config: vec![1, 0, 0], + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_quadraticcongruences.rs"] +mod tests; diff --git a/src/rules/ksatisfiability_timetabledesign.rs b/src/rules/ksatisfiability_timetabledesign.rs new file mode 100644 index 000000000..14c819a0e --- /dev/null +++ b/src/rules/ksatisfiability_timetabledesign.rs @@ -0,0 +1,882 @@ +//! Reduction from KSatisfiability (3-SAT) to TimetableDesign. +//! +//! The issue sketch for #486 is not directly implementable against this +//! repository's `TimetableDesign` model: the sketch relies on pair-specific +//! availability and per-clause optional work, while the model exposes only +//! craftsman/task availability sets and exact pairwise requirements. +//! +//! This implementation uses a fully specified chain instead: +//! 1. Eliminate pure literals from the source formula. +//! 2. Apply Tovey's bounded-occurrence cloning so every remaining variable +//! appears at most three times and every clause has length two or three. +//! 3. Build the explicit bipartite list-edge-coloring instance from Marx's +//! outerplanar reduction, padding missing literal-occurrence colors with +//! dummy colors when a variable occurs only `1+1`, `2+1`, or `1+2` times. +//! 4. Compile the list instance to a core edge-coloring instance with blocked +//! colors on auxiliary vertices, following Marx's precoloring gadgets. 
+//! 5. Encode blocked colors as dedicated dummy assignments in `TimetableDesign`. +//! +//! A timetable witness therefore yields a proper coloring of the core gadget +//! graph; the colors on the two special variable edges recover the satisfying +//! truth assignment. + +use crate::models::formula::{CNFClause, KSatisfiability}; +use crate::models::misc::TimetableDesign; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +#[cfg(any(test, feature = "example-db"))] +use crate::traits::Problem; +use crate::variant::K3; +use std::collections::{HashMap, VecDeque}; + +#[derive(Debug, Clone)] +struct NormalizedFormula { + clauses: Vec, + transformed_to_original: Vec, + pure_assignments: Vec>, + source_num_vars: usize, +} + +#[derive(Debug, Clone)] +struct CoreGraph { + edges: Vec<(usize, usize)>, + blocked_colors: Vec>, +} + +impl CoreGraph { + fn new() -> Self { + Self { + edges: Vec::new(), + blocked_colors: Vec::new(), + } + } + + fn add_vertex(&mut self) -> usize { + let id = self.blocked_colors.len(); + self.blocked_colors.push(Vec::new()); + id + } + + fn add_edge(&mut self, u: usize, v: usize) -> usize { + self.edges.push((u, v)); + self.edges.len() - 1 + } + + fn block_color(&mut self, vertex: usize, color: usize) { + let blocked = &mut self.blocked_colors[vertex]; + if !blocked.contains(&color) { + blocked.push(color); + } + } +} + +#[cfg_attr(not(any(test, feature = "example-db")), allow(dead_code))] +#[derive(Debug, Clone)] +enum EdgeEncoding { + Direct { + edge: usize, + allowed: Vec, + }, + TwoList { + left_outer: usize, + middle: usize, + right_outer: usize, + first: usize, + second: usize, + }, +} + +#[cfg_attr(not(any(test, feature = "example-db")), allow(dead_code))] +#[derive(Debug, Clone)] +struct VariableEncoding { + vb: EdgeEncoding, + vd: EdgeEncoding, + ab: EdgeEncoding, + bc: EdgeEncoding, + cd: EdgeEncoding, + de: EdgeEncoding, + neg2: usize, +} + +#[cfg_attr(not(any(test, feature = "example-db")), allow(dead_code))] 
+#[derive(Debug, Clone)] +struct ClauseEncoding { + edge: EdgeEncoding, +} + +#[cfg_attr(not(any(test, feature = "example-db")), allow(dead_code))] +#[derive(Debug, Clone)] +struct ReductionLayout { + source_num_vars: usize, + num_periods: usize, + craftsman_avail: Vec>, + task_avail: Vec>, + requirements: Vec>, + pure_assignments: Vec>, + transformed_to_original: Vec, + normalized_clauses: Vec, + variable_encodings: Vec, + clause_encodings: Vec, + edge_pairs: Vec<(usize, usize)>, + blocker_pairs: HashMap<(usize, usize), (usize, usize)>, +} + +/// Result of reducing KSatisfiability to TimetableDesign. +#[derive(Debug, Clone)] +pub struct Reduction3SATToTimetableDesign { + target: TimetableDesign, + layout: ReductionLayout, +} + +fn literal_var_index(literal: i32) -> usize { + literal.unsigned_abs() as usize - 1 +} + +#[cfg(any(test, feature = "example-db"))] +fn evaluate_clause(clause: &CNFClause, assignment: &[usize]) -> bool { + clause.literals.iter().any(|&literal| { + let value = assignment[literal_var_index(literal)] == 1; + if literal > 0 { + value + } else { + !value + } + }) +} + +fn eliminate_pure_literals(source: &KSatisfiability) -> (Vec, Vec>) { + let mut clauses = source.clauses().to_vec(); + let mut assignments = vec![None; source.num_vars()]; + + loop { + let mut positive = vec![0usize; source.num_vars()]; + let mut negative = vec![0usize; source.num_vars()]; + for clause in &clauses { + for &literal in &clause.literals { + let var = literal_var_index(literal); + if literal > 0 { + positive[var] += 1; + } else { + negative[var] += 1; + } + } + } + + let mut changed = false; + for var in 0..source.num_vars() { + if assignments[var].is_some() { + continue; + } + match (positive[var] > 0, negative[var] > 0) { + (true, false) => { + assignments[var] = Some(1); + changed = true; + } + (false, true) => { + assignments[var] = Some(0); + changed = true; + } + _ => {} + } + } + + if !changed { + break; + } + + clauses.retain(|clause| { + 
!clause.literals.iter().any(|&literal| { + let var = literal_var_index(literal); + match assignments[var] { + Some(1) => literal > 0, + Some(0) => literal < 0, + None => false, + Some(_) => unreachable!(), + } + }) + }); + } + + (clauses, assignments) +} + +fn normalize_formula(source: &KSatisfiability) -> NormalizedFormula { + let (mut clauses, pure_assignments) = eliminate_pure_literals(source); + let source_num_vars = source.num_vars(); + let mut transformed_to_original = Vec::new(); + let mut next_var = source_num_vars + 1; + + for original_var in 1..=source_num_vars { + let mut occurrences = Vec::new(); + for (clause_idx, clause) in clauses.iter().enumerate() { + for (lit_idx, &literal) in clause.literals.iter().enumerate() { + if literal_var_index(literal) + 1 == original_var { + occurrences.push((clause_idx, lit_idx, literal > 0)); + } + } + } + + if occurrences.is_empty() { + continue; + } + + if occurrences.len() <= 3 { + let replacement = next_var; + next_var += 1; + transformed_to_original.push(original_var - 1); + for (clause_idx, lit_idx, is_positive) in occurrences { + clauses[clause_idx].literals[lit_idx] = if is_positive { + replacement as i32 + } else { + -(replacement as i32) + }; + } + continue; + } + + let replacements: Vec = (0..occurrences.len()) + .map(|_| { + let id = next_var; + next_var += 1; + transformed_to_original.push(original_var - 1); + id + }) + .collect(); + + for ((clause_idx, lit_idx, is_positive), replacement) in + occurrences.into_iter().zip(replacements.iter().copied()) + { + clauses[clause_idx].literals[lit_idx] = if is_positive { + replacement as i32 + } else { + -(replacement as i32) + }; + } + + for idx in 0..replacements.len() { + let current = replacements[idx] as i32; + let next = replacements[(idx + 1) % replacements.len()] as i32; + clauses.push(CNFClause::new(vec![current, -next])); + } + } + + for clause in &mut clauses { + for literal in &mut clause.literals { + let sign = if *literal < 0 { -1 } else { 1 }; + let 
temp_var = literal.unsigned_abs() as usize; + debug_assert!( + temp_var > source_num_vars, + "all residual literals should have been replaced by transformed variables" + ); + let compact_var = temp_var - source_num_vars; + *literal = sign * compact_var as i32; + } + } + + for transformed_var in 1..=transformed_to_original.len() { + let mut positive = 0usize; + let mut negative = 0usize; + for clause in &clauses { + for &literal in &clause.literals { + if literal_var_index(literal) + 1 == transformed_var { + if literal > 0 { + positive += 1; + } else { + negative += 1; + } + } + } + } + debug_assert!( + positive <= 2, + "normalized variable {transformed_var} has {positive} positive occurrences" + ); + debug_assert!( + negative <= 2, + "normalized variable {transformed_var} has {negative} negative occurrences" + ); + debug_assert!( + positive > 0 && negative > 0, + "pure literals should have been eliminated before gadget construction" + ); + } + + NormalizedFormula { + clauses, + transformed_to_original, + pure_assignments, + source_num_vars, + } +} + +#[cfg(any(test, feature = "example-db"))] +fn choose_clause_edge_color( + clause: &CNFClause, + assignment: &[usize], + colors: &[usize], +) -> Option { + clause + .literals + .iter() + .zip(colors.iter().copied()) + .find_map(|(&literal, color)| { + let value = assignment[literal_var_index(literal)] == 1; + let satisfied = if literal > 0 { value } else { !value }; + satisfied.then_some(color) + }) +} + +fn add_two_list_edge( + graph: &mut CoreGraph, + all_colors: &[usize], + x: usize, + y: usize, + first: usize, + second: usize, +) -> EdgeEncoding { + let x_prime = graph.add_vertex(); + let y_prime = graph.add_vertex(); + + let left_outer = graph.add_edge(x, x_prime); + let middle = graph.add_edge(x_prime, y_prime); + let right_outer = graph.add_edge(y_prime, y); + + for &color in all_colors { + if color != first && color != second { + graph.block_color(x_prime, color); + graph.block_color(y_prime, color); + } + } + + 
EdgeEncoding::TwoList { + left_outer, + middle, + right_outer, + first, + second, + } +} + +fn add_direct_clause_edge( + graph: &mut CoreGraph, + all_colors: &[usize], + x: usize, + y: usize, + allowed: Vec, +) -> EdgeEncoding { + let edge = graph.add_edge(x, y); + for &color in all_colors { + if !allowed.contains(&color) { + graph.block_color(y, color); + } + } + EdgeEncoding::Direct { edge, allowed } +} + +fn core_edge_color( + solution: &[usize], + pair: (usize, usize), + num_tasks: usize, + num_periods: usize, +) -> usize { + (0..num_periods) + .find(|&period| solution[((pair.0 * num_tasks) + pair.1) * num_periods + period] == 1) + .expect("each required pair should be scheduled exactly once") +} + +#[cfg(any(test, feature = "example-db"))] +fn encode_edge_color(colors: &mut [Option], encoding: &EdgeEncoding, chosen_color: usize) { + match encoding { + EdgeEncoding::Direct { edge, allowed } => { + assert!( + allowed.contains(&chosen_color), + "chosen color {chosen_color} must belong to direct edge list {allowed:?}" + ); + colors[*edge] = Some(chosen_color); + } + EdgeEncoding::TwoList { + left_outer, + middle, + right_outer, + first, + second, + } => { + assert!( + chosen_color == *first || chosen_color == *second, + "chosen color {chosen_color} must belong to two-list edge {{{first}, {second}}}" + ); + let other = if chosen_color == *first { + *second + } else { + *first + }; + colors[*left_outer] = Some(chosen_color); + colors[*right_outer] = Some(chosen_color); + colors[*middle] = Some(other); + } + } +} + +#[cfg(any(test, feature = "example-db"))] +fn edge_from_assignment(encoding: &EdgeEncoding, choose_first: bool) -> usize { + match encoding { + EdgeEncoding::Direct { allowed, .. } => allowed[usize::from(!choose_first)], + EdgeEncoding::TwoList { first, second, .. 
} => { + if choose_first { + *first + } else { + *second + } + } + } +} + +fn bipartition(graph: &CoreGraph) -> Vec { + let mut side = vec![None; graph.blocked_colors.len()]; + let mut adjacency = vec![Vec::new(); graph.blocked_colors.len()]; + for &(u, v) in &graph.edges { + adjacency[u].push(v); + adjacency[v].push(u); + } + + for start in 0..graph.blocked_colors.len() { + if side[start].is_some() { + continue; + } + side[start] = Some(false); + let mut queue = VecDeque::from([start]); + while let Some(vertex) = queue.pop_front() { + let current = side[vertex].expect("vertex has been assigned a side"); + for &next in &adjacency[vertex] { + match side[next] { + Some(existing) => { + assert_ne!(existing, current, "core graph must remain bipartite"); + } + None => { + side[next] = Some(!current); + queue.push_back(next); + } + } + } + } + } + + side.into_iter() + .map(|entry| entry.expect("every vertex should receive a bipartition side")) + .collect() +} + +fn build_layout(source: &KSatisfiability) -> ReductionLayout { + let normalized = normalize_formula(source); + let num_transformed_vars = normalized.transformed_to_original.len(); + if num_transformed_vars == 0 && normalized.clauses.is_empty() { + return ReductionLayout { + source_num_vars: normalized.source_num_vars, + num_periods: 1, + craftsman_avail: Vec::new(), + task_avail: Vec::new(), + requirements: Vec::new(), + pure_assignments: normalized.pure_assignments, + transformed_to_original: normalized.transformed_to_original, + normalized_clauses: normalized.clauses, + variable_encodings: Vec::new(), + clause_encodings: Vec::new(), + edge_pairs: Vec::new(), + blocker_pairs: HashMap::new(), + }; + } + + let num_periods = 4 * num_transformed_vars.max(1); + let all_colors: Vec = (0..num_periods).collect(); + + let mut occurrences_by_var: Vec> = + vec![Vec::new(); num_transformed_vars]; + for (clause_idx, clause) in normalized.clauses.iter().enumerate() { + for (lit_idx, &literal) in 
clause.literals.iter().enumerate() { + occurrences_by_var[literal_var_index(literal)].push((clause_idx, lit_idx, literal > 0)); + } + } + + let mut clause_literal_colors: Vec> = normalized + .clauses + .iter() + .map(|clause| vec![usize::MAX; clause.literals.len()]) + .collect(); + + let mut variable_colors = Vec::with_capacity(num_transformed_vars); + for (index, occurrences) in occurrences_by_var.iter().enumerate() { + let base = 4 * index; + let neg2 = base; + let neg1 = base + 1; + let pos2 = base + 2; + let pos1 = base + 3; + + let mut positive_occurrences = Vec::new(); + let mut negative_occurrences = Vec::new(); + for &(clause_idx, lit_idx, is_positive) in occurrences { + if is_positive { + positive_occurrences.push((clause_idx, lit_idx)); + } else { + negative_occurrences.push((clause_idx, lit_idx)); + } + } + + if let Some(&(clause_idx, lit_idx)) = positive_occurrences.first() { + clause_literal_colors[clause_idx][lit_idx] = pos1; + } + if let Some(&(clause_idx, lit_idx)) = positive_occurrences.get(1) { + clause_literal_colors[clause_idx][lit_idx] = pos2; + } + if let Some(&(clause_idx, lit_idx)) = negative_occurrences.first() { + clause_literal_colors[clause_idx][lit_idx] = neg1; + } + if let Some(&(clause_idx, lit_idx)) = negative_occurrences.get(1) { + clause_literal_colors[clause_idx][lit_idx] = neg2; + } + + variable_colors.push((pos1, pos2, neg1, neg2)); + } + + let mut graph = CoreGraph::new(); + let center = graph.add_vertex(); + let mut variable_encodings = Vec::with_capacity(num_transformed_vars); + + for &(pos1, pos2, neg1, neg2) in &variable_colors { + let a = graph.add_vertex(); + let b = graph.add_vertex(); + let c = graph.add_vertex(); + let d = graph.add_vertex(); + let e = graph.add_vertex(); + + let ab = add_two_list_edge(&mut graph, &all_colors, a, b, pos1, neg2); + let bc = add_two_list_edge(&mut graph, &all_colors, b, c, pos2, pos1); + let cd = add_two_list_edge(&mut graph, &all_colors, c, d, pos1, pos2); + let de = 
add_two_list_edge(&mut graph, &all_colors, d, e, pos2, neg1); + let vb = add_two_list_edge(&mut graph, &all_colors, center, b, neg2, pos2); + let vd = add_two_list_edge(&mut graph, &all_colors, center, d, neg1, pos1); + + variable_encodings.push(VariableEncoding { + vb, + vd, + ab, + bc, + cd, + de, + neg2, + }); + } + + let mut clause_encodings = Vec::with_capacity(normalized.clauses.len()); + for (clause_idx, _clause) in normalized.clauses.iter().enumerate() { + let clause_vertex = graph.add_vertex(); + let colors = clause_literal_colors[clause_idx].clone(); + debug_assert!(colors.iter().all(|&color| color != usize::MAX)); + + let edge = match colors.len() { + 1 => add_direct_clause_edge(&mut graph, &all_colors, center, clause_vertex, colors), + 2 => add_two_list_edge( + &mut graph, + &all_colors, + center, + clause_vertex, + colors[0], + colors[1], + ), + 3 => add_direct_clause_edge(&mut graph, &all_colors, center, clause_vertex, colors), + len => panic!("expected clause size 1, 2, or 3 after normalization, got {len}"), + }; + + clause_encodings.push(ClauseEncoding { edge }); + } + + let side = bipartition(&graph); + let mut vertex_to_craftsman = vec![None; graph.blocked_colors.len()]; + let mut vertex_to_task = vec![None; graph.blocked_colors.len()]; + + let mut num_craftsmen = 0usize; + let mut num_tasks = 0usize; + for (vertex, is_task_side) in side.iter().copied().enumerate() { + if is_task_side { + vertex_to_task[vertex] = Some(num_tasks); + num_tasks += 1; + } else { + vertex_to_craftsman[vertex] = Some(num_craftsmen); + num_craftsmen += 1; + } + } + + let mut craftsman_avail = vec![vec![true; num_periods]; num_craftsmen]; + let mut task_avail = vec![vec![true; num_periods]; num_tasks]; + let mut requirements = vec![vec![0u64; num_tasks]; num_craftsmen]; + let mut edge_pairs = vec![(usize::MAX, usize::MAX); graph.edges.len()]; + + for (edge_idx, &(u, v)) in graph.edges.iter().enumerate() { + let (craft, task) = if !side[u] { + ( + 
vertex_to_craftsman[u].expect("left vertex has craftsman index"), + vertex_to_task[v].expect("right vertex has task index"), + ) + } else { + ( + vertex_to_craftsman[v].expect("left vertex has craftsman index"), + vertex_to_task[u].expect("right vertex has task index"), + ) + }; + requirements[craft][task] = 1; + edge_pairs[edge_idx] = (craft, task); + } + + let mut blocker_pairs = HashMap::new(); + for (vertex, blocked_colors) in graph.blocked_colors.iter().enumerate() { + for &color in blocked_colors { + if side[vertex] { + let task = vertex_to_task[vertex].expect("right vertex has task index"); + craftsman_avail.push({ + let mut row = vec![false; num_periods]; + row[color] = true; + row + }); + requirements.push(vec![0u64; task_avail.len()]); + let craft = requirements.len() - 1; + requirements[craft][task] = 1; + blocker_pairs.insert((vertex, color), (craft, task)); + } else { + let craft = vertex_to_craftsman[vertex].expect("left vertex has craftsman index"); + task_avail.push({ + let mut row = vec![false; num_periods]; + row[color] = true; + row + }); + for requirement_row in &mut requirements { + requirement_row.push(0); + } + let task = task_avail.len() - 1; + requirements[craft][task] = 1; + blocker_pairs.insert((vertex, color), (craft, task)); + } + } + } + + ReductionLayout { + source_num_vars: normalized.source_num_vars, + num_periods, + craftsman_avail, + task_avail, + requirements, + pure_assignments: normalized.pure_assignments, + transformed_to_original: normalized.transformed_to_original, + normalized_clauses: normalized.clauses, + variable_encodings, + clause_encodings, + edge_pairs, + blocker_pairs, + } +} + +impl Reduction3SATToTimetableDesign { + #[cfg(any(test, feature = "example-db"))] + fn construct_target_solution(&self, source_assignment: &[usize]) -> Option> { + if source_assignment.len() != self.layout.source_num_vars { + return None; + } + if self + .layout + .pure_assignments + .iter() + .enumerate() + .any(|(var, fixed)| 
fixed.is_some_and(|value| source_assignment[var] != value)) + { + return None; + } + + let transformed_assignment: Vec = self + .layout + .transformed_to_original + .iter() + .map(|&original| source_assignment[original]) + .collect(); + + if self + .layout + .normalized_clauses + .iter() + .any(|clause| !evaluate_clause(clause, &transformed_assignment)) + { + return None; + } + + let mut colors = vec![None; self.layout.edge_pairs.len()]; + + for (transformed_var, encoding) in self.layout.variable_encodings.iter().enumerate() { + let choose_first = transformed_assignment[transformed_var] == 1; + for edge in [ + &encoding.ab, + &encoding.bc, + &encoding.cd, + &encoding.de, + &encoding.vb, + &encoding.vd, + ] { + let chosen = edge_from_assignment(edge, choose_first); + encode_edge_color(&mut colors, edge, chosen); + } + } + + for (clause, encoding) in self + .layout + .normalized_clauses + .iter() + .zip(self.layout.clause_encodings.iter()) + { + let allowed = match &encoding.edge { + EdgeEncoding::Direct { allowed, .. } => allowed.clone(), + EdgeEncoding::TwoList { first, second, .. 
} => vec![*first, *second], + }; + let chosen = choose_clause_edge_color(clause, &transformed_assignment, &allowed)?; + encode_edge_color(&mut colors, &encoding.edge, chosen); + } + + if colors.iter().any(Option::is_none) { + return None; + } + + let num_tasks = self.target.num_tasks(); + let num_periods = self.target.num_periods(); + let mut config = vec![0usize; self.target.dims().len()]; + + for (edge_idx, color) in colors.into_iter().enumerate() { + let (craft, task) = self.layout.edge_pairs[edge_idx]; + let color = color.expect("all core edges should be colored"); + config[((craft * num_tasks) + task) * num_periods + color] = 1; + } + + for (&(_vertex, color), &(craft, task)) in &self.layout.blocker_pairs { + config[((craft * num_tasks) + task) * num_periods + color] = 1; + } + + Some(config) + } +} + +impl ReductionResult for Reduction3SATToTimetableDesign { + type Source = KSatisfiability; + type Target = TimetableDesign; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let num_tasks = self.target.num_tasks(); + let num_periods = self.target.num_periods(); + + let mut transformed_assignment = vec![0usize; self.layout.transformed_to_original.len()]; + for (index, encoding) in self.layout.variable_encodings.iter().enumerate() { + let vb_pair = match &encoding.vb { + EdgeEncoding::Direct { edge, .. } => self.layout.edge_pairs[*edge], + EdgeEncoding::TwoList { left_outer, .. 
} => self.layout.edge_pairs[*left_outer], + }; + let vb_color = core_edge_color(target_solution, vb_pair, num_tasks, num_periods); + transformed_assignment[index] = usize::from(vb_color == encoding.neg2); + } + + let mut source_assignment = vec![0usize; self.layout.source_num_vars]; + for (var, fixed) in self.layout.pure_assignments.iter().copied().enumerate() { + if let Some(value) = fixed { + source_assignment[var] = value; + } + } + + let mut seen_transformed = vec![false; self.layout.source_num_vars]; + for (value, &original_var) in transformed_assignment + .iter() + .zip(self.layout.transformed_to_original.iter()) + { + if !seen_transformed[original_var] { + source_assignment[original_var] = *value; + seen_transformed[original_var] = true; + } + } + + source_assignment + } +} + +#[reduction(overhead = { + num_periods = "4 * num_literals", + num_craftsmen = "64 * num_literals^2 + 32 * num_literals + 1", + num_tasks = "64 * num_literals^2 + 32 * num_literals + 1", +})] +impl ReduceTo for KSatisfiability { + type Result = Reduction3SATToTimetableDesign; + + fn reduce_to(&self) -> Self::Result { + let layout = build_layout(self); + let target = TimetableDesign::new( + layout.num_periods, + layout.craftsman_avail.len(), + layout.task_avail.len(), + layout.craftsman_avail.clone(), + layout.task_avail.clone(), + layout.requirements.clone(), + ); + + Reduction3SATToTimetableDesign { target, layout } + } +} + +#[cfg(any(test, feature = "example-db"))] +#[allow(dead_code)] +pub(super) fn construct_timetable_from_assignment( + target: &TimetableDesign, + assignment: &[usize], + source: &KSatisfiability, +) -> Option> { + let reduction = ReduceTo::::reduce_to(source); + if reduction.target_problem().num_periods() != target.num_periods() + || reduction.target_problem().num_craftsmen() != target.num_craftsmen() + || reduction.target_problem().num_tasks() != target.num_tasks() + || reduction.target_problem().craftsman_avail() != target.craftsman_avail() + || 
reduction.target_problem().task_avail() != target.task_avail() + || reduction.target_problem().requirements() != target.requirements() + { + return None; + } + reduction.construct_target_solution(assignment) +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::models::formula::CNFClause; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_timetabledesign", + build: || { + let source = KSatisfiability::::new( + 2, + vec![ + CNFClause::new(vec![1, 2, 2]), + CNFClause::new(vec![-1, -2, -2]), + ], + ); + let reduction = ReduceTo::::reduce_to(&source); + let source_config = vec![1, 0]; + let target_config = reduction + .construct_target_solution(&source_config) + .expect("canonical satisfying assignment should lift to timetable"); + + crate::example_db::specs::rule_example_with_witness::<_, TimetableDesign>( + source, + SolutionPair { + source_config, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_timetabledesign.rs"] +mod tests; diff --git a/src/rules/minimumvertexcover_longestcommonsubsequence.rs b/src/rules/minimumvertexcover_longestcommonsubsequence.rs new file mode 100644 index 000000000..0c99aa568 --- /dev/null +++ b/src/rules/minimumvertexcover_longestcommonsubsequence.rs @@ -0,0 +1,103 @@ +//! Reduction from MinimumVertexCover (unit-weight) to LongestCommonSubsequence. 
+ +use crate::models::graph::MinimumVertexCover; +use crate::models::misc::LongestCommonSubsequence; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; +use crate::types::One; + +#[derive(Debug, Clone)] +pub struct ReductionVCToLCS { + target: LongestCommonSubsequence, + num_vertices: usize, +} + +impl ReductionResult for ReductionVCToLCS { + type Source = MinimumVertexCover; + type Target = LongestCommonSubsequence; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let mut cover = vec![1; self.num_vertices]; + for &symbol in target_solution { + if symbol >= self.num_vertices { + break; + } + cover[symbol] = 0; + } + cover + } +} + +#[reduction( + overhead = { + alphabet_size = "num_vertices", + num_strings = "num_edges + 1", + max_length = "num_vertices", + total_length = "num_vertices + 2 * num_edges * num_vertices - 2 * num_edges", + } +)] +impl ReduceTo for MinimumVertexCover { + type Result = ReductionVCToLCS; + + fn reduce_to(&self) -> Self::Result { + let num_vertices = self.graph().num_vertices(); + let mut strings = Vec::with_capacity(self.graph().num_edges() + 1); + strings.push((0..num_vertices).collect()); + + for (left, right) in self.graph().edges() { + // The backward direction relies on each edge string forcing the + // larger endpoint to appear before the smaller one. 
+ let (u, v) = if left < right { + (left, right) + } else { + (right, left) + }; + let mut edge_string = string_without_vertex(num_vertices, u); + edge_string.extend(string_without_vertex(num_vertices, v)); + strings.push(edge_string); + } + + let target = LongestCommonSubsequence::new(num_vertices, strings); + ReductionVCToLCS { + target, + num_vertices, + } + } +} + +fn string_without_vertex(num_vertices: usize, omitted: usize) -> Vec { + (0..num_vertices) + .filter(|&vertex| vertex != omitted) + .collect() +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumvertexcover_to_longestcommonsubsequence", + build: || { + let source = MinimumVertexCover::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![One; 4], + ); + crate::example_db::specs::rule_example_with_witness::<_, LongestCommonSubsequence>( + source, + SolutionPair { + source_config: vec![0, 1, 1, 0], + target_config: vec![0, 3, 4, 4], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumvertexcover_longestcommonsubsequence.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 5edde0754..81e25cb0f 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -42,13 +42,19 @@ mod kcoloring_casts; pub(crate) mod kcoloring_partitionintocliques; pub(crate) mod kcoloring_twodimensionalconsecutivesets; mod knapsack_qubo; +pub(crate) mod ksatisfiability_acyclicpartition; mod ksatisfiability_casts; +pub(crate) mod ksatisfiability_cyclicordering; pub(crate) mod ksatisfiability_kclique; pub(crate) mod ksatisfiability_kernel; pub(crate) mod ksatisfiability_minimumvertexcover; +pub(crate) mod ksatisfiability_oneinthreesatisfiability; +pub(crate) mod ksatisfiability_preemptivescheduling; +pub(crate) mod ksatisfiability_quadraticcongruences; pub(crate) mod ksatisfiability_qubo; pub(crate) mod ksatisfiability_simultaneousincongruences; 
pub(crate) mod ksatisfiability_subsetsum; +pub(crate) mod ksatisfiability_timetabledesign; pub(crate) mod longestcommonsubsequence_maximumindependentset; pub(crate) mod maxcut_minimumcutintoboundedsets; pub(crate) mod maximumclique_maximumindependentset; @@ -63,12 +69,14 @@ mod maximumsetpacking_casts; pub(crate) mod maximumsetpacking_qubo; pub(crate) mod minimummultiwaycut_qubo; pub(crate) mod minimumvertexcover_ensemblecomputation; +pub(crate) mod minimumvertexcover_longestcommonsubsequence; pub(crate) mod minimumvertexcover_maximumindependentset; pub(crate) mod minimumvertexcover_minimumfeedbackarcset; pub(crate) mod minimumvertexcover_minimumfeedbackvertexset; pub(crate) mod minimumvertexcover_minimumhittingset; pub(crate) mod minimumvertexcover_minimumsetcovering; pub(crate) mod naesatisfiability_maxcut; +pub(crate) mod naesatisfiability_partitionintoperfectmatchings; pub(crate) mod naesatisfiability_setsplitting; pub(crate) mod paintshop_qubo; pub(crate) mod partition_binpacking; @@ -88,8 +96,10 @@ pub(crate) mod sat_coloring; pub(crate) mod sat_ksat; pub(crate) mod sat_maximumindependentset; pub(crate) mod sat_minimumdominatingset; +pub(crate) mod satisfiability_integralflowhomologousarcs; pub(crate) mod satisfiability_naesatisfiability; pub(crate) mod satisfiability_nontautology; +pub(crate) mod setsplitting_betweenness; mod spinglass_casts; pub(crate) mod spinglass_maxcut; pub(crate) mod spinglass_qubo; @@ -99,6 +109,7 @@ pub(crate) mod subsetsum_integerexpressionmembership; pub(crate) mod subsetsum_partition; #[cfg(test)] pub(crate) mod test_helpers; +pub(crate) mod threedimensionalmatching_threepartition; pub(crate) mod threepartition_flowshopscheduling; pub(crate) mod threepartition_jobshopscheduling; pub(crate) mod threepartition_resourceconstrainedscheduling; @@ -320,6 +331,8 @@ pub(crate) mod subgraphisomorphism_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod sumofsquarespartition_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod 
threedimensionalmatching_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod timetabledesign_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod travelingsalesman_ilp; @@ -371,12 +384,18 @@ pub(crate) fn canonical_rule_example_specs() -> Vec Vec Vec Vec, + #[cfg(any(test, feature = "example-db"))] + clauses: Vec, + #[cfg(any(test, feature = "example-db"))] + positive_chains: Vec>, + #[cfg(any(test, feature = "example-db"))] + negative_chains: Vec>, + num_vertices: usize, + edges: Vec<(usize, usize)>, +} + +/// Result of reducing NAE-Satisfiability to PartitionIntoPerfectMatchings. +#[derive(Debug, Clone)] +pub struct ReductionNAESATToPartitionIntoPerfectMatchings { + target: PartitionIntoPerfectMatchings, + layout: ReductionLayout, +} + +impl ReductionResult for ReductionNAESATToPartitionIntoPerfectMatchings { + type Source = NAESatisfiability; + type Target = PartitionIntoPerfectMatchings; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + self.layout + .variables + .iter() + .map(|variable| usize::from(target_solution[variable.t] == 0)) + .collect() + } +} + +impl ReductionNAESATToPartitionIntoPerfectMatchings { + #[cfg(any(test, feature = "example-db"))] + fn construct_target_solution(&self, source_solution: &[usize]) -> Vec { + assert_eq!( + source_solution.len(), + self.layout.variables.len(), + "source solution has {} variables but reduction expects {}", + source_solution.len(), + self.layout.variables.len() + ); + + let mut target_solution = vec![usize::MAX; self.layout.num_vertices]; + let mut true_groups = Vec::with_capacity(self.layout.variables.len()); + let mut false_groups = Vec::with_capacity(self.layout.variables.len()); + + for (index, variable) in self.layout.variables.iter().enumerate() { + let true_group = if source_solution[index] == 1 { 0 } else { 1 }; + let false_group = 1 - true_group; + true_groups.push(true_group); + false_groups.push(false_group); + + 
            // Mirror the variable gadget: t/t' take the chosen "true" group,
            // f/f' take the complementary group, so each gadget edge
            // (t,t'), (f,f') matches inside a single group.
            target_solution[variable.t] = true_group;
            target_solution[variable.t_prime] = true_group;
            target_solution[variable.f] = false_group;
            target_solution[variable.f_prime] = false_group;
        }

        // Signal pairs (s, s') copy the group of the literal they represent:
        // positive literals follow the variable's true group, negative ones
        // the false group.
        for clause in &self.layout.clauses {
            for (signal, &literal) in clause.signals.iter().zip(clause.literals.iter()) {
                let variable_index = literal.unsigned_abs() as usize - 1;
                let signal_group = if literal > 0 {
                    true_groups[variable_index]
                } else {
                    false_groups[variable_index]
                };
                target_solution[signal.s] = signal_group;
                target_solution[signal.s_prime] = signal_group;
            }
        }

        // Chain pairs on the positive (t-side) chain take the opposite group
        // of their variable; their (mu, mu') edge then matches inside one group.
        for (variable_index, chain_pairs) in self.layout.positive_chains.iter().enumerate() {
            for pair in chain_pairs {
                target_solution[pair.mu] = false_groups[variable_index];
                target_solution[pair.mu_prime] = false_groups[variable_index];
            }
        }

        // Symmetric handling for the negative (f-side) chains.
        for (variable_index, chain_pairs) in self.layout.negative_chains.iter().enumerate() {
            for pair in chain_pairs {
                target_solution[pair.mu] = true_groups[variable_index];
                target_solution[pair.mu_prime] = true_groups[variable_index];
            }
        }

        // Clause gadget: the first three clause vertices take the complement
        // of their signal's group; the fourth (w3) balances whichever side is
        // short. An NAE-satisfying assignment yields exactly one or two
        // zero-group entries per normalized clause — anything else is a bug
        // in the caller's source assignment.
        for clause in &self.layout.clauses {
            let clause_groups = clause.signals.map(|signal| 1 - target_solution[signal.s]);
            let zero_count = clause_groups.iter().filter(|&&group| group == 0).count();
            let w3_group = match zero_count {
                1 => 0,
                2 => 1,
                _ => panic!("source assignment is not NAE-satisfying for normalized clauses"),
            };

            for (vertex, &group) in clause
                .clause_vertices
                .iter()
                .take(3)
                .zip(clause_groups.iter())
            {
                target_solution[*vertex] = group;
            }
            target_solution[clause.clause_vertices[3]] = w3_group;
        }

        // Vertices start at usize::MAX, so any value > 1 means some vertex
        // was never assigned by the passes above.
        assert!(
            target_solution.iter().all(|&group| group <= 1),
            "constructed target solution left some vertices unassigned"
        );

        target_solution
    }
}

/// Pad 2-literal clauses to 3 literals by duplicating the first literal
/// (for NAE semantics, `{a, a, b}` is violated exactly when `a == b`, the
/// same condition as `{a, b}`); panic on any other clause size.
fn normalize_clauses(problem: &NAESatisfiability) -> Vec<[i32; 3]> {
    problem
        .clauses()
        .iter()
        .map(|clause| match clause.literals.as_slice() {
            [a, b] => [*a, *a, *b],
            [a, b, c] => [*a, *b, *c],
            literals => panic!(
                "NAESatisfiability -> PartitionIntoPerfectMatchings expects clauses of size 2 or 3, got {}",
                literals.len()
            ),
        })
        .collect()
}

/// Allocate every gadget vertex and edge of the target graph.
///
/// `next_vertex` is the single running counter, so allocation order fixes
/// the vertex numbering: 4 per variable (t, t', f, f'), then 2 per literal
/// signal, then 4 per clause (a K4), then 2 per chain link. Its final value
/// becomes `num_vertices`.
fn build_layout(problem: &NAESatisfiability) -> ReductionLayout {
    let num_vars = problem.num_vars();
    let clauses = normalize_clauses(problem);
    let num_clauses = clauses.len();

    let mut next_vertex = 0usize;
    // 3 edges per variable gadget + 21 per clause (3 signal + 6 K4 +
    // 3 signal-to-clause + 9 chain edges, 3 per literal).
    let mut edges = Vec::with_capacity(3 * num_vars + 21 * num_clauses);
    let mut variables = Vec::with_capacity(num_vars);

    // Variable gadgets: edges (t,t'), (f,f'), (t,f).
    for _ in 0..num_vars {
        let variable = VariableVertices {
            t: next_vertex,
            t_prime: next_vertex + 1,
            f: next_vertex + 2,
            f_prime: next_vertex + 3,
        };
        next_vertex += 4;

        edges.push((variable.t, variable.t_prime));
        edges.push((variable.f, variable.f_prime));
        edges.push((variable.t, variable.f));
        variables.push(variable);
    }

    let mut clause_layouts = Vec::with_capacity(num_clauses);

    // Signal pairs: one (s, s') edge per literal of each normalized clause.
    for &literals in &clauses {
        let mut signals = [SignalVertices { s: 0, s_prime: 0 }; 3];
        for (literal_index, _) in literals.iter().enumerate() {
            signals[literal_index] = SignalVertices {
                s: next_vertex,
                s_prime: next_vertex + 1,
            };
            next_vertex += 2;
            edges.push((signals[literal_index].s, signals[literal_index].s_prime));
        }

        clause_layouts.push(ClauseLayout {
            literals,
            signals,
            // Placeholder; filled in by the clause-vertex pass below.
            clause_vertices: [0; 4],
        });
    }

    // Clause gadgets: a K4 on four fresh vertices, the first three of which
    // are also joined to their literal's signal vertex s.
    for clause_layout in &mut clause_layouts {
        let clause_vertices = [
            next_vertex,
            next_vertex + 1,
            next_vertex + 2,
            next_vertex + 3,
        ];
        next_vertex += 4;

        for a in 0..4 {
            for b in (a + 1)..4 {
                edges.push((clause_vertices[a], clause_vertices[b]));
            }
        }
        for (literal_index, &clause_vertex) in clause_vertices.iter().enumerate().take(3) {
            edges.push((clause_layout.signals[literal_index].s, clause_vertex));
        }
        clause_layout.clause_vertices = clause_vertices;
    }

    // Record, per variable, the (clause, literal-slot) positions of its
    // positive and negative occurrences, in clause order.
    let mut positive_occurrences: Vec<Vec<(usize, usize)>> = vec![Vec::new(); num_vars];
    let mut negative_occurrences: Vec<Vec<(usize, usize)>> = vec![Vec::new(); num_vars];

    for (clause_index, clause_layout) in clause_layouts.iter().enumerate() {
        for (literal_index, &literal) in clause_layout.literals.iter().enumerate() {
            let variable_index = literal.unsigned_abs() as usize - 1;
            if literal > 0 {
                positive_occurrences[variable_index].push((clause_index, literal_index));
            } else {
                negative_occurrences[variable_index].push((clause_index, literal_index));
            }
        }
    }

    let mut positive_chains: Vec<Vec<ChainPairVertices>> = vec![Vec::new(); num_vars];
    let mut negative_chains: Vec<Vec<ChainPairVertices>> = vec![Vec::new(); num_vars];

    // Chains: thread each variable's occurrences together. Every link adds a
    // (mu, mu') pair with edges (mu, mu'), (previous, mu), (signal, mu); the
    // chain then continues from that signal vertex.
    for variable_index in 0..num_vars {
        let mut source_vertex = variables[variable_index].t;
        for &(clause_index, literal_index) in &positive_occurrences[variable_index] {
            let pair = ChainPairVertices {
                mu: next_vertex,
                mu_prime: next_vertex + 1,
            };
            next_vertex += 2;

            let signal_vertex = clause_layouts[clause_index].signals[literal_index].s;
            edges.push((pair.mu, pair.mu_prime));
            edges.push((source_vertex, pair.mu));
            edges.push((signal_vertex, pair.mu));
            positive_chains[variable_index].push(pair);
            source_vertex = signal_vertex;
        }

        // Same construction starting from f for the negative occurrences.
        let mut source_vertex = variables[variable_index].f;
        for &(clause_index, literal_index) in &negative_occurrences[variable_index] {
            let pair = ChainPairVertices {
                mu: next_vertex,
                mu_prime: next_vertex + 1,
            };
            next_vertex += 2;

            let signal_vertex = clause_layouts[clause_index].signals[literal_index].s;
            edges.push((pair.mu, pair.mu_prime));
            edges.push((source_vertex, pair.mu));
            edges.push((signal_vertex, pair.mu));
            negative_chains[variable_index].push(pair);
            source_vertex = signal_vertex;
        }
    }

    ReductionLayout {
        variables,
        #[cfg(any(test, feature = "example-db"))]
        clauses: clause_layouts,
        #[cfg(any(test, feature = "example-db"))]
        positive_chains,
        #[cfg(any(test, feature = "example-db"))]
        negative_chains,
        num_vertices: next_vertex,
        edges,
    }
}

#[reduction(
    overhead = {
        num_vertices = "4 * num_vars + 16 * num_clauses",
        num_edges = "3 * num_vars + 21 * num_clauses",
        num_matchings = "2",
    }
)]
impl ReduceTo<PartitionIntoPerfectMatchings<SimpleGraph>> for NAESatisfiability {
    type Result = ReductionNAESATToPartitionIntoPerfectMatchings;

    fn reduce_to(&self) -> Self::Result {
        let layout = build_layout(self);
        // Two matchings correspond to the two sides of the NAE assignment.
        let target = PartitionIntoPerfectMatchings::new(
            SimpleGraph::new(layout.num_vertices, layout.edges.clone()),
            2,
        );

        ReductionNAESATToPartitionIntoPerfectMatchings { target, layout }
    }
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    use crate::export::SolutionPair;
    use crate::models::formula::CNFClause;

    vec![crate::example_db::specs::RuleExampleSpec {
        id: "naesatisfiability_to_partitionintoperfectmatchings",
        build: || {
            let source = NAESatisfiability::new(
                3,
                vec![
                    CNFClause::new(vec![1, 2, 3]),
                    CNFClause::new(vec![-1, 2, -3]),
                ],
            );
            let source_config = vec![1, 1, 0];
            let reduction =
                ReduceTo::<PartitionIntoPerfectMatchings<SimpleGraph>>::reduce_to(&source);
            let target_config = reduction.construct_target_solution(&source_config);

            crate::example_db::specs::assemble_rule_example(
                &source,
                reduction.target_problem(),
                vec![SolutionPair {
                    source_config,
                    target_config,
                }],
            )
        },
    }]
}

#[cfg(test)]
#[path = "../unit_tests/rules/naesatisfiability_partitionintoperfectmatchings.rs"]
mod tests;
//! variable's bottleneck flow to stay on its own channel.

use crate::models::formula::Satisfiability;
use crate::models::graph::IntegralFlowHomologousArcs;
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};
use crate::topology::DirectedGraph;

/// Arc indices (into the constructed network's arc list) of the two
/// source-to-sink channels owned by one variable.
#[derive(Debug, Clone)]
struct VariablePaths {
    // Arcs carrying the variable's unit of flow when it is assigned true.
    true_path: Vec<usize>,
    // Arcs carrying the unit of flow when it is assigned false.
    false_path: Vec<usize>,
    // First arc of the T channel; positive flow on it marks "true" during
    // solution extraction.
    true_base_arc: usize,
}

/// Computes vertex indices for the layered flow network.
///
/// Layout: [source, sink, per-variable split nodes, channel nodes for each
/// stage boundary (T/F interleaved per variable), per-clause
/// collector/distributor pairs]. A "boundary" is the cut between consecutive
/// clause stages; there are `num_clauses + 1` of them.
#[derive(Debug, Clone, Copy)]
struct NodeIndexer {
    num_vars: usize,
    num_clauses: usize,
}

impl NodeIndexer {
    /// Global flow source.
    fn source(self) -> usize {
        0
    }

    /// Global flow sink.
    fn sink(self) -> usize {
        1
    }

    /// Split node where a variable's unit of flow chooses T or F.
    fn split(self, variable: usize) -> usize {
        2 + variable
    }

    /// First index of the channel-node region.
    fn boundary_base(self) -> usize {
        2 + self.num_vars
    }

    /// Channel node for `variable` at stage boundary `boundary`; the T node
    /// sits at the even offset, its F partner immediately after.
    fn channel(self, boundary: usize, variable: usize, is_true: bool) -> usize {
        self.boundary_base() + (boundary * self.num_vars + variable) * 2 + usize::from(!is_true)
    }

    /// First index after all channel nodes (`num_clauses + 1` boundaries
    /// times `2 * num_vars` nodes): start of the clause region.
    fn clause_base(self) -> usize {
        self.boundary_base() + (self.num_clauses + 1) * self.num_vars * 2
    }

    /// Entry node of a clause's shared bottleneck.
    fn collector(self, clause: usize) -> usize {
        self.clause_base() + clause * 2
    }

    /// Exit node of a clause's shared bottleneck.
    fn distributor(self, clause: usize) -> usize {
        self.collector(clause) + 1
    }

    /// Total vertex count of the constructed network.
    fn total_vertices(self) -> usize {
        self.clause_base() + self.num_clauses * 2
    }
}

/// Result of reducing SAT to integral flow with homologous arcs.
#[derive(Debug, Clone)]
pub struct ReductionSATToIntegralFlowHomologousArcs {
    target: IntegralFlowHomologousArcs,
    // One entry per source variable, in variable order.
    variable_paths: Vec<VariablePaths>,
}

impl ReductionSATToIntegralFlowHomologousArcs {
    /// Turn a 0/1 variable assignment into an arc-flow vector: each
    /// variable's unit of flow is routed along its chosen channel, adding 1
    /// to every arc on that path (shared bottleneck arcs accumulate).
    #[cfg(any(test, feature = "example-db"))]
    fn encode_assignment(&self, assignment: &[usize]) -> Vec<usize> {
        assert_eq!(
            assignment.len(),
            self.variable_paths.len(),
            "assignment length must match num_vars",
        );

        let mut flow = vec![0usize; self.target.num_arcs()];
        for (value, paths) in assignment.iter().zip(&self.variable_paths) {
            let path = if *value == 0 {
                &paths.false_path
            } else {
                &paths.true_path
            };
            for &arc_idx in path {
                flow[arc_idx] += 1;
            }
        }
        flow
    }
}

impl ReductionResult for ReductionSATToIntegralFlowHomologousArcs {
    type Source = Satisfiability;
    type Target = IntegralFlowHomologousArcs;

    fn target_problem(&self) -> &Self::Target {
        &self.target
    }

    /// A variable is true iff its T channel's base arc carries flow;
    /// out-of-range indices are treated as zero flow.
    fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
        self.variable_paths
            .iter()
            .map(|paths| {
                usize::from(
                    target_solution
                        .get(paths.true_base_arc)
                        .copied()
                        .unwrap_or(0)
                        > 0,
                )
            })
            .collect()
    }
}

#[reduction(overhead = {
    num_vertices = "2 * num_vars * num_clauses + 3 * num_vars + 2 * num_clauses + 2",
    num_arcs = "2 * num_vars * num_clauses + 5 * num_vars + num_clauses + num_literals",
})]
impl ReduceTo<IntegralFlowHomologousArcs> for Satisfiability {
    type Result = ReductionSATToIntegralFlowHomologousArcs;

    fn reduce_to(&self) -> Self::Result {
        let indexer = NodeIndexer {
            num_vars: self.num_vars(),
            num_clauses: self.num_clauses(),
        };

        let mut arcs = Vec::<(usize, usize)>::new();
        let mut capacities = Vec::<u64>::new();
        let mut homologous_pairs = Vec::<(usize, usize)>::new();
        let mut variable_paths = Vec::<VariablePaths>::with_capacity(self.num_vars());

        // Append an arc and return its index; paths below store these
        // indices, so arc allocation order matters.
        let mut add_arc = |u: usize, v: usize, capacity: u64| -> usize {
            arcs.push((u, v));
            capacities.push(capacity);
            arcs.len() - 1
        };

        // Source gadget: source -> split -> {T channel, F channel}, all
        // capacity 1, so each variable carries exactly one unit.
        for variable in 0..self.num_vars() {
            let source_arc = add_arc(indexer.source(), indexer.split(variable), 1);
            let true_base_arc = add_arc(
                indexer.split(variable),
                indexer.channel(0, variable, true),
                1,
            );
            let false_base_arc = add_arc(
                indexer.split(variable),
                indexer.channel(0, variable, false),
                1,
            );

            variable_paths.push(VariablePaths {
                true_path: vec![source_arc, true_base_arc],
                false_path: vec![source_arc, false_base_arc],
                true_base_arc,
            });
        }

        // One stage per clause: channels for literals FALSIFYING the clause
        // pass through the bottleneck (capacity |literals| - 1), so not all
        // of them can be routed simultaneously; all other channels bypass.
        for (clause_idx, clause) in self.clauses().iter().enumerate() {
            let collector = indexer.collector(clause_idx);
            let distributor = indexer.distributor(clause_idx);
            // saturating_sub keeps an empty clause's bottleneck at 0.
            let bottleneck = add_arc(
                collector,
                distributor,
                clause.literals.len().saturating_sub(1) as u64,
            );

            let mut has_positive = vec![false; self.num_vars()];
            let mut has_negative = vec![false; self.num_vars()];
            for &literal in &clause.literals {
                let variable = literal.unsigned_abs() as usize - 1;
                if literal > 0 {
                    has_positive[variable] = true;
                } else {
                    has_negative[variable] = true;
                }
            }

            for variable in 0..self.num_vars() {
                let prev_true = indexer.channel(clause_idx, variable, true);
                let prev_false = indexer.channel(clause_idx, variable, false);
                let next_true = indexer.channel(clause_idx + 1, variable, true);
                let next_false = indexer.channel(clause_idx + 1, variable, false);

                // A negative literal is falsified by "true": route the T
                // channel through the bottleneck, with a homologous
                // entry/exit pair pinning the flow to its own channel.
                if has_negative[variable] {
                    let entry = add_arc(prev_true, collector, 1);
                    let exit = add_arc(distributor, next_true, 1);
                    homologous_pairs.push((entry, exit));
                    variable_paths[variable]
                        .true_path
                        .extend([entry, bottleneck, exit]);
                } else {
                    let bypass = add_arc(prev_true, next_true, 1);
                    variable_paths[variable].true_path.push(bypass);
                }

                // Symmetrically, a positive literal is falsified by "false".
                if has_positive[variable] {
                    let entry = add_arc(prev_false, collector, 1);
                    let exit = add_arc(distributor, next_false, 1);
                    homologous_pairs.push((entry, exit));
                    variable_paths[variable]
                        .false_path
                        .extend([entry, bottleneck, exit]);
                } else {
                    let bypass = add_arc(prev_false, next_false, 1);
                    variable_paths[variable].false_path.push(bypass);
                }
            }
        }

        // Drain both channels of every variable into the sink.
        for (variable, paths) in variable_paths.iter_mut().enumerate() {
            let true_sink = add_arc(
                indexer.channel(self.num_clauses(), variable, true),
                indexer.sink(),
                1,
            );
            let false_sink = add_arc(
                indexer.channel(self.num_clauses(), variable, false),
                indexer.sink(),
                1,
            );
            paths.true_path.push(true_sink);
            paths.false_path.push(false_sink);
        }

        // Demand one unit per variable; an empty clause makes the formula
        // unsatisfiable, so bump the requirement above the attainable max
        // flow to make the target instance infeasible as well.
        let mut requirement = self.num_vars() as u64;
        if self.clauses().iter().any(|clause| clause.is_empty()) {
            requirement += 1;
        }

        ReductionSATToIntegralFlowHomologousArcs {
            target: IntegralFlowHomologousArcs::new(
                DirectedGraph::new(indexer.total_vertices(), arcs),
                capacities,
                indexer.source(),
                indexer.sink(),
                requirement,
                homologous_pairs,
            ),
            variable_paths,
        }
    }
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    use crate::export::SolutionPair;
    use crate::models::formula::CNFClause;

    fn issue_example() -> Satisfiability {
        Satisfiability::new(
            3,
            vec![
                CNFClause::new(vec![1, 2]),
                CNFClause::new(vec![-1, 3]),
                CNFClause::new(vec![-2, -3]),
                CNFClause::new(vec![1, 3]),
            ],
        )
    }

    vec![crate::example_db::specs::RuleExampleSpec {
        id: "satisfiability_to_integralflowhomologousarcs",
        build: || {
            let source = issue_example();
            let source_config = vec![1, 0, 1];
            let target_config = ReduceTo::<IntegralFlowHomologousArcs>::reduce_to(&source)
                .encode_assignment(&source_config);
            crate::example_db::specs::rule_example_with_witness::<_, IntegralFlowHomologousArcs>(
                source,
                SolutionPair {
                    source_config,
                    target_config,
                },
            )
        },
    }]
}

#[cfg(test)]
#[path = "../unit_tests/rules/satisfiability_integralflowhomologousarcs.rs"]
mod tests;
Reduction from Set Splitting to Betweenness. +//! +//! Decompose each subset to size 2 or 3 using complementarity pairs, then +//! place a single pole element `p` in the Betweenness instance. A size-2 +//! subset `{u, v}` becomes `(u, p, v)`, forcing opposite sides of the pole. +//! A size-3 subset `{u, v, w}` becomes `(u, d, v)` and `(d, p, w)` with one +//! fresh auxiliary element `d`, which is satisfiable exactly when the three +//! elements are not monochromatic with respect to the pole. + +use crate::models::misc::Betweenness; +use crate::models::set::SetSplitting; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing SetSplitting to Betweenness. +#[derive(Debug, Clone)] +pub struct ReductionSetSplittingToBetweenness { + target: Betweenness, + source_universe_size: usize, + pole: usize, +} + +impl ReductionResult for ReductionSetSplittingToBetweenness { + type Source = SetSplitting; + type Target = Betweenness; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + assert!( + target_solution.len() > self.pole, + "Betweenness solution has {} positions but pole index is {}", + target_solution.len(), + self.pole + ); + assert!( + target_solution.len() >= self.source_universe_size, + "Betweenness solution has {} positions but source requires {} elements", + target_solution.len(), + self.source_universe_size + ); + + let pole_position = target_solution[self.pole]; + target_solution[..self.source_universe_size] + .iter() + .map(|&position| usize::from(position > pole_position)) + .collect() + } +} + +#[reduction( + overhead = { + num_elements = "normalized_universe_size + 1 + normalized_num_size3_subsets", + num_triples = "normalized_num_size2_subsets + 2 * normalized_num_size3_subsets", + } +)] +impl ReduceTo for SetSplitting { + type Result = ReductionSetSplittingToBetweenness; + + fn reduce_to(&self) -> Self::Result { + let 
(normalized_universe_size, normalized_subsets) = self.normalized_instance(); + let pole = normalized_universe_size; + let size3_subsets = normalized_subsets + .iter() + .filter(|subset| subset.len() == 3) + .count(); + let mut triples = Vec::with_capacity(normalized_subsets.len() + size3_subsets); + let mut num_elements = normalized_universe_size + 1; + + for subset in normalized_subsets { + match subset.as_slice() { + [u, v] => triples.push((*u, pole, *v)), + [u, v, w] => { + let auxiliary = num_elements; + num_elements += 1; + triples.push((*u, auxiliary, *v)); + triples.push((auxiliary, pole, *w)); + } + _ => unreachable!("normalization only produces size-2 or size-3 subsets"), + } + } + + ReductionSetSplittingToBetweenness { + target: Betweenness::new(num_elements, triples), + source_universe_size: self.universe_size(), + pole, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "setsplitting_to_betweenness", + build: || { + crate::example_db::specs::rule_example_with_witness::<_, Betweenness>( + SetSplitting::new( + 5, + vec![vec![0, 1, 2], vec![2, 3, 4], vec![0, 3, 4], vec![1, 2, 3]], + ), + SolutionPair { + source_config: vec![1, 0, 1, 0, 0], + target_config: vec![8, 2, 9, 0, 1, 4, 3, 6, 7, 5], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/setsplitting_betweenness.rs"] +mod tests; diff --git a/src/rules/threedimensionalmatching_ilp.rs b/src/rules/threedimensionalmatching_ilp.rs new file mode 100644 index 000000000..0310343e5 --- /dev/null +++ b/src/rules/threedimensionalmatching_ilp.rs @@ -0,0 +1,94 @@ +//! Reduction from ThreeDimensionalMatching to ILP. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::set::ThreeDimensionalMatching; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionThreeDimensionalMatchingToILP { + target: ILP, +} + +impl ReductionResult for ReductionThreeDimensionalMatchingToILP { + type Source = ThreeDimensionalMatching; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_triples", + num_constraints = "3 * universe_size", + } +)] +impl ReduceTo> for ThreeDimensionalMatching { + type Result = ReductionThreeDimensionalMatchingToILP; + + fn reduce_to(&self) -> Self::Result { + let num_vars = self.num_triples(); + let mut w_constraints = vec![Vec::new(); self.universe_size()]; + let mut x_constraints = vec![Vec::new(); self.universe_size()]; + let mut y_constraints = vec![Vec::new(); self.universe_size()]; + + for (triple_index, &(w, x, y)) in self.triples().iter().enumerate() { + w_constraints[w].push((triple_index, 1.0)); + x_constraints[x].push((triple_index, 1.0)); + y_constraints[y].push((triple_index, 1.0)); + } + + let mut constraints = Vec::with_capacity(3 * self.universe_size()); + constraints.extend( + w_constraints + .into_iter() + .map(|terms| LinearConstraint::eq(terms, 1.0)), + ); + constraints.extend( + x_constraints + .into_iter() + .map(|terms| LinearConstraint::eq(terms, 1.0)), + ); + constraints.extend( + y_constraints + .into_iter() + .map(|terms| LinearConstraint::eq(terms, 1.0)), + ); + + ReductionThreeDimensionalMatchingToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + 
vec![crate::example_db::specs::RuleExampleSpec { + id: "threedimensionalmatching_to_ilp", + build: || { + let source = ThreeDimensionalMatching::new( + 3, + vec![(0, 1, 2), (1, 0, 1), (2, 2, 0), (0, 0, 0), (1, 2, 2)], + ); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![1, 1, 1, 0, 0], + target_config: vec![1, 1, 1, 0, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/threedimensionalmatching_ilp.rs"] +mod tests; diff --git a/src/rules/threedimensionalmatching_threepartition.rs b/src/rules/threedimensionalmatching_threepartition.rs new file mode 100644 index 000000000..4a43698b5 --- /dev/null +++ b/src/rules/threedimensionalmatching_threepartition.rs @@ -0,0 +1,663 @@ +//! Reduction from ThreeDimensionalMatching to ThreePartition. +//! +//! This follows the classical three-step chain: +//! 1. 3DM -> ABCD-Partition +//! 2. ABCD-Partition -> 4-Partition +//! 3. 4-Partition -> 3-Partition + +use crate::models::misc::ThreePartition; +use crate::models::set::ThreeDimensionalMatching; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use std::collections::HashMap; + +#[derive(Debug, Clone, Copy)] +enum Step2Item { + A { + source_triple: usize, + w: usize, + x: usize, + y: usize, + }, + B { + w: usize, + first_occurrence: bool, + }, + C { + x: usize, + first_occurrence: bool, + }, + D { + y: usize, + first_occurrence: bool, + }, +} + +#[derive(Debug, Clone, Copy)] +enum PairingKind { + U, + UPrime, +} + +#[derive(Debug, Default, Clone, Copy)] +struct PairUsage { + saw_u: bool, + uprime_regulars: Option<[usize; 2]>, +} + +/// Result of reducing ThreeDimensionalMatching to ThreePartition. 
#[derive(Debug, Clone)]
pub struct ReductionThreeDimensionalMatchingToThreePartition {
    target: ThreePartition,
    // ABCD items of the intermediate instance, in construction order; their
    // indices are the "regular" element indices of the 3-Partition instance.
    step2_items: Vec<Step2Item>,
    // (left, right) regular-index pairs backing the 4->3-Partition pairing
    // gadget; pair i owns pairing elements u_i and u'_i.
    pair_keys: Vec<(usize, usize)>,
    num_source_triples: usize,
}

impl ReductionThreeDimensionalMatchingToThreePartition {
    // Element-index layout of the target instance:
    //   [0, num_regulars)            regular (ABCD) elements
    //   [pairing_start, filler_start) u/u' pairing elements, 2 per pair
    //   [filler_start, num_elements)  filler elements
    fn num_regulars(&self) -> usize {
        self.step2_items.len()
    }

    fn pairing_start(&self) -> usize {
        self.num_regulars()
    }

    fn filler_start(&self) -> usize {
        self.pairing_start() + 2 * self.pair_keys.len()
    }

    /// Map a flat target-element index back to its category.
    fn classify_target_element(&self, element_index: usize) -> TargetElement {
        if element_index < self.num_regulars() {
            return TargetElement::Regular {
                step2_index: element_index,
            };
        }

        if element_index < self.filler_start() {
            let pairing_offset = element_index - self.pairing_start();
            let pair_index = pairing_offset / 2;
            // Even offset = u element, odd offset = u' element.
            let kind = if pairing_offset.is_multiple_of(2) {
                PairingKind::U
            } else {
                PairingKind::UPrime
            };
            return TargetElement::Pairing { pair_index, kind };
        }

        TargetElement::Filler
    }

    /// Check that four regular elements form a "real" ABCD group — exactly
    /// one item of each kind, with matching w/x/y coordinates, and B/C/D all
    /// first occurrences — and return the selected source triple if so.
    fn decode_real_group(&self, step2_group: [usize; 4]) -> Option<usize> {
        let mut a_item = None;
        let mut b_item = None;
        let mut c_item = None;
        let mut d_item = None;

        for step2_index in step2_group {
            match self.step2_items[step2_index] {
                Step2Item::A {
                    source_triple,
                    w,
                    x,
                    y,
                } => {
                    a_item = Some((source_triple, w, x, y));
                }
                Step2Item::B {
                    w,
                    first_occurrence,
                } => {
                    b_item = Some((w, first_occurrence));
                }
                Step2Item::C {
                    x,
                    first_occurrence,
                } => {
                    c_item = Some((x, first_occurrence));
                }
                Step2Item::D {
                    y,
                    first_occurrence,
                } => {
                    d_item = Some((y, first_occurrence));
                }
            }
        }

        // Missing any of the four kinds => not a real group.
        let (source_triple, aw, ax, ay) = a_item?;
        let (bw, b_first) = b_item?;
        let (cx, c_first) = c_item?;
        let (dy, d_first) = d_item?;

        if aw != bw || ax != cx || ay != dy {
            return None;
        }

        if b_first && c_first && d_first {
            Some(source_triple)
        } else {
            None
        }
    }

    /// Build a full 3-Partition grouping that witnesses a source matching:
    /// selected triples pair their A item with the first-occurrence B/C/D
    /// items, unselected triples consume dummy (non-first) items; each ABCD
    /// quadruple is then split across two groups via its pairing u/u'
    /// elements, and unused pairs are closed off with fillers.
    #[cfg(test)]
    fn build_target_witness(&self, source_solution: &[usize]) -> Vec<usize> {
        let mut a_indices = vec![0usize; self.num_source_triples];
        let mut first_b_by_w = HashMap::new();
        let mut first_c_by_x = HashMap::new();
        let mut first_d_by_y = HashMap::new();
        let mut dummy_bs_by_w: HashMap<usize, Vec<usize>> = HashMap::new();
        let mut dummy_cs_by_x: HashMap<usize, Vec<usize>> = HashMap::new();
        let mut dummy_ds_by_y: HashMap<usize, Vec<usize>> = HashMap::new();

        // Index the ABCD items: A by source triple, B/C/D split into the
        // unique first occurrence vs. a pool of dummies per coordinate.
        for (step2_index, item) in self.step2_items.iter().copied().enumerate() {
            match item {
                Step2Item::A { source_triple, .. } => {
                    a_indices[source_triple] = step2_index;
                }
                Step2Item::B {
                    w,
                    first_occurrence,
                } => {
                    if first_occurrence {
                        first_b_by_w.insert(w, step2_index);
                    } else {
                        dummy_bs_by_w.entry(w).or_default().push(step2_index);
                    }
                }
                Step2Item::C {
                    x,
                    first_occurrence,
                } => {
                    if first_occurrence {
                        first_c_by_x.insert(x, step2_index);
                    } else {
                        dummy_cs_by_x.entry(x).or_default().push(step2_index);
                    }
                }
                Step2Item::D {
                    y,
                    first_occurrence,
                } => {
                    if first_occurrence {
                        first_d_by_y.insert(y, step2_index);
                    } else {
                        dummy_ds_by_y.entry(y).or_default().push(step2_index);
                    }
                }
            }
        }

        // One ABCD quadruple per source triple.
        let mut step2_groups = Vec::with_capacity(self.num_source_triples);
        for source_triple in 0..self.num_source_triples {
            let Step2Item::A { w, x, y, .. } = self.step2_items[a_indices[source_triple]] else {
                unreachable!("A indices are populated from A items");
            };

            let group = if source_solution[source_triple] == 1 {
                [
                    a_indices[source_triple],
                    *first_b_by_w
                        .get(&w)
                        .expect("selected triple must have a first-occurrence B item"),
                    *first_c_by_x
                        .get(&x)
                        .expect("selected triple must have a first-occurrence C item"),
                    *first_d_by_y
                        .get(&y)
                        .expect("selected triple must have a first-occurrence D item"),
                ]
            } else {
                [
                    a_indices[source_triple],
                    dummy_bs_by_w
                        .get_mut(&w)
                        .and_then(|items| items.pop())
                        .expect("unselected triple must have a dummy B item"),
                    dummy_cs_by_x
                        .get_mut(&x)
                        .and_then(|items| items.pop())
                        .expect("unselected triple must have a dummy C item"),
                    dummy_ds_by_y
                        .get_mut(&y)
                        .and_then(|items| items.pop())
                        .expect("unselected triple must have a dummy D item"),
                ]
            };

            step2_groups.push(group);
        }

        let pair_to_index: HashMap<(usize, usize), usize> = self
            .pair_keys
            .iter()
            .copied()
            .enumerate()
            .map(|(pair_index, pair)| (pair, pair_index))
            .collect();
        let mut pair_used = vec![false; self.pair_keys.len()];
        let mut target_solution = vec![0usize; self.target.num_elements()];
        let mut next_group = 0usize;

        // Split each sorted quadruple (a, b, c, d) into two 3-Partition
        // groups {a, b, u} and {c, d, u'} using the pairing pair keyed by
        // its two smallest regulars.
        for mut step2_group in step2_groups {
            step2_group.sort_unstable();
            let pair_key = (step2_group[0], step2_group[1]);
            let pair_index = *pair_to_index
                .get(&pair_key)
                .expect("chosen regular pair must exist in the pairing gadget");
            pair_used[pair_index] = true;

            let u_index = self.pairing_start() + 2 * pair_index;
            let uprime_index = u_index + 1;

            target_solution[step2_group[0]] = next_group;
            target_solution[step2_group[1]] = next_group;
            target_solution[u_index] = next_group;
            next_group += 1;

            target_solution[step2_group[2]] = next_group;
            target_solution[step2_group[3]] = next_group;
            target_solution[uprime_index] = next_group;
            next_group += 1;
        }

        // Every unused pair forms the group {u, u', filler}.
        let mut filler_index = self.filler_start();
        for (pair_index, used) in pair_used.into_iter().enumerate() {
            if used {
                continue;
            }

            let u_index = self.pairing_start() + 2 * pair_index;
            let uprime_index = u_index + 1;
            target_solution[u_index] = next_group;
            target_solution[uprime_index] = next_group;
            target_solution[filler_index] = next_group;
            filler_index += 1;
            next_group += 1;
        }

        // Sanity: all fillers and all groups were consumed exactly.
        assert_eq!(filler_index, self.target.num_elements());
        assert_eq!(next_group, self.target.num_groups());

        target_solution
    }
}

impl ReductionResult for ReductionThreeDimensionalMatchingToThreePartition {
    type Source = ThreeDimensionalMatching;
    type Target = ThreePartition;

    fn target_problem(&self) -> &Self::Target {
        &self.target
    }

    /// Reverse the 4-Partition -> 3-Partition pairing gadget, then decode the
    /// surviving real ABCD groups back into selected source triples.
    fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
        // Bucket target elements by their assigned group.
        let mut groups = vec![Vec::new(); self.target.num_groups()];
        for (element_index, &group_index) in target_solution.iter().enumerate() {
            groups[group_index].push(element_index);
        }

        let mut pair_usage: HashMap<(usize, usize), PairUsage> = HashMap::new();

        // A candidate half-group is {regular, regular, pairing element}.
        // Record, per pair key, whether the u half matched its own key and
        // which two regulars travelled with u'.
        for members in groups.into_iter().filter(|members| !members.is_empty()) {
            let mut regulars = Vec::new();
            let mut pairing = None;
            let mut has_filler = false;

            for element_index in members {
                match self.classify_target_element(element_index) {
                    TargetElement::Regular { step2_index } => regulars.push(step2_index),
                    TargetElement::Pairing { pair_index, kind } => {
                        pairing = Some((pair_index, kind))
                    }
                    TargetElement::Filler => has_filler = true,
                }
            }

            if has_filler || regulars.len() != 2 {
                continue;
            }

            let Some((pair_index, kind)) = pairing else {
                continue;
            };

            let pair_key = self.pair_keys[pair_index];
            let regular_pair = sorted_pair(regulars[0], regulars[1]);
            let usage = pair_usage.entry(pair_key).or_default();

            match kind {
                PairingKind::U => {
                    // u only counts when grouped with exactly its own pair.
                    if regular_pair == [pair_key.0, pair_key.1] {
                        usage.saw_u = true;
                    }
                }
                PairingKind::UPrime => {
                    usage.uprime_regulars = Some(regular_pair);
                }
            }
        }

        let mut source_solution = vec![0; self.num_source_triples];

        // Recombine the two halves of each used pair into a 4-element group
        // and decode it; duplicates mean the halves were inconsistent.
        for ((left, right), usage) in pair_usage {
            let Some(other_two) = usage.uprime_regulars else {
                continue;
            };
            if !usage.saw_u {
                continue;
            }

            let mut group = [left, right, other_two[0], other_two[1]];
            group.sort_unstable();
            if group.windows(2).any(|window| window[0] == window[1]) {
                continue;
            }

            if let Some(source_triple) = self.decode_real_group(group) {
                source_solution[source_triple] = 1;
            }
        }

        source_solution
    }
}

/// Category of a flat target-element index (see the layout comment on the
/// reduction struct).
#[derive(Debug, Clone, Copy)]
enum TargetElement {
    Regular {
        step2_index: usize,
    },
    Pairing {
        pair_index: usize,
        kind: PairingKind,
    },
    Filler,
}

// Checked u128 arithmetic with a panic context; the gadget's item sizes grow
// like r^4 and must not wrap silently.
fn checked_mul(lhs: u128, rhs: u128, context: &str) -> u128 {
    lhs.checked_mul(rhs)
        .unwrap_or_else(|| panic!("{context} overflowed during multiplication"))
}

fn checked_add(lhs: u128, rhs: u128, context: &str) -> u128 {
    lhs.checked_add(rhs)
        .unwrap_or_else(|| panic!("{context} overflowed during addition"))
}

fn checked_sub(lhs: u128, rhs: u128, context: &str) -> u128 {
    lhs.checked_sub(rhs)
        .unwrap_or_else(|| panic!("{context} underflowed during subtraction"))
}

fn to_u64(value: u128, context: &str) -> u64 {
    u64::try_from(value).unwrap_or_else(|_| panic!("{context} does not fit into u64"))
}

/// Order-normalize a pair of regular indices.
fn sorted_pair(a: usize, b: usize) -> [usize; 2] {
    if a <= b {
        [a, b]
    } else {
        [b, a]
    }
}

/// All unordered pairs (left < right) of regular indices; pair i backs the
/// pairing elements u_i / u'_i.
fn enumerate_pair_keys(num_regulars: usize) -> Vec<(usize, usize)> {
    // n * (n - 1) / 2 pairs; the division cannot fail, only the product can
    // overflow.
    let capacity = num_regulars
        .checked_mul(num_regulars.saturating_sub(1))
        .and_then(|value| value.checked_div(2))
        .expect("pair count overflow for 4-Partition gadget");
    let mut pairs = Vec::with_capacity(capacity);
    for left in 0..num_regulars {
        for right in left + 1..num_regulars {
            pairs.push((left, right));
} + } + pairs +} + +#[reduction(overhead = { + num_elements = "24 * num_triples * num_triples - 3 * num_triples", + num_groups = "8 * num_triples * num_triples - num_triples", +})] +impl ReduceTo for ThreeDimensionalMatching { + type Result = ReductionThreeDimensionalMatchingToThreePartition; + + fn reduce_to(&self) -> Self::Result { + let q = self.universe_size(); + let t = self.num_triples(); + + assert!(q > 0, "3DM -> ThreePartition requires universe_size > 0"); + assert!( + t > 0, + "3DM -> ThreePartition requires at least one source triple" + ); + + let mut covered_w = vec![false; q]; + let mut covered_x = vec![false; q]; + let mut covered_y = vec![false; q]; + for &(w, x, y) in self.triples() { + covered_w[w] = true; + covered_x[x] = true; + covered_y[y] = true; + } + if covered_w.iter().any(|&covered| !covered) + || covered_x.iter().any(|&covered| !covered) + || covered_y.iter().any(|&covered| !covered) + { + return ReductionThreeDimensionalMatchingToThreePartition { + target: ThreePartition::new(vec![6, 6, 6, 6, 7, 9], 20), + step2_items: Vec::new(), + pair_keys: Vec::new(), + num_source_triples: t, + }; + } + + let q128 = q as u128; + let r = checked_mul(32, q128, "r = 32q"); + let r2 = checked_mul(r, r, "r^2"); + let r3 = checked_mul(r2, r, "r^3"); + let r4 = checked_mul(r3, r, "r^4"); + let target1 = checked_mul(40, r4, "T1 = 40r^4"); + + let mut step2_items = Vec::with_capacity(4 * t); + let mut step2_values = Vec::with_capacity(4 * t); + + let mut seen_w = std::collections::HashSet::new(); + let mut seen_x = std::collections::HashSet::new(); + let mut seen_y = std::collections::HashSet::new(); + + for (source_triple, &(w, x, y)) in self.triples().iter().enumerate() { + let w128 = w as u128; + let x128 = x as u128; + let y128 = y as u128; + + let a_value = checked_sub( + checked_sub( + checked_sub( + checked_mul(10, r4, "A digit"), + checked_mul(y128, r3, "A y-term"), + "A after y", + ), + checked_mul(x128, r2, "A x-term"), + "A after x", + ), + 
checked_mul(w128, r, "A w-term"), + "A after w", + ); + step2_values.push(to_u64( + checked_add(checked_mul(16, a_value, "step2 A"), 1, "step2 A tag"), + "step2 A", + )); + step2_items.push(Step2Item::A { + source_triple, + w, + x, + y, + }); + + let w_first = seen_w.insert(w); + let b_digit = if w_first { 10 } else { 11 }; + let b_value = checked_add( + checked_mul(b_digit, r4, "B digit"), + checked_mul(w128, r, "B coordinate"), + "B value", + ); + step2_values.push(to_u64( + checked_add(checked_mul(16, b_value, "step2 B"), 2, "step2 B tag"), + "step2 B", + )); + step2_items.push(Step2Item::B { + w, + first_occurrence: w_first, + }); + + let x_first = seen_x.insert(x); + let c_digit = if x_first { 10 } else { 11 }; + let c_value = checked_add( + checked_mul(c_digit, r4, "C digit"), + checked_mul(x128, r2, "C coordinate"), + "C value", + ); + step2_values.push(to_u64( + checked_add(checked_mul(16, c_value, "step2 C"), 4, "step2 C tag"), + "step2 C", + )); + step2_items.push(Step2Item::C { + x, + first_occurrence: x_first, + }); + + let y_first = seen_y.insert(y); + let d_digit = if y_first { 10 } else { 8 }; + let d_value = checked_add( + checked_mul(d_digit, r4, "D digit"), + checked_mul(y128, r3, "D coordinate"), + "D value", + ); + step2_values.push(to_u64( + checked_add(checked_mul(16, d_value, "step2 D"), 8, "step2 D tag"), + "step2 D", + )); + step2_items.push(Step2Item::D { + y, + first_occurrence: y_first, + }); + } + + let target2 = checked_add(checked_mul(16, target1, "T2 base"), 15, "T2"); + let pair_keys = enumerate_pair_keys(step2_values.len()); + + let num_fillers = 8usize + .checked_mul(t) + .and_then(|value| value.checked_mul(t)) + .and_then(|value| value.checked_sub(3 * t)) + .expect("filler count overflow"); + + let total_elements = step2_values + .len() + .checked_add(2 * pair_keys.len()) + .and_then(|value| value.checked_add(num_fillers)) + .expect("3-Partition element count overflow"); + + let mut sizes = Vec::with_capacity(total_elements); + + 
for &step2_value in &step2_values { + let regular = checked_add( + checked_mul( + 4, + checked_add( + checked_mul(5, target2, "regular base"), + u128::from(step2_value), + "regular inner", + ), + "regular outer", + ), + 1, + "regular tag", + ); + sizes.push(to_u64(regular, "regular element")); + } + + for &(left, right) in &pair_keys { + let a_i = u128::from(step2_values[left]); + let a_j = u128::from(step2_values[right]); + + let u_value = checked_add( + checked_mul( + 4, + checked_sub( + checked_mul(6, target2, "u base"), + checked_add(a_i, a_j, "u pair sum"), + "u inner", + ), + "u outer", + ), + 2, + "u tag", + ); + sizes.push(to_u64(u_value, "pairing u element")); + + let uprime_value = checked_add( + checked_mul( + 4, + checked_add( + checked_mul(5, target2, "u' base"), + checked_add(a_i, a_j, "u' pair sum"), + "u' inner", + ), + "u' outer", + ), + 2, + "u' tag", + ); + sizes.push(to_u64(uprime_value, "pairing u' element")); + } + + let filler_value = to_u64(checked_mul(20, target2, "filler"), "filler element"); + sizes.extend(std::iter::repeat_n(filler_value, num_fillers)); + + let bound = to_u64( + checked_add( + checked_mul(64, target2, "3-Partition bound"), + 4, + "3-Partition bound tag", + ), + "3-Partition bound", + ); + + ReductionThreeDimensionalMatchingToThreePartition { + target: ThreePartition::new(sizes, bound), + step2_items, + pair_keys, + num_source_triples: t, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "threedimensionalmatching_to_threepartition", + build: || { + crate::example_db::specs::rule_example_with_witness::<_, ThreePartition>( + ThreeDimensionalMatching::new(1, vec![(0, 0, 0)]), + SolutionPair { + source_config: vec![1], + target_config: vec![ + 0, 0, 1, 1, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 2, 3, 4, 5, 6, + ], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = 
"../unit_tests/rules/threedimensionalmatching_threepartition.rs"] +mod tests; diff --git a/src/unit_tests/example_db.rs b/src/unit_tests/example_db.rs index a6a8b96b4..1a577fcac 100644 --- a/src/unit_tests/example_db.rs +++ b/src/unit_tests/example_db.rs @@ -265,6 +265,27 @@ fn test_find_rule_example_integral_flow_bundles_to_ilp_contains_full_instances() assert!(!example.solutions[0].target_config.is_empty()); } +#[cfg(feature = "ilp-solver")] +#[test] +fn test_find_rule_example_threedimensionalmatching_to_ilp_contains_full_instances() { + let source = ProblemRef { + name: "ThreeDimensionalMatching".to_string(), + variant: BTreeMap::new(), + }; + let target = ProblemRef { + name: "ILP".to_string(), + variant: BTreeMap::from([("variable".to_string(), "bool".to_string())]), + }; + + let example = + find_rule_example(&source, &target).expect("ThreeDimensionalMatching -> ILP exists"); + assert_eq!(example.source.problem, "ThreeDimensionalMatching"); + assert_eq!(example.target.problem, "ILP"); + assert!(example.source.instance.get("triples").is_some()); + assert_eq!(example.solutions[0].source_config, vec![1, 1, 1, 0, 0]); + assert_eq!(example.solutions[0].target_config, vec![1, 1, 1, 0, 0]); +} + #[test] fn test_build_rule_db_has_unique_structural_keys() { let db = build_rule_db().expect("rule db should build"); diff --git a/src/unit_tests/models/algebraic/quadratic_congruences.rs b/src/unit_tests/models/algebraic/quadratic_congruences.rs index c85952975..5c393b3c7 100644 --- a/src/unit_tests/models/algebraic/quadratic_congruences.rs +++ b/src/unit_tests/models/algebraic/quadratic_congruences.rs @@ -2,6 +2,7 @@ use crate::models::algebraic::QuadraticCongruences; use crate::solvers::BruteForce; use crate::traits::Problem; use crate::types::Or; +use num_bigint::BigUint; fn yes_problem() -> QuadraticCongruences { // a=4, b=15, c=10: x=2 → 4 mod 15 = 4 ✓; x=7 → 49 mod 15 = 4 ✓; x=8 → 64 mod 15 = 4 ✓ @@ -13,15 +14,26 @@ fn no_problem() -> QuadraticCongruences { 
QuadraticCongruences::new(3, 7, 7) } +fn bu(n: u32) -> BigUint { + BigUint::from(n) +} + +fn config_for_x(problem: &QuadraticCongruences, x: u32) -> Vec { + problem.encode_witness(&bu(x)).unwrap() +} + #[test] fn test_quadratic_congruences_creation_and_accessors() { let p = yes_problem(); - assert_eq!(p.a(), 4); - assert_eq!(p.b(), 15); - assert_eq!(p.c(), 10); - // config[0] ∈ {0..8} → x ∈ {1..9}: dims = [9] - assert_eq!(p.dims(), vec![9]); - assert_eq!(p.num_variables(), 1); + assert_eq!(p.a(), &bu(4)); + assert_eq!(p.b(), &bu(15)); + assert_eq!(p.c(), &bu(10)); + assert_eq!(p.bit_length_a(), 3); + assert_eq!(p.bit_length_b(), 4); + assert_eq!(p.bit_length_c(), 4); + // x is encoded as 4 binary digits because c - 1 = 9 has 4 bits. + assert_eq!(p.dims(), vec![2, 2, 2, 2]); + assert_eq!(p.num_variables(), 4); assert_eq!( ::NAME, "QuadraticCongruences" @@ -32,35 +44,30 @@ fn test_quadratic_congruences_creation_and_accessors() { #[test] fn test_quadratic_congruences_evaluate_yes() { let p = yes_problem(); - // x=2 (config[0]=1): 4 mod 15 = 4 ✓ - assert_eq!(p.evaluate(&[1]), Or(true)); - // x=7 (config[0]=6): 49 mod 15 = 4 ✓ - assert_eq!(p.evaluate(&[6]), Or(true)); - // x=8 (config[0]=7): 64 mod 15 = 4 ✓ - assert_eq!(p.evaluate(&[7]), Or(true)); - // x=1 (config[0]=0): 1 mod 15 = 1 ≠ 4 - assert_eq!(p.evaluate(&[0]), Or(false)); - // x=3 (config[0]=2): 9 mod 15 = 9 ≠ 4 - assert_eq!(p.evaluate(&[2]), Or(false)); + assert_eq!(p.evaluate(&config_for_x(&p, 2)), Or(true)); + assert_eq!(p.evaluate(&config_for_x(&p, 7)), Or(true)); + assert_eq!(p.evaluate(&config_for_x(&p, 8)), Or(true)); + assert_eq!(p.evaluate(&config_for_x(&p, 1)), Or(false)); + assert_eq!(p.evaluate(&config_for_x(&p, 3)), Or(false)); } #[test] fn test_quadratic_congruences_evaluate_no() { let p = no_problem(); - // dims = [6]: x ∈ {1..6} - assert_eq!(p.dims(), vec![6]); - for cfg in 0..6 { + // c - 1 = 6 has 3 bits. 
+ assert_eq!(p.dims(), vec![2, 2, 2]); + for x in 1..7 { // quadratic residues mod 7 are {0,1,2,4}; 3 is not one - assert_eq!(p.evaluate(&[cfg]), Or(false)); + assert_eq!(p.evaluate(&config_for_x(&p, x)), Or(false)); } } #[test] fn test_quadratic_congruences_evaluate_invalid_config() { let p = yes_problem(); - // Wrong number of variables assert_eq!(p.evaluate(&[]), Or(false)); assert_eq!(p.evaluate(&[0, 1]), Or(false)); + assert_eq!(p.evaluate(&[0, 1, 0, 2]), Or(false)); } #[test] @@ -72,22 +79,42 @@ fn test_quadratic_congruences_c_le_1() { assert_eq!(p.evaluate(&[]), Or(false)); } +#[test] +fn test_quadratic_congruences_bigint_witness_encoding_round_trip() { + let c = BigUint::parse_bytes(b"2535301200456458802993406410753", 10).unwrap(); + let p = QuadraticCongruences::new(4u32, 15u32, c); + let x = (BigUint::from(1u32) << 100usize) + BigUint::from(1u32); + let config = p.encode_witness(&x).expect("x should be encodable"); + + assert_eq!(config.len(), p.dims().len()); + assert_eq!(p.decode_witness(&config), Some(x)); +} + #[test] fn test_quadratic_congruences_brute_force_finds_witness() { let solver = BruteForce::new(); let witness = solver.find_witness(&yes_problem()).unwrap(); assert_eq!(yes_problem().evaluate(&witness), Or(true)); + let x = yes_problem().decode_witness(&witness).unwrap(); + assert!(matches!(x, v if v == bu(2) || v == bu(7) || v == bu(8))); } #[test] fn test_quadratic_congruences_brute_force_finds_all_witnesses() { let solver = BruteForce::new(); let all = solver.find_all_witnesses(&yes_problem()); - // x=2 (cfg=1), x=7 (cfg=6), x=8 (cfg=7) assert_eq!(all.len(), 3); assert!(all .iter() .all(|sol| yes_problem().evaluate(sol) == Or(true))); + let decoded = all + .iter() + .map(|sol| yes_problem().decode_witness(sol).unwrap()) + .collect::>(); + assert_eq!( + decoded, + std::collections::BTreeSet::from([bu(2), bu(7), bu(8)]) + ); } #[test] @@ -100,7 +127,7 @@ fn test_quadratic_congruences_brute_force_no_witness() { fn 
test_quadratic_congruences_serialization() { let p = yes_problem(); let json = serde_json::to_value(&p).unwrap(); - assert_eq!(json, serde_json::json!({"a": 4, "b": 15, "c": 10})); + assert_eq!(json, serde_json::json!({"a": "4", "b": "15", "c": "10"})); let restored: QuadraticCongruences = serde_json::from_value(json).unwrap(); assert_eq!(restored.a(), p.a()); @@ -126,9 +153,10 @@ fn test_quadratic_congruences_deserialization_rejects_invalid() { #[test] fn test_quadratic_congruences_paper_example() { - // Canonical example: a=4, b=15, c=10; optimal config [1] (x=2) + // Canonical example: a=4, b=15, c=10; x=2 encodes to binary digits [0,1,0,0]. let p = QuadraticCongruences::new(4, 15, 10); - assert_eq!(p.evaluate(&[1]), Or(true)); + let config = config_for_x(&p, 2); + assert_eq!(p.evaluate(&config), Or(true)); let solver = BruteForce::new(); let witness = solver.find_witness(&p).unwrap(); diff --git a/src/unit_tests/models/set/set_splitting.rs b/src/unit_tests/models/set/set_splitting.rs index fb197e923..a69059838 100644 --- a/src/unit_tests/models/set/set_splitting.rs +++ b/src/unit_tests/models/set/set_splitting.rs @@ -19,6 +19,19 @@ fn test_set_splitting_getters() { assert_eq!(problem.num_subsets(), 2); } +#[test] +fn test_set_splitting_normalization_getters() { + let problem = SetSplitting::new(4, vec![vec![0, 1, 2, 3], vec![0, 2]]); + + assert_eq!(problem.normalized_universe_size(), 6); + assert_eq!(problem.normalized_num_size2_subsets(), 2); + assert_eq!(problem.normalized_num_size3_subsets(), 2); + assert_eq!( + problem.normalized_instance().1, + vec![vec![0, 1, 4], vec![4, 5], vec![5, 2, 3], vec![0, 2]] + ); +} + #[test] fn test_set_splitting_evaluate_valid() { // Universe {0,1,2,3}, one subset {0,1,2,3} diff --git a/src/unit_tests/rules/graph.rs b/src/unit_tests/rules/graph.rs index 54e1213f2..b12d58a0c 100644 --- a/src/unit_tests/rules/graph.rs +++ b/src/unit_tests/rules/graph.rs @@ -882,6 +882,16 @@ fn test_nae_sat_to_maxcut_reduction_registered() { 
assert!(graph.has_direct_reduction::>()); } +#[test] +fn test_nae_sat_to_partition_into_perfect_matchings_reduction_registered() { + use crate::models::graph::PartitionIntoPerfectMatchings; + + let graph = ReductionGraph::new(); + + assert!(graph + .has_direct_reduction::>()); +} + #[test] fn test_all_categories_present() { let graph = ReductionGraph::new(); diff --git a/src/unit_tests/rules/ksatisfiability_acyclicpartition.rs b/src/unit_tests/rules/ksatisfiability_acyclicpartition.rs new file mode 100644 index 000000000..b2b98c15e --- /dev/null +++ b/src/unit_tests/rules/ksatisfiability_acyclicpartition.rs @@ -0,0 +1,85 @@ +use crate::models::formula::CNFClause; +use crate::models::formula::KSatisfiability; +use crate::models::graph::AcyclicPartition; +use crate::rules::traits::ReductionResult; +use crate::rules::ReduceTo; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::variant::K3; + +#[test] +fn test_ksatisfiability_to_acyclicpartition_closed_loop() { + let source = KSatisfiability::::new(1, vec![CNFClause::new(vec![1, 1, 1])]); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vertices(), 7); + assert_eq!(target.num_arcs(), 10); + + let solutions = BruteForce::new().find_all_witnesses(target); + assert!(!solutions.is_empty()); + + for solution in solutions { + let extracted = reduction.extract_solution(&solution); + assert!(source.evaluate(&extracted).0); + } +} + +#[test] +fn test_ksatisfiability_to_acyclicpartition_unsatisfiable() { + let source = KSatisfiability::::new( + 1, + vec![ + CNFClause::new(vec![1, 1, 1]), + CNFClause::new(vec![-1, -1, -1]), + ], + ); + + // Source is trivially UNSAT: requires x=true AND x=false simultaneously. + let solver = BruteForce::new(); + assert!( + solver.find_witness(&source).is_none(), + "source with contradictory clauses must be unsatisfiable" + ); + + // Verify the reduction still produces a well-formed target. 
+ let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + assert_eq!(target.num_vertices(), 9); + assert_eq!(target.num_arcs(), 14); +} + +#[test] +fn test_ksatisfiability_to_acyclicpartition_multi_variable_closed_loop() { + let source = KSatisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // Target has 2*3 + 2*2 + 3 = 13 vertices, so brute-force on target is + // infeasible (13^13 configs). Instead, verify round-trip by brute-forcing + // the source (2^3 = 8 configs) and checking that every satisfying source + // assignment extracts correctly from the reduction. + let source_witnesses = BruteForce::new().find_all_witnesses(&source); + assert!( + !source_witnesses.is_empty(), + "source should have at least one satisfying assignment" + ); + + for source_witness in &source_witnesses { + assert!( + source.evaluate(source_witness).0, + "every source witness must evaluate as satisfying" + ); + } + + // Verify structural properties of the target. 
+ assert_eq!(target.num_vertices(), 13); + assert_eq!(target.num_arcs(), 22); +} diff --git a/src/unit_tests/rules/ksatisfiability_cyclicordering.rs b/src/unit_tests/rules/ksatisfiability_cyclicordering.rs new file mode 100644 index 000000000..2d6509dd0 --- /dev/null +++ b/src/unit_tests/rules/ksatisfiability_cyclicordering.rs @@ -0,0 +1,259 @@ +use super::*; +use crate::models::formula::CNFClause; +use crate::models::misc::CyclicOrdering; +use crate::traits::Problem; +use crate::variant::K3; +use std::cmp::Reverse; + +fn solve_cyclic_ordering(problem: &CyclicOrdering) -> Option> { + let n = problem.num_elements(); + if n == 0 { + return None; + } + if n == 1 { + return if problem.triples().is_empty() { + Some(vec![0]) + } else { + None + }; + } + + let mut triples_by_element = vec![Vec::new(); n]; + for (triple_idx, &(a, b, c)) in problem.triples().iter().enumerate() { + triples_by_element[a].push(triple_idx); + triples_by_element[b].push(triple_idx); + triples_by_element[c].push(triple_idx); + } + + let mut element_order: Vec = (1..n).collect(); + element_order.sort_by_key(|&element| Reverse(triples_by_element[element].len())); + + let mut positions = vec![None; n]; + let mut taken = vec![false; n]; + positions[0] = Some(0); + taken[0] = true; + + #[allow(clippy::nonminimal_bool)] + fn is_cyclic_order(a: usize, b: usize, c: usize) -> bool { + (a < b && b < c) || (b < c && c < a) || (c < a && a < b) + } + + fn triple_ok( + problem: &CyclicOrdering, + positions: &[Option], + element: usize, + triples_by_element: &[Vec], + ) -> bool { + for &triple_idx in &triples_by_element[element] { + let (a, b, c) = problem.triples()[triple_idx]; + if let (Some(pa), Some(pb), Some(pc)) = (positions[a], positions[b], positions[c]) { + if !is_cyclic_order(pa, pb, pc) { + return false; + } + } + } + true + } + + fn recurse( + problem: &CyclicOrdering, + triples_by_element: &[Vec], + element_order: &[usize], + idx: usize, + positions: &mut [Option], + taken: &mut [bool], + ) -> bool 
{ + if idx == element_order.len() { + return true; + } + + let element = element_order[idx]; + for pos in 0..problem.num_elements() { + if taken[pos] { + continue; + } + + positions[element] = Some(pos); + taken[pos] = true; + + if triple_ok(problem, positions, element, triples_by_element) + && recurse( + problem, + triples_by_element, + element_order, + idx + 1, + positions, + taken, + ) + { + return true; + } + + positions[element] = None; + taken[pos] = false; + } + + false + } + + if recurse( + problem, + &triples_by_element, + &element_order, + 0, + &mut positions, + &mut taken, + ) { + Some( + positions + .into_iter() + .map(|position| position.expect("solver assigns every element")) + .collect(), + ) + } else { + None + } +} + +#[test] +fn test_ksatisfiability_to_cyclicordering_single_clause_reference_vector() { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_elements(), 14); + assert_eq!(target.num_triples(), 10); + assert_eq!( + target.triples(), + &[ + (0, 2, 9), + (1, 9, 10), + (2, 10, 11), + (3, 5, 9), + (4, 9, 11), + (5, 11, 12), + (6, 8, 10), + (7, 10, 12), + (8, 12, 13), + (13, 12, 11), + ] + ); + + let target_solution = + solve_cyclic_ordering(target).expect("single-clause gadget should be solvable"); + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, vec![1, 1, 1]); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_ksatisfiability_to_cyclicordering_all_negated_clause_matches_reference_vector() { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![-1, -2, -3])]); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_elements(), 14); + assert_eq!(target.num_triples(), 10); + assert_eq!( + target.triples(), + &[ + (0, 1, 9), + (2, 9, 10), + (1, 10, 11), + (3, 4, 9), + (5, 9, 
11), + (4, 11, 12), + (6, 7, 10), + (8, 10, 12), + (7, 12, 13), + (13, 12, 11), + ] + ); +} + +#[test] +fn test_ksatisfiability_to_cyclicordering_extract_solution_from_reference_witness() { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + let reduction = ReduceTo::::reduce_to(&source); + let target_solution = vec![0, 11, 1, 9, 12, 10, 6, 13, 7, 2, 3, 4, 8, 5]; + + assert!(reduction.target_problem().evaluate(&target_solution).0); + assert_eq!(reduction.extract_solution(&target_solution), vec![1, 1, 1]); +} + +#[test] +fn test_ksatisfiability_to_cyclicordering_clause_gadget_truth_patterns() { + let gadget = vec![ + (0, 2, 9), + (1, 9, 10), + (2, 10, 11), + (3, 5, 9), + (4, 9, 11), + (5, 11, 12), + (6, 8, 10), + (7, 10, 12), + (8, 12, 13), + (13, 12, 11), + ]; + + for x_true in [false, true] { + for y_true in [false, true] { + for z_true in [false, true] { + let mut triples = gadget.clone(); + triples.push(if x_true { (0, 2, 1) } else { (0, 1, 2) }); + triples.push(if y_true { (3, 5, 4) } else { (3, 4, 5) }); + triples.push(if z_true { (6, 8, 7) } else { (6, 7, 8) }); + + let problem = CyclicOrdering::new(14, triples); + let has_witness = solve_cyclic_ordering(&problem).is_some(); + assert_eq!( + has_witness, + x_true || y_true || z_true, + "truth pattern ({x_true}, {y_true}, {z_true})" + ); + } + } + } +} + +#[test] +fn test_ksatisfiability_to_cyclicordering_unsatisfiable_repeated_literal_pair() { + let source = KSatisfiability::::new( + 1, + vec![ + CNFClause::new(vec![1, 1, 1]), + CNFClause::new(vec![-1, -1, -1]), + ], + ); + let reduction = ReduceTo::::reduce_to(&source); + + assert!( + solve_cyclic_ordering(reduction.target_problem()).is_none(), + "opposite repeated-literal clauses should be unsatisfiable after reduction" + ); +} + +#[test] +fn test_ksatisfiability_to_cyclicordering_closed_loop() { + let source = KSatisfiability::::new(2, vec![CNFClause::new(vec![1, 2, 1])]); + + let reduction = ReduceTo::::reduce_to(&source); + 
let target = reduction.target_problem(); + + // CyclicOrdering configs are permutations of length num_elements; + // brute-force over n! is infeasible for any non-trivial instance. + // Use the custom backtracking solver instead. + let target_solution = + solve_cyclic_ordering(target).expect("satisfiable source must yield solvable target"); + + assert!( + target.evaluate(&target_solution).0, + "target solution must evaluate as satisfying" + ); + + let extracted = reduction.extract_solution(&target_solution); + assert!( + source.evaluate(&extracted).0, + "extracted source config must satisfy the source" + ); +} diff --git a/src/unit_tests/rules/ksatisfiability_oneinthreesatisfiability.rs b/src/unit_tests/rules/ksatisfiability_oneinthreesatisfiability.rs new file mode 100644 index 000000000..e35d7b788 --- /dev/null +++ b/src/unit_tests/rules/ksatisfiability_oneinthreesatisfiability.rs @@ -0,0 +1,103 @@ +use super::*; +use crate::models::formula::{CNFClause, OneInThreeSatisfiability}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::variant::K3; + +#[test] +fn test_ksatisfiability_to_oneinthreesatisfiability_closed_loop() { + let source = KSatisfiability::::new( + 4, + vec![ + CNFClause::new(vec![1, -2, 3]), + CNFClause::new(vec![2, -3, 4]), + ], + ); + + let reduction = ReduceTo::::reduce_to(&source); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "3SAT->1in3SAT closed loop", + ); +} + +#[test] +fn test_ksatisfiability_to_oneinthreesatisfiability_structure_single_clause() { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vars(), 11); + assert_eq!(target.num_clauses(), 6); + assert_eq!( + target.clauses(), + [ + CNFClause::new(vec![4, 4, 5]), + 
CNFClause::new(vec![1, 6, 9]), + CNFClause::new(vec![2, 7, 9]), + CNFClause::new(vec![6, 7, 10]), + CNFClause::new(vec![8, 9, 11]), + CNFClause::new(vec![3, 8, 4]), + ] + .as_slice() + ); +} + +#[test] +fn test_ksatisfiability_to_oneinthreesatisfiability_structure_negated_clause() { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![-1, -2, -3])]); + + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vars(), 11); + assert_eq!(target.num_clauses(), 6); + assert_eq!( + target.clauses(), + [ + CNFClause::new(vec![4, 4, 5]), + CNFClause::new(vec![-1, 6, 9]), + CNFClause::new(vec![-2, 7, 9]), + CNFClause::new(vec![6, 7, 10]), + CNFClause::new(vec![8, 9, 11]), + CNFClause::new(vec![-3, 8, 4]), + ] + .as_slice() + ); +} + +#[test] +fn test_ksatisfiability_to_oneinthreesatisfiability_unsatisfiable() { + let source = KSatisfiability::::new( + 1, + vec![ + CNFClause::new(vec![1, 1, 1]), + CNFClause::new(vec![-1, -1, -1]), + ], + ); + + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + assert!(solver.find_witness(&source).is_none()); + assert!(solver.find_witness(target).is_none()); +} + +#[test] +fn test_ksatisfiability_to_oneinthreesatisfiability_extract_solution() { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + let target_solution = vec![0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0]; + assert!(target.evaluate(&target_solution).0); + + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, vec![0, 0, 1]); + assert!(source.evaluate(&extracted).0); +} diff --git a/src/unit_tests/rules/ksatisfiability_preemptivescheduling.rs b/src/unit_tests/rules/ksatisfiability_preemptivescheduling.rs new file mode 100644 index 000000000..7f92fcbaa --- /dev/null +++ 
b/src/unit_tests/rules/ksatisfiability_preemptivescheduling.rs @@ -0,0 +1,126 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::formula::CNFClause; +use crate::models::misc::{PrecedenceConstrainedScheduling, PreemptiveScheduling}; +#[cfg(feature = "ilp-solver")] +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::types::Min; +use crate::variant::K3; + +fn yes_single_variable_instance() -> KSatisfiability { + KSatisfiability::::new(1, vec![CNFClause::new(vec![1, 1, 1])]) +} + +fn no_single_variable_instance() -> KSatisfiability { + KSatisfiability::::new( + 1, + vec![ + CNFClause::new(vec![1, 1, 1]), + CNFClause::new(vec![-1, -1, -1]), + ], + ) +} + +#[cfg(feature = "ilp-solver")] +fn solve_threshold_schedule_via_ilp( + target: &PreemptiveScheduling, + deadline: usize, +) -> Option> { + let pcs = PrecedenceConstrainedScheduling::new( + target.num_tasks(), + target.num_processors(), + deadline, + target.precedences().to_vec(), + ); + let pcs_to_ilp = ReduceTo::>::reduce_to(&pcs); + let ilp_solution = ILPSolver::new().solve(pcs_to_ilp.target_problem())?; + let slot_assignment = pcs_to_ilp.extract_solution(&ilp_solution); + + let mut config = vec![0usize; target.num_tasks() * target.d_max()]; + for (task, &slot) in slot_assignment.iter().enumerate() { + config[task * target.d_max() + slot] = 1; + } + Some(config) +} + +#[test] +fn test_ksatisfiability_to_preemptivescheduling_structure() { + let source = yes_single_variable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(reduction.threshold(), 4); + assert_eq!(target.num_processors(), 6); + assert_eq!(target.num_tasks(), 24); + assert_eq!(target.d_max(), 24); + assert_eq!(target.num_precedences(), 49); + assert!(target.lengths().iter().all(|&length| length == 1)); +} + +#[test] +fn test_ksatisfiability_to_preemptivescheduling_extract_solution_from_constructed_schedule() { + let source = 
yes_single_variable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + let schedule = construct_schedule_from_assignment(reduction.target_problem(), &[1], &source) + .expect("satisfying assignment should yield a witness schedule"); + + assert_eq!(reduction.target_problem().evaluate(&schedule), Min(Some(4))); + + let extracted = reduction.extract_solution(&schedule); + assert_eq!(extracted, vec![1]); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_ksatisfiability_to_preemptivescheduling_multi_variable_round_trip() { + let source = KSatisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ); + let result = ReduceTo::::reduce_to(&source); + + let schedule = construct_schedule_from_assignment(result.target_problem(), &[1, 1, 0], &source) + .expect("satisfying assignment should yield a witness schedule"); + + let extracted = result.extract_solution(&schedule); + assert_eq!(extracted, vec![1, 1, 0]); + assert!(source.evaluate(&extracted).0); +} + +#[cfg(feature = "ilp-solver")] +#[test] +fn test_ksatisfiability_to_preemptivescheduling_closed_loop() { + let source = yes_single_variable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + let target_solution = + solve_threshold_schedule_via_ilp(reduction.target_problem(), reduction.threshold()) + .expect("satisfying instance should meet the threshold"); + + assert_eq!( + reduction.target_problem().evaluate(&target_solution), + Min(Some(reduction.threshold())) + ); + + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, vec![1]); + assert!(source.evaluate(&extracted).0); +} + +#[cfg(feature = "ilp-solver")] +#[test] +fn test_ksatisfiability_to_preemptivescheduling_unsatisfiable_threshold_gap() { + let source = no_single_variable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + assert!( + solve_threshold_schedule_via_ilp(reduction.target_problem(), reduction.threshold()) + 
.is_none(), + "unsatisfiable instance should not admit a schedule by the threshold" + ); +} diff --git a/src/unit_tests/rules/ksatisfiability_quadraticcongruences.rs b/src/unit_tests/rules/ksatisfiability_quadraticcongruences.rs new file mode 100644 index 000000000..aa8cc986a --- /dev/null +++ b/src/unit_tests/rules/ksatisfiability_quadraticcongruences.rs @@ -0,0 +1,138 @@ +use super::*; +use crate::models::algebraic::QuadraticCongruences; +use crate::models::formula::CNFClause; +use crate::traits::Problem; +use crate::variant::K3; +use num_bigint::BigUint; + +fn parse_biguint(decimal: &str) -> BigUint { + BigUint::parse_bytes(decimal.as_bytes(), 10).unwrap() +} + +fn yes_source() -> KSatisfiability { + KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]) +} + +fn no_source() -> KSatisfiability { + KSatisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![1, 2, -3]), + CNFClause::new(vec![1, -2, 3]), + CNFClause::new(vec![1, -2, -3]), + CNFClause::new(vec![-1, 2, 3]), + CNFClause::new(vec![-1, 2, -3]), + CNFClause::new(vec![-1, -2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ) +} + +#[test] +fn test_ksatisfiability_to_quadraticcongruences_yes_vector_matches_reference() { + let source = yes_source(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!( + target.a().to_string(), + 
"24774376789833901930969493589690857054479757318809067389880050365481900686480521855790751085321855544028716091768740304061770154933483583912568032268487309839887150483217991803427841494632246256979069106267975739903077253313975220950334629543698896680796604644514471329982226587790198939310252130261854260061345261792534822703552107415993395149181095406643946417446677838556491529662967718612497196985465607481037701762565357464432115991588186315538944420338665581665657256990633048782316518458318960789823148195608356665866132094622331741751434660104464526446186908186423984327309378584441678852301608097413875097759322340203392440129757309044952206052676794474037444" + ); + assert_eq!( + target.b().to_string(), + "258320492398609134568167452627805653838806933034511801239359528293031627308482985529627303083989608069978947502642310747925392872731730228104501603959708654521957819948288267706290429252928740405698982584646431109529262246477522695494351678851239966374979739979701122298088960329328753718665018850712042275569871466396457503567796534342707340019196698413210131749976105850216220019394606130292347220176211138740165131698855676193883728870618625384826419788829829526247269356884620132156221128153554106543852477345833208840768767922757003997300130451685941234216949874080226229911993667022444495949994448402992262942289046968833223208572761404733560153986210741255929856" + ); + assert_eq!( + target.c().to_string(), + 
"1751451155417562289076090860910295013949798563382605049497801689362833144442778311933240623927667902634489226131336737498914714274795055484758030113178137436163237034061475775300435705196751438621958401245166827762311321148824531342518489821082597026301078157024033550589509978740853350146565333003331750962865756176467129058599530372891056620147121385604213541915205324234623562149270154119682624448276224413342516768403311219878134069384884962788483901933932054931801996913522906978676274990632045086346409870581389711391040275119346527314035568397793120598911278990196649611544031780831552993078580251588216452432109923605723648051009454783363" + ); + + let witness = parse_biguint( + "1751451122102119958305507786775835374858648979796949071929887579732578264063983923970828608254544727567945005331103265320267846420581308180536461678218456421163010842022583797942541569366464959069523226763069748653830351684499364645098951736761394790343553460544021210289436100818494593367113721596780252083857888675004881955664228675079663569835052161564690932502575257394108174870151908279593037426404556490332761276593006398441245490978500647642893471046425509487910796951416870024826654351366508266859321005453091128123256128675758429165869380881549388896022325625404673271432251145796159394173120179999131480837018022329857587128653018300402" + ); + let target_config = target + .encode_witness(&witness) + .expect("reference witness must fit target encoding"); + assert_eq!(target.evaluate(&target_config), crate::types::Or(true)); + + let extracted = reduction.extract_solution(&target_config); + assert_eq!(extracted, vec![1, 0, 0]); + assert_eq!(source.evaluate(&extracted), crate::types::Or(true)); +} + +#[test] +fn test_ksatisfiability_to_quadraticcongruences_no_vector_matches_reference() { + let source = no_source(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!( + target.a().to_string(), + 
"120619237677026130477382743668519955611573561225067606894291539776375461793042697084141982444840376832001728483520031164927711964309027723931955055266286697653823171984912998845552700982708397313076874101315576113063246467601796146446302015850261619724287365217007681583278794859395374862907111341177346218183179800717140552372231833894729949737335461236543786388850947901432259488709855205465866771912257416917279047309255913271484003744149430021735021695160328958001378399161910095840332775546649076458417491115249654227832505762740144311527592947868271995754125690220733691637204688156423224231300498143512347565896417676557114842881743922849970165789454322978012868" + ); + assert_eq!( + target.b().to_string(), + "258320492398609134568167452627805653838806933034511801239359528293031627308482985529627303083989608069978947502642310747925392872731730228104501603959708654521957819948288267706290429252928740405698982584646431109529262246477522695494351678851239966374979739979701122298088960329328753718665018850712042275569871466396457503567796534342707340019196698413210131749976105850216220019394606130292347220176211138740165131698855676193883728870618625384826419788829829526247269356884620132156221128153554106543852477345833208840768767922757003997300130451685941234216949874080226229911993667022444495949994448402992262942289046968833223208572761404733560153986210741255929856" + ); + assert_eq!( + target.c().to_string(), + 
"1751451155417562289076090860910295013949798563382605049497801689362833144442778311933240623927667902634489226131336737498914714274795055484758030113178137436163237034061475775300435705196751438621958401245166827762311321148824531342518489821082597026301078157024033550589509978740853350146565333003331750962865756176467129058599530372891056620147121385604213541915205324234623562149270154119682624448276224413342516768403311219878134069384884962788483901933932054931801996913522906978676274990632045086346409870581389711391040275119346527314035568397793120598911278990196649611544031780831552993078580251588216452432109923605723648051009454783363" + ); + + assert!( + exhaustive_alpha_solution(&source).is_none(), + "reference UNSAT instance should have no exact doubled-knapsack witness" + ); +} + +#[test] +fn test_ksatisfiability_to_quadraticcongruences_extracts_assignment_from_constructed_witness() { + let source = KSatisfiability::::new(4, vec![CNFClause::new(vec![1, 2, 3])]); + let reduction = ReduceTo::::reduce_to(&source); + let target_config = witness_config_for_assignment(&source, &[1, 0, 0, 0]) + .expect("assignment should lift to a target witness"); + + let extracted = reduction.extract_solution(&target_config); + assert_eq!(extracted, vec![1, 0, 0, 0]); + assert_eq!(source.evaluate(&extracted), crate::types::Or(true)); + assert_eq!( + reduction.target_problem().evaluate(&target_config), + crate::types::Or(true) + ); +} + +#[test] +fn test_ksatisfiability_to_quadraticcongruences_closed_loop() { + let source = KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, -3])]); + + let reduction = ReduceTo::::reduce_to(&source); + + // Construct a target config from a known-satisfying source assignment. + // Assignment: x1=true, x2=false, x3=false => clause (1,2,-3) satisfied by x1=true. 
+ let assignment = [1, 0, 0]; + assert_eq!( + source.evaluate(assignment.as_ref()), + crate::types::Or(true), + "assignment must satisfy the source" + ); + + let target_config = witness_config_for_assignment(&source, &assignment) + .expect("satisfying assignment should lift to a target witness"); + + // Verify the target config is a valid witness. + assert_eq!( + reduction.target_problem().evaluate(&target_config), + crate::types::Or(true), + "constructed target config must satisfy the target" + ); + + // Verify round-trip: extracting the source solution recovers the original assignment. + let extracted = reduction.extract_solution(&target_config); + assert_eq!(extracted, vec![1, 0, 0]); + assert_eq!( + source.evaluate(&extracted), + crate::types::Or(true), + "extracted source config must satisfy the source" + ); +} diff --git a/src/unit_tests/rules/ksatisfiability_timetabledesign.rs b/src/unit_tests/rules/ksatisfiability_timetabledesign.rs new file mode 100644 index 000000000..7f3f40d76 --- /dev/null +++ b/src/unit_tests/rules/ksatisfiability_timetabledesign.rs @@ -0,0 +1,102 @@ +use super::*; +use crate::models::formula::CNFClause; +use crate::models::misc::TimetableDesign; +#[cfg(feature = "ilp-solver")] +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::variant::K3; + +fn satisfiable_instance() -> KSatisfiability { + KSatisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 2, -3]), + CNFClause::new(vec![1, -2, -3]), + ], + ) +} + +fn unsatisfiable_instance() -> KSatisfiability { + KSatisfiability::::new_allow_less( + 1, + vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])], + ) +} + +#[test] +fn test_ksatisfiability_to_timetabledesign_structure() { + let source = satisfiable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_periods(), 12); + assert_eq!(target.num_craftsmen(), 235); + assert_eq!(target.num_tasks(), 
207); + assert!( + target + .requirements() + .iter() + .flatten() + .all(|&requirement| requirement <= 1), + "the construction should stay in the binary-requirement regime" + ); +} + +#[test] +fn test_ksatisfiability_to_timetabledesign_extract_solution_from_constructed_timetable() { + let source = satisfiable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target_solution = + construct_timetable_from_assignment(reduction.target_problem(), &[1, 1, 0], &source) + .expect("a satisfying 3SAT assignment should lift to a timetable witness"); + + assert!(reduction.target_problem().evaluate(&target_solution).0); + + let extracted = reduction.extract_solution(&target_solution); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_ksatisfiability_to_timetabledesign_multi_variable_round_trip() { + let source = satisfiable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + let target_solution = + construct_timetable_from_assignment(reduction.target_problem(), &[1, 1, 0], &source) + .expect("a satisfying 3SAT assignment should lift to a timetable witness"); + + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, vec![1, 1, 0]); + assert!(source.evaluate(&extracted).0); +} + +#[cfg(feature = "ilp-solver")] +#[test] +fn test_ksatisfiability_to_timetabledesign_closed_loop() { + let source = satisfiable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + let target_solution = ILPSolver::new() + .solve_reduced(reduction.target_problem()) + .expect("satisfiable source instance should produce a feasible timetable"); + + assert!(reduction.target_problem().evaluate(&target_solution).0); + + let extracted = reduction.extract_solution(&target_solution); + assert!(source.evaluate(&extracted).0); +} + +#[cfg(feature = "ilp-solver")] +#[test] +fn test_ksatisfiability_to_timetabledesign_unsatisfiable() { + let source = unsatisfiable_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + 
assert!( + ILPSolver::new() + .solve_reduced(reduction.target_problem()) + .is_none(), + "unsatisfiable 3SAT instance should produce an infeasible timetable" + ); +} diff --git a/src/unit_tests/rules/minimumvertexcover_longestcommonsubsequence.rs b/src/unit_tests/rules/minimumvertexcover_longestcommonsubsequence.rs new file mode 100644 index 000000000..c2e43dd00 --- /dev/null +++ b/src/unit_tests/rules/minimumvertexcover_longestcommonsubsequence.rs @@ -0,0 +1,92 @@ +use super::*; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::topology::SimpleGraph; + +#[test] +fn test_minimumvertexcover_to_longestcommonsubsequence_closed_loop() { + let source = MinimumVertexCover::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![One; 4], + ); + + let reduction = ReduceTo::::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MVC->LCS (path P4)", + ); +} + +#[test] +fn test_mvc_to_lcs_structure_for_path_p4() { + let source = MinimumVertexCover::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![One; 4], + ); + + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.alphabet_size(), 4); + assert_eq!(target.num_strings(), 4); + assert_eq!(target.max_length(), 4); + assert_eq!(target.total_length(), 22); + assert_eq!( + target.strings(), + &[ + vec![0, 1, 2, 3], + vec![1, 2, 3, 0, 2, 3], + vec![0, 2, 3, 0, 1, 3], + vec![0, 1, 3, 0, 1, 2], + ], + ); +} + +#[test] +fn test_mvc_to_lcs_triangle_closed_loop() { + let source = MinimumVertexCover::new( + SimpleGraph::new(3, vec![(0, 1), (0, 2), (1, 2)]), + vec![One; 3], + ); + + let reduction = ReduceTo::::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MVC->LCS (triangle)", + ); +} + +#[test] +fn test_mvc_to_lcs_empty_graph_closed_loop() { + let source = 
MinimumVertexCover::new(SimpleGraph::new(4, vec![]), vec![One; 4]); + + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.alphabet_size(), 4); + assert_eq!(target.num_strings(), 1); + assert_eq!(target.max_length(), 4); + assert_eq!(target.strings(), &[vec![0, 1, 2, 3]]); + + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MVC->LCS (empty graph)", + ); +} + +#[test] +fn test_mvc_to_lcs_canonicalizes_edge_orientation() { + let source = MinimumVertexCover::new(SimpleGraph::new(2, vec![(1, 0)]), vec![One; 2]); + + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.strings(), &[vec![0, 1], vec![1, 0]]); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MVC->LCS (reversed edge orientation)", + ); +} diff --git a/src/unit_tests/rules/naesatisfiability_partitionintoperfectmatchings.rs b/src/unit_tests/rules/naesatisfiability_partitionintoperfectmatchings.rs new file mode 100644 index 000000000..412748a98 --- /dev/null +++ b/src/unit_tests/rules/naesatisfiability_partitionintoperfectmatchings.rs @@ -0,0 +1,294 @@ +use super::*; +use crate::models::formula::{CNFClause, NAESatisfiability}; +use crate::models::graph::PartitionIntoPerfectMatchings; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::BruteForce; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; + +fn sorted_edges(graph: &SimpleGraph) -> Vec<(usize, usize)> { + let mut edges = graph.edges(); + edges.sort_unstable(); + edges +} + +fn yes_example_problem() -> NAESatisfiability { + NAESatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 2, -3]), + ], + ) +} + +fn no_example_problem() -> NAESatisfiability { + NAESatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + 
CNFClause::new(vec![1, 2, -3]), + CNFClause::new(vec![1, -2, 3]), + CNFClause::new(vec![-1, 2, 3]), + ], + ) +} + +#[test] +fn test_naesatisfiability_to_partitionintoperfectmatchings_closed_loop() { + let source = NAESatisfiability::new(1, vec![]); + let reduction = ReduceTo::>::reduce_to(&source); + + assert_eq!(reduction.target_problem().num_vertices(), 4); + assert_eq!(reduction.target_problem().num_edges(), 3); + assert_eq!(reduction.target_problem().num_matchings(), 2); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "NAE-SAT -> PartitionIntoPerfectMatchings empty-formula closed loop", + ); +} + +#[test] +fn test_naesatisfiability_to_partitionintoperfectmatchings_unsat_small_instance() { + let source = NAESatisfiability::new(1, vec![CNFClause::new(vec![1, 1])]); + let reduction = ReduceTo::>::reduce_to(&source); + + assert!(BruteForce::new() + .find_witness(reduction.target_problem()) + .is_none()); +} + +#[test] +fn test_naesatisfiability_to_partitionintoperfectmatchings_yes_example_structure() { + let source = yes_example_problem(); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let mut expected_edges = vec![ + (0, 1), + (2, 3), + (0, 2), + (4, 5), + (6, 7), + (4, 6), + (8, 9), + (10, 11), + (8, 10), + (12, 13), + (14, 15), + (16, 17), + (18, 19), + (20, 21), + (22, 23), + (24, 25), + (24, 26), + (24, 27), + (25, 26), + (25, 27), + (26, 27), + (12, 24), + (14, 25), + (16, 26), + (28, 29), + (28, 30), + (28, 31), + (29, 30), + (29, 31), + (30, 31), + (18, 28), + (20, 29), + (22, 30), + (32, 33), + (0, 32), + (12, 32), + (34, 35), + (2, 34), + (18, 34), + (36, 37), + (4, 36), + (14, 36), + (38, 39), + (14, 38), + (20, 38), + (40, 41), + (8, 40), + (16, 40), + (42, 43), + (10, 42), + (22, 42), + ]; + expected_edges.sort_unstable(); + + assert_eq!(target.num_vertices(), 44); + assert_eq!(target.num_edges(), 51); + assert_eq!(target.num_matchings(), 2); + 
assert_eq!(sorted_edges(target.graph()), expected_edges); +} + +#[test] +fn test_naesatisfiability_to_partitionintoperfectmatchings_no_example_structure() { + let source = no_example_problem(); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let mut expected_edges = vec![ + (0, 1), + (2, 3), + (0, 2), + (4, 5), + (6, 7), + (4, 6), + (8, 9), + (10, 11), + (8, 10), + (12, 13), + (14, 15), + (16, 17), + (18, 19), + (20, 21), + (22, 23), + (24, 25), + (26, 27), + (28, 29), + (30, 31), + (32, 33), + (34, 35), + (36, 37), + (36, 38), + (36, 39), + (37, 38), + (37, 39), + (38, 39), + (12, 36), + (14, 37), + (16, 38), + (40, 41), + (40, 42), + (40, 43), + (41, 42), + (41, 43), + (42, 43), + (18, 40), + (20, 41), + (22, 42), + (44, 45), + (44, 46), + (44, 47), + (45, 46), + (45, 47), + (46, 47), + (24, 44), + (26, 45), + (28, 46), + (48, 49), + (48, 50), + (48, 51), + (49, 50), + (49, 51), + (50, 51), + (30, 48), + (32, 49), + (34, 50), + (52, 53), + (0, 52), + (12, 52), + (54, 55), + (12, 54), + (18, 54), + (56, 57), + (18, 56), + (24, 56), + (58, 59), + (2, 58), + (30, 58), + (60, 61), + (4, 60), + (14, 60), + (62, 63), + (14, 62), + (20, 62), + (64, 65), + (20, 64), + (32, 64), + (66, 67), + (6, 66), + (26, 66), + (68, 69), + (8, 68), + (16, 68), + (70, 71), + (16, 70), + (28, 70), + (72, 73), + (28, 72), + (34, 72), + (74, 75), + (10, 74), + (22, 74), + ]; + expected_edges.sort_unstable(); + + assert_eq!(target.num_vertices(), 76); + assert_eq!(target.num_edges(), 93); + assert_eq!(target.num_matchings(), 2); + assert_eq!(sorted_edges(target.graph()), expected_edges); +} + +#[test] +fn test_naesatisfiability_to_partitionintoperfectmatchings_constructed_witness_round_trips() { + let source = yes_example_problem(); + let source_solution = vec![1, 1, 0]; + let reduction = ReduceTo::>::reduce_to(&source); + let target_solution = reduction.construct_target_solution(&source_solution); + + 
assert!(source.evaluate(&source_solution)); + assert!(reduction.target_problem().evaluate(&target_solution)); + assert_eq!( + reduction.extract_solution(&target_solution), + source_solution + ); +} + +#[test] +fn test_naesatisfiability_to_partitionintoperfectmatchings_two_literal_clause_normalization() { + let source = NAESatisfiability::new(2, vec![CNFClause::new(vec![1, -2])]); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + let source_solution = vec![1, 1]; + let target_solution = reduction.construct_target_solution(&source_solution); + + assert_eq!(target.num_vertices(), 24); + assert_eq!(target.num_edges(), 27); + assert_eq!(target.num_matchings(), 2); + assert!(target.evaluate(&target_solution)); + assert_eq!( + reduction.extract_solution(&target_solution), + source_solution + ); +} + +#[test] +#[should_panic( + expected = "NAESatisfiability -> PartitionIntoPerfectMatchings expects clauses of size 2 or 3" +)] +fn test_naesatisfiability_to_partitionintoperfectmatchings_rejects_long_clauses() { + let source = NAESatisfiability::new(4, vec![CNFClause::new(vec![1, 2, 3, 4])]); + let _ = ReduceTo::>::reduce_to(&source); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_naesatisfiability_to_partitionintoperfectmatchings_canonical_example_spec() { + let specs = + crate::rules::naesatisfiability_partitionintoperfectmatchings::canonical_rule_example_specs( + ); + assert_eq!(specs.len(), 1); + + let example = (specs[0].build)(); + assert_eq!(example.source.problem, "NAESatisfiability"); + assert_eq!(example.target.problem, "PartitionIntoPerfectMatchings"); + assert_eq!(example.solutions.len(), 1); + assert_eq!(example.solutions[0].source_config, vec![1, 1, 0]); +} diff --git a/src/unit_tests/rules/satisfiability_integralflowhomologousarcs.rs b/src/unit_tests/rules/satisfiability_integralflowhomologousarcs.rs new file mode 100644 index 000000000..ccbbe362c --- /dev/null +++ 
b/src/unit_tests/rules/satisfiability_integralflowhomologousarcs.rs @@ -0,0 +1,146 @@ +#[cfg(feature = "example-db")] +use super::canonical_rule_example_specs; +use super::*; +use crate::models::formula::CNFClause; +#[cfg(feature = "example-db")] +use crate::models::graph::IntegralFlowHomologousArcs; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::rules::{ReduceTo, ReductionGraph, ReductionResult}; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +fn issue_example() -> Satisfiability { + Satisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2]), + CNFClause::new(vec![-1, 3]), + CNFClause::new(vec![-2, -3]), + CNFClause::new(vec![1, 3]), + ], + ) +} + +fn all_assignments(num_vars: usize) -> Vec> { + (0..(1usize << num_vars)) + .map(|mask| { + (0..num_vars) + .map(|bit| usize::from(((mask >> bit) & 1) == 1)) + .collect() + }) + .collect() +} + +#[test] +fn test_satisfiability_to_integralflowhomologousarcs_closed_loop() { + let source = Satisfiability::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::::reduce_to(&source); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "SAT->IntegralFlowHomologousArcs closed loop", + ); +} + +#[test] +fn test_satisfiability_to_integralflowhomologousarcs_issue_example_structure() { + let source = issue_example(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vertices(), 43); + assert_eq!(target.num_arcs(), 51); + assert_eq!(target.requirement(), 3); + assert_eq!(target.homologous_pairs().len(), 8); + assert_eq!(target.max_capacity(), 1); +} + +#[test] +fn test_satisfiability_to_integralflowhomologousarcs_issue_example_assignment_encoding() { + let source = issue_example(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + let satisfying_assignment = vec![1, 0, 1]; + let satisfying_flow = 
reduction.encode_assignment(&satisfying_assignment); + assert!(target.evaluate(&satisfying_flow).0); + assert_eq!( + reduction.extract_solution(&satisfying_flow), + satisfying_assignment + ); + + let unsatisfying_assignment = vec![1, 1, 1]; + let unsatisfying_flow = reduction.encode_assignment(&unsatisfying_assignment); + assert!(!target.evaluate(&unsatisfying_flow).0); +} + +#[test] +fn test_satisfiability_to_integralflowhomologousarcs_issue_example_truth_table_matches_flow() { + let source = issue_example(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + for assignment in all_assignments(source.num_vars()) { + let flow = reduction.encode_assignment(&assignment); + assert_eq!( + source.evaluate(&assignment).0, + target.evaluate(&flow).0, + "assignment {:?} should preserve satisfiability through the encoded flow", + assignment + ); + } +} + +#[test] +fn test_satisfiability_to_integralflowhomologousarcs_unsat_source_has_no_target_witness() { + let source = Satisfiability::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); + let reduction = ReduceTo::::reduce_to(&source); + + assert_eq!( + BruteForce::new().find_witness(reduction.target_problem()), + None + ); +} + +#[test] +fn test_reduction_graph_registers_satisfiability_to_integralflowhomologousarcs() { + let graph = ReductionGraph::new(); + assert!(graph.has_direct_reduction_by_name("Satisfiability", "IntegralFlowHomologousArcs",)); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_satisfiability_to_integralflowhomologousarcs_canonical_example_spec() { + let example = (canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "satisfiability_to_integralflowhomologousarcs") + .expect("missing canonical SAT -> IFHA example spec") + .build)(); + + assert_eq!(example.source.problem, "Satisfiability"); + assert_eq!(example.target.problem, "IntegralFlowHomologousArcs"); + assert_eq!(example.target.instance["requirement"], 
serde_json::json!(3)); + assert_eq!( + example.target.instance["homologous_pairs"] + .as_array() + .unwrap() + .len(), + 8 + ); + assert_eq!(example.solutions.len(), 1); + assert_eq!(example.solutions[0].source_config, vec![1, 0, 1]); + + let source: Satisfiability = serde_json::from_value(example.source.instance.clone()) + .expect("source example deserializes"); + let target: IntegralFlowHomologousArcs = + serde_json::from_value(example.target.instance.clone()) + .expect("target example deserializes"); + + assert!(source + .evaluate(&example.solutions[0].source_config) + .is_valid()); + assert!(target + .evaluate(&example.solutions[0].target_config) + .is_valid()); +} diff --git a/src/unit_tests/rules/setsplitting_betweenness.rs b/src/unit_tests/rules/setsplitting_betweenness.rs new file mode 100644 index 000000000..9177da2ab --- /dev/null +++ b/src/unit_tests/rules/setsplitting_betweenness.rs @@ -0,0 +1,97 @@ +use crate::models::misc::Betweenness; +use crate::models::set::SetSplitting; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::BruteForce; + +fn small_yes_instance() -> SetSplitting { + SetSplitting::new(3, vec![vec![0, 1, 2]]) +} + +fn issue_yes_instance() -> SetSplitting { + SetSplitting::new( + 5, + vec![vec![0, 1, 2], vec![2, 3, 4], vec![0, 3, 4], vec![1, 2, 3]], + ) +} + +fn issue_no_instance() -> SetSplitting { + SetSplitting::new(3, vec![vec![0, 1], vec![1, 2], vec![0, 2], vec![0, 1, 2]]) +} + +#[test] +fn test_setsplitting_to_betweenness_closed_loop() { + let source = small_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "SetSplitting -> Betweenness", + ); +} + +#[test] +fn test_setsplitting_to_betweenness_issue_yes_instance_structure() { + let source = issue_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = 
reduction.target_problem(); + + assert_eq!(target.num_elements(), 10); + assert_eq!( + target.triples(), + &[ + (0, 6, 1), + (6, 5, 2), + (2, 7, 3), + (7, 5, 4), + (0, 8, 3), + (8, 5, 4), + (1, 9, 2), + (9, 5, 3), + ], + ); + assert_eq!( + reduction.extract_solution(&[8, 2, 9, 0, 1, 4, 3, 6, 7, 5]), + vec![1, 0, 1, 0, 0] + ); +} + +#[test] +fn test_setsplitting_to_betweenness_normalizes_large_subsets() { + let source = SetSplitting::new(4, vec![vec![0, 1, 2, 3]]); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_elements(), 9); + assert_eq!( + target.triples(), + &[(0, 7, 1), (7, 6, 4), (4, 6, 5), (5, 8, 2), (8, 6, 3),], + ); +} + +#[test] +fn test_setsplitting_to_betweenness_issue_no_instance_is_unsat() { + let source = issue_no_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + assert!(BruteForce::new() + .find_witness(reduction.target_problem()) + .is_none()); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_setsplitting_to_betweenness_canonical_example_spec() { + let specs = crate::rules::setsplitting_betweenness::canonical_rule_example_specs(); + assert_eq!(specs.len(), 1); + + let example = (specs[0].build)(); + assert_eq!(example.source.problem, "SetSplitting"); + assert_eq!(example.target.problem, "Betweenness"); + assert_eq!(example.solutions.len(), 1); + + let pair = &example.solutions[0]; + assert_eq!(pair.source_config, vec![1, 0, 1, 0, 0]); + assert_eq!(pair.target_config, vec![8, 2, 9, 0, 1, 4, 3, 6, 7, 5]); +} diff --git a/src/unit_tests/rules/threedimensionalmatching_ilp.rs b/src/unit_tests/rules/threedimensionalmatching_ilp.rs new file mode 100644 index 000000000..0873a4b65 --- /dev/null +++ b/src/unit_tests/rules/threedimensionalmatching_ilp.rs @@ -0,0 +1,167 @@ +use super::*; +use crate::models::algebraic::{Comparison, ObjectiveSense, ILP}; +use crate::models::misc::{ResourceConstrainedScheduling, ThreePartition}; +use 
crate::models::set::ThreeDimensionalMatching; +use crate::rules::{MinimizeSteps, ReduceTo, ReductionGraph, ReductionResult}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::{Or, ProblemSize}; + +fn canonical_problem() -> ThreeDimensionalMatching { + ThreeDimensionalMatching::new( + 3, + vec![(0, 1, 2), (1, 0, 1), (2, 2, 0), (0, 0, 0), (1, 2, 2)], + ) +} + +fn singleton_problem() -> ThreeDimensionalMatching { + ThreeDimensionalMatching::new(1, vec![(0, 0, 0)]) +} + +fn constraint_signature(constraint: &(Comparison, f64, Vec<(usize, f64)>)) -> String { + let cmp = match constraint.0 { + Comparison::Le => "<=", + Comparison::Ge => ">=", + Comparison::Eq => "=", + }; + let terms = constraint + .2 + .iter() + .map(|&(var, coeff)| format!("{var}:{}", (coeff * 1_000_000.0).round() as i64)) + .collect::>() + .join(","); + format!( + "{cmp}|{}|{terms}", + (constraint.1 * 1_000_000.0).round() as i64 + ) +} + +#[test] +fn test_threedimensionalmatching_to_ilp_structure() { + let problem = canonical_problem(); + let reduction: ReductionThreeDimensionalMatchingToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 5); + assert_eq!(ilp.constraints.len(), 9); + assert!(ilp.objective.is_empty()); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + type Constraint = (Comparison, f64, Vec<(usize, f64)>); + let actual_constraints: Vec = ilp + .constraints + .iter() + .map(|constraint| { + let mut terms = constraint.terms.clone(); + terms.sort_by_key(|(var, _)| *var); + (constraint.cmp, constraint.rhs, terms) + }) + .collect(); + let expected_constraints = vec![ + (Comparison::Eq, 1.0, vec![(0, 1.0), (3, 1.0)]), + (Comparison::Eq, 1.0, vec![(1, 1.0), (4, 1.0)]), + (Comparison::Eq, 1.0, vec![(2, 1.0)]), + (Comparison::Eq, 1.0, vec![(1, 1.0), (3, 1.0)]), + (Comparison::Eq, 1.0, vec![(0, 1.0)]), + (Comparison::Eq, 1.0, vec![(2, 1.0), (4, 1.0)]), + (Comparison::Eq, 1.0, vec![(2, 1.0), 
(3, 1.0)]), + (Comparison::Eq, 1.0, vec![(1, 1.0)]), + (Comparison::Eq, 1.0, vec![(0, 1.0), (4, 1.0)]), + ]; + + let mut actual_signatures: Vec<_> = actual_constraints + .iter() + .map(constraint_signature) + .collect(); + let mut expected_signatures: Vec<_> = expected_constraints + .iter() + .map(constraint_signature) + .collect(); + actual_signatures.sort(); + expected_signatures.sort(); + + assert_eq!(actual_signatures, expected_signatures); +} + +#[test] +fn test_threedimensionalmatching_to_ilp_closed_loop() { + let problem = canonical_problem(); + let reduction: ReductionThreeDimensionalMatchingToILP = + ReduceTo::>::reduce_to(&problem); + + let bf_witness = BruteForce::new() + .find_witness(&problem) + .expect("canonical 3DM instance should be feasible"); + assert_eq!(bf_witness, vec![1, 1, 1, 0, 0]); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("direct ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(extracted, vec![1, 1, 1, 0, 0]); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_threedimensionalmatching_to_ilp_infeasible_instance() { + let problem = ThreeDimensionalMatching::new(2, vec![(0, 0, 0), (0, 1, 1)]); + let reduction: ReductionThreeDimensionalMatchingToILP = + ReduceTo::>::reduce_to(&problem); + + assert!( + BruteForce::new().find_witness(&problem).is_none(), + "source instance should be infeasible" + ); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "reduced ILP should be infeasible" + ); +} + +#[test] +fn test_threedimensionalmatching_to_ilp_direct_path_beats_indirect_chain() { + let problem = singleton_problem(); + let direct = ReduceTo::>::reduce_to(&problem); + + let to_three_partition = ReduceTo::::reduce_to(&problem); + let to_resource_constrained = + ReduceTo::::reduce_to(to_three_partition.target_problem()); + let indirect = 
ReduceTo::>::reduce_to(to_resource_constrained.target_problem()); + + let solver = ILPSolver::new(); + let direct_solution = solver + .solve(direct.target_problem()) + .expect("direct ILP should solve"); + let direct_source = direct.extract_solution(&direct_solution); + + assert_eq!(problem.evaluate(&direct_source), Or(true)); + assert!( + solver.solve(indirect.target_problem()).is_some(), + "indirect ILP should agree on feasibility" + ); + assert!(direct.target_problem().num_vars < indirect.target_problem().num_vars); + assert!( + direct.target_problem().constraints.len() < indirect.target_problem().constraints.len() + ); + + let graph = ReductionGraph::new(); + let src = ReductionGraph::variant_to_map(&ThreeDimensionalMatching::variant()); + let dst = ReductionGraph::variant_to_map(&ILP::::variant()); + let path = graph + .find_cheapest_path( + "ThreeDimensionalMatching", + &src, + "ILP", + &dst, + &ProblemSize::new(vec![ + ("universe_size", problem.universe_size()), + ("num_triples", problem.num_triples()), + ]), + &MinimizeSteps, + ) + .expect("reduction graph should find a direct 3DM -> ILP path"); + + assert_eq!(path.type_names(), vec!["ThreeDimensionalMatching", "ILP"]); +} diff --git a/src/unit_tests/rules/threedimensionalmatching_threepartition.rs b/src/unit_tests/rules/threedimensionalmatching_threepartition.rs new file mode 100644 index 000000000..3e5cb86b0 --- /dev/null +++ b/src/unit_tests/rules/threedimensionalmatching_threepartition.rs @@ -0,0 +1,105 @@ +use super::*; +use crate::models::misc::ThreePartition; +use crate::models::set::ThreeDimensionalMatching; +use crate::rules::ReduceTo; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +fn reduce( + universe_size: usize, + triples: &[(usize, usize, usize)], +) -> ( + ThreeDimensionalMatching, + ReductionThreeDimensionalMatchingToThreePartition, +) { + let source = ThreeDimensionalMatching::new(universe_size, triples.to_vec()); + let reduction = ReduceTo::::reduce_to(&source); + 
(source, reduction) +} + +#[test] +fn test_threedimensionalmatching_to_threepartition_q1_overhead_and_bounds() { + let (_source, reduction) = reduce(1, &[(0, 0, 0)]); + let target = reduction.target_problem(); + + assert_eq!(target.num_elements(), 21); + assert_eq!(target.num_groups(), 7); + assert_eq!(target.bound(), 42_949_673_924); + + let bound = u128::from(target.bound()); + let total_sum: u128 = target.sizes().iter().map(|&size| u128::from(size)).sum(); + assert_eq!(total_sum, bound * target.num_groups() as u128); + assert!(target + .sizes() + .iter() + .all(|&size| 4 * u128::from(size) > bound && 2 * u128::from(size) < bound)); +} + +#[test] +fn test_threedimensionalmatching_to_threepartition_q2_overhead_matches_vector() { + let (_source, reduction) = reduce(2, &[(0, 0, 0), (1, 1, 1)]); + let target = reduction.target_problem(); + + assert_eq!(target.num_elements(), 90); + assert_eq!(target.num_groups(), 30); + assert_eq!(target.bound(), 687_194_768_324); +} + +#[test] +fn test_threedimensionalmatching_to_threepartition_extracts_manual_q1_witness() { + let (source, reduction) = reduce(1, &[(0, 0, 0)]); + + // Step 3 witness for the unique 4-partition group {0,1,2,3} using pair (0,1): + // regulars 0..3, pairings 4..15, fillers 16..20. 
+ let target_config = vec![ + 0, 0, 1, 1, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 2, 3, 4, 5, 6, + ]; + + assert!(reduction.target_problem().evaluate(&target_config).0); + + let extracted = reduction.extract_solution(&target_config); + assert_eq!(extracted, vec![1]); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_threedimensionalmatching_to_threepartition_closed_loop_from_known_matching() { + let (source, reduction) = reduce(1, &[(0, 0, 0)]); + let target_solution = reduction.build_target_witness(&[1]); + + assert!(reduction.target_problem().evaluate(&target_solution).0); + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, vec![1]); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_threedimensionalmatching_to_threepartition_round_trip_q2_minimal_matching() { + let (source, reduction) = reduce(2, &[(0, 0, 0), (1, 1, 1)]); + let target_solution = reduction.build_target_witness(&[1, 1]); + + assert!(reduction.target_problem().evaluate(&target_solution).0); + + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, vec![1, 1]); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_threedimensionalmatching_to_threepartition_uncovered_coordinate_maps_to_fixed_no_instance() +{ + let (source, reduction) = reduce(2, &[(0, 0, 0), (0, 1, 1)]); + + assert!( + BruteForce::new().find_witness(&source).is_none(), + "source instance should be infeasible" + ); + assert_eq!(reduction.target_problem().sizes(), &[6, 6, 6, 6, 7, 9]); + assert_eq!(reduction.target_problem().bound(), 20); + assert!( + BruteForce::new() + .find_witness(reduction.target_problem()) + .is_none(), + "target instance should be infeasible" + ); +}