From e66c5c9ccb60c53cecf2d20733eed2e27fe46035 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Thu, 14 Mar 2024 12:07:13 -0700 Subject: [PATCH 01/15] Make literal picker independent of theory explanation order --- src/batsat/src/core.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 26bb69a..72034e3 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -2160,6 +2160,13 @@ impl VarState { fn var_decay_activity(&mut self) { self.var_inc *= 1.0 / self.var_decay; + if self.var_inc > 1e100 { + // Rescale: + for (_, x) in self.activity.iter_mut() { + *x *= 1e-100; + } + self.var_inc *= 1e-100; + } } #[inline(always)] @@ -2182,13 +2189,6 @@ impl VarState { /// Increase a variable with the current 'bump' value. fn var_bump_activity(&mut self, order_heap_data: &mut HeapData, v: Var) { self.activity[v] += self.var_inc; - if self.activity[v] > 1e100 { - // Rescale: - for (_, x) in self.activity.iter_mut() { - *x *= 1e-100; - } - self.var_inc *= 1e-100; - } // Update order_heap with respect to new activity: let mut order_heap = order_heap_data.promote(VarOrder { @@ -2407,7 +2407,9 @@ impl Eq for Watcher {} impl<'a> Comparator for VarOrder<'a> { fn cmp(&self, lhs: &Var, rhs: &Var) -> cmp::Ordering { - PartialOrd::partial_cmp(&self.activity[*rhs], &self.activity[*lhs]).expect("NaN activity") + PartialOrd::partial_cmp(&self.activity[*rhs], &self.activity[*lhs]) + .expect("NaN activity") + .then(lhs.cmp(rhs)) } } From cef23792cd7973377be3968fb0c2457a80fecbca Mon Sep 17 00:00:00 2001 From: dewert99 Date: Fri, 15 Mar 2024 11:43:28 -0700 Subject: [PATCH 02/15] Move `order_heap_data` into `VarState` --- src/batsat/src/core.rs | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 72034e3..8f31248 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -66,6 +66,8 @@ pub struct Solver { struct VarState { /// A heuristic measurement of the activity of a variable. activity: VMap, + /// A priority queue of variables ordered with respect to the variable activity. + order_heap_data: HeapData, /// Current assignment for each variable. ass: VMap, /// Stores reason and level for each variable. @@ -116,6 +118,8 @@ struct SolverV { // v.activity: VMap, // /// The current assignments. // v.assigns: VMap, + // /// A priority queue of variables ordered with respect to the variable activity. + // v.order_heap_data: HeapData, /// The preferred polarity of each variable. polarity: VMap, /// The users preferred polarity of each variable. @@ -125,8 +129,6 @@ struct SolverV { // /// Stores reason and level for each variable. /// `watches[lit]` is a list of constraints watching 'lit' (will go there if literal becomes true). watches_data: OccListsData, - /// A priority queue of variables ordered with respect to the variable activity. - order_heap_data: HeapData, /// If `false`, the constraints are already unsatisfiable. No part of the solver state may be used! ok: bool, /// Amount to bump next clause with. 
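A standalone sketch of the idea behind PATCH 01 above (illustrative names and a plain Vec, not batsat's API): breaking activity ties by variable index turns the comparator into a total order, so the variable the heap hands back no longer depends on the order in which theory explanations happened to insert variables:

use std::cmp::Ordering;

// Hypothetical mini-comparator mirroring the `.then(lhs.cmp(rhs))` fix:
// higher activity first; equal activities fall back to the smaller index.
fn var_order(lhs: (u32, f64), rhs: (u32, f64)) -> Ordering {
    rhs.1
        .partial_cmp(&lhs.1)
        .expect("NaN activity")
        .then(lhs.0.cmp(&rhs.0))
}

fn main() {
    // Two insertion orders, one result: the tie at activity 1.0 always
    // resolves in favor of the smaller variable index.
    let mut a = vec![(3u32, 1.0f64), (1, 1.0), (2, 2.0)];
    let mut b = vec![(1u32, 1.0f64), (2, 2.0), (3, 1.0)];
    a.sort_by(|x, y| var_order(*x, *y));
    b.sort_by(|x, y| var_order(*x, *y));
    assert_eq!(a, b);
    assert_eq!(a, vec![(2, 2.0), (1, 1.0), (3, 1.0)]);
}
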
@@ -1124,9 +1126,7 @@ impl SolverV { } fn order_heap(&mut self) -> Heap { - self.order_heap_data.promote(VarOrder { - activity: &self.vars.activity, - }) + self.vars.order_heap() } fn set_decision_var(&mut self, v: Var, b: bool) { @@ -1177,9 +1177,9 @@ impl SolverV { { let idx_tmp = utils::irand( &mut self.opts.random_seed, - self.order_heap_data.len() as i32, + self.vars.order_heap_data.len() as i32, ) as usize; - next = self.order_heap_data[idx_tmp]; + next = self.vars.order_heap_data[idx_tmp]; if self.value(next) == lbool::UNDEF && self.decision[next] { self.rnd_decisions += 1; } @@ -1391,8 +1391,7 @@ impl SolverV { let lvl = self.vars.level(q.var()); assert!(lvl <= conflict_level); if !self.seen[q.var()].is_seen() && lvl > 0 { - self.vars - .var_bump_activity(&mut self.order_heap_data, q.var()); + self.vars.var_bump_activity(q.var()); self.seen[q.var()] = Seen::SOURCE; if lvl == conflict_level { // at conflict level: need to eliminate this lit by resolution @@ -2074,7 +2073,6 @@ impl SolverV { decision: VMap::new(), // v.vardata: VMap::new(), watches_data: OccListsData::new(), - order_heap_data: HeapData::new(), ok: true, cla_inc: 1.0, // v.var_inc: 1.0, @@ -2115,6 +2113,7 @@ impl VarState { var_decay: opts.var_decay, trail: vec![], trail_lim: vec![], + order_heap_data: HeapData::new(), } } @@ -2186,14 +2185,18 @@ impl VarState { self.trail.push(p); } + fn order_heap(&mut self) -> Heap { + self.order_heap_data.promote(VarOrder { + activity: &self.activity, + }) + } + /// Increase a variable with the current 'bump' value. - fn var_bump_activity(&mut self, order_heap_data: &mut HeapData, v: Var) { + fn var_bump_activity(&mut self, v: Var) { self.activity[v] += self.var_inc; // Update order_heap with respect to new activity: - let mut order_heap = order_heap_data.promote(VarOrder { - activity: &self.activity, - }); + let mut order_heap = self.order_heap(); if order_heap.in_heap(v) { order_heap.decrease(v); } From d2c987e25896be07c9910d5c8ad268f47c5f0e54 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Fri, 15 Mar 2024 14:05:29 -0700 Subject: [PATCH 03/15] Cached activity levels in heap. --- Cargo.toml | 2 +- src/batsat/Cargo.toml | 2 +- src/batsat/src/core.rs | 43 +++++++++++++++-------- src/batsat/src/intmap.rs | 73 +++++++++++++++++++++++----------------- 4 files changed, 73 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cad3a60..acfee24 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ - [workspace] +resolver = "2" members = [ "src/batsat", diff --git a/src/batsat/Cargo.toml b/src/batsat/Cargo.toml index 6767568..15b9607 100644 --- a/src/batsat/Cargo.toml +++ b/src/batsat/Cargo.toml @@ -9,7 +9,7 @@ readme = "../../README.md" keywords = ["sat", "minisat"] categories = ["algorithms"] license = "MIT" -edition = "2018" +edition = "2021" publish = true diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 8f31248..1526202 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -33,6 +33,7 @@ use { #[cfg(feature = "logging")] use crate::clause::display::Print; +use crate::intmap::MemoComparator; /// The main solver structure. /// @@ -67,7 +68,7 @@ struct VarState { /// A heuristic measurement of the activity of a variable. activity: VMap, /// A priority queue of variables ordered with respect to the variable activity. - order_heap_data: HeapData, + order_heap_data: HeapData, /// Current assignment for each variable. ass: VMap, /// Stores reason and level for each variable. 
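A minimal sketch of why this patch caches the activity beside each key in the heap (hypothetical names, not batsat's types): the hot comparison in the sift loops then reads a single contiguous array instead of chasing the variable index back into the activity map. The cached copy must be refreshed whenever an activity changes, which is what this patch's edits to `decrease`, `increase`, and `update` below take care of:

/// One heap slot: the key plus a cached copy of its activity.
/// (Illustrative sketch only, not batsat's API.)
#[derive(Clone, Copy, Debug, PartialEq)]
struct Slot {
    var: u32,
    cached_activity: f64,
}

/// The comparison a sift loop would run: it touches only the heap array
/// itself, never the activity map — the point of caching the value inline.
fn better(a: Slot, b: Slot) -> bool {
    a.cached_activity > b.cached_activity
        || (a.cached_activity == b.cached_activity && a.var < b.var)
}

fn main() {
    let activity = [0.5f64, 2.0, 2.0]; // indexed by variable
    let heap: Vec<Slot> = (0u32..3)
        .map(|v| Slot { var: v, cached_activity: activity[v as usize] })
        .collect();
    assert!(better(heap[1], heap[0])); // higher activity wins
    assert!(better(heap[1], heap[2])); // equal activity: lower index wins
}
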
@@ -1125,7 +1126,7 @@ impl SolverV { self.vars.value_lit(x) } - fn order_heap(&mut self) -> Heap { + fn order_heap(&mut self) -> Heap { self.vars.order_heap() } @@ -1789,13 +1790,10 @@ impl SolverV { } fn rebuild_order_heap(&mut self) { - let mut vs = vec![]; - for v in (0..self.num_vars()).map(Var::from_idx) { - if self.decision[v] && self.value(v) == lbool::UNDEF { - vs.push(v); - } - } - self.order_heap().build(&vs); + let vs = (0..self.num_vars()) + .map(Var::from_idx) + .filter(|&v| self.decision[v]); + self.vars.rebuild_order_heap(vs); } /// Sort literals of `clause` so that unassigned literals are first, @@ -2185,7 +2183,7 @@ impl VarState { self.trail.push(p); } - fn order_heap(&mut self) -> Heap { + fn order_heap(&mut self) -> Heap { self.order_heap_data.promote(VarOrder { activity: &self.activity, }) @@ -2202,6 +2200,15 @@ impl VarState { } } + fn rebuild_order_heap(&mut self, eligible_vars: impl Iterator) { + let vars = eligible_vars.filter(|x| self.ass[*x] == lbool::UNDEF); + self.order_heap_data + .promote(VarOrder { + activity: &self.activity, + }) + .build(vars); + } + #[allow(dead_code)] fn iter_trail<'a>(&'a self) -> impl Iterator + 'a { self.trail.iter().map(|l| *l) @@ -2408,11 +2415,19 @@ impl PartialEq for Watcher { } impl Eq for Watcher {} -impl<'a> Comparator for VarOrder<'a> { - fn cmp(&self, lhs: &Var, rhs: &Var) -> cmp::Ordering { - PartialOrd::partial_cmp(&self.activity[*rhs], &self.activity[*lhs]) +impl<'a> Comparator<(Var, f64)> for VarOrder<'a> { + fn cmp(&self, lhs: &(Var, f64), rhs: &(Var, f64)) -> cmp::Ordering { + debug_assert_eq!(self.activity[rhs.0], rhs.1); + debug_assert_eq!(self.activity[lhs.0], lhs.1); + PartialOrd::partial_cmp(&rhs.1, &lhs.1) .expect("NaN activity") - .then(lhs.cmp(rhs)) + .then(lhs.0.cmp(&rhs.0)) + } +} + +impl<'a> MemoComparator for VarOrder<'a> { + fn value(&self, k: Var) -> f64 { + self.activity[k] } } diff --git a/src/batsat/src/intmap.rs b/src/batsat/src/intmap.rs index f1f48ff..f9e9e10 100644 --- a/src/batsat/src/intmap.rs +++ b/src/batsat/src/intmap.rs @@ -238,12 +238,12 @@ impl ops::Deref for IntSet { } #[derive(Debug, Clone)] -pub struct HeapData { - heap: Vec, +pub struct HeapData { + heap: Vec<(K, V)>, indices: IntMap, } -impl Default for HeapData { +impl Default for HeapData { fn default() -> Self { Self { heap: Vec::new(), @@ -252,7 +252,7 @@ impl Default for HeapData { } } -impl HeapData { +impl HeapData { pub fn new() -> Self { Self::default() } @@ -266,15 +266,15 @@ impl HeapData { self.indices.has(k) && self.indices[k] >= 0 } - pub fn promote>(&mut self, comp: Comp) -> Heap { + pub fn promote>(&mut self, comp: Comp) -> Heap { Heap { data: self, comp } } } -impl ops::Index for HeapData { +impl ops::Index for HeapData { type Output = K; fn index(&self, index: usize) -> &Self::Output { - &self.heap[index] + &self.heap[index].0 } } @@ -322,25 +322,29 @@ pub trait Comparator { } } +pub trait MemoComparator: Comparator<(K, V)> { + fn value(&self, k: K) -> V; +} + #[derive(Debug)] -pub struct Heap<'a, K: AsIndex + 'a, Comp: Comparator> { - data: &'a mut HeapData, +pub struct Heap<'a, K: AsIndex + 'a, V: 'a, Comp> { + data: &'a mut HeapData, comp: Comp, } -impl<'a, K: AsIndex + 'a, Comp: Comparator> ops::Deref for Heap<'a, K, Comp> { - type Target = HeapData; +impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::Deref for Heap<'a, K, V, Comp> { + type Target = HeapData; fn deref(&self) -> &Self::Target { &self.data } } -impl<'a, K: AsIndex + 'a, Comp: Comparator> ops::DerefMut for Heap<'a, K, Comp> { +impl<'a, K: AsIndex + 'a, V: 
'a, Comp> ops::DerefMut for Heap<'a, K, V, Comp> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.data } } -impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { +impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, V, Comp> { fn percolate_up(&mut self, mut i: u32) { let x = self.heap[i as usize]; let mut p = parent_index(i); @@ -348,12 +352,12 @@ impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { while i != 0 && self.comp.lt(&x, &self.heap[p as usize]) { self.heap[i as usize] = self.heap[p as usize]; let tmp = self.heap[p as usize]; - self.indices[tmp] = i as i32; + self.indices[tmp.0] = i as i32; i = p; p = parent_index(p); } self.heap[i as usize] = x; - self.indices[x] = i as i32; + self.indices[x.0] = i as i32; } fn percolate_down(&mut self, mut i: u32) { @@ -373,20 +377,23 @@ impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { } self.heap[i as usize] = self.heap[child as usize]; let tmp = self.heap[i as usize]; - self.indices[tmp] = i as i32; + self.indices[tmp.0] = i as i32; i = child; } self.heap[i as usize] = x; - self.indices[x] = i as i32; + self.indices[x.0] = i as i32; } + pub fn decrease(&mut self, k: K) { debug_assert!(self.in_heap(k)); let k_index = self.indices[k]; + self.heap[k_index as usize].1 = self.comp.value(k); self.percolate_up(k_index as u32); } pub fn increase(&mut self, k: K) { debug_assert!(self.in_heap(k)); let k_index = self.indices[k]; + self.heap[k_index as usize].1 = self.comp.value(k); self.percolate_down(k_index as u32); } @@ -396,6 +403,7 @@ impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { self.insert(k); } else { let k_index = self.indices[k]; + self.heap[k_index as usize].1 = self.comp.value(k); self.percolate_up(k_index as u32); let k_index = self.indices[k]; self.percolate_down(k_index as u32); @@ -407,7 +415,7 @@ impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { debug_assert!(!self.in_heap(k)); self.indices[k] = self.heap.len() as i32; - self.heap.push(k); + self.data.heap.push((k, self.comp.value(k))); let k_index = self.indices[k]; self.percolate_up(k_index as u32); } @@ -419,7 +427,7 @@ impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { if (k_pos as usize) < self.heap.len() - 1 { self.heap[k_pos as usize] = *self.heap.last().unwrap(); let tmp = self.heap[k_pos as usize]; - self.indices[tmp] = k_pos as i32; + self.indices[tmp.0] = k_pos as i32; self.heap.pop().expect("cannot pop from empty heap"); self.percolate_down(k_pos); } else { @@ -431,31 +439,34 @@ impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { let x = *self.heap.first().expect("heap is empty"); let lastval = *self.heap.last().expect("heap is empty"); self.heap[0] = lastval; - self.indices[lastval] = 0; - self.indices[x] = -1; + self.indices[lastval.0] = 0; + self.indices[x.0] = -1; self.heap.pop().expect("cannot pop from empty heap"); if self.heap.len() > 1 { self.percolate_down(0); } - x + x.0 } /// Rebuild the heap from scratch, using the elements in 'ns': - pub fn build(&mut self, ns: &[K]) { + pub fn build(&mut self, ns: impl Iterator) { { let data = &mut self.data; for &x in &data.heap { - data.indices[x] = -1; + data.indices[x.0] = -1; } } self.heap.clear(); - for (i, &x) in ns.iter().enumerate() { - // TODO: this should probably call reserve instead of relying on it being reserved already. 
- debug_assert!(self.indices.has(x)); - self.indices[x] = i as i32; - self.heap.push(x); - } + let ns = ns + .enumerate() + .inspect(|(i, x)| { + debug_assert!(self.data.indices.has(*x)); + self.data.indices[*x] = *i as i32; + }) + .map(|(_, x)| (x, self.comp.value(x))); + + self.data.heap.extend(ns); let mut i = self.heap.len() as i32 / 2 - 1; while i >= 0 { @@ -469,7 +480,7 @@ impl<'a, K: AsIndex + 'a, Comp: Comparator> Heap<'a, K, Comp> { { let data = &mut self.data; for &x in &data.heap { - data.indices[x] = -1; + data.indices[x.0] = -1; } } self.heap.clear(); From b14421875b980af4fbea2a683497ec2976a16c4c Mon Sep 17 00:00:00 2001 From: dewert99 Date: Fri, 15 Mar 2024 14:54:07 -0700 Subject: [PATCH 04/15] Switched activity level to use `f32` --- src/batsat/src/core.rs | 51 +++++++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 1526202..4539ebf 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -28,7 +28,7 @@ use { crate::interface::SolverInterface, crate::intmap::{Comparator, Heap, HeapData}, crate::theory::Theory, - std::{cmp, f64, fmt, i32, mem}, + std::{cmp, fmt, mem}, }; #[cfg(feature = "logging")] @@ -66,16 +66,16 @@ pub struct Solver { /// The current assignments. struct VarState { /// A heuristic measurement of the activity of a variable. - activity: VMap, + activity: VMap, /// A priority queue of variables ordered with respect to the variable activity. - order_heap_data: HeapData, + order_heap_data: HeapData, /// Current assignment for each variable. ass: VMap, /// Stores reason and level for each variable. vardata: VMap, /// Amount to bump next variable with. - var_inc: f64, - var_decay: f64, + var_inc: f32, + var_decay: f32, /// Assignment stack; stores all assigments made in the order they were made. 
trail: Vec, @@ -1126,7 +1126,7 @@ impl SolverV { self.vars.value_lit(x) } - fn order_heap(&mut self) -> Heap { + fn order_heap(&mut self) -> Heap { self.vars.order_heap() } @@ -1226,9 +1226,10 @@ impl SolverV { .vardata .insert_default(v, VarData::new(CRef::UNDEF, 0)); if self.opts.rnd_init_act { - self.vars - .activity - .insert_default(v, utils::drand(&mut self.opts.random_seed) * 0.00001); + self.vars.activity.insert_default( + v, + (utils::drand(&mut self.opts.random_seed) * 0.00001) as f32, + ); } else { self.vars.activity.insert_default(v, 0.0); } @@ -2101,6 +2102,16 @@ impl SolverV { } } +/// Large f32 that is still small enough that it can't cause another f32 to overflow to infinity +const THRESHOLD: f32 = 1.0141204e31; +#[test] +fn test_threshold() { + let f = f32::MAX * 2.0f32.powi(-1 - (f32::MANTISSA_DIGITS as i32)); + assert_eq!(THRESHOLD, f); + // adding THRESHOLD to a float can never make it overflow to infinity + assert_eq!(f32::MAX + THRESHOLD, f32::MAX) +} + impl VarState { fn new(opts: &SolverOpts) -> Self { Self { @@ -2157,12 +2168,12 @@ impl VarState { fn var_decay_activity(&mut self) { self.var_inc *= 1.0 / self.var_decay; - if self.var_inc > 1e100 { + if self.var_inc > THRESHOLD { // Rescale: for (_, x) in self.activity.iter_mut() { - *x *= 1e-100; + *x /= THRESHOLD; } - self.var_inc *= 1e-100; + self.var_inc /= THRESHOLD; } } @@ -2183,7 +2194,7 @@ impl VarState { self.trail.push(p); } - fn order_heap(&mut self) -> Heap { + fn order_heap(&mut self) -> Heap { self.order_heap_data.promote(VarOrder { activity: &self.activity, }) @@ -2332,7 +2343,7 @@ struct Watcher { } struct VarOrder<'a> { - activity: &'a VMap, + activity: &'a VMap, } /// Predicate to test whether a clause has been removed from some lit's watchlist @@ -2415,8 +2426,8 @@ impl PartialEq for Watcher { } impl Eq for Watcher {} -impl<'a> Comparator<(Var, f64)> for VarOrder<'a> { - fn cmp(&self, lhs: &(Var, f64), rhs: &(Var, f64)) -> cmp::Ordering { +impl<'a> Comparator<(Var, f32)> for VarOrder<'a> { + fn cmp(&self, lhs: &(Var, f32), rhs: &(Var, f32)) -> cmp::Ordering { debug_assert_eq!(self.activity[rhs.0], rhs.1); debug_assert_eq!(self.activity[lhs.0], lhs.1); PartialOrd::partial_cmp(&rhs.1, &lhs.1) @@ -2425,8 +2436,8 @@ impl<'a> Comparator<(Var, f64)> for VarOrder<'a> { } } -impl<'a> MemoComparator for VarOrder<'a> { - fn value(&self, k: Var) -> f64 { +impl<'a> MemoComparator for VarOrder<'a> { + fn value(&self, k: Var) -> f32 { self.activity[k] } } @@ -2467,7 +2478,7 @@ impl Watcher { /// This can be used to tune the solver heuristics. #[derive(Clone)] pub struct SolverOpts { - pub var_decay: f64, + pub var_decay: f32, pub clause_decay: f64, pub random_var_freq: f64, pub random_seed: f64, @@ -2520,7 +2531,7 @@ impl Default for SolverOpts { impl SolverOpts { /// Check that options are valid. 
pub fn check(&self) -> bool { - (0.0 < self.var_decay && self.var_decay < 1.0) + (1f32 / THRESHOLD < self.var_decay && self.var_decay < 1.0) && (0.0 < self.clause_decay && self.clause_decay < 1.0) && (0.0 <= self.random_var_freq && self.random_var_freq <= 1.0) && (0.0 < self.random_seed && self.random_seed < f64::INFINITY) From 1a0b923966c4b1ec389e56876b75af397e1f5b2a Mon Sep 17 00:00:00 2001 From: dewert99 Date: Sat, 16 Mar 2024 12:27:42 -0700 Subject: [PATCH 05/15] Don't rebuild order heap --- src/batsat/src/core.rs | 23 ++--------------------- src/batsat/src/intmap.rs | 27 --------------------------- 2 files changed, 2 insertions(+), 48 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 4539ebf..473580b 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -462,7 +462,6 @@ impl Solver { self.remove_satisfied(ClauseSetSelect::Original); // remove satisfied normal clauses } self.check_garbage(); - self.v.rebuild_order_heap(); self.v.simp_db_assigns = self.v.num_assigns() as i32; // (shouldn't depend on stats really, but it will do for now) @@ -1790,13 +1789,6 @@ impl SolverV { confl } - fn rebuild_order_heap(&mut self) { - let vs = (0..self.num_vars()) - .map(Var::from_idx) - .filter(|&v| self.decision[v]); - self.vars.rebuild_order_heap(vs); - } - /// Sort literals of `clause` so that unassigned literals are first, /// followed by literals in decreasing assignment level fn sort_clause_lits(&self, clause: &mut [Lit]) { @@ -1934,10 +1926,10 @@ impl SolverV { self.insert_var_order(x); } self.qhead = trail_lim_level as i32; - self.vars.trail.resize(trail_lim_level, Lit::UNDEF); + self.vars.trail.truncate(trail_lim_level); // eprintln!("decision_level {} -> {}", self.trail_lim.len(), level); self.th_st.clear(); - self.vars.trail_lim.resize(level as usize, 0); + self.vars.trail_lim.truncate(level as usize); } /// Detach a clause from watcher lists. @@ -2211,15 +2203,6 @@ impl VarState { } } - fn rebuild_order_heap(&mut self, eligible_vars: impl Iterator) { - let vars = eligible_vars.filter(|x| self.ass[*x] == lbool::UNDEF); - self.order_heap_data - .promote(VarOrder { - activity: &self.activity, - }) - .build(vars); - } - #[allow(dead_code)] fn iter_trail<'a>(&'a self) -> impl Iterator + 'a { self.trail.iter().map(|l| *l) @@ -2360,8 +2343,6 @@ enum Seen { } mod utils { - use super::*; - /// Finite subsequences of the Luby-sequence: /// /// > 0: 1 diff --git a/src/batsat/src/intmap.rs b/src/batsat/src/intmap.rs index f9e9e10..2d7cda4 100644 --- a/src/batsat/src/intmap.rs +++ b/src/batsat/src/intmap.rs @@ -448,33 +448,6 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, x.0 } - /// Rebuild the heap from scratch, using the elements in 'ns': - pub fn build(&mut self, ns: impl Iterator) { - { - let data = &mut self.data; - for &x in &data.heap { - data.indices[x.0] = -1; - } - } - self.heap.clear(); - - let ns = ns - .enumerate() - .inspect(|(i, x)| { - debug_assert!(self.data.indices.has(*x)); - self.data.indices[*x] = *i as i32; - }) - .map(|(_, x)| (x, self.comp.value(x))); - - self.data.heap.extend(ns); - - let mut i = self.heap.len() as i32 / 2 - 1; - while i >= 0 { - self.percolate_down(i as u32); - i -= 1; - } - } - pub fn clear_dispose(&mut self, dispose: bool) { // TODO: shouldn't the 'indices' map also be dispose-cleared? 
{ From 2f7b2cf5404cc7b6c91175203654d43359acb20e Mon Sep 17 00:00:00 2001 From: dewert99 Date: Sat, 16 Mar 2024 13:01:34 -0700 Subject: [PATCH 06/15] Extracted heap into its own module --- src/batsat/src/core.rs | 3 +- src/batsat/src/heap.rs | 193 +++++++++++++++++++++++++++++++ src/batsat/src/intmap.rs | 244 --------------------------------------- src/batsat/src/lib.rs | 1 + 4 files changed, 195 insertions(+), 246 deletions(-) create mode 100644 src/batsat/src/heap.rs diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 473580b..3f29684 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -25,15 +25,14 @@ use { self, lbool, CRef, ClauseAllocator, ClauseRef, DeletePred, LSet, Lit, OccLists, OccListsData, VMap, Var, }, + crate::heap::{Comparator, Heap, HeapData, MemoComparator}, crate::interface::SolverInterface, - crate::intmap::{Comparator, Heap, HeapData}, crate::theory::Theory, std::{cmp, fmt, mem}, }; #[cfg(feature = "logging")] use crate::clause::display::Print; -use crate::intmap::MemoComparator; /// The main solver structure. /// diff --git a/src/batsat/src/heap.rs b/src/batsat/src/heap.rs new file mode 100644 index 0000000..0e48711 --- /dev/null +++ b/src/batsat/src/heap.rs @@ -0,0 +1,193 @@ +use crate::intmap::{AsIndex, IntMap}; +use std::{cmp, ops}; + +#[derive(Debug, Clone)] +pub struct HeapData { + heap: Vec<(K, V)>, + indices: IntMap, +} + +impl Default for HeapData { + fn default() -> Self { + Self { + heap: Vec::new(), + indices: IntMap::new(), + } + } +} + +impl HeapData { + pub fn new() -> Self { + Self::default() + } + pub fn len(&self) -> usize { + self.heap.len() + } + pub fn is_empty(&self) -> bool { + self.heap.is_empty() + } + pub fn in_heap(&self, k: K) -> bool { + self.indices.has(k) && self.indices[k] >= 0 + } + + pub fn promote>(&mut self, comp: Comp) -> Heap { + Heap { data: self, comp } + } +} + +impl ops::Index for HeapData { + type Output = K; + fn index(&self, index: usize) -> &Self::Output { + &self.heap[index].0 + } +} + +pub trait Comparator { + fn cmp(&self, lhs: &T, rhs: &T) -> cmp::Ordering; + fn max(&self, lhs: T, rhs: T) -> T + where + T: Sized, + { + if self.ge(&rhs, &lhs) { + rhs + } else { + lhs + } + } + fn min(&self, lhs: T, rhs: T) -> T + where + T: Sized, + { + if self.le(&lhs, &rhs) { + lhs + } else { + rhs + } + } + fn le(&self, lhs: &T, rhs: &T) -> bool { + match self.cmp(lhs, rhs) { + cmp::Ordering::Less | cmp::Ordering::Equal => true, + _ => false, + } + } + fn lt(&self, lhs: &T, rhs: &T) -> bool { + match self.cmp(lhs, rhs) { + cmp::Ordering::Less => true, + _ => false, + } + } + #[inline] + fn gt(&self, lhs: &T, rhs: &T) -> bool { + self.lt(rhs, lhs) + } + #[inline] + fn ge(&self, lhs: &T, rhs: &T) -> bool { + self.le(rhs, lhs) + } +} + +pub trait MemoComparator: Comparator<(K, V)> { + fn value(&self, k: K) -> V; +} + +#[derive(Debug)] +pub struct Heap<'a, K: AsIndex + 'a, V: 'a, Comp> { + data: &'a mut HeapData, + comp: Comp, +} + +impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::Deref for Heap<'a, K, V, Comp> { + type Target = HeapData; + fn deref(&self) -> &Self::Target { + &self.data + } +} +impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::DerefMut for Heap<'a, K, V, Comp> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data + } +} + +impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, V, Comp> { + fn percolate_up(&mut self, mut i: u32) { + let x = self.heap[i as usize]; + let mut p = parent_index(i); + + while i != 0 && self.comp.lt(&x, &self.heap[p as usize]) { + self.heap[i 
as usize] = self.heap[p as usize]; + let tmp = self.heap[p as usize]; + self.indices[tmp.0] = i as i32; + i = p; + p = parent_index(p); + } + self.heap[i as usize] = x; + self.indices[x.0] = i as i32; + } + + fn percolate_down(&mut self, mut i: u32) { + let x = self.heap[i as usize]; + while (left_index(i) as usize) < self.heap.len() { + let child = if (right_index(i) as usize) < self.heap.len() + && self.comp.lt( + &self.heap[right_index(i) as usize], + &self.heap[left_index(i) as usize], + ) { + right_index(i) + } else { + left_index(i) + }; + if !self.comp.lt(&self.heap[child as usize], &x) { + break; + } + self.heap[i as usize] = self.heap[child as usize]; + let tmp = self.heap[i as usize]; + self.indices[tmp.0] = i as i32; + i = child; + } + self.heap[i as usize] = x; + self.indices[x.0] = i as i32; + } + + pub fn decrease(&mut self, k: K) { + debug_assert!(self.in_heap(k)); + let k_index = self.indices[k]; + self.heap[k_index as usize].1 = self.comp.value(k); + self.percolate_up(k_index as u32); + } + + pub fn insert(&mut self, k: K) { + self.indices.reserve(k, -1); + debug_assert!(!self.in_heap(k)); + + self.indices[k] = self.heap.len() as i32; + self.data.heap.push((k, self.comp.value(k))); + let k_index = self.indices[k]; + self.percolate_up(k_index as u32); + } + + pub fn remove_min(&mut self) -> K { + let x = *self.heap.first().expect("heap is empty"); + let lastval = *self.heap.last().expect("heap is empty"); + self.heap[0] = lastval; + self.indices[lastval.0] = 0; + self.indices[x.0] = -1; + self.heap.pop().expect("cannot pop from empty heap"); + if self.heap.len() > 1 { + self.percolate_down(0); + } + x.0 + } +} + +#[inline(always)] +fn left_index(i: u32) -> u32 { + i * 2 + 1 +} +#[inline(always)] +fn right_index(i: u32) -> u32 { + (i + 1) * 2 +} +#[inline(always)] +fn parent_index(i: u32) -> u32 { + (i.wrapping_sub(1)) >> 1 +} diff --git a/src/batsat/src/intmap.rs b/src/batsat/src/intmap.rs index 2d7cda4..1a64c6e 100644 --- a/src/batsat/src/intmap.rs +++ b/src/batsat/src/intmap.rs @@ -20,7 +20,6 @@ OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWA **************************************************************************************************/ use bit_vec::BitVec; -use std::cmp; use std::iter; use std::marker::PhantomData; use std::ops; @@ -236,246 +235,3 @@ impl ops::Deref for IntSet { &self.xs } } - -#[derive(Debug, Clone)] -pub struct HeapData { - heap: Vec<(K, V)>, - indices: IntMap, -} - -impl Default for HeapData { - fn default() -> Self { - Self { - heap: Vec::new(), - indices: IntMap::new(), - } - } -} - -impl HeapData { - pub fn new() -> Self { - Self::default() - } - pub fn len(&self) -> usize { - self.heap.len() - } - pub fn is_empty(&self) -> bool { - self.heap.is_empty() - } - pub fn in_heap(&self, k: K) -> bool { - self.indices.has(k) && self.indices[k] >= 0 - } - - pub fn promote>(&mut self, comp: Comp) -> Heap { - Heap { data: self, comp } - } -} - -impl ops::Index for HeapData { - type Output = K; - fn index(&self, index: usize) -> &Self::Output { - &self.heap[index].0 - } -} - -pub trait Comparator { - fn cmp(&self, lhs: &T, rhs: &T) -> cmp::Ordering; - fn max(&self, lhs: T, rhs: T) -> T - where - T: Sized, - { - if self.ge(&rhs, &lhs) { - rhs - } else { - lhs - } - } - fn min(&self, lhs: T, rhs: T) -> T - where - T: Sized, - { - if self.le(&lhs, &rhs) { - lhs - } else { - rhs - } - } - fn le(&self, lhs: &T, rhs: &T) -> bool { - match self.cmp(lhs, rhs) { - cmp::Ordering::Less | cmp::Ordering::Equal => true, - _ => false, - 
} - } - fn lt(&self, lhs: &T, rhs: &T) -> bool { - match self.cmp(lhs, rhs) { - cmp::Ordering::Less => true, - _ => false, - } - } - #[inline] - fn gt(&self, lhs: &T, rhs: &T) -> bool { - self.lt(rhs, lhs) - } - #[inline] - fn ge(&self, lhs: &T, rhs: &T) -> bool { - self.le(rhs, lhs) - } -} - -pub trait MemoComparator: Comparator<(K, V)> { - fn value(&self, k: K) -> V; -} - -#[derive(Debug)] -pub struct Heap<'a, K: AsIndex + 'a, V: 'a, Comp> { - data: &'a mut HeapData, - comp: Comp, -} - -impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::Deref for Heap<'a, K, V, Comp> { - type Target = HeapData; - fn deref(&self) -> &Self::Target { - &self.data - } -} -impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::DerefMut for Heap<'a, K, V, Comp> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.data - } -} - -impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, V, Comp> { - fn percolate_up(&mut self, mut i: u32) { - let x = self.heap[i as usize]; - let mut p = parent_index(i); - - while i != 0 && self.comp.lt(&x, &self.heap[p as usize]) { - self.heap[i as usize] = self.heap[p as usize]; - let tmp = self.heap[p as usize]; - self.indices[tmp.0] = i as i32; - i = p; - p = parent_index(p); - } - self.heap[i as usize] = x; - self.indices[x.0] = i as i32; - } - - fn percolate_down(&mut self, mut i: u32) { - let x = self.heap[i as usize]; - while (left_index(i) as usize) < self.heap.len() { - let child = if (right_index(i) as usize) < self.heap.len() - && self.comp.lt( - &self.heap[right_index(i) as usize], - &self.heap[left_index(i) as usize], - ) { - right_index(i) - } else { - left_index(i) - }; - if !self.comp.lt(&self.heap[child as usize], &x) { - break; - } - self.heap[i as usize] = self.heap[child as usize]; - let tmp = self.heap[i as usize]; - self.indices[tmp.0] = i as i32; - i = child; - } - self.heap[i as usize] = x; - self.indices[x.0] = i as i32; - } - - pub fn decrease(&mut self, k: K) { - debug_assert!(self.in_heap(k)); - let k_index = self.indices[k]; - self.heap[k_index as usize].1 = self.comp.value(k); - self.percolate_up(k_index as u32); - } - pub fn increase(&mut self, k: K) { - debug_assert!(self.in_heap(k)); - let k_index = self.indices[k]; - self.heap[k_index as usize].1 = self.comp.value(k); - self.percolate_down(k_index as u32); - } - - /// Safe variant of insert/decrease/increase: - pub fn update(&mut self, k: K) { - if !self.in_heap(k) { - self.insert(k); - } else { - let k_index = self.indices[k]; - self.heap[k_index as usize].1 = self.comp.value(k); - self.percolate_up(k_index as u32); - let k_index = self.indices[k]; - self.percolate_down(k_index as u32); - } - } - - pub fn insert(&mut self, k: K) { - self.indices.reserve(k, -1); - debug_assert!(!self.in_heap(k)); - - self.indices[k] = self.heap.len() as i32; - self.data.heap.push((k, self.comp.value(k))); - let k_index = self.indices[k]; - self.percolate_up(k_index as u32); - } - - pub fn remove(&mut self, k: K) { - debug_assert!(self.in_heap(k)); - let k_pos = self.indices[k] as u32; - self.indices[k] = -1; - if (k_pos as usize) < self.heap.len() - 1 { - self.heap[k_pos as usize] = *self.heap.last().unwrap(); - let tmp = self.heap[k_pos as usize]; - self.indices[tmp.0] = k_pos as i32; - self.heap.pop().expect("cannot pop from empty heap"); - self.percolate_down(k_pos); - } else { - self.heap.pop().expect("cannot pop from empty heap"); - } - } - - pub fn remove_min(&mut self) -> K { - let x = *self.heap.first().expect("heap is empty"); - let lastval = *self.heap.last().expect("heap is empty"); - 
self.heap[0] = lastval; - self.indices[lastval.0] = 0; - self.indices[x.0] = -1; - self.heap.pop().expect("cannot pop from empty heap"); - if self.heap.len() > 1 { - self.percolate_down(0); - } - x.0 - } - - pub fn clear_dispose(&mut self, dispose: bool) { - // TODO: shouldn't the 'indices' map also be dispose-cleared? - { - let data = &mut self.data; - for &x in &data.heap { - data.indices[x.0] = -1; - } - } - self.heap.clear(); - if dispose { - self.heap.shrink_to_fit(); - } - } - - pub fn clear(&mut self) { - self.clear_dispose(false) - } -} - -#[inline(always)] -fn left_index(i: u32) -> u32 { - i * 2 + 1 -} -#[inline(always)] -fn right_index(i: u32) -> u32 { - (i + 1) * 2 -} -#[inline(always)] -fn parent_index(i: u32) -> u32 { - (i.wrapping_sub(1)) >> 1 -} diff --git a/src/batsat/src/lib.rs b/src/batsat/src/lib.rs index 244c276..b8556fe 100644 --- a/src/batsat/src/lib.rs +++ b/src/batsat/src/lib.rs @@ -51,6 +51,7 @@ pub mod clause; pub mod core; pub mod dimacs; pub mod drat; +mod heap; pub mod interface; pub mod intmap; pub mod theory; From 796e42b008fa3a45e9d10ab35bffd12e6c9ed3b1 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Mon, 18 Mar 2024 09:37:32 -0700 Subject: [PATCH 07/15] Fixed activity rescale to also rescale activities cached in heap --- src/batsat/src/core.rs | 8 ++++++-- src/batsat/src/heap.rs | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 3f29684..5c076e6 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -2160,11 +2160,15 @@ impl VarState { fn var_decay_activity(&mut self) { self.var_inc *= 1.0 / self.var_decay; if self.var_inc > THRESHOLD { + let scale = 2.0_f32.powi(f32::MIN_EXP); // Rescale: for (_, x) in self.activity.iter_mut() { - *x /= THRESHOLD; + *x *= scale; } - self.var_inc /= THRESHOLD; + for (_, x) in self.order_heap_data.heap.iter_mut() { + *x *= scale + } + self.var_inc *= scale; } } diff --git a/src/batsat/src/heap.rs b/src/batsat/src/heap.rs index 0e48711..85d6147 100644 --- a/src/batsat/src/heap.rs +++ b/src/batsat/src/heap.rs @@ -3,7 +3,7 @@ use std::{cmp, ops}; #[derive(Debug, Clone)] pub struct HeapData { - heap: Vec<(K, V)>, + pub(crate) heap: Vec<(K, V)>, indices: IntMap, } From dbe576ee3ed031fdf16b295dc859b42082be8720 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Mon, 18 Mar 2024 12:37:46 -0700 Subject: [PATCH 08/15] Switched `order_heap` to be a quaternary heap --- src/batsat/src/core.rs | 20 +++++- src/batsat/src/heap.rs | 135 ++++++++++++++++++++++++++++++----------- 2 files changed, 118 insertions(+), 37 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 5c076e6..d284e12 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -2165,7 +2165,7 @@ impl VarState { for (_, x) in self.activity.iter_mut() { *x *= scale; } - for (_, x) in self.order_heap_data.heap.iter_mut() { + for (_, x) in self.order_heap_data.heap_mut().iter_mut() { *x *= scale } self.var_inc *= scale; @@ -2410,10 +2410,24 @@ impl PartialEq for Watcher { } impl Eq for Watcher {} +impl<'a> VarOrder<'a> { + fn check_activity(&self, var: Var) -> f32 { + if var == Var::UNDEF { + 0.0 + } else { + self.activity[var] + } + } +} + impl<'a> Comparator<(Var, f32)> for VarOrder<'a> { + fn max_value(&self) -> (Var, f32) { + (Var::UNDEF, 0.0) + } + fn cmp(&self, lhs: &(Var, f32), rhs: &(Var, f32)) -> cmp::Ordering { - debug_assert_eq!(self.activity[rhs.0], rhs.1); - debug_assert_eq!(self.activity[lhs.0], lhs.1); + debug_assert_eq!(self.check_activity(rhs.0), 
rhs.1); + debug_assert_eq!(self.check_activity(lhs.0), lhs.1); PartialOrd::partial_cmp(&rhs.1, &lhs.1) .expect("NaN activity") .then(lhs.0.cmp(&rhs.0)) diff --git a/src/batsat/src/heap.rs b/src/batsat/src/heap.rs index 85d6147..e8bc043 100644 --- a/src/batsat/src/heap.rs +++ b/src/batsat/src/heap.rs @@ -1,16 +1,20 @@ use crate::intmap::{AsIndex, IntMap}; -use std::{cmp, ops}; +use std::fmt::Debug; +use std::{cmp, mem, ops}; +/// Quaternary Heap #[derive(Debug, Clone)] pub struct HeapData { - pub(crate) heap: Vec<(K, V)>, + heap: Box<[(K, V)]>, + next_slot: usize, indices: IntMap, } impl Default for HeapData { fn default() -> Self { Self { - heap: Vec::new(), + heap: Box::new([]), + next_slot: 0, indices: IntMap::new(), } } @@ -24,7 +28,7 @@ impl HeapData { self.heap.len() } pub fn is_empty(&self) -> bool { - self.heap.is_empty() + self.next_slot <= ROOT as usize } pub fn in_heap(&self, k: K) -> bool { self.indices.has(k) && self.indices[k] >= 0 @@ -33,6 +37,15 @@ impl HeapData { pub fn promote>(&mut self, comp: Comp) -> Heap { Heap { data: self, comp } } + + /// Raw mutable access to all the elements of the heap + pub(crate) fn heap_mut(&mut self) -> &mut [(K, V)] { + if self.next_slot == 0 { + &mut [] + } else { + &mut self.heap[ROOT as usize..self.next_slot] + } + } } impl ops::Index for HeapData { @@ -43,6 +56,7 @@ impl ops::Index for HeapData { } pub trait Comparator { + fn max_value(&self) -> T; fn cmp(&self, lhs: &T, rhs: &T) -> cmp::Ordering; fn max(&self, lhs: T, rhs: T) -> T where @@ -109,11 +123,39 @@ impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::DerefMut for Heap<'a, K, V, Comp> { } impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, V, Comp> { + // ensure size is always a multiple of 4 + #[cold] + #[inline(never)] + fn heap_reserve(&mut self) { + debug_assert_eq!(self.next_slot, self.data.len()); + if self.next_slot == 0 { + self.next_slot = ROOT as usize; + // Enough space for the root and 4 children + self.heap = vec![self.comp.max_value(); 8].into_boxed_slice(); + } else { + let new_size = self.next_slot << 2; + let mut heap = mem::replace(&mut self.heap, Box::new([])).into_vec(); + heap.resize(new_size, self.comp.max_value()); + self.heap = heap.into_boxed_slice(); + } + } + + #[inline] + fn heap_push(&mut self, k: K, v: V) -> u32 { + if self.next_slot >= self.heap.len() { + self.heap_reserve(); + assert!(self.next_slot < self.heap.len()); + } + let slot = self.next_slot; + self.heap[slot] = (k, v); + self.next_slot += 1; + slot as u32 + } fn percolate_up(&mut self, mut i: u32) { let x = self.heap[i as usize]; let mut p = parent_index(i); - while i != 0 && self.comp.lt(&x, &self.heap[p as usize]) { + while i != ROOT && self.comp.lt(&x, &self.heap[p as usize]) { self.heap[i as usize] = self.heap[p as usize]; let tmp = self.heap[p as usize]; self.indices[tmp.0] = i as i32; @@ -124,24 +166,40 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, self.indices[x.0] = i as i32; } + #[inline] + fn bundle(&self, i: u32) -> (u32, (K, V)) { + (i, self.heap[i as usize]) + } + + #[inline] + fn min(&self, x: (u32, (K, V)), y: (u32, (K, V))) -> (u32, (K, V)) { + if self.comp.lt(&x.1, &y.1) { + x + } else { + y + } + } + fn percolate_down(&mut self, mut i: u32) { let x = self.heap[i as usize]; - while (left_index(i) as usize) < self.heap.len() { - let child = if (right_index(i) as usize) < self.heap.len() - && self.comp.lt( - &self.heap[right_index(i) as usize], - &self.heap[left_index(i) as usize], - ) { - right_index(i) - } else { - 
left_index(i) - }; - if !self.comp.lt(&self.heap[child as usize], &x) { + let len = (self.next_slot + 3) & (usize::MAX - 3); // round up to nearest multiple of 4 + // since the heap is padded with maximum values we can pretend that these are part of the + // heap but never swap with them + assert!(len <= self.heap.len()); // hopefully this lets us eliminate bounds checks + while (right_index(i) as usize) < len { + let left_index = left_index(i); + let b0 = self.bundle(left_index); + let b1 = self.bundle(left_index + 1); + let b2 = self.bundle(left_index + 2); + let b3 = self.bundle(left_index + 3); + let b01 = self.min(b0, b1); + let b23 = self.min(b2, b3); + let (child, min) = self.min(b01, b23); + if !self.comp.lt(&min, &x) { break; } - self.heap[i as usize] = self.heap[child as usize]; - let tmp = self.heap[i as usize]; - self.indices[tmp.0] = i as i32; + self.heap[i as usize] = min; + self.indices[min.0] = i as i32; i = child; } self.heap[i as usize] = x; @@ -158,36 +216,45 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, pub fn insert(&mut self, k: K) { self.indices.reserve(k, -1); debug_assert!(!self.in_heap(k)); - - self.indices[k] = self.heap.len() as i32; - self.data.heap.push((k, self.comp.value(k))); - let k_index = self.indices[k]; - self.percolate_up(k_index as u32); + let k_index = self.heap_push(k, self.comp.value(k)); + self.indices[k] = k_index as i32; + self.percolate_up(k_index); } pub fn remove_min(&mut self) -> K { - let x = *self.heap.first().expect("heap is empty"); - let lastval = *self.heap.last().expect("heap is empty"); - self.heap[0] = lastval; - self.indices[lastval.0] = 0; + assert!(!self.is_empty(), "cannot pop from empty heap"); + let x = self.heap[ROOT as usize]; + let last = self.next_slot - 1; + self.next_slot = last; self.indices[x.0] = -1; - self.heap.pop().expect("cannot pop from empty heap"); - if self.heap.len() > 1 { - self.percolate_down(0); + if self.is_empty() { + self.heap[last] = self.comp.max_value(); + return x.0; } + let lastval = self.heap[last]; + self.heap[last] = self.comp.max_value(); + self.heap[ROOT as usize] = lastval; + self.indices[lastval.0] = ROOT as i32; + self.percolate_down(ROOT); x.0 } } +/// Root of the quaternary heap +/// By using 3 as the root we ensure each chunk of 4 children has a multiple of 4 starting index +/// This gives the chunks a better chance of being cache aligned, i.e. 
they are cache aligned if +/// the allocation is cache aligned +const ROOT: u32 = 3; + #[inline(always)] fn left_index(i: u32) -> u32 { - i * 2 + 1 + (i - 2) << 2 } #[inline(always)] fn right_index(i: u32) -> u32 { - (i + 1) * 2 + left_index(i) + 3 } #[inline(always)] fn parent_index(i: u32) -> u32 { - (i.wrapping_sub(1)) >> 1 + (i >> 2) + 2 } From c3a8b38e7251adea268ea073480a92a12b2a61fc Mon Sep 17 00:00:00 2001 From: dewert99 Date: Mon, 18 Mar 2024 16:35:43 -0700 Subject: [PATCH 09/15] Tried to make heap comparisons less branchy --- src/batsat/src/clause.rs | 4 +-- src/batsat/src/core.rs | 32 ++++++++++++++++++++++++ src/batsat/src/heap.rs | 54 ++++++++++++++++------------------------ 3 files changed, 55 insertions(+), 35 deletions(-) diff --git a/src/batsat/src/clause.rs b/src/batsat/src/clause.rs index 1f386d7..5f32e80 100644 --- a/src/batsat/src/clause.rs +++ b/src/batsat/src/clause.rs @@ -24,7 +24,7 @@ use { alloc::{self, RegionAllocator}, intmap::{AsIndex, IntMap, IntMapBool, IntSet}, }, - std::{fmt, iter::DoubleEndedIterator, ops, slice, u32}, + std::{fmt, iter::DoubleEndedIterator, ops, slice}, }; #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -42,7 +42,7 @@ impl fmt::Debug for Var { } impl Var { - pub const UNDEF: Var = Var(!0); + pub const UNDEF: Var = Var(u32::MAX / 2); #[inline(always)] pub(crate) fn from_idx(idx: u32) -> Self { debug_assert!(idx < u32::MAX / 2, "Var::from_idx: index too large"); diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index d284e12..88e88e1 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -2432,6 +2432,38 @@ impl<'a> Comparator<(Var, f32)> for VarOrder<'a> { .expect("NaN activity") .then(lhs.0.cmp(&rhs.0)) } + + fn le(&self, lhs: &(Var, f32), rhs: &(Var, f32)) -> bool { + // 0 if lhs.0 <= rhs.0, 1 otherwise + let v_diff = (rhs.0.idx().wrapping_sub(lhs.0.idx())) >> (u32::BITS - 1); + let a_diff = lhs.1 - rhs.1; + let a_diff_b = a_diff.to_bits() as i32; + let diff = a_diff_b.wrapping_sub(v_diff as i32); + let res = diff >= 0; + debug_assert_eq!(res, self.cmp(lhs, rhs).is_le()); + res + // if rhs.1 < lhs.1 { + // // a_diff is positive and so a_diff_b >= 0 + // // since a_diff != 0.0, a_diff_b != 0 (that would corrispond to +0.0) + // // subtracting v_diff will keep it positive so diff >= 0 + // true + // } else if rhs.1 == lhs.1 { + // // since the activitly levels are always positive a_diff = +0 + // // this will make a_diff_b = 0 + // if lhs.0 <= rhs.0 { + // // v_diff = 0, so diff = 0 - 0 = 0 so diff >= 0 + // true + // } else { + // // v_diff = 1, so diff = 0 - 1 = -1 so !(diff >= 0) + // false + // } + // } else{ + // // a_diff is negative a_diff_b <= 0 + // // since a_diff != 0.0, a_diff_b != i32::MIN (that would corrispond to -0.0) + // // this prevents the subtraction from underflowing so !(diff >= 0) + // false + // } + } } impl<'a> MemoComparator for VarOrder<'a> { diff --git a/src/batsat/src/heap.rs b/src/batsat/src/heap.rs index e8bc043..c945aed 100644 --- a/src/batsat/src/heap.rs +++ b/src/batsat/src/heap.rs @@ -155,7 +155,7 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, let x = self.heap[i as usize]; let mut p = parent_index(i); - while i != ROOT && self.comp.lt(&x, &self.heap[p as usize]) { + while i != ROOT && self.comp.le(&x, &self.heap[p as usize]) { self.heap[i as usize] = self.heap[p as usize]; let tmp = self.heap[p as usize]; self.indices[tmp.0] = i as i32; @@ -166,44 +166,36 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, 
self.indices[x.0] = i as i32; } - #[inline] - fn bundle(&self, i: u32) -> (u32, (K, V)) { - (i, self.heap[i as usize]) - } - - #[inline] - fn min(&self, x: (u32, (K, V)), y: (u32, (K, V))) -> (u32, (K, V)) { - if self.comp.lt(&x.1, &y.1) { - x - } else { - y - } - } - fn percolate_down(&mut self, mut i: u32) { let x = self.heap[i as usize]; let len = (self.next_slot + 3) & (usize::MAX - 3); // round up to nearest multiple of 4 // since the heap is padded with maximum values we can pretend that these are part of the // heap but never swap with them - assert!(len <= self.heap.len()); // hopefully this lets us eliminate bounds checks - while (right_index(i) as usize) < len { + let heap = &mut self.data.heap[..len]; + loop { + let bundle = |x| (x, heap[x as usize]); + let min = + |x: (u32, (K, V)), y: (u32, (K, V))| if self.comp.le(&x.1, &y.1) { x } else { y }; let left_index = left_index(i); - let b0 = self.bundle(left_index); - let b1 = self.bundle(left_index + 1); - let b2 = self.bundle(left_index + 2); - let b3 = self.bundle(left_index + 3); - let b01 = self.min(b0, b1); - let b23 = self.min(b2, b3); - let (child, min) = self.min(b01, b23); - if !self.comp.lt(&min, &x) { + if left_index as usize + 3 >= heap.len() { + break; + } + let b0 = bundle(left_index); + let b1 = bundle(left_index + 1); + let b2 = bundle(left_index + 2); + let b3 = bundle(left_index + 3); + let b01 = min(b0, b1); + let b23 = min(b2, b3); + let (child, min) = min(b01, b23); + if !self.comp.le(&min, &x) { break; } - self.heap[i as usize] = min; - self.indices[min.0] = i as i32; + heap[i as usize] = min; + self.data.indices[min.0] = i as i32; i = child; } - self.heap[i as usize] = x; - self.indices[x.0] = i as i32; + heap[i as usize] = x; + self.data.indices[x.0] = i as i32; } pub fn decrease(&mut self, k: K) { @@ -251,10 +243,6 @@ fn left_index(i: u32) -> u32 { (i - 2) << 2 } #[inline(always)] -fn right_index(i: u32) -> u32 { - left_index(i) + 3 -} -#[inline(always)] fn parent_index(i: u32) -> u32 { (i >> 2) + 2 } From 9972854c3d65e6e82edb94bef061c16467d5d48a Mon Sep 17 00:00:00 2001 From: dewert99 Date: Mon, 18 Mar 2024 20:30:56 -0700 Subject: [PATCH 10/15] Replaced heap comparisons with u64 comparisons by utilizing https://stackoverflow.com/questions/33678827/compare-floating-point-numbers-as-integers --- src/batsat/src/core.rs | 51 +++++++++++++----------------------------- src/batsat/src/heap.rs | 36 ++++++++++++++++++----------- 2 files changed, 38 insertions(+), 49 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 88e88e1..96f4c03 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -2420,49 +2420,28 @@ impl<'a> VarOrder<'a> { } } +const COMP_MASK: u64 = (u32::MAX as u64) << u32::BITS; impl<'a> Comparator<(Var, f32)> for VarOrder<'a> { + type Comp = u64; + + #[inline] fn max_value(&self) -> (Var, f32) { (Var::UNDEF, 0.0) } - fn cmp(&self, lhs: &(Var, f32), rhs: &(Var, f32)) -> cmp::Ordering { - debug_assert_eq!(self.check_activity(rhs.0), rhs.1); - debug_assert_eq!(self.check_activity(lhs.0), lhs.1); - PartialOrd::partial_cmp(&rhs.1, &lhs.1) - .expect("NaN activity") - .then(lhs.0.cmp(&rhs.0)) + #[inline] + fn to_cmp_form(&self, v: &(Var, f32)) -> u64 { + debug_assert_eq!(self.check_activity(v.0), v.1); + let x = ((v.1.to_bits() as u64) << u32::BITS) | (v.0.idx() as u64); + x ^ COMP_MASK } - fn le(&self, lhs: &(Var, f32), rhs: &(Var, f32)) -> bool { - // 0 if lhs.0 <= rhs.0, 1 otherwise - let v_diff = (rhs.0.idx().wrapping_sub(lhs.0.idx())) >> (u32::BITS - 1); - let 
a_diff = lhs.1 - rhs.1; - let a_diff_b = a_diff.to_bits() as i32; - let diff = a_diff_b.wrapping_sub(v_diff as i32); - let res = diff >= 0; - debug_assert_eq!(res, self.cmp(lhs, rhs).is_le()); - res - // if rhs.1 < lhs.1 { - // // a_diff is positive and so a_diff_b >= 0 - // // since a_diff != 0.0, a_diff_b != 0 (that would corrispond to +0.0) - // // subtracting v_diff will keep it positive so diff >= 0 - // true - // } else if rhs.1 == lhs.1 { - // // since the activitly levels are always positive a_diff = +0 - // // this will make a_diff_b = 0 - // if lhs.0 <= rhs.0 { - // // v_diff = 0, so diff = 0 - 0 = 0 so diff >= 0 - // true - // } else { - // // v_diff = 1, so diff = 0 - 1 = -1 so !(diff >= 0) - // false - // } - // } else{ - // // a_diff is negative a_diff_b <= 0 - // // since a_diff != 0.0, a_diff_b != i32::MIN (that would corrispond to -0.0) - // // this prevents the subtraction from underflowing so !(diff >= 0) - // false - // } + #[inline] + fn from_cmp_form(&self, c: Self::Comp) -> (Var, f32) { + let c = c ^ COMP_MASK; + let v = Var::unsafe_from_idx((c & (u32::MAX as u64)) as u32); + let a = f32::from_bits((c >> u32::BITS) as u32); + (v, a) } } diff --git a/src/batsat/src/heap.rs b/src/batsat/src/heap.rs index c945aed..4d925d4 100644 --- a/src/batsat/src/heap.rs +++ b/src/batsat/src/heap.rs @@ -56,8 +56,15 @@ impl ops::Index for HeapData { } pub trait Comparator { + type Comp: Ord; + fn max_value(&self) -> T; - fn cmp(&self, lhs: &T, rhs: &T) -> cmp::Ordering; + fn to_cmp_form(&self, v: &T) -> Self::Comp; + fn from_cmp_form(&self, c: Self::Comp) -> T; + + fn cmp(&self, lhs: &T, rhs: &T) -> cmp::Ordering { + self.to_cmp_form(&lhs).cmp(&self.to_cmp_form(&rhs)) + } fn max(&self, lhs: T, rhs: T) -> T where T: Sized, @@ -151,11 +158,13 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, self.next_slot += 1; slot as u32 } + fn percolate_up(&mut self, mut i: u32) { let x = self.heap[i as usize]; + let xc = self.comp.to_cmp_form(&x); let mut p = parent_index(i); - while i != ROOT && self.comp.le(&x, &self.heap[p as usize]) { + while i != ROOT && xc < self.comp.to_cmp_form(&self.heap[p as usize]) { self.heap[i as usize] = self.heap[p as usize]; let tmp = self.heap[p as usize]; self.indices[tmp.0] = i as i32; @@ -167,33 +176,34 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, } fn percolate_down(&mut self, mut i: u32) { - let x = self.heap[i as usize]; + let x = self.comp.to_cmp_form(&self.heap[i as usize]); let len = (self.next_slot + 3) & (usize::MAX - 3); // round up to nearest multiple of 4 // since the heap is padded with maximum values we can pretend that these are part of the // heap but never swap with them let heap = &mut self.data.heap[..len]; loop { - let bundle = |x| (x, heap[x as usize]); - let min = - |x: (u32, (K, V)), y: (u32, (K, V))| if self.comp.le(&x.1, &y.1) { x } else { y }; + let min = |x: (u32, Comp::Comp), y: (u32, Comp::Comp)| if x.1 < y.1 { x } else { y }; let left_index = left_index(i); - if left_index as usize + 3 >= heap.len() { + let Some(arr) = heap.get(left_index as usize..left_index as usize + 4) else { break; - } - let b0 = bundle(left_index); - let b1 = bundle(left_index + 1); - let b2 = bundle(left_index + 2); - let b3 = bundle(left_index + 3); + }; + let bundle = |x| (left_index + x, self.comp.to_cmp_form(&arr[x as usize])); + let b0 = bundle(0); + let b1 = bundle(1); + let b2 = bundle(2); + let b3 = bundle(3); let b01 = min(b0, b1); let b23 = min(b2, b3); let (child, min) = min(b01, b23); - if 
!self.comp.le(&min, &x) { + if min > x { break; } + let min = self.comp.from_cmp_form(min); heap[i as usize] = min; self.data.indices[min.0] = i as i32; i = child; } + let x = self.comp.from_cmp_form(x); heap[i as usize] = x; self.data.indices[x.0] = i as i32; } From 288548b1169bf5713088c61dc9dc3f8a9a6b08c2 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Tue, 19 Mar 2024 09:47:09 -0700 Subject: [PATCH 11/15] Stored `u64`s directly in order_heap_data --- src/batsat/src/core.rs | 69 +++++++++++------------ src/batsat/src/heap.rs | 124 +++++++++++++---------------------------- 2 files changed, 70 insertions(+), 123 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 96f4c03..210b0a5 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -25,7 +25,7 @@ use { self, lbool, CRef, ClauseAllocator, ClauseRef, DeletePred, LSet, Lit, OccLists, OccListsData, VMap, Var, }, - crate::heap::{Comparator, Heap, HeapData, MemoComparator}, + crate::heap::{CachedKeyComparator, Heap, HeapData}, crate::interface::SolverInterface, crate::theory::Theory, std::{cmp, fmt, mem}, @@ -67,7 +67,7 @@ struct VarState { /// A heuristic measurement of the activity of a variable. activity: VMap, /// A priority queue of variables ordered with respect to the variable activity. - order_heap_data: HeapData, + order_heap_data: HeapData, /// Current assignment for each variable. ass: VMap, /// Stores reason and level for each variable. @@ -1124,7 +1124,7 @@ impl SolverV { self.vars.value_lit(x) } - fn order_heap(&mut self) -> Heap { + fn order_heap(&mut self) -> Heap { self.vars.order_heap() } @@ -1178,7 +1178,7 @@ impl SolverV { &mut self.opts.random_seed, self.vars.order_heap_data.len() as i32, ) as usize; - next = self.vars.order_heap_data[idx_tmp]; + next = self.vars.order_heap_data[idx_tmp].var(); if self.value(next) == lbool::UNDEF && self.decision[next] { self.rnd_decisions += 1; } @@ -2165,8 +2165,8 @@ impl VarState { for (_, x) in self.activity.iter_mut() { *x *= scale; } - for (_, x) in self.order_heap_data.heap_mut().iter_mut() { - *x *= scale + for x in self.order_heap_data.heap_mut().iter_mut() { + x.scale_activity(scale) } self.var_inc *= scale; } @@ -2189,7 +2189,7 @@ impl VarState { self.trail.push(p); } - fn order_heap(&mut self) -> Heap { + fn order_heap(&mut self) -> Heap { self.order_heap_data.promote(VarOrder { activity: &self.activity, }) @@ -2410,47 +2410,42 @@ impl PartialEq for Watcher { } impl Eq for Watcher {} -impl<'a> VarOrder<'a> { - fn check_activity(&self, var: Var) -> f32 { - if var == Var::UNDEF { - 0.0 - } else { - self.activity[var] - } - } -} - -const COMP_MASK: u64 = (u32::MAX as u64) << u32::BITS; -impl<'a> Comparator<(Var, f32)> for VarOrder<'a> { - type Comp = u64; +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +struct VarOrderKey(u64); +impl VarOrderKey { #[inline] - fn max_value(&self) -> (Var, f32) { - (Var::UNDEF, 0.0) + fn new(var: Var, activity: f32) -> Self { + VarOrderKey((!(activity.to_bits() as u64) << u32::BITS) | (var.idx() as u64)) } - #[inline] - fn to_cmp_form(&self, v: &(Var, f32)) -> u64 { - debug_assert_eq!(self.check_activity(v.0), v.1); - let x = ((v.1.to_bits() as u64) << u32::BITS) | (v.0.idx() as u64); - x ^ COMP_MASK + fn var(self) -> Var { + Var::unsafe_from_idx(self.0 as u32) } - #[inline] - fn from_cmp_form(&self, c: Self::Comp) -> (Var, f32) { - let c = c ^ COMP_MASK; - let v = Var::unsafe_from_idx((c & (u32::MAX as u64)) as u32); - let a = f32::from_bits((c >> u32::BITS) as u32); - (v, a) + fn activity(self) -> f32 
{ + f32::from_bits(!((self.0 >> u32::BITS) as u32)) } -} -impl<'a> MemoComparator for VarOrder<'a> { - fn value(&self, k: Var) -> f32 { - self.activity[k] + fn scale_activity(&mut self, scale: f32) { + *self = VarOrderKey::new(self.var(), self.activity() * scale) } } +impl<'a> CachedKeyComparator for VarOrder<'a> { + type Key = VarOrderKey; + + fn cache_key(&self, t: Var) -> Self::Key { + VarOrderKey::new(t, self.activity[t]) + } + fn max_key(&self) -> Self::Key { + VarOrderKey::new(Var::UNDEF, 0.0) + } + + fn un_cache_key(&self, k: Self::Key) -> Var { + k.var() + } +} impl<'a> DeletePred for WatcherDeleted<'a> { #[inline] fn deleted(&self, w: &Watcher) -> bool { diff --git a/src/batsat/src/heap.rs b/src/batsat/src/heap.rs index 4d925d4..ccb07a6 100644 --- a/src/batsat/src/heap.rs +++ b/src/batsat/src/heap.rs @@ -1,11 +1,11 @@ use crate::intmap::{AsIndex, IntMap}; use std::fmt::Debug; -use std::{cmp, mem, ops}; +use std::{mem, ops}; /// Quaternary Heap #[derive(Debug, Clone)] pub struct HeapData { - heap: Box<[(K, V)]>, + heap: Box<[V]>, next_slot: usize, indices: IntMap, } @@ -34,12 +34,12 @@ impl HeapData { self.indices.has(k) && self.indices[k] >= 0 } - pub fn promote>(&mut self, comp: Comp) -> Heap { + pub fn promote>(&mut self, comp: Comp) -> Heap { Heap { data: self, comp } } /// Raw mutable access to all the elements of the heap - pub(crate) fn heap_mut(&mut self) -> &mut [(K, V)] { + pub(crate) fn heap_mut(&mut self) -> &mut [V] { if self.next_slot == 0 { &mut [] } else { @@ -49,87 +49,41 @@ impl HeapData { } impl ops::Index for HeapData { - type Output = K; + type Output = V; fn index(&self, index: usize) -> &Self::Output { - &self.heap[index].0 + &self.heap[index] } } -pub trait Comparator { - type Comp: Ord; +pub trait CachedKeyComparator { + type Key: Ord + Copy; - fn max_value(&self) -> T; - fn to_cmp_form(&self, v: &T) -> Self::Comp; - fn from_cmp_form(&self, c: Self::Comp) -> T; + fn cache_key(&self, t: T) -> Self::Key; - fn cmp(&self, lhs: &T, rhs: &T) -> cmp::Ordering { - self.to_cmp_form(&lhs).cmp(&self.to_cmp_form(&rhs)) - } - fn max(&self, lhs: T, rhs: T) -> T - where - T: Sized, - { - if self.ge(&rhs, &lhs) { - rhs - } else { - lhs - } - } - fn min(&self, lhs: T, rhs: T) -> T - where - T: Sized, - { - if self.le(&lhs, &rhs) { - lhs - } else { - rhs - } - } - fn le(&self, lhs: &T, rhs: &T) -> bool { - match self.cmp(lhs, rhs) { - cmp::Ordering::Less | cmp::Ordering::Equal => true, - _ => false, - } - } - fn lt(&self, lhs: &T, rhs: &T) -> bool { - match self.cmp(lhs, rhs) { - cmp::Ordering::Less => true, - _ => false, - } - } - #[inline] - fn gt(&self, lhs: &T, rhs: &T) -> bool { - self.lt(rhs, lhs) - } - #[inline] - fn ge(&self, lhs: &T, rhs: &T) -> bool { - self.le(rhs, lhs) - } -} + fn max_key(&self) -> Self::Key; -pub trait MemoComparator: Comparator<(K, V)> { - fn value(&self, k: K) -> V; + fn un_cache_key(&self, k: Self::Key) -> T; } #[derive(Debug)] -pub struct Heap<'a, K: AsIndex + 'a, V: 'a, Comp> { - data: &'a mut HeapData, +pub struct Heap<'a, K: AsIndex + 'a, Comp: CachedKeyComparator> { + data: &'a mut HeapData, comp: Comp, } -impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::Deref for Heap<'a, K, V, Comp> { - type Target = HeapData; +impl<'a, K: AsIndex + 'a, Comp: CachedKeyComparator> ops::Deref for Heap<'a, K, Comp> { + type Target = HeapData; fn deref(&self) -> &Self::Target { &self.data } } -impl<'a, K: AsIndex + 'a, V: 'a, Comp> ops::DerefMut for Heap<'a, K, V, Comp> { +impl<'a, K: AsIndex + 'a, Comp: CachedKeyComparator> ops::DerefMut for Heap<'a, K, Comp> 
{ fn deref_mut(&mut self) -> &mut Self::Target { &mut self.data } } -impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, V, Comp> { +impl<'a, K: AsIndex + 'a, Comp: CachedKeyComparator> Heap<'a, K, Comp> { // ensure size is always a multiple of 4 #[cold] #[inline(never)] @@ -138,56 +92,55 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, if self.next_slot == 0 { self.next_slot = ROOT as usize; // Enough space for the root and 4 children - self.heap = vec![self.comp.max_value(); 8].into_boxed_slice(); + self.heap = vec![self.comp.max_key(); 8].into_boxed_slice(); } else { let new_size = self.next_slot << 2; let mut heap = mem::replace(&mut self.heap, Box::new([])).into_vec(); - heap.resize(new_size, self.comp.max_value()); + heap.resize(new_size, self.comp.max_key()); self.heap = heap.into_boxed_slice(); } } #[inline] - fn heap_push(&mut self, k: K, v: V) -> u32 { + fn heap_push(&mut self, k: Comp::Key) -> u32 { if self.next_slot >= self.heap.len() { self.heap_reserve(); assert!(self.next_slot < self.heap.len()); } let slot = self.next_slot; - self.heap[slot] = (k, v); + self.heap[slot] = k; self.next_slot += 1; slot as u32 } fn percolate_up(&mut self, mut i: u32) { let x = self.heap[i as usize]; - let xc = self.comp.to_cmp_form(&x); let mut p = parent_index(i); - while i != ROOT && xc < self.comp.to_cmp_form(&self.heap[p as usize]) { + while i != ROOT && x < self.heap[p as usize] { self.heap[i as usize] = self.heap[p as usize]; let tmp = self.heap[p as usize]; - self.indices[tmp.0] = i as i32; + self.data.indices[self.comp.un_cache_key(tmp)] = i as i32; i = p; p = parent_index(p); } self.heap[i as usize] = x; - self.indices[x.0] = i as i32; + self.data.indices[self.comp.un_cache_key(x)] = i as i32; } fn percolate_down(&mut self, mut i: u32) { - let x = self.comp.to_cmp_form(&self.heap[i as usize]); + let x = self.heap[i as usize]; let len = (self.next_slot + 3) & (usize::MAX - 3); // round up to nearest multiple of 4 // since the heap is padded with maximum values we can pretend that these are part of the // heap but never swap with them let heap = &mut self.data.heap[..len]; loop { - let min = |x: (u32, Comp::Comp), y: (u32, Comp::Comp)| if x.1 < y.1 { x } else { y }; + let min = |x: (u32, Comp::Key), y: (u32, Comp::Key)| if x.1 < y.1 { x } else { y }; let left_index = left_index(i); let Some(arr) = heap.get(left_index as usize..left_index as usize + 4) else { break; }; - let bundle = |x| (left_index + x, self.comp.to_cmp_form(&arr[x as usize])); + let bundle = |x| (left_index + x, arr[x as usize]); let b0 = bundle(0); let b1 = bundle(1); let b2 = bundle(2); @@ -198,27 +151,25 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, if min > x { break; } - let min = self.comp.from_cmp_form(min); heap[i as usize] = min; - self.data.indices[min.0] = i as i32; + self.data.indices[self.comp.un_cache_key(min)] = i as i32; i = child; } - let x = self.comp.from_cmp_form(x); heap[i as usize] = x; - self.data.indices[x.0] = i as i32; + self.data.indices[self.comp.un_cache_key(x)] = i as i32; } pub fn decrease(&mut self, k: K) { debug_assert!(self.in_heap(k)); let k_index = self.indices[k]; - self.heap[k_index as usize].1 = self.comp.value(k); + self.heap[k_index as usize] = self.comp.cache_key(k); self.percolate_up(k_index as u32); } pub fn insert(&mut self, k: K) { self.indices.reserve(k, -1); debug_assert!(!self.in_heap(k)); - let k_index = self.heap_push(k, self.comp.value(k)); + let k_index = 
self.heap_push(self.comp.cache_key(k)); self.indices[k] = k_index as i32; self.percolate_up(k_index); } @@ -228,17 +179,18 @@ impl<'a, K: AsIndex + 'a, V: Copy + 'a, Comp: MemoComparator> Heap<'a, K, let x = self.heap[ROOT as usize]; let last = self.next_slot - 1; self.next_slot = last; - self.indices[x.0] = -1; + let x_var = self.comp.un_cache_key(x); + self.indices[x_var] = -1; if self.is_empty() { - self.heap[last] = self.comp.max_value(); - return x.0; + self.heap[last] = self.comp.max_key(); + return x_var; } let lastval = self.heap[last]; - self.heap[last] = self.comp.max_value(); + self.heap[last] = self.comp.max_key(); self.heap[ROOT as usize] = lastval; - self.indices[lastval.0] = ROOT as i32; + self.data.indices[self.comp.un_cache_key(lastval)] = ROOT as i32; self.percolate_down(ROOT); - x.0 + self.comp.un_cache_key(x) } } From f888533434a794302e5dbd16d2aa00b8a82ebb76 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Tue, 19 Mar 2024 15:52:46 -0700 Subject: [PATCH 12/15] Improved order `Var`s are added to the heap when canceling --- src/batsat/src/core.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 210b0a5..038b611 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -1916,7 +1916,7 @@ impl SolverV { debug_assert!(self.decision_level() > level); let trail_lim_last = *self.vars.trail_lim.last().expect("trail_lim is empty") as usize; let trail_lim_level = self.vars.trail_lim[level as usize] as usize; - for c in (trail_lim_level..self.vars.trail.len()).rev() { + for c in trail_lim_level..self.vars.trail.len() { let x = self.vars.trail[c].var(); self.vars.ass[x] = lbool::UNDEF; if self.opts.phase_saving > 1 || (self.opts.phase_saving == 1 && c > trail_lim_last) { From d8e4a4c42ffc38dc898fcce41064eb8ee8929f2c Mon Sep 17 00:00:00 2001 From: dewert99 Date: Tue, 26 Mar 2024 10:14:34 -0700 Subject: [PATCH 13/15] Added more control over assumptions and decision variables --- src/batsat/src/core.rs | 18 ++++++++++++++++-- src/batsat/src/interface.rs | 12 ++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 038b611..0171675 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -299,9 +299,11 @@ impl SolverInterface for Solver { th: &mut Th, assumps: &[Lit], ) -> lbool { - self.v.assumptions.clear(); + let old_len = self.v.assumptions.len(); self.v.assumptions.extend_from_slice(assumps); - self.solve_internal(th) + let res = self.solve_internal(th); + self.v.assumptions.truncate(old_len); + res } fn pop_model(&mut self, th: &mut Th) { @@ -391,6 +393,18 @@ impl SolverInterface for Solver { fn proved_at_lvl_0(&self) -> &[Lit] { self.v.vars.proved_at_lvl_0() } + + fn set_decision_var(&mut self, v: Var, dvar: bool) { + self.v.set_decision_var(v, dvar) + } + + fn assumptions(&mut self) -> &[Lit] { + &self.v.assumptions + } + + fn assumptions_mut(&mut self) -> &mut Vec { + &mut self.v.assumptions + } } impl Default for Solver { diff --git a/src/batsat/src/interface.rs b/src/batsat/src/interface.rs index babc1ec..e445107 100644 --- a/src/batsat/src/interface.rs +++ b/src/batsat/src/interface.rs @@ -152,6 +152,18 @@ pub trait SolverInterface { /// /// Precondition: last result was `Unsat` fn unsat_core_contains_var(&self, v: Var) -> bool; + + /// Sets if a variable can be used in decisions + /// (NOTE! This has effects on the meaning of a SATISFIABLE result). 
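+    /// (If a variable that still occurs in clauses is excluded from decisions,
+    /// the solver may answer SATISFIABLE without ever assigning it, so those
+    /// clauses are no longer guaranteed to be satisfied by the reported model.)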
+ fn set_decision_var(&mut self, v: Var, dvar: bool); + + /// Get access to the set of assumptions that will be implicitly added to calls to + /// [`SolverInterface::solve_limited_th`] + fn assumptions(&mut self) -> &[Lit]; + + /// Get mutable access to the set of assumptions that will be implicitly added to calls to + /// [`SolverInterface::solve_limited_th`] + fn assumptions_mut(&mut self) -> &mut Vec; } /// Result of calling [`SolverInterface::solve_limited_th_full`], contains the unsat-core From 41aba49998166186590f72606d0553c8f172a575 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Mon, 1 Apr 2024 11:45:03 -0700 Subject: [PATCH 14/15] Allow theory conflicts to be empty --- src/batsat/src/core.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 0171675..3906df7 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -716,6 +716,9 @@ impl Solver { TheoryCall::Final => th.final_check(&mut th_arg), } if let TheoryConflict::Clause { costly } = th_arg.conflict { + if th_arg.lits.is_empty() { + return Err(ConflictAtLevel0); + } // borrow magic let mut local_confl_cl = vec![]; mem::swap(&mut local_confl_cl, th_arg.lits); @@ -2313,9 +2316,6 @@ impl<'a> TheoryArg<'a> { /// This is a hint for the SAT solver to keep the theory lemma that corresponds /// to `c` along with the actual learnt clause. pub fn raise_conflict(&mut self, lits: &[Lit], costly: bool) { - if lits.len() == 0 { - panic!("conflicts must have a least one literal") - } if self.is_ok() { self.conflict = TheoryConflict::Clause { costly }; self.lits.clear(); From 517f3ee1c84f620180a22f09bc3fef01ca043c76 Mon Sep 17 00:00:00 2001 From: dewert99 Date: Thu, 4 Apr 2024 11:40:12 -0700 Subject: [PATCH 15/15] Add `Theory::explain_propagation_final` for use in `analyze_final` --- src/batsat/src/core.rs | 2 +- src/batsat/src/theory.rs | 13 ++++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/batsat/src/core.rs b/src/batsat/src/core.rs index 3906df7..e88f1ea 100644 --- a/src/batsat/src/core.rs +++ b/src/batsat/src/core.rs @@ -1615,7 +1615,7 @@ impl SolverV { out_conflict.insert(!lit); } else if reason == CRef::SPECIAL { // resolution with propagation reason - let lits = th.explain_propagation(lit); + let lits = th.explain_propagation_final(lit); for &p in lits { if self.vars.level(p.var()) > 0 { self.seen[p.var()] = Seen::SOURCE; diff --git a/src/batsat/src/theory.rs b/src/batsat/src/theory.rs index a8ad889..7490eb6 100644 --- a/src/batsat/src/theory.rs +++ b/src/batsat/src/theory.rs @@ -40,7 +40,18 @@ pub trait Theory { /// /// `p` is the literal that has been propagated by the theory in a prefix /// of the current trail. - fn explain_propagation(&mut self, _p: Lit) -> &[Lit]; + fn explain_propagation(&mut self, p: Lit) -> &[Lit]; + + /// Similar to `explain_propagation` but theories should prefer larger older explanations + /// For example, if a theory knows `(a && b) => c` and `c => d` and is asked to explain `d`, + /// `explain_propagation` may prefer to explain using `[c]` to generate a better clause, but + /// `explain_propagation_final` may as well explain using `[a, b]` since otherwise it would just + /// be asked to explain `c` anyway. + /// + /// The default implementation just calls `explain_propagation` + fn explain_propagation_final(&mut self, p: Lit) -> &[Lit] { + self.explain_propagation(p) + } } /// Trivial theory that does nothing
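// A standalone sketch (not part of the patches) of the contract behind
// `explain_propagation` vs `explain_propagation_final` in PATCH 15/15. The
// names `explain`, `explain_final`, and the integer stand-in for `Lit` are
// all hypothetical; only the two-method split mirrors the diff above.
use std::collections::HashMap;

type Lit = i32; // stand-in for batsat's `Lit`

/// Direct antecedents only, as `explain_propagation` would return them.
fn explain(imps: &HashMap<Lit, Vec<Lit>>, p: Lit) -> Vec<Lit> {
    imps.get(&p).cloned().unwrap_or_default()
}

/// Antecedents expanded down to literals with no antecedents of their own
/// (decisions/assumptions), as `explain_propagation_final` is allowed to do.
/// `analyze_final` would otherwise perform this expansion itself, asking the
/// theory to explain each intermediate literal (`c` in the doc example) anyway.
fn explain_final(imps: &HashMap<Lit, Vec<Lit>>, p: Lit) -> Vec<Lit> {
    let mut out = Vec::new();
    let mut stack = explain(imps, p);
    while let Some(q) = stack.pop() {
        let ante = explain(imps, q);
        if ante.is_empty() {
            out.push(q); // a decision or assumption: keep it in the core
        } else {
            stack.extend(ante); // an intermediate propagation: keep expanding
        }
    }
    out
}

fn main() {
    // (a && b) => c and c => d, with a=1, b=2, c=3, d=4
    let imps = HashMap::from([(3, vec![1, 2]), (4, vec![3])]);
    assert_eq!(explain(&imps, 4), vec![3]); // short explanation: [c]
    let mut root = explain_final(&imps, 4);
    root.sort();
    assert_eq!(root, vec![1, 2]); // final explanation: [a, b]
}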
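// A standalone sketch (not part of the patches) of the bit packing behind
// `VarOrderKey` from PATCH 11/15; `pack`, `unpack_activity`, and `unpack_var`
// are illustrative names, with only the decoding taken from the diff itself.
// Activities are non-negative finite f32s, so their IEEE-754 bit patterns
// order the same way as the values; inverting the bits reverses that order,
// which makes the plain `u64` ordering of the packed key mean "highest
// activity first", with ties broken by the smaller variable index in the low
// 32 bits. The heap in PATCH 11 is min-ordered and padded with `max_key`, so
// the hottest variable sits at the root and padding slots never percolate up.
fn pack(var_idx: u32, activity: f32) -> u64 {
    debug_assert!(activity >= 0.0 && activity.is_finite());
    ((!activity.to_bits() as u64) << u32::BITS) | var_idx as u64
}

fn unpack_activity(key: u64) -> f32 {
    // same decoding as `VarOrderKey::activity` in the diff above
    f32::from_bits(!((key >> u32::BITS) as u32))
}

fn unpack_var(key: u64) -> u32 {
    key as u32 // low 32 bits
}

fn main() {
    assert!(pack(7, 2.0) < pack(3, 1.0)); // higher activity sorts first
    assert!(pack(1, 1.0) < pack(2, 1.0)); // equal activity: lower index first
    assert_eq!(unpack_activity(pack(7, 2.0)), 2.0);
    assert_eq!(unpack_var(pack(7, 2.0)), 7);
}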