diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 08bf49aa..10b5cce8 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -183,6 +183,7 @@ "RectilinearPictureCompression": [Rectilinear Picture Compression], "ResourceConstrainedScheduling": [Resource Constrained Scheduling], "RootedTreeStorageAssignment": [Rooted Tree Storage Assignment], + "SchedulingToMinimizeWeightedCompletionTime": [Scheduling to Minimize Weighted Completion Time], "SchedulingWithIndividualDeadlines": [Scheduling With Individual Deadlines], "SequencingToMinimizeMaximumCumulativeCost": [Sequencing to Minimize Maximum Cumulative Cost], "SequencingToMinimizeWeightedCompletionTime": [Sequencing to Minimize Weighted Completion Time], @@ -5743,6 +5744,104 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] ] } +#{ + let x = load-model-example("SchedulingToMinimizeWeightedCompletionTime") + let ntasks = x.instance.lengths.len() + let m = x.instance.num_processors + let lengths = x.instance.lengths + let weights = x.instance.weights + let sigma = x.optimal_config + // Group tasks by processor + let tasks-by-proc = range(m).map(p => + range(ntasks).filter(i => sigma.at(i) == p) + ) + [ + #problem-def("SchedulingToMinimizeWeightedCompletionTime")[ + Given a finite set $T$ of tasks with processing lengths $ell: T -> ZZ^+$ and weights $w: T -> ZZ^+$, and a number $m in ZZ^+$ of identical processors, find an assignment $p: T -> {1, dots, m}$ that minimizes the total weighted completion time $sum_(t in T) w(t) dot C(t)$, where on each processor tasks are ordered by Smith's rule (non-decreasing $ell(t) "/" w(t)$ ratio) and $C(t)$ is the completion time of task $t$ (i.e., the cumulative processing time up to and including $t$ on its assigned processor). + ][ + Scheduling to Minimize Weighted Completion Time is problem A5 SS13 in Garey & Johnson @garey1979. 
NP-complete for $m = 2$ by reduction from Partition @lenstra1977, and NP-complete in the strong sense for arbitrary $m$. For a fixed assignment of tasks to processors, Smith's rule gives the optimal ordering on each processor, reducing the search space to $m^n$ processor assignments @smith1956. The problem is solvable in polynomial time when all lengths are equal or when all weights are equal @conway1967 @horn1973. + + *Example.* Let $T = {t_1, dots, t_#ntasks}$ with lengths $(#lengths.map(str).join(", "))$, weights $(#weights.map(str).join(", "))$, and $m = #m$ processors. The optimal assignment $(#sigma.map(v => str(v + 1)).join(", "))$ achieves total weighted completion time #x.optimal_value: + #for p in range(m) [ + - Processor #(p + 1): ${#tasks-by-proc.at(p).map(i => $t_#(i + 1)$).join(", ")}$#if tasks-by-proc.at(p).len() > 0 { + let proc-tasks = tasks-by-proc.at(p) + let elapsed = 0 + let contributions = () + for t in proc-tasks { + elapsed = elapsed + lengths.at(t) + contributions.push($#elapsed times #(weights.at(t)) = #(elapsed * weights.at(t))$) + } + [ -- contributions: #contributions.join(", ")] + } + ] + + #pred-commands( + "pred create --example " + problem-spec(x) + " -o scheduling-wct.json", + "pred solve scheduling-wct.json --solver brute-force", + "pred evaluate scheduling-wct.json --config " + x.optimal_config.map(str).join(","), + ) + + #figure({ + canvas(length: 1cm, { + import draw: * + let scale = 0.2 + let width = 1.2 + let gap = 0.8 + let colors = ( + rgb("#4e79a7"), + rgb("#e15759"), + rgb("#76b7b2"), + rgb("#f28e2b"), + rgb("#59a14f"), + ) + + for p in range(m) { + let x0 = p * (width + gap) + let max-time = tasks-by-proc.at(p).fold(0, (acc, t) => acc + lengths.at(t)) + rect((x0, 0), (x0 + width, max-time * scale), stroke: 0.8pt + black) + let y = 0 + for task in tasks-by-proc.at(p) { + let len = lengths.at(task) + let col = colors.at(task) + rect( + (x0, y), + (x0 + width, y + len * scale), + fill: col.transparentize(25%), + stroke: 
0.4pt + col, + ) + content( + (x0 + width / 2, y + len * scale / 2), + text(7pt, fill: white)[$t_#(task + 1)$], + ) + y += len * scale + } + content((x0 + width / 2, -0.3), text(8pt)[$P_#(p + 1)$]) + } + }) + }, + caption: [Canonical Scheduling to Minimize Weighted Completion Time instance with #ntasks tasks on #m processors. Tasks are ordered on each processor by Smith's rule.], + ) + ] + ] +} + +// Reduction: SchedulingToMinimizeWeightedCompletionTime -> ILP +#reduction-rule("SchedulingToMinimizeWeightedCompletionTime", "ILP", + example: false, +)[ + This $O(n^2 m)$ reduction constructs an ILP with binary assignment variables $x_(t,p)$, integer completion-time variables $C_t$, and binary ordering variables $y_(i,j)$ for task pairs. Big-M disjunctive constraints enforce non-overlapping execution on shared processors. +][ + _Construction._ Let $n = |T|$ and $m$ be the number of processors. Create $n m$ binary assignment variables $x_(t,p) in {0, 1}$ (task $t$ on processor $p$), $n$ integer completion-time variables $C_t$, and $n(n-1)/2$ binary ordering variables $y_(i,j)$ for $i < j$. The constraints are: + (1) Assignment: $sum_p x_(t,p) = 1$ for each $t$. + (2) Completion bounds: $C_t >= ell(t)$ for each $t$. + (3) Disjunctive: for each pair $(i,j)$ with $i < j$ and each processor $p$, big-M constraints ensure that if both tasks are on processor $p$, one must complete before the other starts. + The objective minimizes $sum_t w(t) dot C_t$. + + _Correctness._ ($arrow.r.double$) Any valid schedule gives a feasible ILP solution with the same objective. ($arrow.l.double$) Any ILP solution encodes a valid assignment and non-overlapping schedule. + + _Solution extraction._ For each task $t$, find the processor $p$ with $x_(t,p) = 1$. 
+] + #{ let x = load-model-example("SequencingWithinIntervals") let ntasks = x.instance.lengths.len() diff --git a/docs/paper/references.bib b/docs/paper/references.bib index ac226799..4e2ef3e5 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -1447,6 +1447,33 @@ @article{edmondsjohnson1973 year = {1973} } +@article{lenstra1977, + author = {J. K. Lenstra and A. H. G. Rinnooy Kan and P. Brucker}, + title = {Complexity of Machine Scheduling Problems}, + journal = {Annals of Discrete Mathematics}, + volume = {1}, + pages = {343--362}, + year = {1977}, + doi = {10.1016/S0167-5060(08)70743-X} +} + +@book{conway1967, + author = {Richard W. Conway and William L. Maxwell and Louis W. Miller}, + title = {Theory of Scheduling}, + publisher = {Addison-Wesley}, + year = {1967} +} + +@article{horn1973, + author = {W. A. Horn}, + title = {Minimizing Average Flow Time with Parallel Machines}, + journal = {Operations Research}, + volume = {21}, + number = {3}, + pages = {846--847}, + year = {1973} +} + @techreport{plaisted1976, author = {David A. 
Plaisted}, title = {Some Polynomial and Integer Divisibility Problems Are {NP}-Hard}, diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 5d95281d..a80e073f 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -280,6 +280,7 @@ Flags by problem type: AcyclicPartition --arcs [--weights] [--arc-costs] --weight-bound --cost-bound [--num-vertices] CVP --basis, --target-vec [--bounds] MultiprocessorScheduling --lengths, --num-processors, --deadline + SchedulingToMinimizeWeightedCompletionTime --lengths, --weights, --num-processors SequencingWithinIntervals --release-times, --deadlines, --lengths OptimalLinearArrangement --graph RootedTreeArrangement --graph, --bound @@ -345,6 +346,7 @@ Examples: pred create MIS/UnitDiskGraph --positions \"0,0;1,0;0.5,0.8\" --radius 1.5 pred create MIS --random --num-vertices 10 --edge-prob 0.3 pred create MultiprocessorScheduling --lengths 4,5,3,2,6 --num-processors 2 --deadline 10 + pred create SchedulingToMinimizeWeightedCompletionTime --lengths 1,2,3,4,5 --weights 6,4,3,2,1 --num-processors 2 pred create UndirectedFlowLowerBounds --graph 0-1,0-2,1-3,2-3,1-4,3-5,4-5 --capacities 2,2,2,2,1,3,2 --lower-bounds 1,1,0,0,1,0,1 --source 0 --sink 5 --requirement 3 pred create ConsistencyOfDatabaseFrequencyTables --num-objects 6 --attribute-domains \"2,3,2\" --frequency-tables \"0,1:1,1,1|1,1,1;1,2:1,1|0,2|1,1\" --known-values \"0,0,0;3,0,1;1,2,1\" pred create BiconnectivityAugmentation --graph 0-1,1-2,2-3 --potential-edges 0-2:3,0-3:4,1-3:2 --budget 5 @@ -668,7 +670,7 @@ pub struct CreateArgs { /// Deadline for FlowShopScheduling, MultiprocessorScheduling, or ResourceConstrainedScheduling #[arg(long)] pub deadline: Option, - /// Number of processors/machines for FlowShopScheduling, JobShopScheduling, MultiprocessorScheduling, ResourceConstrainedScheduling, or SchedulingWithIndividualDeadlines + /// Number of processors/machines for FlowShopScheduling, JobShopScheduling, 
MultiprocessorScheduling, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, or SchedulingWithIndividualDeadlines #[arg(long)] pub num_processors: Option, /// Binary schedule patterns for StaffScheduling (semicolon-separated rows, e.g., "1,1,0;0,1,1") @@ -919,7 +921,7 @@ mod tests { )); assert!( help.contains( - "Number of processors/machines for FlowShopScheduling, JobShopScheduling, MultiprocessorScheduling, ResourceConstrainedScheduling, or SchedulingWithIndividualDeadlines" + "Number of processors/machines for FlowShopScheduling, JobShopScheduling, MultiprocessorScheduling, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, or SchedulingWithIndividualDeadlines" ), "create help should describe --num-processors for both scheduling models" ); diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 28d8a138..b24c3347 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -27,10 +27,11 @@ use problemreductions::models::misc::{ JobShopScheduling, KnownValue, KthLargestMTuple, LongestCommonSubsequence, MinimumTardinessSequencing, MultiprocessorScheduling, PaintShop, PartiallyOrderedKnapsack, ProductionPlanning, QueryArg, RectilinearPictureCompression, ResourceConstrainedScheduling, - SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, - SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, - SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, - StringToStringCorrection, SubsetSum, SumOfSquaresPartition, ThreePartition, TimetableDesign, + SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, + SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedCompletionTime, + SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, + SequencingWithinIntervals, 
ShortestCommonSupersequence, StringToStringCorrection, SubsetSum, + SumOfSquaresPartition, ThreePartition, TimetableDesign, }; use problemreductions::models::BiconnectivityAugmentation; use problemreductions::prelude::*; @@ -677,6 +678,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--num-periods 6 --demands 5,3,7,2,8,5 --capacities 12,12,12,12,12,12 --setup-costs 10,10,10,10,10,10 --production-costs 1,1,1,1,1,1 --inventory-costs 1,1,1,1,1,1 --cost-bound 80" } "MultiprocessorScheduling" => "--lengths 4,5,3,2,6 --num-processors 2 --deadline 10", + "SchedulingToMinimizeWeightedCompletionTime" => { + "--lengths 1,2,3,4,5 --weights 6,4,3,2,1 --num-processors 2" + } "JobShopScheduling" => { "--job-tasks \"0:3,1:4;1:2,0:3,1:2;0:4,1:3;1:5,0:2;0:2,1:3,0:1\" --num-processors 2" } @@ -3283,6 +3287,37 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // SchedulingToMinimizeWeightedCompletionTime + "SchedulingToMinimizeWeightedCompletionTime" => { + let usage = "Usage: pred create SchedulingToMinimizeWeightedCompletionTime --lengths 1,2,3,4,5 --weights 6,4,3,2,1 --num-processors 2"; + let lengths_str = args.lengths.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SchedulingToMinimizeWeightedCompletionTime requires --lengths, --weights, and --num-processors\n\n{usage}" + ) + })?; + let weights_str = args.weights.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SchedulingToMinimizeWeightedCompletionTime requires --weights\n\n{usage}" + ) + })?; + let num_processors = args.num_processors.ok_or_else(|| { + anyhow::anyhow!("SchedulingToMinimizeWeightedCompletionTime requires --num-processors\n\n{usage}") + })?; + if num_processors == 0 { + bail!("SchedulingToMinimizeWeightedCompletionTime requires --num-processors > 0\n\n{usage}"); + } + let lengths: Vec = util::parse_comma_list(lengths_str)?; + let weights: Vec = util::parse_comma_list(weights_str)?; + ( + ser(SchedulingToMinimizeWeightedCompletionTime::new( + lengths, 
+ weights, + num_processors, + ))?, + resolved_variant.clone(), + ) + } + "CapacityAssignment" => { let usage = "Usage: pred create CapacityAssignment --capacities 1,2,3 --cost-matrix \"1,3,6;2,4,7;1,2,5\" --delay-matrix \"8,4,1;7,3,1;6,3,1\" --delay-budget 12"; let capacities_str = args.capacities.as_deref().ok_or_else(|| { diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index e635c7db..bfa891ea 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -88,6 +88,7 @@ mod precedence_constrained_scheduling; mod production_planning; mod rectilinear_picture_compression; pub(crate) mod resource_constrained_scheduling; +mod scheduling_to_minimize_weighted_completion_time; mod scheduling_with_individual_deadlines; mod sequencing_to_minimize_maximum_cumulative_cost; mod sequencing_to_minimize_weighted_completion_time; @@ -131,6 +132,7 @@ pub use precedence_constrained_scheduling::PrecedenceConstrainedScheduling; pub use production_planning::ProductionPlanning; pub use rectilinear_picture_compression::RectilinearPictureCompression; pub use resource_constrained_scheduling::ResourceConstrainedScheduling; +pub use scheduling_to_minimize_weighted_completion_time::SchedulingToMinimizeWeightedCompletionTime; pub use scheduling_with_individual_deadlines::SchedulingWithIndividualDeadlines; pub use sequencing_to_minimize_maximum_cumulative_cost::SequencingToMinimizeMaximumCumulativeCost; pub use sequencing_to_minimize_weighted_completion_time::SequencingToMinimizeWeightedCompletionTime; @@ -164,6 +166,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Processing time l(t) for each task" }, + FieldInfo { name: "weights", type_name: "Vec", description: "Weight w(t) for each task" }, + FieldInfo { name: "num_processors", type_name: "usize", description: "Number of identical processors m" }, + ], + } +} + +/// Scheduling to Minimize Weighted Completion Time problem. 
+/// +/// Given a set T of tasks with processing times `l(t)` and weights `w(t)`, +/// and a number `m` of identical processors, find an assignment of tasks to +/// processors that minimizes the total weighted completion time +/// `sum_t w(t) * C(t)`, where `C(t) = start_time(t) + l(t)`. +/// +/// Within each processor, tasks are ordered by Smith's rule: non-decreasing +/// `l(t)/w(t)` ratio. The only free variables are the processor assignments. +/// +/// # Representation +/// +/// Each task has a variable in `{0, ..., m-1}` representing its processor +/// assignment, giving `dims() = [m; n]`. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::SchedulingToMinimizeWeightedCompletionTime; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// use problemreductions::types::Min; +/// +/// // 5 tasks, 2 processors +/// let problem = SchedulingToMinimizeWeightedCompletionTime::new( +/// vec![1, 2, 3, 4, 5], vec![6, 4, 3, 2, 1], 2, +/// ); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem).unwrap(); +/// assert_eq!(problem.evaluate(&witness), Min(Some(47))); +/// ``` +#[derive(Debug, Clone, Serialize)] +pub struct SchedulingToMinimizeWeightedCompletionTime { + lengths: Vec, + weights: Vec, + #[serde(serialize_with = "serialize_num_processors")] + num_processors: usize, +} + +fn serialize_num_processors(v: &usize, s: S) -> Result { + s.serialize_u64(*v as u64) +} + +#[derive(Deserialize)] +struct SchedulingToMinimizeWeightedCompletionTimeSerde { + lengths: Vec, + weights: Vec, + num_processors: usize, +} + +impl SchedulingToMinimizeWeightedCompletionTime { + fn validate(lengths: &[u64], weights: &[u64], num_processors: usize) -> Result<(), String> { + if lengths.len() != weights.len() { + return Err("lengths and weights must have the same length".to_string()); + } + if num_processors == 0 { + return Err("num_processors must be positive".to_string()); + } + if lengths.contains(&0) { + return Err("task 
lengths must be positive".to_string()); + } + if weights.contains(&0) { + return Err("task weights must be positive".to_string()); + } + Ok(()) + } + + /// Create a new scheduling instance. + /// + /// # Panics + /// + /// Panics if `lengths.len() != weights.len()`, if `num_processors` is zero, + /// or if any length or weight is zero. + pub fn new(lengths: Vec, weights: Vec, num_processors: usize) -> Self { + Self::validate(&lengths, &weights, num_processors).unwrap_or_else(|err| panic!("{err}")); + Self { + lengths, + weights, + num_processors, + } + } + + /// Returns the number of tasks. + pub fn num_tasks(&self) -> usize { + self.lengths.len() + } + + /// Returns the number of processors. + pub fn num_processors(&self) -> usize { + self.num_processors + } + + /// Returns the processing times. + pub fn lengths(&self) -> &[u64] { + &self.lengths + } + + /// Returns the task weights. + pub fn weights(&self) -> &[u64] { + &self.weights + } + + /// Compute the total weighted completion time for a given processor + /// assignment. Tasks on each processor are ordered by Smith's rule + /// (non-decreasing l(t)/w(t) ratio). 
+ fn compute_weighted_completion_time(&self, config: &[usize]) -> Min { + let n = self.num_tasks(); + let m = self.num_processors; + + if config.len() != n { + return Min(None); + } + if config.iter().any(|&p| p >= m) { + return Min(None); + } + + // Group task indices by processor + let mut processor_tasks: Vec> = vec![vec![]; m]; + for (task, &processor) in config.iter().enumerate() { + processor_tasks[processor].push(task); + } + + let mut total_weighted_completion = 0u64; + + for tasks in &mut processor_tasks { + // Smith's rule: sort by non-decreasing l(t)/w(t) + // Equivalent to: l(i)*w(j) <= l(j)*w(i) (avoids floating point) + tasks.sort_by(|&a, &b| { + let lhs = self.lengths[a] as u128 * self.weights[b] as u128; + let rhs = self.lengths[b] as u128 * self.weights[a] as u128; + lhs.cmp(&rhs).then(a.cmp(&b)) + }); + + let mut elapsed = 0u64; + for &task in tasks.iter() { + elapsed = elapsed + .checked_add(self.lengths[task]) + .expect("processing time overflowed u64"); + let contribution = elapsed + .checked_mul(self.weights[task]) + .expect("weighted completion time overflowed u64"); + total_weighted_completion = total_weighted_completion + .checked_add(contribution) + .expect("total weighted completion time overflowed u64"); + } + } + + Min(Some(total_weighted_completion)) + } +} + +impl TryFrom + for SchedulingToMinimizeWeightedCompletionTime +{ + type Error = String; + + fn try_from( + value: SchedulingToMinimizeWeightedCompletionTimeSerde, + ) -> Result { + Self::validate(&value.lengths, &value.weights, value.num_processors)?; + Ok(Self { + lengths: value.lengths, + weights: value.weights, + num_processors: value.num_processors, + }) + } +} + +impl<'de> Deserialize<'de> for SchedulingToMinimizeWeightedCompletionTime { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value = SchedulingToMinimizeWeightedCompletionTimeSerde::deserialize(deserializer)?; + Self::try_from(value).map_err(serde::de::Error::custom) + } 
+} + +impl Problem for SchedulingToMinimizeWeightedCompletionTime { + const NAME: &'static str = "SchedulingToMinimizeWeightedCompletionTime"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![self.num_processors; self.num_tasks()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + self.compute_weighted_completion_time(config) + } +} + +crate::declare_variants! { + default SchedulingToMinimizeWeightedCompletionTime => "num_processors^num_tasks", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "scheduling_to_minimize_weighted_completion_time", + instance: Box::new(SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + )), + // P0={t0,t2,t4}, P1={t1,t3} => config [0, 1, 0, 1, 0] + optimal_config: vec![0, 1, 0, 1, 0], + optimal_value: serde_json::json!(47), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/scheduling_to_minimize_weighted_completion_time.rs"] +mod tests; diff --git a/src/models/mod.rs b/src/models/mod.rs index 2139cf2a..bdc24367 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -43,7 +43,8 @@ pub use misc::{ JobShopScheduling, Knapsack, KthLargestMTuple, LongestCommonSubsequence, MinimumTardinessSequencing, MultiprocessorScheduling, PaintShop, Partition, PrecedenceConstrainedScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, - ResourceConstrainedScheduling, SchedulingWithIndividualDeadlines, + ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, + SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, StackerCrane, StaffScheduling, diff --git 
a/src/rules/mod.rs b/src/rules/mod.rs index f1e51cf8..b7afb06e 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -204,6 +204,8 @@ pub(crate) mod rootedtreestorageassignment_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod ruralpostman_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod schedulingtominimizeweightedcompletiontime_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod schedulingwithindividualdeadlines_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod sequencingtominimizemaximumcumulativecost_ilp; @@ -369,6 +371,8 @@ pub(crate) fn canonical_rule_example_specs() -> Vec, + num_tasks: usize, + num_processors: usize, +} + +impl ReductionSMWCTToILP { + fn x_var(&self, task: usize, processor: usize) -> usize { + task * self.num_processors + processor + } + + fn c_var(&self, task: usize) -> usize { + self.num_tasks * self.num_processors + task + } + + fn y_var(&self, i: usize, j: usize) -> usize { + debug_assert!(i < j); + let base = self.num_tasks * self.num_processors + self.num_tasks; + base + i * (2 * self.num_tasks - i - 1) / 2 + (j - i - 1) + } +} + +impl ReductionResult for ReductionSMWCTToILP { + type Source = SchedulingToMinimizeWeightedCompletionTime; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution: for each task, find the processor with x_{t,p} = 1. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + (0..self.num_tasks) + .map(|t| { + (0..self.num_processors) + .find(|&p| target_solution[self.x_var(t, p)] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_tasks * num_processors + num_tasks + num_tasks * (num_tasks - 1) / 2", + num_constraints = "num_tasks + num_tasks * num_processors + 2 * num_tasks + 2 * num_tasks * (num_tasks - 1) / 2 * num_processors + num_tasks * (num_tasks - 1) / 2", + } +)] +impl ReduceTo> for SchedulingToMinimizeWeightedCompletionTime { + type Result = ReductionSMWCTToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let m = self.num_processors(); + + let total_processing_time: u64 = self.lengths().iter().sum(); + let big_m = total_processing_time as f64; + + let num_pairs = n * n.saturating_sub(1) / 2; + let num_vars = n * m + n + num_pairs; + + let result = ReductionSMWCTToILP { + target: ILP::new(0, vec![], vec![], ObjectiveSense::Minimize), + num_tasks: n, + num_processors: m, + }; + + let mut constraints = Vec::new(); + + // 1. Assignment constraints: each task assigned to exactly one processor + // sum_p x_{t,p} = 1 for each t + for t in 0..n { + let terms: Vec<(usize, f64)> = (0..m).map(|p| (result.x_var(t, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2. Binary bounds on x_{t,p}: 0 <= x_{t,p} <= 1 + for t in 0..n { + for p in 0..m { + constraints.push(LinearConstraint::le(vec![(result.x_var(t, p), 1.0)], 1.0)); + } + } + + // 3. Completion time bounds: l_t <= C_t <= M + for t in 0..n { + constraints.push(LinearConstraint::ge( + vec![(result.c_var(t), 1.0)], + self.lengths()[t] as f64, + )); + constraints.push(LinearConstraint::le(vec![(result.c_var(t), 1.0)], big_m)); + } + + // 4. 
Disjunctive constraints: for each pair (i,j) with i < j, on each processor p: + // If both tasks are on processor p and y_{i,j}=1 (i before j): + // C_j >= C_i + l_j - M*(2 - x_{i,p} - x_{j,p}) - M*(1 - y_{i,j}) + // If both on p and y_{i,j}=0 (j before i): + // C_i >= C_j + l_i - M*(2 - x_{i,p} - x_{j,p}) - M*y_{i,j} + // + // Rearranged (indicator terms moved to the left-hand side): + // C_j - C_i - M*y_{i,j} - M*x_{i,p} - M*x_{j,p} >= l_j - 3M + // C_i - C_j + M*y_{i,j} - M*x_{i,p} - M*x_{j,p} >= l_i - 2M + + for i in 0..n { + for j in (i + 1)..n { + let y = result.y_var(i, j); + let ci = result.c_var(i); + let cj = result.c_var(j); + let li = self.lengths()[i] as f64; + let lj = self.lengths()[j] as f64; + + for p in 0..m { + let xip = result.x_var(i, p); + let xjp = result.x_var(j, p); + + // If i before j on processor p: C_j >= C_i + l_j + // C_j - C_i + M*(1-y) + M*(1-x_{i,p}) + M*(1-x_{j,p}) >= l_j + // C_j - C_i - M*y - M*x_{i,p} - M*x_{j,p} >= l_j - 3M + constraints.push(LinearConstraint::ge( + vec![ + (cj, 1.0), + (ci, -1.0), + (y, -big_m), + (xip, -big_m), + (xjp, -big_m), + ], + lj - 3.0 * big_m, + )); + + // If j before i on processor p: C_i >= C_j + l_i + // C_i - C_j + M*y + M*(1-x_{i,p}) + M*(1-x_{j,p}) >= l_i + // C_i - C_j + M*y - M*x_{i,p} - M*x_{j,p} >= l_i - 2M + constraints.push(LinearConstraint::ge( + vec![ + (ci, 1.0), + (cj, -1.0), + (y, big_m), + (xip, -big_m), + (xjp, -big_m), + ], + li - 2.0 * big_m, + )); + } + + // Binary bound on y_{i,j}: 0 <= y <= 1 + constraints.push(LinearConstraint::le(vec![(y, 1.0)], 1.0)); + } + } + + // Objective: minimize sum_t w_t * C_t + let objective: Vec<(usize, f64)> = (0..n) + .map(|t| (result.c_var(t), self.weights()[t] as f64)) + .collect(); + + ReductionSMWCTToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_tasks: n, + num_processors: m, + } + } +} + 
+#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "schedulingtominimizeweightedcompletiontime_to_ilp", + build: || { + // 3 tasks, 2 processors: simple instance for canonical example + let source = + SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 2, 3], vec![4, 2, 1], 2); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/schedulingtominimizeweightedcompletiontime_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/misc/scheduling_to_minimize_weighted_completion_time.rs b/src/unit_tests/models/misc/scheduling_to_minimize_weighted_completion_time.rs new file mode 100644 index 00000000..d137878c --- /dev/null +++ b/src/unit_tests/models/misc/scheduling_to_minimize_weighted_completion_time.rs @@ -0,0 +1,194 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_scheduling_min_wct_creation() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + assert_eq!(problem.num_tasks(), 5); + assert_eq!(problem.num_processors(), 2); + assert_eq!(problem.lengths(), &[1, 2, 3, 4, 5]); + assert_eq!(problem.weights(), &[6, 4, 3, 2, 1]); + assert_eq!(problem.dims(), vec![2; 5]); + assert_eq!( + ::NAME, + "SchedulingToMinimizeWeightedCompletionTime" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_scheduling_min_wct_evaluate_issue_example() { + // Issue example: 5 tasks, 2 processors + // Optimal: P0={t0,t2,t4}, P1={t1,t3} => cost = 47 + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + // config: [0, 1, 0, 1, 0] means t0->P0, t1->P1, t2->P0, t3->P1, t4->P0 + assert_eq!(problem.evaluate(&[0, 1, 0, 1, 0]), Min(Some(47))); +} + +#[test] +fn 
test_scheduling_min_wct_evaluate_all_one_processor() { + // All tasks on one processor + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + // All on processor 0: Smith's rule order t0,t1,t2,t3,t4 + // C(t0)=1, C(t1)=3, C(t2)=6, C(t3)=10, C(t4)=15 + // WCT = 1*6 + 3*4 + 6*3 + 10*2 + 15*1 = 6+12+18+20+15 = 71 + assert_eq!(problem.evaluate(&[0, 0, 0, 0, 0]), Min(Some(71))); +} + +#[test] +fn test_scheduling_min_wct_evaluate_invalid_config() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 2], vec![3, 4], 2); + // Wrong length + assert_eq!(problem.evaluate(&[0]), Min(None)); + assert_eq!(problem.evaluate(&[0, 1, 0]), Min(None)); + // Out-of-range processor + assert_eq!(problem.evaluate(&[0, 2]), Min(None)); +} + +#[test] +fn test_scheduling_min_wct_solver() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&witness), Min(Some(47))); +} + +#[test] +fn test_scheduling_min_wct_find_all_witnesses() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + let solver = BruteForce::new(); + let witnesses = solver.find_all_witnesses(&problem); + // Issue says 2 optimal assignments (mirror pair) + assert_eq!(witnesses.len(), 2); + for w in &witnesses { + assert_eq!(problem.evaluate(w), Min(Some(47))); + } +} + +#[test] +fn test_scheduling_min_wct_serialization() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + let json = serde_json::to_value(&problem).unwrap(); + let restored: SchedulingToMinimizeWeightedCompletionTime = + serde_json::from_value(json).unwrap(); + assert_eq!(restored.lengths(), problem.lengths()); + assert_eq!(restored.weights(), 
problem.weights()); + assert_eq!(restored.num_processors(), problem.num_processors()); +} + +#[test] +fn test_scheduling_min_wct_deserialization_rejects_zero_processors() { + let err = + serde_json::from_value::(serde_json::json!({ + "lengths": [1, 2], + "weights": [3, 4], + "num_processors": 0 + })) + .unwrap_err(); + assert!( + err.to_string().contains("num_processors must be positive"), + "unexpected error: {err}" + ); +} + +#[test] +#[should_panic(expected = "num_processors must be positive")] +fn test_scheduling_min_wct_zero_processors() { + SchedulingToMinimizeWeightedCompletionTime::new(vec![1], vec![1], 0); +} + +#[test] +#[should_panic(expected = "lengths and weights must have the same length")] +fn test_scheduling_min_wct_mismatched_lengths() { + SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 2], vec![3], 2); +} + +#[test] +#[should_panic(expected = "task lengths must be positive")] +fn test_scheduling_min_wct_zero_length() { + SchedulingToMinimizeWeightedCompletionTime::new(vec![0, 1], vec![1, 1], 1); +} + +#[test] +#[should_panic(expected = "task weights must be positive")] +fn test_scheduling_min_wct_zero_weight() { + SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 1], vec![0, 1], 1); +} + +#[test] +fn test_scheduling_min_wct_single_task() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![5], vec![3], 2); + // Task 0 on processor 0: C(0) = 5, WCT = 5*3 = 15 + assert_eq!(problem.evaluate(&[0]), Min(Some(15))); + assert_eq!(problem.evaluate(&[1]), Min(Some(15))); +} + +#[test] +fn test_scheduling_min_wct_single_processor() { + // With 1 processor, Smith's rule determines the order + let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![2, 1], vec![1, 3], 1); + // Smith's rule: t1 has l/w=1/3=0.33, t0 has l/w=2/1=2.0 + // Order: t1, t0 + // C(t1) = 1, C(t0) = 3 + // WCT = 1*3 + 3*1 = 6 + assert_eq!(problem.evaluate(&[0, 0]), Min(Some(6))); +} + +#[test] +fn test_scheduling_min_wct_three_processors() { + 
let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![3, 3, 3], vec![1, 1, 1], 3); + assert_eq!(problem.dims(), vec![3; 3]); + // One task per processor: each completes at 3, WCT = 3*1 + 3*1 + 3*1 = 9 + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(Some(9))); + // All on one processor: C(t0)=3, C(t1)=6, C(t2)=9, WCT = 3+6+9 = 18 + assert_eq!(problem.evaluate(&[0, 0, 0]), Min(Some(18))); +} + +#[test] +fn test_scheduling_min_wct_paper_example() { + // Same as issue example - verifying the worked example + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + // P0={t0,t2,t4}: Smith order t0(0.167), t2(1.0), t4(5.0) + // C(t0)=1 => 1*6=6, C(t2)=4 => 4*3=12, C(t4)=9 => 9*1=9, subtotal=27 + // P1={t1,t3}: Smith order t1(0.5), t3(2.0) + // C(t1)=2 => 2*4=8, C(t3)=6 => 6*2=12, subtotal=20 + // Total = 47 + assert_eq!(problem.evaluate(&[0, 1, 0, 1, 0]), Min(Some(47))); + + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&witness), Min(Some(47))); +} diff --git a/src/unit_tests/rules/schedulingtominimizeweightedcompletiontime_ilp.rs b/src/unit_tests/rules/schedulingtominimizeweightedcompletiontime_ilp.rs new file mode 100644 index 00000000..b33d2dbf --- /dev/null +++ b/src/unit_tests/rules/schedulingtominimizeweightedcompletiontime_ilp.rs @@ -0,0 +1,129 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::models::misc::SchedulingToMinimizeWeightedCompletionTime; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_reduction_creates_valid_ilp_structure() { + // 3 tasks, 2 processors + let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 2, 3], vec![4, 2, 1], 2); + let reduction: ReductionSMWCTToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=3, m=2: x vars = 3*2=6, C vars = 
3, y vars = 3*2/2=3, total=12 + assert_eq!(ilp.num_vars, 12); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // Objective should reference C_t variables with weights + // C vars are at indices 6, 7, 8 + assert!(ilp + .objective + .iter() + .any(|&(idx, coeff)| idx == 6 && coeff == 4.0)); + assert!(ilp + .objective + .iter() + .any(|&(idx, coeff)| idx == 7 && coeff == 2.0)); + assert!(ilp + .objective + .iter() + .any(|&(idx, coeff)| idx == 8 && coeff == 1.0)); +} + +#[test] +fn test_solution_extraction() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 2], vec![3, 1], 2); + let reduction: ReductionSMWCTToILP = ReduceTo::>::reduce_to(&problem); + + // Build a manual ILP solution: + // x_{0,0}=1, x_{0,1}=0, x_{1,0}=0, x_{1,1}=1 => task 0 on P0, task 1 on P1 + // C_0=1, C_1=2, y_{0,1}=1 + let num_vars = reduction.target_problem().num_vars; + let mut sol = vec![0; num_vars]; + // x vars: indices 0..4 + sol[0] = 1; // x_{0,0} = 1 + sol[1] = 0; // x_{0,1} = 0 + sol[2] = 0; // x_{1,0} = 0 + sol[3] = 1; // x_{1,1} = 1 + // C vars: indices 4, 5 + sol[4] = 1; // C_0 = 1 + sol[5] = 2; // C_1 = 2 + // y vars: index 6 + sol[6] = 1; // y_{0,1} = 1 + + let extracted = reduction.extract_solution(&sol); + assert_eq!(extracted, vec![0, 1]); + // Each on separate processor: C(0)=1, C(1)=2, WCT = 1*3 + 2*1 = 5 + assert_eq!(problem.evaluate(&extracted), Min(Some(5))); +} + +#[test] +fn test_ilp_matches_bruteforce_small() { + // 3 tasks, 2 processors + let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 2, 3], vec![4, 2, 1], 2); + + let bf = BruteForce::new(); + let bf_witness = bf + .find_witness(&problem) + .expect("BF should find a solution"); + let bf_value = problem.evaluate(&bf_witness); + + let reduction: ReductionSMWCTToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = 
reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(ilp_value, bf_value); +} + +#[test] +fn test_issue_example_closed_loop() { + // Issue #505 example: 5 tasks, 2 processors, optimal = 47 + let problem = SchedulingToMinimizeWeightedCompletionTime::new( + vec![1, 2, 3, 4, 5], + vec![6, 4, 3, 2, 1], + 2, + ); + + let reduction: ReductionSMWCTToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(problem.evaluate(&extracted), Min(Some(47))); +} + +#[test] +fn test_single_task_single_processor() { + let problem = SchedulingToMinimizeWeightedCompletionTime::new(vec![5], vec![3], 1); + let reduction: ReductionSMWCTToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Min(Some(15))); +} + +#[test] +fn test_equal_tasks_multiple_processors() { + // 4 equal tasks, 2 processors + let problem = + SchedulingToMinimizeWeightedCompletionTime::new(vec![1, 1, 1, 1], vec![1, 1, 1, 1], 2); + + let bf = BruteForce::new(); + let bf_witness = bf + .find_witness(&problem) + .expect("BF should find a solution"); + let bf_value = problem.evaluate(&bf_witness); + + let reduction: ReductionSMWCTToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(ilp_value, bf_value); +}