diff --git a/cpp_test/TestBPDecoder.cpp b/cpp_test/TestBPDecoder.cpp index b8a3a05..8939a92 100644 --- a/cpp_test/TestBPDecoder.cpp +++ b/cpp_test/TestBPDecoder.cpp @@ -631,6 +631,82 @@ TEST(BpDecoder, DefaultScheduleAndConstantWhenRandomSerialScheduleFalse) { ASSERT_EQ(first_schedule, second_schedule) << "Schedule changed despite random_serial_schedule being false."; } +TEST(BpDecoder, DynamicScalingFactorSetup) { + int n = 5; + auto pcm = ldpc::gf2codes::rep_code(n); + int maximum_iterations = 10; + auto channel_probabilities = vector(pcm.n, 0.1); + + // Initialize decoder with dynamic scaling factor damping + double min_sum_scaling_factor = 0.5; + double dynamic_scaling_factor_damping = 0.1; + auto decoder = ldpc::bp::BpDecoder(pcm, channel_probabilities, maximum_iterations, ldpc::bp::MINIMUM_SUM, + ldpc::bp::PARALLEL, min_sum_scaling_factor, 1, ldpc::bp::NULL_INT_VECTOR, 0, false, ldpc::bp::AUTO, dynamic_scaling_factor_damping); + + // Verify that the scaling factors are set up correctly + ASSERT_EQ(decoder.ms_scaling_factor_vector.size(), maximum_iterations); + for (int i = 0; i < maximum_iterations; i++) { + double expected_factor = 1.0 - (1.0 - min_sum_scaling_factor) * std::pow(2.0, -1 * i * dynamic_scaling_factor_damping); + ASSERT_NEAR(decoder.ms_scaling_factor_vector[i], expected_factor, 1e-6); + } +} + +TEST(BpDecoder, StaticScalingFactorSetup) { + int n = 5; + auto pcm = ldpc::gf2codes::rep_code(n); + int maximum_iterations = 10; + auto channel_probabilities = vector(pcm.n, 0.1); + + // Initialize decoder with static scaling factor + double min_sum_scaling_factor = 0.5; + auto decoder = ldpc::bp::BpDecoder(pcm, channel_probabilities, maximum_iterations, ldpc::bp::MINIMUM_SUM, + ldpc::bp::PARALLEL, min_sum_scaling_factor); + + // Verify that the scaling factors are set up correctly + ASSERT_EQ(decoder.ms_scaling_factor_vector.size(), maximum_iterations); + for (int i = 0; i < maximum_iterations; i++) { + ASSERT_NEAR(decoder.ms_scaling_factor_vector[i], 
min_sum_scaling_factor, 1e-6); + } +} + + +TEST(BpDecoder, MsConvergeValueSetup) { + int n = 5; + auto pcm = ldpc::gf2codes::rep_code(n); + int maximum_iterations = 10; + auto channel_probabilities = vector(pcm.n, 0.1); + + // Initialize decoder with ms_converge value + double ms_converge = 0.01; + auto decoder = ldpc::bp::BpDecoder(pcm, channel_probabilities, maximum_iterations, ldpc::bp::MINIMUM_SUM, + ldpc::bp::PARALLEL, 0.5, 1, ldpc::bp::NULL_INT_VECTOR, 0, false, ldpc::bp::AUTO, 0.0, ms_converge); + + // Verify that the ms_converge value is set correctly + ASSERT_DOUBLE_EQ(decoder.ms_converge_value, 0.01); +} + +TEST(BpDecoder, SetUpMsScalingFactorsTest) { + int n = 5; + auto pcm = ldpc::gf2codes::rep_code(n); + int maximum_iterations = 10; + auto channel_probabilities = vector(pcm.n, 0.1); + + // Initialize decoder with ms_scaling_factor=1 and ms_converge_value=2.0 + double ms_scaling_factor = 1.0; + double ms_converge_value = 2.0; + auto decoder = ldpc::bp::BpDecoder(pcm, channel_probabilities, maximum_iterations, ldpc::bp::MINIMUM_SUM, + ldpc::bp::PARALLEL, ms_scaling_factor, 1, ldpc::bp::NULL_INT_VECTOR, 0, false, + ldpc::bp::AUTO, 0.1, ms_converge_value); + + // Verify that the scaling factors are set up correctly + ASSERT_EQ(decoder.ms_scaling_factor_vector.size(), maximum_iterations); + for (int i = 0; i < maximum_iterations; i++) { + double expected_factor = ms_converge_value - (ms_converge_value - ms_scaling_factor) * std::pow(2.0, -1 * i * 0.1); + ASSERT_NEAR(decoder.ms_scaling_factor_vector[i], expected_factor, 1e-6); + } +} + + int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/pyproject.toml b/pyproject.toml index 53d55d0..8d59817 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ "sinter>=1.12.0", "pymatching" ] -version = "2.3.8" +version = "2.3.9" [project.urls] Documentation = "https://software.roffe.eu/ldpc" diff --git
a/python_test/test_bp_dynamic_scaling_factor.py b/python_test/test_bp_dynamic_scaling_factor.py new file mode 100644 index 0000000..8d3225f --- /dev/null +++ b/python_test/test_bp_dynamic_scaling_factor.py @@ -0,0 +1,93 @@ +import pytest +import numpy as np +from ldpc.bp_decoder._bp_decoder import BpDecoder +from ldpc import BpOsdDecoder +from ldpc.bplsd_decoder import BpLsdDecoder +from ldpc.belief_find_decoder import BeliefFindDecoder +from ldpc.bp_flip import BpFlipDecoder + +def test_dynamic_scaling_factor_damping_initialization(): + pcm = np.array([[1, 1, 0], [0, 1, 1]], dtype=np.uint8) + channel_probs = [0.1, 0.2, 0.3] + max_iter = 10 + damping_factor = 0.1 + + decoder = BpDecoder(pcm, channel_probs=channel_probs, bp_method="ms", max_iter=max_iter, dynamic_scaling_factor_damping=damping_factor) + + assert decoder.dynamic_scaling_factor_damping == damping_factor, "Dynamic scaling factor damping not set correctly." + +def test_dynamic_scaling_factor_damping_effect(): + pcm = np.array([[1, 1, 0], [0, 1, 1]], dtype=np.uint8) + channel_probs = [0.1, 0.2, 0.3] + max_iter = 10 + damping_factor = 0.1 + + decoder = BpDecoder(pcm, channel_probs=channel_probs, bp_method="ms", max_iter=max_iter, dynamic_scaling_factor_damping=damping_factor) + + # Verify that the scaling factors are computed correctly + expected_factors = [ + 1.0 - (1.0 - decoder.ms_scaling_factor) * (2.0 ** (-1 * i * damping_factor)) + for i in range(max_iter) + ] + for i, factor in enumerate(expected_factors): + assert pytest.approx(decoder.ms_scaling_factor_vector[i], rel=1e-6) == factor, f"Scaling factor mismatch at iteration {i}." 
+ +def test_dynamic_scaling_factor_with_initial_and_converge_parameters(): + pcm = np.array([[1, 1, 0], [0, 1, 1]], dtype=np.uint8) + channel_probs = [0.1, 0.2, 0.3] + + max_iter = 10 + damping_factor = 0.1 # Initial damping factor for dynamic scaling factor growth + ms_scaling_factor = 0.5 # Initial scaling factor for testing + ms_converge_value = 2.0 # Convergence value for dynamic scaling factor + + decoder = BpDecoder(pcm, channel_probs=channel_probs, max_iter=max_iter, bp_method="ms", ms_scaling_factor=ms_scaling_factor, dynamic_scaling_factor_damping=damping_factor, ms_converge_value=ms_converge_value) + + print("ms_scaling_factor_start:", decoder.ms_scaling_factor) + print("damping_factor:", damping_factor) + print("ms_converge_value:", decoder.ms_converge_value) + print("Initial scaling factors:", decoder.ms_scaling_factor_vector) + + + # Verify that the scaling factors are recomputed correctly + expected_factors = [ + ms_converge_value - (ms_converge_value - ms_scaling_factor) * (2.0 ** (-1 * i * damping_factor)) + for i in range(max_iter) + ] + for i, factor in enumerate(expected_factors): + assert pytest.approx(decoder.ms_scaling_factor_vector[i], rel=1e-6) == factor, f"Scaling factor mismatch at iteration {i} after update." + +def test_dynamic_scaling_factor_damping_bplsd(): + pcm = np.array([[1, 1, 0], [0, 1, 1]], dtype=np.uint8) + damping_factor = 0.2 + + decoder = BpLsdDecoder(pcm, error_rate = 0.1, bp_method="ms", dynamic_scaling_factor_damping=damping_factor) + assert decoder.dynamic_scaling_factor_damping == damping_factor, "Damping factor not set correctly." + + updated_damping_factor = 0.5 + decoder.dynamic_scaling_factor_damping = updated_damping_factor + assert decoder.dynamic_scaling_factor_damping == updated_damping_factor, "Damping factor update failed." 
+ + +def test_dynamic_scaling_factor_damping_bposd(): + pcm = np.array([[1, 1, 0], [0, 1, 1]], dtype=np.uint8) + damping_factor = 0.3 + + decoder = BpOsdDecoder(pcm, error_rate=0.1, bp_method="ms", dynamic_scaling_factor_damping=damping_factor) + assert decoder.dynamic_scaling_factor_damping == damping_factor, "Damping factor not set correctly." + + updated_damping_factor = 0.6 + decoder.dynamic_scaling_factor_damping = updated_damping_factor + assert decoder.dynamic_scaling_factor_damping == updated_damping_factor, "Damping factor update failed." + +def test_dynamic_scaling_factor_damping_belief_find(): + pcm = np.array([[1, 1, 0], [0, 1, 1]], dtype=np.uint8) + damping_factor = 0.4 + + decoder = BeliefFindDecoder(pcm, error_rate=0.1, bp_method="ms", dynamic_scaling_factor_damping=damping_factor) + assert decoder.dynamic_scaling_factor_damping == damping_factor, "Damping factor not set correctly." + + updated_damping_factor = 0.7 + decoder.dynamic_scaling_factor_damping = updated_damping_factor + assert decoder.dynamic_scaling_factor_damping == updated_damping_factor, "Damping factor update failed." 
+ diff --git a/python_test/test_bplsd.py b/python_test/test_bplsd.py index 6e2c194..1af84d3 100644 --- a/python_test/test_bplsd.py +++ b/python_test/test_bplsd.py @@ -151,6 +151,7 @@ def test_rep_code_ms(): H = rep_code(3) lsd = BpLsdDecoder(H, error_rate=0.1, bp_method="min_sum", ms_scaling_factor=1.0) + assert lsd is not None assert lsd.bp_method == "minimum_sum" assert lsd.schedule == "parallel" @@ -190,3 +191,7 @@ def test_stats_reset(): assert len(stats["bit_llrs"]) == 0 assert len(stats["individual_cluster_stats"]) == 0 assert len(stats["global_timestep_bit_history"]) == 0 + + +if __name__ == "__main__": + test_rep_code_ms() \ No newline at end of file diff --git a/python_test/test_qcodes.py b/python_test/test_qcodes.py index 93a2f4f..4c87714 100644 --- a/python_test/test_qcodes.py +++ b/python_test/test_qcodes.py @@ -239,7 +239,8 @@ def test_400_16_6_hgp(): error_rate=error_rate, max_iter=max_iter, bp_method="ms", - ms_scaling_factor=0, + ms_scaling_factor=0.5, + dynamic_scaling_factor_damping=1.0, schedule="parallel", bits_per_step=1, lsd_order=0, @@ -260,7 +261,8 @@ def test_400_16_6_hgp(): error_rate=error_rate, max_iter=max_iter, bp_method="ms", - ms_scaling_factor=0, + ms_scaling_factor=0.55, + dynamic_scaling_factor_damping=0.1, schedule="serial", bits_per_step=1, lsd_order=0, diff --git a/src_cpp/bp.hpp b/src_cpp/bp.hpp index 556a475..2b93b0f 100644 --- a/src_cpp/bp.hpp +++ b/src_cpp/bp.hpp @@ -11,6 +11,7 @@ #include #include // required for std::runtime_error #include +#include #include "math.h" #include "sparse_matrix_base.hpp" @@ -60,9 +61,11 @@ namespace ldpc { BpSchedule schedule; BpInputType bp_input_type; double ms_scaling_factor; + std::vector ms_scaling_factor_vector; + double dynamic_scaling_factor_damping; + double ms_converge_value; std::vector decoding; std::vector candidate_syndrome; - std::vector log_prob_ratios; std::vector initial_log_prob_ratios; std::vector soft_syndrome; @@ -85,10 +88,14 @@ namespace ldpc { const std::vector 
&serial_schedule = NULL_INT_VECTOR, int random_schedule_seed = 0, bool random_serial_schedule = false, - BpInputType bp_input_type = AUTO) : + BpInputType bp_input_type = AUTO, + double dynamic_scaling_factor_damping = -1.0, + double ms_converge_value = 1.0) : pcm(parity_check_matrix), channel_probabilities(std::move(channel_probabilities)), check_count(pcm.m), bit_count(pcm.n), maximum_iterations(maximum_iterations), bp_method(bp_method), schedule(schedule), ms_scaling_factor(min_sum_scaling_factor), + dynamic_scaling_factor_damping(dynamic_scaling_factor_damping), + ms_converge_value(ms_converge_value), iterations(0) //the parity check matrix is passed in by reference { @@ -126,6 +133,9 @@ namespace ldpc { } } + + this->set_up_ms_scaling_factors(); + //Initialise OMP thread pool // this->omp_thread_count = omp_threads; // this->set_omp_thread_count(this->omp_thread_count); @@ -133,6 +143,30 @@ namespace ldpc { ~BpDecoder() = default; + + void set_up_ms_scaling_factors(){ + + if(this->bp_method == MINIMUM_SUM){ + + if(this->dynamic_scaling_factor_damping <= 0) { + this->ms_scaling_factor_vector.resize(this->maximum_iterations); + for (int i = 0; i < this->maximum_iterations; i++) { + this->ms_scaling_factor_vector[i] = this->ms_scaling_factor; + } + } else { + this->ms_scaling_factor_vector.resize(this->maximum_iterations); + for (int i = 0; i < this->maximum_iterations; i++) { + this->ms_scaling_factor_vector[i] = this->ms_converge_value - (this->ms_converge_value - this->ms_scaling_factor) * std::pow(2.0, -1*i*this->dynamic_scaling_factor_damping); + } + } + + } else { + this->ms_scaling_factor_vector.clear(); + } + + } + + void set_omp_thread_count(int count) { this->omp_thread_count = count; // omp_set_num_threads(this->omp_thread_count); @@ -191,6 +225,7 @@ namespace ldpc { std::vector &bp_decode_parallel(std::vector &syndrome) { + this->converge = 0; this->initialise_log_domain_bp(); @@ -219,13 +254,9 @@ namespace ldpc { } } else if (this->bp_method == 
MINIMUM_SUM) { - double alpha; - if(this->ms_scaling_factor == 0.0) { - alpha = 1.0 - std::pow(2.0, -1.0*it); - } - else { - alpha = this->ms_scaling_factor; - } + + double alpha = this->ms_scaling_factor_vector[it - 1]; + //check to bit updates for (int i = 0; i < check_count; i++) { @@ -456,14 +487,6 @@ namespace ldpc { for (int it = 1; it <= maximum_iterations; it++) { - double alpha; - if(this->ms_scaling_factor == 0.0) { - alpha = 1.0 - std::pow(2.0, -1.0*it); - } - else { - alpha = this->ms_scaling_factor; - } - if (this->random_serial_schedule) { this->rng_list_shuffle.shuffle(this->serial_schedule_order); } else if (this->schedule == BpSchedule::SERIAL_RELATIVE) { @@ -500,6 +523,7 @@ namespace ldpc { this->log_prob_ratios[bit_index] += e.check_to_bit_msg; } } else if (this->bp_method == 1) { + double alpha = this->ms_scaling_factor_vector[it - 1]; for (auto &e: pcm.iterate_column(bit_index)) { check_index = e.row_index; int sgn = syndrome[check_index]; diff --git a/src_python/ldpc/belief_find_decoder/__init__.pyi b/src_python/ldpc/belief_find_decoder/__init__.pyi index b4ef4d2..f05f39c 100644 --- a/src_python/ldpc/belief_find_decoder/__init__.pyi +++ b/src_python/ldpc/belief_find_decoder/__init__.pyi @@ -2,7 +2,6 @@ import numpy as np from scipy.sparse import spmatrix class BeliefFindDecoder(BpDecoderBase): - """ A class representing a decoder that combines Belief Propagation (BP) with the Union Find Decoder (UFD) algorithm. @@ -41,18 +40,17 @@ class BeliefFindDecoder(BpDecoderBase): The inversion method can be applied to any parity check matrix. bits_per_step : int, optional Specifies the number of bits added to the cluster in each step of the UFD algorithm. If no value is provided, this is set the block length of the code. - - Notes - ----- - The `BeliefFindDecoder` class leverages soft information outputted by the BP decoder to guide the cluster growth - in the UFD algorithm. 
The number of bits added to the cluster in each step is controlled by the `bits_per_step` parameter. - The `uf_method` parameter activates a more general version of the UFD algorithm suitable for LDPC codes when set to True. + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + ms_converge_value : Optional[float], optional + Convergence value for the minimum-sum method, by default 1.0. """ def __cinit__(self, pcm: Union[np.ndarray, scipy.sparse.spmatrix], error_rate: Optional[float] = None, error_channel: Optional[List[float]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[float] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, - random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, uf_method: str = "peeling", bits_per_step:int = 0, input_vector_type: str = "syndrome"): ... + random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, uf_method: str = "peeling", + bits_per_step: int = 0, input_vector_type: str = "syndrome", dynamic_scaling_factor_damping: Optional[float] = -1.0, ms_converge_value: Optional[float] = 1.0, **kwargs): ... def __del__(self): ... diff --git a/src_python/ldpc/belief_find_decoder/_belief_find_decoder.pyx b/src_python/ldpc/belief_find_decoder/_belief_find_decoder.pyx index f61043e..ef330e2 100644 --- a/src_python/ldpc/belief_find_decoder/_belief_find_decoder.pyx +++ b/src_python/ldpc/belief_find_decoder/_belief_find_decoder.pyx @@ -4,7 +4,6 @@ import numpy as np from scipy.sparse import spmatrix cdef class BeliefFindDecoder(BpDecoderBase): - """ A class representing a decoder that combines Belief Propagation (BP) with the Union Find Decoder (UFD) algorithm. @@ -43,18 +42,18 @@ cdef class BeliefFindDecoder(BpDecoderBase): The inversion method can be applied to any parity check matrix.
bits_per_step : int, optional Specifies the number of bits added to the cluster in each step of the UFD algorithm. If no value is provided, this is set the block length of the code. - - Notes - ----- - The `BeliefFindDecoder` class leverages soft information outputted by the BP decoder to guide the cluster growth - in the UFD algorithm. The number of bits added to the cluster in each step is controlled by the `bits_per_step` parameter. - The `uf_method` parameter activates a more general version of the UFD algorithm suitable for LDPC codes when set to True. + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + ms_converge_value : Optional[float], optional + Convergence value for the minimum-sum method, by default 1.0. """ def __cinit__(self, pcm: Union[np.ndarray, scipy.sparse.spmatrix], error_rate: Optional[float] = None, error_channel: Optional[List[float]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[float] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, - random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, uf_method: str = "peeling", bits_per_step:int = 0, input_vector_type: str = "syndrome"): + random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, uf_method: str = "peeling", + bits_per_step: int = 0, input_vector_type: str = "syndrome", dynamic_scaling_factor_damping: Optional[float] = -1.0, ms_converge_value: Optional[float] = 1.0, **kwargs): + self.MEMORY_ALLOCATED=False self.ufd = new uf_decoder_cpp(self.pcm[0]) self.bf_decoding.resize(self.n) #C vector for the bf decoding diff --git a/src_python/ldpc/bp_decoder/__init__.pyi b/src_python/ldpc/bp_decoder/__init__.pyi index 06fd6df..ce58152 100644 --- a/src_python/ldpc/bp_decoder/__init__.pyi +++ b/src_python/ldpc/bp_decoder/__init__.pyi @@ -10,19 +10,54 @@ def
io_test(pcm: Union[scipy.sparse.spmatrix,np.ndarray]): ... class BpDecoderBase: - """ - Bp Decoder base class + Base class for Belief Propagation (BP) decoders. + + This class provides the foundational structure for BP decoders, including initialization, + memory management, and common properties such as error rates, channel probabilities, + and scheduling methods. + + Attributes: + pcm (BpSparse): The parity check matrix in sparse format. + m (int): Number of rows in the parity check matrix. + n (int): Number of columns in the parity check matrix. + MEMORY_ALLOCATED (bool): Indicates whether memory has been allocated for the decoder. + bpd (BpDecoderCpp): The underlying C++ BP decoder object. """ - def __cinit__(self,pcm, **kwargs): ... + def __cinit__(self, pcm, **kwargs): + """ + Initialize the BP decoder base class. + + Args: + pcm (Union[np.ndarray, scipy.sparse.spmatrix]): The parity check matrix. + **kwargs: Additional parameters for configuring the decoder. + + Keyword Args: + error_rate (Optional[float]): Initial error rate for the decoder. + error_channel (Optional[List[float]]): Initial error channel probabilities. + max_iter (int): Maximum number of iterations for decoding. + bp_method (int): Belief propagation method (0 for product-sum, 1 for minimum-sum). + ms_scaling_factor (float): Scaling factor for the minimum-sum method. + schedule (int): Scheduling method (0 for serial, 1 for parallel, 2 for serial-relative). + omp_thread_count (int): Number of OpenMP threads to use. + random_serial_schedule (bool): Whether to enable random serial scheduling. + random_schedule_seed (int): Seed for random serial scheduling. + serial_schedule_order (Optional[List[int]]): Custom order for serial scheduling. + channel_probs (Optional[List[float]]): Channel probabilities for the decoder. + dynamic_scaling_factor_damping (float): Damping factor for dynamic scaling in the minimum-sum method. + + Raises: + TypeError: If the input matrix is not a valid type. 
+ ValueError: If required parameters are missing or invalid. + """ def __del__(self): ... @property def error_rate(self) -> np.ndarray: """ - Returns the current error rate vector. + Get the current error rate vector. Returns: np.ndarray: A numpy array containing the current error rate vector. @@ -31,29 +66,34 @@ class BpDecoderBase: @error_rate.setter def error_rate(self, value: Optional[float]) -> None: """ - Sets the error rate for the decoder. + Set the error rate for the decoder. Args: - value (Optional[float]): The error rate value to be set. Must be a single float value. + value (Optional[float]): The error rate value to be set. + + Raises: + ValueError: If the input value is not a valid float. """ @property def error_channel(self) -> np.ndarray: """ - Returns the current error channel vector. + Get the current error channel vector. Returns: np.ndarray: A numpy array containing the current error channel vector. """ @error_channel.setter - def error_channel(self, value: Union[Optional[List[float]],np.ndarray]) -> None: + def error_channel(self, value: Union[Optional[List[float]], np.ndarray]) -> None: """ - Sets the error channel for the decoder. + Set the error channel for the decoder. Args: - value (Optional[List[float]]): The error channel vector to be set. Must have length equal to the block - length of the code `self.n`. + value (Union[Optional[List[float]], np.ndarray]): The error channel vector to be set. + + Raises: + ValueError: If the input vector length does not match the block length of the code. """ def update_channel_probs(self, value: Union[List[float],np.ndarray]) -> None: ... @@ -233,6 +273,27 @@ class BpDecoderBase: TypeError: If the input value is not a float. """ + @property + def ms_scaling_factor_vector(self) -> np.ndarray: + """ + Get the vector of scaling factors for the minimum-sum method. + + Returns: + np.ndarray: The current vector of scaling factors. 
+ """ + + @ms_scaling_factor_vector.setter + def ms_scaling_factor_vector(self, value: Union[List[float], np.ndarray]) -> None: + """ + Set the vector of scaling factors for the minimum-sum method. + + Args: + value (Union[List[float], np.ndarray]): The new vector of scaling factors. + + Raises: + ValueError: If the input vector length does not match the maximum iterations. + """ + @property def omp_thread_count(self) -> int: """Get the number of OpenMP threads. @@ -292,81 +353,97 @@ class BpDecoderBase: ValueError: If random serial schedule is enabled while a fixed serial schedule is set. """ + @property + def dynamic_scaling_factor_damping(self) -> float: + """ + Get the dynamic scaling factor damping value. + + Returns: + float: The current dynamic scaling factor damping value. + """ + + @dynamic_scaling_factor_damping.setter + def dynamic_scaling_factor_damping(self, value: float) -> None: + """ + Set the dynamic scaling factor damping value. + + Args: + value (float): The new dynamic scaling factor damping value. + + Raises: + ValueError: If the input value is not a non-negative float. + """ + + @property + def ms_converge_value(self) -> float: + """ + Get the ms_converge_value for the minimum-sum method. + + Returns: + float: The current ms_converge_value. + """ + + @ms_converge_value.setter + def ms_converge_value(self, value: float) -> None: + """ + Set the ms_converge_value for the minimum-sum method. + + Args: + value (float): The new ms_converge_value. + + Raises: + ValueError: If the input value is not a float. + """ + class BpDecoder(BpDecoderBase): """ - Belief propagation decoder for binary linear codes. - - This class provides an implementation of belief propagation decoding for binary linear codes. The decoder uses a sparse - parity check matrix to decode received codewords. 
The decoding algorithm can be configured using various parameters, - such as the belief propagation method used, the scheduling method used, and the maximum number of iterations. - - Parameters - ---------- - pcm : Union[np.ndarray, spmatrix] - The parity check matrix of the binary linear code, represented as a NumPy array or a SciPy sparse matrix. - error_rate : Optional[float], optional - The initial error rate for the decoder, by default None. - error_channel : Optional[List[float]], optional - The initial error channel probabilities for the decoder, by default None. - max_iter : Optional[int], optional - The maximum number of iterations allowed for decoding, by default 0 (adaptive). - bp_method : Optional[str], optional - The belief propagation method to use: 'product_sum' or 'minimum_sum', by default 'minimum_sum'. - ms_scaling_factor : Optional[float], optional - The scaling factor for the minimum sum method, by default 1.0. - schedule : Optional[str], optional - The scheduling method for belief propagation: 'parallel', 'serial', or 'serial_relative'. By default 'parallel'. - omp_thread_count : Optional[int], optional - The number of OpenMP threads to use, by default 1. - random_schedule_seed : Optional[int], optional - The seed for the random serial schedule, by default 0. If set to 0, the seed is set according to the system clock. - serial_schedule_order : Optional[List[int]], optional - The custom order for serial scheduling, by default None. - random_serial_schedule : bool, optional - Whether to enable random serial scheduling. If True, the serial schedule order is randomized in each iteration. - By default False. - input_vector_type: str, optional - Use this parameter to specify the input type. Choose either: 1) 'syndrome' or 2) 'received_vector' or 3) 'auto'. - Note, it is only necessary to specify this value when the parity check matrix is square. When the - parity matrix is non-square, the input vector type is inferred automatically from its length. 
+ Belief Propagation (BP) decoder for binary linear codes. + + This class provides an implementation of BP decoding for binary linear codes. It supports + various configurations, including different BP methods, scheduling strategies, and scaling factors. + + Parameters: + pcm (Union[np.ndarray, scipy.sparse.spmatrix]): The parity check matrix. + error_rate (Optional[float]): Initial error rate for the decoder. + error_channel (Optional[List[float]]): Initial error channel probabilities. + max_iter (Optional[int]): Maximum number of iterations for decoding. + bp_method (Optional[str]): Belief propagation method ('product_sum' or 'minimum_sum'). + ms_scaling_factor (Optional[float]): Scaling factor for the minimum-sum method. + schedule (Optional[str]): Scheduling method ('parallel', 'serial', or 'serial_relative'). + omp_thread_count (Optional[int]): Number of OpenMP threads to use. + random_schedule_seed (Optional[int]): Seed for random serial scheduling. + serial_schedule_order (Optional[List[int]]): Custom order for serial scheduling. + input_vector_type (str): Input vector type ('syndrome', 'received_vector', or 'auto'). + random_serial_schedule (bool): Whether to enable random serial scheduling. + dynamic_scaling_factor_damping (Optional[float]): Damping factor for dynamic scaling in the minimum-sum method. + ms_converge_value (Optional[float]): Convergence value for the minimum-sum method. """ - def __cinit__(self, pcm: Union[np.ndarray, scipy.sparse.spmatrix], error_rate: Optional[float] = None, - error_channel: Optional[Union[np.ndarray,List[float]]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', - ms_scaling_factor: Optional[Union[float,int]] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, - random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, input_vector_type: str = "auto", random_serial_schedule: bool = False, **kwargs): ... 
- def __init__(self, pcm: Union[np.ndarray, scipy.sparse.spmatrix], error_rate: Optional[float] = None, error_channel: Optional[Union[np.ndarray,List[float]]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[Union[float,int]] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, - input_vector_type: str = "auto", random_serial_schedule: bool = False, **kwargs): ... + input_vector_type: str = "auto", random_serial_schedule: bool = False, dynamic_scaling_factor_damping: Optional[float] = -1, ms_converge_value=1.0, **kwargs): ... def decode(self, input_vector: np.ndarray) -> np.ndarray: """ - Decode the input input_vector using belief propagation decoding algorithm. + Decode the input vector using the BP decoding algorithm. - Parameters - ---------- - input_vector : numpy.ndarray - A 1D numpy array of length equal to the number of rows in the parity check matrix. + Parameters: + input_vector (np.ndarray): A 1D numpy array representing the input vector. - Returns - ------- - numpy.ndarray - A 1D numpy array of length equal to the number of columns in the parity check matrix. + Returns: + np.ndarray: A 1D numpy array representing the decoded output. - Raises - ------ - ValueError - If the length of the input input_vector does not match the number of rows in the parity check matrix. + Raises: + ValueError: If the input vector length does not match the expected length. """ @property def decoding(self) -> np.ndarray: """ - Returns the current decoded output. + Get the current decoded output. Returns: np.ndarray: A numpy array containing the current decoded output. @@ -375,42 +452,25 @@ class BpDecoder(BpDecoderBase): class SoftInfoBpDecoder(BpDecoderBase): """ - A decoder that uses soft information belief propagation algorithm for decoding binary linear codes. 
- - This class implements a modified version of the belief propagation decoding algorithm that accounts for - uncertainty in the syndrome readout using a serial belief propagation schedule. The decoder uses a minimum - sum method as the belief propagation variant. For more information on the algorithm, please see the original - research paper at https://arxiv.org/abs/2205.02341. - - Parameters - ---------- - pcm : Union[np.ndarray, spmatrix] - The parity check matrix for the code. - error_rate : Optional[float] - The probability of a bit being flipped in the received codeword. - error_channel : Optional[List[float]] - A list of probabilities that specify the probability of each bit being flipped in the received codeword. - Must be of length equal to the block length of the code. - max_iter : Optional[int] - The maximum number of iterations for the decoding algorithm. - bp_method : Optional[str] - The variant of belief propagation method to be used. The default value is 'minimum_sum'. - ms_scaling_factor : Optional[float] - The scaling factor used in the minimum sum method. The default value is 1.0. - cutoff : Optional[float] - The threshold value below which syndrome soft information is used. - random_serial_schedule : bool, optional - Whether to enable random serial scheduling. If True, the serial schedule order is randomized in each iteration. - By default False. + Soft Information Belief Propagation (BP) decoder for binary linear codes. + + This class implements a modified BP decoding algorithm that accounts for uncertainty in + the syndrome readout using a serial belief propagation schedule. + + Parameters: + pcm (Union[np.ndarray, spmatrix]): The parity check matrix. + error_rate (Optional[float]): Initial error rate for the decoder. + error_channel (Optional[List[float]]): Initial error channel probabilities. + max_iter (Optional[int]): Maximum number of iterations for decoding. + bp_method (Optional[str]): Belief propagation method ('minimum_sum'). 
+ ms_scaling_factor (Optional[float]): Scaling factor for the minimum-sum method. + cutoff (Optional[float]): Threshold value below which syndrome soft information is used. + sigma (float): Standard deviation of the noise. """ - def __cinit__(self, pcm: Union[np.ndarray, spmatrix], error_rate: Optional[float] = None, - error_channel: Optional[List[float]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', - ms_scaling_factor: Optional[float] = 1.0, cutoff: Optional[float] = np.inf, sigma: float = 2.0, **kwargs): ... - def decode(self, soft_info_syndrome: np.ndarray) -> np.ndarray: """ - Decode the input syndrome using the soft information belief propagation decoding algorithm. + Decode the input syndrome using the soft information BP decoding algorithm. Parameters ---------- @@ -426,7 +486,7 @@ class SoftInfoBpDecoder(BpDecoderBase): @property def soft_syndrome(self) -> np.ndarray: """ - Returns the current soft syndrome. + Get the current soft syndrome. Returns: np.ndarray: A numpy array containing the current soft syndrome. @@ -436,7 +496,7 @@ class SoftInfoBpDecoder(BpDecoderBase): @property def decoding(self) -> np.ndarray: """ - Returns the current decoded output. + Get the current decoded output. Returns: np.ndarray: A numpy array containing the current decoded output. 
diff --git a/src_python/ldpc/bp_decoder/_bp_decoder.pxd b/src_python/ldpc/bp_decoder/_bp_decoder.pxd index dbb8d21..d1191ec 100644 --- a/src_python/ldpc/bp_decoder/_bp_decoder.pxd +++ b/src_python/ldpc/bp_decoder/_bp_decoder.pxd @@ -56,7 +56,8 @@ cdef extern from "bp.hpp" namespace "ldpc::bp": vector[int] serial_schedule, int random_schedule_seed, bool random_serial_schedule, - BpInputType bp_input_type) except + + BpInputType bp_input_type, + double dynamic_scaling_factor_damping) except + BpSparse& pcm vector[double] channel_probabilities int check_count @@ -81,6 +82,10 @@ cdef extern from "bp.hpp" namespace "ldpc::bp": void set_omp_thread_count(int count) BpInputType bp_input_type void set_random_schedule_seed(int seed) + void set_up_ms_scaling_factors() + double dynamic_scaling_factor_damping + vector[double] ms_scaling_factor_vector + double ms_converge_value cdef class BpDecoderBase: cdef BpSparse *pcm diff --git a/src_python/ldpc/bp_decoder/_bp_decoder.pyx b/src_python/ldpc/bp_decoder/_bp_decoder.pyx index 2b72386..4920552 100644 --- a/src_python/ldpc/bp_decoder/_bp_decoder.pyx +++ b/src_python/ldpc/bp_decoder/_bp_decoder.pyx @@ -80,13 +80,47 @@ def io_test(pcm: Union[scipy.sparse.spmatrix,np.ndarray]): cdef class BpDecoderBase: - """ - Bp Decoder base class + Base class for Belief Propagation (BP) decoders. + + This class provides the foundational structure for BP decoders, including initialization, + memory management, and common properties such as error rates, channel probabilities, + and scheduling methods. + + Attributes: + pcm (BpSparse): The parity check matrix in sparse format. + m (int): Number of rows in the parity check matrix. + n (int): Number of columns in the parity check matrix. + MEMORY_ALLOCATED (bool): Indicates whether memory has been allocated for the decoder. + bpd (BpDecoderCpp): The underlying C++ BP decoder object. 
""" - def __cinit__(self,pcm, **kwargs): + def __cinit__(self, pcm, **kwargs): + """ + Initialize the BP decoder base class. + + Args: + pcm (Union[np.ndarray, scipy.sparse.spmatrix]): The parity check matrix. + **kwargs: Additional parameters for configuring the decoder. + + Keyword Args: + error_rate (Optional[float]): Initial error rate for the decoder. + error_channel (Optional[List[float]]): Initial error channel probabilities. + max_iter (int): Maximum number of iterations for decoding. + bp_method (int): Belief propagation method (0 for product-sum, 1 for minimum-sum). + ms_scaling_factor (float): Scaling factor for the minimum-sum method. + schedule (int): Scheduling method (0 for serial, 1 for parallel, 2 for serial-relative). + omp_thread_count (int): Number of OpenMP threads to use. + random_serial_schedule (bool): Whether to enable random serial scheduling. + random_schedule_seed (int): Seed for random serial scheduling. + serial_schedule_order (Optional[List[int]]): Custom order for serial scheduling. + channel_probs (Optional[List[float]]): Channel probabilities for the decoder. + dynamic_scaling_factor_damping (float): Damping factor for dynamic scaling in the minimum-sum method. + Raises: + TypeError: If the input matrix is not a valid type. + ValueError: If required parameters are missing or invalid. 
+ """ error_rate=kwargs.get("error_rate",None) error_channel=kwargs.get("error_channel", None) max_iter=kwargs.get("max_iter",0) @@ -98,6 +132,8 @@ cdef class BpDecoderBase: random_schedule_seed = kwargs.get("random_schedule_seed", 0) serial_schedule_order = kwargs.get("serial_schedule_order", None) channel_probs = kwargs.get("channel_probs", [None]) + dynamic_scaling_factor_damping = kwargs.get("dynamic_scaling_factor_damping", -1.0) + ms_converge = kwargs.get("ms_converge_value", 1.0) # input_vector_type = kwargs.get("input_vector_type", "auto") # print(kwargs.get("input_vector_type")) @@ -129,7 +165,20 @@ cdef class BpDecoderBase: ## initialise the decoder with default values - self.bpd = new BpDecoderCpp(self.pcm[0],self._error_channel,0,PRODUCT_SUM,PARALLEL,1.0,1,self._serial_schedule_order,0,False,SYNDROME) + self.bpd = new BpDecoderCpp( + self.pcm[0], + self._error_channel, + 0, + PRODUCT_SUM, + PARALLEL, + 1.0, + 1, + self._serial_schedule_order, + 0, + False, + SYNDROME, + dynamic_scaling_factor_damping + ) ## set the decoder parameters self.bp_method = bp_method @@ -141,6 +190,10 @@ cdef class BpDecoderBase: self.omp_thread_count = omp_thread_count self.random_serial_schedule = random_serial_schedule + if dynamic_scaling_factor_damping >= 0: + self.dynamic_scaling_factor_damping = dynamic_scaling_factor_damping + self.ms_converge_value = ms_converge + ## the ldpc_v1 backwards compatibility if isinstance(channel_probs, list) or isinstance(channel_probs, np.ndarray): if(len(channel_probs)>0) and (channel_probs[0] is not None): @@ -152,10 +205,9 @@ cdef class BpDecoderBase: self.error_rate = error_rate else: raise ValueError("Please specify the error channel. 
Either: 1) error_rate: float or 2) error_channel:\ - list of floats of length equal to the block length of the code {self.n}.") - + list of floats of length equal to the block length of the code {self.n}.") - + self.bpd.set_up_ms_scaling_factors() self.MEMORY_ALLOCATED=True @@ -167,7 +219,7 @@ cdef class BpDecoderBase: @property def error_rate(self) -> np.ndarray: """ - Returns the current error rate vector. + Get the current error rate vector. Returns: np.ndarray: A numpy array containing the current error rate vector. @@ -180,10 +232,13 @@ cdef class BpDecoderBase: @error_rate.setter def error_rate(self, value: Optional[float]) -> None: """ - Sets the error rate for the decoder. + Set the error rate for the decoder. Args: - value (Optional[float]): The error rate value to be set. Must be a single float value. + value (Optional[float]): The error rate value to be set. + + Raises: + ValueError: If the input value is not a valid float. """ if value is not None: if not isinstance(value, float): @@ -194,7 +249,7 @@ cdef class BpDecoderBase: @property def error_channel(self) -> np.ndarray: """ - Returns the current error channel vector. + Get the current error channel vector. Returns: np.ndarray: A numpy array containing the current error channel vector. @@ -205,13 +260,15 @@ cdef class BpDecoderBase: return out @error_channel.setter - def error_channel(self, value: Union[Optional[List[float]],np.ndarray]) -> None: + def error_channel(self, value: Union[Optional[List[float]], np.ndarray]) -> None: """ - Sets the error channel for the decoder. + Set the error channel for the decoder. Args: - value (Optional[List[float]]): The error channel vector to be set. Must have length equal to the block - length of the code `self.n`. + value (Union[Optional[List[float]], np.ndarray]): The error channel vector to be set. + + Raises: + ValueError: If the input vector length does not match the block length of the code. 
""" if value is not None: if len(value) != self.n: @@ -498,6 +555,38 @@ cdef class BpDecoderBase: raise TypeError("The ms_scaling factor must be specified as a float") self.bpd.ms_scaling_factor = value + @property + def ms_scaling_factor_vector(self) -> np.ndarray: + """ + Get the vector of scaling factors for the minimum-sum method. + + Returns: + np.ndarray: The current vector of scaling factors. + """ + out = np.zeros(len(self.bpd.ms_scaling_factor_vector), dtype=np.float64) + for i in range(len(self.bpd.ms_scaling_factor_vector)): + out[i] = self.bpd.ms_scaling_factor_vector[i] + return out + + @ms_scaling_factor_vector.setter + def ms_scaling_factor_vector(self, value: Union[List[float], np.ndarray]) -> None: + """ + Set the vector of scaling factors for the minimum-sum method. + + Args: + value (Union[List[float], np.ndarray]): The new vector of scaling factors. + + Raises: + ValueError: If the input vector length does not match the maximum iterations. + """ + if not isinstance(value, (list, np.ndarray)): + raise ValueError("The ms_scaling_factor_vector must be specified as a list or numpy array of floats.") + if len(value) != self.bpd.maximum_iterations: + raise ValueError(f"The ms_scaling_factor_vector must have length {self.bpd.maximum_iterations}.") + self.bpd.ms_scaling_factor_vector.clear() + for v in value: + self.bpd.ms_scaling_factor_vector.push_back(v) + @property def omp_thread_count(self) -> int: """Get the number of OpenMP threads. @@ -578,49 +667,89 @@ cdef class BpDecoderBase: # raise ValueError("The random_serial_schedule must be a boolean value.") self.bpd.random_serial_schedule = value + @property + def dynamic_scaling_factor_damping(self) -> float: + """ + Get the dynamic scaling factor damping value. + + Returns: + float: The current dynamic scaling factor damping value. 
+ """ + return self.bpd.dynamic_scaling_factor_damping + + @dynamic_scaling_factor_damping.setter + def dynamic_scaling_factor_damping(self, value: float) -> None: + """ + Set the dynamic scaling factor damping value. + + Args: + value (float): The new dynamic scaling factor damping value. + + Raises: + ValueError: If the decoder is not using the minimum-sum method, or if the value is not a non-negative float. + """ + if self.bpd.bp_method != MINIMUM_SUM: + raise ValueError(f"The dynamic_scaling_factor_damping can only be set for the minimum-sum method. The current method is {self.bp_method}.") + if not isinstance(value, (float, int)) or value < 0: + raise ValueError("The dynamic_scaling_factor_damping must be a non-negative float.") + self.bpd.dynamic_scaling_factor_damping = value + self.bpd.set_up_ms_scaling_factors() + + @property + def ms_converge_value(self) -> float: + """ + Get the ms_converge_value for the minimum-sum method. + + Returns: + float: The current ms_converge_value. + """ + return self.bpd.ms_converge_value + + @ms_converge_value.setter + def ms_converge_value(self, value: float) -> None: + """ + Set the ms_converge_value for the minimum-sum method. + + Args: + value (float): The new ms_converge_value. + + Raises: + ValueError: If the decoder is not using the minimum-sum method, or if the value is not a float. + """ + if self.bpd.bp_method != MINIMUM_SUM: + raise ValueError(f"The ms_converge_value can only be set for the minimum-sum method. The current method is {self.bp_method}.") + if not isinstance(value, (float, int)): + raise ValueError("The ms_converge_value must be a float.") + self.bpd.ms_converge_value = value + self.bpd.set_up_ms_scaling_factors() + cdef class BpDecoder(BpDecoderBase): """ - Belief propagation decoder for binary linear codes. - - This class provides an implementation of belief propagation decoding for binary linear codes. The decoder uses a sparse - parity check matrix to decode received codewords. 
The decoding algorithm can be configured using various parameters, - such as the belief propagation method used, the scheduling method used, and the maximum number of iterations. - - Parameters - ---------- - pcm : Union[np.ndarray, spmatrix] - The parity check matrix of the binary linear code, represented as a NumPy array or a SciPy sparse matrix. - error_rate : Optional[float], optional - The initial error rate for the decoder, by default None. - error_channel : Optional[List[float]], optional - The initial error channel probabilities for the decoder, by default None. - max_iter : Optional[int], optional - The maximum number of iterations allowed for decoding, by default 0 (adaptive). - bp_method : Optional[str], optional - The belief propagation method to use: 'product_sum' or 'minimum_sum', by default 'minimum_sum'. - ms_scaling_factor : Optional[float], optional - The scaling factor for the minimum sum method, by default 1.0. - schedule : Optional[str], optional - The scheduling method for belief propagation: 'parallel', 'serial', or 'serial_relative'. By default 'parallel'. - omp_thread_count : Optional[int], optional - The number of OpenMP threads to use, by default 1. - random_schedule_seed : Optional[int], optional - The seed for the random serial schedule, by default 0. If set to 0, the seed is set according to the system clock. - serial_schedule_order : Optional[List[int]], optional - The custom order for serial scheduling, by default None. - random_serial_schedule : bool, optional - Whether to enable random serial scheduling. If True, the serial schedule order is randomized in each iteration. - By default False. - input_vector_type: str, optional - Use this parameter to specify the input type. Choose either: 1) 'syndrome' or 2) 'received_vector' or 3) 'auto'. - Note, it is only necessary to specify this value when the parity check matrix is square. When the - parity matrix is non-square, the input vector type is inferred automatically from its length. 
+ Belief Propagation (BP) decoder for binary linear codes. + + This class provides an implementation of BP decoding for binary linear codes. It supports + various configurations, including different BP methods, scheduling strategies, and scaling factors. + + Parameters: + pcm (Union[np.ndarray, scipy.sparse.spmatrix]): The parity check matrix. + error_rate (Optional[float]): Initial error rate for the decoder. + error_channel (Optional[List[float]]): Initial error channel probabilities. + max_iter (Optional[int]): Maximum number of iterations for decoding. + bp_method (Optional[str]): Belief propagation method ('product_sum' or 'minimum_sum'). + ms_scaling_factor (Optional[float]): Scaling factor for the minimum-sum method. + schedule (Optional[str]): Scheduling method ('parallel', 'serial', or 'serial_relative'). + omp_thread_count (Optional[int]): Number of OpenMP threads to use. + random_schedule_seed (Optional[int]): Seed for random serial scheduling. + serial_schedule_order (Optional[List[int]]): Custom order for serial scheduling. + input_vector_type (str): Input vector type ('syndrome', 'received_vector', or 'auto'). + random_serial_schedule (bool): Whether to enable random serial scheduling. + dynamic_scaling_factor_damping (Optional[float]): Damping factor for dynamic scaling in the minimum-sum method. + ms_converge_value (Optional[float]): Convergence value for the minimum-sum method. 
""" - def __cinit__(self, pcm: Union[np.ndarray, scipy.sparse.spmatrix], error_rate: Optional[float] = None, error_channel: Optional[Union[np.ndarray,List[float]]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[Union[float,int]] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, - random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, input_vector_type: str = "auto", random_serial_schedule: bool = False, **kwargs): + random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, input_vector_type: str = "auto", random_serial_schedule: bool = False, dynamic_scaling_factor_damping: Optional[float] = -1, ms_converge_value=1.0, **kwargs): for key in kwargs.keys(): if key not in ["channel_probs"]: @@ -635,28 +764,22 @@ cdef class BpDecoder(BpDecoderBase): error_channel: Optional[Union[np.ndarray,List[float]]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[Union[float,int]] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, - input_vector_type: str = "auto", random_serial_schedule: bool = False, **kwargs): + input_vector_type: str = "auto", random_serial_schedule: bool = False, dynamic_scaling_factor_damping: Optional[float] = -1, ms_converge_value=1.0, **kwargs): pass def decode(self, input_vector: np.ndarray) -> np.ndarray: """ - Decode the input input_vector using belief propagation decoding algorithm. + Decode the input vector using the BP decoding algorithm. - Parameters - ---------- - input_vector : numpy.ndarray - A 1D numpy array of length equal to the number of rows in the parity check matrix. + Parameters: + input_vector (np.ndarray): A 1D numpy array representing the input vector. 
- Returns - ------- - numpy.ndarray - A 1D numpy array of length equal to the number of columns in the parity check matrix. + Returns: + np.ndarray: A 1D numpy array representing the decoded output. - Raises - ------ - ValueError - If the length of the input input_vector does not match the number of rows in the parity check matrix. + Raises: + ValueError: If the input vector length does not match the expected length. """ if(self.bpd.bp_input_type == SYNDROME and not len(input_vector)==self.m): @@ -698,7 +821,7 @@ cdef class BpDecoder(BpDecoderBase): @property def decoding(self) -> np.ndarray: """ - Returns the current decoded output. + Get the current decoded output. Returns: np.ndarray: A numpy array containing the current decoded output. @@ -711,35 +834,21 @@ cdef class BpDecoder(BpDecoderBase): cdef class SoftInfoBpDecoder(BpDecoderBase): """ - A decoder that uses soft information belief propagation algorithm for decoding binary linear codes. - - This class implements a modified version of the belief propagation decoding algorithm that accounts for - uncertainty in the syndrome readout using a serial belief propagation schedule. The decoder uses a minimum - sum method as the belief propagation variant. For more information on the algorithm, please see the original - research paper at https://arxiv.org/abs/2205.02341. - - Parameters - ---------- - pcm : Union[np.ndarray, spmatrix] - The parity check matrix for the code. - error_rate : Optional[float] - The probability of a bit being flipped in the received codeword. - error_channel : Optional[List[float]] - A list of probabilities that specify the probability of each bit being flipped in the received codeword. - Must be of length equal to the block length of the code. - max_iter : Optional[int] - The maximum number of iterations for the decoding algorithm. - bp_method : Optional[str] - The variant of belief propagation method to be used. The default value is 'minimum_sum'. 
- ms_scaling_factor : Optional[float] - The scaling factor used in the minimum sum method. The default value is 1.0. - cutoff : Optional[float] - The threshold value below which syndrome soft information is used. - random_serial_schedule : bool, optional - Whether to enable random serial scheduling. If True, the serial schedule order is randomized in each iteration. - By default False. + Soft Information Belief Propagation (BP) decoder for binary linear codes. + + This class implements a modified BP decoding algorithm that accounts for uncertainty in + the syndrome readout using a serial belief propagation schedule. + + Parameters: + pcm (Union[np.ndarray, spmatrix]): The parity check matrix. + error_rate (Optional[float]): Initial error rate for the decoder. + error_channel (Optional[List[float]]): Initial error channel probabilities. + max_iter (Optional[int]): Maximum number of iterations for decoding. + bp_method (Optional[str]): Belief propagation method ('minimum_sum'). + ms_scaling_factor (Optional[float]): Scaling factor for the minimum-sum method. + cutoff (Optional[float]): Threshold value below which syndrome soft information is used. + sigma (float): Standard deviation of the noise. """ - def __cinit__(self, pcm: Union[np.ndarray, spmatrix], error_rate: Optional[float] = None, error_channel: Optional[List[float]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[float] = 1.0, cutoff: Optional[float] = np.inf, sigma: float = 2.0, **kwargs): @@ -760,7 +869,7 @@ cdef class SoftInfoBpDecoder(BpDecoderBase): def decode(self, soft_info_syndrome: np.ndarray) -> np.ndarray: """ - Decode the input syndrome using the soft information belief propagation decoding algorithm. + Decode the input syndrome using the soft information BP decoding algorithm. 
Parameters ---------- @@ -787,7 +896,7 @@ cdef class SoftInfoBpDecoder(BpDecoderBase): @property def soft_syndrome(self) -> np.ndarray: """ - Returns the current soft syndrome. + Get the current soft syndrome. Returns: np.ndarray: A numpy array containing the current soft syndrome. @@ -801,7 +910,7 @@ cdef class SoftInfoBpDecoder(BpDecoderBase): @property def decoding(self) -> np.ndarray: """ - Returns the current decoded output. + Get the current decoded output. Returns: np.ndarray: A numpy array containing the current decoded output. diff --git a/src_python/ldpc/bp_flip/__init__.pyi b/src_python/ldpc/bp_flip/__init__.pyi index 7ba8eec..00966b9 100644 --- a/src_python/ldpc/bp_flip/__init__.pyi +++ b/src_python/ldpc/bp_flip/__init__.pyi @@ -3,7 +3,49 @@ import warnings from scipy.sparse import spmatrix from typing import Union, List, Optional -class BpFlipDecoder(BpDecoderBase): ... +class BpFlipDecoder(BpDecoderBase): + """ + A class representing a decoder that combines Belief Propagation (BP) with a flipping algorithm. + + This decoder performs iterative decoding on a given parity-check matrix using the belief propagation + algorithm combined with a flipping strategy to correct errors. The class is initialized with the + parity-check matrix and various decoding parameters. + + Parameters + ---------- + pcm : Union[np.ndarray, spmatrix] + The parity-check matrix, can be a dense (numpy.ndarray) or sparse (scipy.sparse) matrix. 
+ error_rate : Optional[float], optional + The expected error rate of the channel, by default None + error_channel : Optional[List[float]], optional + A list representing the error channel, by default None + max_iter : Optional[int], optional + The maximum number of iterations for the decoding process, by default 0 + bp_method : Optional[str], optional + The method used for belief propagation, by default 'minimum_sum' + ms_scaling_factor : Optional[float], optional + The scaling factor for the min-sum algorithm, by default 1.0 + schedule : Optional[str], optional + The schedule for updating nodes, by default 'parallel' + omp_thread_count : Optional[int], optional + The number of OpenMP threads to use, by default 1 + random_schedule_seed : Optional[int], optional + The seed for random schedule, by default False + serial_schedule_order : Optional[List[int]], optional + The order of nodes for serial schedule, by default None + osd_method : int, optional + The method used for ordered statistic decoder, by default 0 + osd_order : int, optional + The order for the ordered statistic decoder, by default 0 + flip_iterations : int, optional + The number of iterations for the flipping decoder, by default 0 + pflip_frequency : int, optional + The frequency of probabilistic flipping, by default 0 + pflip_seed : int, optional + The seed for probabilistic flipping, by default 0 + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + """ def __del__(self): ... 
diff --git a/src_python/ldpc/bp_flip/_bp_flip.pyx b/src_python/ldpc/bp_flip/_bp_flip.pyx index 591b7e5..75eae3d 100644 --- a/src_python/ldpc/bp_flip/_bp_flip.pyx +++ b/src_python/ldpc/bp_flip/_bp_flip.pyx @@ -6,12 +6,54 @@ from scipy.sparse import spmatrix from typing import Union, List, Optional cdef class BpFlipDecoder(BpDecoderBase): - + """ + A class representing a decoder that combines Belief Propagation (BP) with a flipping algorithm. + + This decoder performs iterative decoding on a given parity-check matrix using the belief propagation + algorithm combined with a flipping strategy to correct errors. The class is initialized with the + parity-check matrix and various decoding parameters. + + Parameters + ---------- + pcm : Union[np.ndarray, spmatrix] + The parity-check matrix, can be a dense (numpy.ndarray) or sparse (scipy.sparse) matrix. + error_rate : Optional[float], optional + The expected error rate of the channel, by default None + error_channel : Optional[List[float]], optional + A list representing the error channel, by default None + max_iter : Optional[int], optional + The maximum number of iterations for the decoding process, by default 0 + bp_method : Optional[str], optional + The method used for belief propagation, by default 'minimum_sum' + ms_scaling_factor : Optional[float], optional + The scaling factor for the min-sum algorithm, by default 1.0 + schedule : Optional[str], optional + The schedule for updating nodes, by default 'parallel' + omp_thread_count : Optional[int], optional + The number of OpenMP threads to use, by default 1 + random_schedule_seed : Optional[int], optional + The seed for random schedule, by default False + serial_schedule_order : Optional[List[int]], optional + The order of nodes for serial schedule, by default None + osd_method : int, optional + The method used for ordered statistic decoder, by default 0 + osd_order : int, optional + The order for the ordered statistic decoder, by default 0 + flip_iterations : int, 
optional + The number of iterations for the flipping decoder, by default 0 + pflip_frequency : int, optional + The frequency of probabilistic flipping, by default 0 + pflip_seed : int, optional + The seed for probabilistic flipping, by default 0 + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + """ def __cinit__(self, pcm: Union[np.ndarray, spmatrix], error_rate: Optional[float] = None, error_channel: Optional[List[float]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[float] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, random_schedule_seed: Optional[int] = False, serial_schedule_order: Optional[List[int]] = None, osd_method: int = 0, - osd_order: int = 0, flip_iterations: int = 0, pflip_frequency: int = 0, pflip_seed: int = 0): + osd_order: int = 0, flip_iterations: int = 0, pflip_frequency: int = 0, pflip_seed: int = 0, + dynamic_scaling_factor_damping: Optional[float] = -1.0): self.MEMORY_ALLOCATED=False diff --git a/src_python/ldpc/bplsd_decoder/__init__.pyi b/src_python/ldpc/bplsd_decoder/__init__.pyi index 4a6b82f..f9a15df 100644 --- a/src_python/ldpc/bplsd_decoder/__init__.pyi +++ b/src_python/ldpc/bplsd_decoder/__init__.pyi @@ -36,17 +36,16 @@ class BpLsdDecoder(BpDecoderBase): A list of integers specifying the serial schedule order. Must be of length equal to the block length of the code, by default None. bits_per_step : int, optional - Specifies the number of bits added to the cluster in each step of the LSD algorithm. If no value is provided, this is set the block length of the code. + Specifies the number of bits added to the cluster in each step of the LSD algorithm. If no value is provided, this is set to the block length of the code. lsd_order: int, optional The order of the LSD algorithm applied to each cluster. 
Must be greater than or equal to 0, by default 0. lsd_method: str, optional The LSD method of the LSD algorithm applied to each cluster. Must be one of {'LSD_0', 'LSD_E', 'LSD_CS'}. By default 'LSD_0'. - - Notes - ----- - The `BpLsdDecoder` class leverages soft information outputted by the BP decoder to guide the cluster growth - in the LSD algorithm. The number of bits added to the cluster in each step is controlled by the `bits_per_step` parameter. + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + ms_converge_value : Optional[float], optional + Convergence value for the minimum-sum method, by default 1.0. """ def __cinit__(self, pcm: Union[np.ndarray, scipy.sparse.spmatrix], error_rate: Optional[float] = None, @@ -58,7 +57,10 @@ class BpLsdDecoder(BpDecoderBase): bits_per_step:int = 1, input_vector_type: str = "syndrome", lsd_order: int = 0, - lsd_method: Union[str, int] = 0, **kwargs): ... + lsd_method: Union[str, int] = 0, + dynamic_scaling_factor_damping: Optional[float] = -1.0, + ms_converge_value: Optional[float] = 1.0, **kwargs): ... + def __del__(self): ... diff --git a/src_python/ldpc/bplsd_decoder/_bplsd_decoder.pyx b/src_python/ldpc/bplsd_decoder/_bplsd_decoder.pyx index 8f7b4ea..e2424bb 100644 --- a/src_python/ldpc/bplsd_decoder/_bplsd_decoder.pyx +++ b/src_python/ldpc/bplsd_decoder/_bplsd_decoder.pyx @@ -38,17 +38,16 @@ cdef class BpLsdDecoder(BpDecoderBase): A list of integers specifying the serial schedule order. Must be of length equal to the block length of the code, by default None. bits_per_step : int, optional - Specifies the number of bits added to the cluster in each step of the LSD algorithm. If no value is provided, this is set the block length of the code. + Specifies the number of bits added to the cluster in each step of the LSD algorithm. If no value is provided, this is set to the block length of the code. 
lsd_order: int, optional The order of the LSD algorithm applied to each cluster. Must be greater than or equal to 0, by default 0. lsd_method: str, optional The LSD method of the LSD algorithm applied to each cluster. Must be one of {'LSD_0', 'LSD_E', 'LSD_CS'}. By default 'LSD_0'. - - Notes - ----- - The `BpLsdDecoder` class leverages soft information outputted by the BP decoder to guide the cluster growth - in the LSD algorithm. The number of bits added to the cluster in each step is controlled by the `bits_per_step` parameter. + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + ms_converge_value : Optional[float], optional + Convergence value for the minimum-sum method, by default 1.0. """ def __cinit__(self, pcm: Union[np.ndarray, scipy.sparse.spmatrix], error_rate: Optional[float] = None, @@ -60,7 +59,9 @@ cdef class BpLsdDecoder(BpDecoderBase): bits_per_step:int = 1, input_vector_type: str = "syndrome", lsd_order: int = 0, - lsd_method: Union[str, int] = 0, **kwargs): + lsd_method: Union[str, int] = 0, + dynamic_scaling_factor_damping: Optional[float] = -1.0, + ms_converge_value: Optional[float] = 1.0, **kwargs): # compatability with osd_method/osd_order if "osd_method" in kwargs: @@ -95,6 +96,7 @@ cdef class BpLsdDecoder(BpDecoderBase): self.bits_per_step = bits_per_step self.input_vector_type = "syndrome" self.MEMORY_ALLOCATED=True + def __del__(self): if self.MEMORY_ALLOCATED: diff --git a/src_python/ldpc/bposd_decoder/__init__.pyi b/src_python/ldpc/bposd_decoder/__init__.pyi index 17ffb73..03ae94f 100644 --- a/src_python/ldpc/bposd_decoder/__init__.pyi +++ b/src_python/ldpc/bposd_decoder/__init__.pyi @@ -35,9 +35,13 @@ class BpOsdDecoder(BpDecoderBase): A list of integers that specify the serial schedule order. Must be of length equal to the block length of the code, by default None. osd_method : int, optional - The OSD method used. Must be one of {'OSD_0', 'OSD_E', 'OSD_CS'}. 
+ The OSD method used. Must be one of {'OSD_0', 'OSD_E', 'OSD_CS'}. osd_order : int, optional The OSD order, by default 0. + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + ms_converge_value : Optional[float], optional + Convergence value for the minimum-sum method, by default 1.0. Notes ----- @@ -50,7 +54,7 @@ class BpOsdDecoder(BpDecoderBase): error_channel: Optional[List[float]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[Union[float,int]] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, osd_method: Union[str, int, float] = 0, - osd_order: int = 0, input_vector_type: str = "syndrome", **kwargs): ... + osd_order: int = 0, input_vector_type: str = "syndrome", dynamic_scaling_factor_damping: Optional[float] = -1.0, ms_converge_value: Optional[float] = 1.0, **kwargs): ... def __del__(self): ... diff --git a/src_python/ldpc/bposd_decoder/_bposd_decoder.pyx b/src_python/ldpc/bposd_decoder/_bposd_decoder.pyx index b23d7ea..55ff155 100644 --- a/src_python/ldpc/bposd_decoder/_bposd_decoder.pyx +++ b/src_python/ldpc/bposd_decoder/_bposd_decoder.pyx @@ -37,9 +37,13 @@ cdef class BpOsdDecoder(BpDecoderBase): A list of integers that specify the serial schedule order. Must be of length equal to the block length of the code, by default None. osd_method : int, optional - The OSD method used. Must be one of {'OSD_0', 'OSD_E', 'OSD_CS'}. + The OSD method used. Must be one of {'OSD_0', 'OSD_E', 'OSD_CS'}. osd_order : int, optional The OSD order, by default 0. + dynamic_scaling_factor_damping : Optional[float], optional + The damping factor for dynamic scaling in the minimum sum method, by default -1.0. + ms_converge_value : Optional[float], optional + Convergence value for the minimum-sum method, by default 1.0. 
Notes ----- @@ -52,8 +56,8 @@ cdef class BpOsdDecoder(BpDecoderBase): error_channel: Optional[List[float]] = None, max_iter: Optional[int] = 0, bp_method: Optional[str] = 'minimum_sum', ms_scaling_factor: Optional[Union[float,int]] = 1.0, schedule: Optional[str] = 'parallel', omp_thread_count: Optional[int] = 1, random_schedule_seed: Optional[int] = 0, serial_schedule_order: Optional[List[int]] = None, osd_method: Union[str, int, float] = 0, - osd_order: int = 0, input_vector_type: str = "syndrome", **kwargs): - + osd_order: int = 0, input_vector_type: str = "syndrome", dynamic_scaling_factor_damping: Optional[float] = -1.0, ms_converge_value: Optional[float] = 1.0, **kwargs): + for key in kwargs.keys(): if key not in ["channel_probs"]: raise ValueError(f"Unknown parameter '{key}' passed to the BpDecoder constructor.")