diff --git a/.gitignore b/.gitignore index 9c3b44d..3ceb093 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,5 @@ old/ build/ *.mp4 + +checkpoints/ diff --git a/docker/Dockerfile b/docker/Dockerfile index d7f97d3..ef5fbcb 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -5,7 +5,8 @@ RUN pip3 install \ matplotlib \ scikit-learn \ tensorflow-model-optimization \ - kagglehub + kagglehub \ + pandas # Set backward compatibility for tfmot RUN pip3 install tf_keras --no-deps diff --git a/functions.diff b/functions.diff new file mode 100644 index 0000000..1d2b0d0 --- /dev/null +++ b/functions.diff @@ -0,0 +1,99 @@ +diff --git a/src/stage/functions.py b/src/stage/functions.py +index 3e06394..919615c 100644 +--- a/src/stage/functions.py ++++ b/src/stage/functions.py +@@ -277,62 +277,49 @@ def model_quantize(model: tf.keras.Model, **params) -> tf.keras.Model: + + # --- Alpha Initialization for QAT --- + +- +-def compute_alpha_dict(model, x_train, batch_size=128): +- """Computes alpha values for weights and activations.""" +- alpha_dict = {} +- # Compute weight alphas +- for layer in tqdm( +- model.layers, +- desc="Computing weight alphas", +- file=sys.stdout, +- leave=False, +- ): +- if layer.get_weights(): +- alpha_dict[layer.name] = {} +- # Simplified alpha calculation for weights +- weights = layer.get_weights()[0] +- alpha_dict[layer.name]["kernel"] = np.max(np.abs(weights)) +- +- # Compute activation alphas ++def get_activations_output(model, x_train, batch_size=128): ++ """Gets the activations of the model for the training data.""" + intermediate_model = models.Model( + inputs=model.input, outputs=[layer.output for layer in model.layers] + ) + activations = intermediate_model.predict( + x_train, batch_size=batch_size, verbose=0 + ) ++ return activations ++ ++def compute_alpha_dict(model, x_train, batch_size=128): ++ """Computes alpha values for weights and activations in a single comprehension.""" ++ activations = get_activations_output(model, x_train, batch_size) + +- for layer, activation_data in tqdm( +- zip(model.layers, activations), +- total=len(model.layers), +- desc="Computing activation alphas", +- file=sys.stdout, +- leave=False, +- ): +- if layer.name not in alpha_dict: +- alpha_dict[layer.name] = {} +- alpha_dict[layer.name]["activation"] = np.max(np.abs(activation_data)) ++ alpha_dict = { ++ layer.name: { ++ **{weight.name: np.max(np.abs(weight.numpy())) for weight in layer.weights}, ++ 'activation': np.max(np.abs(activation_data)) ++ } ++ for layer, activation_data in zip(model.layers, activations) ++ } + + return alpha_dict + +- +-def apply_alpha_dict(q_model, alpha_dict): ++def apply_alpha_dict(model, alpha_dict): + """Applies pre-computed alpha values to a quantized model.""" +- for layer in q_model.layers: +- original_name = layer.name.replace("quant_", "") +- if original_name in alpha_dict: +- for alpha_type in ["kernel", "activation"]: +- if new_alpha := alpha_dict[original_name].get(alpha_type): +- for weight_var in layer.weights: +- if ( +- alpha_type in weight_var.name +- and "alpha" in weight_var.name +- ): +- weight_var.assign(new_alpha) +- print( +- f"Updated {weight_var.name} with alpha: {new_alpha:.4f}" +- ) +- return q_model ++ for layer in model.layers: ++ original_layer_name = layer.name.replace("quant_", "") ++ ++ if original_layer_name not in alpha_dict: ++ continue ++ ++ for weight in layer.weights: ++ if weight.name not in alpha_dict[original_layer_name]: ++ continue ++ ++ # See the quantizers weight naming convention ++ # No name_suffix for now ++ 
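++                # Hypothetical example of the naming convention assumed here: for a
++                # reference layer "conv2d", compute_alpha_dict stores
++                # alpha_dict["conv2d"]["conv2d/kernel:0"] = max(|kernel|), so the
++                # assignment below only fires when the quantized layer exposes a
++                # weight with exactly that name; all other weights are skipped above.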
weight.assign( ++ alpha_dict[original_layer_name][weight.name] ++ ) ++ print(f"Updated {weight.name} with alpha: {alpha_dict[original_layer_name][weight.name]:.4f}") ++ return model + + + def model_initialize_parameters(model, ref_model, **params) -> tf.keras.Model: diff --git a/org.MD b/org.MD new file mode 100644 index 0000000..e42f5d9 --- /dev/null +++ b/org.MD @@ -0,0 +1,21 @@ +# Dataset + - cifar10 has more features +# Model + - Lenet5_custom_v2 + +# QConfig +All Uniform arithmetic +2->8 uniform + +Flex -- Uniform +4, 6 and 8 bits. +levels in [2-20] that are valid. + + +Set the seed and try a couple of seeds. +3 runs with different seeds. + +Plot the mean and variance of all the runs. + +## Second step + - dsp: we'll see. diff --git a/src/examples/data_analysis/plot.py b/src/examples/data_analysis/plot.py index 62cbe0f..8f59230 100644 --- a/src/examples/data_analysis/plot.py +++ b/src/examples/data_analysis/plot.py @@ -16,9 +16,10 @@ def plot_flex_snapshot( # NOTE(Fran): Big assumption here that the keys are always the same # TODO(Fran): Also it seems activations aren't being stored as model weights # ... - alpha_history = layer_history[f"{layer_name}/alpha:0"] - level_history = layer_history[f"{layer_name}/levels:0"] - threshold_history = layer_history[f"{layer_name}/thresholds:0"] + print(layer_history.keys()) + alpha_history = layer_history[f"{layer_name}/kernel_alpha:0"] + level_history = layer_history[f"{layer_name}/kernel_levels:0"] + threshold_history = layer_history[f"{layer_name}/kernel_thresholds:0"] bits = quantizer.bits signed = quantizer.signed diff --git a/src/examples/datasets/cifar-10.py b/src/examples/datasets/cifar-10.py new file mode 100644 index 0000000..836bf0c --- /dev/null +++ b/src/examples/datasets/cifar-10.py @@ -0,0 +1,33 @@ +import tensorflow as tf +from tensorflow.keras.utils import to_categorical + + +def to_tf_dataset(x, y, batch_size, shuffle=True): + dataset = tf.data.Dataset.from_tensor_slices((x, y)) + if shuffle: + dataset = dataset.shuffle(buffer_size=len(x)) + return dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE) + + +def generate_dataset(batch_size): + """Generate the CIFAR-10 dataset with a validation split.""" + (x_train, y_train), (x_test, y_test) = ( + tf.keras.datasets.cifar10.load_data() + ) + + x_train = x_train.astype("float32") / 255.0 + x_test = x_test.astype("float32") / 255.0 + + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + + # Split train data into train and validation sets + val_size = len(y_test) + x_val, y_val = x_train[:val_size], y_train[:val_size] + x_train, y_train = x_train[val_size:], y_train[val_size:] + + train_dataset = to_tf_dataset(x_train, y_train, batch_size) + val_dataset = to_tf_dataset(x_val, y_val, batch_size) + test_dataset = to_tf_dataset(x_test, y_test, batch_size) + + return train_dataset, val_dataset, test_dataset diff --git a/src/examples/models/lenet_custom.py b/src/examples/models/lenet_custom.py new file mode 100644 index 0000000..4a8f65e --- /dev/null +++ b/src/examples/models/lenet_custom.py @@ -0,0 +1,71 @@ +from tensorflow.keras.layers import ( + Conv2D, + Dense, + Dropout, + Flatten, + MaxPooling2D, +) +from tensorflow.keras.models import Sequential + +categories = 10 # Number of classes in CIFAR-10 +input_shape = [None, 32, 32, 3] # Input shape for CIFAR-10 dataset +model = Sequential( + [ + Conv2D( + 64, + (3, 3), + padding="same", + activation="relu", + input_shape=input_shape[1:], + name="conv2d", + ), + MaxPooling2D((2, 2)), + Conv2D( + 128, (3,
3), padding="same", activation="relu", name="conv2d_1" + ), + MaxPooling2D((2, 2)), + Flatten(), + Dense(256, activation="relu", name="dense"), + Dropout(0.5), + Dense(categories, activation="softmax", name="dense_1"), + ], +) + +from quantizers.flex_quantizer import FlexQuantizer +from quantizers.uniform_quantizer import UniformQuantizer + +n_levels = 10 # Number of quantization levels +bits = 8 # Number of bits for quantization + +qconfig = { + "conv2d": { + "weights": { + "kernel": FlexQuantizer(bits=bits, n_levels=n_levels, signed=True), + "bias": UniformQuantizer(bits=8, signed=True), + }, + "activations": {"activation": UniformQuantizer(bits=16, signed=False)}, + }, + "conv2d_1": { + "weights": { + "kernel": FlexQuantizer(bits=bits, n_levels=n_levels, signed=True), + "bias": UniformQuantizer(bits=8, signed=True), + }, + "activations": {"activation": UniformQuantizer(bits=16, signed=False)}, + }, + "dense": { + "weights": { + "kernel": FlexQuantizer(bits=bits, n_levels=n_levels, signed=True), + "bias": UniformQuantizer(bits=8, signed=True), + }, + "activations": {"activation": UniformQuantizer(bits=16, signed=False)}, + }, + "dense_1": { + "weights": { + "kernel": FlexQuantizer(bits=bits, n_levels=n_levels, signed=True), + "bias": UniformQuantizer(bits=8, signed=True), + }, + "activations": {"activation": UniformQuantizer(bits=16, signed=False)}, + }, +} + +qconfigs = {"qconfig": qconfig} diff --git a/src/examples/run.py b/src/examples/run.py index 93f18f8..61be2d3 100755 --- a/src/examples/run.py +++ b/src/examples/run.py @@ -50,6 +50,7 @@ def main(args): loss="categorical_crossentropy", metrics=["accuracy"], ) + # initialize_quantizer_weights(qmodel, qconfig) callback_tuples = [ (CaptureWeightCallback(qlayer), qconfig[layer.name]) diff --git a/src/quantizers/flex_quantizer.py b/src/quantizers/flex_quantizer.py index bec60cc..bed5988 100644 --- a/src/quantizers/flex_quantizer.py +++ b/src/quantizers/flex_quantizer.py @@ -157,10 +157,8 @@ def grad(upstream): ##### dq_dx uses STE ##### dq_dx = tf.where( tf.logical_and( - tf.greater_equal(x, self.thresholds[0]), - tf.less_equal( - x, self.thresholds[-1] - ), # should it be alpha? + tf.greater_equal(x, thresholds[0]), + tf.less_equal(x, thresholds[-1]), # should it be alpha? 
), upstream, tf.zeros_like(x), @@ -207,7 +205,7 @@ def grad(upstream): ##### dq_dthresholds using piecewise-STE ##### dq_dthresholds = tf.zeros_like(thresholds) - for i in range(1, self.thresholds.shape[0] - 1): + for i in range(1, thresholds.shape[0] - 1): delta_y = qlevels[i - 1] - qlevels[i] delta_x = thresholds[i + 1] - thresholds[i - 1] @@ -215,8 +213,8 @@ def grad(upstream): # Fall within the range of the two borderline levels masked_upstream = tf.where( tf.logical_and( - tf.greater_equal(x, self.thresholds[i - 1]), - tf.less_equal(x, self.thresholds[i + 1]), + tf.greater_equal(x, thresholds[i - 1]), + tf.less_equal(x, thresholds[i + 1]), ), upstream, tf.zeros_like(x), diff --git a/src/quantizers/integration_test.py b/src/quantizers/integration_test.py new file mode 100755 index 0000000..913b794 --- /dev/null +++ b/src/quantizers/integration_test.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +import unittest + +from tensorflow.keras.layers import Dense +from tensorflow.keras.models import Sequential + +from configs.qmodel import apply_quantization +from quantizers.uniform_quantizer import UniformQuantizer + + +class TestQuantizers(unittest.TestCase): + def test_quantizers(self): + model = Sequential( + [ + Dense(10, activation="relu", input_shape=(20,), name="dense1"), + Dense(5, activation="softmax"), + ] + ) + + qconfig = { + "dense1": { + "weights": { + "kernel": UniformQuantizer( + bits=4, + signed=True, + ), + "bias": UniformQuantizer( + bits=4, + signed=True, + ), + }, + "activations": { + "activation": UniformQuantizer( + bits=4, + signed=True, + ) + }, + } + } + + apply_quantization(model, qconfig) + # print(quantized_model.weights) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/quantizers/uniform_quantizer.py b/src/quantizers/uniform_quantizer.py index 384b848..9cd2e3b 100755 --- a/src/quantizers/uniform_quantizer.py +++ b/src/quantizers/uniform_quantizer.py @@ -69,7 +69,6 @@ def __call__(self, w): alpha = layer.add_weight( name=f"{name}{self.name_suffix}_alpha", initializer=self.initializer, - # shape=(1,), trainable=True, dtype=tf.float32, regularizer=self.regularizer, diff --git a/src/stage/__init__.py b/src/stage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/stage/cifar_10_lenet5_model_accuracy_vs_size.png b/src/stage/cifar_10_lenet5_model_accuracy_vs_size.png new file mode 100644 index 0000000..8ecfb7e Binary files /dev/null and b/src/stage/cifar_10_lenet5_model_accuracy_vs_size.png differ diff --git a/src/stage/fashion_mnist_lenet5_model_accuracy_vs_size.png b/src/stage/fashion_mnist_lenet5_model_accuracy_vs_size.png new file mode 100644 index 0000000..bdb970c Binary files /dev/null and b/src/stage/fashion_mnist_lenet5_model_accuracy_vs_size.png differ diff --git a/src/stage/flex_main.py b/src/stage/flex_main.py new file mode 100755 index 0000000..7a11a8d --- /dev/null +++ b/src/stage/flex_main.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python3 + +from functools import partial +from pathlib import Path + +import tensorflow as tf +from functions import FUNCTION_MAP + +from stage import Stage + +# --- Configuration for All 7 Pipeline Stages --- +# This list defines the blueprint for our pipeline. Each dictionary +# will be used to initialize a Stage object. 
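+# Based on the orchestrator loop at the bottom of this file, the assumed Stage
+# contract is roughly:
+#     model, stage_hash = stage.run(input_model=model, previous_hash=previous_hash)
+# i.e. each stage consumes the previous model/hash pair and returns the new one,
+# which is presumably what checkpoint_path/metadata_path are used to cache.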
+ + +stages_hyperparams = [ + # Stage 0: Model creation + { + "name": "model_creation", + "seed": 12345, + "function": "model_create", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "model_name": "lenet5_custom", + }, + }, + # Stage 1: Initial training + { + "name": "initial_training", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 50, + "batch_size": 1024, + "learning_rate": 0.001, + "validation_split": 0.1, + }, + }, + # Stage 2: BN folding + { + "name": "bnf", + "seed": 12345, + "function": "model_transform_bnf", + "kwargs": { + "merge_activation": True, + }, + }, + # Stage 3: Post BN folding training + { + "name": "pbnf_training", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 1, + "batch_size": 32, + "learning_rate": 0.0005, + "validation_split": 0.1, + }, + }, + # Stage 4: Model quantization + { + "name": "quantization", + "seed": 12345, + "function": "model_quantize", + "kwargs": { + "input_shape": [None, 32, 32, 3], + # 'kernel' is set to None because it will be dynamically + # updated inside the experimental loop below. + "kernel": None, + "bias": [ + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + ], + "activations": [ + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + ], + }, + }, + # Stage 5: Alpha initialization + { + "name": "alpha_initialization", # Fixed typo from original "initialiation" + "seed": 12345, + "function": "model_initialize_parameters", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "type": "alpha", + }, + }, + # Stage 6: QAT + { + "name": "qat", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 10, + "batch_size": 32, + "learning_rate": 0.0001, + "validation_split": 0.1, + }, + }, + { + "name": "final_evaluation", + "seed": 12345, + "function": "model_evaluate", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + }, + }, +] + +if __name__ == "__main__": + + # This is the main experimental loop from your coworker's script. + # It runs the entire 7-stage pipeline multiple times. + # bits = range(8, 11) + bits = range(1, 11) + n_levels = [2, 3, 4, 5, 6, 7, 8, 12, 16, 20] + combinations = [(b, n) for b in bits for n in n_levels if n <= 2**b] + + for bits, n_levels in combinations: + print( + f"\n{'='*20} STARTING EXPERIMENT: FLEX BITS = {bits}, N_LEVELS = {n_levels} {'='*20}\n" + ) + + # --- Configure the Experiment --- + # Dynamically set the 'kernel' quantization parameter for this specific run. 
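+        # One flexible-kernel entry per quantizable layer: lenet5_custom has
+        # 2 conv + 3 dense layers, matching the 5 bias/activation entries in
+        # the quantization stage above.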
+ kernel_config = [ + {"type": "flexible", "bits": bits, "n_levels": n_levels} + for _ in range(5) + ] + stages_hyperparams[4]["kwargs"]["kernel"] = kernel_config + + # Create the list of Stage objects from the (now updated) configurations + dataset = stages_hyperparams[0]["kwargs"].get("dataset") + model_name = stages_hyperparams[0]["kwargs"].get("model_name") + pipeline = [ + Stage( + function=FUNCTION_MAP[config["function"]], + initial_config=config, + checkpoint_path=Path("checkpoints") + / f"flex_{model_name}-{dataset}", + metadata_path=Path(f"{bits}_bit-{n_levels}_levels"), + ) + for config in stages_hyperparams + ] + + # --- The Orchestrator --- + # It tracks both the model object and the hash of the last operation + model: tf.keras.Model | None = None + previous_hash: str | None = None + + # The loop's responsibility is to pass the state (model & hash) between stages + for stage in pipeline: + # We need to set the ref model + if stage.initial_config["name"] == "alpha_initialization": + assert ( + ref_model is not None + ), "Reference model for alpha initialization is not set." + stage.function = partial(stage.function, ref_model=ref_model) + model, previous_hash = stage.run( + input_model=model, previous_hash=previous_hash + ) + + # Save the ref model after the last stage we dont quantize + if stage.initial_config["name"] == "pbnf_training": + ref_model = tf.keras.models.clone_model(model) + + print( + f"\n{'='*20} FINISHED EXPERIMENT: FLEX BITS = {bits}, N_LEVELS = {n_levels} {'='*20}\n" + ) + print( + f"Final model for {bits}-bit experiment corresponds to hash: {previous_hash}" + ) diff --git a/src/stage/flex_mainv2.py b/src/stage/flex_mainv2.py new file mode 100755 index 0000000..1c260a1 --- /dev/null +++ b/src/stage/flex_mainv2.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 + +from functions import model_create, model_quantize, model_train +from stagev2 import Pipeline, StageMetadata + +# --- Configuration for All 7 Pipeline Stages --- +# This list defines the blueprint for our pipeline. Each dictionary +# will be used to initialize a Stage object. + +stages_hyperparams = [ + # Stage 0: Model creation + { + "name": "model_creation", + "seed": 12345, + "function": model_create, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "model_name": "custom_cnn1_for_cifar10", + }, + }, + # Stage 1: Initial training + { + "name": "initial_training", + "seed": 12345, + "function": model_train, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 10, + "batch_size": 128, + "learning_rate": 0.001, + "validation_split": 0.1, + }, + }, + # Stage 2: Model quantization + { + "name": "quantization", + "seed": 12345, + "function": model_quantize, + "parameters": { + "input_shape": [None, 32, 32, 3], + # 'kernel' is set to None because it will be dynamically + # updated inside the experimental loop below. 
+ "kernel": None, + "bias": [ + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + ], + "activations": [ + # {"type": "none"}, + # {"type": "none"}, + # {"type": "none"}, + # {"type": "none"}, + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + ], + }, + }, + # Stage 5: Alpha initialization + # { + # "name": "alpha_initialization", + # "seed": 12345, + # "function": initialize_quantizer_weights, + # "parameters": { + # "dataset": "cifar10", + # "batch_size": 512, + # "input_shape": [None, 32, 32, 3], + # "categories": 10, + # "type": "alpha", + # }, + # }, + # Stage 6: QAT + # { + # "name": "qat_fast", + # "seed": 12345, + # "function": model_train, + # "parameters": { + # "dataset": "cifar10", + # "input_shape": [None, 32, 32, 3], + # "categories": 10, + # "epochs": 15, + # "batch_size": 128, + # "learning_rate": 0.0001, + # "validation_split": 0.2, + # }, + # }, + # Stage 6: QAT + { + "name": "qat", + "seed": 12345, + "function": model_train, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 35, + "batch_size": 128, + "learning_rate": 0.001 / 10, + "validation_split": 0.2, + }, + }, +] + + +if __name__ == "__main__": + bits = [4, 6, 8, 10] + # n_levels = [2, 3, 4, 5, 6, 7, 8, 12, 16, 20] + # n_levels = [4, 6, 8, 10] + n_levels = range(4, 13) + combinations = [ + (b, n) for b in reversed(bits) for n in reversed(n_levels) if n <= 2**b + ] + for bits, n_levels in combinations: + print( + f"\n{'='*20} STARTING EXPERIMENT: FLEX BITS = {bits}, N_LEVELS = {n_levels} {'='*20}\n" + ) + + # --- Configure the Experiment --- + # Dynamically set the 'kernel' quantization parameter for this specific run. + kernel_config = [ + {"type": "flexible", "bits": bits, "n_levels": n_levels} + for _ in range(4) + ] + stages_hyperparams[2]["parameters"]["kernel"] = kernel_config + stages_metadata = [ + StageMetadata.from_dict(stage_dict) + for stage_dict in stages_hyperparams + ] + pipeline = Pipeline( + name=f"experiment_flex_bits_{bits}_nlevels_{n_levels}", + stage_definitions=stages_metadata, + ) + pipeline.run() diff --git a/src/stage/flex_mainv3.py b/src/stage/flex_mainv3.py new file mode 100755 index 0000000..8d69649 --- /dev/null +++ b/src/stage/flex_mainv3.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python3 + +from collections import Counter + +from functions import model_create, model_quantize, model_train +from stagev2 import Pipeline, StageMetadata + +# --- Configuration for All 7 Pipeline Stages --- +# This list defines the blueprint for our pipeline. Each dictionary +# will be used to initialize a Stage object. 
+ +stages_hyperparams = [ + # Stage 0: Model creation + { + "name": "model_creation", + "seed": 12345, + "function": model_create, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "model_name": "custom_cnn1_for_cifar10", + }, + }, + # Stage 1: Initial training + { + "name": "initial_training", + "seed": 12345, + "function": model_train, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 20, + "batch_size": 128, + "learning_rate": 0.001, + "validation_split": 0.1, + }, + }, + # Stage 2: Model quantization + { + "name": "quantization", + "seed": 12345, + "function": model_quantize, + "parameters": { + "input_shape": [None, 32, 32, 3], + # 'kernel' is set to None because it will be dynamically + # updated inside the experimental loop below. + "kernel": None, + "bias": [ + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + ], + "activations": [ + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + {"type": None}, # "uniform", "bits": 16}, + ], + }, + }, + # Stage 5: Alpha initialization + # { + # "name": "alpha_initialization", + # "seed": 12345, + # "function": initialize_quantizer_weights, + # "parameters": { + # "dataset": "cifar10", + # "batch_size": 512, + # "input_shape": [None, 32, 32, 3], + # "categories": 10, + # "type": "alpha", + # }, + # }, + # Stage 6: QAT + { + "name": "qat", + "seed": 12345, + "function": model_train, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 200, + "batch_size": 16, + "learning_rate": 0.001 / 100, + "validation_split": 0.2, + "early_stopping": True, + }, + }, +] + + +if __name__ == "__main__": + # Check that there are no repeated names. + names = [stage["name"] for stage in stages_hyperparams] + cnt = Counter(names) + dups = [name for name, c in cnt.items() if c > 1] + assert not dups, f"Duplicate stage names detected: {dups}" + + seeds = [ + # 12345, 123456, + # 1234567, 12345678, + # 123456789, 12346, + # 12347, 12348, + # 12349, 12350, + # 12351, 12352, + # 12353, 12354, + # 12355, + # 12356, 12357, 12358, + # 12359, + 12360, + 12361, + 12362, + ] + bits = [4, 6, 8] + n_levels = [2, 3, 4, 6, 8, 10, 16, 5, 7, 20, 25, 30] + combinations = [ + (seed, b, n) + for seed in seeds + for b in reversed(bits) + for n in reversed(n_levels) + if n < 2**b + # (b, n) for b in bits for n in n_levels if n <= 2**b + ] + for seed, bits, n_levels in combinations: + print( + f"\n{'='*20} STARTING EXPERIMENT: SEED = {seed}, FLEX BITS = {bits}, N_LEVELS = {n_levels} {'='*20}\n" + ) + + for i in range(len(stages_hyperparams)): + stages_hyperparams[i]["seed"] = seed + + # --- Configure the Experiment --- + # Dynamically set the 'kernel' quantization parameter for this specific run. 
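+        # Note: the kernel_config defined just below keeps the first and last layer
+        # kernels fixed at an 8-bit / 16-level flexible quantizer and only sweeps the
+        # two middle layers with the experimental (bits, n_levels) pair.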
+ # kernel_config = [ + # {"type": "flexible", "bits": bits, "n_levels": n_levels} + # for _ in range(4) + # ] + kernel_config = [ + {"type": "flexible", "bits": 8, "n_levels": 16}, + {"type": "flexible", "bits": bits, "n_levels": n_levels}, + {"type": "flexible", "bits": bits, "n_levels": n_levels}, + {"type": "flexible", "bits": 8, "n_levels": 16}, + ] + stages_hyperparams[2]["parameters"]["kernel"] = kernel_config + stages_metadata = [ + StageMetadata.from_dict(stage_dict) + for stage_dict in stages_hyperparams + ] + pipeline = Pipeline( + name=f"experiment_flex_bits_{bits}_nlevels_{n_levels}", + stage_definitions=stages_metadata, + ) + pipeline.run() + + print( + f"\n{'='*20} END OF EXPERIMENT: SEED = {seed}, FLEX BITS = {bits}, N_LEVELS = {n_levels} {'='*20}\n" + ) diff --git a/src/stage/functions.py b/src/stage/functions.py new file mode 100644 index 0000000..249cdf0 --- /dev/null +++ b/src/stage/functions.py @@ -0,0 +1,687 @@ +# functions.py + + +import re + +import numpy as np +import tensorflow as tf +from tensorflow.keras import layers, models +from tensorflow.keras.callbacks import EarlyStopping +from tensorflow.keras.datasets import mnist +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.utils import to_categorical + +from configs.generate_config import GenerateConfig +from configs.qmodel import apply_quantization +from quantizers.flex_quantizer import FlexQuantizer +from quantizers.uniform_quantizer import UniformQuantizer + +# --- Data Loading --- + + +def load_data(dataset_name: str) -> dict: + """Loads and preprocesses the specified dataset.""" + if dataset_name == "mnist": + (x_train, y_train), (x_test, y_test) = mnist.load_data() + + # Reshape and normalize images + x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0 + x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0 + + # One-hot encode labels + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + + return { + "x_train": x_train, + "y_train": y_train, + "x_test": x_test, + "y_test": y_test, + } + if dataset_name == "fashion_mnist": + (x_train, y_train), (x_test, y_test) = ( + tf.keras.datasets.fashion_mnist.load_data() + ) + + # Reshape and normalize images + x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0 + x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0 + + # One-hot encode labels + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + + return { + "x_train": x_train, + "y_train": y_train, + "x_test": x_test, + "y_test": y_test, + } + if dataset_name == "cifar10": + (x_train, y_train), (x_test, y_test) = ( + tf.keras.datasets.cifar10.load_data() + ) + + # Normalize images + x_train = x_train.astype("float32") / 255.0 + x_test = x_test.astype("float32") / 255.0 + + # One-hot encode labels + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + + return { + "x_train": x_train, + "y_train": y_train, + "x_test": x_test, + "y_test": y_test, + } + if dataset_name == "cifar100": + (x_train, y_train), (x_test, y_test) = ( + tf.keras.datasets.cifar100.load_data() + ) + + # Normalize images + x_train = x_train.astype("float32") / 255.0 + x_test = x_test.astype("float32") / 255.0 + + # One-hot encode labels + y_train = to_categorical(y_train, 100) + y_test = to_categorical(y_test, 100) + + return { + "x_train": x_train, + "y_train": y_train, + "x_test": x_test, + "y_test": y_test, + } + else: + raise ValueError(f"Unknown dataset: {dataset_name!r}") + + +# --- Core Model 
Operations --- + + +def model_create(model, **params: dict) -> tf.keras.Model: + """Creates a new Keras model based on the specified architecture.""" + model_name = params["model_name"] + input_shape = params["input_shape"] + categories = params["categories"] + if model_name == "custom_cnn1_for_cifar10": + new_model = models.Sequential( + [ + layers.Conv2D( + 64, + (3, 3), + padding="same", + activation="relu", + input_shape=input_shape[1:], + ), + layers.MaxPooling2D((2, 2)), + layers.Conv2D(128, (3, 3), padding="same", activation="relu"), + layers.MaxPooling2D((2, 2)), + layers.Flatten(), + layers.Dense(256, activation="relu"), + layers.Dropout(0.5), + layers.Dense(categories, activation="softmax"), + ], + name=model_name, + ) + new_model.compile( + optimizer=Adam(), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + return new_model + if model_name == "lenet5_custom": + new_model = models.Sequential( + [ + layers.Conv2D( + 6, + kernel_size=5, + activation="relu", + padding="same", + input_shape=input_shape[1:], + ), + layers.AveragePooling2D(), + layers.Conv2D(16, kernel_size=5, activation="relu"), + layers.AveragePooling2D(), + layers.Flatten(), + layers.Dense(120, activation="relu"), + layers.Dense(84, activation="relu"), + layers.Dense(categories, activation="softmax"), + ], + name=model_name, + ) + + new_model.compile( + optimizer=Adam(), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + return new_model + + if model_name == "lenet5_custom_v2": + new_model = models.Sequential( + [ + layers.Conv2D( + 32, + kernel_size=5, + activation="relu", + padding="same", + input_shape=input_shape[1:], + ), + layers.AveragePooling2D(), + layers.Conv2D(64, kernel_size=5, activation="relu"), + layers.AveragePooling2D(), + layers.Conv2D(64, kernel_size=5, activation="relu"), + layers.AveragePooling2D(), + layers.Flatten(), + layers.Dense(128, activation="relu"), + layers.Dense(256, activation="relu"), + layers.Dense(categories, activation="softmax"), + ], + name=model_name, + ) + + new_model.compile( + optimizer=Adam(), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + return new_model + if model_name == "vgg16": + new_model = tf.keras.applications.VGG16( + include_top=True, + weights=None, + input_shape=input_shape[1:], + classes=categories, + ) + new_model.compile( + optimizer=Adam(), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + return new_model + else: + raise ValueError(f"Unknown model_name: {model_name!r}") + + +def model_train(model: tf.keras.Model, **params: dict) -> tf.keras.Model: + """Trains the model with the given parameters.""" + if model is None: + raise ValueError("model_train received an empty model.") + + data = load_data(params["dataset"]) + + model.compile( + optimizer=Adam(learning_rate=params["learning_rate"]), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + + callbacks = [] + if params.get("early_stopping", False): + callbacks.append( + EarlyStopping( + monitor=params.get("monitor", "val_loss"), + patience=params.get("patience", 8), + restore_best_weights=True, + ) + ) + + if params.get("epochs", 0) > 0: + model.fit( + data["x_train"], + data["y_train"], + batch_size=params["batch_size"], + epochs=params["epochs"], + validation_split=params["validation_split"], + verbose=1, # Set to 1 to see progress + callbacks=callbacks if callbacks else None, + ) + return model + + +# --- Model Transformation and Quantization --- + + +def apply_bn_folding( + model: tf.keras.Model, merge_activation: bool = False 
+) -> tf.keras.Model: + """Fuses Conv/Dense layers with subsequent BatchNormalization layers.""" + if not isinstance(model, tf.keras.Sequential): + raise TypeError( + "BN folding currently only supports Sequential models." + ) + + def is_relu(layer): + return isinstance(layer, layers.ReLU) or ( + isinstance(layer, layers.Activation) + and layer.get_config().get("activation") == "relu" + ) + + new_layers = [] + i = 0 + while i < len(model.layers): + layer = model.layers[i] + + # Check if we can fold this layer with the next one + if ( + isinstance(layer, (layers.Conv2D, layers.Dense)) + and i + 1 < len(model.layers) + and isinstance(model.layers[i + 1], layers.BatchNormalization) + ): + + conv_layer = layer + bn_layer = model.layers[i + 1] + + # Get weights + conv_weights = conv_layer.get_weights() + bn_weights = bn_layer.get_weights() + + kernel = conv_weights[0] + bias = ( + conv_weights[1] + if conv_layer.use_bias + else np.zeros(kernel.shape[-1]) + ) + + gamma, beta, moving_mean, moving_variance = bn_weights + epsilon = bn_layer.epsilon + + # Calculate new weights and biases + scale = gamma / np.sqrt(moving_variance + epsilon) + new_bias = beta + (bias - moving_mean) * scale + + if isinstance(conv_layer, layers.Dense): + new_kernel = kernel * scale + else: # Conv2D + new_kernel = kernel * scale.reshape((1, 1, 1, -1)) + + # Create new layer configuration + new_config = conv_layer.get_config() + new_config["use_bias"] = True + new_config["activation"] = conv_layer.activation + + i += 2 # Skip original conv and BN + + # Check for merging activation + if ( + merge_activation + and i < len(model.layers) + and is_relu(model.layers[i]) + ): + new_config["activation"] = "relu" + i += 1 + + # Create the new fused layer + fused_layer = type(conv_layer).from_config(new_config) + new_layers.append(fused_layer) + + # Build and set weights for the new layer + fused_layer.build(conv_layer.input_shape) + fused_layer.set_weights([new_kernel, new_bias]) + + else: + new_layers.append(layer) + i += 1 + + return models.Sequential(new_layers) + + +def model_transform_bnf( + model: tf.keras.Model, **params: dict +) -> tf.keras.Model: + """Applies Batch Normalization Folding to the model.""" + print("Function: model_transform_bnf called") + if model is None: + raise ValueError("model_transform_bnf received an empty model.") + new_model = apply_bn_folding( + model, merge_activation=params.get("merge_activation", False) + ) + new_model.compile( + optimizer=Adam(), loss="categorical_crossentropy", metrics=["accuracy"] + ) + return new_model + + +def model_quantize(model: tf.keras.Model, **params) -> tf.keras.Model: + """Applies quantization to the model.""" + print("Function: model_quantize called") + if model is None: + raise ValueError("model_quantize received an empty model.") + # (Your quantization logic here) + kernel = params["kernel"] + bias = params["bias"] + activations = params["activations"] + + # Layers initialization + layers = list() + supported = ("conv2d", "dense") + for layer in model.layers: + if any(kw in layer.name for kw in supported): + layers.append(layer.name) + + # QConfig initialization + qconfig = dict() + for layer in layers: + qconfig[layer] = dict() + for layer in layers: + for k in ("weights", "activations"): + qconfig[layer][k] = dict() + + for layer, k, b, a in zip(layers, kernel, bias, activations): + # Kernel + if k["type"] == "uniform": + qconfig[layer]["weights"]["kernel"] = UniformQuantizer( + bits=k["bits"], signed=True + ) + elif k["type"] == "flexible": + 
qconfig[layer]["weights"]["kernel"] = FlexQuantizer( + bits=k["bits"], n_levels=k["n_levels"], signed=True + ) + else: + pass + # Bias + if b["type"] == "uniform": + qconfig[layer]["weights"]["bias"] = UniformQuantizer( + bits=b["bits"], signed=True + ) + elif b["type"] == "flexible": + qconfig[layer]["weights"]["bias"] = FlexQuantizer( + bits=b["bits"], n_levels=b["n_levels"], signed=True + ) + else: + pass + # Arctivations + if a["type"] == "uniform": + qconfig[layer]["activations"]["activation"] = UniformQuantizer( + bits=a["bits"], signed=False + ) + elif a["type"] == "flexible": + qconfig[layer]["activations"]["activation"] = FlexQuantizer( + bits=a["bits"], n_levels=a["n_levels"], signed=False + ) + else: + pass + # End logic + quantized_model = apply_quantization(model, qconfig) + quantized_model.compile( + optimizer=Adam(), loss="categorical_crossentropy", metrics=["accuracy"] + ) + return quantized_model + + +# --- Quantizer Weight Initialization for QAT --- + + +def activation_output_generator(model, x_train, batch_size=128): + """Generator to yield activations of the model for the training data.""" + # run an inference + model(x_train[:1]) # Ensure the model is built + layers_to_inspect = [ + layer for layer in model.layers if hasattr(layer, "quantize_config") + ] + intermediate_model = models.Model( + inputs=model.input, + outputs=[layer.output for layer in layers_to_inspect], + ) + num_samples = x_train.shape[0] + for start in range(0, num_samples, batch_size): + end = min(start + batch_size, num_samples) + yield intermediate_model.predict_on_batch( + x_train[start:end], + ) + + +def compute_max_abs_activations(model, x_train, batch_size=128): + """Computes the maximum absolute activations for each layer.""" + activations = activation_output_generator(model, x_train, batch_size) + max_abs_activations = {} + + for layer_outputs in activations: + for layer, output in zip(model.layers, layer_outputs): + if layer.name not in max_abs_activations: + max_abs_activations[layer.name] = np.max(np.abs(output)) + else: + max_abs_activations[layer.name] = max( + max_abs_activations[layer.name], np.max(np.abs(output)) + ) + + return max_abs_activations + + +from quantizers.common import max_value, min_value + + +def get_max_weight_value(weight): + max_value = np.max(np.abs(weight)) + return max_value if max_value != 0 else 0.1 + + +def get_uniform_levels(alpha, signed, n_levels): + start = min_value(alpha, signed) + end = max_value(alpha, n_levels, signed) + return np.linspace(start, end, n_levels) + + +def get_uniform_thresholds(alpha, signed, n_levels): + # Thresholds include the start and end points by design. 
+ start = min_value(alpha, signed) + end = alpha + return np.linspace(start, end, n_levels + 1) + + +def initialize_quantizer_weights(model, **params): + """Initializes quantizer weights for the model.""" + data = load_data(params["dataset"]) + x_train = data["x_train"] + + model.compile( + optimizer=Adam(), loss="categorical_crossentropy", metrics=["accuracy"] + ) + model.predict(x_train[:1], verbose=0) + + batch_size = params.get("batch_size", 128) + max_activations = compute_max_abs_activations(model, x_train, batch_size) + print("Max absolute activations:", max_activations) + for layer in model.layers: + print(f"Layer: {layer.name}") + if hasattr(layer, "quantize_config") and isinstance( + layer.quantize_config, (GenerateConfig) + ): + layer_config = layer.quantize_config + print(f"layer weights: {[w.name for w in layer.weights]}s") + weights_dict = layer_config.weights + activations_dict = layer_config.activations + for weight_name, quantize_config in weights_dict.items(): + + # Get layer and weight info + print("layer name:", layer.name) + original_layer_name = layer.name.replace("quant_", "") + original_weight_name = f"{original_layer_name}/{weight_name}:0" + filtered_weights = [ + w for w in layer.weights if w.name == original_weight_name + ] + if not filtered_weights: + print( + f"Warning: No weight found for {original_weight_name} in layer {layer.name}" + ) + continue + weight = filtered_weights[0] + + # Compute and assign alpha + alpha = get_max_weight_value(weight) + alpha_weight = [ + w + for w in layer.weights + if w.name == f"{layer.name}/{weight_name}_alpha:0" + ] + if not alpha_weight: + print( + f"Warning: No alpha weight found for {weight_name} in layer {layer.name}" + ) + continue + alpha_weight = alpha_weight[0] + alpha_weight.assign(alpha) + print(f"Alpha: {alpha_weight}") + + # If flex, we need to do the same with levels and thresholds + if isinstance(quantize_config, FlexQuantizer): + + # Compute and assign levels + levels = get_uniform_levels( + alpha, quantize_config.signed, quantize_config.n_levels + ) + levels_weight = [ + w + for w in layer.weights + if w.name == f"{layer.name}/{weight_name}_levels:0" + ] + if not levels_weight: + print( + f"Warning: No levels weight found for {weight_name} in layer {layer.name}" + ) + continue + levels_weight = levels_weight[0] + levels_weight.assign(levels) + print(f"Levels: {levels_weight}") + + # Compute and assign thresholds + thresholds = get_uniform_thresholds( + alpha, quantize_config.signed, quantize_config.n_levels + ) + thresholds_weight = [ + w + for w in layer.weights + if w.name == f"{layer.name}/{weight_name}_thresholds:0" + ] + if not thresholds_weight: + print( + f"Warning: No thresholds weight found for {weight_name} in layer {layer.name}" + ) + continue + thresholds_weight = thresholds_weight[0] + thresholds_weight.assign(thresholds) + print(f"Thresholds: {thresholds_weight}") + + for activation_name, quantize_config in activations_dict.items(): + alpha = max_activations.get(layer.name, 0) + alpha_weight = [ + w + for w in layer.weights + if w.name == f"{layer.name}/post_activation_alpha:0" + ] + if not alpha_weight: + print( + f"Warning: No alpha weight found for activation {activation_name} in layer {layer.name}" + ) + continue + alpha_weight = alpha_weight[0] + alpha_weight.assign(alpha) + print(f"Activation Alpha: {alpha_weight}") + if isinstance(quantize_config, FlexQuantizer): + levels = get_uniform_levels( + alpha, quantize_config.signed, quantize_config.n_levels + ) + thresholds = 
get_uniform_thresholds( + alpha, quantize_config.signed, quantize_config.n_levels + ) + levels_weight = [ + w + for w in layer.weights + if w.name == f"{layer.name}/post_activation_levels:0" + ] + if not levels_weight: + print( + f"Warning: No levels weight found for activation {activation_name} in layer {layer.name}" + ) + continue + levels_weight = levels_weight[0] + levels_weight.assign(levels) + # print + thresholds_weight = [ + w + for w in layer.weights + if w.name + == f"{layer.name}/post_activation_thresholds:0" + ] + if not thresholds_weight: + print( + f"Warning: No thresholds weight found for activation {activation_name} in layer {layer.name}" + ) + continue + thresholds_weight = thresholds_weight[0] + thresholds_weight.assign(thresholds) + return model + + +def model_freeze(model: tf.keras.Model, **params: dict) -> tf.keras.Model: + """Freeze subsets of weights in the model according to the four flags in + params, and skip any weights related to optimizer state.""" + print("Function: model_freeze called") + if model is None: + raise ValueError("model_freeze received an empty model.") + + # read the four flags (defaults to False) + freeze_conv_params = params.get("conv_parqameters", False) + freeze_conv_qparams = params.get("conv_qparqameters", False) + freeze_dense_params = params.get("dense_parqameters", False) + freeze_dense_qparams = params.get("dense_qparqameters", False) + + # regex to detect quantization variables + qparam_pattern = re.compile(r"(alpha|levels|thresholds)") + + for weight in model.weights: + name = weight.name + + # **new**: skip any optimizer-related variables entirely + if "optimizer" in name: + print(f" → skipping optimizer var: {name}") + continue + + is_qparam = bool(qparam_pattern.search(name)) + is_conv = "conv2d" in name + is_dense = "dense" in name + + # DEBUG: + # print(f"{name}: is_qparam={is_qparam}, is_conv={is_conv}, is_dense={is_dense}") + + # decide whether this weight belongs to a group the user asked to freeze + should_freeze = ( + (is_conv and not is_qparam and freeze_conv_params) + or (is_conv and is_qparam and freeze_conv_qparams) + or (is_dense and not is_qparam and freeze_dense_params) + or (is_dense and is_qparam and freeze_dense_qparams) + ) + + if should_freeze: + weight._trainable = False + print(f" → freezing: {name}") + else: + print(f" → not freezing: {name}") + + return model + + +def model_evaluate(model, **params): + """Evaluates the model on the test dataset.""" + if model is None: + raise ValueError("model_evaluate received an empty model.") + data = load_data(params["dataset"]) + loss, accuracy = model.evaluate(data["x_test"], data["y_test"], verbose=0) + print(f"Evaluation results - Loss: {loss:.4f}, Accuracy: {accuracy:.4f}") + return model + + +# --- Function Map --- +FUNCTION_MAP = { + "model_create": model_create, + "model_evaluate": model_evaluate, + "model_train": model_train, + "model_transform_bnf": model_transform_bnf, # Assuming you will add this + "model_quantize": model_quantize, + "initialize_quantizer_weights": initialize_quantizer_weights, + "model_freeze": model_freeze, +} diff --git a/src/stage/main.py b/src/stage/main.py new file mode 100755 index 0000000..fad5e64 --- /dev/null +++ b/src/stage/main.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python3 + +from functools import partial +from pathlib import Path + +import tensorflow as tf +from functions import FUNCTION_MAP + +from stage import Stage + +# --- Configuration for All 7 Pipeline Stages --- +# This list defines the blueprint for our pipeline. 
Each dictionary +# will be used to initialize a Stage object. + +stages_hyperparams = [ + # Stage 0: Model creation + { + "name": "model_creation", + "seed": 12345, + "function": "model_create", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "model_name": "lenet5_custom_v2", + }, + }, + # Stage 1: Initial training + { + "name": "initial_training", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 200, + "batch_size": 1024, + "learning_rate": 0.001, + "validation_split": 0.1, + }, + }, + # Stage 2: BN folding + { + "name": "bnf", + "seed": 12345, + "function": "model_transform_bnf", + "kwargs": { + "merge_activation": True, + }, + }, + # Stage 3: Post BN folding training + { + "name": "pbnf_training", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 1, + "batch_size": 32, + "learning_rate": 0.0005, + "validation_split": 0.1, + }, + }, + # Stage 4: Model quantization + { + "name": "quantization", + "seed": 12345, + "function": "model_quantize", + "kwargs": { + "input_shape": [None, 32, 32, 3], + # 'kernel' is set to None because it will be dynamically + # updated inside the experimental loop below. + "kernel": None, + "bias": [ + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + ], + "activations": [ + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + ], + }, + }, + # Stage 5: Alpha initialization + { + "name": "alpha_initialization", # Fixed typo from original "initialiation" + "seed": 12345, + "function": "model_initialize_parameters", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "type": "alpha", + }, + }, + # Stage 6: QAT + { + "name": "qat", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 10, + "batch_size": 32, + "learning_rate": 0.0001, + "validation_split": 0.1, + }, + }, + { + "name": "final_evaluation", + "seed": 12345, + "function": "model_evaluate", + "kwargs": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + }, + }, +] + +vgg_stages_hyperparams = [ + # Stage 0: Model creation + { + "name": "model_creation", + "seed": 12345, + "function": "model_create", + "kwargs": { + "dataset": "cifar100", + "input_shape": [None, 32, 32, 3], + "categories": 100, + "model_name": "vgg16", + }, + }, + # Stage 1: Initial training + { + "name": "initial_training", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar100", + "input_shape": [None, 32, 32, 3], + "categories": 100, + "epochs": 20, + "batch_size": 1024, + "learning_rate": 0.001, + "validation_split": 0.1, + }, + }, + # Stage 2: BN folding + { + "name": "bnf", + "seed": 12345, + "function": "model_transform_bnf", + "kwargs": { + "merge_activation": True, + }, + }, + # Stage 3: Post BN folding training + { + "name": "pbnf_training", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar100", + "input_shape": [None, 32, 32, 3], + "categories": 100, + "epochs": 1, + "batch_size": 128, + "learning_rate": 0.0005, + "validation_split": 0.1, + }, + }, + # Stage 4: Model quantization + { + "name": 
"quantization", + "seed": 12345, + "function": "model_quantize", + "kwargs": { + "input_shape": [None, 32, 32, 3], + # 'kernel' is set to None because it will be dynamically + # updated inside the experimental loop below. + "kernel": None, + "bias": [ + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + {"type": None}, + ], + "activations": [ + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + ], + }, + }, + # Stage 5: Alpha initialization + { + "name": "alpha_initialization", # Fixed typo from original "initialiation" + "seed": 12345, + "function": "model_initialize_parameters", + "kwargs": { + "dataset": "cifar100", + "input_shape": [None, 32, 32, 3], + "categories": 100, + "type": "alpha", + }, + }, + # Stage 6: QAT + { + "name": "qat", + "seed": 12345, + "function": "model_train", + "kwargs": { + "dataset": "cifar100", + "input_shape": [None, 32, 32, 3], + "categories": 100, + "epochs": 10, + "batch_size": 32, + "learning_rate": 0.0001, + "validation_split": 0.1, + }, + }, + { + "name": "final_evaluation", + "seed": 12345, + "function": "model_evaluate", + "kwargs": { + "dataset": "cifar100", + "input_shape": [None, 32, 32, 3], + "categories": 100, + }, + }, +] + +if __name__ == "__main__": + + # This is the main experimental loop from your coworker's script. + # It runs the entire 7-stage pipeline multiple times. + for bits in range(1, 11): + print( + f"\n{'='*20} STARTING EXPERIMENT: UNIFORM BITS = {bits} {'='*20}\n" + ) + + # --- Configure the Experiment --- + # Dynamically set the 'kernel' quantization parameter for this specific run. + kernel_config = [{"type": "uniform", "bits": bits} for _ in range(5)] + stages_hyperparams[4]["kwargs"]["kernel"] = kernel_config + + # Create the list of Stage objects from the (now updated) configurations + dataset = stages_hyperparams[0]["kwargs"].get("dataset") + model_name = stages_hyperparams[0]["kwargs"].get("model_name") + pipeline = [ + Stage( + function=FUNCTION_MAP[config["function"]], + initial_config=config, + checkpoint_path=Path("checkpoints") + / f"{model_name}-{dataset}", + metadata_path=Path(f"{bits}_bit"), + ) + for config in stages_hyperparams + ] + + # --- The Orchestrator --- + # It tracks both the model object and the hash of the last operation + model: tf.keras.Model | None = None + previous_hash: str | None = None + + # The loop's responsibility is to pass the state (model & hash) between stages + for stage in pipeline: + # We need to set the ref model + if stage.initial_config["name"] == "alpha_initialization": + assert ( + ref_model is not None + ), "Reference model for alpha initialization is not set." 
+ stage.function = partial(stage.function, ref_model=ref_model) + model, previous_hash = stage.run( + input_model=model, previous_hash=previous_hash + ) + + # Save the ref model after the last stage we dont quantize + if stage.initial_config["name"] == "pbnf_training": + ref_model = tf.keras.models.clone_model(model) + + print( + f"\n{'='*20} FINISHED EXPERIMENT: UNIFORM BITS = {bits} {'='*20}\n" + ) + print( + f"Final model for {bits}-bit experiment corresponds to hash: {previous_hash}" + ) diff --git a/src/stage/mainv2.py b/src/stage/mainv2.py new file mode 100755 index 0000000..bc69ba4 --- /dev/null +++ b/src/stage/mainv2.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 + +from functions import ( + model_create, + model_initialize_parameters, + model_quantize, + model_train, +) +from stagev2 import Pipeline, StageMetadata + +# --- Configuration for All 7 Pipeline Stages --- +# This list defines the blueprint for our pipeline. Each dictionary +# will be used to initialize a Stage object. + +stages_hyperparams = [ + # Stage 0: Model creation + { + "name": "model_creation", + "seed": 12345, + "function": model_create, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "model_name": "custom_cnn1_for_cifar10", + }, + }, + # Stage 1: Initial training + { + "name": "initial_training", + "seed": 12345, + "function": model_train, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 100, + "batch_size": 512, + "learning_rate": 0.001, + "validation_split": 0.1, + }, + }, + # Stage 2: Model quantization + { + "name": "quantization", + "seed": 12345, + "function": model_quantize, + "parameters": { + "input_shape": [None, 32, 32, 3], + # 'kernel' is set to None because it will be dynamically + # updated inside the experimental loop below. + "kernel": None, + "bias": [ + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + {"type": "uniform", "bits": 8}, + ], + "activations": [ + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + {"type": "uniform", "bits": 16}, + ], + }, + }, + # Stage 5: Alpha initialization + { + "name": "alpha_initialization", # Fixed typo from original "initialiation" + "seed": 12345, + "function": model_initialize_parameters, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "type": "alpha", + }, + }, + # Stage 6: QAT + { + "name": "qat", + "seed": 12345, + "function": model_train, + "parameters": { + "dataset": "cifar10", + "input_shape": [None, 32, 32, 3], + "categories": 10, + "epochs": 25, + "batch_size": 512, + "learning_rate": 0.0001, + "validation_split": 0.1, + }, + }, +] + + +if __name__ == "__main__": + for bits in reversed([1, 2, 3, 4, 5, 6, 8, 10, 16, 24]): + print( + f"\n{'='*20} STARTING EXPERIMENT: UNIFORM BITS = {bits} {'='*20}\n" + ) + + # --- Configure the Experiment --- + # Dynamically set the 'kernel' quantization parameter for this specific run. 
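+        # One uniform-kernel entry per quantizable layer: custom_cnn1_for_cifar10 has
+        # 2 conv + 2 dense layers, matching the 4 bias/activation entries in the
+        # quantization stage above.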
+ kernel_config = [{"type": "uniform", "bits": bits} for _ in range(4)] + stages_hyperparams[2]["parameters"]["kernel"] = kernel_config + stages_metadata = [ + StageMetadata.from_dict(stage_dict) + for stage_dict in stages_hyperparams + ] + pipeline = Pipeline( + name=f"experiment_uniform_bits_{bits}", + stage_definitions=stages_metadata, + ) + pipeline.run() diff --git a/src/stage/plot.py b/src/stage/plot.py new file mode 100644 index 0000000..c28e9e1 --- /dev/null +++ b/src/stage/plot.py @@ -0,0 +1,136 @@ +from pathlib import Path + +import matplotlib.pyplot as plt +import pandas as pd + +# path = Path("checkpoints/lenet5_custom-fashion_mnist") +path = Path("checkpoints/lenet5_custom_v2-cifar10") + +experiment_paths = [ + experiment_path + for experiment_path in path.iterdir() + if experiment_path.is_dir() and experiment_path.name != "artifacts" +] + +df = pd.DataFrame() +for experiment_path in experiment_paths: + if not experiment_path.exists(): + print(f"Metadata path '{experiment_path}' does not exist. Skipping.") + continue + for metadata_file in experiment_path.glob("*.json"): + with metadata_file.open("r") as f: + metadata = pd.read_json(f, orient="index").T + metadata["experiment"] = experiment_path.name + # Select only the desired columns + selected_columns = [ + "experiment", + "name", + "loss", + "accuracy", + "complexity", + ] + metadata = metadata[ + [col for col in selected_columns if col in metadata.columns] + ] + df = pd.concat([df, metadata], ignore_index=True) + +simple_df = df[df["name"].isin(["pbnf_training", "final_evaluation"])].copy() + +simple_df["name"] = simple_df["name"].replace( + { + "pbnf_training": "Original", + "final_evaluation": "Quantized", + } +) + +# Convert complexity from bits to Kbits +simple_df["complexity"] = simple_df["complexity"] / 1024 + +pivoted_df = simple_df.pivot_table( + index="experiment", columns="name", values=["accuracy", "complexity"] +) + + +# The accuracy of the common points doesn't match, fix that. + +df = pivoted_df.copy() +# Sort the DataFrame by this new column +df = df.sort_values(by=("complexity", "Quantized"), ascending=True) +pd.set_option("display.max_rows", None) +print(df) + +# df.sort_index(inplace=True, sort_by=['complexity']) +# --- 2. Create the Plot (using tuple access) --- + +# Get the data for the original model from the first row +# Note the use of tuples to access the columns +original_accuracy = df[("accuracy", "Original")].iloc[0] +original_size = df[("complexity", "Original")].iloc[0] + +# Set up the plot size and style +plt.style.use("seaborn-v0_8-whitegrid") +fig, ax = plt.subplots(figsize=(10, 7)) + +# --- 3. 
Plot Each Point ---
+
+# Plot the single point for the Original Model
+ax.scatter(
+    x=original_size,
+    y=original_accuracy,
+    marker="*",
+    s=250,
+    color="red",
+    label="Original Model",
+    zorder=5,
+)
+
+# Plot the points for ALL of your Quantized Models
+# We use tuples to get the correct columns for the X and Y axes
+ax.scatter(
+    x=df[("complexity", "Quantized")],
+    y=df[("accuracy", "Quantized")],
+    s=60,
+    color="royalblue",
+    label="Quantized Models",
+)
+
+# Plot the line connecting quantized models and the original model as the final item
+quantized_sizes = df[("complexity", "Quantized")].tolist()
+quantized_accuracies = df[("accuracy", "Quantized")].tolist()
+
+# Append the original model as the final item
+quantized_sizes.append(original_size)
+quantized_accuracies.append(original_accuracy)
+
+ax.plot(
+    quantized_sizes,
+    quantized_accuracies,
+    color="royalblue",
+    linestyle="--",
+    linewidth=1,
+    zorder=1,
+)
+
+# --- 4. Add Labels to make the plot readable ---
+
+# Loop through the DataFrame index (e.g., '1_bit', '2_bit')
+for experiment_name in df.index:
+    ax.annotate(
+        experiment_name.rstrip("_bit"),
+        (
+            df.loc[experiment_name, ("complexity", "Quantized")],
+            df.loc[experiment_name, ("accuracy", "Quantized")],
+        ),
+        textcoords="offset points",
+        xytext=(-5, 15),  # offset the label 5 pt left and 15 pt up
+        ha="left",
+        va="top",
+    )
+
+# Add titles and labels for the axes
+ax.set_title("Model Accuracy vs. Size Trade-off", fontsize=16)
+ax.set_xlabel("Model Size (Complexity in Kbits)", fontsize=12)
+ax.set_ylabel("Model Accuracy", fontsize=12)
+ax.legend(fontsize=11)
+
+plt.savefig("model_accuracy_vs_size.png", dpi=300, bbox_inches="tight")
diff --git a/src/stage/plotv2.py b/src/stage/plotv2.py
new file mode 100755
index 0000000..66bcf05
--- /dev/null
+++ b/src/stage/plotv2.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python3
+
+import json
+from pathlib import Path
+
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import pandas as pd
+
+
+def load_pipeline(pipeline_metadata_file, results_path, metadata_path):
+    df = pd.DataFrame()
+    stages_results = []
+    stages_names = []
+    with open(pipeline_metadata_file, "r") as f:
+        pipeline_metadata = json.load(f)
+        for stage_hash in pipeline_metadata["history"]:
+            with open(results_path / f"{stage_hash}.json", "r") as stage_file:
+                stage_results = json.load(stage_file)
+                stages_results.append(stage_results)
+            with open(
+                metadata_path / f"{stage_hash}.json", "r"
+            ) as metadata_file:
+                stage_metadata = json.load(metadata_file)
+                stages_names.append(stage_metadata["name"])
+    df = pd.DataFrame(stages_results)
+    df["stage"] = stages_names
+    return df
+
+
+if __name__ == "__main__":
+    checkpoint_path = Path("checkpoints")
+    pipeline_path = checkpoint_path / "pipelines"
+    metadata_path = checkpoint_path / "metadata"
+    results_path = checkpoint_path / "results"
+
+    combined_df = pd.DataFrame()
+
+    for pipeline_metadata_file in pipeline_path.glob("experiment_flex*.json"):
+        print(f"Loading pipeline metadata from {pipeline_metadata_file}")
+        df = load_pipeline(pipeline_metadata_file, results_path, metadata_path)
+        # experiment_[TYPE]_bits_[NBITS]_n_levels_[NLEVELS]
+        pipeline_name = pipeline_metadata_file.stem
+        pipeline_name_parts = pipeline_name.split("_")
+        pipeline_quantizer_type = pipeline_name_parts[1]
+        pipeline_bits = pipeline_name_parts[3]
+        pipeline_levels = (
+            pipeline_name_parts[5] if len(pipeline_name_parts) > 5 else None
+        )
+        df["pipeline_name"] = pipeline_name
+        df["quantizer_type"] = pipeline_quantizer_type
+        df["bits"] = pipeline_bits
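+        # Names that omit the n_levels suffix fall back to levels=None
+        # (see the length check on pipeline_name_parts above).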
+ df["levels"] = pipeline_levels + combined_df = pd.concat([combined_df, df], ignore_index=True) + pd.set_option("display.max_rows", None) + pd.set_option("display.max_columns", None) + + combined_df["complexity"] = ( + combined_df["complexity"] / 1024 + ) # Convert to Kbits + original_accuracy = combined_df[ + combined_df["stage"] == "initial_training" + ]["accuracy"].mean() + original_complexity = combined_df[ + combined_df["stage"] == "initial_training" + ]["complexity"].mean() + combined_df = combined_df[combined_df["stage"] == "qat"] + # combined_df = combined_df[combined_df["stage"] == "activation_quantization"] + combined_df.sort_values(by=["complexity"], inplace=True) + all_accuracies = combined_df["accuracy"].tolist() + all_complexities = combined_df["complexity"].tolist() + # all_accuracies.append(original_accuracy) + # all_complexities.append(original_complexity) + pd.set_option("display.width", 200) + print( + combined_df.sort_values(by=["accuracy", "complexity"], ascending=False) + ) + print(combined_df.sort_values(by=["complexity"], ascending=False)) + plt.figure() + # combined_df.plot( + # x="complexity", + # y=["accuracy"], + # kind="scatter", + # title="Complexity vs Accuracy", + # xlabel="Complexity (Kbits)", + # ylabel="Accuracy", + # zorder=3, + # color="blue", + # label="Quantized Model", + # ) + plt.scatter( + original_complexity, + original_accuracy, + color="red", + label="Original Model", + zorder=3, + ) + plt.axhline( + original_accuracy, color="red", linestyle=":", alpha=0.3, zorder=1 + ) + bits_values = combined_df["bits"].unique() + cmap = mpl.colormaps["tab10"] + for i, bits in enumerate(sorted(bits_values)): + subset = combined_df[combined_df["bits"] == bits].sort_values( + "complexity" + ) + plt.semilogx( + subset["complexity"], + subset["accuracy"], + label=f"{bits} bits", + color=cmap(i % 10), + zorder=3, + marker="o", + linestyle="--", + ) + # plt.semilogx( + # all_complexities, + # all_accuracies, + # color="gray", + # linestyle="--", + # zorder=1, + # ) + plt.grid(which="both", linestyle="--", linewidth=0.5) + plt.legend() + plt.savefig("complexity_vs_quantized_flex.png") diff --git a/src/stage/plotv3-es.py b/src/stage/plotv3-es.py new file mode 100755 index 0000000..a72399c --- /dev/null +++ b/src/stage/plotv3-es.py @@ -0,0 +1,766 @@ +#!/usr/bin/env python3 + +import fnmatch +import json +import re +from collections import defaultdict +from pathlib import Path + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +pd.set_option("display.max_rows", None) +pd.set_option("display.max_columns", None) +pd.set_option("display.width", None) + + +def load_pipeline(pipeline_metadata_file, results_path, metadata_path): + df = pd.DataFrame() + stages_results = [] + stages_names = [] + with open(pipeline_metadata_file, "r") as f: + pipeline_metadata = json.load(f) + for stage_hash in pipeline_metadata["history"]: + with open(results_path / f"{stage_hash}.json", "r") as stage_file: + stage_results = json.load(stage_file) + stages_results.append(stage_results) + with open( + metadata_path / f"{stage_hash}.json", "r" + ) as metadata_file: + stage_metadata = json.load(metadata_file) + stages_names.append(stage_metadata["name"]) + df = pd.DataFrame(stages_results) + df["stage"] = stages_names + return df + + +def load_file(hash_, results_path: Path, metadata_path: Path) -> dict: + """Lee los dos JSON correspondientes a `hash_` y devuelve un dict unificado + con todas las claves de ambos.""" + # Cargar resultados + with 
open(results_path / f"{hash_}.json", "r") as f_res: + res = json.load(f_res) + # Cargar metadata + with open(metadata_path / f"{hash_}.json", "r") as f_meta: + meta = json.load(f_meta) + # Combinar, dándole preferencia a `res` en caso de colisión de claves + combined = {**meta, **res} + # Añadimos el hash como columna + combined["hash"] = hash_ + return combined + + +def flatten_json(obj, parent_key: str = "", sep: str = "_"): + """Recursively flattens dicts and lists into a single dict mapping + flattened_key -> value. + + List items get their index injected into the key. + """ + items = {} + if isinstance(obj, dict): + for k, v in obj.items(): + new_key = f"{parent_key}{sep}{k}" if parent_key else k + items.update(flatten_json(v, new_key, sep=sep)) + elif isinstance(obj, list): + for i, v in enumerate(obj): + new_key = f"{parent_key}{sep}{i}" if parent_key else str(i) + items.update(flatten_json(v, new_key, sep=sep)) + else: + # reached a leaf + items[parent_key] = obj + return items + + +def add_horizontal_se_band( + ax, + mean, + se, + *, + color="red", + label="Original Model", + alpha_line=0.8, + alpha_band=0.2, + z_mean=3, + z_line=2, + z_band=1, +): + """On ax, draw a dotted line at `mean`, dashed lines at ±se, and fill + between them.""" + # mean line + ax.axhline( + mean, + color=color, + linestyle=":", + linewidth=0.3, + alpha=alpha_line, + zorder=z_mean, + # label=f"{label}" + ) + # ±1 SE lines + ax.axhline( + mean + se, + color=color, + linestyle="-", + linewidth=0.3, + alpha=0.6, + zorder=z_line, + ) + ax.axhline( + mean - se, + color=color, + linestyle="-", + linewidth=0.3, + alpha=0.6, + zorder=z_line, + ) + # filled band + x0, x1 = ax.get_xlim() + ax.fill_between( + [x0, x1], + [mean - se, mean - se], + [mean + se, mean + se], + color=color, + alpha=alpha_band, + zorder=z_band, + ) + + +if __name__ == "__main__": + checkpoint_path = Path("checkpoints") + # pipeline_path = checkpoint_path / "pipelines" + metadata_path = checkpoint_path / "metadata" + results_path = checkpoint_path / "results" + + # Patrón: sólo caracteres hexadecimales en el nombre del fichero + hash_pattern = re.compile(r"^[0-9a-fA-F]+$") + + # Listado de hashes (sin la extensión .json) + hashes = [ + p.stem + for p in results_path.glob("*.json") + if hash_pattern.match(p.stem) + ] + flat_records = [] + for h in hashes: + with open(results_path / f"{h}.json", "r") as fres: + res = json.load(fres) + with open(metadata_path / f"{h}.json", "r") as fmeta: + meta = json.load(fmeta) + + merged = {**meta, **res, "hash": h} + flat = flatten_json(merged) + flat_records.append(flat) + combined_df = pd.DataFrame(flat_records) + + # 1) Build adjacency list: parent_hash → [child_hashes] + tree = defaultdict(list) + for _, row in combined_df.iterrows(): + h = row["hash"] + prev = row.get("previous_hash") + if pd.notna(prev): + tree[prev].append(h) + + # 2) Find root nodes: those with no valid previous_hash + all_hashes = set(combined_df["hash"]) + roots = [ + h + for _, row in combined_df.iterrows() + for h in [row["hash"]] + if pd.isna(row.get("previous_hash")) + or row["previous_hash"] not in all_hashes + ] + + # 3) DFS to collect every root→leaf path + experiments = [] + + def _collect_paths(node, path): + children = tree.get(node, []) + if not children: + experiments.append(path) + else: + for child in children: + _collect_paths(child, path + [child]) + + for root in roots: + _collect_paths(root, [root]) + + blacklist = { + # ) hash # -> acc, complexity, type, bits, n_levels + "82bcde2ecc786b41e6d2b33425c5295a", # 
-> 0.6825, 2.522522e+06, flex, 8, 2 + "2d1d7bb39ddb4bce584d824a41b8aaa2", # -> 0.6820, 2.346810e+06, flex, 8, 2 + "deb62ff4fda51063fb9094744ffc0051", # -> 0.6803, 2.562207e+06, flex, 8, 2 + "8920925440a17cd21177b04319f4dde7", # -> 0.6762, 2.629205e+06, flex, 8, 2 + "9eefaec10856b78c5b1b04cbbb2138ce", # -> 0.6834, 5.739940e+06, flex, 4, 10 + "c6d6425d11b008bea5ae624601cf69f9", # -> 0.6869, 8.300487e+06, flex, 6, 25 + "c24d8367f7eea4fe40c6b11d37a2ef64", # -> 0.6829, 7.521903e+06, flex, 6, 30 + "f1a01952db947ca2a5dcf6932bc4dd9d", # -> 0.6870, 7.761222e+06, flex, 6, 20 + "f76e8a717ae8f27c07edabd45a697813", # -> 0.6803, 2.657772e+06, flex, 6, 2 + "318464ddf0bb2feb2249b20e4b2b0c35", # -> 0.6802, 2.654884e+06, flex, 6, 3 + "a14dc019e8c2e2b8e4929ad2edae4d80", # -> 0.6739, 2.905927e+06, flex, 6, 3 + "fe88a2a970c6d4fe95e1c5efb867de67", # -> 0.6719, 4.990952e+06, flex, 6, 6 + "2d48e19bced1b6e3b15eab632a40cf44", # -> 0.6682 2.476079e+06, flex, 4, 2 + "4965cf50af85c54e65044dc0b5717bcf", # -> 0.6647 4.679128e+06, flex, 8, 5 + "cf82eb531d5c718017874750453af75e", # -> 0.6622 5.459853e+06, flex, 4, 10 + "df2c27c6c58a72ce19f2bec69ba6fa22", # -> 0.6602 4.127570e+06, flex, 4, 4 + "081eea60c304ff553e0ec04fcbfe17f8", # -> 0.6566 8.184740e+06, flex, 6, 25 + "f8138453a8c78c1598af911967b22ef7", # -> 0.6399 4.328394e+06, flex, 6, 5 + "0942ca5ce86afde3e272b8d3bb1c79cf", # -> 0.6365 5.416113e+06, flex, 6, 7 + "57a410c7713212ae7e7874174a00ec28", # -> 0.6358 5.352943e+06, flex, 4, 8 + "1ef38adee955e86d29015639c772708c", # -> 0.6351 8.514102e+06, flex, 6, 30 + "ca9710fb7d998d349dc0fabffe3e7487", # -> 0.6336 4.092541e+06, flex, 4, 5 + "f361e46f3f1a62d15f6ffe924f341e4b", # -> 0.6294 5.502136e+06, flex, 6, 7 + "dcdc5e38399aa6b1f4aec6dd0f4a0011", # -> 0.6228 5.499287e+06, flex, 6, 7 + "62293d44559425797da8b3cf5c03949d", # -> 0.6226 5.282950e+06, flex, 8, 7 + "d3e21e49f4a6737a99c64ae7bd962ed9", # -> 0.6173 8.716123e+06, flex, 6, 30 + "4b847613419cf65b6933ac7df186b5e1", # -> 0.6172 4.120780e+06, flex, 6, 4 + "40c151e376a2a746a618bdea64acb645", # -> 0.6118 8.659160e+06, flex, 8, 25 + "a1d029ad31e8d0e78a2727cb817238cd", # -> 0.6100 9.312237e+06, flex, 8, 30 + "257185fcc8615ad18afa388080e0b42a", # -> 0.6096 9.424802e+06, flex, 8, 30 + "cc7ee437a88cd218050280f5e1687d7b", # -> 0.6091 4.411632e+06, flex, 4, 5 + "1c2b3e12e0e008acc93ad000dc46860c", # -> 0.6085 8.087628e+06, flex, 8, 20 + "ad7f438d121a818e11559c02cf40e875", # -> 0.6078 8.815668e+06, flex, 8, 25 + "bf3715b86ac5cece775d945518828380", # -> 0.6075 8.649432e+06, flex, 6, 30 + "c671e4816625f3fcef3ae19735e588cb", # -> 0.6065 8.057870e+06, flex, 6, 20 + "f77e1ff54c1e8548b16d78166e583a20", # -> 0.6061 4.340742e+06, flex, 6, 5 + "f88ec612d8ea1bdcc25d3dc257d9078b", # -> 0.6045 6.116025e+06, flex, 4, 10 + "cacd708264fa9f8700b9f4fb32b87494", # -> 0.6013 5.497829e+06, flex, 4, 8 + "740a1562cfbc1d391c4f3a1cdbddd879", # -> 0.5990 9.368490e+06, flex, 8, 30 + "18c96f0fc4d4aad8cef826c6cc048846", # -> 0.5981 2.913844e+06, flex, 8, 3 + "6767ffdfcd17bf848370ba70b057aad9", # -> 0.5969 8.600187e+06, flex, 6, 30 + "d8e9e5b7f38c90b8e4aa509d9b5ced31", # -> 0.5961 5.227277e+06, flex, 4, 6 + "419c90ed7c09a63c3fc90a54661617b3", # -> 0.5936 3.709586e+06, flex, 4, 5 + "5a9b0e4a09037ec75430ad220ccdde6d", # -> 0.5930 8.775996e+06, flex, 8, 25 + "5d070cbf206198f489173a5f1ec74f91", # -> 0.5927 4.441964e+06, flex, 8, 5 + "d36592e5b6e1894fba94f2264136580b", # -> 0.5906 4.023145e+06, flex, 6, 4 + "6d9130bcab5126da553138f680ad5343", # -> 0.5901 5.501902e+06, flex, 6, 7 + "228c22a86a8e5db74626432a4fa7ebf0", 
# -> 0.5899 8.682319e+06, flex, 8, 25 + "ce99dd6e073611df6d56d0b035f2c380", # -> 0.5894 5.523196e+06, flex, 8, 7 + "e4f9b423a715a006966cafd108626582", # -> 0.5894 8.710177e+06, flex, 8, 25 + "dd44e1d79606bbf821f776b550fd5708", # -> 0.5850 8.223336e+06, flex, 6, 20 + "553230a79d1600e5e96e4e55bced5562", # -> 0.5847 4.876093e+06, flex, 4, 7 + "5b6c1d077fff399487198d0934d2f801", # -> 0.5833 6.564747e+06, flex, 8, 10 + "a79306498306fd871b72f91886050764", # -> 0.5833 4.978046e+06, flex, 4, 7 + "116279bfe5331dce42f5214acff1b8b8", # -> 0.5827 4.314205e+06, flex, 6, 5 + "2eb0a2ba00ac93326abf3351bd1d9883", # -> 0.5826 5.430122e+06, flex, 8, 7 + "f48060242ed1e5845835ec38f86f90e4", # -> 0.5826 7.373076e+06, flex, 8, 16 + "a2a0c3db77ffc9e87502fce7370eb6a0", # -> 0.5823 8.731863e+06, flex, 6, 25 + "ddc474f0d54a8cc5731b000baf77756d", # -> 0.5809 9.161021e+06, flex, 8, 30 + "4126ec5dbf996778dcb49af8c4563a0e", # -> 0.5790 4.420320e+06, flex, 8, 5 + "1dc469302d509868b5807c2c582781da", # -> 0.5787 6.152794e+06, flex, 6, 10 + "a914f36d1b76973a6f5213a6cdb90e76", # -> 0.5776 2.440996e+06, flex, 8, 3 + "07c097bac4abda58859eb5055fbb3443", # -> 0.5759 5.499702e+06, flex, 4, 8 + "6351c9acd3391e61432ecfa4060a092a", # -> 0.5756 4.607602e+06, flex, 8, 5 + "0828459655a6999d7d9af066c8632f60", # -> 0.5751 5.512189e+06, flex, 8, 7 + "cfa18e91c2f434e01e9fa4d928bab7e4", # -> 0.5749 2.745346e+06, flex, 6, 3 + "741cecf1ee1bbce01c1a2d3f5521c491", # -> 0.5741 9.617448e+06, flex, 8, 30 + "81038213fd8189ba707569fb2b4da11c", # -> 0.5734 5.008818e+06, flex, 8, 6 + "e913f582950807f522d9c802b6ef3f3e", # -> 0.5719 4.106683e+06, flex, 8, 4 + "a47756b976c51cc11b50e1d8238c5200", # -> 0.5716 4.141123e+06, flex, 6, 5 + "ed3fbf80c1e7546321a2dbd6ada1354d", # -> 0.5715 7.853356e+06, flex, 8, 16 + "49befbd5aea7ec47387bd96665c38982", # -> 0.5702 8.188566e+06, flex, 8, 20 + "253e6448f7f43101cbf863974d331d0a", # -> 0.5685 8.324831e+06, flex, 8, 20 + "324fe5d7f47770902d73752a8e01d62d", # -> 0.5663 4.599661e+06, flex, 4, 5 + "387929c287656e1418ecaca896f443ad", # -> 0.5658 5.366461e+06, flex, 6, 7 + "724f9a6abede93b72c8fc7df30b8d9db", # -> 0.5644 4.705492e+06, flex, 6, 6 + "446a454fc94b2789cd943578ed0bc074", # -> 0.5640 4.995121e+06, flex, 4, 5 + "a7f630a537c947ccaf46387051efeef7", # -> 0.5603 8.207445e+06, flex, 6, 20 + "2f23ee58330ffbafc3acb0b87a0699a4", # -> 0.5593 9.103208e+06, flex, 8, 25 + "dbdc28ebce1b0a85029fce6714e49c9f", # -> 0.5587 4.413465e+06, flex, 8, 5 + "b5358693b4337fc31be52a52df800328", # -> 0.5583 5.549624e+06, flex, 6, 8 + "3feade8d67883ae66e406c0f6ac4cd57", # -> 0.5579 4.478236e+06, flex, 4, 5 + "08b23ddf3b656f041d36152a88fba87e", # -> 0.5574 5.441997e+06, flex, 6, 7 + "1b8cc4ea0c23fb930d198c9bace1c01f", # -> 0.5565 4.984122e+06, flex, 4, 5 + "6982f70c319ed9df4e958de3b91baf23", # -> 0.5564 2.965374e+06, flex, 6, 3 + "04ab8c630f0ac8a0935ccaa7297a19dd", # -> 0.5536 8.794149e+06, flex, 6, 30 + "8412d76b73a091f1f075e3768f8c4485", # -> 0.5526 8.143381e+06, flex, 8, 20 + "806a48aa4aead784c9dc93e64c4d694a", # -> 0.5508 9.106848e+06, flex, 8, 25 + "f56b97ee2b9eb3673dde75b73acefa24", # -> 0.5496 8.110230e+06, flex, 6, 20 + "f1a3c121b4c6331eee397599a8d98475", # -> 0.5491 8.585863e+06, flex, 6, 25 + "53317e1a72893a4e61c5f07e1611a087", # -> 0.5479 4.988744e+06, flex, 6, 6 + "18b39edfb145e5db5ce268793593fa55", # -> 0.5478 8.271977e+06, flex, 8, 20 + "5dc8eb9e93379b1108d847486faec2b7", # -> 0.5478 8.807598e+06, flex, 8, 30 + "30fc1daf89f19e980b5e1c69ea3eeddb", # -> 0.5475 8.494731e+06, flex, 8, 20 + "efa5082ce34a54c5450944408dda3341", # -> 
0.5469 4.049373e+06, flex, 6, 5 + "2a57ca0c402c493d39a4fda0f722ef5f", # -> 0.5464 3.183237e+06, flex, 4, 3 + "d878cba4575aa5c22fcd40084d9a3384", # -> 0.5424 8.451002e+06, flex, 6, 25 + "30e350f38eebb57f58f88b01eb427b36", # -> 0.5404 8.581810e+06, flex, 6, 25 + "12b856434f33147f95343017850b88f6", # -> 0.5400 9.347335e+06, flex, 8, 30 + "91a5c093e1213c00cbf92e423795c04c", # -> 0.5369 4.752921e+06, flex, 8, 6 + "a0ebf35a5577ba037ad4436360a8d631", # -> 0.5336 8.461936e+06, flex, 6, 25 + "ed4e4713c7bcde4911563de5e51a6b1c", # -> 0.5317 5.609182e+06, flex, 6, 7 + "d64d58115a0c16a93b46a26541ea940c", # -> 0.5237 4.856726e+06, flex, 6, 6 + "33cc5a71d1e46710e53d58ae9e3fa7ec", # -> 0.5228 8.773018e+06, flex, 8, 25 + "4751c5518326351fc7d5a2d1e20ae00b", # -> 0.5224 9.387617e+06, flex, 8, 30 + "335513c169a9f64243badde924a39a8b", # -> 0.5224 2.997398e+06, flex, 4, 3 + "2d07af1c3560853dfc51e2e95e76b42f", # -> 0.5171 5.592623e+06, flex, 8, 8 + "37311b19ce9060557ab3c6fe57d2cb43", # -> 0.5166 2.736840e+06, flex, 6, 2 + "f13028523f1d5bd0c69d624f1ee6c85d", # -> 0.5116 3.993213e+06, flex, 6, 4 + "da98bfc8ea3478bb291828a5764d160c", # -> 0.5106 4.432343e+06, flex, 8, 5 + "473516dffe62ea86713392c3cbc2fab9", # -> 0.5092 5.323287e+06, flex, 8, 7 + "d5b85c422d79220101ba5f432ae51875", # -> 0.5078 5.704372e+06, flex, 6, 8 + "b24210b8f23f81d5041f2ec4bf47741c", # -> 0.5071 5.473601e+06, flex, 8, 7 + "1e1cc4492f0d9c28ba25c858687b311d", # -> 0.5062 8.548573e+06, flex, 6, 25 + "a18788d22bdc7da6982159ae318ea1f9", # -> 0.5056 2.637835e+06, flex, 4, 2 + "540fb846638db408db0a586efceec840", # -> 0.5044 2.465916e+06, flex, 4, 2 + "1b8efa9a01c3246c1e4642e8aa65da03", # -> 0.5031 6.534875e+06, flex, 6, 10 + "52fe89bc1409acc79ad0172460f6c46a", # -> 0.5011 8.336156e+06, flex, 6, 25 + "c27ebd89aa6f73f30849fa1c190e6458", # -> 0.5011 4.334441e+06, flex, 6, 5 + "adc2f6d7005c50c6237dc93869905211", # -> 0.4982 8.812724e+06, flex, 6, 25 + "8ff65102493822dc4fd81ab19d25e033", # -> 0.4962 6.517767e+06, flex, 8, 10 + "a45928491146a61ad7392180d03abf55", # -> 0.4955 8.809505e+06, flex, 6, 30 + "e54464fe08d22fd6bc218ad716299cd0", # -> 0.4940 8.181487e+06, flex, 6, 20 + "3ceb795754e7335c2a1f7a7d44eadd35", # -> 0.4912 6.295182e+06, flex, 8, 10 + "6c2880840091d1caac5cd166141f2327", # -> 0.4875 5.364793e+06, flex, 6, 7 + "98022e1420cb937e351850b1f56e96e6", # -> 0.4868 4.514491e+06, flex, 8, 5 + "2cce305ea6540c328c79447cb79a39bd", # -> 0.4805 2.725834e+06, flex, 8, 2 + "73cbba672e49d36f96fe434c00f5f3ab", # -> 0.4789 5.761759e+06, flex, 8, 8 + "c2fe04aec32d0290c6c2a3b0bc7f1bbb", # -> 0.4750 5.853028e+06, flex, 6, 8 + "3a1d5257cd243e9a896cfa0858b60450", # -> 0.4721 4.908726e+06, flex, 8, 6 + "c3e8bd05639786207a3e49041c5dd5a1", # -> 0.4717 5.403705e+06, flex, 6, 7 + "042c9b6b898e732c2531f960df4cc6b7", # -> 0.4584 8.971624e+06, flex, 8, 25 + "d40576261b8f2060ad2f904f8380e4ed", # -> 0.4571 8.335715e+06, flex, 8, 20 + "a498850d6b8debfcf5ae51d7d92aa488", # -> 0.4567 5.613264e+06, flex, 4, 7 + "fe356e97e433dfa1a4a6daa0cb533d34", # -> 0.4556 9.482259e+06, flex, 8, 30 + "09c8dd4e7f9645630608d2b335f4fd42", # -> 0.4534 4.556333e+06, flex, 8, 5 + "4faa5b579f938a5a9a9ef2c287f54434", # -> 0.4522 4.559044e+06, flex, 8, 5 + "00ae517067cbcd92ebfc024d843e480b", # -> 0.4504 7.803484e+06, flex, 8, 16 + "96214e322683ecb99c0033cac1a86ed2", # -> 0.4491 5.825073e+06, flex, 8, 8 + "7169526b3557e7f4567445ea2e1947a3", # -> 0.4452 6.415053e+06, flex, 8, 10 + "47c7e524532935287485da7a54b9bbd0", # -> 0.4451 8.338298e+06, flex, 6, 20 + "19bf11089cd1b489ad86b75937df27ec", # -> 0.4447 
7.706494e+06, flex, 8, 16 + "1f9c17051e51383b24ecb81a4bef1329", # -> 0.4441 9.278590e+06, flex, 6, 30 + "e8ff6a262d0e6730fd6f55b141d9cfe4", # -> 0.4364 5.390991e+06, flex, 8, 7 + "b7a9ae280c031ad7088b16bf178d809c", # -> 0.4351 4.098193e+06, flex, 8, 4 + "fe0fd30a17fd52dba450c853536f5b27", # -> 0.4318 2.739286e+06, flex, 6, 2 + "130b771d9ee96b22a28f67bd3f6651fe", # -> 0.4305 5.336351e+06, flex, 8, 7 + "f2e3e46d510051393098cf95cd716f88", # -> 0.4247 5.679510e+06, flex, 4, 7 + "41f7646a8bb31efbb89764cdadca2eb2", # -> 0.4237 2.265290e+06, flex, 6, 3 + "cafe07776a80e8c84d020f0de156a195", # -> 0.4188 5.431191e+06, flex, 8, 7 + "6784e09e4909dff9b28d740415f1ec50", # -> 0.4179 4.478756e+06, flex, 8, 5 + "169a426b053dc7b9ece944dd1d83b578", # -> 0.4148 9.194707e+06, flex, 6, 30 + "0abf9d5e11b9f3898898bae71bcf6e57", # -> 0.4094 7.744605e+06, flex, 6, 16 + "e2498db25591d5e52723470a0953a725", # -> 0.3957 9.452671e+06, flex, 8, 30 + "d92c79c0a81190c5cc6cfc4791cf70b0", # -> 0.3940 5.638742e+06, flex, 4, 7 + "815f8290d3818251e4bd5a5bcb8acbd6", # -> 0.3918 2.825758e+06, flex, 8, 3 + "416b6139ca6b68d874851170c7e349ac", # -> 0.3868 4.204707e+06, flex, 4, 4 + "c9dc0f5cbde20b03fefe4aec336b2aa2", # -> 0.3783 8.269713e+06, flex, 8, 20 + "0401977b495423cf305d2aa624340d36", # -> 0.3770 6.460710e+06, flex, 6, 10 + "5c2ee969c48668d31e31d11cc78a491e", # -> 0.3744 2.824383e+06, flex, 8, 2 + "f98adfa7a6daf49060b19966c4600e71", # -> 0.3645 4.571965e+06, flex, 6, 5 + "3d96cfd6bb9ddb14f72a763a86ab8ca8", # -> 0.3631 8.342203e+06, flex, 6, 20 + "54a35622869c5c9990669c443c529344", # -> 0.3543 4.503398e+06, flex, 6, 5 + "77f800cbafb0f25cda2a0355d419b6d0", # -> 0.3541 4.583585e+06, flex, 6, 5 + "176307c4bddce3f26248e089354bba0d", # -> 0.3512 5.600199e+06, flex, 4, 7 + "37575e50ef4c718b4c7baab7a41f4735", # -> 0.3509 4.371062e+06, flex, 8, 5 + "7889fe17fa9e15fbe7c872dc30c80d81", # -> 0.3495 8.984966e+06, flex, 8, 25 + "1c0972c2b3b622621ff7ae3b223ee164", # -> 0.3464 8.355320e+06, flex, 8, 20 + "25cce8bbc45c7f187800a9ce1220ed3a", # -> 0.3413 5.552800e+06, flex, 8, 7 + "6a45374e3aeb7df392fd36e22cee190a", # -> 0.3377 2.857394e+06, flex, 8, 2 + "217c00c772cb2980409890dede2b9a41", # -> 0.3345 5.447785e+06, flex, 4, 7 + "42d8a1cc1266731836e3b2742469d8d8", # -> 0.3250 7.750992e+06, flex, 6, 16 + "c7eb6de3357c58485a4111f365becb22", # -> 0.3134 6.044683e+06, flex, 4, 10 + "71aa4790a9cdc70c8d86c511fe730540", # -> 0.3071 8.114960e+06, flex, 8, 20 + "3bccd919a7bd3faeecb8f6165f0cb554", # -> 0.3049 2.827845e+06, flex, 6, 2 + "00ad8a02b539bbcb40b4f9556d385d21", # -> 0.2639 9.342941e+06, flex, 6, 30 + "48f8c6c676af256d5ac71f5124956b4a", # -> 0.2589 5.430239e+06, flex, 8, 7 + "1e0e97bf6f7283f8822111174bf44875", # -> 0.2543 4.367211e+06, flex, 6, 5 + "13b4e28a3c2adb02ebd26c6553322b77", # -> 0.2511 4.777519e+06, flex, 4, 5 + "1dfb5b20677ca85df1e6e0029ed5ead3", # -> 0.2366 8.310871e+06, flex, 6, 20 + "665f50e233c5e1ebfb99483684ee38b7", # -> 0.2264 5.563939e+06, flex, 6, 7 + "cb80d372e2df52f2541a75f3e3ab6fa3", # -> 0.1000 4.206023e+06, flex, 4, 6 + "4a38650f02cc8de4cbd89d3e3e811ff1", # -> 0.1000 4.080933e+06, flex, 4, 6 + "9948a9597f24813d95a0bc76c153d0f8", # -> 0.1000 3.957814e+06, flex, 4, 4 + "03d1bf347fc86923a4cd10126996565c", # -> 0.1000 3.934121e+06, flex, 4, 6 + "342438e8ee1a31ea6a21f1d03c93c0e7", # -> 0.1000 5.201594e+06, flex, 4, 7 + "3e797beb9ac8298f7826f65453fa3a2f", # -> 0.1000 2.953513e+06, flex, 4, 2 + "367fb3948c6f69886ec3028a8360dc65", # -> 0.6659, 4.209346e+06, flex, 6, 4 + "2e2ccc15724987b696acfdc21bf47a51", # -> 0.6503, 
7.269416e+06, flex, 6, 16 + "bc15239e6d2c175a0334f61af9848739", # -> 0.6494, 5.148289e+06, flex, 4, 6 + "716f7759cf91aa844d7000045b1eb600", # -> 0.6476, 2.741280e+06, flex, 8, 3 + "3c5f7b70f075fe8462c635153dada47d", # -> 0.6303, 5.007099e+06, flex, 6, 6 + "8f0f60a3e18b7a24642cd76ed327b9a1", # -> 0.6295, 5.785277e+06, flex, 6, 8 + "531d8b20ad7953e37f974f7d5b45e01c", # -> 0.6267, 5.382755e+06, flex, 4, 8 + "57d1dc4ee01cee3cf0385670e1741322", # -> 0.6211, 2.991667e+06, flex, 4, 3 + "05f70440f96aaf900cc613fb6a38610c", # -> 0.6092, 6.469393e+06, flex, 6, 10 + "9b86d4910bd95fb6d262c5e5983c14ad", # -> 0.6061, 4.349474e+06, flex, 4, 4 + "17b5fc2623904c1c66093c08e1b79f27", # -> 0.6027, 6.106181e+06, flex, 4, 10 + "e67bd735524454b8a261787dcf2a47eb", # -> 0.6000, 3.895603e+06, flex, 6, 4 + "fbe458505010a51dfc97863cfa4725ca", # -> 0.5971, 6.166594e+06, flex, 4, 10 + "56c8a5c06806146647506acf32634294", # -> 0.5963, 6.381624e+06, flex, 6, 10 + "0e8ab6e96cf978db32c2d7b7699cfe7f", # -> 0.5901, 2.823915e+06, flex, 8, 3 + "f0cd6ddce67c86d57bf5661a26b5bde2", # -> 0.5823, 4.080290e+06, flex, 8, 4 + "9e9f38b272165d42a0528ebdf9027f1c", # -> 0.5815, 3.174854e+06, flex, 4, 3 + "4cc609a2bb8c5efa6791c32ea349850a", # -> 0.5778, 5.486881e+06, flex, 4, 8 + "33eeff2627c8868d784639522b1eb4f1", # -> 0.5747, 2.619111e+06, flex, 8, 3 + "7aec249371301c815c3eead824c6f7b1", # -> 0.5738, 7.557755e+06, flex, 6, 16 + "abc9e0d7b65f1070bea9dfafa919f90f", # -> 0.5725, 5.817821e+06, flex, 6, 8 + "90a9560d3a79b5ae23440476b04d9561", # -> 0.5704, 6.553902e+06, flex, 8, 10 + "83973c46451c905ae0352fea31cd0df8", # -> 0.5676, 7.421917e+06, flex, 8, 16 + "6a405a2cf4eb6ece8241296f29b52aad", # -> 0.5637, 2.873761e+06, flex, 6, 3 + "bcaf7d4fe4b1a83c3d16efe2f6e4d967", # -> 0.5615, 5.767932e+06, flex, 8, 8 + "d86af65d32bbb56f2eca4d54a8bdaeeb", # -> 0.5529, 7.608056e+06, flex, 8, 16 + "a782864f124987e758d7911279c2b7cb", # -> 0.5522, 2.935374e+06, flex, 6, 3 + "57a7e8afe58c32f72af8ea86cd18ddbb", # -> 0.5511, 4.148061e+06, flex, 6, 4 + "0dbadd053f8161566f8bfd04e29f167b", # -> 0.5500, 3.121372e+06, flex, 4, 3 + "03b75598bd4a4799823c4ac80dad72d1", # -> 0.5483, 7.589383e+06, flex, 6, 16 + "77093fa86e3ac8b23d13065d8645d019", # -> 0.5458, 6.490005e+06, flex, 8, 10 + "71d583b85d081e1e93bd75b836c83de6", # -> 0.5402, 2.819020e+06, flex, 8, 2 + "36d5275416ed73b04c6952c4276c083d", # -> 0.5292, 4.092483e+06, flex, 8, 4 + "b5bceb9302a3df3a2771deb7d0edd4c8", # -> 0.5222, 7.549662e+06, flex, 8, 16 + "36d35f39f535e4aaa9cb5da438c8d903", # -> 0.5154, 2.768778e+06, flex, 6, 2 + "a519ef258d54600ea6920c4788a6280b", # -> 0.5086, 2.802222e+06, flex, 8, 2 + "513a1c16282dec9f124b9257bbb28177", # -> 0.5075, 5.524247e+06, flex, 4, 8 + "8894cc71801b352192f5759cf887d6e4", # -> 0.5066, 2.754725e+06, flex, 6, 2 + "1940550b5b9a07ce68968093723ef697", # -> 0.5053, 6.330552e+06, flex, 8, 10 + "85444b910411afd5676269013fce39e6", # -> 0.4950, 2.536435e+06, flex, 4, 2 + "1a704da76414aa5c50b9316544f50364", # -> 0.4802, 2.524790e+06, flex, 4, 2 + "4b746c04189fe0077f7938802ac24dd4", # -> 0.4729, 3.983884e+06, flex, 8, 4 + "7567c93fa1089f5cedc348292372ceb1", # -> 0.4702, 2.734160e+06, flex, 8, 2 + "644bad7c98a5b9cb6985cb97d011c8f3", # -> 0.4542, 4.932631e+06, flex, 8, 6 + "d38eaafd8b59ce694bd77c959b2c1e68", # -> 0.4036, 5.795405e+06, flex, 8, 8 + "f99a35d5bbf685c7089d7519291b5c0f", # -> 0.3773, 2.633205e+06, flex, 4, 2 + "ff48b26827e51e5a9e98b5e73ced1c98", # -> 0.3749, 4.967528e+06, flex, 8, 6 + "b9815d8e206f5e7e7651761c998283e2", # -> 0.3673, 5.903986e+06, flex, 6, 8 + 
"2e2c5d82931665495de2f92f4af6a095", # -> 0.3622, 2.794733e+06, flex, 6, 2 + "dbdb4f07f3f92b592762aec1a402b14f", # -> 0.3552, 2.728159e+06, flex, 6, 3 + "f33fc387a8c608886559ac95b3e4f7ce", # -> 0.3458, 4.753085e+06, flex, 6, 6 + "66f7bfa092d65404f9a704e02c6e6f77", # -> 0.3375, 4.760287e+06, flex, 8, 6 + "1af4b0115d6a08698fbfb4114a770489", # -> 0.3331, 6.535710e+06, flex, 6, 10 + "0f3f929d0d9b0328f5e93ce11ea73b76", # -> 0.1000, 3.965023e+06, flex, 4, 4 + "049dca947754753b7be1882fa3e5f0df", # -> 0.1000, 3.978452e+06, flex, 4, 6 + "a8facc8acb1e1e2af5009a2d107a1c74", # -> 0.1000, 4.142553e+06, flex, 4, 6 + "14ec0f67754688eb30f47f432595d4fa", # -> 0.1000, 4.130849e+06, flex, 4, 4 + "478535a7bbb120ca3eee26c64c1a6fd6", # -> 0.6697, 2.715756e+06, flex, 6, 2 + "9d3cdf362377b77f2769cddfc2f06cf2", # -> 0.6670, 2.802477e+06, flex, 8, 3 + "8f70e90c6769dd3c9f44402acfec3606", # -> 0.1000, 3.948141e+06, flex, 4, 6 + "58756c3729dcedfeeab0fea628634e05", # -> 0.1000, 4.042809e+06, flex, 4, 4 + "ec39ccf1998a61cf388b6c64cf53e534", # -> 0.1000, 4.040190e+06, flex, 4, 6 + "7c93ffb859c056bb8d58a57ea731a5b6", # -> 0.1000, 3.956951e+06, flex, 4, 6 + "5c58106ae695dc7842cee914198213ce", # -> 0.1000, 3.999102e+06, flex, 4, 4 + "66eb61bd460185ebb3a0e7e0903998d8", # -> 0.1000, 3.915104e+06, flex, 4, 4 + "c26c75797cc5698524c128b2c03d71ad", # -> 0.6699, 4.936011e+06, flex, 6, 6 + "1b5f11c64a41bebe6ecf86c89aec78e7", # -> 0.6521, 2.760852e+06, flex, 6, 2 + "e8e38b72f113ae04a4b26c4c262d7fd2", # -> 0.6504, 4.038943e+06, flex, 6, 4 + "936c04b9a6f211742285c459cc366134", # -> 0.6487, 2.625230e+06, flex, 8, 2 + "654b17e55ed66715c84161e8f7dbec28", # -> 0.1000, 4.271655e+06, flex, 4, 4 + "e5509196455a82fed7f194fd53f0e4c3", # -> 0.1000, 3.883347e+06, flex, 4, 6 + "739b8f45159088a1cbfff30f92e29a59", # -> 0.5141, 2.406360e+06, flex, 6, 3 + "fe398b8e7dfdc1cb8c71d2183a84de07", # -> 0.1000, 3.996849e+06, flex, 4, 4 + "c8a4f191f560ea10931775d0a8320c08", # -> 0.1000, 3.959836e+06, flex, 4, 6 + "04ccc689dc66e77aa8ecd339281c5ff0", # -> 0.1000, 4.092232e+06, flex, 4, 6 + "bab2a48cbef368310b076926701822a3", # -> 0.1000, 4.103027e+06, flex, 4, 4 + } + experiments = [ + exp for exp in experiments if not any(h in blacklist for h in exp) + ] + + print("After blacklisting, keeping", len(experiments), "experiments:") + for exp in experiments: + print(" → ".join(exp)) + + # 1) build a lookup: hash → flat dict + rec_map = {rec["hash"]: rec for rec in flat_records} + + # 2) for each experiment, merge the stages side-by-side, prefixing with the stage name + experiments_data = [] + for exp in experiments: + combined = {} + for h in exp: + rec = rec_map[h] + stage = rec["name"] + # prefix every field (including hash, loss, accuracy, nested flattened keys…) + for col, val in rec.items(): + combined[f"{stage}_{col}"] = val + experiments_data.append(combined) + + # 3) make a DataFrame + experiments_df = pd.DataFrame(experiments_data) + print(experiments_df.columns) + print( + experiments_df[ + [ + "qat_accuracy", + "qat_complexity", + "qat_hash", + "quantization_parameters_kernel_1_type", + "quantization_parameters_kernel_1_bits", + "quantization_parameters_kernel_1_n_levels", + ] + ].sort_values(by=["qat_accuracy"], ascending=False) + ) + + to_drop = [ + "model_creation*", + "freeze*", + "*function", + "*activations_*", + "*bias*", + "*input_shape*", + "*_0_*", + "*_2_*", + "*_3_*", + "*_name", + "*_hash", + "model_creation_seed", + "quantization_seed", + "qat_seed", + "*dataset", + "*_categories", + "*_epochs", + "*_batch_size", + "*_learning_rate", + 
"*_validation_split", + "quantization_complexity", + "qat_parameters_early_stopping", + ] + # find all columns matching any pattern + cols_to_drop = [ + col + for col in experiments_df.columns + if any(fnmatch.fnmatch(col, pat) for pat in to_drop) + ] + experiments_df = experiments_df.drop(columns=cols_to_drop) + + rename_map = { + "name": "stage", + "quantization_parameters_kernel_1_type": "type", + "quantization_parameters_kernel_1_bits": "bits", + "quantization_parameters_kernel_1_n_levels": "n_levels", + "initial_training_seed": "seed", + } + experiments_df = experiments_df.rename(columns=rename_map) + experiments_df = experiments_df.dropna() + + experiments_df["qat_complexity"] = experiments_df[ + "qat_complexity" + ] # Convert to Kbits + # print( + # experiments_df.sort_values(by=["qat_accuracy_mean", "qat_complexity_mean"], ascending=False) + # ) + # print(experiments_df.sort_values(by=["qat_complexity_mean"], ascending=False)) + + original_accuracy_mean = experiments_df["initial_training_accuracy"].mean() + original_complexity_mean = experiments_df[ + "initial_training_complexity" + ].mean() # in Kbits + original_accuracy_var = experiments_df["initial_training_accuracy"].var() + original_complexity_var = experiments_df[ + "initial_training_complexity" + ].var() + original_accuracy_sd = np.sqrt(original_accuracy_var) + original_complexity_var_kbits = original_complexity_var + original_complexity_sd = np.sqrt(original_complexity_var_kbits) + n = experiments_df["initial_training_accuracy"].count() + original_accuracy_se = original_accuracy_sd / np.sqrt(n) + original_complexity_se = original_complexity_sd / np.sqrt(n) + original_accuracy_min = experiments_df["initial_training_accuracy"].min() + original_complexity_min = experiments_df[ + "initial_training_complexity" + ].min() + original_accuracy_max = experiments_df["initial_training_accuracy"].max() + original_complexity_max = experiments_df[ + "initial_training_complexity" + ].max() + + # 1) Identify the metrics and the hyperparam columns to group by + metrics = ["qat_loss", "qat_accuracy", "qat_complexity"] + hyperparam_cols = [ + col + for col in experiments_df.columns + if col in ("type", "bits", "n_levels") + ] + + # 2) Group by those hyperparams, compute mean & var of the metrics + stats = ( + experiments_df.groupby(hyperparam_cols)[metrics] + .agg(["mean", "var", "max", "min"]) + .reset_index() + ) + + # 3) Flatten the resulting MultiIndex columns + stats.columns = [ + f"{lvl0}_{lvl1}" if lvl1 else lvl0 for lvl0, lvl1 in stats.columns + ] + + counts = ( + experiments_df.groupby(hyperparam_cols)[metrics].count().reset_index() + ) + count_cols = [f"{m}_count" for m in metrics] + counts.columns = hyperparam_cols + count_cols + row_uniques = counts[count_cols].nunique(axis=1) + assert (row_uniques == 1).all(), "Not all metric‐counts agree per group!" 
+ counts["count"] = counts[count_cols[0]] + counts = counts[hyperparam_cols + ["count"]] + stats = stats.merge(counts, on=hyperparam_cols, how="left") + + # 3) Compute standard deviations and errors + stats["se_accuracy"] = np.sqrt(stats["qat_accuracy_var"]) / np.sqrt( + stats["count"] + ) + stats["se_complexity"] = np.sqrt(stats["qat_complexity_var"]) / np.sqrt( + stats["count"] + ) + + print( + stats.sort_values( + by=["qat_accuracy_mean", "qat_complexity_mean"], ascending=False + ) + ) + print(stats.sort_values(by=["qat_complexity_mean"], ascending=False)) + + cmap = mpl.colormaps["tab10"] + xmin = stats["qat_complexity_mean"].min() * 0.9 + xmax = stats["qat_complexity_mean"].max() * 1.1 + xmax = original_complexity_mean * 1.1 + + plt.figure(figsize=(6, 4)) + plt.scatter( + original_complexity_mean, + original_accuracy_mean, + color="red", + label="Original Model", + zorder=3, + ) + bits_values = experiments_df["bits"].unique() + for i, bits in enumerate(sorted(bits_values)): + subset = experiments_df[experiments_df["bits"] == bits].sort_values( + "qat_complexity" + ) + # 1) dashed line only, semi-transparent + plt.semilogx( + subset["qat_complexity"], + subset["qat_accuracy"], + linestyle="--", + color=cmap(i % 10), + alpha=0.3, + zorder=2, + label=None, # we’ll label in the marker call + ) + # 2) opaque markers on top, with label + plt.scatter( + subset["qat_complexity"], + subset["qat_accuracy"], + marker=".", + color=cmap(i % 10), + zorder=3, + label=f"{bits} bits", + ) + add_horizontal_se_band( + plt.gca(), + original_accuracy_mean, + original_accuracy_se, + color="red", + ) + plt.xlabel("BWC") + plt.ylabel("Precisión") + plt.ylim([0.6, 0.75]) + plt.xlim([xmin, xmax]) + plt.grid(which="both", linestyle="--", linewidth=0.5, alpha=0.6) + plt.legend() + plt.tight_layout() + plt.savefig("complexity_vs_quantized_flex-es.png", dpi=150) + + # 2) sort bits for consistent coloring + plt.figure(figsize=(6, 4)) + plt.scatter( + original_complexity_mean, + original_accuracy_mean, + color="red", + label="Original Model", + zorder=3, + ) + bits_list = sorted(stats["bits"].unique()) + for i, bits in enumerate(bits_list): + grp = stats[stats["bits"] == bits].sort_values("qat_complexity_mean") + # 1) draw only the dashed line + errorbars (no markers) + plt.errorbar( + grp["qat_complexity_mean"], + grp["qat_accuracy_mean"], + xerr=grp["se_complexity"], + yerr=grp["se_accuracy"], + fmt="--", # just the line + color=cmap(i % 10), + ecolor=cmap(i % 10), + alpha=0.3, + capsize=3, + zorder=2, + ) + # 2) draw the opaque markers on top + plt.scatter( + grp["qat_complexity_mean"], + grp["qat_accuracy_mean"], + marker=".", + s=30, + color=cmap(i % 10), + label=f"{bits} bits", + zorder=3, + ) + add_horizontal_se_band( + plt.gca(), + original_accuracy_mean, + original_accuracy_se, + color="red", + ) + plt.xscale("log") + plt.xlabel("BWC") + plt.ylabel("Precisión") + plt.ylim([0.6, 0.75]) + plt.xlim([xmin, xmax]) + plt.grid(which="both", linestyle="--", linewidth=0.5, alpha=0.6) + plt.legend() + plt.tight_layout() + plt.savefig("complexity_vs_quantized_flex_stats-es.png", dpi=150) + + plt.figure(figsize=(6, 4)) + plt.scatter( + original_complexity_mean, + original_accuracy_mean, + color="red", + label="Original Model", + zorder=3, + ) + bits_list = sorted(stats["bits"].unique()) + for i, bits in enumerate(bits_list): + grp = stats[stats["bits"] == bits].sort_values("qat_complexity_mean") + plt.plot( + grp["qat_complexity_mean"], + grp["qat_accuracy_mean"], + marker=".", + linestyle="--", + color=cmap(i % 10), + 
label=f"{bits} bits", + zorder=3, + ) + plt.plot( + grp["qat_complexity_mean"], + grp["qat_accuracy_max"], + linestyle="-", + color=cmap(i % 10), + alpha=0.5, + zorder=2, + ) + plt.plot( + grp["qat_complexity_mean"], + grp["qat_accuracy_min"], + linestyle=":", + color=cmap(i % 10), + alpha=0.5, + zorder=2, + ) + add_horizontal_se_band( + plt.gca(), + original_accuracy_mean, + original_accuracy_se, + color="red", + ) + plt.xscale("log") + plt.xlabel("BWC") + plt.ylabel("Precisión") + plt.ylim([0.6, 0.75]) + plt.xlim([xmin, xmax]) + plt.grid(which="both", linestyle="--", linewidth=0.5, alpha=0.6) + plt.legend() + plt.tight_layout() + plt.savefig("complexity_vs_quantized_flex_stats2-es.png", dpi=150) diff --git a/src/stage/plotv3.py b/src/stage/plotv3.py new file mode 100755 index 0000000..03fd252 --- /dev/null +++ b/src/stage/plotv3.py @@ -0,0 +1,766 @@ +#!/usr/bin/env python3 + +import fnmatch +import json +import re +from collections import defaultdict +from pathlib import Path + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +pd.set_option("display.max_rows", None) +pd.set_option("display.max_columns", None) +pd.set_option("display.width", None) + + +def load_pipeline(pipeline_metadata_file, results_path, metadata_path): + df = pd.DataFrame() + stages_results = [] + stages_names = [] + with open(pipeline_metadata_file, "r") as f: + pipeline_metadata = json.load(f) + for stage_hash in pipeline_metadata["history"]: + with open(results_path / f"{stage_hash}.json", "r") as stage_file: + stage_results = json.load(stage_file) + stages_results.append(stage_results) + with open( + metadata_path / f"{stage_hash}.json", "r" + ) as metadata_file: + stage_metadata = json.load(metadata_file) + stages_names.append(stage_metadata["name"]) + df = pd.DataFrame(stages_results) + df["stage"] = stages_names + return df + + +def load_file(hash_, results_path: Path, metadata_path: Path) -> dict: + """Lee los dos JSON correspondientes a `hash_` y devuelve un dict unificado + con todas las claves de ambos.""" + # Cargar resultados + with open(results_path / f"{hash_}.json", "r") as f_res: + res = json.load(f_res) + # Cargar metadata + with open(metadata_path / f"{hash_}.json", "r") as f_meta: + meta = json.load(f_meta) + # Combinar, dándole preferencia a `res` en caso de colisión de claves + combined = {**meta, **res} + # Añadimos el hash como columna + combined["hash"] = hash_ + return combined + + +def flatten_json(obj, parent_key: str = "", sep: str = "_"): + """Recursively flattens dicts and lists into a single dict mapping + flattened_key -> value. + + List items get their index injected into the key. 
+ """ + items = {} + if isinstance(obj, dict): + for k, v in obj.items(): + new_key = f"{parent_key}{sep}{k}" if parent_key else k + items.update(flatten_json(v, new_key, sep=sep)) + elif isinstance(obj, list): + for i, v in enumerate(obj): + new_key = f"{parent_key}{sep}{i}" if parent_key else str(i) + items.update(flatten_json(v, new_key, sep=sep)) + else: + # reached a leaf + items[parent_key] = obj + return items + + +def add_horizontal_se_band( + ax, + mean, + se, + *, + color="red", + label="Original Model", + alpha_line=0.8, + alpha_band=0.2, + z_mean=3, + z_line=2, + z_band=1, +): + """On ax, draw a dotted line at `mean`, dashed lines at ±se, and fill + between them.""" + # mean line + ax.axhline( + mean, + color=color, + linestyle=":", + linewidth=0.3, + alpha=alpha_line, + zorder=z_mean, + # label=f"{label}" + ) + # ±1 SE lines + ax.axhline( + mean + se, + color=color, + linestyle="-", + linewidth=0.3, + alpha=0.6, + zorder=z_line, + ) + ax.axhline( + mean - se, + color=color, + linestyle="-", + linewidth=0.3, + alpha=0.6, + zorder=z_line, + ) + # filled band + x0, x1 = ax.get_xlim() + ax.fill_between( + [x0, x1], + [mean - se, mean - se], + [mean + se, mean + se], + color=color, + alpha=alpha_band, + zorder=z_band, + ) + + +if __name__ == "__main__": + checkpoint_path = Path("checkpoints") + # pipeline_path = checkpoint_path / "pipelines" + metadata_path = checkpoint_path / "metadata" + results_path = checkpoint_path / "results" + + # Patrón: sólo caracteres hexadecimales en el nombre del fichero + hash_pattern = re.compile(r"^[0-9a-fA-F]+$") + + # Listado de hashes (sin la extensión .json) + hashes = [ + p.stem + for p in results_path.glob("*.json") + if hash_pattern.match(p.stem) + ] + flat_records = [] + for h in hashes: + with open(results_path / f"{h}.json", "r") as fres: + res = json.load(fres) + with open(metadata_path / f"{h}.json", "r") as fmeta: + meta = json.load(fmeta) + + merged = {**meta, **res, "hash": h} + flat = flatten_json(merged) + flat_records.append(flat) + combined_df = pd.DataFrame(flat_records) + + # 1) Build adjacency list: parent_hash → [child_hashes] + tree = defaultdict(list) + for _, row in combined_df.iterrows(): + h = row["hash"] + prev = row.get("previous_hash") + if pd.notna(prev): + tree[prev].append(h) + + # 2) Find root nodes: those with no valid previous_hash + all_hashes = set(combined_df["hash"]) + roots = [ + h + for _, row in combined_df.iterrows() + for h in [row["hash"]] + if pd.isna(row.get("previous_hash")) + or row["previous_hash"] not in all_hashes + ] + + # 3) DFS to collect every root→leaf path + experiments = [] + + def _collect_paths(node, path): + children = tree.get(node, []) + if not children: + experiments.append(path) + else: + for child in children: + _collect_paths(child, path + [child]) + + for root in roots: + _collect_paths(root, [root]) + + blacklist = { + # ) hash # -> acc, complexity, type, bits, n_levels + "82bcde2ecc786b41e6d2b33425c5295a", # -> 0.6825, 2.522522e+06, flex, 8, 2 + "2d1d7bb39ddb4bce584d824a41b8aaa2", # -> 0.6820, 2.346810e+06, flex, 8, 2 + "deb62ff4fda51063fb9094744ffc0051", # -> 0.6803, 2.562207e+06, flex, 8, 2 + "8920925440a17cd21177b04319f4dde7", # -> 0.6762, 2.629205e+06, flex, 8, 2 + "9eefaec10856b78c5b1b04cbbb2138ce", # -> 0.6834, 5.739940e+06, flex, 4, 10 + "c6d6425d11b008bea5ae624601cf69f9", # -> 0.6869, 8.300487e+06, flex, 6, 25 + "c24d8367f7eea4fe40c6b11d37a2ef64", # -> 0.6829, 7.521903e+06, flex, 6, 30 + "f1a01952db947ca2a5dcf6932bc4dd9d", # -> 0.6870, 7.761222e+06, flex, 6, 20 + 
"f76e8a717ae8f27c07edabd45a697813", # -> 0.6803, 2.657772e+06, flex, 6, 2 + "318464ddf0bb2feb2249b20e4b2b0c35", # -> 0.6802, 2.654884e+06, flex, 6, 3 + "a14dc019e8c2e2b8e4929ad2edae4d80", # -> 0.6739, 2.905927e+06, flex, 6, 3 + "fe88a2a970c6d4fe95e1c5efb867de67", # -> 0.6719, 4.990952e+06, flex, 6, 6 + "2d48e19bced1b6e3b15eab632a40cf44", # -> 0.6682 2.476079e+06, flex, 4, 2 + "4965cf50af85c54e65044dc0b5717bcf", # -> 0.6647 4.679128e+06, flex, 8, 5 + "cf82eb531d5c718017874750453af75e", # -> 0.6622 5.459853e+06, flex, 4, 10 + "df2c27c6c58a72ce19f2bec69ba6fa22", # -> 0.6602 4.127570e+06, flex, 4, 4 + "081eea60c304ff553e0ec04fcbfe17f8", # -> 0.6566 8.184740e+06, flex, 6, 25 + "f8138453a8c78c1598af911967b22ef7", # -> 0.6399 4.328394e+06, flex, 6, 5 + "0942ca5ce86afde3e272b8d3bb1c79cf", # -> 0.6365 5.416113e+06, flex, 6, 7 + "57a410c7713212ae7e7874174a00ec28", # -> 0.6358 5.352943e+06, flex, 4, 8 + "1ef38adee955e86d29015639c772708c", # -> 0.6351 8.514102e+06, flex, 6, 30 + "ca9710fb7d998d349dc0fabffe3e7487", # -> 0.6336 4.092541e+06, flex, 4, 5 + "f361e46f3f1a62d15f6ffe924f341e4b", # -> 0.6294 5.502136e+06, flex, 6, 7 + "dcdc5e38399aa6b1f4aec6dd0f4a0011", # -> 0.6228 5.499287e+06, flex, 6, 7 + "62293d44559425797da8b3cf5c03949d", # -> 0.6226 5.282950e+06, flex, 8, 7 + "d3e21e49f4a6737a99c64ae7bd962ed9", # -> 0.6173 8.716123e+06, flex, 6, 30 + "4b847613419cf65b6933ac7df186b5e1", # -> 0.6172 4.120780e+06, flex, 6, 4 + "40c151e376a2a746a618bdea64acb645", # -> 0.6118 8.659160e+06, flex, 8, 25 + "a1d029ad31e8d0e78a2727cb817238cd", # -> 0.6100 9.312237e+06, flex, 8, 30 + "257185fcc8615ad18afa388080e0b42a", # -> 0.6096 9.424802e+06, flex, 8, 30 + "cc7ee437a88cd218050280f5e1687d7b", # -> 0.6091 4.411632e+06, flex, 4, 5 + "1c2b3e12e0e008acc93ad000dc46860c", # -> 0.6085 8.087628e+06, flex, 8, 20 + "ad7f438d121a818e11559c02cf40e875", # -> 0.6078 8.815668e+06, flex, 8, 25 + "bf3715b86ac5cece775d945518828380", # -> 0.6075 8.649432e+06, flex, 6, 30 + "c671e4816625f3fcef3ae19735e588cb", # -> 0.6065 8.057870e+06, flex, 6, 20 + "f77e1ff54c1e8548b16d78166e583a20", # -> 0.6061 4.340742e+06, flex, 6, 5 + "f88ec612d8ea1bdcc25d3dc257d9078b", # -> 0.6045 6.116025e+06, flex, 4, 10 + "cacd708264fa9f8700b9f4fb32b87494", # -> 0.6013 5.497829e+06, flex, 4, 8 + "740a1562cfbc1d391c4f3a1cdbddd879", # -> 0.5990 9.368490e+06, flex, 8, 30 + "18c96f0fc4d4aad8cef826c6cc048846", # -> 0.5981 2.913844e+06, flex, 8, 3 + "6767ffdfcd17bf848370ba70b057aad9", # -> 0.5969 8.600187e+06, flex, 6, 30 + "d8e9e5b7f38c90b8e4aa509d9b5ced31", # -> 0.5961 5.227277e+06, flex, 4, 6 + "419c90ed7c09a63c3fc90a54661617b3", # -> 0.5936 3.709586e+06, flex, 4, 5 + "5a9b0e4a09037ec75430ad220ccdde6d", # -> 0.5930 8.775996e+06, flex, 8, 25 + "5d070cbf206198f489173a5f1ec74f91", # -> 0.5927 4.441964e+06, flex, 8, 5 + "d36592e5b6e1894fba94f2264136580b", # -> 0.5906 4.023145e+06, flex, 6, 4 + "6d9130bcab5126da553138f680ad5343", # -> 0.5901 5.501902e+06, flex, 6, 7 + "228c22a86a8e5db74626432a4fa7ebf0", # -> 0.5899 8.682319e+06, flex, 8, 25 + "ce99dd6e073611df6d56d0b035f2c380", # -> 0.5894 5.523196e+06, flex, 8, 7 + "e4f9b423a715a006966cafd108626582", # -> 0.5894 8.710177e+06, flex, 8, 25 + "dd44e1d79606bbf821f776b550fd5708", # -> 0.5850 8.223336e+06, flex, 6, 20 + "553230a79d1600e5e96e4e55bced5562", # -> 0.5847 4.876093e+06, flex, 4, 7 + "5b6c1d077fff399487198d0934d2f801", # -> 0.5833 6.564747e+06, flex, 8, 10 + "a79306498306fd871b72f91886050764", # -> 0.5833 4.978046e+06, flex, 4, 7 + "116279bfe5331dce42f5214acff1b8b8", # -> 0.5827 4.314205e+06, flex, 6, 5 + 
"2eb0a2ba00ac93326abf3351bd1d9883", # -> 0.5826 5.430122e+06, flex, 8, 7 + "f48060242ed1e5845835ec38f86f90e4", # -> 0.5826 7.373076e+06, flex, 8, 16 + "a2a0c3db77ffc9e87502fce7370eb6a0", # -> 0.5823 8.731863e+06, flex, 6, 25 + "ddc474f0d54a8cc5731b000baf77756d", # -> 0.5809 9.161021e+06, flex, 8, 30 + "4126ec5dbf996778dcb49af8c4563a0e", # -> 0.5790 4.420320e+06, flex, 8, 5 + "1dc469302d509868b5807c2c582781da", # -> 0.5787 6.152794e+06, flex, 6, 10 + "a914f36d1b76973a6f5213a6cdb90e76", # -> 0.5776 2.440996e+06, flex, 8, 3 + "07c097bac4abda58859eb5055fbb3443", # -> 0.5759 5.499702e+06, flex, 4, 8 + "6351c9acd3391e61432ecfa4060a092a", # -> 0.5756 4.607602e+06, flex, 8, 5 + "0828459655a6999d7d9af066c8632f60", # -> 0.5751 5.512189e+06, flex, 8, 7 + "cfa18e91c2f434e01e9fa4d928bab7e4", # -> 0.5749 2.745346e+06, flex, 6, 3 + "741cecf1ee1bbce01c1a2d3f5521c491", # -> 0.5741 9.617448e+06, flex, 8, 30 + "81038213fd8189ba707569fb2b4da11c", # -> 0.5734 5.008818e+06, flex, 8, 6 + "e913f582950807f522d9c802b6ef3f3e", # -> 0.5719 4.106683e+06, flex, 8, 4 + "a47756b976c51cc11b50e1d8238c5200", # -> 0.5716 4.141123e+06, flex, 6, 5 + "ed3fbf80c1e7546321a2dbd6ada1354d", # -> 0.5715 7.853356e+06, flex, 8, 16 + "49befbd5aea7ec47387bd96665c38982", # -> 0.5702 8.188566e+06, flex, 8, 20 + "253e6448f7f43101cbf863974d331d0a", # -> 0.5685 8.324831e+06, flex, 8, 20 + "324fe5d7f47770902d73752a8e01d62d", # -> 0.5663 4.599661e+06, flex, 4, 5 + "387929c287656e1418ecaca896f443ad", # -> 0.5658 5.366461e+06, flex, 6, 7 + "724f9a6abede93b72c8fc7df30b8d9db", # -> 0.5644 4.705492e+06, flex, 6, 6 + "446a454fc94b2789cd943578ed0bc074", # -> 0.5640 4.995121e+06, flex, 4, 5 + "a7f630a537c947ccaf46387051efeef7", # -> 0.5603 8.207445e+06, flex, 6, 20 + "2f23ee58330ffbafc3acb0b87a0699a4", # -> 0.5593 9.103208e+06, flex, 8, 25 + "dbdc28ebce1b0a85029fce6714e49c9f", # -> 0.5587 4.413465e+06, flex, 8, 5 + "b5358693b4337fc31be52a52df800328", # -> 0.5583 5.549624e+06, flex, 6, 8 + "3feade8d67883ae66e406c0f6ac4cd57", # -> 0.5579 4.478236e+06, flex, 4, 5 + "08b23ddf3b656f041d36152a88fba87e", # -> 0.5574 5.441997e+06, flex, 6, 7 + "1b8cc4ea0c23fb930d198c9bace1c01f", # -> 0.5565 4.984122e+06, flex, 4, 5 + "6982f70c319ed9df4e958de3b91baf23", # -> 0.5564 2.965374e+06, flex, 6, 3 + "04ab8c630f0ac8a0935ccaa7297a19dd", # -> 0.5536 8.794149e+06, flex, 6, 30 + "8412d76b73a091f1f075e3768f8c4485", # -> 0.5526 8.143381e+06, flex, 8, 20 + "806a48aa4aead784c9dc93e64c4d694a", # -> 0.5508 9.106848e+06, flex, 8, 25 + "f56b97ee2b9eb3673dde75b73acefa24", # -> 0.5496 8.110230e+06, flex, 6, 20 + "f1a3c121b4c6331eee397599a8d98475", # -> 0.5491 8.585863e+06, flex, 6, 25 + "53317e1a72893a4e61c5f07e1611a087", # -> 0.5479 4.988744e+06, flex, 6, 6 + "18b39edfb145e5db5ce268793593fa55", # -> 0.5478 8.271977e+06, flex, 8, 20 + "5dc8eb9e93379b1108d847486faec2b7", # -> 0.5478 8.807598e+06, flex, 8, 30 + "30fc1daf89f19e980b5e1c69ea3eeddb", # -> 0.5475 8.494731e+06, flex, 8, 20 + "efa5082ce34a54c5450944408dda3341", # -> 0.5469 4.049373e+06, flex, 6, 5 + "2a57ca0c402c493d39a4fda0f722ef5f", # -> 0.5464 3.183237e+06, flex, 4, 3 + "d878cba4575aa5c22fcd40084d9a3384", # -> 0.5424 8.451002e+06, flex, 6, 25 + "30e350f38eebb57f58f88b01eb427b36", # -> 0.5404 8.581810e+06, flex, 6, 25 + "12b856434f33147f95343017850b88f6", # -> 0.5400 9.347335e+06, flex, 8, 30 + "91a5c093e1213c00cbf92e423795c04c", # -> 0.5369 4.752921e+06, flex, 8, 6 + "a0ebf35a5577ba037ad4436360a8d631", # -> 0.5336 8.461936e+06, flex, 6, 25 + "ed4e4713c7bcde4911563de5e51a6b1c", # -> 0.5317 5.609182e+06, flex, 6, 7 + 
"d64d58115a0c16a93b46a26541ea940c", # -> 0.5237 4.856726e+06, flex, 6, 6 + "33cc5a71d1e46710e53d58ae9e3fa7ec", # -> 0.5228 8.773018e+06, flex, 8, 25 + "4751c5518326351fc7d5a2d1e20ae00b", # -> 0.5224 9.387617e+06, flex, 8, 30 + "335513c169a9f64243badde924a39a8b", # -> 0.5224 2.997398e+06, flex, 4, 3 + "2d07af1c3560853dfc51e2e95e76b42f", # -> 0.5171 5.592623e+06, flex, 8, 8 + "37311b19ce9060557ab3c6fe57d2cb43", # -> 0.5166 2.736840e+06, flex, 6, 2 + "f13028523f1d5bd0c69d624f1ee6c85d", # -> 0.5116 3.993213e+06, flex, 6, 4 + "da98bfc8ea3478bb291828a5764d160c", # -> 0.5106 4.432343e+06, flex, 8, 5 + "473516dffe62ea86713392c3cbc2fab9", # -> 0.5092 5.323287e+06, flex, 8, 7 + "d5b85c422d79220101ba5f432ae51875", # -> 0.5078 5.704372e+06, flex, 6, 8 + "b24210b8f23f81d5041f2ec4bf47741c", # -> 0.5071 5.473601e+06, flex, 8, 7 + "1e1cc4492f0d9c28ba25c858687b311d", # -> 0.5062 8.548573e+06, flex, 6, 25 + "a18788d22bdc7da6982159ae318ea1f9", # -> 0.5056 2.637835e+06, flex, 4, 2 + "540fb846638db408db0a586efceec840", # -> 0.5044 2.465916e+06, flex, 4, 2 + "1b8efa9a01c3246c1e4642e8aa65da03", # -> 0.5031 6.534875e+06, flex, 6, 10 + "52fe89bc1409acc79ad0172460f6c46a", # -> 0.5011 8.336156e+06, flex, 6, 25 + "c27ebd89aa6f73f30849fa1c190e6458", # -> 0.5011 4.334441e+06, flex, 6, 5 + "adc2f6d7005c50c6237dc93869905211", # -> 0.4982 8.812724e+06, flex, 6, 25 + "8ff65102493822dc4fd81ab19d25e033", # -> 0.4962 6.517767e+06, flex, 8, 10 + "a45928491146a61ad7392180d03abf55", # -> 0.4955 8.809505e+06, flex, 6, 30 + "e54464fe08d22fd6bc218ad716299cd0", # -> 0.4940 8.181487e+06, flex, 6, 20 + "3ceb795754e7335c2a1f7a7d44eadd35", # -> 0.4912 6.295182e+06, flex, 8, 10 + "6c2880840091d1caac5cd166141f2327", # -> 0.4875 5.364793e+06, flex, 6, 7 + "98022e1420cb937e351850b1f56e96e6", # -> 0.4868 4.514491e+06, flex, 8, 5 + "2cce305ea6540c328c79447cb79a39bd", # -> 0.4805 2.725834e+06, flex, 8, 2 + "73cbba672e49d36f96fe434c00f5f3ab", # -> 0.4789 5.761759e+06, flex, 8, 8 + "c2fe04aec32d0290c6c2a3b0bc7f1bbb", # -> 0.4750 5.853028e+06, flex, 6, 8 + "3a1d5257cd243e9a896cfa0858b60450", # -> 0.4721 4.908726e+06, flex, 8, 6 + "c3e8bd05639786207a3e49041c5dd5a1", # -> 0.4717 5.403705e+06, flex, 6, 7 + "042c9b6b898e732c2531f960df4cc6b7", # -> 0.4584 8.971624e+06, flex, 8, 25 + "d40576261b8f2060ad2f904f8380e4ed", # -> 0.4571 8.335715e+06, flex, 8, 20 + "a498850d6b8debfcf5ae51d7d92aa488", # -> 0.4567 5.613264e+06, flex, 4, 7 + "fe356e97e433dfa1a4a6daa0cb533d34", # -> 0.4556 9.482259e+06, flex, 8, 30 + "09c8dd4e7f9645630608d2b335f4fd42", # -> 0.4534 4.556333e+06, flex, 8, 5 + "4faa5b579f938a5a9a9ef2c287f54434", # -> 0.4522 4.559044e+06, flex, 8, 5 + "00ae517067cbcd92ebfc024d843e480b", # -> 0.4504 7.803484e+06, flex, 8, 16 + "96214e322683ecb99c0033cac1a86ed2", # -> 0.4491 5.825073e+06, flex, 8, 8 + "7169526b3557e7f4567445ea2e1947a3", # -> 0.4452 6.415053e+06, flex, 8, 10 + "47c7e524532935287485da7a54b9bbd0", # -> 0.4451 8.338298e+06, flex, 6, 20 + "19bf11089cd1b489ad86b75937df27ec", # -> 0.4447 7.706494e+06, flex, 8, 16 + "1f9c17051e51383b24ecb81a4bef1329", # -> 0.4441 9.278590e+06, flex, 6, 30 + "e8ff6a262d0e6730fd6f55b141d9cfe4", # -> 0.4364 5.390991e+06, flex, 8, 7 + "b7a9ae280c031ad7088b16bf178d809c", # -> 0.4351 4.098193e+06, flex, 8, 4 + "fe0fd30a17fd52dba450c853536f5b27", # -> 0.4318 2.739286e+06, flex, 6, 2 + "130b771d9ee96b22a28f67bd3f6651fe", # -> 0.4305 5.336351e+06, flex, 8, 7 + "f2e3e46d510051393098cf95cd716f88", # -> 0.4247 5.679510e+06, flex, 4, 7 + "41f7646a8bb31efbb89764cdadca2eb2", # -> 0.4237 2.265290e+06, flex, 6, 3 + 
"cafe07776a80e8c84d020f0de156a195", # -> 0.4188 5.431191e+06, flex, 8, 7 + "6784e09e4909dff9b28d740415f1ec50", # -> 0.4179 4.478756e+06, flex, 8, 5 + "169a426b053dc7b9ece944dd1d83b578", # -> 0.4148 9.194707e+06, flex, 6, 30 + "0abf9d5e11b9f3898898bae71bcf6e57", # -> 0.4094 7.744605e+06, flex, 6, 16 + "e2498db25591d5e52723470a0953a725", # -> 0.3957 9.452671e+06, flex, 8, 30 + "d92c79c0a81190c5cc6cfc4791cf70b0", # -> 0.3940 5.638742e+06, flex, 4, 7 + "815f8290d3818251e4bd5a5bcb8acbd6", # -> 0.3918 2.825758e+06, flex, 8, 3 + "416b6139ca6b68d874851170c7e349ac", # -> 0.3868 4.204707e+06, flex, 4, 4 + "c9dc0f5cbde20b03fefe4aec336b2aa2", # -> 0.3783 8.269713e+06, flex, 8, 20 + "0401977b495423cf305d2aa624340d36", # -> 0.3770 6.460710e+06, flex, 6, 10 + "5c2ee969c48668d31e31d11cc78a491e", # -> 0.3744 2.824383e+06, flex, 8, 2 + "f98adfa7a6daf49060b19966c4600e71", # -> 0.3645 4.571965e+06, flex, 6, 5 + "3d96cfd6bb9ddb14f72a763a86ab8ca8", # -> 0.3631 8.342203e+06, flex, 6, 20 + "54a35622869c5c9990669c443c529344", # -> 0.3543 4.503398e+06, flex, 6, 5 + "77f800cbafb0f25cda2a0355d419b6d0", # -> 0.3541 4.583585e+06, flex, 6, 5 + "176307c4bddce3f26248e089354bba0d", # -> 0.3512 5.600199e+06, flex, 4, 7 + "37575e50ef4c718b4c7baab7a41f4735", # -> 0.3509 4.371062e+06, flex, 8, 5 + "7889fe17fa9e15fbe7c872dc30c80d81", # -> 0.3495 8.984966e+06, flex, 8, 25 + "1c0972c2b3b622621ff7ae3b223ee164", # -> 0.3464 8.355320e+06, flex, 8, 20 + "25cce8bbc45c7f187800a9ce1220ed3a", # -> 0.3413 5.552800e+06, flex, 8, 7 + "6a45374e3aeb7df392fd36e22cee190a", # -> 0.3377 2.857394e+06, flex, 8, 2 + "217c00c772cb2980409890dede2b9a41", # -> 0.3345 5.447785e+06, flex, 4, 7 + "42d8a1cc1266731836e3b2742469d8d8", # -> 0.3250 7.750992e+06, flex, 6, 16 + "c7eb6de3357c58485a4111f365becb22", # -> 0.3134 6.044683e+06, flex, 4, 10 + "71aa4790a9cdc70c8d86c511fe730540", # -> 0.3071 8.114960e+06, flex, 8, 20 + "3bccd919a7bd3faeecb8f6165f0cb554", # -> 0.3049 2.827845e+06, flex, 6, 2 + "00ad8a02b539bbcb40b4f9556d385d21", # -> 0.2639 9.342941e+06, flex, 6, 30 + "48f8c6c676af256d5ac71f5124956b4a", # -> 0.2589 5.430239e+06, flex, 8, 7 + "1e0e97bf6f7283f8822111174bf44875", # -> 0.2543 4.367211e+06, flex, 6, 5 + "13b4e28a3c2adb02ebd26c6553322b77", # -> 0.2511 4.777519e+06, flex, 4, 5 + "1dfb5b20677ca85df1e6e0029ed5ead3", # -> 0.2366 8.310871e+06, flex, 6, 20 + "665f50e233c5e1ebfb99483684ee38b7", # -> 0.2264 5.563939e+06, flex, 6, 7 + "cb80d372e2df52f2541a75f3e3ab6fa3", # -> 0.1000 4.206023e+06, flex, 4, 6 + "4a38650f02cc8de4cbd89d3e3e811ff1", # -> 0.1000 4.080933e+06, flex, 4, 6 + "9948a9597f24813d95a0bc76c153d0f8", # -> 0.1000 3.957814e+06, flex, 4, 4 + "03d1bf347fc86923a4cd10126996565c", # -> 0.1000 3.934121e+06, flex, 4, 6 + "342438e8ee1a31ea6a21f1d03c93c0e7", # -> 0.1000 5.201594e+06, flex, 4, 7 + "3e797beb9ac8298f7826f65453fa3a2f", # -> 0.1000 2.953513e+06, flex, 4, 2 + "367fb3948c6f69886ec3028a8360dc65", # -> 0.6659, 4.209346e+06, flex, 6, 4 + "2e2ccc15724987b696acfdc21bf47a51", # -> 0.6503, 7.269416e+06, flex, 6, 16 + "bc15239e6d2c175a0334f61af9848739", # -> 0.6494, 5.148289e+06, flex, 4, 6 + "716f7759cf91aa844d7000045b1eb600", # -> 0.6476, 2.741280e+06, flex, 8, 3 + "3c5f7b70f075fe8462c635153dada47d", # -> 0.6303, 5.007099e+06, flex, 6, 6 + "8f0f60a3e18b7a24642cd76ed327b9a1", # -> 0.6295, 5.785277e+06, flex, 6, 8 + "531d8b20ad7953e37f974f7d5b45e01c", # -> 0.6267, 5.382755e+06, flex, 4, 8 + "57d1dc4ee01cee3cf0385670e1741322", # -> 0.6211, 2.991667e+06, flex, 4, 3 + "05f70440f96aaf900cc613fb6a38610c", # -> 0.6092, 6.469393e+06, flex, 6, 10 + 
"9b86d4910bd95fb6d262c5e5983c14ad", # -> 0.6061, 4.349474e+06, flex, 4, 4 + "17b5fc2623904c1c66093c08e1b79f27", # -> 0.6027, 6.106181e+06, flex, 4, 10 + "e67bd735524454b8a261787dcf2a47eb", # -> 0.6000, 3.895603e+06, flex, 6, 4 + "fbe458505010a51dfc97863cfa4725ca", # -> 0.5971, 6.166594e+06, flex, 4, 10 + "56c8a5c06806146647506acf32634294", # -> 0.5963, 6.381624e+06, flex, 6, 10 + "0e8ab6e96cf978db32c2d7b7699cfe7f", # -> 0.5901, 2.823915e+06, flex, 8, 3 + "f0cd6ddce67c86d57bf5661a26b5bde2", # -> 0.5823, 4.080290e+06, flex, 8, 4 + "9e9f38b272165d42a0528ebdf9027f1c", # -> 0.5815, 3.174854e+06, flex, 4, 3 + "4cc609a2bb8c5efa6791c32ea349850a", # -> 0.5778, 5.486881e+06, flex, 4, 8 + "33eeff2627c8868d784639522b1eb4f1", # -> 0.5747, 2.619111e+06, flex, 8, 3 + "7aec249371301c815c3eead824c6f7b1", # -> 0.5738, 7.557755e+06, flex, 6, 16 + "abc9e0d7b65f1070bea9dfafa919f90f", # -> 0.5725, 5.817821e+06, flex, 6, 8 + "90a9560d3a79b5ae23440476b04d9561", # -> 0.5704, 6.553902e+06, flex, 8, 10 + "83973c46451c905ae0352fea31cd0df8", # -> 0.5676, 7.421917e+06, flex, 8, 16 + "6a405a2cf4eb6ece8241296f29b52aad", # -> 0.5637, 2.873761e+06, flex, 6, 3 + "bcaf7d4fe4b1a83c3d16efe2f6e4d967", # -> 0.5615, 5.767932e+06, flex, 8, 8 + "d86af65d32bbb56f2eca4d54a8bdaeeb", # -> 0.5529, 7.608056e+06, flex, 8, 16 + "a782864f124987e758d7911279c2b7cb", # -> 0.5522, 2.935374e+06, flex, 6, 3 + "57a7e8afe58c32f72af8ea86cd18ddbb", # -> 0.5511, 4.148061e+06, flex, 6, 4 + "0dbadd053f8161566f8bfd04e29f167b", # -> 0.5500, 3.121372e+06, flex, 4, 3 + "03b75598bd4a4799823c4ac80dad72d1", # -> 0.5483, 7.589383e+06, flex, 6, 16 + "77093fa86e3ac8b23d13065d8645d019", # -> 0.5458, 6.490005e+06, flex, 8, 10 + "71d583b85d081e1e93bd75b836c83de6", # -> 0.5402, 2.819020e+06, flex, 8, 2 + "36d5275416ed73b04c6952c4276c083d", # -> 0.5292, 4.092483e+06, flex, 8, 4 + "b5bceb9302a3df3a2771deb7d0edd4c8", # -> 0.5222, 7.549662e+06, flex, 8, 16 + "36d35f39f535e4aaa9cb5da438c8d903", # -> 0.5154, 2.768778e+06, flex, 6, 2 + "a519ef258d54600ea6920c4788a6280b", # -> 0.5086, 2.802222e+06, flex, 8, 2 + "513a1c16282dec9f124b9257bbb28177", # -> 0.5075, 5.524247e+06, flex, 4, 8 + "8894cc71801b352192f5759cf887d6e4", # -> 0.5066, 2.754725e+06, flex, 6, 2 + "1940550b5b9a07ce68968093723ef697", # -> 0.5053, 6.330552e+06, flex, 8, 10 + "85444b910411afd5676269013fce39e6", # -> 0.4950, 2.536435e+06, flex, 4, 2 + "1a704da76414aa5c50b9316544f50364", # -> 0.4802, 2.524790e+06, flex, 4, 2 + "4b746c04189fe0077f7938802ac24dd4", # -> 0.4729, 3.983884e+06, flex, 8, 4 + "7567c93fa1089f5cedc348292372ceb1", # -> 0.4702, 2.734160e+06, flex, 8, 2 + "644bad7c98a5b9cb6985cb97d011c8f3", # -> 0.4542, 4.932631e+06, flex, 8, 6 + "d38eaafd8b59ce694bd77c959b2c1e68", # -> 0.4036, 5.795405e+06, flex, 8, 8 + "f99a35d5bbf685c7089d7519291b5c0f", # -> 0.3773, 2.633205e+06, flex, 4, 2 + "ff48b26827e51e5a9e98b5e73ced1c98", # -> 0.3749, 4.967528e+06, flex, 8, 6 + "b9815d8e206f5e7e7651761c998283e2", # -> 0.3673, 5.903986e+06, flex, 6, 8 + "2e2c5d82931665495de2f92f4af6a095", # -> 0.3622, 2.794733e+06, flex, 6, 2 + "dbdb4f07f3f92b592762aec1a402b14f", # -> 0.3552, 2.728159e+06, flex, 6, 3 + "f33fc387a8c608886559ac95b3e4f7ce", # -> 0.3458, 4.753085e+06, flex, 6, 6 + "66f7bfa092d65404f9a704e02c6e6f77", # -> 0.3375, 4.760287e+06, flex, 8, 6 + "1af4b0115d6a08698fbfb4114a770489", # -> 0.3331, 6.535710e+06, flex, 6, 10 + "0f3f929d0d9b0328f5e93ce11ea73b76", # -> 0.1000, 3.965023e+06, flex, 4, 4 + "049dca947754753b7be1882fa3e5f0df", # -> 0.1000, 3.978452e+06, flex, 4, 6 + "a8facc8acb1e1e2af5009a2d107a1c74", # -> 
0.1000, 4.142553e+06, flex, 4, 6 + "14ec0f67754688eb30f47f432595d4fa", # -> 0.1000, 4.130849e+06, flex, 4, 4 + "478535a7bbb120ca3eee26c64c1a6fd6", # -> 0.6697, 2.715756e+06, flex, 6, 2 + "9d3cdf362377b77f2769cddfc2f06cf2", # -> 0.6670, 2.802477e+06, flex, 8, 3 + "8f70e90c6769dd3c9f44402acfec3606", # -> 0.1000, 3.948141e+06, flex, 4, 6 + "58756c3729dcedfeeab0fea628634e05", # -> 0.1000, 4.042809e+06, flex, 4, 4 + "ec39ccf1998a61cf388b6c64cf53e534", # -> 0.1000, 4.040190e+06, flex, 4, 6 + "7c93ffb859c056bb8d58a57ea731a5b6", # -> 0.1000, 3.956951e+06, flex, 4, 6 + "5c58106ae695dc7842cee914198213ce", # -> 0.1000, 3.999102e+06, flex, 4, 4 + "66eb61bd460185ebb3a0e7e0903998d8", # -> 0.1000, 3.915104e+06, flex, 4, 4 + "c26c75797cc5698524c128b2c03d71ad", # -> 0.6699, 4.936011e+06, flex, 6, 6 + "1b5f11c64a41bebe6ecf86c89aec78e7", # -> 0.6521, 2.760852e+06, flex, 6, 2 + "e8e38b72f113ae04a4b26c4c262d7fd2", # -> 0.6504, 4.038943e+06, flex, 6, 4 + "936c04b9a6f211742285c459cc366134", # -> 0.6487, 2.625230e+06, flex, 8, 2 + "654b17e55ed66715c84161e8f7dbec28", # -> 0.1000, 4.271655e+06, flex, 4, 4 + "e5509196455a82fed7f194fd53f0e4c3", # -> 0.1000, 3.883347e+06, flex, 4, 6 + "739b8f45159088a1cbfff30f92e29a59", # -> 0.5141, 2.406360e+06, flex, 6, 3 + "fe398b8e7dfdc1cb8c71d2183a84de07", # -> 0.1000, 3.996849e+06, flex, 4, 4 + "c8a4f191f560ea10931775d0a8320c08", # -> 0.1000, 3.959836e+06, flex, 4, 6 + "04ccc689dc66e77aa8ecd339281c5ff0", # -> 0.1000, 4.092232e+06, flex, 4, 6 + "bab2a48cbef368310b076926701822a3", # -> 0.1000, 4.103027e+06, flex, 4, 4 + } + experiments = [ + exp for exp in experiments if not any(h in blacklist for h in exp) + ] + + print("After blacklisting, keeping", len(experiments), "experiments:") + for exp in experiments: + print(" → ".join(exp)) + + # 1) build a lookup: hash → flat dict + rec_map = {rec["hash"]: rec for rec in flat_records} + + # 2) for each experiment, merge the stages side-by-side, prefixing with the stage name + experiments_data = [] + for exp in experiments: + combined = {} + for h in exp: + rec = rec_map[h] + stage = rec["name"] + # prefix every field (including hash, loss, accuracy, nested flattened keys…) + for col, val in rec.items(): + combined[f"{stage}_{col}"] = val + experiments_data.append(combined) + + # 3) make a DataFrame + experiments_df = pd.DataFrame(experiments_data) + print(experiments_df.columns) + print( + experiments_df[ + [ + "qat_accuracy", + "qat_complexity", + "qat_hash", + "quantization_parameters_kernel_1_type", + "quantization_parameters_kernel_1_bits", + "quantization_parameters_kernel_1_n_levels", + ] + ].sort_values(by=["qat_accuracy"], ascending=False) + ) + + to_drop = [ + "model_creation*", + "freeze*", + "*function", + "*activations_*", + "*bias*", + "*input_shape*", + "*_0_*", + "*_2_*", + "*_3_*", + "*_name", + "*_hash", + "model_creation_seed", + "quantization_seed", + "qat_seed", + "*dataset", + "*_categories", + "*_epochs", + "*_batch_size", + "*_learning_rate", + "*_validation_split", + "quantization_complexity", + "qat_parameters_early_stopping", + ] + # find all columns matching any pattern + cols_to_drop = [ + col + for col in experiments_df.columns + if any(fnmatch.fnmatch(col, pat) for pat in to_drop) + ] + experiments_df = experiments_df.drop(columns=cols_to_drop) + + rename_map = { + "name": "stage", + "quantization_parameters_kernel_1_type": "type", + "quantization_parameters_kernel_1_bits": "bits", + "quantization_parameters_kernel_1_n_levels": "n_levels", + "initial_training_seed": "seed", + } + experiments_df = 
experiments_df.rename(columns=rename_map) + experiments_df = experiments_df.dropna() + + experiments_df["qat_complexity"] = ( + experiments_df["qat_complexity"] / 1024 + ) # Convert to Kbits + # print( + # experiments_df.sort_values(by=["qat_accuracy_mean", "qat_complexity_mean"], ascending=False) + # ) + # print(experiments_df.sort_values(by=["qat_complexity_mean"], ascending=False)) + + original_accuracy_mean = experiments_df["initial_training_accuracy"].mean() + original_complexity_mean = ( + experiments_df["initial_training_complexity"].mean() / 1024 + ) # in Kbits + original_accuracy_var = experiments_df["initial_training_accuracy"].var() + original_complexity_var = experiments_df[ + "initial_training_complexity" + ].var() + original_accuracy_sd = np.sqrt(original_accuracy_var) + original_complexity_var_kbits = original_complexity_var / (1024**2) + original_complexity_sd = np.sqrt(original_complexity_var_kbits) + n = experiments_df["initial_training_accuracy"].count() + original_accuracy_se = original_accuracy_sd / np.sqrt(n) + original_complexity_se = original_complexity_sd / np.sqrt(n) + original_accuracy_min = experiments_df["initial_training_accuracy"].min() + original_complexity_min = experiments_df[ + "initial_training_complexity" + ].min() + original_accuracy_max = experiments_df["initial_training_accuracy"].max() + original_complexity_max = experiments_df[ + "initial_training_complexity" + ].max() + + # 1) Identify the metrics and the hyperparam columns to group by + metrics = ["qat_loss", "qat_accuracy", "qat_complexity"] + hyperparam_cols = [ + col + for col in experiments_df.columns + if col in ("type", "bits", "n_levels") + ] + + # 2) Group by those hyperparams, compute mean & var of the metrics + stats = ( + experiments_df.groupby(hyperparam_cols)[metrics] + .agg(["mean", "var", "max", "min"]) + .reset_index() + ) + + # 3) Flatten the resulting MultiIndex columns + stats.columns = [ + f"{lvl0}_{lvl1}" if lvl1 else lvl0 for lvl0, lvl1 in stats.columns + ] + + counts = ( + experiments_df.groupby(hyperparam_cols)[metrics].count().reset_index() + ) + count_cols = [f"{m}_count" for m in metrics] + counts.columns = hyperparam_cols + count_cols + row_uniques = counts[count_cols].nunique(axis=1) + assert (row_uniques == 1).all(), "Not all metric‐counts agree per group!" 
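+ # The per-group standard errors computed just below use the usual estimate
+ # se = sqrt(sample variance) / sqrt(n), where n is the number of runs in each
+ # (type, bits, n_levels) group merged in via `counts`.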
+ counts["count"] = counts[count_cols[0]] + counts = counts[hyperparam_cols + ["count"]] + stats = stats.merge(counts, on=hyperparam_cols, how="left") + + # 3) Compute standard deviations and errors + stats["se_accuracy"] = np.sqrt(stats["qat_accuracy_var"]) / np.sqrt( + stats["count"] + ) + stats["se_complexity"] = np.sqrt(stats["qat_complexity_var"]) / np.sqrt( + stats["count"] + ) + + print( + stats.sort_values( + by=["qat_accuracy_mean", "qat_complexity_mean"], ascending=False + ) + ) + print(stats.sort_values(by=["qat_complexity_mean"], ascending=False)) + + cmap = mpl.colormaps["tab10"] + xmin = stats["qat_complexity_mean"].min() * 0.9 + xmax = stats["qat_complexity_mean"].max() * 1.1 + xmax = original_complexity_mean * 1.1 + + plt.figure(figsize=(6, 4)) + plt.scatter( + original_complexity_mean, + original_accuracy_mean, + color="red", + label="Original Model", + zorder=3, + ) + bits_values = experiments_df["bits"].unique() + for i, bits in enumerate(sorted(bits_values)): + subset = experiments_df[experiments_df["bits"] == bits].sort_values( + "qat_complexity" + ) + # 1) dashed line only, semi-transparent + plt.semilogx( + subset["qat_complexity"], + subset["qat_accuracy"], + linestyle="--", + color=cmap(i % 10), + alpha=0.3, + zorder=2, + label=None, # we’ll label in the marker call + ) + # 2) opaque markers on top, with label + plt.scatter( + subset["qat_complexity"], + subset["qat_accuracy"], + marker=".", + color=cmap(i % 10), + zorder=3, + label=f"{bits} bits", + ) + add_horizontal_se_band( + plt.gca(), + original_accuracy_mean, + original_accuracy_se, + color="red", + ) + plt.xlabel("Quantized Complexity (Kbits)") + plt.ylabel("Quantized Accuracy") + plt.ylim([0.6, 0.75]) + plt.xlim([xmin, xmax]) + plt.grid(which="both", linestyle="--", linewidth=0.5, alpha=0.6) + plt.legend() + plt.tight_layout() + plt.savefig("complexity_vs_quantized_flex.png", dpi=150) + + # 2) sort bits for consistent coloring + plt.figure(figsize=(6, 4)) + plt.scatter( + original_complexity_mean, + original_accuracy_mean, + color="red", + label="Original Model", + zorder=3, + ) + bits_list = sorted(stats["bits"].unique()) + for i, bits in enumerate(bits_list): + grp = stats[stats["bits"] == bits].sort_values("qat_complexity_mean") + # 1) draw only the dashed line + errorbars (no markers) + plt.errorbar( + grp["qat_complexity_mean"], + grp["qat_accuracy_mean"], + xerr=grp["se_complexity"], + yerr=grp["se_accuracy"], + fmt="--", # just the line + color=cmap(i % 10), + ecolor=cmap(i % 10), + alpha=0.3, + capsize=3, + zorder=2, + ) + # 2) draw the opaque markers on top + plt.scatter( + grp["qat_complexity_mean"], + grp["qat_accuracy_mean"], + marker=".", + s=30, + color=cmap(i % 10), + label=f"{bits} bits", + zorder=3, + ) + add_horizontal_se_band( + plt.gca(), + original_accuracy_mean, + original_accuracy_se, + color="red", + ) + plt.xscale("log") + plt.xlabel("Quantized Complexity (Kbits)") + plt.ylabel("Quantized Accuracy") + plt.ylim([0.6, 0.75]) + plt.xlim([xmin, xmax]) + plt.grid(which="both", linestyle="--", linewidth=0.5, alpha=0.6) + plt.legend() + plt.tight_layout() + plt.savefig("complexity_vs_quantized_flex_stats.png", dpi=150) + + plt.figure(figsize=(6, 4)) + plt.scatter( + original_complexity_mean, + original_accuracy_mean, + color="red", + label="Original Model", + zorder=3, + ) + bits_list = sorted(stats["bits"].unique()) + for i, bits in enumerate(bits_list): + grp = stats[stats["bits"] == bits].sort_values("qat_complexity_mean") + plt.plot( + grp["qat_complexity_mean"], + 
grp["qat_accuracy_mean"], + marker=".", + linestyle="--", + color=cmap(i % 10), + label=f"{bits} bits", + zorder=3, + ) + plt.plot( + grp["qat_complexity_mean"], + grp["qat_accuracy_max"], + linestyle="-", + color=cmap(i % 10), + alpha=0.5, + zorder=2, + ) + plt.plot( + grp["qat_complexity_mean"], + grp["qat_accuracy_min"], + linestyle=":", + color=cmap(i % 10), + alpha=0.5, + zorder=2, + ) + add_horizontal_se_band( + plt.gca(), + original_accuracy_mean, + original_accuracy_se, + color="red", + ) + plt.xscale("log") + plt.xlabel("Quantized Complexity (Kbits)") + plt.ylabel("Quantized Accuracy") + plt.ylim([0.6, 0.75]) + plt.xlim([xmin, xmax]) + plt.grid(which="both", linestyle="--", linewidth=0.5, alpha=0.6) + plt.legend() + plt.tight_layout() + plt.savefig("complexity_vs_quantized_flex_stats2.png", dpi=150) diff --git a/src/stage/stage.py b/src/stage/stage.py new file mode 100644 index 0000000..f307838 --- /dev/null +++ b/src/stage/stage.py @@ -0,0 +1,217 @@ +# stage + +from __future__ import annotations + +import functools +import hashlib +import json +import time +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import Any, Callable, Dict, Optional, Tuple + +import tensorflow as tf +from functions import load_data + +from configs.serialization.serialization import load_qmodel, save_qmodel +from utils.metrics import compute_space_complexity_model + +# This shouldnt be here. + + +@dataclass(frozen=True) +class StageConfig: + """Holds all parameters that uniquely define a stage's output. + + This entire object is hashed to create a unique ID for the stage's result. + """ + + name: str + function: str + seed: int + parameters: Dict[str, Any] + previous_hash: Optional[str] = None + + def to_hash(self) -> str: + """Generates a unique hash for this configuration.""" + # asdict converts the dataclass to a dictionary. + # sort_keys ensures the hash is consistent. + config_str = json.dumps(asdict(self), sort_keys=True) + return hashlib.md5(config_str.encode()).hexdigest() + + +class Stage: + def __init__( + self, + function: Callable, + initial_config: Dict[str, Any], # We'll start with a dict + checkpoint_path: Optional[Path] = None, + metadata_path: str = "metadata", + ): + self.function = function + self.initial_config = initial_config + self.config: StageConfig = None # Will be set at runtime + self.hash: str = None # Will be set at runtime + self.loss = None # The loss after running the stage + self.accuracy = None # The accuracy after running the stage + self.complexity = None # The complexity after running the stage + checkpoint_path = checkpoint_path or Path("checkpoints") + checkpoint_path.mkdir(parents=True, exist_ok=True) + self.artifacts_path = checkpoint_path / "artifacts" + self.artifacts_path.mkdir(parents=True, exist_ok=True) + self.config_path = checkpoint_path / metadata_path + self.config_path.mkdir(parents=True, exist_ok=True) + self.model = None # The model after running the stage + + def _save_metadata(self): + """Saves the current stage configuration to a JSON file. + + This is useful for debugging and traceability. + """ + if self.config is None: + raise ValueError("StageConfig is not set. 
Run the stage first.") + + metadata_path = self.config_path / f"{self.config.name}.json" + config_dict = asdict(self.config) + config_dict["accuracy"] = self.accuracy + config_dict["loss"] = self.loss + config_dict["complexity"] = self.complexity + config_dict["hash"] = self.hash + with metadata_path.open("w") as f: + json.dump(config_dict, f, indent=2) + print(f"Configuration saved to '{metadata_path}'") + + def _save_model(self): + """Saves the model to a file using the unique hash as the filename. + + This is useful for traceability and caching. + """ + if self.hash is None: + raise ValueError("Hash is not set. Run the stage first.") + + model_path = self.artifacts_path / f"{self.hash}" + save_qmodel(self.model, model_path) + print(f"Model saved to '{model_path}'") + + def save(self): + """Saves the model and its configuration to disk. + + This is useful for traceability and caching. + """ + if self.config is None: + raise ValueError("StageConfig is not set. Run the stage first.") + + self._save_metadata() + self._save_model() + + def load(self, hash: str): + """Loads the model and its configuration from disk.""" + model_path = self.artifacts_path / hash + if not model_path.exists(): + raise FileNotFoundError(f"Model file not found: {model_path}") + + self.model = load_qmodel(model_path) + + def run( + self, + input_model: Optional[tf.keras.Model], + previous_hash: Optional[str] = None, + ) -> Tuple[tf.keras.Model, str]: + """Runs the stage with full traceability and caching. + + Returns the resulting model AND its unique hash. + """ + start_time = time.time() + + # 1. Create the final, traceable config for this run + self.config = StageConfig( + name=self.initial_config["name"], + seed=self.initial_config.get("seed", int(time.time())), + function=( + self.function.__name__ + if not isinstance(self.function, functools.partial) + else self.function.func.__name__ + ), + parameters=self.initial_config["kwargs"], + previous_hash=previous_hash, + ) + + # 2. Generate the unique hash for this specific configuration + self.hash = self.config.to_hash() + + print(f"--- Running Stage({self.config.name}) ---") + print(f" Hash: {self.hash}") + print(f" Depends on: {self.config.previous_hash}") + + try: + self.load(self.hash) + except FileNotFoundError as e: + print(f" Checkpoint NOT FOUND. {e} Executing function...") + self.model = self.function( + model=input_model, **self.config.parameters + ) + self._save_model() + # Evaluate the model if a dataset is provided in the parameters + dataset = self.config.parameters.get("dataset", None) + if dataset is not None: + self.loss, self.accuracy = self.evaluate(load_data(dataset)) + # Compute the complexity of the model + self.complexity = self.compute_complexity() + + self._save_metadata() + + print(f"--- Stage finished in {time.time() - start_time:.2f}s ---\n") + + # 5. Return both the model and its hash to the orchestrator + return self.model, self.hash + + def evaluate(self, data): + # After loading it is not compiled I think... 
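+ # Models restored via load_qmodel may come back without a compiled
+ # optimizer/loss state, so recompile with the training loss and metrics
+ # before calling evaluate().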
+ self.model.compile( + optimizer="adam", + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + loss, accuracy = self.model.evaluate( + data["x_test"], data["y_test"], verbose=0 + ) + print("Evaluation results:") + print(f"Loss: {loss:.4f}, Accuracy: {accuracy:.4f}") + return loss, accuracy + + def compute_complexity(self): + complexity = compute_space_complexity_model(self.model) + print("Space complexity of the model:") + print(complexity) + return complexity + + +class Pipeline: + def __init__(self, stages: list[Stage]): + self.stages = stages + + def add(self, stage: Stage): + """Adds a new stage to the pipeline.""" + self.stages.append(stage) + + def remove(self, stages_names: list[str] | str): + """Removes stages by their names.""" + if isinstance(stages_names, str): + stages_names = [stages_names] + self.stages = [ + stage + for stage in self.stages + if stage.config.name not in stages_names + ] + + def run(self, input_model: Optional[tf.keras.Model] = None): + """Runs the entire pipeline, passing the model from one stage to the + next.""" + previous_hash = None + current_model = input_model + + for stage in self.stages: + current_model, previous_hash = stage.run( + input_model=current_model, previous_hash=previous_hash + ) + + return current_model diff --git a/src/stage/stagev2.py b/src/stage/stagev2.py new file mode 100644 index 0000000..cf7bfde --- /dev/null +++ b/src/stage/stagev2.py @@ -0,0 +1,181 @@ +from __future__ import annotations + +import hashlib +import json +from dataclasses import asdict, dataclass +from functools import partial +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Tuple + +import tensorflow as tf +from functions import load_data + +from configs.serialization.serialization import load_qmodel, save_qmodel +from utils.metrics import compute_space_complexity_model + + +@dataclass +class StageMetadata: + name: str + seed: int + function: Callable + parameters: Dict[str, Any] + previous_hash: Optional[str] = None + + def to_hash(self) -> str: + stage_metadata_dict = asdict(self) + stage_metadata_dict["function"] = ( + self.function.__name__ + if not isinstance(self.function, partial) + else self.function.func.__name__ + ) + config_str = json.dumps(stage_metadata_dict, sort_keys=True) + return hashlib.md5(config_str.encode()).hexdigest() + + def save(self, directory_path: Path): + file_path = directory_path / f"{self.to_hash()}.json" + stage_metadata_dict = asdict(self) + stage_metadata_dict["function"] = ( + self.function.__name__ + if not isinstance(self.function, partial) + else self.function.func.__name__ + ) + with open(file_path, "w") as f: + json.dump(stage_metadata_dict, f, indent=4) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> StageMetadata: + return cls( + name=data["name"], + seed=data["seed"], + function=data["function"], + parameters=data["parameters"], + previous_hash=data.get("previous_hash"), + ) + + +class Stage: + def __init__( + self, metadata: StageMetadata, store_path: Optional[Path] = None + ): + self.metadata = metadata + self.hash = metadata.to_hash() + + self.store_path = store_path + self.artifacts_path = store_path / "artifacts" + self.model_path = self.artifacts_path / self.hash + self.metadata_directory_path = store_path / "metadata" + self.results_path = store_path / f"results/{self.hash}.json" + self.artifacts_path.mkdir(parents=True, exist_ok=True) + self.metadata_directory_path.mkdir(parents=True, exist_ok=True) + self.results_path.parent.mkdir(parents=True, exist_ok=True) + + def
is_model_stored(self) -> bool: + """Check if the model for this stage is already stored.""" + return self.model_path.exists() + + def load_model(self) -> tf.keras.Model: + return load_qmodel(self.model_path) + + def save_model(self, model: tf.keras.Model) -> None: + """Saves the model to the artifacts path.""" + save_qmodel(model, self.model_path) + + def evaluate_model( + self, model: tf.keras.Model, dataset: str + ) -> Tuple[float, float]: + """Evaluates the model on the given dataset.""" + data = load_data(dataset) + loss, accuracy = model.evaluate( + data["x_test"], data["y_test"], verbose=0 + ) + return loss, accuracy + + def compute_complexity(self, model: tf.keras.Model) -> Dict[str, Any]: + return compute_space_complexity_model(model) + + def generate_results( + self, model: tf.keras.Model, dataset: Optional[str] + ) -> Dict[str, Any]: + """Generates results for the model, including loss, accuracy, and + complexity.""" + results = {} + if dataset is not None: + loss, accuracy = self.evaluate_model(model, dataset) + results["loss"] = loss + results["accuracy"] = accuracy + complexity = self.compute_complexity(model) + results["complexity"] = complexity + return results + + def run( + self, input_model: Optional[tf.keras.Model] = None + ) -> Tuple[tf.keras.Model, str]: + if self.metadata.seed is not None: + tf.random.set_seed(self.metadata.seed) + self.metadata.save(self.metadata_directory_path) + + if self.is_model_stored(): + print(f"Loading model from {self.model_path}") + model = self.load_model() + else: + print( + f"Model not found at {self.model_path}, running stage function." + ) + model = self.metadata.function( + model=input_model, **self.metadata.parameters + ) + self.save_model(model) + + # Save results if they don't already exist + if not (self.results_path).exists(): + results = self.generate_results( + model, self.metadata.parameters.get("dataset") + ) + with (self.results_path).open("w") as f: + json.dump(results, f, indent=4) + + return model, self.hash + + +class Pipeline: + def __init__( + self, + name: str, + stage_definitions: List[StageMetadata], + store_path: Optional[Path] = None, + ): + self.name = name + self.stage_definitions = stage_definitions + self.store_path = store_path or Path("checkpoints") + self.pipeline_metadata_path = self.store_path / "pipelines" + self.pipeline_metadata_path.mkdir(parents=True, exist_ok=True) + self.hash_history = [] + + def run(self) -> Tuple[tf.keras.Model, str]: + previous_stage_hash: Optional[str] = None + current_model: Optional[tf.keras.Model] = None + + for i, stage_def in enumerate(self.stage_definitions): + # Workaround for a particular case + print("--- Running stage:", stage_def.name, "----") + final_metadata = StageMetadata( + name=stage_def.name, + seed=stage_def.seed, + function=stage_def.function, + parameters=stage_def.parameters, + previous_hash=previous_stage_hash, + ) + current_stage = Stage( + metadata=final_metadata, store_path=self.store_path + ) + + current_model, stage_hash = current_stage.run( + input_model=current_model + ) + self.hash_history.append(stage_hash) + + previous_stage_hash = stage_hash + + with open(self.pipeline_metadata_path / f"{self.name}.json", "w") as f: + json.dump({"history": self.hash_history}, f) diff --git a/src/utils/metrics.py b/src/utils/metrics.py index f7ebcbd..a5ac6d6 100644 --- a/src/utils/metrics.py +++ b/src/utils/metrics.py @@ -30,8 +30,7 @@ def compute_space_complexity_quantize(qlayer: QuantizeWrapperV2) -> float: if isinstance(quantizer, UniformQuantizer): 
weight_size = weight.shape.num_elements() * quantizer.bits elif isinstance(quantizer, FlexQuantizer): - qweight = quantizer.quantize_op(weight) - weight_size = compute_huffman_nominal_complexity(qweight) + weight_size = compute_huffman_nominal_complexity(weight) weight_size += quantizer.n_levels * quantizer.bits else: raise ValueError(f"Unknown quantizer type: {type(quantizer)}") @@ -42,7 +41,7 @@ def compute_space_complexity_quantize(qlayer: QuantizeWrapperV2) -> float: def compute_space_complexity(layer): """Compute the space complexity for a normal layer.""" - total_layer_size = 0 + total_layer_size = 0.0 for weight in layer.weights: weight_size = ( 8 * weight.dtype.size * weight.shape.num_elements() @@ -55,7 +54,10 @@ def compute_space_complexity(layer): def compute_space_complexity_model(model: tf.keras.Model) -> float: """Compute the uniform space complexity of a model based on its quantization configuration.""" - total_space_complexity = 0 + total_space_complexity = 0.0 + + # Make an inference to ensure the model is built + model(tf.random.normal((1,) + model.input_shape[1:])) for layer in model.layers: if isinstance(layer, QuantizeWrapperV2): diff --git a/src/utils/metrics_integration_test.py b/src/utils/metrics_integration_test.py index a631980..53719d8 100755 --- a/src/utils/metrics_integration_test.py +++ b/src/utils/metrics_integration_test.py @@ -9,6 +9,7 @@ from configs.qmodel import apply_quantization from configs.serialization.serialization import load_qmodel, save_qmodel +from quantizers.flex_quantizer import FlexQuantizer from quantizers.uniform_quantizer import UniformQuantizer from utils.metrics import compute_space_complexity_model @@ -75,6 +76,67 @@ def test_save_and_load_model(self): compute_space_complexity_model(loaded_model), ) + def test_save_and_load_model_flex(self): + """Test saving and loading a model with metrics.""" + model = tf.keras.Sequential( + [ + tf.keras.layers.Dense(10, input_shape=(5,), name="dense_1"), + tf.keras.layers.Dense(5, name="dense_2"), + ] + ) + + qconfig = { + "dense_1": { + "weights": { + "kernel": FlexQuantizer(bits=4, n_levels=5, signed=True), + "bias": FlexQuantizer(bits=4, n_levels=4, signed=True), + }, + }, + "dense_2": { + "weights": { + "kernel": FlexQuantizer(bits=4, n_levels=5, signed=True), + "bias": FlexQuantizer(bits=4, n_levels=4, signed=True), + }, + }, + } + qmodel = apply_quantization(model, qconfig) + qmodel.build((None, 5)) + + tmpdir = tempfile.mkdtemp() + save_qmodel(qmodel, tmpdir) + loaded_model = load_qmodel(tmpdir) + # make an inference to ensure the model is loaded correctly + loaded_model(tf.random.normal((1, 5))) + + original_weights = {w.name: w.numpy() for w in qmodel.weights} + loaded_weights = {w.name: w.numpy() for w in loaded_model.weights} + + # First, check that the set of weight names is identical + self.assertEqual( + set(original_weights.keys()), + set(loaded_weights.keys()), + "Models have different sets of weight names.", + ) + + # Now, compare each weight tensor by name + for name, orig_w in original_weights.items(): + loaded_w = loaded_weights[name] + # print(f"Comparing weight tensor: {name}") + # print(f"Weights: {orig_w}") + # print(f"Loaded: {loaded_w}") + np.testing.assert_allclose( + orig_w, + loaded_w, + rtol=1e-6, + atol=1e-6, + err_msg=f"Weight tensor '{name}' differs.", + ) + + self.assertEqual( + compute_space_complexity_model(qmodel), + compute_space_complexity_model(loaded_model), + ) + if __name__ == "__main__": unittest.main()
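
For context, below is a minimal, hypothetical sketch of how the new stagev2 API could be wired into a two-stage experiment. The stage functions (train_model, quantize_model), the dataset name, and the parameter values are placeholders rather than part of this patch; any callables accepting a model= keyword plus their declared parameters would fit, since Stage.run calls function(model=input_model, **parameters) and caches its output under the store path. The import path also assumes the src/stage modules are importable by bare name, as stagev2.py itself does with functions.load_data.

# Hypothetical usage sketch for src/stage/stagev2.py (placeholder names, not part of the patch).
from pathlib import Path

from stagev2 import Pipeline, StageMetadata


def train_model(model=None, dataset="some_dataset", epochs=10):
    """Placeholder first stage: build and train a float model (ignores `model`)."""
    ...


def quantize_model(model=None, dataset="some_dataset", bits=8, n_levels=16):
    """Placeholder second stage: quantize the incoming model and fine-tune (QAT)."""
    ...


stage_definitions = [
    StageMetadata(
        name="initial_training",
        seed=42,
        function=train_model,
        parameters={"dataset": "some_dataset", "epochs": 10},
    ),
    StageMetadata(
        name="qat",
        seed=42,
        function=quantize_model,
        parameters={"dataset": "some_dataset", "bits": 8, "n_levels": 16},
    ),
]

pipeline = Pipeline(
    name="qat_flex_8bit",
    stage_definitions=stage_definitions,
    store_path=Path("checkpoints"),
)
pipeline.run()
# Each stage writes its model to checkpoints/artifacts/<hash> and its
# loss/accuracy/complexity to checkpoints/results/<hash>.json; re-running the
# pipeline with unchanged metadata reuses the cached artifacts instead of
# re-executing the stage function.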