From edd9611b6ec9ec3f147f2a83d25653dc082bec16 Mon Sep 17 00:00:00 2001 From: span14 Date: Mon, 11 Sep 2023 12:40:54 -0500 Subject: [PATCH 01/16] add randomness --- Cargo.toml | 10 ++- src/bin/verify_circuit.rs | 2 +- src/gadgets/bias_div_floor_relu6.rs | 2 +- src/gadgets/bias_div_round_relu6.rs | 2 +- src/gadgets/dot_prod.rs | 1 + src/gadgets/gadget.rs | 1 + src/layers/arithmetic/add.rs | 1 + src/layers/arithmetic/div_var.rs | 1 + src/layers/arithmetic/mul.rs | 1 + src/layers/arithmetic/sub.rs | 1 + src/layers/avg_pool_2d.rs | 3 +- src/layers/batch_mat_mul.rs | 10 ++- src/layers/conv2d.rs | 4 +- src/layers/dag.rs | 42 ++++++++- src/layers/div_fixed.rs | 1 + src/layers/fully_connected.rs | 28 +++++- src/layers/layer.rs | 1 + src/layers/logistic.rs | 1 + src/layers/max_pool_2d.rs | 1 + src/layers/mean.rs | 1 + src/layers/noop.rs | 1 + src/layers/pow.rs | 1 + src/layers/rsqrt.rs | 1 + src/layers/shape/broadcast.rs | 1 + src/layers/shape/concatenation.rs | 1 + src/layers/shape/mask_neg_inf.rs | 1 + src/layers/shape/pack.rs | 1 + src/layers/shape/pad.rs | 3 +- src/layers/shape/permute.rs | 1 + src/layers/shape/reshape.rs | 1 + src/layers/shape/resize_nn.rs | 1 + src/layers/shape/rotate.rs | 1 + src/layers/shape/slice.rs | 1 + src/layers/shape/split.rs | 1 + src/layers/shape/transpose.rs | 1 + src/layers/softmax.rs | 1 + src/layers/sqrt.rs | 1 + src/layers/square.rs | 1 + src/layers/squared_diff.rs | 1 + src/layers/tanh.rs | 1 + src/layers/update.rs | 1 + src/lib.rs | 2 + src/model.rs | 135 +++++++++++++++++++++------- src/utils/proving_kzg.rs | 2 +- 44 files changed, 221 insertions(+), 55 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 43286f2..f15cd2a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,14 +21,17 @@ opt-level = 3 [dependencies] bitvec = "1.0.1" -halo2 = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2", rev="17e9765c199670534c0299c96128d0464a188d0b" } -halo2_gadgets = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_gadgets", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } -halo2_proofs = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_proofs", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } +blake2b_simd = "1" +halo2 = { path="../halo2_sys/halo2" } +halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.3", package = "halo2curves" } +halo2_gadgets = { path="../halo2_sys/halo2_gadgets", features = ["circuit-params"] } +halo2_proofs = { path="../halo2_sys/halo2_proofs", features = ["circuit-params"] } lazy_static = "1.4.0" ndarray = "0.15.6" num-bigint = "0.4.3" num-traits = "0.2.15" once_cell = "1.15.0" +plonkish_backend = { path="../plonkish/plonkish_backend", features=["benchmark"]} rand = "0.8.5" rmp-serde = "1.1.1" rounded-div = "0.1.2" @@ -36,4 +39,3 @@ serde = "1.0.152" serde_derive = "1.0.152" serde_json = "1.0.85" wav = "1.0.0" - diff --git a/src/bin/verify_circuit.rs b/src/bin/verify_circuit.rs index e0117bc..c4c1e4a 100644 --- a/src/bin/verify_circuit.rs +++ b/src/bin/verify_circuit.rs @@ -14,7 +14,7 @@ fn main() { if kzg_or_ipa != "kzg" && kzg_or_ipa != "ipa" { panic!("Must specify kzg or ipa"); } - + if kzg_or_ipa == "kzg" { let config = load_config_msgpack(&config_fname); let circuit = ModelCircuit::::generate_from_msgpack(config, false); diff --git a/src/gadgets/bias_div_floor_relu6.rs b/src/gadgets/bias_div_floor_relu6.rs index bccd5eb..6e9258d 100644 --- 
a/src/gadgets/bias_div_floor_relu6.rs +++ b/src/gadgets/bias_div_floor_relu6.rs @@ -197,7 +197,7 @@ impl Gadget for BiasDivFloorRelu6Chip { let outp = div_res.map(|x: i64| { let mut x_pos = x - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { - println!("x: {}, x_pos: {}", x, x_pos); + // println!("x: {}, x_pos: {}", x, x_pos); x_pos = 0; } let outp_val = relu_map.get(&(x_pos)).unwrap(); diff --git a/src/gadgets/bias_div_round_relu6.rs b/src/gadgets/bias_div_round_relu6.rs index 6d8c125..349678e 100644 --- a/src/gadgets/bias_div_round_relu6.rs +++ b/src/gadgets/bias_div_round_relu6.rs @@ -224,7 +224,7 @@ impl Gadget for BiasDivRoundRelu6Chip { let outp = div_res.map(|x: i64| { let mut x_pos = x - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { - println!("x: {}, x_pos: {}", x, x_pos); + // println!("x: {}, x_pos: {}", x, x_pos); x_pos = 0; } let outp_val = relu_map.get(&(x_pos)).unwrap(); diff --git a/src/gadgets/dot_prod.rs b/src/gadgets/dot_prod.rs index 54aea1e..5c7d8a5 100644 --- a/src/gadgets/dot_prod.rs +++ b/src/gadgets/dot_prod.rs @@ -103,6 +103,7 @@ impl Gadget for DotProductChip { let weights = &vec_inputs[1]; assert_eq!(inp.len(), weights.len()); assert_eq!(inp.len(), self.num_inputs_per_row()); + // println!("Weight: {:?}", weights); let zero = &single_inputs[0]; diff --git a/src/gadgets/gadget.rs b/src/gadgets/gadget.rs index 182f957..f827099 100644 --- a/src/gadgets/gadget.rs +++ b/src/gadgets/gadget.rs @@ -43,6 +43,7 @@ pub enum GadgetType { pub struct GadgetConfig { pub used_gadgets: Arc>, pub columns: Vec>, + pub witness_columns: Vec>, pub fixed_columns: Vec>, pub selectors: HashMap>, pub tables: HashMap>, diff --git a/src/layers/arithmetic/add.rs b/src/layers/arithmetic/add.rs index 9460b23..565e1fc 100644 --- a/src/layers/arithmetic/add.rs +++ b/src/layers/arithmetic/add.rs @@ -55,6 +55,7 @@ impl Layer for AddChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/arithmetic/div_var.rs b/src/layers/arithmetic/div_var.rs index 1b38643..594ddb5 100644 --- a/src/layers/arithmetic/div_var.rs +++ b/src/layers/arithmetic/div_var.rs @@ -46,6 +46,7 @@ impl Layer for DivVarChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &crate::layers::layer::LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/arithmetic/mul.rs b/src/layers/arithmetic/mul.rs index e23e6b4..4c3acd6 100644 --- a/src/layers/arithmetic/mul.rs +++ b/src/layers/arithmetic/mul.rs @@ -50,6 +50,7 @@ impl Layer for MulChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/arithmetic/sub.rs b/src/layers/arithmetic/sub.rs index 4299039..4052681 100644 --- a/src/layers/arithmetic/sub.rs +++ b/src/layers/arithmetic/sub.rs @@ -43,6 +43,7 @@ impl Layer for SubChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/avg_pool_2d.rs b/src/layers/avg_pool_2d.rs index 05c5178..33608a4 100644 --- a/src/layers/avg_pool_2d.rs +++ b/src/layers/avg_pool_2d.rs @@ -66,6 +66,7 @@ impl Layer for AvgPool2DChip { layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: 
Rc, layer_config: &LayerConfig, ) -> Result>, Error> { @@ -77,7 +78,7 @@ impl Layer for AvgPool2DChip { // TODO: refactor this let out_xy = MaxPool2DChip::shape(inp, layer_config); let out_shape = vec![1, out_xy.0, out_xy.1, inp.shape()[3]]; - println!("out_shape: {:?}", out_shape); + // println!("out_shape: {:?}", out_shape); let out = Array::from_shape_vec(IxDyn(&out_shape), dived).unwrap(); Ok(vec![out]) diff --git a/src/layers/batch_mat_mul.rs b/src/layers/batch_mat_mul.rs index 94ec288..2714d36 100644 --- a/src/layers/batch_mat_mul.rs +++ b/src/layers/batch_mat_mul.rs @@ -21,13 +21,14 @@ impl Layer for BatchMatMulChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { let inp1 = &tensors[0]; let inp2 = &tensors[1]; - println!("inp1: {:?}", inp1.shape()); - println!("inp2: {:?}", inp2.shape()); + // println!("inp1: {:?}", inp1.shape()); + // println!("inp2: {:?}", inp2.shape()); assert_eq!(inp1.ndim(), 3); assert_eq!(inp2.ndim(), 3); @@ -60,8 +61,8 @@ impl Layer for BatchMatMulChip { } else { inp2.index_axis(Axis(0), i).t().to_owned() }; - println!("inp1_slice: {:?}", inp1_slice.shape()); - println!("inp2_slice: {:?}", inp2_slice.shape()); + // println!("inp1_slice: {:?}", inp1_slice.shape()); + // println!("inp2_slice: {:?}", inp2_slice.shape()); // Batch MM doesn't have a fused activation, so insert it here // TODO: consider putting this in the converter? let tmp_config = LayerConfig { @@ -72,6 +73,7 @@ impl Layer for BatchMatMulChip { layouter.namespace(|| ""), &vec![inp1_slice, inp2_slice], constants, + rand_vector, gadget_config.clone(), &tmp_config, )?; diff --git a/src/layers/conv2d.rs b/src/layers/conv2d.rs index 9b5a689..6ca7312 100644 --- a/src/layers/conv2d.rs +++ b/src/layers/conv2d.rs @@ -22,7 +22,7 @@ use crate::{ }, }; -use super::layer::{ActivationType, AssignedTensor, GadgetConsumer, Layer, LayerConfig}; +use super::layer::{ActivationType, AssignedTensor, GadgetConsumer, Layer, LayerConfig, CellRc}; #[derive(Default, Clone, Copy, Eq, PartialEq)] pub enum PaddingEnum { @@ -291,6 +291,7 @@ impl Layer for Conv2DChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>>, + rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { @@ -342,6 +343,7 @@ impl Layer for Conv2DChip { layouter.namespace(|| ""), &vec![weights_array, inp_array], constants, + rand_vector, gadget_config.clone(), layer_config, ) diff --git a/src/layers/dag.rs b/src/layers/dag.rs index b73a51a..75b7505 100644 --- a/src/layers/dag.rs +++ b/src/layers/dag.rs @@ -64,6 +64,7 @@ impl DAGLayerChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result<(HashMap>, Vec>), Error> { @@ -72,7 +73,7 @@ impl DAGLayerChip { for (idx, tensor) in tensors.iter().enumerate() { tensor_map.insert(idx, tensor.clone()); } - + // println!("Tensors Length: {}", tensors.len()); // Compute the dag for (layer_idx, layer_config) in self.dag_config.ops.iter().enumerate() { let layer_type = &layer_config.layer_type; @@ -94,6 +95,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag add"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -104,6 +106,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag avg pool 2d"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? 
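All of these dag.rs hunks make the same mechanical change: each dispatch arm in DAGLayerChip::forward threads the new rand_vector argument through to the underlying chip. The driver is the Layer trait itself, which grows one parameter. Reconstructed from the src/layers/layer.rs hunk later in this patch (the generic arguments are inferred from the surrounding code, since the flattened diff drops them):

pub trait Layer<F: PrimeField> {
  fn forward(
    &self,
    layouter: impl Layouter<F>,
    tensors: &Vec<AssignedTensor<F>>,
    constants: &HashMap<i64, CellRc<F>>,
    // New in this patch: challenge-derived randomness, keyed by index,
    // assigned in model.rs and consumed by FullyConnectedChip::random_vector.
    rand_vector: &HashMap<i64, CellRc<F>>,
    gadget_config: Rc<GadgetConfig>,
    layer_config: &LayerConfig,
  ) -> Result<Vec<AssignedTensor<F>>, Error>;
}

Passing the map as an explicit argument, rather than stashing it in GadgetConfig, keeps the randomness visibly scoped to synthesis, at the cost of touching every Layer implementor; that is why so many files in the diffstat change by exactly one line.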
@@ -116,6 +119,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag max pool 2d"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -126,6 +130,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag batch mat mul"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -136,6 +141,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag batch mat mul"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -149,6 +155,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag conv 2d"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -159,6 +166,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag div"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -169,6 +177,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag div"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -182,6 +191,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag fully connected"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -192,6 +202,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag softmax"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -202,6 +213,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag mean"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -212,6 +224,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag pad"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -222,6 +235,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag permute"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -232,6 +246,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag squared diff"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -242,6 +257,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag rsqrt"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -252,6 +268,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag sqrt"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -262,6 +279,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag logistic"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -272,6 +290,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag logistic"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -282,6 +301,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag tanh"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -292,6 +312,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag mul"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -302,6 +323,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag sub"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -312,6 +334,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag noop"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -322,6 +345,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag transpose"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -332,6 +356,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag reshape"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? 
@@ -342,6 +367,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag resize nn"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -352,6 +378,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag rotate"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -362,6 +389,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag concatenation"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -372,6 +400,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag pack"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -382,6 +411,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag split"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -392,6 +422,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag update"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -402,6 +433,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag slice"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -412,6 +444,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag mask neg inf"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -422,6 +455,7 @@ impl DAGLayerChip { layouter.namespace(|| "dag square"), &vec_inps, constants, + rand_vector, gadget_config.clone(), &layer_config, )? @@ -429,7 +463,7 @@ impl DAGLayerChip { }; for (idx, tensor_idx) in out_idxes.iter().enumerate() { - println!("Out {} shape: {:?}", idx, out[idx].shape()); + // println!("Out {} shape: {:?}", idx, out[idx].shape()); tensor_map.insert(*tensor_idx, out[idx].clone()); } println!(); @@ -453,8 +487,8 @@ impl DAGLayerChip { }; let tmp = print_arr.iter().map(|x| x.as_ref()).collect::>(); - print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor); - println!("final out idxes: {:?}", self.dag_config.final_out_idxes); + // print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor); + // println!("final out idxes: {:?}", self.dag_config.final_out_idxes); let mut x = vec![]; for cell in print_arr.iter() { diff --git a/src/layers/div_fixed.rs b/src/layers/div_fixed.rs index 2c86277..e12fbf5 100644 --- a/src/layers/div_fixed.rs +++ b/src/layers/div_fixed.rs @@ -56,6 +56,7 @@ impl Layer for DivFixedChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/fully_connected.rs b/src/layers/fully_connected.rs index 6dbf529..eb6317b 100644 --- a/src/layers/fully_connected.rs +++ b/src/layers/fully_connected.rs @@ -75,6 +75,7 @@ impl FullyConnectedChip { let cell = region .assign_advice(|| "assign array", columns[col_idx], row_idx, || *val) .unwrap(); + // println!("Error: {:?}", cell); outp.push(cell); } @@ -82,13 +83,30 @@ impl FullyConnectedChip { Ok(Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap()) } + // pub fn random_vector( + // constants: &HashMap>, + // size: usize, + // ) -> Result>, Error> { + // let mut outp = vec![]; + // for idx in 0..size { + // let idx = RAND_START_IDX + (idx as i64); + // if !constants.contains_key(&idx) { + // println!("Random vector is too small: {:?}", size); + // } + // let cell = constants.get(&idx).unwrap().clone(); + // outp.push(cell); + // } + + // Ok(outp) + // } + pub fn random_vector( constants: &HashMap>, size: usize, ) -> Result>, Error> { let mut outp = vec![]; for idx in 0..size { - let idx = 
RAND_START_IDX + (idx as i64); + let idx = idx as i64; if !constants.contains_key(&idx) { println!("Random vector is too small: {:?}", size); } @@ -107,6 +125,8 @@ impl FullyConnectedChip { _ => panic!("Unsupported activation type for fully connected"), } } + + } impl Layer for FullyConnectedChip { @@ -115,6 +135,7 @@ impl Layer for FullyConnectedChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { @@ -146,8 +167,9 @@ impl Layer for FullyConnectedChip { .unwrap(); // Generate random vectors - let r1 = Self::random_vector(constants, mm_result.shape()[0]).unwrap(); - let r2 = Self::random_vector(constants, mm_result.shape()[1]).unwrap(); + + let r1 = Self::random_vector(rand_vector, mm_result.shape()[0]).unwrap(); + let r2 = Self::random_vector(rand_vector, mm_result.shape()[1]).unwrap(); let dot_prod_chip = DotProductChip::::construct(gadget_config.clone()); let r1_ref = r1.iter().map(|x| x.as_ref()).collect::>(); diff --git a/src/layers/layer.rs b/src/layers/layer.rs index 676d6eb..3eb9e3a 100644 --- a/src/layers/layer.rs +++ b/src/layers/layer.rs @@ -79,6 +79,7 @@ pub trait Layer { layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error>; diff --git a/src/layers/logistic.rs b/src/layers/logistic.rs index 6ff9f45..aa19fa5 100644 --- a/src/layers/logistic.rs +++ b/src/layers/logistic.rs @@ -19,6 +19,7 @@ impl Layer for LogisticChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/max_pool_2d.rs b/src/layers/max_pool_2d.rs index c929092..20d16e5 100644 --- a/src/layers/max_pool_2d.rs +++ b/src/layers/max_pool_2d.rs @@ -85,6 +85,7 @@ impl Layer for MaxPool2DChip { mut layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/mean.rs b/src/layers/mean.rs index bfaf2f1..3f2ff89 100644 --- a/src/layers/mean.rs +++ b/src/layers/mean.rs @@ -113,6 +113,7 @@ impl Layer for MeanChip { layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/noop.rs b/src/layers/noop.rs index 0e01f38..d8f1740 100644 --- a/src/layers/noop.rs +++ b/src/layers/noop.rs @@ -14,6 +14,7 @@ impl Layer for NoopChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/pow.rs b/src/layers/pow.rs index 7d4c443..08c36d8 100644 --- a/src/layers/pow.rs +++ b/src/layers/pow.rs @@ -19,6 +19,7 @@ impl Layer for PowChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/rsqrt.rs b/src/layers/rsqrt.rs index bbc6e1c..764f3f0 100644 --- a/src/layers/rsqrt.rs +++ b/src/layers/rsqrt.rs @@ -19,6 +19,7 @@ impl Layer for RsqrtChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/broadcast.rs b/src/layers/shape/broadcast.rs index 
c947bdf..ad9d501 100644 --- a/src/layers/shape/broadcast.rs +++ b/src/layers/shape/broadcast.rs @@ -24,6 +24,7 @@ impl Layer for BroadcastChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/concatenation.rs b/src/layers/shape/concatenation.rs index e81bdf9..9545d95 100644 --- a/src/layers/shape/concatenation.rs +++ b/src/layers/shape/concatenation.rs @@ -18,6 +18,7 @@ impl Layer for ConcatenationChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/mask_neg_inf.rs b/src/layers/shape/mask_neg_inf.rs index b4cbab2..792be5b 100644 --- a/src/layers/shape/mask_neg_inf.rs +++ b/src/layers/shape/mask_neg_inf.rs @@ -18,6 +18,7 @@ impl Layer for MaskNegInfChip { _layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/pack.rs b/src/layers/shape/pack.rs index e06ba75..e25b6fc 100644 --- a/src/layers/shape/pack.rs +++ b/src/layers/shape/pack.rs @@ -18,6 +18,7 @@ impl Layer for PackChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/pad.rs b/src/layers/shape/pad.rs index fae9c74..5b0a7b4 100644 --- a/src/layers/shape/pad.rs +++ b/src/layers/shape/pad.rs @@ -9,7 +9,7 @@ use ndarray::{Array, Axis, IxDyn, Slice}; use crate::{ gadgets::gadget::GadgetConfig, - layers::layer::{AssignedTensor, GadgetConsumer}, + layers::layer::{AssignedTensor, GadgetConsumer, CellRc}, }; use super::super::layer::{Layer, LayerConfig}; @@ -75,6 +75,7 @@ impl Layer for PadChip { _layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/permute.rs b/src/layers/shape/permute.rs index 90121b3..1631f02 100644 --- a/src/layers/shape/permute.rs +++ b/src/layers/shape/permute.rs @@ -18,6 +18,7 @@ impl Layer for PermuteChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/reshape.rs b/src/layers/shape/reshape.rs index abbbfe6..2bcf297 100644 --- a/src/layers/shape/reshape.rs +++ b/src/layers/shape/reshape.rs @@ -18,6 +18,7 @@ impl Layer for ReshapeChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/resize_nn.rs b/src/layers/shape/resize_nn.rs index 2ada4a4..a094191 100644 --- a/src/layers/shape/resize_nn.rs +++ b/src/layers/shape/resize_nn.rs @@ -19,6 +19,7 @@ impl Layer for ResizeNNChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/rotate.rs b/src/layers/shape/rotate.rs index 618c526..866414a 100644 --- a/src/layers/shape/rotate.rs +++ b/src/layers/shape/rotate.rs @@ -28,6 +28,7 @@ impl Layer for RotateChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, 
_gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/slice.rs b/src/layers/shape/slice.rs index 6cfd653..d4c7788 100644 --- a/src/layers/shape/slice.rs +++ b/src/layers/shape/slice.rs @@ -18,6 +18,7 @@ impl Layer for SliceChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/split.rs b/src/layers/shape/split.rs index 071dfbe..73d411e 100644 --- a/src/layers/shape/split.rs +++ b/src/layers/shape/split.rs @@ -18,6 +18,7 @@ impl Layer for SplitChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/transpose.rs b/src/layers/shape/transpose.rs index d9de1bf..3a84752 100644 --- a/src/layers/shape/transpose.rs +++ b/src/layers/shape/transpose.rs @@ -18,6 +18,7 @@ impl Layer for TransposeChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, + _rand_vector: &HashMap>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/softmax.rs b/src/layers/softmax.rs index 7a6aae8..b191095 100644 --- a/src/layers/softmax.rs +++ b/src/layers/softmax.rs @@ -121,6 +121,7 @@ impl Layer for SoftmaxChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/sqrt.rs b/src/layers/sqrt.rs index ddd0aa3..1068e28 100644 --- a/src/layers/sqrt.rs +++ b/src/layers/sqrt.rs @@ -19,6 +19,7 @@ impl Layer for SqrtChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/square.rs b/src/layers/square.rs index 0ca4ba7..d8aeb7b 100644 --- a/src/layers/square.rs +++ b/src/layers/square.rs @@ -20,6 +20,7 @@ impl Layer for SquareChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/squared_diff.rs b/src/layers/squared_diff.rs index 7900b49..ee75858 100644 --- a/src/layers/squared_diff.rs +++ b/src/layers/squared_diff.rs @@ -23,6 +23,7 @@ impl Layer for SquaredDiffChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/tanh.rs b/src/layers/tanh.rs index 2d44365..5a12cac 100644 --- a/src/layers/tanh.rs +++ b/src/layers/tanh.rs @@ -19,6 +19,7 @@ impl Layer for TanhChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/update.rs b/src/layers/update.rs index 73ec0ab..0e1064c 100644 --- a/src/layers/update.rs +++ b/src/layers/update.rs @@ -19,6 +19,7 @@ impl Layer for UpdateChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, + _rand_vector: &HashMap>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/lib.rs b/src/lib.rs index 4ad95f1..4abd3ea 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,9 @@ #![feature(int_roundings)] pub mod commitments; +pub mod conversion; pub mod gadgets; pub mod layers; pub mod 
model; pub mod utils; +pub mod simple; \ No newline at end of file diff --git a/src/model.rs b/src/model.rs index d88b59e..3d9a1a2 100644 --- a/src/model.rs +++ b/src/model.rs @@ -4,11 +4,11 @@ use std::{ rc::Rc, sync::{Arc, Mutex}, }; - +// use blake2b_simd::{Params as Blake2bParams, State as Blake2bState}; use halo2_proofs::{ circuit::{Layouter, SimpleFloorPlanner, Value}, halo2curves::ff::{FromUniformBytes, PrimeField}, - plonk::{Advice, Circuit, Column, ConstraintSystem, Error, Instance}, + plonk::{Advice, Circuit, Column, ConstraintSystem, Error, FirstPhase, Instance, Challenge, SecondPhase}, }; use lazy_static::lazy_static; use ndarray::{Array, IxDyn}; @@ -96,6 +96,8 @@ pub struct ModelConfig> { pub gadget_config: Rc, pub public_col: Column, pub hasher: Option>, + pub challenge: Challenge, + pub rand_vector: Column, pub _marker: PhantomData, } @@ -107,7 +109,7 @@ impl> ModelCircuit { tensors: &BTreeMap>, ) -> Result>, Error> { let tensors = layouter.assign_region( - || "asssignment", + || "assignment", |mut region| { let mut cell_idx = 0; let mut assigned_tensors = BTreeMap::new(); @@ -208,18 +210,21 @@ impl> ModelCircuit { // TODO: I've made some very bad life decisions // TOOD: this needs to be a random oracle - let r_base = F::from(0x123456789abcdef); - let mut r = r_base.clone(); - for i in 0..self.num_random { - let rand = region.assign_fixed( - || format!("rand_{}", i), - gadget_config.fixed_columns[0], - constants.len(), - || Value::known(r), - )?; - r = r * r_base; - constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand)); - } + // let r_base = F::from(0x123456789abcdef); + // let r_base = layouter.get_challenge(); + + // let mut r = challenge; + + // for i in 0..self.num_random { + // let rand = region.assign_fixed( + // || format!("rand_{}", i), + // gadget_config.fixed_columns[0], + // constants.len(), + // || r, + // )?; + // r = r * challenge; + // constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand)); + // } Ok(constants) }, @@ -263,21 +268,23 @@ impl> ModelCircuit { // TODO: I've made some very bad life decisions // TOOD: this needs to be a random oracle - let r_base = F::from(0x123456789abcdef); - let mut r = r_base.clone(); - for i in 0..self.num_random { - let assignment_idx = constants.len(); - let row_idx = assignment_idx / gadget_config.columns.len(); - let col_idx = assignment_idx % gadget_config.columns.len(); - let rand = region.assign_advice( - || format!("rand_{}", i), - gadget_config.columns[col_idx], - row_idx, - || Value::known(r), - )?; - r = r * r_base; - constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand)); - } + // let r_base = F::from(0x123456789abcdef); + // let r_base = c.assign().unwrap_or(F::from(0x123456789abcdef)); + // let mut r = challenge; + + // for i in 0..self.num_random { + // let assignment_idx = constants.len(); + // let row_idx = assignment_idx / gadget_config.columns.len(); + // let col_idx = assignment_idx % gadget_config.columns.len(); + // let rand = region.assign_advice( + // || format!("rand_{}", i), + // gadget_config.columns[col_idx], + // row_idx, + // || r, + // )?; + // r = r * challenge; + // constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand)); + // } for (k, v) in fixed_constants.iter() { let v2 = constants.get(k).unwrap(); @@ -289,6 +296,34 @@ impl> ModelCircuit { Ok(constants) } + fn fill_random_vectors( + &self, + mut layouter: impl Layouter, + challenge: Challenge, + rand_vector: Column, + ) -> Result>, Error> { + let c_base = layouter.get_challenge(challenge); + let mut c = c_base; + let 
rand_vec = layouter.assign_region( + || "random vector", + |mut region| { + let mut rand_vec: HashMap> = HashMap::new(); + for i in 0..self.num_random { + let rand = region.assign_advice( + || format!("rand_vec_{}", i), + rand_vector, + i.try_into().unwrap(), + || c, + )?; + c = c * c_base; + rand_vec.insert(i as i64, Rc::new(rand)); + } + Ok(rand_vec) + } + )?; + Ok(rand_vec) + } + pub fn generate_from_file(config_file: &str, inp_file: &str) -> ModelCircuit { let config = load_model_msgpack(config_file, inp_file); Self::generate_from_msgpack(config, true) @@ -567,12 +602,25 @@ impl> Circuit for ModelCircuit fn configure(meta: &mut ConstraintSystem) -> Self::Config { let mut gadget_config = crate::model::GADGET_CONFIG.lock().unwrap().clone(); - let columns = (0..gadget_config.num_cols) + // TODO: Allocate less columns + let witness_columns = (0..gadget_config.num_cols) .map(|_| meta.advice_column()) .collect::>(); - for col in columns.iter() { - meta.enable_equality(*col); + let c = meta.challenge_usable_after(FirstPhase); + let columns = (0..gadget_config.num_cols) + .map(|_| meta.advice_column_in(SecondPhase)) + .collect::>(); + let rand_vector = meta.advice_column_in(SecondPhase); + + for i in 0..gadget_config.num_cols { + meta.enable_equality(witness_columns[i]); + meta.enable_equality(columns[i]); } + + + meta.enable_equality(rand_vector); + + gadget_config.witness_columns = witness_columns; gadget_config.columns = columns; let public_col = meta.instance_column(); @@ -637,11 +685,14 @@ impl> Circuit for ModelCircuit gadget_config: gadget_config.into(), public_col, hasher, + challenge: c, + rand_vector, _marker: PhantomData, } } fn synthesize(&self, config: Self::Config, mut layouter: impl Layouter) -> Result<(), Error> { + // Assign tables let gadget_rc: Rc = config.gadget_config.clone().into(); for gadget in self.used_gadgets.iter() { @@ -711,6 +762,15 @@ impl> Circuit for ModelCircuit } } + // Assign extra space for challenge generation + self + .assign_tensors_vec( + layouter.namespace(|| "challenge generation"), + &config.gadget_config.witness_columns, + &self.tensors, + ) + .unwrap(); + // Assign weights and constants let constants_base = self .assign_constants( @@ -718,6 +778,7 @@ impl> Circuit for ModelCircuit config.gadget_config.clone(), ) .unwrap(); + // Some halo2 cancer let constants = self .assign_constants2( @@ -780,12 +841,20 @@ impl> Circuit for ModelCircuit .unwrap() }; + // Create Randomness Vector + let rand_vector = self.fill_random_vectors( + layouter.namespace(|| "randomness"), + config.challenge, + config.rand_vector + )?; + // Perform the dag let dag_chip = DAGLayerChip::::construct(self.dag_config.clone()); let (final_tensor_map, result) = dag_chip.forward( layouter.namespace(|| "dag"), &tensors, &constants, + &rand_vector, config.gadget_config.clone(), &LayerConfig::default(), )?; diff --git a/src/utils/proving_kzg.rs b/src/utils/proving_kzg.rs index dd44442..10eb8b4 100644 --- a/src/utils/proving_kzg.rs +++ b/src/utils/proving_kzg.rs @@ -125,7 +125,7 @@ pub fn time_circuit_kzg(circuit: ModelCircuit) { .collect(); let public_vals_u8_size = serialize(&public_vals_u8, "public_vals"); println!("Public vals size: {} bytes", public_vals_u8_size); - + // println!("{:?}", public_vals); let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); create_proof::< KZGCommitmentScheme, From 5c5160fd8b3622c20366a6bcb528d7897f26ec10 Mon Sep 17 00:00:00 2001 From: span14 Date: Mon, 11 Sep 2023 12:51:33 -0500 Subject: [PATCH 02/16] update cargo build --- 
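The substance of PATCH 01 is the model.rs change above: the hard-coded r_base = F::from(0x123456789abcdef) powers are replaced by a genuine verifier challenge via halo2's multi-phase API, and the resulting column of powers supplies the r1/r2 vectors for the Freivalds-style matrix-multiplication check in fully_connected.rs. Below is a minimal sketch of the two-phase pattern, assuming the PSE halo2 fork with the circuit-params feature as used by this crate; the function names are illustrative, not from the patch.

use halo2_proofs::{
  circuit::{Layouter, Value},
  halo2curves::ff::PrimeField,
  plonk::{Advice, Challenge, Column, ConstraintSystem, Error, FirstPhase, SecondPhase},
};

// Configure: phase-1 columns are committed first; the challenge is squeezed
// from that commitment, so anything derived from it must live in phase 2.
fn configure_randomness<F: PrimeField>(
  meta: &mut ConstraintSystem<F>,
) -> (Challenge, Column<Advice>) {
  let challenge = meta.challenge_usable_after(FirstPhase);
  let rand_col = meta.advice_column_in(SecondPhase);
  (challenge, rand_col)
}

// Synthesize: lay down c, c^2, ..., c^n in the phase-2 column.
fn assign_randomness<F: PrimeField>(
  mut layouter: impl Layouter<F>,
  challenge: Challenge,
  rand_col: Column<Advice>,
  n: usize,
) -> Result<(), Error> {
  let c = layouter.get_challenge(challenge); // Value<F>; unknown at keygen
  layouter.assign_region(
    || "challenge powers",
    |mut region| {
      let mut acc = c;
      for i in 0..n {
        region.assign_advice(|| format!("c^{}", i + 1), rand_col, i, || acc)?;
        acc = acc * c;
      }
      Ok(())
    },
  )
}

One nuance relative to fill_random_vectors above: keeping the running power local to the assignment closure avoids drift if a floor planner ever replays the closure; with SimpleFloorPlanner, as used here, the patch's captured-mut version behaves the same.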
Cargo.toml | 6 +++--- src/lib.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f15cd2a..8180916 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,10 +22,10 @@ opt-level = 3 [dependencies] bitvec = "1.0.1" blake2b_simd = "1" -halo2 = { path="../halo2_sys/halo2" } +halo2 = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2", rev="17e9765c199670534c0299c96128d0464a188d0b" } halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.3", package = "halo2curves" } -halo2_gadgets = { path="../halo2_sys/halo2_gadgets", features = ["circuit-params"] } -halo2_proofs = { path="../halo2_sys/halo2_proofs", features = ["circuit-params"] } +halo2_gadgets = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_gadgets", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } +halo2_proofs = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_proofs", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } lazy_static = "1.4.0" ndarray = "0.15.6" num-bigint = "0.4.3" diff --git a/src/lib.rs b/src/lib.rs index 4abd3ea..c919461 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,9 +1,9 @@ #![feature(int_roundings)] pub mod commitments; -pub mod conversion; +// pub mod conversion; pub mod gadgets; pub mod layers; pub mod model; pub mod utils; -pub mod simple; \ No newline at end of file +// pub mod simple; \ No newline at end of file From 2e5faeb7bacc890e78f855518b608bedb2fc6efd Mon Sep 17 00:00:00 2001 From: span14 Date: Mon, 11 Sep 2023 12:51:59 -0500 Subject: [PATCH 03/16] update cargo build --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 8180916..3d49b94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,6 @@ opt-level = 3 [dependencies] bitvec = "1.0.1" -blake2b_simd = "1" halo2 = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2", rev="17e9765c199670534c0299c96128d0464a188d0b" } halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.3", package = "halo2curves" } halo2_gadgets = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_gadgets", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } From 17f87a76d1bc2642f9ba321eaae25ecb4e89af7a Mon Sep 17 00:00:00 2001 From: span14 Date: Mon, 11 Sep 2023 12:53:25 -0500 Subject: [PATCH 04/16] update cargo build --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 3d49b94..b7b98bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,6 @@ ndarray = "0.15.6" num-bigint = "0.4.3" num-traits = "0.2.15" once_cell = "1.15.0" -plonkish_backend = { path="../plonkish/plonkish_backend", features=["benchmark"]} rand = "0.8.5" rmp-serde = "1.1.1" rounded-div = "0.1.2" From 4b91572d52bd47aea1ff783a591196d5077c6943 Mon Sep 17 00:00:00 2001 From: span14 Date: Sat, 16 Sep 2023 19:30:15 -0500 Subject: [PATCH 05/16] carry value along with cell --- Cargo.toml | 6 +- src/commitments/packer.rs | 10 +- src/gadgets/add_pairs.rs | 28 ++--- src/gadgets/adder.rs | 28 ++--- src/gadgets/bias_div_floor_relu6.rs | 63 +++++----- src/gadgets/bias_div_round_relu6.rs | 63 +++++----- src/gadgets/dot_prod.rs | 31 ++--- src/gadgets/gadget.rs | 18 +-- src/gadgets/input_lookup.rs | 6 +- src/gadgets/max.rs | 36 +++--- src/gadgets/mul_pairs.rs | 28 ++--- src/gadgets/nonlinear/exp.rs | 12 
+- src/gadgets/nonlinear/logistic.rs | 12 +- src/gadgets/nonlinear/non_linearity.rs | 28 ++--- src/gadgets/nonlinear/pow.rs | 12 +- src/gadgets/nonlinear/relu.rs | 12 +- src/gadgets/nonlinear/rsqrt.rs | 12 +- src/gadgets/nonlinear/sqrt.rs | 12 +- src/gadgets/nonlinear/tanh.rs | 12 +- src/gadgets/sqrt_big.rs | 37 +++--- src/gadgets/square.rs | 24 ++-- src/gadgets/squared_diff.rs | 28 ++--- src/gadgets/sub_pairs.rs | 28 ++--- src/gadgets/update.rs | 47 ++++---- src/gadgets/var_div.rs | 45 ++++--- src/gadgets/var_div_big.rs | 71 +++++------ src/gadgets/var_div_big3.rs | 76 ++++++------ src/layers/arithmetic.rs | 16 +-- src/layers/arithmetic/add.rs | 12 +- src/layers/arithmetic/div_var.rs | 18 +-- src/layers/arithmetic/mul.rs | 20 +++- src/layers/arithmetic/sub.rs | 6 +- src/layers/averager.rs | 18 +-- src/layers/avg_pool_2d.rs | 11 +- src/layers/batch_mat_mul.rs | 4 +- src/layers/conv2d.rs | 40 +++---- src/layers/dag.rs | 12 +- src/layers/div_fixed.rs | 18 +-- src/layers/fully_connected.rs | 159 ++++++++++++++++++------- src/layers/layer.rs | 2 +- src/layers/logistic.rs | 6 +- src/layers/max_pool_2d.rs | 6 +- src/layers/mean.rs | 8 +- src/layers/pow.rs | 6 +- src/layers/rsqrt.rs | 15 ++- src/layers/shape/mask_neg_inf.rs | 6 +- src/layers/shape/pad.rs | 10 +- src/layers/softmax.rs | 42 ++++--- src/layers/sqrt.rs | 20 +++- src/layers/square.rs | 14 ++- src/layers/squared_diff.rs | 16 ++- src/layers/tanh.rs | 6 +- src/layers/update.rs | 12 +- src/model.rs | 18 +-- 54 files changed, 709 insertions(+), 597 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b7b98bd..0dcf8dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,10 +21,10 @@ opt-level = 3 [dependencies] bitvec = "1.0.1" -halo2 = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2", rev="17e9765c199670534c0299c96128d0464a188d0b" } +halo2 = { path = "../halo2_sys/halo2" } halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.3", package = "halo2curves" } -halo2_gadgets = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_gadgets", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } -halo2_proofs = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_proofs", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } +halo2_gadgets = { path="../halo2_sys/halo2_gadgets", features = ["circuit-params"] } +halo2_proofs = { path="../halo2_sys/halo2_proofs", features = ["circuit-params"] } lazy_static = "1.4.0" ndarray = "0.15.6" num-bigint = "0.4.3" diff --git a/src/commitments/packer.rs b/src/commitments/packer.rs index 2ddbaff..493a0b8 100644 --- a/src/commitments/packer.rs +++ b/src/commitments/packer.rs @@ -143,7 +143,7 @@ impl PackerChip { &self, mut layouter: impl Layouter, gadget_config: Rc, - cells: Vec>, + cells: Vec<(CellRc, F)>, zero: &AssignedCell, ) -> Result>, Error> { let columns = &gadget_config.columns; @@ -170,9 +170,9 @@ impl PackerChip { .iter() .enumerate() .map(|(i, x)| { - x.copy_advice(|| "", &mut region, columns[col_offset + i], 0) + x.0.copy_advice(|| "", &mut region, columns[col_offset + i], 0) .unwrap(); - x.value().copied() + x.0.value().copied() }) .collect::>(); @@ -216,7 +216,7 @@ impl PackerChip { gadget_config: Rc, values: Vec<&F>, zero: &AssignedCell, - ) -> Result<(Vec>, Vec>), Error> { + ) -> Result<(Vec>, Vec<(CellRc, F)>), Error> { let columns = &gadget_config.columns; let selector = 
gadget_config.selectors.get(&GadgetType::Packer).unwrap()[0]; @@ -249,7 +249,7 @@ impl PackerChip { let tmp = region .assign_advice(|| "", columns[col_offset + i], 0, || Value::known(*x)) .unwrap(); - Rc::new(tmp) + (Rc::new(tmp), *x) }) .collect::>(); assigned.extend(vals); diff --git a/src/gadgets/add_pairs.rs b/src/gadgets/add_pairs.rs index 530e74b..ffae1e1 100644 --- a/src/gadgets/add_pairs.rs +++ b/src/gadgets/add_pairs.rs @@ -1,7 +1,7 @@ use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ - circuit::{AssignedCell, Layouter, Region}, + circuit::{AssignedCell, Layouter, Region, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, @@ -80,9 +80,9 @@ impl Gadget for AddPairsChip { &self, region: &mut Region, row_offset: usize, - vec_inputs: &Vec>>, - _single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + _single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { let inp1 = &vec_inputs[0]; let inp2 = &vec_inputs[1]; assert_eq!(inp1.len(), inp2.len()); @@ -97,12 +97,12 @@ impl Gadget for AddPairsChip { let mut outps = vec![]; for i in 0..inp1.len() { let offset = i * self.num_cols_per_op(); - let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?; - let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?; - let outp = inp1.value().map(|x: &F| x.to_owned()) + inp2.value().map(|x: &F| x.to_owned()); + inp1[i].0.copy_advice(|| "", region, columns[offset + 0], row_offset)?; + inp2[i].0.copy_advice(|| "", region, columns[offset + 1], row_offset)?; + let outp = inp1[i].1 + inp2[i].1; - let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?; - outps.push(outp); + let outpc = region.assign_advice(|| "", columns[offset + 2], row_offset, || Value::known(outp))?; + outps.push((outpc, outp)); } Ok(outps) } @@ -110,17 +110,17 @@ impl Gadget for AddPairsChip { fn forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { let zero = &single_inputs[0]; let mut inp1 = vec_inputs[0].clone(); let mut inp2 = vec_inputs[1].clone(); let initial_len = inp1.len(); while inp1.len() % self.num_inputs_per_row() != 0 { - inp1.push(zero); - inp2.push(zero); + inp1.push(*zero); + inp2.push(*zero); } let vec_inputs = vec![inp1, inp2]; diff --git a/src/gadgets/adder.rs b/src/gadgets/adder.rs index 06595d6..58d3dbc 100644 --- a/src/gadgets/adder.rs +++ b/src/gadgets/adder.rs @@ -76,9 +76,9 @@ impl Gadget for AdderChip { &self, region: &mut Region, row_offset: usize, - vec_inputs: &Vec>>, - _single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + _single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { assert_eq!(vec_inputs.len(), 1); let inp = &vec_inputs[0]; @@ -90,35 +90,35 @@ impl Gadget for AdderChip { inp .iter() .enumerate() - .map(|(i, cell)| cell.copy_advice(|| "", region, self.config.columns[i], row_offset)) + .map(|(i, cell)| cell.0.copy_advice(|| "", region, self.config.columns[i], row_offset)) .collect::, _>>()?; - let e = inp.iter().fold(Value::known(F::ZERO), |a, b| { - a + b.value().map(|x: &F| x.to_owned()) + let e = inp.iter().fold(F::ZERO, |a, b| { + a + b.1 }); let res = region.assign_advice( || "", *self.config.columns.last().unwrap(), row_offset, - || e, + || Value::known(e), )?; - Ok(vec![res]) + Ok(vec![(res, e)]) } fn 
forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { assert_eq!(single_inputs.len(), 1); let mut inputs = vec_inputs[0].clone(); let zero = single_inputs[0].clone(); while inputs.len() % self.num_inputs_per_row() != 0 { - inputs.push(&zero); + inputs.push(zero); } let mut outputs = self.op_aligned_rows( @@ -128,9 +128,9 @@ impl Gadget for AdderChip { )?; while outputs.len() != 1 { while outputs.len() % self.num_inputs_per_row() != 0 { - outputs.push(zero.clone()); + outputs.push((zero.0.clone(), zero.1)); } - let tmp = outputs.iter().map(|x| x).collect::>(); + let tmp = outputs.iter().map(|x| (&x.0, x.1)).collect::>(); outputs = self.op_aligned_rows( layouter.namespace(|| "adder forward"), &vec![tmp], diff --git a/src/gadgets/bias_div_floor_relu6.rs b/src/gadgets/bias_div_floor_relu6.rs index 6e9258d..d2931ca 100644 --- a/src/gadgets/bias_div_floor_relu6.rs +++ b/src/gadgets/bias_div_floor_relu6.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, marker::PhantomData}; use halo2_proofs::{ - circuit::{AssignedCell, Layouter, Region}, + circuit::{AssignedCell, Layouter, Region, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error, Expression}, poly::Rotation, @@ -142,9 +142,9 @@ impl Gadget for BiasDivFloorRelu6Chip { &self, region: &mut Region, row_offset: usize, - vec_inputs: &Vec>>, - _single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + _single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { let div_val = self.config.scale_factor as i64; let div_outp_min_val_i64 = -self.config.div_outp_min_val; @@ -176,26 +176,26 @@ impl Gadget for BiasDivFloorRelu6Chip { for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() { let offset = i * self.num_cols_per_op(); - let inp_f = inp.value().map(|x: &F| x.to_owned()); - let bias_f = bias.value().map(|x: &F| { - let a = *x + div_inp_min_val_pos; + let inp_f = inp.1; + let bias_f = { + let a = bias.1 + div_inp_min_val_pos; let a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64; a - }); - let div_mod_res = inp_f.map(|x: F| { - let x_pos = x + div_inp_min_val_pos; + }; + let div_mod_res = { + let x_pos = inp_f + div_inp_min_val_pos; let inp = convert_to_u64(&x_pos); // println!("inp: {:?}, bias: {:?}, x_pos: {:?}", inp, bias, x_pos); let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 / div_val); let mod_res = inp as i64 % div_val; // println!("div_res: {:?}, mod_res: {:?}", div_res, mod_res); (div_res, mod_res) - }); - let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f; - let mod_res = div_mod_res.map(|x: (i64, i64)| x.1); + }; + let div_res = div_mod_res.0 + bias_f; + let mod_res = div_mod_res.1; - let outp = div_res.map(|x: i64| { - let mut x_pos = x - div_outp_min_val_i64; + let outp = { + let mut x_pos = div_res - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { // println!("x: {}, x_pos: {}", x, x_pos); x_pos = 0; @@ -203,11 +203,11 @@ impl Gadget for BiasDivFloorRelu6Chip { let outp_val = relu_map.get(&(x_pos)).unwrap(); // println!("x: {}, x_pos: {}, outp_val: {}", x, x_pos, outp_val); F::from(*outp_val as u64) - }); + }; // Assign inp, bias - inp.copy_advice(|| "", region, self.config.columns[offset + 0], row_offset)?; - bias.copy_advice(|| "", region, self.config.columns[offset + 1], row_offset)?; + inp.0.copy_advice(|| "", region, self.config.columns[offset + 0], 
row_offset)?; + bias.0.copy_advice(|| "", region, self.config.columns[offset + 1], row_offset)?; // Assign div_res, mod_res let div_res_cell = region @@ -215,11 +215,7 @@ impl Gadget for BiasDivFloorRelu6Chip { || "div_res", self.config.columns[offset + 2], row_offset, - || { - div_res.map(|x: i64| { - F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64) - }) - }, + || Value::known(F::from((div_res - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64)) ) .unwrap(); let _mod_res_cell = region @@ -227,7 +223,7 @@ impl Gadget for BiasDivFloorRelu6Chip { || "mod_res", self.config.columns[offset + 3], row_offset, - || mod_res.map(|x: i64| F::from(x as u64)), + || Value::known(F::from(mod_res as u64)), ) .unwrap(); @@ -236,13 +232,16 @@ impl Gadget for BiasDivFloorRelu6Chip { || "outp", self.config.columns[offset + 4], row_offset, - || outp.map(|x: F| x.to_owned()), + || Value::known(outp), ) .unwrap(); // outp_cells.push((outp_cell, div_res_cell)); - outp_cells.push(outp_cell); - outp_cells.push(div_res_cell); + outp_cells.push((outp_cell, outp)); + outp_cells.push(( + div_res_cell, + F::from((div_res - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64) + )); } Ok(outp_cells) @@ -251,17 +250,17 @@ impl Gadget for BiasDivFloorRelu6Chip { fn forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { let mut inps = vec_inputs[0].clone(); let mut biases = vec_inputs[1].clone(); // Needed to pad: bias - bias = 0 let default = biases[0].clone(); while inps.len() % self.num_inputs_per_row() != 0 { - inps.push(&default); - biases.push(&default); + inps.push(default); + biases.push(default); } let res = self.op_aligned_rows( diff --git a/src/gadgets/bias_div_round_relu6.rs b/src/gadgets/bias_div_round_relu6.rs index 349678e..787d00c 100644 --- a/src/gadgets/bias_div_round_relu6.rs +++ b/src/gadgets/bias_div_round_relu6.rs @@ -170,9 +170,9 @@ impl Gadget for BiasDivRoundRelu6Chip { &self, region: &mut Region, row_offset: usize, - vec_inputs: &Vec>>, - _single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + _single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { let div_val = self.config.scale_factor as i64; let div_outp_min_val_i64 = self.config.div_outp_min_val; @@ -204,38 +204,38 @@ impl Gadget for BiasDivRoundRelu6Chip { for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() { let offset = i * NUM_COLS_PER_OP; - let inp_f = inp.value().map(|x: &F| x.to_owned()); - let bias_f = bias.value().map(|x: &F| { - let a = *x + div_inp_min_val_pos; + let inp_f = inp.1; + let bias_f = { + let a = bias.1 + div_inp_min_val_pos; let a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64; a - }); - let div_mod_res = inp_f.map(|x: F| { - let x_pos = x + div_inp_min_val_pos; + }; + let div_mod_res = { + let x_pos = inp_f + div_inp_min_val_pos; let inp = convert_to_u64(&x_pos) as i64; let div_inp = 2 * inp + div_val; let div_res = div_inp / (2 * div_val) - div_inp_min_val_pos_i64 / div_val; let mod_res = div_inp % (2 * div_val); (div_res, mod_res) - }); - let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f; - let mod_res = div_mod_res.map(|x: (i64, i64)| x.1); + }; + let div_res = div_mod_res.0 + bias_f; + let mod_res = div_mod_res.1; - let outp = div_res.map(|x: i64| { - let mut x_pos = x - div_outp_min_val_i64; + let 
outp = { + let mut x_pos = div_res - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { // println!("x: {}, x_pos: {}", x, x_pos); x_pos = 0; } let outp_val = relu_map.get(&(x_pos)).unwrap(); F::from(*outp_val as u64) - }); + }; // Assign inp, bias - inp + inp.0 .copy_advice(|| "", region, self.config.columns[offset + 0], row_offset) .unwrap(); - bias + bias.0 .copy_advice(|| "", region, self.config.columns[offset + 1], row_offset) .unwrap(); @@ -245,11 +245,8 @@ impl Gadget for BiasDivRoundRelu6Chip { || "div_res", self.config.columns[offset + 2], row_offset, - || { - div_res.map(|x: i64| { - F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64) - }) - }, + || + Value::known(F::from((div_res - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64)), ) .unwrap(); let _mod_res_cell = region @@ -257,7 +254,7 @@ impl Gadget for BiasDivRoundRelu6Chip { || "mod_res", self.config.columns[offset + 3], row_offset, - || mod_res.map(|x: i64| F::from(x as u64)), + || Value::known(F::from(mod_res as u64)), ) .unwrap(); @@ -266,13 +263,17 @@ impl Gadget for BiasDivRoundRelu6Chip { || "outp", self.config.columns[offset + 4], row_offset, - || outp.map(|x: F| x.to_owned()), + || Value::known(outp) ) .unwrap(); // outp_cells.push((outp_cell, div_res_cell)); - outp_cells.push(outp_cell); - outp_cells.push(div_res_cell); + outp_cells.push((outp_cell, outp)); + outp_cells.push( + ( + div_res_cell, + F::from((div_res - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64) + )); } Ok(outp_cells) @@ -281,9 +282,9 @@ impl Gadget for BiasDivRoundRelu6Chip { fn forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { let mut inps = vec_inputs[0].clone(); let mut biases = vec_inputs[1].clone(); let initial_len = inps.len(); @@ -291,8 +292,8 @@ impl Gadget for BiasDivRoundRelu6Chip { // Needed to pad: bias - bias = 0 let default = biases[0].clone(); while inps.len() % self.num_inputs_per_row() != 0 { - inps.push(&default); - biases.push(&default); + inps.push(default); + biases.push(default); } let res = self diff --git a/src/gadgets/dot_prod.rs b/src/gadgets/dot_prod.rs index 5c7d8a5..c406a12 100644 --- a/src/gadgets/dot_prod.rs +++ b/src/gadgets/dot_prod.rs @@ -94,9 +94,9 @@ impl Gadget for DotProductChip { &self, region: &mut Region, row_offset: usize, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec,F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { assert_eq!(vec_inputs.len(), 2); let inp = &vec_inputs[0]; @@ -116,7 +116,7 @@ impl Gadget for DotProductChip { inp .iter() .enumerate() - .map(|(i, cell)| cell.copy_advice(|| "", region, inp_cols[i], row_offset)) + .map(|(i, cell)| cell.0.copy_advice(|| "", region, inp_cols[i], row_offset)) .collect::, _>>() .unwrap(); @@ -124,13 +124,14 @@ impl Gadget for DotProductChip { weights .iter() .enumerate() - .map(|(i, cell)| cell.copy_advice(|| "", region, weight_cols[i], row_offset)) + .map(|(i, cell)| cell.0.copy_advice(|| "", region, weight_cols[i], row_offset)) .collect::, _>>() .unwrap(); // All columns need to be assigned if self.config.columns.len() % 2 == 0 { zero + .0 .copy_advice( || "", region, @@ -143,8 +144,8 @@ impl Gadget for DotProductChip { let e = inp .iter() .zip(weights.iter()) - .map(|(a, b)| a.value().map(|x: &F| *x) * b.value()) - 
.reduce(|a, b| a + b) + .map(|(a, b)| (a.0.value().map(|x: &F| *x) * b.0.value(), a.1 * b.1)) + .reduce(|a, b| (a.0 + b.0, a.1 + b.1)) .unwrap(); let res = region @@ -152,19 +153,19 @@ impl Gadget for DotProductChip { || "", self.config.columns[self.config.columns.len() - 1], row_offset, - || e, + || e.0, ) .unwrap(); - Ok(vec![res]) + Ok(vec![(res, e.1)]) } fn forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { assert_eq!(vec_inputs.len(), 2); assert_eq!(single_inputs.len(), 1); let zero = &single_inputs[0]; @@ -172,8 +173,8 @@ impl Gadget for DotProductChip { let mut inputs = vec_inputs[0].clone(); let mut weights = vec_inputs[1].clone(); while inputs.len() % self.num_inputs_per_row() != 0 { - inputs.push(&zero); - weights.push(&zero); + inputs.push(*zero); + weights.push(*zero); } let outputs = layouter @@ -197,7 +198,7 @@ impl Gadget for DotProductChip { .unwrap(); let adder_chip = AdderChip::::construct(self.config.clone()); - let tmp = outputs.iter().map(|x| x).collect::>(); + let tmp = outputs.iter().map(|x| (&x.0, x.1)).collect::>(); Ok( adder_chip .forward( diff --git a/src/gadgets/gadget.rs b/src/gadgets/gadget.rs index f827099..22edc1d 100644 --- a/src/gadgets/gadget.rs +++ b/src/gadgets/gadget.rs @@ -101,17 +101,17 @@ pub trait Gadget { &self, region: &mut Region, row_offset: usize, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error>; + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result,F)>, Error>; // The caller is required to ensure that the inputs are of the correct length. fn op_aligned_rows( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { // Sanity check inputs for inp in vec_inputs.iter() { assert_eq!(inp.len() % self.num_inputs_per_row(), 0); @@ -142,9 +142,9 @@ pub trait Gadget { fn forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + vec_inputs: &Vec, F)>>, + single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), vec_inputs, diff --git a/src/gadgets/input_lookup.rs b/src/gadgets/input_lookup.rs index 19b5444..0c2066b 100644 --- a/src/gadgets/input_lookup.rs +++ b/src/gadgets/input_lookup.rs @@ -79,9 +79,9 @@ impl Gadget for InputLookupChip { &self, _region: &mut Region, _row_offset: usize, - _vec_inputs: &Vec>>, - _single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + _vec_inputs: &Vec, F)>>, + _single_inputs: &Vec<(&AssignedCell, F)>, + ) -> Result, F)>, Error> { panic!("InputLookupChip should not be called directly") } } diff --git a/src/gadgets/max.rs b/src/gadgets/max.rs index 48a62d0..57c83c0 100644 --- a/src/gadgets/max.rs +++ b/src/gadgets/max.rs @@ -1,7 +1,7 @@ use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ - circuit::{AssignedCell, Layouter, Region}, + circuit::{AssignedCell, Layouter, Region, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, @@ -101,9 +101,9 @@ impl Gadget for MaxChip { &self, region: &mut Region, row_offset: usize, - vec_inputs: &Vec>>, - _single_inputs: &Vec<&AssignedCell>, - ) -> Result>, Error> { + 
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     assert_eq!(vec_inputs.len(), 1);
     let inp = &vec_inputs[0];
@@ -116,30 +116,30 @@ impl<F: PrimeField> Gadget<F> for MaxChip<F> {

     let mut outp = vec![];

-    let chunks: Vec<&[&AssignedCell<F, F>]> = inp.chunks(self.num_outputs_per_row()).collect();
+    let chunks: Vec<&[(&AssignedCell<F, F>, F)]> = inp.chunks(self.num_outputs_per_row()).collect();
     let i1 = chunks[0];
     let i2 = chunks[1];
     for (idx, (inp1, inp2)) in i1.iter().zip(i2.iter()).enumerate() {
       let offset = idx * self.num_cols_per_op();
-      inp1
+      inp1.0
         .copy_advice(|| "", region, self.config.columns[offset + 0], row_offset)
         .unwrap();
-      inp2
+      inp2.0
         .copy_advice(|| "", region, self.config.columns[offset + 1], row_offset)
         .unwrap();

-      let max = inp1.value().zip(inp2.value()).map(|(a, b)| {
-        let a = convert_to_u64(&(*a + min_val_pos));
-        let b = convert_to_u64(&(*b + min_val_pos));
+      let max = {
+        let a = convert_to_u64(&(inp1.1 + min_val_pos));
+        let b = convert_to_u64(&(inp2.1 + min_val_pos));
         let max = a.max(b);
         let max = F::from(max) - min_val_pos;
         max
-      });
+      };

       let res = region
-        .assign_advice(|| "", self.config.columns[offset + 2], row_offset, || max)
+        .assign_advice(|| "", self.config.columns[offset + 2], row_offset, || Value::known(max))
         .unwrap();
-      outp.push(res);
+      outp.push((res, max));
     }

     Ok(outp)
@@ -148,9 +148,9 @@ impl<F: PrimeField> Gadget<F> for MaxChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let mut inputs = vec_inputs[0].clone();
     let first = inputs[0];
@@ -168,9 +168,9 @@ impl<F: PrimeField> Gadget<F> for MaxChip<F> {
     )?;
     for _ in 0..num_iters {
       while outputs.len() % self.num_inputs_per_row() != 0 {
-        outputs.push(first.clone());
+        outputs.push((first.0.clone(), first.1));
       }
-      let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>();
+      let tmp = outputs.iter().map(|x| (&x.0, x.1)).collect::<Vec<_>>();
       outputs = self.op_aligned_rows(
         layouter.namespace(|| "max forward"),
         &vec![tmp],
diff --git a/src/gadgets/mul_pairs.rs b/src/gadgets/mul_pairs.rs
index 12e4c64..b53ae2b 100644
--- a/src/gadgets/mul_pairs.rs
+++ b/src/gadgets/mul_pairs.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error},
   poly::Rotation,
 };
@@ -81,9 +81,9 @@ impl<F: PrimeField> Gadget<F> for MulPairsChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    _single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let inp1 = &vec_inputs[0];
     let inp2 = &vec_inputs[1];
     assert_eq!(inp1.len(), inp2.len());
@@ -98,12 +98,12 @@ impl<F: PrimeField> Gadget<F> for MulPairsChip<F> {
     let mut outps = vec![];
     for i in 0..inp1.len() {
       let offset = i * self.num_cols_per_op();
-      let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
-      let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?;
-      let outp = inp1.value().map(|x: &F| x.to_owned()) * inp2.value().map(|x: &F| x.to_owned());
+      inp1[i].0.copy_advice(|| "", region, columns[offset + 0], row_offset)?;
+      inp2[i].0.copy_advice(|| "", region, columns[offset + 1], row_offset)?;
+      let outp = inp1[i].1 * inp2[i].1;

-      let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?;
-      outps.push(outp);
+      let outpc = region.assign_advice(|| "", columns[offset + 2], row_offset, || Value::known(outp))?;
+      outps.push((outpc, outp));
     }
     Ok(outps)
   }
@@ -111,17 +111,17 @@ impl<F: PrimeField> Gadget<F> for MulPairsChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let zero = &single_inputs[0];

     let mut inp1 = vec_inputs[0].clone();
     let mut inp2 = vec_inputs[1].clone();
     let initial_len = inp1.len();
     while inp1.len() % self.num_inputs_per_row() != 0 {
-      inp1.push(zero);
-      inp2.push(zero);
+      inp1.push(*zero);
+      inp2.push(*zero);
     }

     let vec_inputs = vec![inp1, inp2];
diff --git a/src/gadgets/nonlinear/exp.rs b/src/gadgets/nonlinear/exp.rs
index 31c4c65..58c9999 100644
--- a/src/gadgets/nonlinear/exp.rs
+++ b/src/gadgets/nonlinear/exp.rs
@@ -80,9 +80,9 @@ impl<F: PrimeField> Gadget<F> for ExpGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::op_row_region(
       self,
       region,
@@ -96,9 +96,9 @@ impl<F: PrimeField> Gadget<F> for ExpGadgetChip<F> {
   fn forward(
     &self,
     layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
   }
 }
diff --git a/src/gadgets/nonlinear/logistic.rs b/src/gadgets/nonlinear/logistic.rs
index ed97f0e..0ef666b 100644
--- a/src/gadgets/nonlinear/logistic.rs
+++ b/src/gadgets/nonlinear/logistic.rs
@@ -82,9 +82,9 @@ impl<F: PrimeField> Gadget<F> for LogisticGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::op_row_region(
       self,
       region,
@@ -98,9 +98,9 @@ impl<F: PrimeField> Gadget<F> for LogisticGadgetChip<F> {
   fn forward(
     &self,
     layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
   }
 }
diff --git a/src/gadgets/nonlinear/non_linearity.rs b/src/gadgets/nonlinear/non_linearity.rs
index f7a9eff..8a6a560 100644
--- a/src/gadgets/nonlinear/non_linearity.rs
+++ b/src/gadgets/nonlinear/non_linearity.rs
@@ -120,10 +120,10 @@ pub trait NonLinearGadget<F: PrimeField>: Gadget<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    _single_inputs: &Vec<&AssignedCell<F, F>>,
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
     gadget_config: Rc<GadgetConfig>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let columns = &gadget_config.columns;
     let inp = &vec_inputs[0];
     let map = self.get_map();
@@ -139,9 +139,9 @@ pub trait NonLinearGadget<F: PrimeField>: Gadget<F> {
     let mut outps = vec![];
     for i in 0..inp.len() {
       let offset = i * 2;
-      inp[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
-      let outp = inp[i].value().map(|x: &F| {
-        let pos = convert_to_u128(&(*x + shift_val_pos)) as i128 - shift_val_pos_i64 as i128;
+      inp[i].0.copy_advice(|| "", region, columns[offset + 0], row_offset)?;
+      let outp = {
+        let pos = convert_to_u128(&(inp[i].1 + shift_val_pos)) as i128 - shift_val_pos_i64 as i128;
         let x = pos as i64 - min_val;
         let val = *map.get(&x).unwrap();
         if x == 0 {
@@ -154,11 +154,11 @@ pub trait NonLinearGadget<F: PrimeField>: Gadget<F> {
             F::from(val_pos as u64) - F::from(shift_val_pos_i64 as u64)
           }
         }
-      });
+      };

-      let outp =
-        region.assign_advice(|| "nonlinearity", columns[offset + 1], row_offset, || outp)?;
-      outps.push(outp);
+      let outpc =
+        region.assign_advice(|| "nonlinearity", columns[offset + 1], row_offset, || Value::known(outp))?;
+      outps.push((outpc, outp));
     }

     Ok(outps)
@@ -167,15 +167,15 @@ pub trait NonLinearGadget<F: PrimeField>: Gadget<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let zero = &single_inputs[0];
     let inp_len = vec_inputs[0].len();
     let mut inp = vec_inputs[0].clone();

     while inp.len() % self.num_inputs_per_row() != 0 {
-      inp.push(zero);
+      inp.push(*zero);
     }

     let vec_inputs = vec![inp];
diff --git a/src/gadgets/nonlinear/pow.rs b/src/gadgets/nonlinear/pow.rs
index c628ee6..999c9f7 100644
--- a/src/gadgets/nonlinear/pow.rs
+++ b/src/gadgets/nonlinear/pow.rs
@@ -81,9 +81,9 @@ impl<F: PrimeField> Gadget<F> for PowGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::op_row_region(
       self,
       region,
@@ -97,9 +97,9 @@ impl<F: PrimeField> Gadget<F> for PowGadgetChip<F> {
   fn forward(
     &self,
     layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
   }
 }
diff --git a/src/gadgets/nonlinear/relu.rs b/src/gadgets/nonlinear/relu.rs
index d54ca8b..a60afaf 100644
--- a/src/gadgets/nonlinear/relu.rs
+++ b/src/gadgets/nonlinear/relu.rs
@@ -76,9 +76,9 @@ impl<F: PrimeField> Gadget<F> for ReluChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::op_row_region(
       self,
       region,
@@ -92,9 +92,9 @@ impl<F: PrimeField> Gadget<F> for ReluChip<F> {
   fn forward(
     &self,
     layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
   }
 }
diff --git a/src/gadgets/nonlinear/rsqrt.rs b/src/gadgets/nonlinear/rsqrt.rs
index eaea6ba..49716f2 100644
--- a/src/gadgets/nonlinear/rsqrt.rs
+++ b/src/gadgets/nonlinear/rsqrt.rs
@@ -78,9 +78,9 @@ impl<F: PrimeField> Gadget<F> for RsqrtGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::op_row_region(
       self,
       region,
@@ -94,9 +94,9 @@ impl<F: PrimeField> Gadget<F> for RsqrtGadgetChip<F> {
   fn forward(
     &self,
     layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
   }
 }
diff --git a/src/gadgets/nonlinear/sqrt.rs b/src/gadgets/nonlinear/sqrt.rs
index 7faeecb..819c87a 100644
--- a/src/gadgets/nonlinear/sqrt.rs
+++ b/src/gadgets/nonlinear/sqrt.rs
@@ -77,9 +77,9 @@ impl<F: PrimeField> Gadget<F> for SqrtGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::op_row_region(
       self,
       region,
@@ -93,9 +93,9 @@ impl<F: PrimeField> Gadget<F> for SqrtGadgetChip<F> {
   fn forward(
     &self,
     layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
   }
 }
diff --git a/src/gadgets/nonlinear/tanh.rs b/src/gadgets/nonlinear/tanh.rs
index 1afa94b..e387eec 100644
--- a/src/gadgets/nonlinear/tanh.rs
+++ b/src/gadgets/nonlinear/tanh.rs
@@ -80,9 +80,9 @@ impl<F: PrimeField> Gadget<F> for TanhGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::op_row_region(
       self,
       region,
@@ -96,9 +96,9 @@ impl<F: PrimeField> Gadget<F> for TanhGadgetChip<F> {
   fn forward(
     &self,
     layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
   }
 }
diff --git a/src/gadgets/sqrt_big.rs b/src/gadgets/sqrt_big.rs
index a44e3ed..2cd97dd 100644
--- a/src/gadgets/sqrt_big.rs
+++ b/src/gadgets/sqrt_big.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error, Expression},
   poly::Rotation,
@@ -116,9 +116,9 @@ impl<F: PrimeField> Gadget<F> for SqrtBigChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    _single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let inps = &vec_inputs[0];

     if self.config.use_selectors {
@@ -129,26 +129,26 @@ impl<F: PrimeField> Gadget<F> for SqrtBigChip<F> {
     let mut outp_cells = vec![];
     for (i, inp) in inps.iter().enumerate() {
       let offset = i * self.num_cols_per_op();
-      inp.copy_advice(
+      inp.0.copy_advice(
         || "sqrt_big",
         region,
         self.config.columns[offset],
         row_offset,
       )?;

-      let outp = inp.value().map(|x: &F| {
-        let inp_val = convert_to_u64(x) as i64;
+      let outp = {
+        let inp_val = convert_to_u64(&inp.1) as i64;
         let fsqrt = (inp_val as f64).sqrt();
         let sqrt = fsqrt.round() as i64;
         let rem = inp_val - sqrt * sqrt;
         (sqrt, rem)
-      });
+      };

       let sqrt_cell = region.assign_advice(
         || "sqrt_big",
         self.config.columns[offset + 1],
         row_offset,
-        || outp.map(|x| F::from(x.0 as u64)),
+        || Value::known(F::from(outp.0 as u64)),
       )?;

       let _rem_cell = region.assign_advice(
         || "rem",
         self.config.columns[offset + 2],
         row_offset,
         || {
-          outp.map(|x| {
-            let rem_pos = x.1 + x.0;
-            F::from(rem_pos as u64) - F::from(x.0 as u64)
-          })
+          let tmp = {
+            let rem_pos = outp.1 + outp.0;
+            F::from(rem_pos as u64) - F::from(outp.0 as u64)
+          };
+          Value::known(tmp)
         },
       )?;
-      outp_cells.push(sqrt_cell);
+      outp_cells.push((sqrt_cell, F::from(outp.0 as u64)));
     }

     Ok(outp_cells)
@@ -171,15 +172,15 @@ impl<F: PrimeField> Gadget<F> for SqrtBigChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let zero = &single_inputs[0];

     let mut inp = vec_inputs[0].clone();
     let inp_len = inp.len();
     while inp.len() % self.num_inputs_per_row() != 0 {
-      inp.push(zero);
+      inp.push(*zero);
     }

     let vec_inputs = vec![inp];
diff --git a/src/gadgets/square.rs b/src/gadgets/square.rs
index 9f13742..0e49b03 100644
--- a/src/gadgets/square.rs
+++ b/src/gadgets/square.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Region},
+  circuit::{AssignedCell, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error},
   poly::Rotation,
@@ -69,9 +69,9 @@ impl<F: PrimeField> Gadget<F> for SquareGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    _single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     assert_eq!(vec_inputs.len(), 1);

     if self.config.use_selectors {
@@ -83,15 +83,15 @@ impl<F: PrimeField> Gadget<F> for SquareGadgetChip<F> {
     let mut outp = vec![];
     for (i, inp) in inps.iter().enumerate() {
       let offset = i * self.num_cols_per_op();
-      inp.copy_advice(|| "", region, self.config.columns[offset], row_offset)?;
-      let outp_val = inp.value().map(|x: &F| x.to_owned() * x.to_owned());
+      inp.0.copy_advice(|| "", region, self.config.columns[offset], row_offset)?;
+      let outp_val = inp.1 * inp.1;
       let outp_cell = region.assign_advice(
         || "square output",
         self.config.columns[offset + 1],
         row_offset,
-        || outp_val,
+        || Value::known(outp_val),
       )?;
-      outp.push(outp_cell);
+      outp.push((outp_cell, outp_val));
     }

     Ok(outp)
@@ -100,15 +100,15 @@ impl<F: PrimeField> Gadget<F> for SquareGadgetChip<F> {
   fn forward(
     &self,
     mut layouter: impl halo2_proofs::circuit::Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let zero = &single_inputs[0];

     let mut inp = vec_inputs[0].clone();
     let initial_len = inp.len();
     while inp.len() % self.num_inputs_per_row() != 0 {
-      inp.push(zero);
+      inp.push(*zero);
     }

     let vec_inputs = vec![inp];
diff --git a/src/gadgets/squared_diff.rs b/src/gadgets/squared_diff.rs
index 825542c..59d48ae 100644
--- a/src/gadgets/squared_diff.rs
+++ b/src/gadgets/squared_diff.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error},
   poly::Rotation,
@@ -80,9 +80,9 @@ impl<F: PrimeField> Gadget<F> for SquaredDiffGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    _single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let inp1 = &vec_inputs[0];
     let inp2 = &vec_inputs[1];
     assert_eq!(inp1.len(), inp2.len());
@@ -97,13 +97,13 @@ impl<F: PrimeField> Gadget<F> for SquaredDiffGadgetChip<F> {
     let mut outps = vec![];
     for i in 0..inp1.len() {
       let offset = i * self.num_cols_per_op();
-      let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
-      let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?;
-      let outp = inp1.value().map(|x: &F| x.to_owned()) - inp2.value().map(|x: &F| x.to_owned());
+      inp1[i].0.copy_advice(|| "", region, columns[offset + 0], row_offset)?;
+      inp2[i].0.copy_advice(|| "", region, columns[offset + 1], row_offset)?;
+      let outp = inp1[i].1 - inp2[i].1;
       let outp = outp * outp;

-      let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?;
-      outps.push(outp);
+      let outpc = region.assign_advice(|| "", columns[offset + 2], row_offset, || Value::known(outp))?;
+      outps.push((outpc, outp));
     }
     Ok(outps)
   }
@@ -111,17 +111,17 @@ impl<F: PrimeField> Gadget<F> for SquaredDiffGadgetChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let zero = &single_inputs[0];

     let mut inp1 = vec_inputs[0].clone();
     let mut inp2 = vec_inputs[1].clone();
     let initial_len = inp1.len();
     while inp1.len() % self.num_inputs_per_row() != 0 {
-      inp1.push(zero);
-      inp2.push(zero);
+      inp1.push(*zero);
+      inp2.push(*zero);
     }

     let vec_inputs = vec![inp1, inp2];
diff --git a/src/gadgets/sub_pairs.rs b/src/gadgets/sub_pairs.rs
index 2b3aaca..50affa3 100644
--- a/src/gadgets/sub_pairs.rs
+++ b/src/gadgets/sub_pairs.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error},
   poly::Rotation,
@@ -80,9 +80,9 @@ impl<F: PrimeField> Gadget<F> for SubPairsChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    _single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let inp1 = &vec_inputs[0];
     let inp2 = &vec_inputs[1];
     assert_eq!(inp1.len(), inp2.len());
@@ -97,12 +97,12 @@ impl<F: PrimeField> Gadget<F> for SubPairsChip<F> {
     let mut outps = vec![];
     for i in 0..inp1.len() {
       let offset = i * self.num_cols_per_op();
-      let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
-      let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?;
-      let outp = inp1.value().map(|x: &F| x.to_owned()) - inp2.value().map(|x: &F| x.to_owned());
+      inp1[i].0.copy_advice(|| "", region, columns[offset + 0], row_offset)?;
+      inp2[i].0.copy_advice(|| "", region, columns[offset + 1], row_offset)?;
+      let outp = inp1[i].1 - inp2[i].1;

-      let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?;
-      outps.push(outp);
+      let outpc = region.assign_advice(|| "", columns[offset + 2], row_offset, || Value::known(outp))?;
+      outps.push((outpc, outp));
     }
     Ok(outps)
   }
@@ -110,17 +110,17 @@ impl<F: PrimeField> Gadget<F> for SubPairsChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let zero = &single_inputs[0];

     let mut inp1 = vec_inputs[0].clone();
     let mut inp2 = vec_inputs[1].clone();
     let initial_len = inp1.len();
     while inp1.len() % self.num_inputs_per_row() != 0 {
-      inp1.push(zero);
+      inp1.push(*zero);
-      inp2.push(zero);
+      inp2.push(*zero);
     }

     let vec_inputs = vec![inp1, inp2];
diff --git a/src/gadgets/update.rs b/src/gadgets/update.rs
index 06338a1..8999180 100644
--- a/src/gadgets/update.rs
+++ b/src/gadgets/update.rs
@@ -1,7 +1,7 @@
 use std::marker::PhantomData;

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error, Expression},
   poly::Rotation,
@@ -106,9 +106,9 @@ impl<F: PrimeField> Gadget<F> for UpdateGadgetChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    _single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    _single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let div_val = self.config.scale_factor as i64;
     let div_val_f = F::from(div_val as u64);
     let eta = div_val / 1000;
@@ -132,15 +132,15 @@ impl<F: PrimeField> Gadget<F> for UpdateGadgetChip<F> {

     for i in 0..w.len() {
       let offset = i * self.num_cols_per_op();
-      let _w_cell = w[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
-      let _dw_cell = dw[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?;
+      let _w_cell = w[i].0.copy_advice(|| "", region, columns[offset + 0], row_offset)?;
+      let _dw_cell = dw[i].0.copy_advice(|| "", region, columns[offset + 1], row_offset)?;

-      let w_val = w[i].value().map(|x: &F| x.to_owned());
-      let dw_val = dw[i].value().map(|x: &F| x.to_owned());
-      let out_scaled = w_val.zip(dw_val).map(|(w, dw)| w * div_val_f - dw * eta);
+      let w_val = w[i].1;
+      let dw_val = dw[i].1;
+      let out_scaled = w_val * div_val_f - dw_val * eta;

-      let div_mod = out_scaled.map(|x| {
-        let x_pos = x + div_inp_min_val_pos;
+      let div_mod = {
+        let x_pos = out_scaled + div_inp_min_val_pos;
         let x_pos = if x_pos > F::ZERO {
           x_pos
         } else {
@@ -151,7 +151,7 @@ impl<F: PrimeField> Gadget<F> for UpdateGadgetChip<F> {
         let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 as i64 / div_val);
         let mod_res = inp as i64 % div_val;
         (div_res, mod_res)
-      });
+      };

       let div_res_cell = region
         .assign_advice(
           || "div_res",
           self.config.columns[offset + 2],
           row_offset,
           || {
-            div_mod.map(|(x, _): (i64, i64)| {
-              F::from((x - div_outp_min_val as i64) as u64) - F::from(-div_outp_min_val as u64)
-            })
+            Value::known(
+              F::from((div_mod.0 - div_outp_min_val as i64) as u64) - F::from(-div_outp_min_val as u64),
+            )
           },
         )
         .unwrap();
@@ -171,11 +171,14 @@ impl<F: PrimeField> Gadget<F> for UpdateGadgetChip<F> {
           || "mod_res",
           self.config.columns[offset + 3],
           row_offset,
-          || div_mod.map(|(_, x): (i64, i64)| F::from(x as u64)),
+          || Value::known(F::from(div_mod.1 as u64)),
         )
         .unwrap();

-      output_cells.push(div_res_cell);
+      output_cells.push((
+        div_res_cell,
+        F::from((div_mod.0 - div_outp_min_val as i64) as u64) - F::from(-div_outp_min_val as u64),
+      ));
     }
     Ok(output_cells)
   }
@@ -183,19 +186,19 @@ impl<F: PrimeField> Gadget<F> for UpdateGadgetChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let zero = &single_inputs[0];
     let mut w = vec_inputs[0].clone();
     let mut dw = vec_inputs[1].clone();
     let initial_len = w.len();

     while !w.len() % self.num_cols_per_op() == 0 {
-      w.push(zero);
+      w.push(*zero);
     }

     while !dw.len() % self.num_cols_per_op() == 0 {
-      dw.push(zero);
+      dw.push(*zero);
     }

     let res = self.op_aligned_rows(
diff --git a/src/gadgets/var_div.rs b/src/gadgets/var_div.rs
index b58fbfa..18d7e0f 100644
--- a/src/gadgets/var_div.rs
+++ b/src/gadgets/var_div.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error, Expression},
   poly::Rotation,
@@ -119,9 +119,9 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let a_vec = &vec_inputs[0];
     // let zero = single_inputs[0].clone();
     let b = &single_inputs[1];
@@ -134,7 +134,7 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {
       selector.enable(region, row_offset)?;
     }

-    b.copy_advice(
+    b.0.copy_advice(
       || "",
       region,
       self.config.columns[self.config.columns.len() - 1],
@@ -144,15 +144,15 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {
     let mut div_out = vec![];
     for (i, a) in a_vec.iter().enumerate() {
       let offset = i * self.num_cols_per_op();
-      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)?;
+      a.0.copy_advice(|| "", region, self.config.columns[offset], row_offset)?;

-      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
-        let b = convert_to_u128(b);
+      let div_mod = {
+        let b = convert_to_u128(&b.1);
         // Needs to be divisible by b
         let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);
         let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);

-        let a_pos = *a + div_inp_min_val_pos;
+        let a_pos = a.1 + div_inp_min_val_pos;
         let a = convert_to_u128(&a_pos);
         // c = (2 * a + b) / (2 * b)
         let c_pos = a.rounded_div(b);
@@ -163,27 +163,26 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {
         let r = 2 * rem_floor + (b as i128);
         let r = r as i64;
         (c, r)
-      });
+      };
+      let div_val = {
+        let offset = F::from(-div_outp_min_val_i64 as u64);
+        let c = F::from((div_mod.0 - div_outp_min_val_i64) as u64);
+        c - offset
+      };
       let div_cell = region.assign_advice(
         || "",
         self.config.columns[offset + 1],
         row_offset,
-        || {
-          div_mod.map(|(c, _)| {
-            let offset = F::from(-div_outp_min_val_i64 as u64);
-            let c = F::from((c - div_outp_min_val_i64) as u64);
-            c - offset
-          })
-        },
+        || Value::known(div_val),
       )?;
       let _mod_cell = region.assign_advice(
         || "",
         self.config.columns[offset + 2],
         row_offset,
-        || div_mod.map(|(_, r)| F::from(r as u64)),
+        || Value::known(F::from(div_mod.1 as u64)),
       )?;
-      div_out.push(div_cell);
+      div_out.push((div_cell, div_val));
     }

     Ok(div_out)
@@ -192,16 +191,16 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let mut inps = vec_inputs[0].clone();
     let initial_len = inps.len();

     // Needed to pad: bias - bias = 0
     let default = &single_inputs[0];
     while inps.len() % self.num_inputs_per_row() != 0 {
-      inps.push(&default);
+      inps.push(*default);
     }

     let res = self.op_aligned_rows(layouter.namespace(|| "var_div"), &vec![inps], single_inputs)?;
diff --git a/src/gadgets/var_div_big.rs b/src/gadgets/var_div_big.rs
index d1ea412..ca41747 100644
--- a/src/gadgets/var_div_big.rs
+++ b/src/gadgets/var_div_big.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error, Expression},
   poly::Rotation,
@@ -134,9 +134,9 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let a_vec = &vec_inputs[0];
     // let zero = single_inputs[0].clone();
     let b = &single_inputs[1];
@@ -154,7 +154,7 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {
       selector.enable(region, row_offset)?;
     }

-    b.copy_advice(
+    b.0.copy_advice(
       || "",
       region,
       self.config.columns[self.config.columns.len() - 1],
@@ -164,16 +164,16 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {
     let mut div_out = vec![];
     for (i, a) in a_vec.iter().enumerate() {
       let offset = i * self.num_cols_per_op();
-      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)
+      a.0.copy_advice(|| "", region, self.config.columns[offset], row_offset)
         .unwrap();

-      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
-        let b = convert_to_u128(b);
+      let div_mod = {
+        let b = convert_to_u128(&b.1);
         // Needs to be divisible by b
         let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);
         let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);

-        let a_pos = *a + div_inp_min_val_pos;
+        let a_pos = a.1 + div_inp_min_val_pos;
         let a = convert_to_u128(&a_pos);
         // c = (2 * a + b) / (2 * b)
         let c_pos = a.rounded_div(b);
@@ -184,70 +184,71 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {
         let r = 2 * rem_floor + (b as i128);
         let r = r as i64;
         (c, r)
-      });
+      };

-      let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {
-        let b = convert_to_u128(b) as i64;
-        let val = 2 * b - r;
+      let br_split = {
+        let b = convert_to_u128(&b.1) as i64;
+        let val = 2 * b - div_mod.1;
         let p1 = val / num_rows;
         let p0 = val % num_rows;
         // val = p1 * max_val + p0
         (p1, p0)
-      });
+      };

-      let r_split = div_mod.map(|(_, r)| {
-        let p1 = r / num_rows;
-        let p0 = r % num_rows;
+      let r_split = {
+        let p1 = div_mod.1 / num_rows;
+        let p0 = div_mod.1 % num_rows;
         // val = p1 * max_val + p0
         (p1, p0)
-      });
+      };
+
+      // TOCHECK
+      let div_val = {
+        let offset = F::from(-div_outp_min_val_i64 as u64);
+        let c = F::from((div_mod.0 - div_outp_min_val_i64) as u64);
+        c - offset
+      };

       let div_cell = region.assign_advice(
         || "",
         self.config.columns[offset + 1],
         row_offset,
-        || {
-          div_mod.map(|(c, _)| {
-            let offset = F::from(-div_outp_min_val_i64 as u64);
-            let c = F::from((c - div_outp_min_val_i64) as u64);
-            c - offset
-          })
-        },
+        || Value::known(div_val),
       )?;
       let _mod_cell = region.assign_advice(
         || "",
         self.config.columns[offset + 2],
         row_offset,
-        || div_mod.map(|(_, r)| F::from(r as u64)),
+        || Value::known(F::from(div_mod.1 as u64)),
       )?;
       // Assign 2 * b - r to the next 2 columns
       let _br_split_cell_1 = region.assign_advice(
         || "",
         self.config.columns[offset + 3],
         row_offset,
-        || br_split.map(|(p1, _)| F::from(p1 as u64)),
+        || Value::known(F::from(br_split.0 as u64)),
      )?;
       let _br_split_cell_2 = region.assign_advice(
         || "",
         self.config.columns[offset + 4],
         row_offset,
-        || br_split.map(|(_, p0)| F::from(p0 as u64)),
+        || Value::known(F::from(br_split.1 as u64)),
       )?;
       // Assign r to the next 2 columns
       let _r_split_cell_1 = region.assign_advice(
         || "",
         self.config.columns[offset + 5],
         row_offset,
-        || r_split.map(|(p1, _)| F::from(p1 as u64)),
+        || Value::known(F::from(r_split.0 as u64)),
       )?;
       let _r_split_cell_2 = region.assign_advice(
         || "",
         self.config.columns[offset + 6],
         row_offset,
-        || r_split.map(|(_, p0)| F::from(p0 as u64)),
+        || Value::known(F::from(r_split.1 as u64)),
       )?;

-      div_out.push(div_cell);
+      div_out.push((div_cell, div_val));
     }

     Ok(div_out)
@@ -256,16 +257,16 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let mut inps = vec_inputs[0].clone();
     let initial_len = inps.len();

     // Needed to pad
     let default = &single_inputs[0];
     while inps.len() % self.num_inputs_per_row() != 0 {
-      inps.push(&default);
+      inps.push(*default);
     }

     let res = self.op_aligned_rows(
diff --git a/src/gadgets/var_div_big3.rs b/src/gadgets/var_div_big3.rs
index 90c6651..38b7f53 100644
--- a/src/gadgets/var_div_big3.rs
+++ b/src/gadgets/var_div_big3.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{ConstraintSystem, Error, Expression},
   poly::Rotation,
@@ -147,9 +147,9 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {
     &self,
     region: &mut Region<F>,
     row_offset: usize,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let a_vec = &vec_inputs[0];
     // let zero = single_inputs[0].clone();
     let b = &single_inputs[1];
@@ -166,7 +166,7 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {
       selector.enable(region, row_offset)?;
     }

-    b.copy_advice(
+    b.0.copy_advice(
       || "",
       region,
       self.config.columns[self.config.columns.len() - 1],
@@ -176,15 +176,15 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {
     let mut div_out = vec![];
     for (i, a) in a_vec.iter().enumerate() {
       let offset = i * self.num_cols_per_op();
-      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)
+      a.0.copy_advice(|| "", region, self.config.columns[offset], row_offset)
         .unwrap();

-      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
-        let b = convert_to_u128(b);
+      let div_mod = {
+        let b = convert_to_u128(&b.1);
         let c_shift = (-c_shift_base) as u128 / b * b;
         let div_inp_min_val_pos = F::from(c_shift as u64);

-        let a_pos = *a + div_inp_min_val_pos;
+        let a_pos = a.1 + div_inp_min_val_pos;
         let a = convert_to_u128(&a_pos);
         // c = (2 * a + b) / (2 * b)
         let c_pos = a.rounded_div(b);
@@ -194,84 +194,84 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {
         let rem_floor = (a as i128) - (c_pos * b) as i128;
         let r = 2 * rem_floor + (b as i128);
         (c, r)
-      });
+      };

-      let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {
-        let b = convert_to_u128(b) as i128;
-        let val = 2 * b - r;
+      let br_split = {
+        let b = convert_to_u128(&b.1) as i128;
+        let val = 2 * b - div_mod.1;
         let p2 = val / (num_rows * num_rows);
         let p1 = (val % (num_rows * num_rows)) / num_rows;
         let p0 = val % num_rows;
         // val = p2 * max_val^2 + p1 * max_val + p0
         (p2, p1, p0)
-      });
+      };

-      let r_split = div_mod.map(|(_, r)| {
-        let p2 = r / (num_rows * num_rows);
-        let p1 = (r % (num_rows * num_rows)) / num_rows;
-        let p0 = r % num_rows;
+      let r_split = {
+        let p2 = div_mod.1 / (num_rows * num_rows);
+        let p1 = (div_mod.1 % (num_rows * num_rows)) / num_rows;
+        let p0 = div_mod.1 % num_rows;
         // val = p1 * max_val + p0
         (p2, p1, p0)
-      });
+      };

+      let div_val = {
+        let offset = F::from(-c_shift_base as u64);
+        let c = F::from((div_mod.0 - c_shift_base) as u64);
+        c - offset
+      };
       let div_cell = region.assign_advice(
         || "",
         self.config.columns[offset + 1],
         row_offset,
-        || {
-          div_mod.map(|(c, _)| {
-            let offset = F::from(-c_shift_base as u64);
-            let c = F::from((c - c_shift_base) as u64);
-            c - offset
-          })
-        },
+        || Value::known(div_val),
       )?;

       let _mod_cell = region.assign_advice(
         || "",
         self.config.columns[offset + 2],
         row_offset,
-        || div_mod.map(|(_, r)| F::from(r as u64)),
+        || Value::known(F::from(div_mod.1 as u64)),
       )?;
       // Assign 2 * b - r to the next 3 columns
       let _br_split_cell_2 = region.assign_advice(
         || "",
         self.config.columns[offset + 3],
         row_offset,
-        || br_split.map(|(p2, _, _)| F::from(p2 as u64)),
+        || Value::known(F::from(br_split.0 as u64)),
       )?;
       let _br_split_cell_1 = region.assign_advice(
         || "",
         self.config.columns[offset + 4],
         row_offset,
-        || br_split.map(|(_, p1, _)| F::from(p1 as u64)),
+        || Value::known(F::from(br_split.1 as u64)),
       )?;
       let _br_split_cell_0 = region.assign_advice(
         || "",
         self.config.columns[offset + 5],
         row_offset,
-        || br_split.map(|(_, _, p0)| F::from(p0 as u64)),
+        || Value::known(F::from(br_split.2 as u64)),
       )?;
       // Assign r to the next 3 columns
       let _r_split_cell_2 = region.assign_advice(
         || "",
         self.config.columns[offset + 6],
         row_offset,
-        || r_split.map(|(p2, _, _)| F::from(p2 as u64)),
+        || Value::known(F::from(r_split.0 as u64)),
       )?;
       let _r_split_cell_1 = region.assign_advice(
         || "",
         self.config.columns[offset + 7],
         row_offset,
-        || r_split.map(|(_, p1, _)| F::from(p1 as u64)),
+        || Value::known(F::from(r_split.1 as u64)),
       )?;
       let _r_split_cell_0 = region.assign_advice(
         || "",
         self.config.columns[offset + 8],
         row_offset,
-        || r_split.map(|(_, _, p0)| F::from(p0 as u64)),
+        || Value::known(F::from(r_split.2 as u64)),
       )?;

-      div_out.push(div_cell);
+      div_out.push((div_cell, div_val));
     }

     Ok(div_out)
@@ -280,16 +280,16 @@ impl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {
   fn forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    single_inputs: &Vec<&AssignedCell<F, F>>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    single_inputs: &Vec<(&AssignedCell<F, F>, F)>,
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let mut inps = vec_inputs[0].clone();
     let initial_len = inps.len();

     // Needed to pad
     let default = &single_inputs[0];
     while inps.len() % self.num_inputs_per_row() != 0 {
-      inps.push(&default);
+      inps.push(*default);
     }

     let res = self.op_aligned_rows(
diff --git a/src/layers/arithmetic.rs b/src/layers/arithmetic.rs
index 61073af..f4dca50 100644
--- a/src/layers/arithmetic.rs
+++ b/src/layers/arithmetic.rs
@@ -19,10 +19,10 @@ pub trait Arithmetic<F: PrimeField> {
   fn gadget_forward(
     &self,
     layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    constants: &Vec<&AssignedCell<F, F>>,
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    constants: &Vec<(&AssignedCell<F, F>, F)>,
     gadget_config: Rc<GadgetConfig>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error>;
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error>;

   fn arithmetic_forward(
     &self,
     mut layouter: impl Layouter<F>,
     tensors: &Vec<AssignedTensor<F>>,
     constants: &HashMap<i64, CellRc<F>>,
     gadget_config: Rc<GadgetConfig>,
-  ) -> Result<(Vec<CellRc<F>>, Vec<usize>), Error> {
+  ) -> Result<(Vec<(CellRc<F>, F)>, Vec<usize>), Error> {
     assert_eq!(tensors.len(), 2);
     // println!("tensors: {:?} {:?}", tensors[0].shape(), tensors[1].shape());
     let (inp1, inp2) = broadcast(&tensors[0], &tensors[1]);
@@ -39,17 +39,17 @@ pub trait Arithmetic<F: PrimeField> {

     let zero = constants.get(&0).unwrap().as_ref();

-    let inp1_vec = inp1.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
-    let inp2_vec = inp2.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
+    let inp1_vec = inp1.iter().map(|x| (x.0.as_ref(), x.1)).collect::<Vec<_>>();
+    let inp2_vec = inp2.iter().map(|x| (x.0.as_ref(), x.1)).collect::<Vec<_>>();
     let vec_inputs = vec![inp1_vec, inp2_vec];
-    let constants = vec![zero];
+    let constants = vec![(zero, F::ZERO)];
     let out = self.gadget_forward(
""), &vec_inputs, &constants, gadget_config.clone(), )?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); Ok((out, out_shape.to_vec())) } } diff --git a/src/layers/arithmetic/add.rs b/src/layers/arithmetic/add.rs index 565e1fc..e269ded 100644 --- a/src/layers/arithmetic/add.rs +++ b/src/layers/arithmetic/add.rs @@ -39,10 +39,10 @@ impl Arithmetic for AddChip { fn gadget_forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - constants: &Vec<&AssignedCell>, + vec_inputs: &Vec, F)>>, + constants: &Vec<(&AssignedCell, F)>, gadget_config: Rc, - ) -> Result>, Error> { + ) -> Result, F)>, Error> { let add_pairs_chip = AddPairsChip::::construct(gadget_config); let out = add_pairs_chip.forward(layouter.namespace(|| "add chip"), &vec_inputs, constants)?; Ok(out) @@ -72,13 +72,13 @@ impl Layer for AddChip { // Do the fused activation let out = if activation == ActivationType::Relu { let zero = constants.get(&0).unwrap(); - let single_inps = vec![zero.as_ref()]; + let single_inps = vec![(zero.as_ref(), F::ZERO)]; - let out = out.iter().map(|x| x.as_ref()).collect::>(); + let out = out.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let relu_chip = ReluChip::::construct(gadget_config); let out = relu_chip.forward(layouter.namespace(|| "relu"), &vec![out], &single_inps)?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); out } else if activation == ActivationType::None { out diff --git a/src/layers/arithmetic/div_var.rs b/src/layers/arithmetic/div_var.rs index 594ddb5..d8303c1 100644 --- a/src/layers/arithmetic/div_var.rs +++ b/src/layers/arithmetic/div_var.rs @@ -25,10 +25,10 @@ impl Arithmetic for DivVarChip { fn gadget_forward( &self, mut layouter: impl Layouter, - vec_inputs: &Vec>>, - constants: &Vec<&AssignedCell>, + vec_inputs: &Vec, F)>>, + constants: &Vec<(&AssignedCell, F)>, gadget_config: Rc, - ) -> Result>, Error> { + ) -> Result, F)>, Error> { let mul_pairs_chip = MulPairsChip::::construct(gadget_config.clone()); let out = mul_pairs_chip.forward( @@ -60,7 +60,9 @@ impl Layer for DivVarChip { .unwrap() .as_ref(); - let sf_tensor = Array::from_shape_vec(IxDyn(&[1]), vec![Rc::new(sf.clone())]).unwrap(); + // TOCHECK + let sf_tensor = + Array::from_shape_vec(IxDyn(&[1]), vec![(Rc::new(sf.clone()), sf.value().cloned().assign().unwrap())]).unwrap(); // out = inp * SF let (out, out_shape) = self.arithmetic_forward( @@ -71,13 +73,13 @@ impl Layer for DivVarChip { )?; let var_div_chip = VarDivRoundChip::::construct(gadget_config.clone()); - let div = tensors[1].iter().next().unwrap().as_ref(); - let zero = constants.get(&0).unwrap().as_ref(); + let div = tensors[1].iter().next().map(|x| (x.0.as_ref(), x.1)).unwrap(); + let zero = (constants.get(&0).unwrap().as_ref(), F::ZERO); let single_inputs = vec![zero, div]; - let out = out.iter().map(|x| x.as_ref()).collect::>(); + let out = out.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let out = var_div_chip.forward(layouter.namespace(|| "mul div"), &vec![out], &single_inputs)?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap(); Ok(vec![out]) } diff --git a/src/layers/arithmetic/mul.rs b/src/layers/arithmetic/mul.rs index 4c3acd6..a72e45c 100644 --- a/src/layers/arithmetic/mul.rs +++ b/src/layers/arithmetic/mul.rs @@ 
@@ -28,10 +28,10 @@ impl<F: PrimeField> Arithmetic<F> for MulChip<F> {
   fn gadget_forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    constants: &Vec<&AssignedCell<F, F>>,
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    constants: &Vec<(&AssignedCell<F, F>, F)>,
     gadget_config: Rc<GadgetConfig>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let mul_pairs_chip = MulPairsChip::<F>::construct(gadget_config.clone());

     let out = mul_pairs_chip.forward(
@@ -66,12 +66,20 @@ impl<F: PrimeField> Layer<F> for MulChip<F> {
       .get(&(gadget_config.scale_factor as i64))
       .unwrap()
       .as_ref();
+    let divv = {
+      let shift_val_i64 = -gadget_config.min_val * 2;
+      let shift_val_f = F::from(shift_val_i64 as u64);
+      F::from((gadget_config.scale_factor as i64 + shift_val_i64) as u64) - shift_val_f
+    };
     let zero = constants.get(&0).unwrap().as_ref();
-    let single_inputs = vec![zero, div];
-    let out = out.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
+    let single_inputs = vec![
+      (zero, F::ZERO),
+      (div, divv)
+    ];
+    let out = out.iter().map(|x| (x.0.as_ref(), x.1)).collect::<Vec<_>>();
     let out = var_div_chip.forward(layouter.namespace(|| "mul div"), &vec![out], &single_inputs)?;
-    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
+    let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::<Vec<_>>();
     let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();
     Ok(vec![out])
   }
diff --git a/src/layers/arithmetic/sub.rs b/src/layers/arithmetic/sub.rs
index 4052681..f764007 100644
--- a/src/layers/arithmetic/sub.rs
+++ b/src/layers/arithmetic/sub.rs
@@ -27,10 +27,10 @@ impl<F: PrimeField> Arithmetic<F> for SubChip<F> {
   fn gadget_forward(
     &self,
     mut layouter: impl Layouter<F>,
-    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
-    constants: &Vec<&AssignedCell<F, F>>,
+    vec_inputs: &Vec<Vec<(&AssignedCell<F, F>, F)>>,
+    constants: &Vec<(&AssignedCell<F, F>, F)>,
     gadget_config: Rc<GadgetConfig>,
-  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
+  ) -> Result<Vec<(AssignedCell<F, F>, F)>, Error> {
     let sub_pairs_chip = SubPairsChip::<F>::construct(gadget_config);
     let out = sub_pairs_chip.forward(layouter.namespace(|| "sub chip"), &vec_inputs, constants)?;
     Ok(out)
diff --git a/src/layers/averager.rs b/src/layers/averager.rs
index 3445fb4..c167349 100644
--- a/src/layers/averager.rs
+++ b/src/layers/averager.rs
@@ -12,7 +12,7 @@ use crate::gadgets::{adder::AdderChip, gadget::GadgetConfig, var_div::VarDivRoundChip};
 use super::layer::{AssignedTensor, CellRc, LayerConfig};

 pub trait Averager<F: PrimeField> {
-  fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>>;
+  fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<(CellRc<F>, F)>>;

   fn get_div_val(
     &self,
     mut layouter: impl Layouter<F>,
     tensors: &Vec<AssignedTensor<F>>,
     gadget_config: Rc<GadgetConfig>,
     layer_config: &LayerConfig,
-  ) -> Result<AssignedCell<F, F>, Error>;
+  ) -> Result<(AssignedCell<F, F>, F), Error>;

   fn avg_forward(
     &self,
     mut layouter: impl Layouter<F>,
     tensors: &Vec<AssignedTensor<F>>,
     constants: &HashMap<i64, CellRc<F>>,
     gadget_config: Rc<GadgetConfig>,
     layer_config: &LayerConfig,
-  ) -> Result<Vec<CellRc<F>>, Error> {
+  ) -> Result<Vec<(CellRc<F>, F)>, Error> {
     // Due to Mean BS
     // assert_eq!(tensors.len(), 1);
     let zero = constants.get(&0).unwrap().as_ref();
@@ -38,10 +38,10 @@ pub trait Averager<F: PrimeField> {
     let splat_inp = self.splat(inp, layer_config);

     let adder_chip = AdderChip::<F>::construct(gadget_config.clone());
-    let single_inputs = vec![zero];
+    let single_inputs = vec![(zero, F::ZERO)];
     let mut added = vec![];
     for i in 0..splat_inp.len() {
-      let tmp = splat_inp[i].iter().map(|x| x.as_ref()).collect::<Vec<_>>();
+      let tmp = splat_inp[i].iter().map(|x| (x.0.as_ref(), x.1)).collect::<Vec<_>>();
       let tmp = adder_chip.forward(
         layouter.namespace(|| format!("average {}", i)),
         &vec![tmp],
@@ -56,16 +56,18 @@ pub trait Averager<F: PrimeField> {
       gadget_config.clone(),
       layer_config,
     )?;
+    let div = (&div.0, div.1);

     let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
-    let single_inputs = vec![zero, &div];
-    let added = added.iter().map(|x| x).collect::<Vec<_>>();
+    let single_inputs = vec![(zero, F::ZERO), div];
+    let added = added.iter().map(|x| (&x.0, x.1)).collect::<Vec<_>>();
     let dived = var_div_chip.forward(
       layouter.namespace(|| "average div"),
       &vec![added],
       &single_inputs,
     )?;
-    let dived = dived.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
+    let dived = dived.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::<Vec<_>>();

     Ok(dived)
   }
diff --git a/src/layers/avg_pool_2d.rs b/src/layers/avg_pool_2d.rs
index 33608a4..5d3747b 100644
--- a/src/layers/avg_pool_2d.rs
+++ b/src/layers/avg_pool_2d.rs
@@ -20,7 +20,7 @@ use super::{
 pub struct AvgPool2DChip {}

 impl<F: PrimeField> Averager<F> for AvgPool2DChip {
-  fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>> {
+  fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<(CellRc<F>, F)>> {
     assert_eq!(input.shape().len(), 4);
     // Don't support batch size > 1 yet
     assert_eq!(input.shape()[0], 1);
@@ -35,11 +35,11 @@ impl<F: PrimeField> Averager<F> for AvgPool2DChip {
     _tensors: &Vec<AssignedTensor<F>>,
     gadget_config: Rc<GadgetConfig>,
     layer_config: &LayerConfig,
-  ) -> Result<AssignedCell<F, F>, Error> {
+  ) -> Result<(AssignedCell<F, F>, F), Error> {
     // FIXME: this needs to be revealed
     let div = layer_config.layer_params[0] * layer_config.layer_params[1];
     let div = F::from(div as u64);
-    let div = layouter
+    let divc = layouter
       .assign_region(
         || "avg pool 2d div",
         |mut region| {
@@ -53,10 +53,9 @@ impl<F: PrimeField> Averager<F> for AvgPool2DChip {
             .unwrap();
           Ok(div)
         },
-      )
-      .unwrap();
+      )?;

-    Ok(div)
+    Ok((divc, div))
   }
 }
diff --git a/src/layers/batch_mat_mul.rs b/src/layers/batch_mat_mul.rs
index 2714d36..4979cf1 100644
--- a/src/layers/batch_mat_mul.rs
+++ b/src/layers/batch_mat_mul.rs
@@ -52,7 +52,7 @@ impl<F: PrimeField> Layer<F> for BatchMatMulChip {
       config: FullyConnectedConfig::construct(true),
     };

-    let mut outp: Vec<CellRc<F>> = vec![];
+    let mut outp: Vec<(CellRc<F>, F)> = vec![];
     for i in 0..inp1.shape()[0] {
       let inp1_slice = inp1.index_axis(Axis(0), i).to_owned();
       // Due to tensorflow BS, transpose the "weights"
@@ -77,7 +77,7 @@ impl<F: PrimeField> Layer<F> for BatchMatMulChip {
         gadget_config.clone(),
         &tmp_config,
       )?;
-      outp.extend(outp_slice[0].iter().map(|x| x.clone()).collect::<Vec<_>>());
+      outp.extend(outp_slice[0].iter().map(|x| (x.0.clone(), x.1)).collect::<Vec<_>>());
     }

     let outp = Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap();
diff --git a/src/layers/conv2d.rs b/src/layers/conv2d.rs
index a765d0b..268b40e 100644
--- a/src/layers/conv2d.rs
+++ b/src/layers/conv2d.rs
@@ -125,9 +125,9 @@ impl<F: PrimeField> Conv2DChip<F> {

   pub fn splat(
     &self,
-    tensors: &Vec<Array<CellRc<F>, IxDyn>>,
+    tensors: &Vec<Array<(CellRc<F>, F), IxDyn>>,
     zero: Rc<AssignedCell<F, F>>,
-  ) -> (Vec<Vec<CellRc<F>>>, Vec<Vec<CellRc<F>>>, Vec<CellRc<F>>) {
+  ) -> (Vec<Vec<(CellRc<F>, F)>>, Vec<Vec<(CellRc<F>, F)>>, Vec<(Rc<AssignedCell<F, F>>, F)>) {
     // assert_eq!(tensors.len(), 3);
     assert!(tensors.len() <= 3);

     let inp = &tensors[0];
     let weights = &tensors[1];
-    let zero_arr = Array::from_elem(IxDyn(&vec![1]), zero.clone());
+    let zero_arr = Array::from_elem(IxDyn(&vec![1]), (zero.clone(), F::ZERO));
     let biases = if tensors.len() == 3 {
       &tensors[2]
     } else {
@@ -161,7 +161,7 @@ impl<F: PrimeField> Conv2DChip<F> {
     // println!("Padding: {:?}", (ph, pw));
     let padding = vec![[0, 0], [ph.0, ph.1], [pw.0, pw.1], [0, 0]];
-    let inp_pad = pad(&inp, padding, &zero);
+    let inp_pad = pad(&inp, padding, &(zero.clone(), F::ZERO));

     let (oh, ow) = Self::out_hw(h, w, si, sj, ch, cw, conv_config.padding);
@@ -210,7 +210,7 @@ impl<F: PrimeField> Conv2DChip<F> {
         if tensors.len() == 3 {
           biases_cells.push(biases[chan_out].clone());
         } else {
-          biases_cells.push(zero.clone());
+          biases_cells.push((zero.clone(), F::ZERO));
         }
       }
     }
@@ -222,9 +222,9 @@ impl<F: PrimeField> Conv2DChip<F> {

   pub fn splat_depthwise(
     &self,
-    tensors: &Vec<Array<CellRc<F>, IxDyn>>,
+    tensors: &Vec<Array<(CellRc<F>, F), IxDyn>>,
     zero: Rc<AssignedCell<F, F>>,
-  ) -> (Vec<Vec<CellRc<F>>>, Vec<Vec<CellRc<F>>>, Vec<CellRc<F>>) {
+  ) -> (Vec<Vec<(CellRc<F>, F)>>, Vec<Vec<(CellRc<F>, F)>>, Vec<(Rc<AssignedCell<F, F>>, F)>) {
     let input = &tensors[0];
     let weights = &tensors[1];
     let biases = &tensors[2];
@@ -252,7 +252,7 @@ impl<F: PrimeField> Conv2DChip<F> {

     let padding = vec![[0, 0], [ph.0, ph.1], [pw.0, pw.1], [0, 0]];

-    let inp_pad = pad(&input, padding, &zero);
+    let inp_pad = pad(&input, padding, &(zero, F::ZERO));

     let mut inp_cells = vec![];
     let mut weight_cells = vec![];
@@ -317,7 +317,7 @@ impl<F: PrimeField> Layer<F> for Conv2DChip<F> {
       ConvLayerEnum::DepthwiseConv2D => self.splat_depthwise(tensors, zero.clone()),
     };

-    let outp_flat: Vec<AssignedCell<F, F>> = match conv_config.conv_type {
+    let outp_flat: Vec<(AssignedCell<F, F>, F)> = match conv_config.conv_type {
       ConvLayerEnum::Conv2D => {
         let fc_chip = FullyConnectedChip::<F> {
           _marker: PhantomData,
@@ -352,7 +352,7 @@ impl<F: PrimeField> Layer<F> for Conv2DChip<F> {
         let outp_flat = outp_slice[0]
           .t()
           .into_iter()
-          .map(|x| (**x).clone())
+          .map(|x| (x.0.as_ref().clone(), x.1).clone())
           .collect::<Vec<_>>();
         outp_flat
       }
@@ -361,10 +361,10 @@ impl<F: PrimeField> Layer<F> for Conv2DChip<F> {
         let dot_prod_chip = DotProductChip::<F>::construct(gadget_config.clone());
         let mut outp_flat = vec![];
         for (inp_vec, weight_vec) in splat_inp.iter().zip(splat_weights.iter()) {
-          let inp_vec = inp_vec.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
-          let weight_vec = weight_vec.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
+          let inp_vec = inp_vec.iter().map(|x| (x.0.as_ref(), x.1)).collect::<Vec<_>>();
+          let weight_vec = weight_vec.iter().map(|x| (x.0.as_ref(), x.1)).collect::<Vec<_>>();
           let vec_inputs = vec![inp_vec, weight_vec];
-          let constants = vec![zero.as_ref()];
+          let constants = vec![(zero.as_ref(), F::ZERO)];
           let outp = dot_prod_chip
             .forward(layouter.namespace(|| "dot_prod"), &vec_inputs, &constants)
             .unwrap();
@@ -378,13 +378,13 @@ impl<F: PrimeField> Layer<F> for Conv2DChip<F> {

     let mut biases = vec![];
     for bias in splat_biases.iter() {
-      biases.push(bias.as_ref());
+      biases.push((bias.0.as_ref(), bias.1));
     }

     // Compute the bias + div + relu
     let bdr_chip = BiasDivRoundRelu6Chip::<F>::construct(gadget_config.clone());
-    let tmp = vec![zero.as_ref()];
-    let outp_flat = outp_flat.iter().map(|x| x).collect::<Vec<_>>();
+    let tmp = vec![(zero.as_ref(), F::ZERO)];
+    let outp_flat = outp_flat.iter().map(|x| (&x.0, x.1)).collect::<Vec<_>>();
     let outp = bdr_chip
       .forward(
         layouter.namespace(|| "bias_div_relu"),
@@ -399,24 +399,24 @@ impl<F: PrimeField> Layer<F> for Conv2DChip<F> {
       outp
         .into_iter()
         .step_by(2)
-        .map(|x| Rc::new(x))
+        .map(|x| (Rc::new(x.0), x.1))
         .collect::<Vec<_>>()
     } else if conv_config.activation == ActivationType::None {
       outp
         .into_iter()
         .skip(1)
         .step_by(2)
-        .map(|x| Rc::new(x))
+        .map(|x| (Rc::new(x.0), x.1))
         .collect::<Vec<_>>()
     } else if conv_config.activation == ActivationType::Relu {
-      let dived = outp.iter().skip(1).step_by(2).collect::<Vec<_>>();
+      let dived = outp.iter().skip(1).step_by(2).map(|x| (&x.0, x.1)).collect::<Vec<_>>();
       let relu_chip = ReluChip::<F>::construct(gadget_config.clone());
       let relu_outp = relu_chip
         .forward(layouter.namespace(|| "relu"), &vec![dived], &tmp)
         .unwrap();
       let relu_outp = relu_outp
         .into_iter()
-        .map(|x| Rc::new(x))
+        .map(|x| (Rc::new(x.0), x.1))
         .collect::<Vec<_>>();
       relu_outp
     } else {
diff --git a/src/layers/dag.rs b/src/layers/dag.rs
index 75b7505..3269be6 100644
--- a/src/layers/dag.rs
+++ b/src/layers/dag.rs
@@ -79,10 +79,10 @@ impl<F: PrimeField> DAGLayerChip<F> {
     let layer_type = &layer_config.layer_type;
     let inp_idxes = &self.dag_config.inp_idxes[layer_idx];
     let out_idxes = &self.dag_config.out_idxes[layer_idx];
-    println!(
-      "Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}",
-      layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params
-    );
+    // println!(
+    //   "Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}",
+    //   layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params
+    // );
     let vec_inps = inp_idxes
       .iter()
       .map(|idx| tensor_map.get(idx).unwrap().clone())
       .collect::<Vec<_>>();
@@ -486,13 +486,13 @@ impl<F: PrimeField> DAGLayerChip<F> {
       }
     };

-    let tmp = print_arr.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
+    let tmp = print_arr.iter().map(|x| x.0.as_ref()).collect::<Vec<_>>();
     // print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor);
     // println!("final out idxes: {:?}", self.dag_config.final_out_idxes);
     let mut x = vec![];
     for cell in print_arr.iter() {
-      cell.value().map(|v| {
+      cell.0.value().map(|v| {
        let bias = 1 << 60 as i64;
        let v_pos = *v + F::from(bias as u64);
        let v = convert_to_u64(&v_pos) as i64 - bias;
diff --git a/src/layers/div_fixed.rs b/src/layers/div_fixed.rs
index e12fbf5..9f46f6f 100644
--- a/src/layers/div_fixed.rs
+++ b/src/layers/div_fixed.rs
@@ -24,12 +24,12 @@ impl<F: PrimeField> DivFixedChip<F> {
     _tensors: &Vec<AssignedTensor<F>>,
     gadget_config: Rc<GadgetConfig>,
     layer_config: &LayerConfig,
-  ) -> Result<AssignedCell<F, F>, Error> {
+  ) -> Result<(AssignedCell<F, F>, F), Error> {
     // FIXME: this needs to be revealed
     let div = layer_config.layer_params[0];
     let div = F::from(div as u64);

-    let div = layouter
+    let divc = layouter
       .assign_region(
         || "division",
         |mut region| {
@@ -43,10 +43,9 @@ impl<F: PrimeField> DivFixedChip<F> {
             .unwrap();
           Ok(div)
         },
-      )
-      .unwrap();
+      )?;

-    Ok(div)
+    Ok((divc, div))
   }
 }
@@ -61,9 +60,9 @@ impl<F: PrimeField> Layer<F> for DivFixedChip<F> {
     layer_config: &LayerConfig,
   ) -> Result<Vec<AssignedTensor<F>>, Error> {
     let inp = &tensors[0];
-    let inp_flat = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
+    let inp_flat = inp.iter().map(|x| (x.0.as_ref(), x.1)).collect::<Vec<_>>();

-    let zero = constants.get(&0).unwrap().as_ref();
+    let zero = (constants.get(&0).unwrap().as_ref(), F::ZERO);
     let shape = inp.shape();

     let div = self.get_div_val(
@@ -72,15 +71,16 @@ impl<F: PrimeField> Layer<F> for DivFixedChip<F> {
      gadget_config.clone(),
      layer_config,
    )?;
+    let div = (&div.0, div.1);

     let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
     let dived = var_div_chip.forward(
       layouter.namespace(|| "average div"),
       &vec![inp_flat],
-      &vec![zero, &div],
+      &vec![zero, div],
     )?;
-    let dived = dived.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
+    let dived = dived.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::<Vec<_>>();

     let out = Array::from_shape_vec(IxDyn(shape), dived).unwrap();
     Ok(vec![out])
diff --git a/src/layers/fully_connected.rs b/src/layers/fully_connected.rs
index eb6317b..adc2ad4 100644
--- a/src/layers/fully_connected.rs
+++ b/src/layers/fully_connected.rs
@@ -39,9 +39,9 @@ pub struct FullyConnectedChip<F: PrimeField> {

 impl<F: PrimeField> FullyConnectedChip<F> {
   pub fn compute_mm(
     // input: &AssignedTensor<F>,
-    input: &ArrayView<CellRc<F>, IxDyn>,
+    input: &ArrayView<(CellRc<F>, F), IxDyn>,
     weight: &AssignedTensor<F>,
-  ) -> Array<Value<F>, IxDyn> {
+  ) -> Array<F, IxDyn> {
     assert_eq!(input.ndim(), 2);
     assert_eq!(weight.ndim(), 2);
     assert_eq!(input.shape()[1], weight.shape()[0]);

     let mut outp = vec![];
     for i in 0..input.shape()[0] {
       for j in 0..weight.shape()[1] {
-        let mut sum = input[[i, 0]].value().map(|x: &F| *x) * weight[[0, j]].value();
+        let mut sum = input[[i, 0]].1 * weight[[0, j]].1;
         for k in 1..input.shape()[1] {
-          sum = sum + input[[i, k]].value().map(|x: &F| *x) * weight[[k, j]].value();
+          sum = sum + input[[i, k]].1 * weight[[k, j]].1;
         }
         outp.push(sum);
       }
@@ -64,8 +64,8 @@ impl<F: PrimeField> FullyConnectedChip<F> {
   pub fn assign_array(
     columns: &Vec<Column<Advice>>,
     region: &mut Region<F>,
-    array: &Array<Value<F>, IxDyn>,
-  ) -> Result<Array<AssignedCell<F, F>, IxDyn>, Error> {
+    array: &Array<F, IxDyn>,
+  ) -> Result<Array<(AssignedCell<F, F>, F), IxDyn>, Error> {
     assert_eq!(array.ndim(), 2);

     let mut outp = vec![];
       let row_idx = idx / columns.len();
       let col_idx = idx % columns.len();
       let cell = region
-        .assign_advice(|| "assign array", columns[col_idx], row_idx, || *val)
+        .assign_advice(|| "assign array", columns[col_idx], row_idx, || Value::known(*val))
         .unwrap();
-      // println!("Error: {:?}", cell);
-      outp.push(cell);
+      outp.push((cell, *val));
     }

     let out_shape = [array.shape()[0], array.shape()[1]];
@@ -105,6 +104,7 @@ impl<F: PrimeField> FullyConnectedChip<F> {
     size: usize,
   ) -> Result<Vec<CellRc<F>>, Error> {
     let mut outp = vec![];
+
     for idx in 0..size {
       let idx = idx as i64;
       if !constants.contains_key(&idx) {
@@ -158,22 +158,103 @@ impl<F: PrimeField> Layer<F> for FullyConnectedChip<F> {
         || "compute and assign mm",
         |mut region| {
           let mm_result = Self::compute_mm(&input, weight);
-          let mm_result =
+          let result =
             Self::assign_array(&gadget_config.columns, &mut region, &mm_result).unwrap();
+
+          // Copy mm result to witness holder
+          let copy_result =
+            Self::assign_array(&gadget_config.witness_columns, &mut region, &mm_result).unwrap();
+
+          for (l, r) in copy_result.iter().zip(result.iter()) {
+            region.constrain_equal(l.0.cell(), r.0.cell())?;
+          }

-          Ok(mm_result)
+          Ok(result)
         },
       )
       .unwrap();

+    // println!("Shape of Input: {:?}", input.shape());
+    // println!("Shape of Weight: {:?}", weight.shape());
+    // println!("Shape of MM: {:?}", mm_result.shape());
+    // Create copy constraint for input, weight, input*weight to witness columns
+    // layouter.assign_region(
+    //   || "test",
+    //   |mut region| {
+    //     for (i, v) in mm_result.iter().enumerate() {
+    //       let col = i % gadget_config.witness_columns.len();
+    //       let row = i / gadget_config.witness_columns.len();
+    //       region.assign_advice(
+    //         || "test in witness columns",
+    //         gadget_config.witness_columns[col], row,
+    //         || v.value().map(|x| *x)
+    //       )?;
+    //     }
+    //     Ok(())
+    //   }
+    // ).unwrap();

+    // layouter
+    //   .assign_region(
+    //     || "copy constraint for input",
+    //     |mut region| {
+    //       for (i, v) in input.iter().enumerate() {
+    //         let col = i % gadget_config.witness_columns.len();
+    //         let row = i / gadget_config.witness_columns.len();
+    //         v.copy_advice(
+    //           || "input in witness columns",
+    //           &mut region,
+    //           gadget_config.witness_columns[col], row
+    //         )?;
+    //       }
+    //       Ok(())
+    //     }
+    //   )?;

+    // layouter.assign_region(
+    //   || "copy constraint for weight",
+    //   |mut region| {
+    //     for (i, v) in weight.iter().enumerate() {
+    //       let col = i % gadget_config.witness_columns.len();
+    //       let row = i / gadget_config.witness_columns.len();
+    //       v.copy_advice(
+    //         || "weight in witness columns",
+    //         &mut region,
+    //         gadget_config.witness_columns[col], row
+    //       )?;
+    //     }
+    //     Ok(())
+    //   }
+    // )?;

+    // layouter.assign_region(
+    //   || "copy constraint for mm",
+    //   |mut region| {
+    //     for (i, v) in mm_result.iter().enumerate() {
+    //       let col = i % gadget_config.witness_columns.len();
+    //       let row = i / gadget_config.witness_columns.len();
+    //       v.copy_advice(
+    //         || "mm in witness columns",
+    //         &mut region,
+    //         gadget_config.witness_columns[col], row
+    //       )?;
+    //     }
+    //     Ok(())
+    //   }
+    // )?;

+
+    // Generate random vectors
-
mm_result.shape()[0]).unwrap(); let r2 = Self::random_vector(rand_vector, mm_result.shape()[1]).unwrap(); let dot_prod_chip = DotProductChip::::construct(gadget_config.clone()); - let r1_ref = r1.iter().map(|x| x.as_ref()).collect::>(); - let r2_ref = r2.iter().map(|x| x.as_ref()).collect::>(); + let r1_ref = r1.iter().map( + |x| (x.as_ref(), x.value().cloned().assign().unwrap_or(F::from(0x123456789abcdef))) + ).collect::>(); + let r2_ref = r2.iter().map( + |x| (x.as_ref(), x.value().cloned().assign().unwrap_or(F::from(0x123456789abcdef))) + ).collect::>(); // Compute r1 * result let mut r1_res = vec![]; @@ -182,72 +263,67 @@ impl Layer for FullyConnectedChip { // println!("mm_result: {:?}", mm_result.shape()); for i in 0..mm_result.shape()[1] { let tmp = mm_result.index_axis(Axis(1), i); - let mm_ci = tmp.iter().collect::>(); + let mm_ci = tmp.iter().map(|x| (&x.0, x.1)).collect::>(); let r1_res_i = dot_prod_chip .forward( layouter.namespace(|| format!("r1_res_{}", i)), &vec![mm_ci, r1_ref.clone()], - &vec![zero], + &vec![(zero, F::ZERO)], ) .unwrap(); r1_res.push(r1_res_i[0].clone()); } - // Compute r1 * result * r2 - let r1_res_ref = r1_res.iter().collect::>(); + let r1_res_ref = r1_res.iter().map(|x| (&x.0, x.1)).collect::>(); let r1_res_r2 = dot_prod_chip .forward( layouter.namespace(|| "r1_res_r2"), &vec![r1_res_ref, r2_ref.clone()], - &vec![zero], + &vec![(zero, F::ZERO)], ) .unwrap(); let r1_res_r2 = r1_res_r2[0].clone(); // println!("r1_res_r2: {:?}", r1_res_r2); - // Compute r1 * input let mut r1_input = vec![]; // println!("input: {:?}", input.shape()); // println!("r1_ref: {:?}", r1_ref.len()); for i in 0..input.shape()[1] { let tmp = input.index_axis(Axis(1), i); - let input_ci = tmp.iter().map(|x| x.as_ref()).collect::>(); + let input_ci = tmp.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let r1_input_i = dot_prod_chip .forward( layouter.namespace(|| format!("r1_input_{}", i)), &vec![input_ci, r1_ref.clone()], - &vec![zero], + &vec![(zero, F::ZERO)], ) .unwrap(); r1_input.push(r1_input_i[0].clone()); } - // Compute weight * r2 let mut weight_r2 = vec![]; for i in 0..weight.shape()[0] { let tmp = weight.index_axis(Axis(0), i); - let weight_ci = tmp.iter().map(|x| x.as_ref()).collect::>(); + let weight_ci = tmp.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let weight_r2_i = dot_prod_chip .forward( layouter.namespace(|| format!("weight_r2_{}", i)), &vec![weight_ci, r2_ref.clone()], - &vec![zero], + &vec![(zero, F::ZERO)], ) .unwrap(); weight_r2.push(weight_r2_i[0].clone()); } - // Compute (r1 * input) * (weight * r2) - let r1_input_ref = r1_input.iter().collect::>(); - let weight_r2_ref = weight_r2.iter().collect::>(); + let r1_input_ref = r1_input.iter().map(|x| (&x.0, x.1)).collect::>(); + let weight_r2_ref = weight_r2.iter().map(|x| (&x.0, x.1)).collect::>(); let r1_inp_weight_r2 = dot_prod_chip .forward( layouter.namespace(|| "r1_inp_weight_r2"), &vec![r1_input_ref, weight_r2_ref], - &vec![zero], + &vec![(zero, F::ZERO)], ) .unwrap(); - let r1_inp_weight_r2 = r1_inp_weight_r2[0].clone(); // println!("r1_inp_weight_r2: {:?}", r1_inp_weight_r2); @@ -255,15 +331,14 @@ impl Layer for FullyConnectedChip { .assign_region( || "fc equality check", |mut region| { - let t1 = r1_res_r2 + let t1 = r1_res_r2.0 .copy_advice(|| "", &mut region, gadget_config.columns[0], 0) .unwrap(); - let t2 = r1_inp_weight_r2 + let t2 = r1_inp_weight_r2.0 .copy_advice(|| "", &mut region, gadget_config.columns[0], 1) .unwrap(); region.constrain_equal(t1.cell(), t2.cell()).unwrap(); - Ok(()) }, ) 
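Aside (illustrative, not part of the patch): the "fc equality check" region above, together with the r1_res / r1_input / weight_r2 dot products, is a Freivalds-style randomized verification that the witnessed mm_result really equals input x weight. Rather than constraining every multiply-accumulate, the circuit checks the single scalar identity r1^T * C * r2 = (r1^T * A) * (B * r2) for challenge vectors r1, r2. A plain-Rust sketch of the same identity over i64, outside the circuit (all names here are illustrative, not from this repo):

fn freivalds_check(
  a: &[Vec<i64>],  // input, n x k
  b: &[Vec<i64>],  // weight, k x m
  c: &[Vec<i64>],  // claimed product, n x m
  r1: &[i64],      // challenge vector of length n
  r2: &[i64],      // challenge vector of length m
) -> bool {
  let (n, k, m) = (a.len(), b.len(), b[0].len());
  // Left side: r1^T * C * r2, mirroring the "r1 * result" and "r1 * result * r2" steps.
  let mut lhs = 0i64;
  for i in 0..n {
    for j in 0..m {
      lhs += r1[i] * c[i][j] * r2[j];
    }
  }
  // Right side: (r1^T * A) dot (B * r2), mirroring "r1 * input" and "weight * r2".
  let r1a: Vec<i64> = (0..k).map(|t| (0..n).map(|i| r1[i] * a[i][t]).sum()).collect();
  let br2: Vec<i64> = (0..k).map(|t| (0..m).map(|j| b[t][j] * r2[j]).sum()).collect();
  let rhs: i64 = r1a.iter().zip(br2.iter()).map(|(x, y)| x * y).sum();
  // The two sides agree for every r1, r2 exactly when C = A * B; for random
  // challenges, a wrong C is caught with high probability.
  lhs == rhs
}

This is why the hunks above only need the dot-product gadget plus one final equality constraint, instead of constraining all n*k*m multiplications of the matrix product.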
@@ -271,7 +346,7 @@ impl Layer for FullyConnectedChip { let shape = [mm_result.shape()[0], mm_result.shape()[1]]; let final_result_flat = if self.config.normalize { - let mm_flat = mm_result.iter().collect::>(); + let mm_flat = mm_result.iter().map(|x| (&x.0, x.1)).collect::>(); let var_div_chip = VarDivRoundChip::::construct(gadget_config.clone()); let sf = constants .get(&(gadget_config.scale_factor as i64)) @@ -281,20 +356,20 @@ impl Layer for FullyConnectedChip { .forward( layouter.namespace(|| "mm_div"), &vec![mm_flat], - &vec![zero, sf], + &vec![(zero, F::ZERO), (sf, sf.value().cloned().assign().unwrap())], ) .unwrap(); let mm_div = if tensors.len() == 3 { let bias = tensors[2].broadcast(shape.clone()).unwrap(); - let bias = bias.iter().map(|x| x.as_ref()).collect::>(); - let mm_div = mm_div.iter().collect::>(); + let bias = bias.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); + let mm_div = mm_div.iter().map(|x| (&x.0, x.1)).collect::>(); let adder_chip = AddPairsChip::::construct(gadget_config.clone()); let mm_bias = adder_chip .forward( layouter.namespace(|| "mm_bias"), &vec![mm_div, bias], - &vec![zero], + &vec![(zero, F::ZERO)], ) .unwrap(); mm_bias @@ -304,10 +379,10 @@ impl Layer for FullyConnectedChip { let mm_div = if activation == ActivationType::Relu { let relu_chip = ReluChip::::construct(gadget_config.clone()); - let mm_div = mm_div.iter().collect::>(); + let mm_div = mm_div.iter().map(|x| (&x.0, x.1)).collect::>(); let vec_inputs = vec![mm_div]; relu_chip - .forward(layouter.namespace(|| "relu"), &vec_inputs, &vec![zero]) + .forward(layouter.namespace(|| "relu"), &vec_inputs, &vec![(zero, F::ZERO)]) .unwrap() } else if activation == ActivationType::None { mm_div @@ -315,11 +390,11 @@ impl Layer for FullyConnectedChip { panic!("Unsupported activation type"); }; - mm_div.into_iter().map(|x| Rc::new(x)).collect::>() + mm_div.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>() } else { mm_result .into_iter() - .map(|x| Rc::new(x)) + .map(|x| (Rc::new(x.0), x.1)) .collect::>() }; let final_result = Array::from_shape_vec(IxDyn(&shape), final_result_flat).unwrap(); diff --git a/src/layers/layer.rs b/src/layers/layer.rs index 3eb9e3a..138ee8e 100644 --- a/src/layers/layer.rs +++ b/src/layers/layer.rs @@ -70,7 +70,7 @@ pub struct LayerConfig { } pub type CellRc = Rc>; -pub type AssignedTensor = Array, IxDyn>; +pub type AssignedTensor = Array<(CellRc, F), IxDyn>; // General issue with rust: I'm not sure how to pass named arguments to a trait... 
// Currently, the caller must be aware of the order of the tensors and results pub trait Layer { diff --git a/src/layers/logistic.rs b/src/layers/logistic.rs index aa19fa5..a5314ab 100644 --- a/src/layers/logistic.rs +++ b/src/layers/logistic.rs @@ -24,19 +24,19 @@ impl Layer for LogisticChip { _layer_config: &LayerConfig, ) -> Result>, Error> { let inp = &tensors[0]; - let inp_vec = inp.iter().map(|x| x.as_ref()).collect::>(); + let inp_vec = inp.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let zero = constants.get(&0).unwrap().as_ref(); let logistic_chip = LogisticGadgetChip::::construct(gadget_config.clone()); let vec_inps = vec![inp_vec]; - let constants = vec![zero]; + let constants = vec![(zero, F::ZERO)]; let out = logistic_chip.forward( layouter.namespace(|| "logistic chip"), &vec_inps, &constants, )?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap(); Ok(vec![out]) diff --git a/src/layers/max_pool_2d.rs b/src/layers/max_pool_2d.rs index 20d16e5..d50f5a4 100644 --- a/src/layers/max_pool_2d.rs +++ b/src/layers/max_pool_2d.rs @@ -44,7 +44,7 @@ impl MaxPool2DChip { pub fn splat( inp: &AssignedTensor, layer_config: &LayerConfig, - ) -> Result>>, Error> { + ) -> Result, F)>>, Error> { let params = &layer_config.layer_params; let (fx, fy) = (params[0], params[1]); let (fx, fy) = (fx as usize, fy as usize); @@ -96,7 +96,7 @@ impl Layer for MaxPool2DChip { let mut out = vec![]; for i in 0..splat.len() { let inps = &splat[i]; - let inps = inps.iter().map(|x| x.as_ref()).collect(); + let inps = inps.iter().map(|x| (x.0.as_ref(), x.1)).collect(); let max = max_chip .forward( layouter.namespace(|| format!("max {}", i)), @@ -106,7 +106,7 @@ impl Layer for MaxPool2DChip { .unwrap(); out.push(max[0].clone()); } - let out = out.into_iter().map(|x| Rc::new(x)).collect(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect(); // TODO: refactor this let out_xy = Self::shape(inp, layer_config); diff --git a/src/layers/mean.rs b/src/layers/mean.rs index 3f2ff89..791628f 100644 --- a/src/layers/mean.rs +++ b/src/layers/mean.rs @@ -53,7 +53,7 @@ impl MeanChip { } impl Averager for MeanChip { - fn splat(&self, input: &AssignedTensor, layer_config: &LayerConfig) -> Vec>> { + fn splat(&self, input: &AssignedTensor, layer_config: &LayerConfig) -> Vec,F)>> { // Only support batch size = 1 assert_eq!(input.shape()[0], 1); // Only support batch + 2D, summing over one axis @@ -78,7 +78,7 @@ impl Averager for MeanChip { tensors: &Vec>, gadget_config: Rc, layer_config: &LayerConfig, - ) -> Result, Error> { + ) -> Result<(AssignedCell, F), Error> { let inp = &tensors[0]; let keep_axis = self.get_keep_axis(layer_config); let mut div = 1; @@ -90,7 +90,7 @@ impl Averager for MeanChip { let div = F::from(div as u64); // FIXME: put this in the fixed column - let div = layouter.assign_region( + let divc = layouter.assign_region( || "mean div", |mut region| { let div = region.assign_advice( @@ -103,7 +103,7 @@ impl Averager for MeanChip { }, )?; - Ok(div) + Ok((divc, div)) } } diff --git a/src/layers/pow.rs b/src/layers/pow.rs index 08c36d8..0bb6fd7 100644 --- a/src/layers/pow.rs +++ b/src/layers/pow.rs @@ -24,15 +24,15 @@ impl Layer for PowChip { _layer_config: &LayerConfig, ) -> Result>, Error> { let inp = &tensors[0]; - let inp_vec = inp.iter().map(|x| x.as_ref()).collect::>(); + let inp_vec = inp.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let zero 
= constants.get(&0).unwrap().as_ref(); let pow_chip = PowGadgetChip::::construct(gadget_config.clone()); let vec_inps = vec![inp_vec]; - let constants = vec![zero]; + let constants = vec![(zero, F::ZERO)]; let out = pow_chip.forward(layouter.namespace(|| "pow chip"), &vec_inps, &constants)?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap(); Ok(vec![out]) diff --git a/src/layers/rsqrt.rs b/src/layers/rsqrt.rs index 764f3f0..83089a0 100644 --- a/src/layers/rsqrt.rs +++ b/src/layers/rsqrt.rs @@ -41,24 +41,29 @@ impl Layer for RsqrtChip { if mask_map.contains_key(&i) { let mask_val = *mask_map.get(&i).unwrap(); if mask_val == 1 { - inp_vec.push(max_val); + // TOCHECK + inp_vec.push((max_val, max_val.value().cloned().assign().unwrap())); } else if mask_val == -1 { - inp_vec.push(min_val); + inp_vec.push((min_val, min_val.value().cloned().assign().unwrap())); } else { panic!(); } } else { - inp_vec.push(val.as_ref()); + inp_vec.push((val.0.as_ref(), val.1)); } } let zero = constants.get(&0).unwrap().as_ref(); let rsqrt_chip = RsqrtGadgetChip::::construct(gadget_config.clone()); let vec_inps = vec![inp_vec]; - let constants = vec![zero, min_val, max_val]; + let constants = vec![ + (zero, F::ZERO), + (min_val, min_val.value().cloned().assign().unwrap()), + (max_val, max_val.value().cloned().assign().unwrap()) + ]; let out = rsqrt_chip.forward(layouter.namespace(|| "rsqrt chip"), &vec_inps, &constants)?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap(); Ok(vec![out]) diff --git a/src/layers/shape/mask_neg_inf.rs b/src/layers/shape/mask_neg_inf.rs index 792be5b..bce84d8 100644 --- a/src/layers/shape/mask_neg_inf.rs +++ b/src/layers/shape/mask_neg_inf.rs @@ -40,7 +40,11 @@ impl Layer for MaskNegInfChip { if *to_mask == 0 { out_vec.push(val.clone()); } else { - out_vec.push(min_val.clone()); + out_vec.push(( + min_val.clone(), + // TOCHECK + min_val.value().copied().assign().unwrap() + )); } } diff --git a/src/layers/shape/pad.rs b/src/layers/shape/pad.rs index 5b0a7b4..1f629b4 100644 --- a/src/layers/shape/pad.rs +++ b/src/layers/shape/pad.rs @@ -15,11 +15,11 @@ use crate::{ use super::super::layer::{Layer, LayerConfig}; // TODO: figure out where to put this -pub fn pad( - input: &Array, IxDyn>, +pub fn pad( + input: &Array<(Rc, F), IxDyn>, padding: Vec<[usize; 2]>, - pad_val: &Rc, -) -> Array, IxDyn> { + pad_val: &(Rc, F), +) -> Array<(Rc, F), IxDyn> { let tmp = input.iter().collect(); let input = Array::from_shape_vec(input.raw_dim(), tmp).unwrap(); assert_eq!(input.ndim(), padding.len()); @@ -85,7 +85,7 @@ impl Layer for PadChip { let zero = constants.get(&0).unwrap().clone(); let padding = PadChip::param_vec_to_config(layer_config.layer_params.clone()); - let padded = pad(input, padding.padding, &zero); + let padded = pad(input, padding.padding, &(zero, F::ZERO)); Ok(vec![padded]) } diff --git a/src/layers/softmax.rs b/src/layers/softmax.rs index b191095..c1bf32d 100644 --- a/src/layers/softmax.rs +++ b/src/layers/softmax.rs @@ -25,10 +25,10 @@ impl SoftmaxChip { pub fn softmax_flat( mut layouter: impl Layouter, constants: &HashMap>, - inp_flat: Vec<&AssignedCell>, + inp_flat: Vec<(&AssignedCell, F)>, gadget_config: Rc, mask: &Vec, - ) -> Result>, Error> { + ) -> Result, F)>, Error> { let exp_chip = 
ExpGadgetChip::::construct(gadget_config.clone()); let adder_chip = AdderChip::::construct(gadget_config.clone()); let sub_pairs_chip = SubPairsChip::::construct(gadget_config.clone()); @@ -54,46 +54,50 @@ impl SoftmaxChip { .forward( layouter.namespace(|| format!("max")), &vec![inp_take.clone()], - &vec![zero], + &vec![(zero, F::ZERO)], ) .unwrap(); - let max = &max[0]; + let max = (&max[0].0, max[0].1); // Subtract the max let max_flat = vec![max; inp_take.len()]; let sub = sub_pairs_chip.forward( layouter.namespace(|| format!("sub")), &vec![inp_take, max_flat], - &vec![zero], + &vec![(zero, F::ZERO)], )?; - let sub = sub.iter().collect::>(); + let sub = sub.iter().map(|x| (&x.0, x.1)).collect::>(); // Compute the exp let exp_slice = exp_chip.forward( layouter.namespace(|| format!("exp")), &vec![sub], - &vec![zero], + &vec![(zero, F::ZERO)], )?; // Compute the sum let sum = adder_chip.forward( layouter.namespace(|| format!("sum")), - &vec![exp_slice.iter().collect()], - &vec![zero], + &vec![exp_slice.iter().map(|x| (&x.0, x.1)).collect()], + &vec![(zero, F::ZERO)], )?; - let sum = sum[0].clone(); + let sum = (&sum[0].0, sum[0].1); let sum_div_sf = var_div_big_chip.forward( layouter.namespace(|| format!("sum div sf")), - &vec![vec![&sum]], - &vec![zero, sf], + &vec![vec![sum]], + &vec![ + (zero, F::ZERO), + // TOCHECK + (sf, sf.value().cloned().assign().unwrap()) + ], )?; - let sum_div_sf = sum_div_sf[0].clone(); + let sum_div_sf = (&sum_div_sf[0].0, sum_div_sf[0].1); let dived = var_div_big_chip.forward( layouter.namespace(|| format!("div")), - &vec![exp_slice.iter().collect()], - &vec![zero, &sum_div_sf], + &vec![exp_slice.iter().map(|x| (&x.0, x.1)).collect()], + &vec![(zero, F::ZERO), sum_div_sf], )?; // Take either zero (softmax(-inf)) or the result @@ -102,7 +106,7 @@ impl SoftmaxChip { .iter() .map(|x| { if *x == 1 { - zero.clone() + (zero.clone(), F::ZERO) } else { let tmp = dived[div_idx].clone(); div_idx = div_idx + 1; @@ -158,7 +162,7 @@ impl Layer for SoftmaxChip { if inp.ndim() == 2 { for i in 0..shape[0] { let inp_slice = inp.slice(s![i, ..]); - let inp_flat = inp_slice.iter().map(|x| x.as_ref()).collect::>(); + let inp_flat = inp_slice.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let mask_slice = mask.slice(s![i, ..]); let mask_flat = mask_slice.iter().map(|x| *x as i64).collect::>(); let dived = Self::softmax_flat( @@ -175,7 +179,7 @@ impl Layer for SoftmaxChip { for i in 0..shape[0] { for j in 0..shape[1] { let inp_slice = inp.slice(s![i, j, ..]); - let inp_flat = inp_slice.iter().map(|x| x.as_ref()).collect::>(); + let inp_flat = inp_slice.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let mask_slice = mask.slice(s![i, j, ..]); let mask_flat = mask_slice.iter().map(|x| *x as i64).collect::>(); let dived = Self::softmax_flat( @@ -193,7 +197,7 @@ impl Layer for SoftmaxChip { panic!("Not implemented"); } - let outp = outp.into_iter().map(|x| Rc::new(x)).collect::>(); + let outp = outp.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let outp = Array::from_shape_vec(IxDyn(inp.shape()), outp).unwrap(); Ok(vec![outp]) } diff --git a/src/layers/sqrt.rs b/src/layers/sqrt.rs index 1068e28..9d1ff05 100644 --- a/src/layers/sqrt.rs +++ b/src/layers/sqrt.rs @@ -41,24 +41,34 @@ impl Layer for SqrtChip { if mask_map.contains_key(&i) { let mask_val = *mask_map.get(&i).unwrap(); if mask_val == 1 { - inp_vec.push(max_val); + // TOCHECK + inp_vec.push( + (max_val, max_val.value().cloned().assign().unwrap()) + ); } else if mask_val == -1 { - inp_vec.push(min_val); + 
inp_vec.push( + (min_val, min_val.value().cloned().assign().unwrap()) + ); } else { panic!(); } } else { - inp_vec.push(val.as_ref()); + inp_vec.push((val.0.as_ref(), val.1)); } } let zero = constants.get(&0).unwrap().as_ref(); let sqrt_chip = SqrtGadgetChip::::construct(gadget_config.clone()); let vec_inps = vec![inp_vec]; - let constants = vec![zero, min_val, max_val]; + // TOCHECK + let constants = vec![ + (zero, F::ZERO), + (min_val, min_val.value().cloned().assign().unwrap()), + (max_val, max_val.value().cloned().assign().unwrap()) + ]; let out = sqrt_chip.forward(layouter.namespace(|| "sqrt chip"), &vec_inps, &constants)?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap(); Ok(vec![out]) diff --git a/src/layers/square.rs b/src/layers/square.rs index d8aeb7b..0daf5d8 100644 --- a/src/layers/square.rs +++ b/src/layers/square.rs @@ -30,9 +30,9 @@ impl Layer for SquareChip { let zero = constants.get(&0).unwrap().as_ref(); let square_chip = SquareGadgetChip::::construct(gadget_config.clone()); - let inp_vec = inp.iter().map(|x| x.as_ref()).collect::>(); + let inp_vec = inp.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let vec_inputs = vec![inp_vec]; - let single_inps = vec![zero]; + let single_inps = vec![(zero, F::ZERO)]; let out = square_chip.forward( layouter.namespace(|| "square chip"), &vec_inputs, @@ -44,8 +44,12 @@ impl Layer for SquareChip { .get(&(gadget_config.scale_factor as i64)) .unwrap() .as_ref(); - let single_inps = vec![zero, div]; - let out = out.iter().collect::>(); + // TOCHECK + let single_inps = vec![ + (zero, F::ZERO), + (div, div.value().cloned().assign().unwrap()) + ]; + let out = out.iter().map(|x| (&x.0, x.1)).collect::>(); let vec_inputs = vec![out]; let out = var_div_chip.forward( layouter.namespace(|| "var div chip"), @@ -53,7 +57,7 @@ impl Layer for SquareChip { &single_inps, )?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap(); Ok(vec![out]) } diff --git a/src/layers/squared_diff.rs b/src/layers/squared_diff.rs index ee75858..6e7f1f7 100644 --- a/src/layers/squared_diff.rs +++ b/src/layers/squared_diff.rs @@ -36,10 +36,10 @@ impl Layer for SquaredDiffChip { let zero = constants.get(&0).unwrap().as_ref(); let sq_diff_chip = SquaredDiffGadgetChip::::construct(gadget_config.clone()); - let inp1_vec = inp1.iter().map(|x| x.as_ref()).collect::>(); - let inp2_vec = inp2.iter().map(|x| x.as_ref()).collect::>(); + let inp1_vec = inp1.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); + let inp2_vec = inp2.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let vec_inputs = vec![inp1_vec, inp2_vec]; - let tmp_constants = vec![zero]; + let tmp_constants = vec![(zero, F::ZERO)]; let out = sq_diff_chip.forward( layouter.namespace(|| "sq diff chip"), &vec_inputs, @@ -52,15 +52,19 @@ impl Layer for SquaredDiffChip { .unwrap() .as_ref(); - let single_inputs = vec![zero, div]; - let out = out.iter().map(|x| x).collect::>(); + // TOCHECK + let single_inputs = vec![ + (zero, F::ZERO), + (div, div.value().cloned().assign().unwrap()) + ]; + let out = out.iter().map(|x| (&x.0, x.1)).collect::>(); let out = var_div_chip.forward( layouter.namespace(|| "sq diff div"), &vec![out], &single_inputs, )?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = 
out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(inp1.shape()), out).unwrap(); Ok(vec![out]) diff --git a/src/layers/tanh.rs b/src/layers/tanh.rs index 5a12cac..0bf0301 100644 --- a/src/layers/tanh.rs +++ b/src/layers/tanh.rs @@ -24,15 +24,15 @@ impl Layer for TanhChip { _layer_config: &LayerConfig, ) -> Result>, Error> { let inp = &tensors[0]; - let inp_vec = inp.iter().map(|x| x.as_ref()).collect::>(); + let inp_vec = inp.iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); let zero = constants.get(&0).unwrap().as_ref(); let tanh_chip = TanhGadgetChip::::construct(gadget_config.clone()); let vec_inps = vec![inp_vec]; - let constants = vec![zero]; + let constants = vec![(zero, F::ZERO)]; let out = tanh_chip.forward(layouter.namespace(|| "tanh chip"), &vec_inps, &constants)?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap(); Ok(vec![out]) diff --git a/src/layers/update.rs b/src/layers/update.rs index 0e1064c..a2c8f39 100644 --- a/src/layers/update.rs +++ b/src/layers/update.rs @@ -29,16 +29,16 @@ impl Layer for UpdateChip { let zero = constants.get(&0).unwrap().as_ref(); let update_chip = UpdateGadgetChip::::construct((*gadget_config).clone()); - let flattened_w = w.into_iter().map(|x| (**x).clone()).collect::>(); - let flattened_dw = dw.into_iter().map(|x| (**x).clone()).collect::>(); - let flattened_w_ref = flattened_w.iter().collect::>(); - let flattened_dw_ref = flattened_dw.iter().collect::>(); + let flattened_w = w.into_iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); + let flattened_dw = dw.into_iter().map(|x| (x.0.as_ref(), x.1)).collect::>(); + let flattened_w_ref = flattened_w.iter().map(|x| (x.0, x.1)).collect::>(); + let flattened_dw_ref = flattened_dw.iter().map(|x| (x.0, x.1)).collect::>(); let vec_inps = vec![flattened_w_ref, flattened_dw_ref]; - let constants = vec![zero]; + let constants = vec![(zero, F::ZERO)]; let out = update_chip.forward(layouter.namespace(|| "update chip"), &vec_inps, &constants)?; - let out = out.into_iter().map(|x| Rc::new(x)).collect::>(); + let out = out.into_iter().map(|x| (Rc::new(x.0), x.1)).collect::>(); let out = Array::from_shape_vec(IxDyn(w.shape()), out).unwrap(); Ok(vec![out]) diff --git a/src/model.rs b/src/model.rs index 3d9a1a2..e8d93dd 100644 --- a/src/model.rs +++ b/src/model.rs @@ -127,7 +127,7 @@ impl> ModelCircuit { || Value::known(*val), ) .unwrap(); - flat.push(Rc::new(cell)); + flat.push((Rc::new(cell), *val)); cell_idx += 1; } let tensor = Array::from_shape_vec(tensor.shape(), flat).unwrap(); @@ -143,7 +143,7 @@ impl> ModelCircuit { pub fn tensor_map_to_vec( &self, - tensor_map: &BTreeMap, IxDyn>>, + tensor_map: &BTreeMap, F), IxDyn>>, ) -> Result>, Error> { let smallest_tensor = tensor_map .iter() @@ -761,16 +761,6 @@ impl> Circuit for ModelCircuit _ => panic!("unsupported gadget {:?}", gadget), } } - - // Assign extra space for challenge generation - self - .assign_tensors_vec( - layouter.namespace(|| "challenge generation"), - &config.gadget_config.witness_columns, - &self.tensors, - ) - .unwrap(); - // Assign weights and constants let constants_base = self .assign_constants( @@ -891,9 +881,9 @@ impl> Circuit for ModelCircuit for tensor in result { for cell in tensor.iter() { pub_layouter - .constrain_instance(cell.as_ref().cell(), config.public_col, total_idx) + .constrain_instance(cell.0.as_ref().cell(), config.public_col, 
total_idx)
          .unwrap();
-        let val = convert_to_bigint(cell.value().map(|x| x.to_owned()));
+        let val = convert_to_bigint(cell.0.value().map(|x| x.to_owned()));
         new_public_vals.push(val);
         total_idx += 1;
       }

From 5b9cd25d5ef20b77d85dca64ee31bbaf30c54298 Mon Sep 17 00:00:00 2001
From: span14
Date: Sat, 16 Sep 2023 19:40:56 -0500
Subject: [PATCH 06/16] update dot prod assignment

---
 src/gadgets/dot_prod.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/gadgets/dot_prod.rs b/src/gadgets/dot_prod.rs
index c406a12..7e5e1bc 100644
--- a/src/gadgets/dot_prod.rs
+++ b/src/gadgets/dot_prod.rs
@@ -1,7 +1,7 @@
 use std::{marker::PhantomData, rc::Rc};

 use halo2_proofs::{
-  circuit::{AssignedCell, Layouter, Region},
+  circuit::{AssignedCell, Layouter, Region, Value},
   halo2curves::ff::PrimeField,
   plonk::{Advice, Column, ConstraintSystem, Error, Expression},
   poly::Rotation,
@@ -144,8 +144,8 @@ impl Gadget for DotProductChip {
     let e = inp
       .iter()
       .zip(weights.iter())
-      .map(|(a, b)| (a.0.value().map(|x: &F| *x) * b.0.value(), a.1 * b.1))
-      .reduce(|a, b| (a.0 + b.0, a.1 + b.1))
+      .map(|(a, b)| a.1 * b.1)
+      .reduce(|a, b| a + b)
       .unwrap();

     let res = region
       .assign_advice(
         || "",
         self.config.columns[self.config.columns.len() - 1],
         row_offset,
-        || e.0,
+        || Value::known(e),
       )
       .unwrap();

-    Ok(vec![(res, e.1)])
+    Ok(vec![(res, e)])
   }

   fn forward(

From a8f88908dd5ee0c3ffcdb6d1a3d5c9f1b4d4bc80 Mon Sep 17 00:00:00 2001
From: span14
Date: Mon, 18 Sep 2023 01:09:46 -0500
Subject: [PATCH 07/16] finish fiat shamir

---
 Cargo.toml                          |   8 +-
 src/gadgets/bias_div_round_relu6.rs |   1 -
 src/gadgets/var_div_big.rs          |   1 -
 src/layers/arithmetic/div_var.rs    |  10 +-
 src/layers/dag.rs                   |   8 +-
 src/layers/fully_connected.rs       | 149 ++++++++------------------
 src/layers/rsqrt.rs                 |  25 +++--
 src/layers/shape/mask_neg_inf.rs    |  14 ++-
 src/layers/softmax.rs               |  10 +-
 src/layers/sqrt.rs                  |  28 ++++--
 src/layers/square.rs                |  11 +-
 src/layers/squared_diff.rs          |  11 +-
 src/model.rs                        |  79 ++++++++-------
 13 files changed, 173 insertions(+), 182 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 0dcf8dc..2c4effc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,10 +21,10 @@ opt-level = 3

 [dependencies]
 bitvec = "1.0.1"
-halo2 = { path = "../halo2_sys/halo2" }
-halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.3", package = "halo2curves" }
-halo2_gadgets = { path="../halo2_sys/halo2_gadgets", features = ["circuit-params"] }
-halo2_proofs = { path="../halo2_sys/halo2_proofs", features = ["circuit-params"] }
+halo2 = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2", rev="17e9765c199670534c0299c96128d0464a188d0b" }
+halo2_gadgets = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_gadgets", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] }
+halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.2", package = "halo2curves" }
+halo2_proofs = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_proofs", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] }
 lazy_static = "1.4.0"
 ndarray = "0.15.6"
 num-bigint = "0.4.3"
diff --git a/src/gadgets/bias_div_round_relu6.rs b/src/gadgets/bias_div_round_relu6.rs
index 787d00c..c912aad 100644
--- a/src/gadgets/bias_div_round_relu6.rs
+++ b/src/gadgets/bias_div_round_relu6.rs
@@ -267,7 +267,6 @@ impl Gadget for BiasDivRoundRelu6Chip
{ ) .unwrap(); - // outp_cells.push((outp_cell, div_res_cell)); outp_cells.push((outp_cell, outp)); outp_cells.push( ( diff --git a/src/gadgets/var_div_big.rs b/src/gadgets/var_div_big.rs index ca41747..37abbcf 100644 --- a/src/gadgets/var_div_big.rs +++ b/src/gadgets/var_div_big.rs @@ -202,7 +202,6 @@ impl Gadget for VarDivRoundBigChip { (p1, p0) }; - // TOCHECK let div_val = { let offset = F::from(-div_outp_min_val_i64 as u64); let c = F::from((div_mod.0 - div_outp_min_val_i64) as u64); diff --git a/src/layers/arithmetic/div_var.rs b/src/layers/arithmetic/div_var.rs index d8303c1..009471b 100644 --- a/src/layers/arithmetic/div_var.rs +++ b/src/layers/arithmetic/div_var.rs @@ -55,14 +55,18 @@ impl Layer for DivVarChip { assert_eq!(tensors[1].shape().len(), 1); assert_eq!(tensors[1].shape()[0], 1); - let sf = constants + let sf_cell = constants .get(&(gadget_config.scale_factor as i64)) .unwrap() .as_ref(); + let sf = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.scale_factor as i64 + shift_val_i64) as u64) - shift_val_f + }; - // TOCHECK let sf_tensor = - Array::from_shape_vec(IxDyn(&[1]), vec![(Rc::new(sf.clone()), sf.value().cloned().assign().unwrap())]).unwrap(); + Array::from_shape_vec(IxDyn(&[1]), vec![(Rc::new(sf_cell.clone()), sf)]).unwrap(); // out = inp * SF let (out, out_shape) = self.arithmetic_forward( diff --git a/src/layers/dag.rs b/src/layers/dag.rs index 3269be6..1a3e262 100644 --- a/src/layers/dag.rs +++ b/src/layers/dag.rs @@ -79,10 +79,10 @@ impl DAGLayerChip { let layer_type = &layer_config.layer_type; let inp_idxes = &self.dag_config.inp_idxes[layer_idx]; let out_idxes = &self.dag_config.out_idxes[layer_idx]; - // println!( - // "Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}", - // layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params - // ); + println!( + "Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}", + layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params + ); let vec_inps = inp_idxes .iter() .map(|idx| tensor_map.get(idx).unwrap().clone()) diff --git a/src/layers/fully_connected.rs b/src/layers/fully_connected.rs index adc2ad4..2066468 100644 --- a/src/layers/fully_connected.rs +++ b/src/layers/fully_connected.rs @@ -16,7 +16,6 @@ use crate::{ var_div::VarDivRoundChip, }, layers::layer::ActivationType, - utils::helpers::RAND_START_IDX, }; use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig}; @@ -38,7 +37,6 @@ pub struct FullyConnectedChip { impl FullyConnectedChip { pub fn compute_mm( - // input: &AssignedTensor, input: &ArrayView<(CellRc,F), IxDyn>, weight: &AssignedTensor, ) -> Array { @@ -82,23 +80,6 @@ impl FullyConnectedChip { Ok(Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap()) } - // pub fn random_vector( - // constants: &HashMap>, - // size: usize, - // ) -> Result>, Error> { - // let mut outp = vec![]; - // for idx in 0..size { - // let idx = RAND_START_IDX + (idx as i64); - // if !constants.contains_key(&idx) { - // println!("Random vector is too small: {:?}", size); - // } - // let cell = constants.get(&idx).unwrap().clone(); - // outp.push(cell); - // } - - // Ok(outp) - // } - pub fn random_vector( constants: &HashMap>, size: usize, @@ -160,88 +141,66 @@ impl Layer for FullyConnectedChip { let mm_result = Self::compute_mm(&input, weight); let result = Self::assign_array(&gadget_config.columns, &mut region, 
&mm_result).unwrap(); - - // Copy mm result to witness holder - let copy_result = - Self::assign_array(&gadget_config.witness_columns, &mut region, &mm_result).unwrap(); - - for (l, r) in copy_result.iter().zip(result.iter()) { - region.constrain_equal(l.0.cell(), r.0.cell())?; - } - Ok(result) }, ) .unwrap(); - // println!("Shape of Input: {:?}", input.shape()); - // println!("Shape of Weight: {:?}", weight.shape()); - // println!("Shape of MM: {:?}", mm_result.shape()); // Create copy constraint for input, weight, input*weight to witness columns - // layouter.assign_region( - // || "test", - // |mut region| { - // for (i, v) in mm_result.iter().enumerate() { - // let col = i % gadget_config.witness_columns.len(); - // let row = i / gadget_config.witness_columns.len(); - // region.assign_advice( - // || "test in witness columns", - // gadget_config.witness_columns[col], row, - // || v.value().map(|x| *x) - // )?; - // } - // Ok(()) - // } - // ).unwrap(); - - // layouter - // .assign_region( - // || "copy constraint for input", - // |mut region| { - // for (i, v) in input.iter().enumerate() { - // let col = i % gadget_config.witness_columns.len(); - // let row = i / gadget_config.witness_columns.len(); - // v.copy_advice( - // || "input in witness columns", - // &mut region, - // gadget_config.witness_columns[col], row - // )?; - // } - // Ok(()) - // } - // )?; - - // layouter.assign_region( - // || "copy constraint for weight", - // |mut region| { - // for (i, v) in weight.iter().enumerate() { - // let col = i % gadget_config.witness_columns.len(); - // let row = i / gadget_config.witness_columns.len(); - // v.copy_advice( - // || "weight in witness columns", - // &mut region, - // gadget_config.witness_columns[col], row - // )?; - // } - // Ok(()) - // } - // )?; - - // layouter.assign_region( - // || "copy constraint for mm", - // |mut region| { - // for (i, v) in mm_result.iter().enumerate() { - // let col = i % gadget_config.witness_columns.len(); - // let row = i / gadget_config.witness_columns.len(); - // v.copy_advice( - // || "mm in witness columns", - // &mut region, - // gadget_config.witness_columns[col], row - // )?; - // } - // Ok(()) - // } - // )?; + layouter + .assign_region( + || "copy constraint for input", + |mut region| { + for (i, v) in input.iter().enumerate() { + let col = i % gadget_config.witness_columns.len(); + let row = i / gadget_config.witness_columns.len(); + let tmp = region.assign_advice( + || "input in witness columns", + gadget_config.witness_columns[col], + row, + || Value::known(v.1) + )?; + region.constrain_equal(v.0.as_ref().cell(), tmp.cell())?; + } + Ok(()) + } + )?; + + layouter.assign_region( + || "copy constraint for weight", + |mut region| { + for (i, v) in weight.iter().enumerate() { + let col = i % gadget_config.witness_columns.len(); + let row = i / gadget_config.witness_columns.len(); + let tmp = region.assign_advice( + || "input in witness columns", + gadget_config.witness_columns[col], + row, + || Value::known(v.1) + )?; + region.constrain_equal(v.0.as_ref().cell(), tmp.cell())?; + } + Ok(()) + } + )?; + + layouter.assign_region( + || "copy constraint for mm", + |mut region| { + for (i, v) in mm_result.iter().enumerate() { + let col = i % gadget_config.witness_columns.len(); + let row = i / gadget_config.witness_columns.len(); + let tmp = region.assign_advice( + || "input in witness columns", + gadget_config.witness_columns[col], + row, + || Value::known(v.1) + )?; + region.constrain_equal(v.0.cell(), tmp.cell())?; + } + Ok(()) + } + 
)?; // Generate random vectors diff --git a/src/layers/rsqrt.rs b/src/layers/rsqrt.rs index 83089a0..fba748b 100644 --- a/src/layers/rsqrt.rs +++ b/src/layers/rsqrt.rs @@ -32,19 +32,26 @@ impl Layer for RsqrtChip { mask_map.insert(mask[2 * i], mask[2 * i + 1]); } - let min_val = gadget_config.min_val; - let min_val = constants.get(&min_val).unwrap().as_ref(); - let max_val = gadget_config.max_val; - let max_val = constants.get(&max_val).unwrap().as_ref(); + let min_val_cell = constants.get(&gadget_config.min_val).unwrap().as_ref(); + let min_val = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.min_val + shift_val_i64) as u64) - shift_val_f + }; + let max_val_cell = constants.get(&gadget_config.max_val).unwrap().as_ref(); + let max_val = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.max_val + shift_val_i64) as u64) - shift_val_f + }; for (i, val) in inp.iter().enumerate() { let i = i as i64; if mask_map.contains_key(&i) { let mask_val = *mask_map.get(&i).unwrap(); if mask_val == 1 { - // TOCHECK - inp_vec.push((max_val, max_val.value().cloned().assign().unwrap())); + inp_vec.push((max_val_cell, max_val)); } else if mask_val == -1 { - inp_vec.push((min_val, min_val.value().cloned().assign().unwrap())); + inp_vec.push((min_val_cell, min_val)); } else { panic!(); } @@ -58,8 +65,8 @@ impl Layer for RsqrtChip { let vec_inps = vec![inp_vec]; let constants = vec![ (zero, F::ZERO), - (min_val, min_val.value().cloned().assign().unwrap()), - (max_val, max_val.value().cloned().assign().unwrap()) + (min_val_cell, min_val), + (max_val_cell, max_val) ]; let out = rsqrt_chip.forward(layouter.namespace(|| "rsqrt chip"), &vec_inps, &constants)?; diff --git a/src/layers/shape/mask_neg_inf.rs b/src/layers/shape/mask_neg_inf.rs index bce84d8..49e9829 100644 --- a/src/layers/shape/mask_neg_inf.rs +++ b/src/layers/shape/mask_neg_inf.rs @@ -33,17 +33,21 @@ impl Layer for MaskNegInfChip { let mask = Array::from_shape_vec(IxDyn(&mask_shape), mask_vec).unwrap(); let mask = mask.broadcast(inp.raw_dim()).unwrap(); - let min_val = gadget_config.min_val; - let min_val = constants.get(&min_val).unwrap().clone(); + let min_val_cell = constants.get(&gadget_config.min_val).unwrap(); + let min_val = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.min_val + shift_val_i64) as u64) - shift_val_f + }; + let mut out_vec = vec![]; for (val, to_mask) in inp.iter().zip(mask.iter()) { if *to_mask == 0 { out_vec.push(val.clone()); } else { out_vec.push(( - min_val.clone(), - // TOCHECK - min_val.value().copied().assign().unwrap() + min_val_cell.clone(), + min_val )); } } diff --git a/src/layers/softmax.rs b/src/layers/softmax.rs index c1bf32d..ac329c2 100644 --- a/src/layers/softmax.rs +++ b/src/layers/softmax.rs @@ -36,10 +36,15 @@ impl SoftmaxChip { let var_div_big_chip = VarDivRoundBig3Chip::::construct(gadget_config.clone()); let zero = constants.get(&0).unwrap().as_ref(); - let sf = constants + let sf_cell = constants .get(&(gadget_config.scale_factor as i64)) .unwrap() .as_ref(); + let sf = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.scale_factor as i64 + shift_val_i64) as u64) - shift_val_f + }; // Mask the input for max computation and subtraction let inp_take = inp_flat @@ -88,8 +93,7 @@ impl SoftmaxChip 
{ &vec![vec![sum]], &vec![ (zero, F::ZERO), - // TOCHECK - (sf, sf.value().cloned().assign().unwrap()) + (sf_cell, sf) ], )?; let sum_div_sf = (&sum_div_sf[0].0, sum_div_sf[0].1); diff --git a/src/layers/sqrt.rs b/src/layers/sqrt.rs index 9d1ff05..25ed055 100644 --- a/src/layers/sqrt.rs +++ b/src/layers/sqrt.rs @@ -32,22 +32,30 @@ impl Layer for SqrtChip { mask_map.insert(mask[2 * i], mask[2 * i + 1]); } - let min_val = gadget_config.min_val; - let min_val = constants.get(&min_val).unwrap().as_ref(); - let max_val = gadget_config.max_val; - let max_val = constants.get(&max_val).unwrap().as_ref(); + let min_val_cell = constants.get(&gadget_config.min_val).unwrap().as_ref(); + let min_val = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.min_val + shift_val_i64) as u64) - shift_val_f + }; + let max_val_cell = constants.get(&gadget_config.max_val).unwrap().as_ref(); + let max_val = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.max_val + shift_val_i64) as u64) - shift_val_f + }; + for (i, val) in inp.iter().enumerate() { let i = i as i64; if mask_map.contains_key(&i) { let mask_val = *mask_map.get(&i).unwrap(); if mask_val == 1 { - // TOCHECK inp_vec.push( - (max_val, max_val.value().cloned().assign().unwrap()) + (max_val_cell, max_val) ); } else if mask_val == -1 { inp_vec.push( - (min_val, min_val.value().cloned().assign().unwrap()) + (min_val_cell, min_val) ); } else { panic!(); @@ -60,11 +68,11 @@ impl Layer for SqrtChip { let zero = constants.get(&0).unwrap().as_ref(); let sqrt_chip = SqrtGadgetChip::::construct(gadget_config.clone()); let vec_inps = vec![inp_vec]; - // TOCHECK + let constants = vec![ (zero, F::ZERO), - (min_val, min_val.value().cloned().assign().unwrap()), - (max_val, max_val.value().cloned().assign().unwrap()) + (min_val_cell, min_val), + (max_val_cell, max_val) ]; let out = sqrt_chip.forward(layouter.namespace(|| "sqrt chip"), &vec_inps, &constants)?; diff --git a/src/layers/square.rs b/src/layers/square.rs index 0daf5d8..c940491 100644 --- a/src/layers/square.rs +++ b/src/layers/square.rs @@ -40,14 +40,19 @@ impl Layer for SquareChip { )?; let var_div_chip = VarDivRoundChip::::construct(gadget_config.clone()); - let div = constants + let div_cell = constants .get(&(gadget_config.scale_factor as i64)) .unwrap() .as_ref(); - // TOCHECK + let div = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.scale_factor as i64 + shift_val_i64) as u64) - shift_val_f + }; + let single_inps = vec![ (zero, F::ZERO), - (div, div.value().cloned().assign().unwrap()) + (div_cell, div) ]; let out = out.iter().map(|x| (&x.0, x.1)).collect::>(); let vec_inputs = vec![out]; diff --git a/src/layers/squared_diff.rs b/src/layers/squared_diff.rs index 6e7f1f7..915b731 100644 --- a/src/layers/squared_diff.rs +++ b/src/layers/squared_diff.rs @@ -47,15 +47,18 @@ impl Layer for SquaredDiffChip { )?; let var_div_chip = VarDivRoundChip::::construct(gadget_config.clone()); - let div = constants + let div_cell = constants .get(&(gadget_config.scale_factor as i64)) .unwrap() .as_ref(); - - // TOCHECK + let div = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.scale_factor as i64 + shift_val_i64) as u64) - shift_val_f + }; let single_inputs = vec![ (zero, F::ZERO), - (div, 
div.value().cloned().assign().unwrap()) + (div_cell, div) ]; let out = out.iter().map(|x| (&x.0, x.1)).collect::>(); let out = var_div_chip.forward( diff --git a/src/model.rs b/src/model.rs index e8d93dd..7e50183 100644 --- a/src/model.rs +++ b/src/model.rs @@ -68,7 +68,7 @@ use crate::{ update::UpdateChip, }, utils::{ - helpers::{convert_to_bigint, RAND_START_IDX}, + helpers::convert_to_bigint, loader::{load_model_msgpack, ModelMsgpack}, }, }; @@ -141,6 +141,37 @@ impl> ModelCircuit { Ok(tensors) } + pub fn copy_tensors( + &self, + mut layouter: impl Layouter, + columns: &Vec>, + tensors: &Vec>, + ) -> Result<(), Error> { + layouter.assign_region( + || "Public Inputs", + |mut region| { + let mut cell_idx = 0; + for tensor_row in tensors.iter() { + for tensor in tensor_row.iter() { + let row_idx = cell_idx / columns.len(); + let col_idx = cell_idx % columns.len(); + let tmp = region + .assign_advice( + || "pi copy", + columns[col_idx], + row_idx, + || Value::known(tensor.1), + )?; + region.constrain_equal(tensor.0.as_ref().cell(), tmp.cell())?; + cell_idx += 1; + } + } + Ok(()) + } + )?; + Ok(()) + } + pub fn tensor_map_to_vec( &self, tensor_map: &BTreeMap, F), IxDyn>>, @@ -208,24 +239,6 @@ impl> ModelCircuit { constants.insert(*val, Rc::new(cell)); } - // TODO: I've made some very bad life decisions - // TOOD: this needs to be a random oracle - // let r_base = F::from(0x123456789abcdef); - // let r_base = layouter.get_challenge(); - - // let mut r = challenge; - - // for i in 0..self.num_random { - // let rand = region.assign_fixed( - // || format!("rand_{}", i), - // gadget_config.fixed_columns[0], - // constants.len(), - // || r, - // )?; - // r = r * challenge; - // constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand)); - // } - Ok(constants) }, )?; @@ -266,26 +279,6 @@ impl> ModelCircuit { constants.insert(*val, Rc::new(cell)); } - // TODO: I've made some very bad life decisions - // TOOD: this needs to be a random oracle - // let r_base = F::from(0x123456789abcdef); - // let r_base = c.assign().unwrap_or(F::from(0x123456789abcdef)); - // let mut r = challenge; - - // for i in 0..self.num_random { - // let assignment_idx = constants.len(); - // let row_idx = assignment_idx / gadget_config.columns.len(); - // let col_idx = assignment_idx % gadget_config.columns.len(); - // let rand = region.assign_advice( - // || format!("rand_{}", i), - // gadget_config.columns[col_idx], - // row_idx, - // || r, - // )?; - // r = r * challenge; - // constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand)); - // } - for (k, v) in fixed_constants.iter() { let v2 = constants.get(k).unwrap(); region.constrain_equal(v.cell(), v2.cell()).unwrap(); @@ -617,7 +610,6 @@ impl> Circuit for ModelCircuit meta.enable_equality(columns[i]); } - meta.enable_equality(rand_vector); gadget_config.witness_columns = witness_columns; @@ -838,6 +830,13 @@ impl> Circuit for ModelCircuit config.rand_vector )?; + // Copy the public inputs to be included into commitment + self.copy_tensors( + layouter.namespace(|| "Public Inputs"), + &config.gadget_config.witness_columns, + &tensors + )?; + // Perform the dag let dag_chip = DAGLayerChip::::construct(self.dag_config.clone()); let (final_tensor_map, result) = dag_chip.forward( From 041d184b345404a1700cb4c3f75a6524e68444a5 Mon Sep 17 00:00:00 2001 From: span14 Date: Mon, 18 Sep 2023 16:53:58 -0500 Subject: [PATCH 08/16] change build dependency --- Cargo.toml | 8 ++++---- src/gadgets/gadget.rs | 2 +- src/layers/arithmetic/add.rs | 2 +- src/layers/arithmetic/div_var.rs | 2 
+- src/layers/arithmetic/mul.rs | 2 +- src/layers/arithmetic/sub.rs | 2 +- src/layers/avg_pool_2d.rs | 2 +- src/layers/batch_mat_mul.rs | 2 +- src/layers/conv2d.rs | 2 +- src/layers/dag.rs | 11 +++++++---- src/layers/div_fixed.rs | 2 +- src/layers/fully_connected.rs | 25 +++++++++++++++---------- src/layers/layer.rs | 2 +- src/layers/logistic.rs | 8 ++++++-- src/layers/max_pool_2d.rs | 8 ++++++-- src/layers/mean.rs | 2 +- src/layers/noop.rs | 8 ++++++-- src/layers/pow.rs | 8 ++++++-- src/layers/rsqrt.rs | 8 ++++++-- src/layers/shape/broadcast.rs | 2 +- src/layers/shape/concatenation.rs | 2 +- src/layers/shape/mask_neg_inf.rs | 2 +- src/layers/shape/pack.rs | 2 +- src/layers/shape/pad.rs | 2 +- src/layers/shape/permute.rs | 2 +- src/layers/shape/reshape.rs | 2 +- src/layers/shape/resize_nn.rs | 2 +- src/layers/shape/rotate.rs | 2 +- src/layers/shape/slice.rs | 2 +- src/layers/shape/split.rs | 2 +- src/layers/shape/transpose.rs | 2 +- src/layers/softmax.rs | 2 +- src/layers/sqrt.rs | 8 ++++++-- src/layers/square.rs | 8 ++++++-- src/layers/squared_diff.rs | 8 ++++++-- src/layers/tanh.rs | 8 ++++++-- src/layers/update.rs | 8 ++++++-- src/model.rs | 15 ++++++++++----- 38 files changed, 120 insertions(+), 67 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2c4effc..edaf7e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,10 +21,10 @@ opt-level = 3 [dependencies] bitvec = "1.0.1" -halo2 = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2", rev="17e9765c199670534c0299c96128d0464a188d0b" } -halo2_gadgets = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_gadgets", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } -halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.2", package = "halo2curves" } -halo2_proofs = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_proofs", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] } +halo2 = { git= "https://github.com/span14/halo2", branch="zkml", package="halo2"} +halo2_gadgets = { git= "https://github.com/span14/halo2", branch="zkml", package="halo2_gadgets", features = ["circuit-params"]} +halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.2", package = "halo2curves"} +halo2_proofs = { git= "https://github.com/span14/halo2", branch="zkml", package="halo2_proofs", features = ["circuit-params", "zkml"]} lazy_static = "1.4.0" ndarray = "0.15.6" num-bigint = "0.4.3" diff --git a/src/gadgets/gadget.rs b/src/gadgets/gadget.rs index 22edc1d..0f23ef7 100644 --- a/src/gadgets/gadget.rs +++ b/src/gadgets/gadget.rs @@ -103,7 +103,7 @@ pub trait Gadget { row_offset: usize, vec_inputs: &Vec, F)>>, single_inputs: &Vec<(&AssignedCell, F)>, - ) -> Result,F)>, Error>; + ) -> Result, F)>, Error>; // The caller is required to ensure that the inputs are of the correct length. 
fn op_aligned_rows( diff --git a/src/layers/arithmetic/add.rs b/src/layers/arithmetic/add.rs index e269ded..7cfed38 100644 --- a/src/layers/arithmetic/add.rs +++ b/src/layers/arithmetic/add.rs @@ -55,7 +55,7 @@ impl Layer for AddChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/arithmetic/div_var.rs b/src/layers/arithmetic/div_var.rs index 009471b..ebfc29d 100644 --- a/src/layers/arithmetic/div_var.rs +++ b/src/layers/arithmetic/div_var.rs @@ -46,7 +46,7 @@ impl Layer for DivVarChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &crate::layers::layer::LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/arithmetic/mul.rs b/src/layers/arithmetic/mul.rs index a72e45c..5ca0bda 100644 --- a/src/layers/arithmetic/mul.rs +++ b/src/layers/arithmetic/mul.rs @@ -50,7 +50,7 @@ impl Layer for MulChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/arithmetic/sub.rs b/src/layers/arithmetic/sub.rs index f764007..6cf0e9b 100644 --- a/src/layers/arithmetic/sub.rs +++ b/src/layers/arithmetic/sub.rs @@ -43,7 +43,7 @@ impl Layer for SubChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/avg_pool_2d.rs b/src/layers/avg_pool_2d.rs index 5d3747b..4a81444 100644 --- a/src/layers/avg_pool_2d.rs +++ b/src/layers/avg_pool_2d.rs @@ -65,7 +65,7 @@ impl Layer for AvgPool2DChip { layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/batch_mat_mul.rs b/src/layers/batch_mat_mul.rs index 4979cf1..fcbf8ae 100644 --- a/src/layers/batch_mat_mul.rs +++ b/src/layers/batch_mat_mul.rs @@ -21,7 +21,7 @@ impl Layer for BatchMatMulChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - rand_vector: &HashMap>, + rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/conv2d.rs b/src/layers/conv2d.rs index 268b40e..4708e4c 100644 --- a/src/layers/conv2d.rs +++ b/src/layers/conv2d.rs @@ -291,7 +291,7 @@ impl Layer for Conv2DChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>>, - rand_vector: &HashMap>, + rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/dag.rs b/src/layers/dag.rs index 1a3e262..2fe6141 100644 --- a/src/layers/dag.rs +++ b/src/layers/dag.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, fs::File, io::BufWriter, marker::PhantomData, rc::Rc}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use crate::{ gadgets::gadget::{convert_to_u64, GadgetConfig}, @@ -28,7 +32,6 @@ use crate::{ tanh::TanhChip, update::UpdateChip, }, - utils::helpers::print_assigned_arr, }; use super::{ @@ -64,7 +67,7 @@ impl DAGLayerChip { mut layouter: 
impl Layouter, tensors: &Vec>, constants: &HashMap>, - rand_vector: &HashMap>, + rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result<(HashMap>, Vec>), Error> { @@ -486,7 +489,7 @@ impl DAGLayerChip { } }; - let tmp = print_arr.iter().map(|x| x.0.as_ref()).collect::>(); + // let tmp = print_arr.iter().map(|x| x.0.as_ref()).collect::>(); // print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor); // println!("final out idxes: {:?}", self.dag_config.final_out_idxes); diff --git a/src/layers/div_fixed.rs b/src/layers/div_fixed.rs index 9f46f6f..73f6a91 100644 --- a/src/layers/div_fixed.rs +++ b/src/layers/div_fixed.rs @@ -55,7 +55,7 @@ impl Layer for DivFixedChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/fully_connected.rs b/src/layers/fully_connected.rs index 2066468..2316903 100644 --- a/src/layers/fully_connected.rs +++ b/src/layers/fully_connected.rs @@ -81,9 +81,9 @@ impl FullyConnectedChip { } pub fn random_vector( - constants: &HashMap>, + constants: &HashMap, F)>, size: usize, - ) -> Result>, Error> { + ) -> Result, F)>, Error> { let mut outp = vec![]; for idx in 0..size { @@ -116,7 +116,7 @@ impl Layer for FullyConnectedChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - rand_vector: &HashMap>, + rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { @@ -209,10 +209,10 @@ impl Layer for FullyConnectedChip { let dot_prod_chip = DotProductChip::::construct(gadget_config.clone()); let r1_ref = r1.iter().map( - |x| (x.as_ref(), x.value().cloned().assign().unwrap_or(F::from(0x123456789abcdef))) + |x| (x.0.as_ref(), x.1) ).collect::>(); let r2_ref = r2.iter().map( - |x| (x.as_ref(), x.value().cloned().assign().unwrap_or(F::from(0x123456789abcdef))) + |x| (x.0.as_ref(), x.1) ).collect::>(); // Compute r1 * result @@ -307,15 +307,20 @@ impl Layer for FullyConnectedChip { let final_result_flat = if self.config.normalize { let mm_flat = mm_result.iter().map(|x| (&x.0, x.1)).collect::>(); let var_div_chip = VarDivRoundChip::::construct(gadget_config.clone()); - let sf = constants - .get(&(gadget_config.scale_factor as i64)) - .unwrap() - .as_ref(); + let sf_cell = constants + .get(&(gadget_config.scale_factor as i64)) + .unwrap() + .as_ref(); + let sf = { + let shift_val_i64 = -gadget_config.min_val * 2; + let shift_val_f = F::from(shift_val_i64 as u64); + F::from((gadget_config.scale_factor as i64 + shift_val_i64) as u64) - shift_val_f + }; let mm_div = var_div_chip .forward( layouter.namespace(|| "mm_div"), &vec![mm_flat], - &vec![(zero, F::ZERO), (sf, sf.value().cloned().assign().unwrap())], + &vec![(zero, F::ZERO), (sf_cell, sf)], ) .unwrap(); diff --git a/src/layers/layer.rs b/src/layers/layer.rs index 138ee8e..ceaa1c6 100644 --- a/src/layers/layer.rs +++ b/src/layers/layer.rs @@ -79,7 +79,7 @@ pub trait Layer { layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - rand_vector: &HashMap>, + rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error>; diff --git a/src/layers/logistic.rs b/src/layers/logistic.rs index a5314ab..2d9f67e 100644 --- a/src/layers/logistic.rs +++ b/src/layers/logistic.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use 
halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::gadgets::{ @@ -19,7 +23,7 @@ impl Layer for LogisticChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/max_pool_2d.rs b/src/layers/max_pool_2d.rs index d50f5a4..aae064d 100644 --- a/src/layers/max_pool_2d.rs +++ b/src/layers/max_pool_2d.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::{ @@ -85,7 +89,7 @@ impl Layer for MaxPool2DChip { mut layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/mean.rs b/src/layers/mean.rs index 791628f..bd09199 100644 --- a/src/layers/mean.rs +++ b/src/layers/mean.rs @@ -113,7 +113,7 @@ impl Layer for MeanChip { layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/noop.rs b/src/layers/noop.rs index d8f1740..c7db2dc 100644 --- a/src/layers/noop.rs +++ b/src/layers/noop.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use crate::gadgets::gadget::GadgetConfig; @@ -14,7 +18,7 @@ impl Layer for NoopChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/pow.rs b/src/layers/pow.rs index 0bb6fd7..6db3939 100644 --- a/src/layers/pow.rs +++ b/src/layers/pow.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::gadgets::{ @@ -19,7 +23,7 @@ impl Layer for PowChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/rsqrt.rs b/src/layers/rsqrt.rs index fba748b..175da54 100644 --- a/src/layers/rsqrt.rs +++ b/src/layers/rsqrt.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::gadgets::{ @@ -19,7 +23,7 @@ impl Layer for RsqrtChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/broadcast.rs b/src/layers/shape/broadcast.rs index ad9d501..76f1778 100644 --- a/src/layers/shape/broadcast.rs +++ 
b/src/layers/shape/broadcast.rs @@ -24,7 +24,7 @@ impl Layer for BroadcastChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/concatenation.rs b/src/layers/shape/concatenation.rs index 9545d95..59a52b2 100644 --- a/src/layers/shape/concatenation.rs +++ b/src/layers/shape/concatenation.rs @@ -18,7 +18,7 @@ impl Layer for ConcatenationChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/mask_neg_inf.rs b/src/layers/shape/mask_neg_inf.rs index 49e9829..7501166 100644 --- a/src/layers/shape/mask_neg_inf.rs +++ b/src/layers/shape/mask_neg_inf.rs @@ -18,7 +18,7 @@ impl Layer for MaskNegInfChip { _layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/pack.rs b/src/layers/shape/pack.rs index e25b6fc..4b1e643 100644 --- a/src/layers/shape/pack.rs +++ b/src/layers/shape/pack.rs @@ -18,7 +18,7 @@ impl Layer for PackChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/pad.rs b/src/layers/shape/pad.rs index 1f629b4..ce8fa26 100644 --- a/src/layers/shape/pad.rs +++ b/src/layers/shape/pad.rs @@ -75,7 +75,7 @@ impl Layer for PadChip { _layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/permute.rs b/src/layers/shape/permute.rs index 1631f02..c2c74a0 100644 --- a/src/layers/shape/permute.rs +++ b/src/layers/shape/permute.rs @@ -18,7 +18,7 @@ impl Layer for PermuteChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/reshape.rs b/src/layers/shape/reshape.rs index 2bcf297..1f9fb1d 100644 --- a/src/layers/shape/reshape.rs +++ b/src/layers/shape/reshape.rs @@ -18,7 +18,7 @@ impl Layer for ReshapeChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/resize_nn.rs b/src/layers/shape/resize_nn.rs index a094191..1e142ce 100644 --- a/src/layers/shape/resize_nn.rs +++ b/src/layers/shape/resize_nn.rs @@ -19,7 +19,7 @@ impl Layer for ResizeNNChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/rotate.rs b/src/layers/shape/rotate.rs index 866414a..503abf6 100644 --- a/src/layers/shape/rotate.rs +++ b/src/layers/shape/rotate.rs @@ -28,7 +28,7 @@ impl Layer for RotateChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) 
-> Result>, Error> { diff --git a/src/layers/shape/slice.rs b/src/layers/shape/slice.rs index d4c7788..44701bb 100644 --- a/src/layers/shape/slice.rs +++ b/src/layers/shape/slice.rs @@ -18,7 +18,7 @@ impl Layer for SliceChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/split.rs b/src/layers/shape/split.rs index 73d411e..8a69261 100644 --- a/src/layers/shape/split.rs +++ b/src/layers/shape/split.rs @@ -18,7 +18,7 @@ impl Layer for SplitChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/shape/transpose.rs b/src/layers/shape/transpose.rs index 3a84752..384e463 100644 --- a/src/layers/shape/transpose.rs +++ b/src/layers/shape/transpose.rs @@ -18,7 +18,7 @@ impl Layer for TransposeChip { _layouter: impl Layouter, tensors: &Vec>, _constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, _gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/softmax.rs b/src/layers/softmax.rs index ac329c2..e2c3209 100644 --- a/src/layers/softmax.rs +++ b/src/layers/softmax.rs @@ -129,7 +129,7 @@ impl Layer for SoftmaxChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/sqrt.rs b/src/layers/sqrt.rs index 25ed055..85080a2 100644 --- a/src/layers/sqrt.rs +++ b/src/layers/sqrt.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::gadgets::{ @@ -19,7 +23,7 @@ impl Layer for SqrtChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/square.rs b/src/layers/square.rs index c940491..1d370af 100644 --- a/src/layers/square.rs +++ b/src/layers/square.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::gadgets::{ @@ -20,7 +24,7 @@ impl Layer for SquareChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/squared_diff.rs b/src/layers/squared_diff.rs index 915b731..76be2d0 100644 --- a/src/layers/squared_diff.rs +++ b/src/layers/squared_diff.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::{ @@ -23,7 +27,7 @@ impl Layer for SquaredDiffChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + 
_rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/tanh.rs b/src/layers/tanh.rs index 0bf0301..76f3d8c 100644 --- a/src/layers/tanh.rs +++ b/src/layers/tanh.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::gadgets::{ @@ -19,7 +23,7 @@ impl Layer for TanhChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/layers/update.rs b/src/layers/update.rs index a2c8f39..484b069 100644 --- a/src/layers/update.rs +++ b/src/layers/update.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, rc::Rc, vec}; -use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use halo2_proofs::{ + circuit::Layouter, + halo2curves::ff::PrimeField, + plonk::Error +}; use ndarray::{Array, IxDyn}; use crate::gadgets::{ @@ -19,7 +23,7 @@ impl Layer for UpdateChip { mut layouter: impl Layouter, tensors: &Vec>, constants: &HashMap>, - _rand_vector: &HashMap>, + _rand_vector: &HashMap, F)>, gadget_config: Rc, _layer_config: &LayerConfig, ) -> Result>, Error> { diff --git a/src/model.rs b/src/model.rs index 7e50183..7b5c4bd 100644 --- a/src/model.rs +++ b/src/model.rs @@ -294,22 +294,27 @@ impl> ModelCircuit { mut layouter: impl Layouter, challenge: Challenge, rand_vector: Column, - ) -> Result>, Error> { - let c_base = layouter.get_challenge(challenge); + ) -> Result, F)>, Error> { + let c_base = { + let c = layouter.get_challenge(challenge); + // Default value here is provided to pass mock prover check and it will be fiat shamir + // challenge in proof generation + c.assign().map_or(F::from(0x123456789abcdef), |x| x) + }; let mut c = c_base; let rand_vec = layouter.assign_region( || "random vector", |mut region| { - let mut rand_vec: HashMap> = HashMap::new(); + let mut rand_vec = HashMap::new(); for i in 0..self.num_random { let rand = region.assign_advice( || format!("rand_vec_{}", i), rand_vector, i.try_into().unwrap(), - || c, + || Value::known(c), )?; + rand_vec.insert(i as i64, (Rc::new(rand), c)); c = c * c_base; - rand_vec.insert(i as i64, Rc::new(rand)); } Ok(rand_vec) } From 6a5cf60e25fc025a033747dedda359f5584bc54e Mon Sep 17 00:00:00 2001 From: span14 Date: Tue, 19 Sep 2023 12:20:24 -0500 Subject: [PATCH 09/16] update extraneous change --- src/bin/verify_circuit.rs | 1 - src/gadgets/bias_div_floor_relu6.rs | 2 +- src/gadgets/bias_div_round_relu6.rs | 2 +- src/layers/batch_mat_mul.rs | 8 ++++---- src/layers/dag.rs | 9 +++++---- src/layers/fully_connected.rs | 3 +-- src/lib.rs | 4 +--- src/model.rs | 1 - src/utils/proving_kzg.rs | 1 - 9 files changed, 13 insertions(+), 18 deletions(-) diff --git a/src/bin/verify_circuit.rs b/src/bin/verify_circuit.rs index c4c1e4a..ee1514c 100644 --- a/src/bin/verify_circuit.rs +++ b/src/bin/verify_circuit.rs @@ -14,7 +14,6 @@ fn main() { if kzg_or_ipa != "kzg" && kzg_or_ipa != "ipa" { panic!("Must specify kzg or ipa"); } - if kzg_or_ipa == "kzg" { let config = load_config_msgpack(&config_fname); let circuit = ModelCircuit::::generate_from_msgpack(config, false); diff --git a/src/gadgets/bias_div_floor_relu6.rs b/src/gadgets/bias_div_floor_relu6.rs index d2931ca..31a0126 100644 --- 
a/src/gadgets/bias_div_floor_relu6.rs +++ b/src/gadgets/bias_div_floor_relu6.rs @@ -197,7 +197,7 @@ impl Gadget for BiasDivFloorRelu6Chip { let outp = { let mut x_pos = div_res - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { - // println!("x: {}, x_pos: {}", x, x_pos); + println!("x: {}, x_pos: {}", div_res, x_pos); x_pos = 0; } let outp_val = relu_map.get(&(x_pos)).unwrap(); diff --git a/src/gadgets/bias_div_round_relu6.rs b/src/gadgets/bias_div_round_relu6.rs index c912aad..ab92d1c 100644 --- a/src/gadgets/bias_div_round_relu6.rs +++ b/src/gadgets/bias_div_round_relu6.rs @@ -224,7 +224,7 @@ impl Gadget for BiasDivRoundRelu6Chip { let outp = { let mut x_pos = div_res - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { - // println!("x: {}, x_pos: {}", x, x_pos); + println!("x: {}, x_pos: {}", div_res, x_pos); x_pos = 0; } let outp_val = relu_map.get(&(x_pos)).unwrap(); diff --git a/src/layers/batch_mat_mul.rs b/src/layers/batch_mat_mul.rs index fcbf8ae..0d745a2 100644 --- a/src/layers/batch_mat_mul.rs +++ b/src/layers/batch_mat_mul.rs @@ -27,8 +27,8 @@ impl Layer for BatchMatMulChip { ) -> Result>, Error> { let inp1 = &tensors[0]; let inp2 = &tensors[1]; - // println!("inp1: {:?}", inp1.shape()); - // println!("inp2: {:?}", inp2.shape()); + println!("inp1: {:?}", inp1.shape()); + println!("inp2: {:?}", inp2.shape()); assert_eq!(inp1.ndim(), 3); assert_eq!(inp2.ndim(), 3); @@ -61,8 +61,8 @@ impl Layer for BatchMatMulChip { } else { inp2.index_axis(Axis(0), i).t().to_owned() }; - // println!("inp1_slice: {:?}", inp1_slice.shape()); - // println!("inp2_slice: {:?}", inp2_slice.shape()); + println!("inp1_slice: {:?}", inp1_slice.shape()); + println!("inp2_slice: {:?}", inp2_slice.shape()); // Batch MM doesn't have a fused activation, so insert it here // TODO: consider putting this in the converter? 
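The inp1_slice/inp2_slice logic above reduces the batched product to one 2-D multiplication per batch element, optionally transposing the second operand. A rough ndarray equivalent of that loop, using f64 where the chip works over assigned field cells; the name adj_y and the free function are illustrative only:

use ndarray::{Array2, Array3, Axis};

// [b, m, k] x [b, k, n] -> b matrices of shape [m, n]; with `adj_y` the second
// operand arrives as [b, n, k] and each slice is transposed before the matmul.
fn batch_mat_mul(inp1: &Array3<f64>, inp2: &Array3<f64>, adj_y: bool) -> Vec<Array2<f64>> {
  (0..inp1.shape()[0])
    .map(|i| {
      let inp1_slice = inp1.index_axis(Axis(0), i).to_owned();
      let inp2_slice = if adj_y {
        inp2.index_axis(Axis(0), i).t().to_owned()
      } else {
        inp2.index_axis(Axis(0), i).to_owned()
      };
      inp1_slice.dot(&inp2_slice) // one plain 2-D matmul per batch element
    })
    .collect()
}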
let tmp_config = LayerConfig { diff --git a/src/layers/dag.rs b/src/layers/dag.rs index 2fe6141..865236c 100644 --- a/src/layers/dag.rs +++ b/src/layers/dag.rs @@ -32,6 +32,7 @@ use crate::{ tanh::TanhChip, update::UpdateChip, }, + utils::helpers::print_assigned_arr, }; use super::{ @@ -76,7 +77,7 @@ impl DAGLayerChip { for (idx, tensor) in tensors.iter().enumerate() { tensor_map.insert(idx, tensor.clone()); } - // println!("Tensors Length: {}", tensors.len()); + // Compute the dag for (layer_idx, layer_config) in self.dag_config.ops.iter().enumerate() { let layer_type = &layer_config.layer_type; @@ -489,9 +490,9 @@ impl DAGLayerChip { } }; - // let tmp = print_arr.iter().map(|x| x.0.as_ref()).collect::>(); - // print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor); - // println!("final out idxes: {:?}", self.dag_config.final_out_idxes); + let tmp = print_arr.iter().map(|x| x.0.as_ref()).collect::>(); + print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor); + println!("final out idxes: {:?}", self.dag_config.final_out_idxes); let mut x = vec![]; for cell in print_arr.iter() { diff --git a/src/layers/fully_connected.rs b/src/layers/fully_connected.rs index 2316903..8d5ebb7 100644 --- a/src/layers/fully_connected.rs +++ b/src/layers/fully_connected.rs @@ -37,6 +37,7 @@ pub struct FullyConnectedChip { impl FullyConnectedChip { pub fn compute_mm( + // input: &AssignedTensor, input: &ArrayView<(CellRc,F), IxDyn>, weight: &AssignedTensor, ) -> Array { @@ -106,8 +107,6 @@ impl FullyConnectedChip { _ => panic!("Unsupported activation type for fully connected"), } } - - } impl Layer for FullyConnectedChip { diff --git a/src/lib.rs b/src/lib.rs index c919461..376e0c1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,9 +1,7 @@ #![feature(int_roundings)] pub mod commitments; -// pub mod conversion; pub mod gadgets; pub mod layers; pub mod model; -pub mod utils; -// pub mod simple; \ No newline at end of file +pub mod utils; \ No newline at end of file diff --git a/src/model.rs b/src/model.rs index 7b5c4bd..e28a994 100644 --- a/src/model.rs +++ b/src/model.rs @@ -4,7 +4,6 @@ use std::{ rc::Rc, sync::{Arc, Mutex}, }; -// use blake2b_simd::{Params as Blake2bParams, State as Blake2bState}; use halo2_proofs::{ circuit::{Layouter, SimpleFloorPlanner, Value}, halo2curves::ff::{FromUniformBytes, PrimeField}, diff --git a/src/utils/proving_kzg.rs b/src/utils/proving_kzg.rs index 10eb8b4..bfd53de 100644 --- a/src/utils/proving_kzg.rs +++ b/src/utils/proving_kzg.rs @@ -125,7 +125,6 @@ pub fn time_circuit_kzg(circuit: ModelCircuit) { .collect(); let public_vals_u8_size = serialize(&public_vals_u8, "public_vals"); println!("Public vals size: {} bytes", public_vals_u8_size); - // println!("{:?}", public_vals); let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); create_proof::< KZGCommitmentScheme, From 6142f920957da0d434e0cac98646d1bbba52bcb0 Mon Sep 17 00:00:00 2001 From: span14 Date: Tue, 19 Sep 2023 17:17:06 -0500 Subject: [PATCH 10/16] further remove extraneous changes --- src/lib.rs | 2 +- src/model.rs | 2 -- src/utils/proving_kzg.rs | 1 + 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 376e0c1..4ad95f1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,4 +4,4 @@ pub mod commitments; pub mod gadgets; pub mod layers; pub mod model; -pub mod utils; \ No newline at end of file +pub mod utils; diff --git a/src/model.rs b/src/model.rs index e28a994..0b2afc2 100644 --- a/src/model.rs +++ b/src/model.rs @@
-688,7 +688,6 @@ impl> Circuit for ModelCircuit } fn synthesize(&self, config: Self::Config, mut layouter: impl Layouter) -> Result<(), Error> { - // Assign tables let gadget_rc: Rc = config.gadget_config.clone().into(); for gadget in self.used_gadgets.iter() { @@ -764,7 +763,6 @@ impl> Circuit for ModelCircuit config.gadget_config.clone(), ) .unwrap(); - // Some halo2 cancer let constants = self .assign_constants2( diff --git a/src/utils/proving_kzg.rs b/src/utils/proving_kzg.rs index bfd53de..0e01167 100644 --- a/src/utils/proving_kzg.rs +++ b/src/utils/proving_kzg.rs @@ -125,6 +125,7 @@ pub fn time_circuit_kzg(circuit: ModelCircuit) { .collect(); let public_vals_u8_size = serialize(&public_vals_u8, "public_vals"); println!("Public vals size: {} bytes", public_vals_u8_size); + let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); create_proof::< KZGCommitmentScheme, From ac7c4470a3c8be76646e20342ea54756d86dba93 Mon Sep 17 00:00:00 2001 From: span14 Date: Tue, 19 Sep 2023 17:25:28 -0500 Subject: [PATCH 11/16] even more extraneous changes removed --- src/layers/avg_pool_2d.rs | 2 +- src/layers/dag.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/layers/avg_pool_2d.rs b/src/layers/avg_pool_2d.rs index 4a81444..62f877b 100644 --- a/src/layers/avg_pool_2d.rs +++ b/src/layers/avg_pool_2d.rs @@ -77,7 +77,7 @@ impl Layer for AvgPool2DChip { // TODO: refactor this let out_xy = MaxPool2DChip::shape(inp, layer_config); let out_shape = vec![1, out_xy.0, out_xy.1, inp.shape()[3]]; - // println!("out_shape: {:?}", out_shape); + println!("out_shape: {:?}", out_shape); let out = Array::from_shape_vec(IxDyn(&out_shape), dived).unwrap(); Ok(vec![out]) diff --git a/src/layers/dag.rs b/src/layers/dag.rs index 865236c..51505e8 100644 --- a/src/layers/dag.rs +++ b/src/layers/dag.rs @@ -467,7 +467,7 @@ impl DAGLayerChip { }; for (idx, tensor_idx) in out_idxes.iter().enumerate() { - // println!("Out {} shape: {:?}", idx, out[idx].shape()); + println!("Out {} shape: {:?}", idx, out[idx].shape()); tensor_map.insert(*tensor_idx, out[idx].clone()); } println!(); From 9f96c4ad31c67525d028470cb2490e00b2aa3d38 Mon Sep 17 00:00:00 2001 From: span14 Date: Tue, 19 Sep 2023 17:29:07 -0500 Subject: [PATCH 12/16] even more extraneous changes removed --- src/utils/proving_kzg.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/proving_kzg.rs b/src/utils/proving_kzg.rs index 0e01167..dd44442 100644 --- a/src/utils/proving_kzg.rs +++ b/src/utils/proving_kzg.rs @@ -125,7 +125,7 @@ pub fn time_circuit_kzg(circuit: ModelCircuit) { .collect(); let public_vals_u8_size = serialize(&public_vals_u8, "public_vals"); println!("Public vals size: {} bytes", public_vals_u8_size); - + let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); create_proof::< KZGCommitmentScheme, From 9e778f2824f406b77933d3615d3f4d14519ac91d Mon Sep 17 00:00:00 2001 From: span14 Date: Tue, 19 Sep 2023 17:35:57 -0500 Subject: [PATCH 13/16] should be the last extraneous change --- src/layers/fully_connected.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/layers/fully_connected.rs b/src/layers/fully_connected.rs index 8d5ebb7..260ae2c 100644 --- a/src/layers/fully_connected.rs +++ b/src/layers/fully_connected.rs @@ -86,7 +86,6 @@ impl FullyConnectedChip { size: usize, ) -> Result, F)>, Error> { let mut outp = vec![]; - for idx in 0..size { let idx = idx as i64; if !constants.contains_key(&idx) { From b77e57679ab04357b1463595b40a65a1335f22fc Mon Sep 17 00:00:00 2001
From: span14 Date: Tue, 31 Oct 2023 00:35:37 -0500 Subject: [PATCH 14/16] add gather op --- .gitignore | 2 + Cargo.toml | 11 ++- python/converter.py | 7 +- src/bin/bench_circuit.rs | 13 +++ src/bin/breakdown_circuit.rs | 85 ++++++++++++++++++ src/gadgets/nonlinear/non_linearity.rs | 3 + src/layers/dag.rs | 13 ++- src/layers/layer.rs | 1 + src/layers/shape.rs | 1 + src/layers/shape/gather.rs | 45 ++++++++++ src/model.rs | 4 +- src/utils.rs | 1 + src/utils/bench_kzg.rs | 117 +++++++++++++++++++++++++ 13 files changed, 295 insertions(+), 8 deletions(-) create mode 100644 src/bin/bench_circuit.rs create mode 100644 src/bin/breakdown_circuit.rs create mode 100644 src/layers/shape/gather.rs create mode 100644 src/utils/bench_kzg.rs diff --git a/.gitignore b/.gitignore index 5f0c1a1..fdac5c6 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,5 @@ params_ipa examples testing/data *.diff + +ignore/* \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index edaf7e0..28d407b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,11 +20,12 @@ opt-level = 3 opt-level = 3 [dependencies] +ark-std = { version = "^0.4.0", default-features = false} bitvec = "1.0.1" -halo2 = { git= "https://github.com/span14/halo2", branch="zkml", package="halo2"} -halo2_gadgets = { git= "https://github.com/span14/halo2", branch="zkml", package="halo2_gadgets", features = ["circuit-params"]} +halo2 = { git= "https://github.com/span14/halo2", branch="benchmark", package="halo2"} +halo2_gadgets = { git= "https://github.com/span14/halo2", branch="benchmark", package="halo2_gadgets", features = ["circuit-params"]} halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.2", package = "halo2curves"} -halo2_proofs = { git= "https://github.com/span14/halo2", branch="zkml", package="halo2_proofs", features = ["circuit-params", "zkml"]} +halo2_proofs = { git= "https://github.com/span14/halo2", branch="benchmark", package="halo2_proofs", features = ["circuit-params", "zkml"]} lazy_static = "1.4.0" ndarray = "0.15.6" num-bigint = "0.4.3" @@ -37,3 +38,7 @@ serde = "1.0.152" serde_derive = "1.0.152" serde_json = "1.0.85" wav = "1.0.0" + +[features] +print-trace=["ark-std/print-trace", "halo2_proofs/print-trace"] +stats=["halo2_proofs/stats"] diff --git a/python/converter.py b/python/converter.py index b8b4856..350c491 100644 --- a/python/converter.py +++ b/python/converter.py @@ -315,9 +315,6 @@ def to_dict(self, start_layer, end_layer): elif op_code == tflite.BuiltinOperator.SHAPE: layer_type = 'Noop' params = [0] - elif op_code == tflite.BuiltinOperator.GATHER: - layer_type = 'Noop' - params = [0] elif op_code == tflite.BuiltinOperator.REDUCE_PROD: # TODO: not sure if this is in general a no-op layer_type = 'Noop' @@ -334,6 +331,10 @@ def to_dict(self, start_layer, end_layer): params = [0] ## Shape + elif op_code == tflite.BuiltinOperator.GATHER: + pos = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64) + layer_type = 'Gather' + params = pos.tolist() elif op_code == tflite.BuiltinOperator.RESHAPE: layer_type = 'Reshape' params = [] diff --git a/src/bin/bench_circuit.rs b/src/bin/bench_circuit.rs new file mode 100644 index 0000000..bcb6483 --- /dev/null +++ b/src/bin/bench_circuit.rs @@ -0,0 +1,13 @@ +use halo2_proofs::halo2curves::bn256::Fr; +use zkml::{ + model::ModelCircuit, + utils::bench_kzg::bench_kzg, +}; + +fn main() { + let config_fname = std::env::args().nth(1).expect("config file path"); + let inp_fname = std::env::args().nth(2).expect("input file path"); + let 
step = std::env::args().nth(3).expect("step"); + let circuit = ModelCircuit::::generate_from_file(&config_fname, &inp_fname); + bench_kzg(step, circuit); +} diff --git a/src/bin/breakdown_circuit.rs b/src/bin/breakdown_circuit.rs new file mode 100644 index 0000000..9cc492a --- /dev/null +++ b/src/bin/breakdown_circuit.rs @@ -0,0 +1,85 @@ +use ark_std::{end_timer, start_timer}; +use halo2_proofs::{ + dev::MockProver, + halo2curves::bn256::{Bn256, Fr, G1Affine}, + plonk::{create_proof, keygen_pk, keygen_vk}, + poly::kzg::{ + commitment::KZGCommitmentScheme, + multiopen::ProverSHPLONK, + strategy::SingleStrategy, + }, + transcript::{ + Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer, + }, +}; + +use zkml::{ + model::ModelCircuit, + utils::{ + proving_kzg::{get_kzg_params, verify_kzg}, + helpers::get_public_values, + } + +}; + +fn main() { + let config_fname = std::env::args().nth(1).expect("config file path"); + let inp_fname = std::env::args().nth(2).expect("input file path"); + let circuit = ModelCircuit::::generate_from_file(&config_fname, &inp_fname); + + + let rng = rand::thread_rng(); + + let timer = start_timer!(|| "Setup"); + let degree = circuit.k as u32; + let params = get_kzg_params("./params_kzg", degree); + end_timer!(timer); + + let timer = start_timer!(|| "Preprocess"); + let vk_circuit = circuit.clone(); + let vk = keygen_vk(¶ms, &vk_circuit).unwrap(); + drop(vk_circuit); + + let pk_circuit = circuit.clone(); + let pk = keygen_pk(¶ms, vk, &pk_circuit).unwrap(); + drop(pk_circuit); + end_timer!(timer); + + let proof_circuit = circuit.clone(); + let _prover = MockProver::run(degree, &proof_circuit, vec![vec![]]).unwrap(); + let public_vals = get_public_values(); + + let timer = start_timer!(|| "Prove"); + let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); + create_proof::< + KZGCommitmentScheme, + ProverSHPLONK<'_, Bn256>, + Challenge255, + _, + Blake2bWrite, G1Affine, Challenge255>, + ModelCircuit, + >( + ¶ms, + &pk, + &[proof_circuit], + &[&[&public_vals]], + rng, + &mut transcript, + ) + .unwrap(); + let proof = transcript.finalize(); + end_timer!(timer); + + let timer = start_timer!(|| "Verify"); + let strategy = SingleStrategy::new(¶ms); + let transcript_read = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); + verify_kzg( + ¶ms, + &pk.get_vk(), + strategy, + &public_vals, + transcript_read, + ); + end_timer!(timer); + +} diff --git a/src/gadgets/nonlinear/non_linearity.rs b/src/gadgets/nonlinear/non_linearity.rs index 8a6a560..6b5fb34 100644 --- a/src/gadgets/nonlinear/non_linearity.rs +++ b/src/gadgets/nonlinear/non_linearity.rs @@ -143,6 +143,9 @@ pub trait NonLinearGadget: Gadget { let outp = { let pos = convert_to_u128(&(inp[i].1 + shift_val_pos)) as i128 - shift_val_pos_i64 as i128; let x = pos as i64 - min_val; + if ((*map).get(&x)).is_none() { + println!("x: {}", x); + } let val = *map.get(&x).unwrap(); if x == 0 { F::ZERO diff --git a/src/layers/dag.rs b/src/layers/dag.rs index 51505e8..cb195f8 100644 --- a/src/layers/dag.rs +++ b/src/layers/dag.rs @@ -23,7 +23,7 @@ use crate::{ broadcast::BroadcastChip, concatenation::ConcatenationChip, mask_neg_inf::MaskNegInfChip, pack::PackChip, pad::PadChip, permute::PermuteChip, reshape::ReshapeChip, resize_nn::ResizeNNChip, rotate::RotateChip, slice::SliceChip, split::SplitChip, - transpose::TransposeChip, + transpose::TransposeChip, gather::GatherChip, }, softmax::SoftmaxChip, sqrt::SqrtChip, @@ -200,6 +200,17 @@ impl DAGLayerChip { &layer_config, )? 
} + LayerType::Gather => { + let gather_chip = GatherChip {}; + gather_chip.forward( + layouter.namespace(|| "dag gather"), + &vec_inps, + constants, + rand_vector, + gadget_config.clone(), + &layer_config, + )? + } LayerType::Softmax => { let softmax_chip = SoftmaxChip {}; softmax_chip.forward( diff --git a/src/layers/layer.rs b/src/layers/layer.rs index ceaa1c6..be45a21 100644 --- a/src/layers/layer.rs +++ b/src/layers/layer.rs @@ -20,6 +20,7 @@ pub enum LayerType { DivVar, DivFixed, FullyConnected, + Gather, Logistic, MaskNegInf, MaxPool2D, diff --git a/src/layers/shape.rs b/src/layers/shape.rs index 12aa85c..cb3ec02 100644 --- a/src/layers/shape.rs +++ b/src/layers/shape.rs @@ -10,3 +10,4 @@ pub mod rotate; pub mod slice; pub mod split; pub mod transpose; +pub mod gather; \ No newline at end of file diff --git a/src/layers/shape/gather.rs b/src/layers/shape/gather.rs new file mode 100644 index 0000000..0338349 --- /dev/null +++ b/src/layers/shape/gather.rs @@ -0,0 +1,45 @@ +use std::{collections::HashMap, rc::Rc}; + +use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error}; +use ndarray::{Array, Axis}; + +use crate::{ + gadgets::gadget::GadgetConfig, + layers::layer::{AssignedTensor, CellRc, GadgetConsumer}, +}; + +use super::super::layer::{Layer, LayerConfig}; + +pub struct GatherChip {} + +impl Layer for GatherChip { + fn forward( + &self, + _layouter: impl Layouter, + tensors: &Vec>, + _constants: &HashMap>, + _rand_vector: &HashMap, F)>, + _gadget_config: Rc, + layer_config: &LayerConfig, + ) -> Result>, Error> { + let inp = &tensors[0]; + let view = inp.dim(); // [size,batch] + let idx = layer_config.layer_params.clone(); + + + let mut tmp = vec![]; + for col in inp.axis_iter(Axis(1)) { + let flatten = col.iter().cloned().collect::>(); + + let _ = idx.iter().for_each(|x| tmp.push(flatten[(*x) as usize].clone())); + } + let out = Array::from_shape_vec(vec![view[1], idx.len()], tmp).unwrap().reversed_axes(); + Ok(vec![out]) + } +} + +impl GadgetConsumer for GatherChip { + fn used_gadgets(&self, _layer_params: Vec) -> Vec { + vec![] + } +} diff --git a/src/model.rs b/src/model.rs index 0b2afc2..659dabf 100644 --- a/src/model.rs +++ b/src/model.rs @@ -57,7 +57,7 @@ use crate::{ broadcast::BroadcastChip, concatenation::ConcatenationChip, mask_neg_inf::MaskNegInfChip, pack::PackChip, pad::PadChip, permute::PermuteChip, reshape::ReshapeChip, resize_nn::ResizeNNChip, rotate::RotateChip, slice::SliceChip, split::SplitChip, - transpose::TransposeChip, + transpose::TransposeChip, gather::GatherChip, }, softmax::SoftmaxChip, sqrt::SqrtChip, @@ -343,6 +343,7 @@ impl> ModelCircuit { "Div" => LayerType::DivFixed, // TODO: rename to DivFixed "DivVar" => LayerType::DivVar, "FullyConnected" => LayerType::FullyConnected, + "Gather" => LayerType::Gather, "Logistic" => LayerType::Logistic, "MaskNegInf" => LayerType::MaskNegInf, "MaxPool2D" => LayerType::MaxPool2D, @@ -412,6 +413,7 @@ impl> ModelCircuit { config: FullyConnectedConfig { normalize: true }, _marker: PhantomData::, }) as Box, + LayerType::Gather => Box::new(GatherChip {}) as Box, LayerType::Logistic => Box::new(LogisticChip {}) as Box, LayerType::MaskNegInf => Box::new(MaskNegInfChip {}) as Box, LayerType::MaxPool2D => Box::new(MaxPool2DChip { diff --git a/src/utils.rs b/src/utils.rs index 08ee186..d61f580 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -2,3 +2,4 @@ pub mod helpers; pub mod loader; pub mod proving_ipa; pub mod proving_kzg; +pub mod bench_kzg; \ No newline at end of file diff --git 
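The GatherChip introduced above is pure reindexing: it copies the rows selected by the layer's layer_params indices and assigns no new cells, which is why its used_gadgets list is empty. The same semantics in plain ndarray, as a sketch rather than the chip itself:

use ndarray::{array, Array2, Axis};

// Gather along axis 0: keep exactly the rows listed in `idx`, in that order.
fn gather_rows(inp: &Array2<i64>, idx: &[usize]) -> Array2<i64> {
  inp.select(Axis(0), idx)
}

fn main() {
  let inp = array![[1, 2], [3, 4], [5, 6]];
  // Rows 2 and 0, in that order: [[5, 6], [1, 2]].
  assert_eq!(gather_rows(&inp, &[2, 0]), array![[5, 6], [1, 2]]);
}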
a/src/utils/bench_kzg.rs b/src/utils/bench_kzg.rs new file mode 100644 index 0000000..39f7aa9 --- /dev/null +++ b/src/utils/bench_kzg.rs @@ -0,0 +1,117 @@ +use std::{ + fs::File, + io::BufReader, + }; + +use halo2_proofs::{ + dev::MockProver, + halo2curves::bn256::{Bn256, Fr, G1Affine}, + plonk::{create_proof, keygen_pk, keygen_vk, VerifyingKey, ProvingKey}, + poly::kzg::{ + commitment::KZGCommitmentScheme, + multiopen::ProverSHPLONK, + strategy::SingleStrategy, + }, + transcript::{ + Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer, + }, + SerdeFormat, +}; + +use crate::{model::ModelCircuit, utils::helpers::get_public_values}; +use crate::utils::proving_kzg::{serialize, get_kzg_params, verify_kzg}; +use serde_derive::{Serialize, Deserialize}; +use serde_json; + +#[derive(Serialize, Deserialize)] +pub struct PublicVal { + pub vals: Vec<[u8; 32]> +} + + +pub fn bench_kzg(step: String, circuit: ModelCircuit) { + + if step == "setup" { + let degree = circuit.k as u32; + let params = get_kzg_params("./params_kzg", degree); + let vk_circuit = circuit.clone(); + let vk = keygen_vk(¶ms, &vk_circuit).unwrap(); + drop(vk_circuit); + let _ = serialize(&vk.to_bytes(SerdeFormat::RawBytes), "vkey"); + + let pk_circuit = circuit.clone(); + let pk = keygen_pk(¶ms, vk, &pk_circuit).unwrap(); + drop(pk_circuit); + let _ = serialize(&pk.to_bytes(SerdeFormat::RawBytes), "pkey"); + + let proof_circuit = circuit.clone(); + let _prover = MockProver::run(degree, &proof_circuit, vec![vec![]]).unwrap(); + let public_vals = get_public_values(); + let public_vals_u8_32: Vec<[u8; 32]> = public_vals + .iter() + .map(|v: &Fr| v.to_bytes()) + .collect(); + serde_json::to_writer( + File::create("public_vals").unwrap(), &PublicVal{ + vals: public_vals_u8_32 + }).unwrap(); + } else if step == "prove" { + let rng = rand::thread_rng(); + let degree = circuit.k as u32; + let params = get_kzg_params("./params_kzg", degree); + let pk = ProvingKey::read::, ModelCircuit>( + &mut BufReader::new(File::open("pkey").unwrap()), + SerdeFormat::RawBytes, + () + ) + .unwrap(); + let public_val_raw: PublicVal = serde_json::from_reader( + File::open("public_vals").unwrap() + ).unwrap(); + let public_vals = public_val_raw.vals.iter().map(|x| Fr::from_bytes(x).unwrap()).collect::>(); + + let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); + create_proof::< + KZGCommitmentScheme, + ProverSHPLONK<'_, Bn256>, + Challenge255, + _, + Blake2bWrite, G1Affine, Challenge255>, + ModelCircuit, + >( + ¶ms, + &pk, + &[circuit], + &[&[&public_vals]], + rng, + &mut transcript, + ) + .unwrap(); + let proof = transcript.finalize(); + let _ = serialize(&proof, "proof"); + } else if step == "verify" { + let degree = circuit.k as u32; + let params = get_kzg_params("./params_kzg", degree); + let proof = std::fs::read("proof").unwrap(); + let strategy = SingleStrategy::new(¶ms); + let vk = VerifyingKey::read::, ModelCircuit>( + &mut BufReader::new(File::open("vkey").unwrap()), + SerdeFormat::RawBytes, + () + ) + .unwrap(); + let public_val_raw: PublicVal = serde_json::from_reader( + File::open("public_vals").unwrap() + ).unwrap(); + let public_vals = public_val_raw.vals.iter().map(|x| Fr::from_bytes(x).unwrap()).collect::>(); + let transcript_read = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); + verify_kzg( + ¶ms, + &vk, + strategy, + &public_vals, + transcript_read, + ); + + } +} \ No newline at end of file From 0fe8adb6c824c8df2a45f8eb5a55fa657cd6f078 Mon Sep 17 00:00:00 2001 From: 
span14 Date: Sun, 5 Nov 2023 16:46:32 -0600 Subject: [PATCH 15/16] remove logs for benchmark testing --- src/commitments/packer.rs | 10 +++++----- src/gadgets/bias_div_floor_relu6.rs | 8 ++++---- src/gadgets/bias_div_round_relu6.rs | 8 ++++---- src/gadgets/gadget.rs | 6 +++--- src/gadgets/nonlinear/non_linearity.rs | 6 +++--- src/layers/avg_pool_2d.rs | 2 +- src/layers/batch_mat_mul.rs | 8 ++++---- src/layers/dag.rs | 16 ++++++++-------- src/layers/shape/broadcast.rs | 2 +- src/layers/shape/reshape.rs | 2 +- src/layers/shape/rotate.rs | 2 +- 11 files changed, 35 insertions(+), 35 deletions(-) diff --git a/src/commitments/packer.rs b/src/commitments/packer.rs index 493a0b8..921e821 100644 --- a/src/commitments/packer.rs +++ b/src/commitments/packer.rs @@ -51,16 +51,16 @@ impl PackerChip { // TODO: for many columns, pack many in a single row NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem }; - println!("column len: {}", columns.len()); - println!("num_bits_per_elem: {}", num_bits_per_elem); - println!("NUM_BITS_PER_FIELD_ELEM: {}", NUM_BITS_PER_FIELD_ELEM); - println!("num_elem_per_packed: {}", num_elem_per_packed); + // println!("column len: {}", columns.len()); + // println!("num_bits_per_elem: {}", num_bits_per_elem); + // println!("NUM_BITS_PER_FIELD_ELEM: {}", NUM_BITS_PER_FIELD_ELEM); + // println!("num_elem_per_packed: {}", num_elem_per_packed); let num_packed_per_row = max( 1, columns.len() / (num_elem_per_packed * (num_bits_per_elem + 1)), ); - println!("num_packed_per_row: {}", num_packed_per_row); + // println!("num_packed_per_row: {}", num_packed_per_row); let exponents = Self::get_exponents(num_bits_per_elem, num_elem_per_packed); diff --git a/src/gadgets/bias_div_floor_relu6.rs b/src/gadgets/bias_div_floor_relu6.rs index 31a0126..8986bce 100644 --- a/src/gadgets/bias_div_floor_relu6.rs +++ b/src/gadgets/bias_div_floor_relu6.rs @@ -196,10 +196,10 @@ impl Gadget for BiasDivFloorRelu6Chip { let outp = { let mut x_pos = div_res - div_outp_min_val_i64; - if !relu_map.contains_key(&(x_pos)) { - println!("x: {}, x_pos: {}", div_res, x_pos); - x_pos = 0; - } + // if !relu_map.contains_key(&(x_pos)) { + // println!("x: {}, x_pos: {}", div_res, x_pos); + // x_pos = 0; + // } let outp_val = relu_map.get(&(x_pos)).unwrap(); // println!("x: {}, x_pos: {}, outp_val: {}", x, x_pos, outp_val); F::from(*outp_val as u64) diff --git a/src/gadgets/bias_div_round_relu6.rs b/src/gadgets/bias_div_round_relu6.rs index ab92d1c..f90e8fb 100644 --- a/src/gadgets/bias_div_round_relu6.rs +++ b/src/gadgets/bias_div_round_relu6.rs @@ -223,10 +223,10 @@ impl Gadget for BiasDivRoundRelu6Chip { let outp = { let mut x_pos = div_res - div_outp_min_val_i64; - if !relu_map.contains_key(&(x_pos)) { - println!("x: {}, x_pos: {}", div_res, x_pos); - x_pos = 0; - } + // if !relu_map.contains_key(&(x_pos)) { + // println!("x: {}, x_pos: {}", div_res, x_pos); + // x_pos = 0; + // } let outp_val = relu_map.get(&(x_pos)).unwrap(); F::from(*outp_val as u64) }; diff --git a/src/gadgets/gadget.rs b/src/gadgets/gadget.rs index 0f23ef7..0dfb148 100644 --- a/src/gadgets/gadget.rs +++ b/src/gadgets/gadget.rs @@ -67,9 +67,9 @@ pub struct GadgetConfig { pub fn convert_to_u64(x: &F) -> u64 { let big = BigUint::from_bytes_le(x.to_repr().as_ref()); let big_digits = big.to_u64_digits(); - if big_digits.len() > 2 { - println!("big_digits: {:?}", big_digits); - } + // if big_digits.len() > 2 { + // println!("big_digits: {:?}", big_digits); + // } if big_digits.len() == 1 { big_digits[0] as u64 } else if big_digits.len() == 0 { diff --git 
a/src/gadgets/nonlinear/non_linearity.rs b/src/gadgets/nonlinear/non_linearity.rs index 6b5fb34..2531452 100644 --- a/src/gadgets/nonlinear/non_linearity.rs +++ b/src/gadgets/nonlinear/non_linearity.rs @@ -143,9 +143,9 @@ pub trait NonLinearGadget: Gadget { let outp = { let pos = convert_to_u128(&(inp[i].1 + shift_val_pos)) as i128 - shift_val_pos_i64 as i128; let x = pos as i64 - min_val; - if ((*map).get(&x)).is_none() { - println!("x: {}", x); - } + // if ((*map).get(&x)).is_none() { + // println!("x: {}", x); + // } let val = *map.get(&x).unwrap(); if x == 0 { F::ZERO diff --git a/src/layers/avg_pool_2d.rs b/src/layers/avg_pool_2d.rs index 62f877b..4a81444 100644 --- a/src/layers/avg_pool_2d.rs +++ b/src/layers/avg_pool_2d.rs @@ -77,7 +77,7 @@ impl Layer for AvgPool2DChip { // TODO: refactor this let out_xy = MaxPool2DChip::shape(inp, layer_config); let out_shape = vec![1, out_xy.0, out_xy.1, inp.shape()[3]]; - println!("out_shape: {:?}", out_shape); + // println!("out_shape: {:?}", out_shape); let out = Array::from_shape_vec(IxDyn(&out_shape), dived).unwrap(); Ok(vec![out]) diff --git a/src/layers/batch_mat_mul.rs b/src/layers/batch_mat_mul.rs index 0d745a2..fcbf8ae 100644 --- a/src/layers/batch_mat_mul.rs +++ b/src/layers/batch_mat_mul.rs @@ -27,8 +27,8 @@ impl Layer for BatchMatMulChip { ) -> Result>, Error> { let inp1 = &tensors[0]; let inp2 = &tensors[1]; - println!("inp1: {:?}", inp1.shape()); - println!("inp2: {:?}", inp2.shape()); + // println!("inp1: {:?}", inp1.shape()); + // println!("inp2: {:?}", inp2.shape()); assert_eq!(inp1.ndim(), 3); assert_eq!(inp2.ndim(), 3); @@ -61,8 +61,8 @@ impl Layer for BatchMatMulChip { } else { inp2.index_axis(Axis(0), i).t().to_owned() }; - println!("inp1_slice: {:?}", inp1_slice.shape()); - println!("inp2_slice: {:?}", inp2_slice.shape()); + // println!("inp1_slice: {:?}", inp1_slice.shape()); + // println!("inp2_slice: {:?}", inp2_slice.shape()); // Batch MM doesn't have a fused activation, so insert it here // TODO: consider putting this in the converter? 
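On the packer arithmetic whose debug output is silenced above: with num_bits_per_elem bits per quantized value and NUM_BITS_PER_FIELD_ELEM usable bits per field element, floor(NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem) values fit into one packed element, each shifted into its own bit window by a precomputed exponent. A toy version with u128 standing in for the field element, assuming every value fits in num_bits bits and vals.len() * num_bits <= 128:

// Pack small values into one wide word: sum over i of v_i * 2^(i * num_bits).
// PackerChip does the same over F, using the constants from get_exponents
// in place of bit shifts.
fn pack(vals: &[u64], num_bits: u32) -> u128 {
  vals
    .iter()
    .enumerate()
    .fold(0u128, |acc, (i, v)| acc | ((*v as u128) << (num_bits * i as u32)))
}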
let tmp_config = LayerConfig { diff --git a/src/layers/dag.rs b/src/layers/dag.rs index cb195f8..3c222a9 100644 --- a/src/layers/dag.rs +++ b/src/layers/dag.rs @@ -83,10 +83,10 @@ impl DAGLayerChip { let layer_type = &layer_config.layer_type; let inp_idxes = &self.dag_config.inp_idxes[layer_idx]; let out_idxes = &self.dag_config.out_idxes[layer_idx]; - println!( - "Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}", - layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params - ); + // println!( + // "Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}", + // layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params + // ); let vec_inps = inp_idxes .iter() .map(|idx| tensor_map.get(idx).unwrap().clone()) @@ -478,10 +478,10 @@ impl DAGLayerChip { }; for (idx, tensor_idx) in out_idxes.iter().enumerate() { - println!("Out {} shape: {:?}", idx, out[idx].shape()); + // println!("Out {} shape: {:?}", idx, out[idx].shape()); tensor_map.insert(*tensor_idx, out[idx].clone()); } - println!(); + // println!(); } let mut final_out = vec![]; @@ -502,8 +502,8 @@ impl DAGLayerChip { }; let tmp = print_arr.iter().map(|x| x.0.as_ref()).collect::>(); - print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor); - println!("final out idxes: {:?}", self.dag_config.final_out_idxes); + // print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor); + // println!("final out idxes: {:?}", self.dag_config.final_out_idxes); let mut x = vec![]; for cell in print_arr.iter() { diff --git a/src/layers/shape/broadcast.rs b/src/layers/shape/broadcast.rs index 76f1778..1f9ee4a 100644 --- a/src/layers/shape/broadcast.rs +++ b/src/layers/shape/broadcast.rs @@ -59,7 +59,7 @@ impl Layer for BroadcastChip { } } - println!("Broadcast : {:?} -> {:?}", inp.shape(), output_shape); + // println!("Broadcast : {:?} -> {:?}", inp.shape(), output_shape); let out = Array::from_shape_vec(output_shape, output_flat).unwrap(); Ok(vec![out]) } diff --git a/src/layers/shape/reshape.rs b/src/layers/shape/reshape.rs index 1f9fb1d..4a4c6f7 100644 --- a/src/layers/shape/reshape.rs +++ b/src/layers/shape/reshape.rs @@ -25,7 +25,7 @@ impl Layer for ReshapeChip { let inp = &tensors[0]; let shape = layer_config.out_shapes[0].clone(); - println!("Reshape: {:?} -> {:?}", inp.shape(), shape); + // println!("Reshape: {:?} -> {:?}", inp.shape(), shape); let flat = inp.iter().map(|x| x.clone()).collect(); let out = Array::from_shape_vec(shape, flat).unwrap(); Ok(vec![out]) diff --git a/src/layers/shape/rotate.rs b/src/layers/shape/rotate.rs index 503abf6..5065f30 100644 --- a/src/layers/shape/rotate.rs +++ b/src/layers/shape/rotate.rs @@ -43,7 +43,7 @@ impl Layer for RotateChip { } let shape = inp.shape(); - println!("Rotate: {:?} -> {:?}", inp.shape(), shape); + // println!("Rotate: {:?} -> {:?}", inp.shape(), shape); let mut out = inp.clone(); From 7611eebe76b6cb77bae642acf56c13ad888c2620 Mon Sep 17 00:00:00 2001 From: span14 Date: Wed, 8 Nov 2023 15:27:54 -0600 Subject: [PATCH 16/16] update column control and randomness --- Cargo.toml | 6 +++--- src/gadgets/gadget.rs | 1 + src/model.rs | 44 +++++++++++++++++++++++++++++++------------ src/utils/loader.rs | 4 ++++ 4 files changed, 40 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 28d407b..b9aa964 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,10 +22,10 @@ opt-level = 3 [dependencies] ark-std = { version = "^0.4.0", default-features = false} bitvec = 
"1.0.1" -halo2 = { git= "https://github.com/span14/halo2", branch="benchmark", package="halo2"} -halo2_gadgets = { git= "https://github.com/span14/halo2", branch="benchmark", package="halo2_gadgets", features = ["circuit-params"]} +halo2 = { git="https://github.com/span14/halo2", package="halo2", branch="benchmark" } +halo2_gadgets = { git="https://github.com/span14/halo2", package="halo2_gadgets", branch="benchmark", features = ["circuit-params"]} +halo2_proofs = { git="https://github.com/span14/halo2", package="halo2_proofs", branch="benchmark", features = ["circuit-params", "zkml"]} halo2_curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", tag = "0.3.2", package = "halo2curves"} -halo2_proofs = { git= "https://github.com/span14/halo2", branch="benchmark", package="halo2_proofs", features = ["circuit-params", "zkml"]} lazy_static = "1.4.0" ndarray = "0.15.6" num-bigint = "0.4.3" diff --git a/src/gadgets/gadget.rs b/src/gadgets/gadget.rs index 0dfb148..5b64921 100644 --- a/src/gadgets/gadget.rs +++ b/src/gadgets/gadget.rs @@ -52,6 +52,7 @@ pub struct GadgetConfig { pub shift_min_val: i64, // MUST be divisible by 2 * scale_factor pub num_rows: usize, pub num_cols: usize, + pub num_witness_cols: usize, pub k: usize, pub eta: f64, pub min_val: i64, diff --git a/src/model.rs b/src/model.rs index 659dabf..182f2d5 100644 --- a/src/model.rs +++ b/src/model.rs @@ -7,7 +7,7 @@ use std::{ use halo2_proofs::{ circuit::{Layouter, SimpleFloorPlanner, Value}, halo2curves::ff::{FromUniformBytes, PrimeField}, - plonk::{Advice, Circuit, Column, ConstraintSystem, Error, FirstPhase, Instance, Challenge, SecondPhase}, + plonk::{Advice, Circuit, Column, ConstraintSystem, Error, FirstPhase, Instance, Challenge, SecondPhase, Selector}, poly::Rotation, }; use lazy_static::lazy_static; use ndarray::{Array, IxDyn}; @@ -96,6 +96,7 @@ pub struct ModelConfig> { pub public_col: Column, pub hasher: Option>, pub challenge: Challenge, + pub rand_increment_selector: Selector, pub rand_vector: Column, pub _marker: PhantomData, } @@ -292,18 +293,18 @@ impl> ModelCircuit { &self, mut layouter: impl Layouter, challenge: Challenge, + rand_increment_selector: Selector, rand_vector: Column, ) -> Result, F)>, Error> { - let c_base = { - let c = layouter.get_challenge(challenge); - // Default value here is provided to pass mock prover check and it will be fiat shamir - // challenge in proof generation - c.assign().map_or(F::from(0x123456789abcdef), |x| x) - }; - let mut c = c_base; + let c_base = layouter.get_challenge(challenge); + let rand_vec = layouter.assign_region( || "random vector", |mut region| { + // Default value here is provided to pass mock prover check and it will be fiat shamir + // challenge in proof generation + let c_base = c_base.assign().map_or(F::from(0x123456789abcdef), |x| x); + let mut c = F::ONE; let mut rand_vec = HashMap::new(); for i in 0..self.num_random { let rand = region.assign_advice( @@ -315,6 +316,9 @@ impl> ModelCircuit { rand_vec.insert(i as i64, (Rc::new(rand), c)); c = c * c_base; } + for i in 0..(self.num_random-1) { + rand_increment_selector.enable(&mut region, i as usize)?; + } Ok(rand_vec) } )?; @@ -492,6 +496,7 @@ impl> ModelCircuit { k: config.k as usize, num_rows: (1 << config.k) - 10 + 1, num_cols: config.num_cols as usize, + num_witness_cols: config.num_witness_cols.unwrap() as usize, used_gadgets: used_gadgets.clone(), commit_before: config.commit_before.clone().unwrap_or(vec![]), commit_after: config.commit_after.clone().unwrap_or(vec![]), @@ -602,17 
+607,21 @@ impl> Circuit for ModelCircuit fn configure(meta: &mut ConstraintSystem) -> Self::Config { let mut gadget_config = crate::model::GADGET_CONFIG.lock().unwrap().clone(); // TODO: Allocate less columns - let witness_columns = (0..gadget_config.num_cols) + let witness_columns = (0..gadget_config.num_witness_cols) .map(|_| meta.advice_column()) .collect::>(); - let c = meta.challenge_usable_after(FirstPhase); + let challenge = meta.challenge_usable_after(FirstPhase); let columns = (0..gadget_config.num_cols) .map(|_| meta.advice_column_in(SecondPhase)) .collect::>(); + + let rand_increment_selector = meta.selector(); let rand_vector = meta.advice_column_in(SecondPhase); - for i in 0..gadget_config.num_cols { + for i in 0..gadget_config.num_witness_cols { meta.enable_equality(witness_columns[i]); + } + for i in 0..gadget_config.num_cols { meta.enable_equality(columns[i]); } @@ -679,12 +688,22 @@ impl> Circuit for ModelCircuit None }; + + meta.create_gate("Randomness Constraint Increment", |meta| { + let selector = meta.query_selector(rand_increment_selector); + let challenge = challenge.expr(); + let x = meta.query_advice(rand_vector, Rotation::cur()); + let x_n = meta.query_advice(rand_vector, Rotation::next()); + vec![selector * (x_n - challenge * x)] + }); + ModelConfig { gadget_config: gadget_config.into(), public_col, hasher, - challenge: c, + challenge, rand_vector, + rand_increment_selector, _marker: PhantomData, } } @@ -831,6 +850,7 @@ impl> Circuit for ModelCircuit let rand_vector = self.fill_random_vectors( layouter.namespace(|| "randomness"), config.challenge, + config.rand_increment_selector, config.rand_vector )?; diff --git a/src/utils/loader.rs b/src/utils/loader.rs index f3a4721..d038e5c 100644 --- a/src/utils/loader.rs +++ b/src/utils/loader.rs @@ -34,6 +34,7 @@ pub struct ModelMsgpack { pub commit_after: Option>>, pub bits_per_elem: Option, // Specifically for packing for the commitments pub num_random: Option, + pub num_witness_cols: Option, } pub fn load_config_msgpack(config_path: &str) -> ModelMsgpack { @@ -72,6 +73,9 @@ pub fn load_model_msgpack(config_path: &str, inp_path: &str) -> ModelMsgpack { if model.num_random.is_none() { model.num_random = Some(20001) }; + if model.num_witness_cols.is_none() { + model.num_witness_cols = Some(3) + }; model }
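Net effect of this last patch: the rand_vector column is no longer free witness data. The Randomness Constraint Increment gate forces every enabled row to equal the previous row times the Phase-2 challenge, and fill_random_vectors seeds the first cell with F::ONE, so the committed column must read 1, c, c^2, and so on: exactly the challenge powers the Freivalds-style check in fully_connected consumes. What the gate enforces, restated in plain Rust with u64 wrapping arithmetic standing in for field multiplication (a sketch, not the circuit code):

// selector * (x_next - challenge * x_cur) vanishes on every enabled row
// precisely when each cell equals the previous cell scaled by the challenge.
fn is_challenge_powers(challenge: u64, column: &[u64]) -> bool {
  column.windows(2).all(|w| w[1] == challenge.wrapping_mul(w[0]))
}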