@@ -180,7 +180,7 @@ def _conv2d_scale(self, operation, dest_device):
                 merged['padding'][0]
                 if isinstance(merged['padding'], tuple) else merged['padding']
             ),
-            bias=(1 if merged['bias'] is not None else 0),
+            bias=(1 if merged.get('bias', None) is not None else 0),
         )
 
         # 3. Call model to make prediction
@@ -214,7 +214,7 @@ def _conv_transpose2d_scale(self, operation, dest_device):
                 merged['padding'][0]
                 if isinstance(merged['padding'], tuple) else merged['padding']
             ),
-            bias=(1 if merged['bias'] is not None else 0),
+            bias=(1 if merged.get('bias', None) is not None else 0),
         )
 
         # 3. Call model to make prediction
@@ -251,7 +251,7 @@ def _linear_scale(self, operation, dest_device):
             batch=effective_batch,
             in_features=merged['weight'][1],
             out_features=merged['weight'][0],
-            bias=(1 if merged['bias'] is not None else 0)
+            bias=(1 if merged.get('bias', None) is not None else 0)
         )
 
         arguments = [arguments[x] for x in self.linear_pred.model.features]
@@ -293,7 +293,7 @@ def _lstm_scale(self, operation, dest_device):
             operation.arguments.kwargs,
         )
         arguments = dict(
-            bias=(1 if merged['bias'] is not None else 0),
+            bias=(1 if merged.get('bias', None) is not None else 0),
             bidirectional=(1 if merged['bidirectional'] else 0),
             batch=merged['input'][1],  # We require the batch to be in position 1
             seq_len=merged['input'][0],
@@ -310,7 +310,7 @@ def _lstm_scale(self, operation, dest_device):
         )
         max_batch_size = max(operation.arguments.special['batch_sizes'])
         arguments = dict(
-            bias=(1 if merged['bias'] is not None else 0),
+            bias=(1 if merged.get('bias', None) is not None else 0),
             bidirectional=(1 if merged['bidirectional'] else 0),
             batch=max_batch_size,
             seq_len=merged['input'][0] // max_batch_size,
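
Across all of the scale helpers above the change is the same: the bias flag is now computed with dict.get() instead of direct indexing, so a missing 'bias' key simply counts as "no bias" rather than raising KeyError. The None check itself stays an identity comparison ("is not None"), which is the PEP 8 form and avoids delegating to the value's __ne__ when the bias is an array-like object. Below is a minimal sketch of the behavior using a plain dict with placeholder values; the real contents of merged depend on the operation being scaled.

    # Sketch only: `merged` normally holds the operation's merged args/kwargs;
    # the shapes below are placeholders, not values from the real code.
    merged_with_bias = {'weight': (128, 64), 'bias': (128,)}
    merged_without_bias = {'weight': (128, 64)}   # caller never supplied a bias

    # Old form: merged_without_bias['bias'] raises KeyError because the key
    # is absent.  New form: .get() falls back to None, so the flag is just 0.
    bias_flag_absent = 1 if merged_without_bias.get('bias', None) is not None else 0
    bias_flag_present = 1 if merged_with_bias.get('bias', None) is not None else 0

    print(bias_flag_absent, bias_flag_present)   # 0 1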