diff --git a/cGAN/data.py b/cGAN/data.py
index e1bcf84..e1a5706 100644
--- a/cGAN/data.py
+++ b/cGAN/data.py
@@ -73,7 +73,7 @@ def _image_decoder(path):
     else:
         dataset = tf.data.Dataset.from_tensor_slices(image_paths)
 
-    dataset = dataset.map(_parser)
+    dataset = dataset.map(_parser,num_parallel_calls=tf.data.experimental.AUTOTUNE)
     dataset = dataset.shuffle(buffer_size=8)
     dataset = dataset.batch(batch_size)
 
@@ -115,7 +115,7 @@ def _image_decoder(path):
         return image, semantic_map
 
     dataset = tf.data.Dataset.from_tensor_slices(image_paths, semantic_map_paths)
-    dataset = dataset.map(_parser)
+    dataset = dataset.map(_parser,num_parallel_calls=tf.data.experimental.AUTOTUNE)
     dataset = dataset.shuffle(buffer_size=8)
     dataset = dataset.batch(batch_size)
 
@@ -140,7 +140,7 @@ def _preprocess_inference(image_path, label, resize=(32,32)):
         return image, label
 
     dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
-    dataset = dataset.map(_preprocess_inference)
+    dataset = dataset.map(_preprocess_inference,num_parallel_calls=tf.data.experimental.AUTOTUNE)
     dataset = dataset.batch(batch_size)
 
     return dataset
diff --git a/data.py b/data.py
index d0e1f4e..7b15200 100644
--- a/data.py
+++ b/data.py
@@ -74,7 +74,7 @@ def _image_decoder(path):
 
     dataset = tf.data.Dataset.from_tensor_slices(image_paths)
     dataset = dataset.shuffle(buffer_size=8)
-    dataset = dataset.map(_parser)
+    dataset = dataset.map(_parser,num_parallel_calls=tf.data.experimental.AUTOTUNE)
     dataset = dataset.cache()
     dataset = dataset.batch(batch_size)
 
@@ -116,7 +116,7 @@ def _image_decoder(path):
         return image, semantic_map
 
     dataset = tf.data.Dataset.from_tensor_slices(image_paths, semantic_map_paths)
-    dataset = dataset.map(_parser)
+    dataset = dataset.map(_parser,num_parallel_calls=tf.data.experimental.AUTOTUNE)
     dataset = dataset.shuffle(buffer_size=8)
     dataset = dataset.batch(batch_size)
 
@@ -141,7 +141,7 @@ def _preprocess_inference(image_path, label, resize=(32,32)):
         return image, label
 
     dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
-    dataset = dataset.map(_preprocess_inference)
+    dataset = dataset.map(_preprocess_inference,num_parallel_calls=tf.data.experimental.AUTOTUNE)
     dataset = dataset.batch(batch_size)
 
     return dataset