diff --git a/README.md b/README.md
index 2d5089c..ca5b283 100644
--- a/README.md
+++ b/README.md
@@ -30,43 +30,58 @@ The Sensor Effect Image Augmentation Pipeline is comprised of the following file
     This is a bash file that initializes a docker container and runs the image augmentation pipeline.
-    The first volume mapping ```-v `pwd`/ImgData``` (line 2)
-    argument is the full path to the directory that holds the images you would like to augment. The default location is set to ```ImgData``` in the SensorEffectAugmentation directory.
-
-    The second volume mapping ```-v `pwd`/AugmentedImgData``` (line 3) is the path to the directory where the augmented images will be saved. The default location is set to ```AugmentedImgData``` in the SensorEffectAugmentation directory.
+    The variable ```input``` is the path to the directory that holds the images you would like to augment.
+
+    The variable ```output``` is the path to the directory where the augmented images will be saved. The default location is set to ```results``` in the SensorEffectAugmentation directory.
+
+    The variable ```code_location``` is the path to the directory where SensorEffectAugmentation is located.
     The other volume mappings are setting up the file system in the docker container.
-    The command ```python /root/main.py``` (line 11) runs the image augmentation pipeline.
-
-    The inputs to the ```main.py``` function are:
-    - ```--epoch``` sets the number of augmentations to perform on the dataset i.e., setting epoch to 2 means the dataset will be augmented twice
-    - ```--batch_size``` The size of batch images; must be a multiple of n and >1
-    - ```--c_dim``` Dimension of image color channel (note that any channel >3 will be discarded)
-    - ```--Img_dataset``` The name full path of dataset to augment
-    - ```--Img_height``` The size of the output images to produce (note that all images will be resized to the specified Img_height x Img_width)
-    - ```--Img_width``` The size of the output images to produce. If None, same value as output_height
-    - ```--chromab_flag``` flag that specifies whether to perform Chromatic aberration augmentation
-    - ```--blur_flag``` flag that specifies whether to perform Blur augmentation
-    - ```--exposure_flag``` flag that specifies whether to perform Exposure augmentation
-    - ```--noise_flag``` flag that specifies whether to perform noise augmentation
-    - ```--color_flag``` flag that specifies whether to perform color shift augmentation
-    - ```--save_aug_params_flag``` flag that specifies whether to save aug. parameters for each image
-    - ```--input_fname_pattern``` Glob pattern of filename of input images
-    - ```--results_dir``` Directory name to save the augmented images
-
+
+    The command ```python /root/main.py``` (line 15) runs the image augmentation pipeline.
+    ```bash
+    usage: main.py [-h] [-n [N]] [-b [BATCH_SIZE]] [-c [CHANNELS]] [-i INPUT]
+                   [-o [OUTPUT]] [--pattern [PATTERN]]
+                   [--image_height [IMAGE_HEIGHT]] [--image_width [IMAGE_WIDTH]]
+                   [--chromatic_aberration [CHROMATIC_ABERRATION]] [--blur [BLUR]]
+                   [--exposure [EXPOSURE]] [--noise [NOISE]]
+                   [--colour_shift [COLOUR_SHIFT]] [--save_params [SAVE_PARAMS]]
+
+    Augment a dataset
+
+    optional arguments:
+      -h, --help            show this help message and exit
+      -n [N]                sets the number of augmentations to perform on the
+                            dataset i.e., setting n to 2 means the dataset will be
+                            augmented twice
+      -b [BATCH_SIZE], --batch_size [BATCH_SIZE]
+                            size of batches; must be a multiple of n and >1
+      -c [CHANNELS], --channels [CHANNELS]
+                            dimension of image color channel (note that any
+                            channel >3 will be discarded)
+      -i INPUT, --input INPUT
+                            path to the dataset to augment
+      -o [OUTPUT], --output [OUTPUT]
+                            path where the augmented dataset will be saved
+      --pattern [PATTERN]   glob pattern of filename of input images
+      --image_height [IMAGE_HEIGHT]
+                            size of the output images to produce (note that all
+                            images will be resized to the specified image_height x
+                            image_width)
+      --image_width [IMAGE_WIDTH]
+                            size of the output images to produce. If None, same
+                            value as image_height
+      --chromatic_aberration [CHROMATIC_ABERRATION]
+                            perform chromatic aberration augmentation
+      --blur [BLUR]         perform blur augmentation
+      --exposure [EXPOSURE]
+                            perform exposure augmentation
+      --noise [NOISE]       perform noise augmentation
+      --colour_shift [COLOUR_SHIFT]
+                            perform colour shift augmentation
+      --save_params [SAVE_PARAMS]
+                            save augmentation parameters for each image
+    ```
 * ```main_aug.py``` This is a master function that handles input flags and initializing the augmentation. It is called by ```run_main_aug.sh```.
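Editorial note (not part of the change): since `-i`/`--input` and `--pattern` determine which files the pipeline picks up, a quick pre-check of the dataset directory can be run before launching the container. This is a minimal sketch; the directory path is a placeholder, and the `'aug'` filter mirrors the file listing logic shown later in this diff.

```python
import os
from glob import glob

# Placeholder path: point this at the directory passed via --input.
input_dir = "/path/to/ImgData"
pattern = "*.png"  # default value of --pattern

matches = sorted(glob(os.path.join(input_dir, pattern)))
# The pipeline skips files whose names contain 'aug' (already-augmented outputs).
matches = [fn for fn in matches if "aug" not in os.path.basename(fn)]
print("%d files match %s and would be augmented" % (len(matches), pattern))
```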
diff --git a/augment-docker-image/Dockerfile b/augment-docker-image/Dockerfile
index 24a5cfe..0422d2f 100644
--- a/augment-docker-image/Dockerfile
+++ b/augment-docker-image/Dockerfile
@@ -1,6 +1,6 @@
 ###
 ### Dockerfile used for augmentation pipeline code
 ###
-FROM gcr.io/tensorflow/tensorflow:latest-gpu
+FROM tensorflow/tensorflow:1.15.2-gpu
 MAINTAINER Vincent Vanhoucke
-RUN pip install scikit-learn scikit-image opencv-python==3.2.0.8
+RUN pip install scipy==1.1.0 scikit-learn scikit-image opencv-python==3.2.0.8
diff --git a/augmentfunctions_tf.py b/augmentfunctions_tf.py
index 1d2759d..8e7aaab 100644
--- a/augmentfunctions_tf.py
+++ b/augmentfunctions_tf.py
@@ -215,41 +215,43 @@ def return_bayer(bayer_type, im_h, im_w, batchsize):
     #
     # generate the CFA arrays for R,G,B based upon the r pixel location:
     #
+    h = int(im_h / 2)
+    w = int(im_w / 2)
     if bayer_type=='BGGR': # bggr
         Cr=np.array([[1,0],[0,0]])
         Cg=np.array([[0,1],[1,0]])
         Cb=np.array([[0,0],[0,1]])
-        Rcfa= np.tile( Cr, (im_h/2,im_w/2))
-        Gcfa= np.tile( Cg, (im_h/2,im_w/2))
-        Bcfa= np.tile( Cb, (im_h/2,im_w/2))
+        Rcfa= np.tile( Cr, (h, w))
+        Gcfa= np.tile( Cg, (h, w))
+        Bcfa= np.tile( Cb, (h, w))
     #
     if bayer_type=='GBRG': ## gbrg
         Cr2=np.array([[0,1],[0,0]])
         Cg2=np.array([[1,0],[0,1]])
         Cb2=np.array([[0,0],[1,0]])
-        Rcfa= np.tile( Cr2, (im_h/2,im_w/2))
-        Gcfa= np.tile( Cg2, (im_h/2,im_w/2))
-        Bcfa= np.tile( Cb2, (im_h/2,im_w/2))
+        Rcfa= np.tile( Cr2, (h, w))
+        Gcfa= np.tile( Cg2, (h, w))
+        Bcfa= np.tile( Cb2, (h, w))
     #
     if bayer_type=='GRBG': ## grbg
         Cr3=np.array([[0,0],[1,0]])
         Cg3=np.array([[1,0],[0,1]])
         Cb3=np.array([[0,1],[0,0]])
-        Rcfa= np.tile( Cr3, (im_h/2,im_w/2))
-        Gcfa= np.tile( Cg3, (im_h/2,im_w/2))
-        Bcfa= np.tile( Cb3, (im_h/2,im_w/2))
+        Rcfa= np.tile( Cr3, (h, w))
+        Gcfa= np.tile( Cg3, (h, w))
+        Bcfa= np.tile( Cb3, (h, w))
     #
     if bayer_type=='RGGB': ## rggb
         Cr4=np.array([[0,0],[0,1]])
         Cg4=np.array([[0,1],[1,0]])
         Cb4=np.array([[1,0],[0,0]])
-        Rcfa= np.tile( Cr4, (im_h/2,im_w/2))
-        Gcfa= np.tile( Cg4, (im_h/2,im_w/2))
-        Bcfa= np.tile( Cb4, (im_h/2,im_w/2))
+        Rcfa= np.tile( Cr4, (h, w))
+        Gcfa= np.tile( Cg4, (h, w))
+        Bcfa= np.tile( Cb4, (h, w))
     #
     Rcfa= np.tile( Rcfa, (batchsize,1,1))
     Gcfa= np.tile( Gcfa, (batchsize,1,1))
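Editorial note (not part of the change): the hunk above replaces `im_h/2` with `int(im_h / 2)` because under Python 3 `/` is true division, and recent NumPy versions reject float repetition counts in `np.tile`. A minimal standalone sketch of the same CFA tiling, reusing the 2x2 unit cells from the `'BGGR'` branch on a hypothetical 4x4 image:

```python
import numpy as np

im_h, im_w = 4, 4
# In Python 3, im_h / 2 would be the float 2.0, which np.tile does not accept
# as a repetition count, hence the explicit int() conversion in the diff.
h, w = int(im_h / 2), int(im_w / 2)

# 2x2 unit cells for the red, green, and blue masks (the 'BGGR' case above)
Cr = np.array([[1, 0], [0, 0]])
Cg = np.array([[0, 1], [1, 0]])
Cb = np.array([[0, 0], [0, 1]])

# Tile each unit cell across the image to build the full colour filter array masks
Rcfa = np.tile(Cr, (h, w))
Gcfa = np.tile(Cg, (h, w))
Bcfa = np.tile(Cb, (h, w))

print(Rcfa)
# [[1 0 1 0]
#  [0 0 0 0]
#  [1 0 1 0]
#  [0 0 0 0]]
```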
diff --git a/main_aug.py b/main_aug.py
index f324f1b..fd65d31 100644
--- a/main_aug.py
+++ b/main_aug.py
@@ -1,38 +1,35 @@
 import os
 import scipy.misc
 import numpy as np
-#os.environ["CUDA_VISIBLE_DEVICES"]="1"
 from model import camGAN
-from utils import pp
 import tensorflow as tf
+import argparse
-flags = tf.app.flags
-flags.DEFINE_integer("epoch", 1, "number of epochs; corresponds to number of augmentations to perform on the dataset (i.e., epoch =2 means the dataset will be augmented twice")
-flags.DEFINE_integer("batch_size", 2, "The size of batch images; must be a multiple of n and >1")
-flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
-#
-flags.DEFINE_string("Img_dataset","generator_images","The name (full path) of dataset to augment")
-flags.DEFINE_integer("Img_height",512, "The size of the output images to produce [64]")
-flags.DEFINE_integer("Img_width", 1024, "The size of the output images to produce. If None, same value as output_height [None]")
-flags.DEFINE_boolean("chromab_flag", True, "flag that specifies whether to perform Chromatic aberration augmentation")
-flags.DEFINE_boolean("blur_flag", True, "flag that specifies whether to perform Blur augmentation")
-flags.DEFINE_boolean("exposure_flag", True, "flag that specifies whether to perform Exposure augmentation")
-flags.DEFINE_boolean("noise_flag", True, "flag that specifies whether to perform noise augmentation")
-flags.DEFINE_boolean("color_flag", True, "flag that specifies whether to perform color shift augmentation")
-flags.DEFINE_boolean("save_aug_params_flag", False, "flag that specifies whether to save aug. parameters for each image")
-#
-flags.DEFINE_string("input_fname_pattern", "*.png", "Glob pattern of filename of input images [*]")
-flags.DEFINE_string("results_dir", "results", "Directory name to save the augmented images [results]")
-FLAGS = flags.FLAGS
+parser = argparse.ArgumentParser(description='Augment a dataset')
+parser.add_argument('-n', type=int, default=1, nargs='?', help='sets the number of augmentations to perform on the dataset i.e., setting n to 2 means the dataset will be augmented twice')
+parser.add_argument('-b', '--batch_size', type=int, default=64, nargs='?', help='size of batches; must be a multiple of n and >1')
+parser.add_argument('-c', '--channels', type=int, default=3, nargs='?', help='dimension of image color channel (note that any channel >3 will be discarded)')
+parser.add_argument('-i', '--input', type=str, help='path to the dataset to augment')
+parser.add_argument('-o', '--output', type=str, default='results', nargs='?', help='path where the augmented dataset will be saved')
+parser.add_argument('--pattern', type=str, default="*.png", nargs='?', help='glob pattern of filename of input images')
+parser.add_argument('--image_height', type=int, default=512, nargs='?', help='size of the output images to produce (note that all images will be resized to the specified image_height x image_width)')
+parser.add_argument('--image_width', type=int, default=1024, nargs='?', help='size of the output images to produce. If None, same value as image_height')
+parser.add_argument('--chromatic_aberration', type=bool, default=False, nargs='?', help='perform chromatic aberration augmentation')
+parser.add_argument('--blur', type=bool, default=False, nargs='?', help='perform blur augmentation')
+parser.add_argument('--exposure', type=bool, default=False, nargs='?', help='perform exposure augmentation')
+parser.add_argument('--noise', type=bool, default=False, nargs='?', help='perform noise augmentation')
+parser.add_argument('--colour_shift', type=bool, default=False, nargs='?', help='perform colour shift augmentation')
+parser.add_argument('--save_params', type=bool, default=False, nargs='?', help='save augmentation parameters for each image')
+args = parser.parse_args()
 def main(_):
-  pp.pprint(flags.FLAGS.__flags)
+  print(args)
   ##
-  if FLAGS.Img_width is None:
-    FLAGS.Img_width = FLAGS.Img_height
+  if args.image_width is None:
+    args.image_width = args.image_height
   ##
-  if not os.path.exists(FLAGS.results_dir):
-    os.makedirs(FLAGS.results_dir)
+  if not os.path.exists(args.output):
+    os.makedirs(args.output)
   ##
   run_config = tf.ConfigProto()
   ## allocate only as much GPU memory based on runtime allocations
@@ -41,27 +38,27 @@ def main(_):
   with tf.Session(config=run_config) as sess:
     autoauggan = camGAN(
           sess,
-          Img_width=FLAGS.Img_width,
-          Img_height=FLAGS.Img_height,
-          batch_size=FLAGS.batch_size,
-          c_dim=FLAGS.c_dim,
-          Img_dataset_name = FLAGS.Img_dataset,
-          chromab_flag = FLAGS.chromab_flag,
-          blur_flag = FLAGS.blur_flag,
-          exposure_flag = FLAGS.exposure_flag,
-          noise_flag = FLAGS.noise_flag,
-          color_flag = FLAGS.color_flag,
-          save_aug_params_flag = FLAGS.save_aug_params_flag,
-          input_fname_pattern=FLAGS.input_fname_pattern,
-          results_dir = FLAGS.results_dir)
+          image_width=args.image_width,
+          image_height=args.image_height,
+          batch_size=args.batch_size,
+          channels=args.channels,
+          input = args.input,
+          chromatic_aberration = args.chromatic_aberration,
+          blur = args.blur,
+          exposure = args.exposure,
+          noise = args.noise,
+          colour_shift = args.colour_shift,
+          save_params = args.save_params,
+          pattern=args.pattern,
+          output = args.output)
-    #if FLAGS.is_train:
+    #if args.is_train:
     if True:
-      autoauggan.augment_batches(FLAGS)
+      autoauggan.augment_batches(args)
     else:
-      if not autoauggan.load(FLAGS.checkpoint_dir):
+      if not autoauggan.load(args.checkpoint_dir):
        raise Exception("[!] Train a model first, then run test mode")
-      #wgan.test(FLAGS)
+      #wgan.test(args)
 if __name__ == '__main__':
   tf.app.run()
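Editorial note on the `type=bool` arguments introduced above (this note and the snippet are not part of the change): `bool('False')` is `True` in Python, so any non-empty value on the command line, including the literal string `False`, enables the corresponding augmentation; a bare flag such as `--blur` yields `None` because of `nargs='?'` with no `const`, and omitting the flag leaves the default. If string values like `True`/`False` ever need to parse as real booleans, one common sketch is a small converter such as the hypothetical `str2bool` below:

```python
import argparse

def str2bool(value):
    """Interpret common truthy/falsy strings instead of relying on bool()."""
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "1"):
        return True
    if value.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("expected a boolean value, got %r" % value)

parser = argparse.ArgumentParser()
# const=True lets a bare `--blur` enable the augmentation; `--blur False` now disables it.
parser.add_argument('--blur', type=str2bool, nargs='?', const=True, default=False,
                    help='perform blur augmentation')

print(parser.parse_args(['--blur', 'False']).blur)  # False
print(parser.parse_args(['--blur']).blur)           # True
print(parser.parse_args([]).blur)                   # False
```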
diff --git a/model_aug.py b/model_aug.py
index b7bd817..c080031 100644
--- a/model_aug.py
+++ b/model_aug.py
@@ -37,9 +37,9 @@ class camGAN(object):
-  def __init__(self, sess, Img_height=640, Img_width=480, batch_size=64, c_dim=3,
-         Img_dataset_name='default', color_flag=True, chromab_flag=True, blur_flag=True, exposure_flag=True, noise_flag=True, save_aug_params_flag=False,
-         input_fname_pattern='*.png', results_dir=None):
+  def __init__(self, sess, image_height, image_width, batch_size, channels,
+         input, colour_shift, chromatic_aberration, blur, exposure, noise, save_params,
+         pattern, output):
    """
    Args:
      sess: TensorFlow session
@@ -47,20 +47,20 @@ def __init__(self, sess, Img_height=640, Img_width=480, batch_size=64, c_dim=3,
    """
    self.sess = sess
    self.batch_size = batch_size
-    self.results_dir = results_dir
+    self.output = output
    ## Dataset info
-    self.G_dataset = Img_dataset_name
-    self.G_output_height = Img_height
-    self.G_output_width = Img_width
-    self.c_dim = c_dim
-    self.input_fname_pattern = input_fname_pattern
+    self.G_dataset = input
+    self.G_output_height = image_height
+    self.G_output_width = image_width
+    self.channels = channels
+    self.pattern = pattern
    ## Augmentation Flags
-    self.save_aug_params_flag = save_aug_params_flag
-    self.chromab_flag = chromab_flag
-    self.blur_flag = blur_flag
-    self.exposure_flag = exposure_flag
-    self.noise_flag = noise_flag
-    self.color_flag = color_flag
+    self.save_params = save_params
+    self.chromatic_aberration = chromatic_aberration
+    self.blur = blur
+    self.exposure = exposure
+    self.noise = noise
+    self.colour_shift = colour_shift
    ##
    ## Build the model/graph
    self.build_model()
@@ -69,9 +69,9 @@ def build_model(self):
    ##
    ## construct the graph of the image augmentation architecture
    ##
-    print 'building model/graph'
+    print('building model/graph')
    ## initialize graph input palceholders
-    image_dims = [self.G_output_height, self.G_output_width, self.c_dim]
+    image_dims = [self.G_output_height, self.G_output_width, self.channels]
    self.G_inputs = tf.placeholder(tf.float32, [self.batch_size] + image_dims, name='G_input_images')
    G_inputs = self.G_inputs
    #
@@ -81,18 +81,18 @@ def build_model(self):
  def augment_batches(self, config):
    ##
-    ## augments the dataset in batches. Can augment the dataset multiple times by specifying epoch >1 (i.e., epoch = 1 augments each image once)
+    ## augments the dataset in batches. Can augment the dataset multiple times by specifying n >1 (i.e., n = 1 augments each image once)
    ##
-    save_aug_params_flag = self.save_aug_params_flag
+    save_params = self.save_params
    # get file list of data/labels to augment, get batches
-    G_data = sorted([os.path.join(config.Img_dataset, fn) for fn in os.listdir(config.Img_dataset) if config.input_fname_pattern in fn and 'aug' not in fn ])
+    G_data = sorted([fn for fn in glob(os.path.join(config.input, config.pattern)) if 'aug' not in fn])
    N = len(G_data)
    batch_idxs = N // config.batch_size
    randombatch = np.arange(batch_idxs*config.batch_size)
-    print "Size of dataset to be augmented: %d"%(len(G_data))
+    print("Size of dataset to be augmented: %d"%(len(G_data)))
    #
-    begin_epoch=0
-    for epoch in xrange(begin_epoch, config.epoch):
+    begin_n=0
+    for n in xrange(begin_n, config.n):
      #
      for idx in xrange(0, (batch_idxs*config.batch_size), config.batch_size):
        ##
@@ -103,19 +103,14 @@ def augment_batches(self, config):
        out = self.sess.run([self.aug_image_genOP], feed_dict={self.G_inputs: G_batch_images})
        #
        ## generator output images and sampled augmentation parameters
-        if save_aug_params_flag:
-          G_output_images = np.squeeze(out[0][0])
-          ChromAbParams = np.array(out[0][1])
-          BlurParams = np.array(out[0][2])
-          ExpParams = np.array(out[0][3])
-          NoiseParams = np.array(out[0][4])
-          ColorParams = np.array(out[0][5])
-          ## save images
-          self.save_augmented_final_images(G_output_images, G_batch_files, ChromAbParams, BlurParams, ExpParams, NoiseParams, ColorParams, epoch)
-        else:
-          ## save images
-          G_output_images = np.squeeze(out[0])
-          self.save_augmented_final_images(G_output_images, G_batch_files, [], [], [], [], [], epoch)
+        G_output_images = np.squeeze(out[0])
+        ChromAbParams = np.array(out[0][1])
+        BlurParams = np.array(out[0][2])
+        ExpParams = np.array(out[0][3])
+        NoiseParams = np.array(out[0][4])
+        ColorParams = np.array(out[0][5])
+        ## save images
+        self.save_augmented_final_images(G_output_images, G_batch_files, ChromAbParams, BlurParams, ExpParams, NoiseParams, ColorParams, n)
  ## ---------------------------------------------------------------- ##
  ## ---- IMAGE AUGMENTATION PIPELINE (and supporting functions) ---- ##
@@ -130,15 +125,15 @@ def generate_augmentation(self, imageBatch):
    batchsize = self.batch_size
    AugImg = imageBatch
    #
-    save_aug_params_flag = self.save_aug_params_flag
-    chromab_flag = self.chromab_flag
-    blur_flag = self.blur_flag
-    exposure_flag = self.exposure_flag
-    noise_flag = self.noise_flag
-    color_flag = self.color_flag
+    save_params = self.save_params
+    chromatic_aberration = self.chromatic_aberration
+    blur = self.blur
+    exposure = self.exposure
+    noise = self.noise
+    colour_shift = self.colour_shift
    #
    # Chromatic Aberration ##
-    if chromab_flag:
+    if chromatic_aberration:
      # augment with chromatic aberration
      scale_val = tf.random_uniform((batchsize,1,1,1), minval = 0.998, maxval = 1.002, dtype=tf.float32)
      minT = -0.002
@@ -160,7 +155,7 @@ def generate_augmentation(self, imageBatch):
      ty_Bval = []
    ## Blur ##
-    if blur_flag:
+    if blur:
      #augment the image with blur
      window_h = tf.random_uniform((batchsize,1), minval=3.0, maxval=11.0,dtype=tf.float32)
      sigmas = tf.random_uniform((batchsize,1), minval=0.0, maxval=3.0,dtype=tf.float32) # uniform from 0 to 1.5
@@ -170,7 +165,7 @@ def generate_augmentation(self, imageBatch):
      sigmas = []
    ## Exposure ##
-    if exposure_flag:
+    if exposure:
      # augment image with exposure
      delta_S = tf.random_uniform((batchsize,1,1,1), minval=-0.6, maxval=1.2, dtype=tf.float32)
      A = 0.85
@@ -180,7 +175,7 @@ def generate_augmentation(self, imageBatch):
      delta_S = []
    ## Sensor Noise ##
-    if noise_flag:
+    if noise:
      # augment image with sensor noise
      N=0.001
      Ra_sd = tf.random_uniform((batchsize,1,1,1), minval=0.0, maxval=N, dtype=tf.float32)
@@ -199,7 +194,7 @@ def generate_augmentation(self, imageBatch):
      Bb_si = []
    ## Color shift/Tone mapping ##
-    if color_flag:
+    if colour_shift:
      # augment image by shifting color temperature
      a_transl = tf.random_uniform((batchsize,1,1,1),minval=-30.0, maxval=30.0,dtype=tf.float32)
      b_transl = tf.random_uniform((batchsize,1,1,1),minval=-30.0, maxval=30.0,dtype=tf.float32)
@@ -208,7 +203,7 @@ def generate_augmentation(self, imageBatch):
      a_transl = []
      b_transl = []
-    if save_aug_params_flag:
+    if save_params:
      ## Log the sampled augmentation parameters
      ChromAbParams = [tf.squeeze(scale_val), tf.squeeze(tx_Rval), tf.squeeze(ty_Rval), tf.squeeze(tx_Gval), tf.squeeze(ty_Gval), tf.squeeze(tx_Bval), tf.squeeze(ty_Bval)]
      BlurParams = [tf.squeeze(window_h), tf.squeeze(sigmas)]
@@ -226,8 +221,8 @@ def read_img(self, filename):
    imgtmp = scipy.misc.imread(filename)
    ds = imgtmp.shape
    ## remove any depth channel
-    if ds[2]>self.c_dim:
-      imgtmp = np.squeeze(imgtmp[:,:,:self.c_dim])
+    if ds[2]>self.channels:
+      imgtmp = np.squeeze(imgtmp[:,:,:self.channels])
    ## resize image to specified height and width
    img = scipy.misc.imresize(imgtmp,(self.G_output_height,self.G_output_width,3))
    img = np.array(img).astype(np.float32)
@@ -261,9 +256,9 @@ def load_data_batches(self, data, batch_size, randombatch, idx):
    #
    return batch_images, batch_files
-  def save_augmented_final_images(self, output_images, batch_files, ChromAbParams, BlurParams, ExpParams, NoiseParams, ColorParams, epoch):
+  def save_augmented_final_images(self, output_images, batch_files, ChromAbParams, BlurParams, ExpParams, NoiseParams, ColorParams, n):
    ##
-    save_aug_params_flag = self.save_aug_params_flag
+    save_params = self.save_params
    ##
    for img_idx in range(0,self.batch_size):
      # get image
@@ -271,8 +266,8 @@ def save_augmented_final_images(self, output_images, batch_files, ChromAbParams,
      image_out_file = batch_files[img_idx]
      # generate fileID and paths
      imID = os.path.splitext(os.path.split(image_out_file)[1])[0]
-      #out_name = os.path.join(self.results_dir, imID+'_augx'+str(epoch+1)+'.png')
-      out_name = os.path.join(self.results_dir, imID+'_aug.jpg')
+      out_name = os.path.join(self.output, imID+'_aug_'+str(n+1)+'.png')
+      #out_name = os.path.join(self.output, imID+'_aug.png')
      try:
        ## save the image
        image_save = np.squeeze(image_out)
@@ -283,7 +278,7 @@ def save_augmented_final_images(self, output_images, batch_files, ChromAbParams,
        print("saved %s to results directory"%(out_name))
        image_save.save(out_name)
      ##
-      if save_aug_params_flag:
+      if save_params:
        ## save the augmentation parameters for the image
        if ChromAbParams.any():
          chromabP = 'chromab,'+','.join([str(x) for x in ChromAbParams[:,img_idx]])
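Editorial note (not part of the change): as the `augment_batches` and `save_augmented_final_images` hunks above show, each pass over the dataset processes `N // batch_size` full batches and writes every output as `<imID>_aug_<n+1>.png`. A small standalone sketch of that bookkeeping, with made-up file names and sizes:

```python
import os

def plan_augmentation(filenames, batch_size, n_passes):
    """Mimic the pipeline's batching: only full batches are processed each pass."""
    n_batches = len(filenames) // batch_size      # the trailing partial batch is dropped
    used = filenames[:n_batches * batch_size]
    outputs = []
    for n in range(n_passes):
        for name in used:
            im_id = os.path.splitext(os.path.basename(name))[0]
            outputs.append(im_id + '_aug_' + str(n + 1) + '.png')
    return n_batches, outputs

n_batches, outs = plan_augmentation(['a.png', 'b.png', 'c.png'], batch_size=2, n_passes=2)
print(n_batches)  # 1  -> 'c.png' never fits into a full batch, so it is skipped
print(outs)       # ['a_aug_1.png', 'b_aug_1.png', 'a_aug_2.png', 'b_aug_2.png']
```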
diff --git a/run_main_aug.sh b/run_main_aug.sh
index 6200527..18497e3 100644
--- a/run_main_aug.sh
+++ b/run_main_aug.sh
@@ -1,26 +1,26 @@
+code_location="/home/u42/Windows/Data/datasets/SensorEffectAugmentation"
+input="/home/u42/Documents/mmdetection/data/synth_rocks/train/"
+output="/home/u42/Documents/mmdetection/data/synth_rocks/train_augmented/"
+
 nvidia-docker run --rm -it \
-  -v /mnt/ngv/askc-home/SensorTransfer_datasets/GTA5-clean/leftImg8bit/train-all-data:/root/G_data \
-  -v `pwd`/GTA5_STdomrand:/root/ResultsDir \
-  -v `pwd`/main_aug.py:/root/main.py \
-  -v `pwd`/model_aug.py:/root/model.py \
-  -v `pwd`/geometric_transformation_module.py:/root/geometric_transformation_module.py \
-  -v `pwd`/augmentfunctions_tf.py:/root/augmentfunctions_tf.py \
-  -v `pwd`/pix2pix_labtoRGBconv.py:/root/pix2pix_labtoRGBconv.py \
-  -v `pwd`/utils.py:/root/utils.py \
+  -v $input:/root/G_data \
+  -v $output:/root/ResultsDir \
+  -v $code_location/main_aug.py:/root/main.py \
+  -v $code_location/model_aug.py:/root/model.py \
+  -v $code_location/geometric_transformation_module.py:/root/geometric_transformation_module.py \
+  -v $code_location/augmentfunctions_tf.py:/root/augmentfunctions_tf.py \
+  -v $code_location/pix2pix_labtoRGBconv.py:/root/pix2pix_labtoRGBconv.py \
+  -v $code_location/utils.py:/root/utils.py \
   tf-sensor-augment \
   python /root/main.py \
-  --Img_dataset /root/G_data \
-  --Img_height 512 \
-  --Img_width 1024 \
-  --input_fname_pattern .png \
-  --results_dir /root/ResultsDir \
-  --epoch 1 \
-  --save_aug_params_flag False \
-  --blur_flag True \
-  --chromab_flag True \
-  --exposure_flag True \
-  --noise_flag True \
-  --color_flag True \
+  -n 2 \
+  --input /root/G_data \
+  --output /root/ResultsDir \
+  --image_height 480 \
+  --image_width 720 \
+  --exposure True \
+  --noise True \
+  --colour_shift True \
   2>&1 | tee -a sensortransfer-testing-logs.txt
diff --git a/utils.py b/utils.py
index 6648a39..12590e6 100644
--- a/utils.py
+++ b/utils.py
@@ -12,8 +12,6 @@ import numpy as np
 from time import gmtime, strftime
-pp = pprint.PrettyPrinter()
-
 get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
 def get_image(image_path, input_height, input_width,
@@ -183,7 +181,7 @@ def visualize(sess, dcgan, config, option):
     save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
   elif option == 1:
     air_data = glob(os.path.join(
-      "./data", config.air_dataset, config.input_fname_pattern))
+      "./data", config.air_dataset, config.pattern))
     depth_data = glob(os.path.join(
       "./data", config.depth_dataset, "*.tiff"))
@@ -214,7 +212,7 @@ def visualize(sess, dcgan, config, option):
       depth_batch_images_i = np.array(depth_batch).astype(np.float32)
       depth_batch_images_i = np.expand_dims(depth_batch_images_i,axis=3)
       #depth_batch_images = np.multiply(de
-      depth_batch_images = np.broadcast_to(depth_batch_images_i,(config.batch_size,config.output_height,config.output_width,config.c_dim))
+      depth_batch_images = np.broadcast_to(depth_batch_images_i,(config.batch_size,config.output_height,config.output_width,config.channels))
       values = np.arange(0, 1, 1./config.batch_size)