diff --git a/.gitignore b/.gitignore
index aa44ee2..0bc4900 100644
--- a/.gitignore
+++ b/.gitignore
@@ -128,3 +128,4 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+Results/*
diff --git a/M1_Retinal_Image_quality_EyePACS/efficientnet-b4-6ed6700e.pth b/M1_Retinal_Image_quality_EyePACS/efficientnet-b4-6ed6700e.pth
new file mode 100644
index 0000000..4fc7dcd
Binary files /dev/null and b/M1_Retinal_Image_quality_EyePACS/efficientnet-b4-6ed6700e.pth differ
diff --git a/M1_Retinal_Image_quality_EyePACS/model.py b/M1_Retinal_Image_quality_EyePACS/model.py
index d3e32c4..4c42c9f 100644
--- a/M1_Retinal_Image_quality_EyePACS/model.py
+++ b/M1_Retinal_Image_quality_EyePACS/model.py
@@ -37,7 +37,7 @@ def InceptionV3_fl(pretrained):
 
 
 def Efficientnet_fl(pretrained):
-    model = EfficientNet.from_pretrained('efficientnet-b4')
+    model = EfficientNet.from_pretrained('efficientnet-b4',weights_path="./efficientnet-b4-6ed6700e.pth")
     model._fc = nn.Identity()
     net_fl = nn.Sequential(
         nn.Linear(1792, 256),
diff --git a/M1_Retinal_Image_quality_EyePACS/test_outside.py b/M1_Retinal_Image_quality_EyePACS/test_outside.py
index f71f276..9d99687 100644
--- a/M1_Retinal_Image_quality_EyePACS/test_outside.py
+++ b/M1_Retinal_Image_quality_EyePACS/test_outside.py
@@ -14,6 +14,7 @@
 from model import Resnet101_fl, InceptionV3_fl, Densenet161_fl, Resnext101_32x8d_fl, MobilenetV2_fl, Vgg16_bn_fl, Efficientnet_fl
 
 AUTOMORPH_DATA = os.getenv('AUTOMORPH_DATA','..')
+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8)) # use num_workers=0 to disable multiprocessing
 
 def test_net(model_fl_1,
              model_fl_2,
@@ -30,17 +31,17 @@ def test_net(model_fl_1,
              image_size=(512,512),
              ):
 
-    storage_path ="Ensemble_exp_{}/{}/train_on_{}/test_on_{}/".format(args.task, args.load, args.model, args.dataset)
+    #storage_path ="Ensemble_exp_{}/{}/train_on_{}/test_on_{}/".format(args.task, args.load, args.model, args.dataset)
     n_classes = args.n_class
 
     # create files
-    if not os.path.isdir(storage_path):
-        os.makedirs(storage_path)
+    # if not os.path.isdir(storage_path):
+    #     os.makedirs(storage_path)
 
     dataset = BasicDataset_OUT(test_dir, image_size, n_classes, train_or=False)
     n_test = len(dataset)
-    val_loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=8, pin_memory=False, drop_last=False)
+    val_loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False, drop_last=False)
 
     prediction_decode_list = []
     filename_list = []
diff --git a/M2_Artery_vein/test_outside.py b/M2_Artery_vein/test_outside.py
index 53fc9ff..3c13908 100644
--- a/M2_Artery_vein/test_outside.py
+++ b/M2_Artery_vein/test_outside.py
@@ -21,6 +21,7 @@
 from skimage.morphology import skeletonize,remove_small_objects
 
 AUTOMORPH_DATA = os.getenv('AUTOMORPH_DATA','..')
+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8)) # use num_workers=0 to disable multiprocessing
 
 def filter_frag(data_path):
     if os.path.isdir(data_path + 'raw/.ipynb_checkpoints'):
@@ -293,10 +294,10 @@ def get_args():
     img_size = Define_image_size(args.uniform, args.dataset)
     dataset_name = args.dataset
     checkpoint_saved = dataset_name + '/' +args.jn + '/Discriminator_unet/'
-    csv_save = 'test_csv/' + args.jn
+    # csv_save = 'test_csv/' + args.jn
 
-    if not os.path.isdir(csv_save):
-        os.makedirs(csv_save)
+    # if not os.path.isdir(csv_save):
+    #     os.makedirs(csv_save)
 
     test_dir= f'{AUTOMORPH_DATA}/Results/M1/Good_quality/'
     test_label = "./data/{}/test/1st_manual/".format(dataset_name)
@@ -307,7 +308,7 @@ def get_args():
 
     dataset = LearningAVSegData_OOD(test_dir, test_label, test_mask, img_size,
                                     dataset_name=dataset_name, train_or=False)
-    test_loader = DataLoader(dataset, batch_size=args.batchsize, shuffle=False, num_workers=8, pin_memory=False, drop_last=False)
+    test_loader = DataLoader(dataset, batch_size=args.batchsize, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False, drop_last=False)
 
     net_G_1 = Generator_main(input_channels=3, n_filters = 32, n_classes=4, bilinear=False)
diff --git a/M2_Vessel_seg/test_outside_integrated.py b/M2_Vessel_seg/test_outside_integrated.py
index d1d21a9..ede9c6f 100644
--- a/M2_Vessel_seg/test_outside_integrated.py
+++ b/M2_Vessel_seg/test_outside_integrated.py
@@ -18,6 +18,7 @@
 import shutil
 
 AUTOMORPH_DATA = os.getenv('AUTOMORPH_DATA','..')
+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8)) # use num_workers=0 to disable multiprocessing
 
 def filter_frag(data_path):
     if os.path.isdir(data_path + 'resize_binary/.ipynb_checkpoints'):
@@ -173,7 +174,7 @@ def test_net(data_path, batch_size, device, dataset_train, dataset_test, image_s
     VD_list = []
 
     dataset_data = SEDataset_out(test_dir, test_label, mask_dir, image_size, dataset_test, threshold, uniform='True', train_or=False)
-    test_loader = DataLoader(dataset_data, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=False, drop_last=False)
+    test_loader = DataLoader(dataset_data, batch_size=batch_size, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False, drop_last=False)
 
     dir_checkpoint_1="./Saved_model/train_on_{}/{}_savebest_randomseed_{}/".format(dataset_train,job_name,24)
     dir_checkpoint_2="./Saved_model/train_on_{}/{}_savebest_randomseed_{}/".format(dataset_train,job_name,26)
diff --git a/M2_lwnet_disc_cup/utils/get_loaders.py b/M2_lwnet_disc_cup/utils/get_loaders.py
index 4fefff4..fcd0681 100644
--- a/M2_lwnet_disc_cup/utils/get_loaders.py
+++ b/M2_lwnet_disc_cup/utils/get_loaders.py
@@ -13,6 +13,8 @@
 import logging
 from glob import glob
 
+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8)) # use num_workers=0 to disable multiprocessing
+
 class TrainDataset(Dataset):
 
     def __init__(self, csv_path, transforms=None, label_values=None):
@@ -220,7 +222,7 @@ def get_test_dataset(data_path, csv_path='test.csv', tg_size=(512, 512)):
     #path_test_csv = osp.join(data_path, csv_path)
     path_test_csv = data_path
     test_dataset = TestDataset(csv_path=path_test_csv, tg_size=tg_size)
-    test_loader = DataLoader(dataset=test_dataset, batch_size=16, num_workers=8, pin_memory=False)
+    test_loader = DataLoader(dataset=test_dataset, batch_size=16, num_workers=NUM_WORKERS, pin_memory=False)
 
     return test_loader
diff --git a/README.md b/README.md
index 3f66981..0041ce5 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,11 @@ Zero experience in Docker? No worries [DOCKER.md](DOCKER.md).
 
 ## Common questions
 
+### Environment variables
+A few optional environment variables are available for all pipelines:
+- AUTOMORPH_DATA : the directory where the Results folder is stored. If not defined, a "Results" subdirectory is created in the current directory. If AUTOMORPH_DATA points outside the source directory (for example, /tmp/AutoMorph), the source directory can be kept read-only, e.g. for deployment inside AWS Lambda.
+- NUM_WORKERS : the number of worker processes used by the dataloaders. The default is 8. If NUM_WORKERS is set to 0, the dataloaders run single-threaded (multiprocessing disabled).
+
 ### Memory/ram error
 
 We use Tesla T4 (16Gb) and 32vCPUs (120Gb). When you meet memory/ram issue in running, try to decrease batch size:
diff --git a/run.sh b/run.sh
old mode 100644
new mode 100755
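
Example of how the new environment variables might be used when running the pipeline (a minimal sketch; the "bash run.sh" entry point and the /tmp/AutoMorph path are illustrative assumptions, not part of this change):

    # keep all outputs outside the source tree so the source directory can stay read-only
    export AUTOMORPH_DATA=/tmp/AutoMorph
    # load data in the main process instead of the default 8 dataloader workers
    export NUM_WORKERS=0
    bash run.sh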