1 change: 1 addition & 0 deletions .gitignore
@@ -128,3 +128,4 @@ dmypy.json
 # Pyre type checker
 .pyre/

+Results/*
Binary file not shown.
2 changes: 1 addition & 1 deletion M1_Retinal_Image_quality_EyePACS/model.py
@@ -37,7 +37,7 @@ def InceptionV3_fl(pretrained):


 def Efficientnet_fl(pretrained):
-    model = EfficientNet.from_pretrained('efficientnet-b4')
+    model = EfficientNet.from_pretrained('efficientnet-b4',weights_path="./efficientnet-b4-6ed6700e.pth")
     model._fc = nn.Identity()
     net_fl = nn.Sequential(
         nn.Linear(1792, 256),
9 changes: 5 additions & 4 deletions M1_Retinal_Image_quality_EyePACS/test_outside.py
@@ -14,6 +14,7 @@
 from model import Resnet101_fl, InceptionV3_fl, Densenet161_fl, Resnext101_32x8d_fl, MobilenetV2_fl, Vgg16_bn_fl, Efficientnet_fl

 AUTOMORPH_DATA = os.getenv('AUTOMORPH_DATA','..')
+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8))  # use num_workers=0 to disable multiprocessing

 def test_net(model_fl_1,
              model_fl_2,
@@ -30,17 +31,17 @@ def test_net(model_fl_1,
              image_size=(512,512),
              ):

-    storage_path ="Ensemble_exp_{}/{}/train_on_{}/test_on_{}/".format(args.task, args.load, args.model, args.dataset)
+    #storage_path ="Ensemble_exp_{}/{}/train_on_{}/test_on_{}/".format(args.task, args.load, args.model, args.dataset)
     n_classes = args.n_class
     # create files

-    if not os.path.isdir(storage_path):
-        os.makedirs(storage_path)
+    # if not os.path.isdir(storage_path):
+    #     os.makedirs(storage_path)

     dataset = BasicDataset_OUT(test_dir, image_size, n_classes, train_or=False)

     n_test = len(dataset)
-    val_loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=8, pin_memory=False, drop_last=False)
+    val_loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False, drop_last=False)

     prediction_decode_list = []
     filename_list = []
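The same env-override pattern recurs in each module touched below. A minimal standalone sketch of it, assuming PyTorch is installed (the toy dataset and tensor shapes are illustrative, not from the repository):

```python
import os

import torch
from torch.utils.data import DataLoader, TensorDataset

# Read the worker count from the environment once at import time.
# num_workers=0 keeps all data loading in the main process, which
# sidesteps multiprocessing restrictions (e.g. inside AWS Lambda).
NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8))

dataset = TensorDataset(torch.zeros(4, 3))  # stand-in for BasicDataset_OUT
loader = DataLoader(dataset, batch_size=2, shuffle=False,
                    num_workers=NUM_WORKERS, pin_memory=False)

for (batch,) in loader:
    print(batch.shape)  # torch.Size([2, 3])
```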
9 changes: 5 additions & 4 deletions M2_Artery_vein/test_outside.py
@@ -21,6 +21,7 @@
 from skimage.morphology import skeletonize,remove_small_objects

 AUTOMORPH_DATA = os.getenv('AUTOMORPH_DATA','..')
+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8))  # use num_workers=0 to disable multiprocessing

 def filter_frag(data_path):
     if os.path.isdir(data_path + 'raw/.ipynb_checkpoints'):
@@ -293,10 +294,10 @@ def get_args():
     img_size = Define_image_size(args.uniform, args.dataset)
     dataset_name = args.dataset
     checkpoint_saved = dataset_name + '/' +args.jn + '/Discriminator_unet/'
-    csv_save = 'test_csv/' + args.jn
+    # csv_save = 'test_csv/' + args.jn

-    if not os.path.isdir(csv_save):
-        os.makedirs(csv_save)
+    # if not os.path.isdir(csv_save):
+    #     os.makedirs(csv_save)

     test_dir= f'{AUTOMORPH_DATA}/Results/M1/Good_quality/'
     test_label = "./data/{}/test/1st_manual/".format(dataset_name)
@@ -307,7 +308,7 @@


     dataset = LearningAVSegData_OOD(test_dir, test_label, test_mask, img_size, dataset_name=dataset_name, train_or=False)
-    test_loader = DataLoader(dataset, batch_size=args.batchsize, shuffle=False, num_workers=8, pin_memory=False, drop_last=False)
+    test_loader = DataLoader(dataset, batch_size=args.batchsize, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False, drop_last=False)


     net_G_1 = Generator_main(input_channels=3, n_filters = 32, n_classes=4, bilinear=False)
3 changes: 2 additions & 1 deletion M2_Vessel_seg/test_outside_integrated.py
@@ -18,6 +18,7 @@
 import shutil

 AUTOMORPH_DATA = os.getenv('AUTOMORPH_DATA','..')
+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8))  # use num_workers=0 to disable multiprocessing

 def filter_frag(data_path):
     if os.path.isdir(data_path + 'resize_binary/.ipynb_checkpoints'):
@@ -173,7 +174,7 @@ def test_net(data_path, batch_size, device, dataset_train, dataset_test, image_s
     VD_list = []

     dataset_data = SEDataset_out(test_dir, test_label, mask_dir, image_size, dataset_test, threshold, uniform='True', train_or=False)
-    test_loader = DataLoader(dataset_data, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=False, drop_last=False)
+    test_loader = DataLoader(dataset_data, batch_size=batch_size, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False, drop_last=False)

     dir_checkpoint_1="./Saved_model/train_on_{}/{}_savebest_randomseed_{}/".format(dataset_train,job_name,24)
     dir_checkpoint_2="./Saved_model/train_on_{}/{}_savebest_randomseed_{}/".format(dataset_train,job_name,26)
4 changes: 3 additions & 1 deletion M2_lwnet_disc_cup/utils/get_loaders.py
@@ -13,6 +13,8 @@
 import logging
 from glob import glob

+NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8))  # use num_workers=0 to disable multiprocessing
+
 class TrainDataset(Dataset):
     def __init__(self, csv_path, transforms=None, label_values=None):

@@ -220,7 +222,7 @@ def get_test_dataset(data_path, csv_path='test.csv', tg_size=(512, 512)):
     #path_test_csv = osp.join(data_path, csv_path)
     path_test_csv = data_path
     test_dataset = TestDataset(csv_path=path_test_csv, tg_size=tg_size)
-    test_loader = DataLoader(dataset=test_dataset, batch_size=16, num_workers=8, pin_memory=False)
+    test_loader = DataLoader(dataset=test_dataset, batch_size=16, num_workers=NUM_WORKERS, pin_memory=False)

     return test_loader
5 changes: 5 additions & 0 deletions README.md
@@ -52,6 +52,11 @@ Zero experience in Docker? No worries [DOCKER.md](DOCKER.md).

 ## Common questions

+### Environment variables
+A few optional environment variables are available for all pipelines:
+- AUTOMORPH_DATA: the directory where results are stored. If not set, a "Results" subdirectory is created in the current directory. If AUTOMORPH_DATA points outside the source directory (for example, /tmp/AutoMorph), the source directory can be made read-only, which is useful for deployment inside AWS Lambda.
+- NUM_WORKERS: the number of worker processes for the dataloader. The default is 8. If NUM_WORKERS is set to 0, the dataloader runs in the main process with no multiprocessing.
+
 ### Memory/ram error

 We use Tesla T4 (16Gb) and 32vCPUs (120Gb). When you meet memory/ram issue in running, try to decrease batch size:
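For illustration, both variables can be set at invocation time, e.g. `AUTOMORPH_DATA=/tmp/AutoMorph NUM_WORKERS=0 bash run.sh` (assuming run.sh is the pipeline entry point, as its executable bit below suggests). A minimal sketch of how a module consumes them, mirroring the pattern added across the files above (the path is the one used in M2_Artery_vein; the print is illustrative only):

```python
import os

# Same defaults as in the modified modules: results live next to the
# source tree unless AUTOMORPH_DATA points elsewhere, and 0 workers
# disables dataloader multiprocessing.
AUTOMORPH_DATA = os.getenv('AUTOMORPH_DATA', '..')
NUM_WORKERS = int(os.getenv('NUM_WORKERS', 8))

test_dir = f'{AUTOMORPH_DATA}/Results/M1/Good_quality/'
print(f'reading images from {test_dir} with {NUM_WORKERS} dataloader workers')
```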
Empty file modified run.sh (mode 100644 → 100755)