diff --git a/NYUv2/best_net_HHA.pth b/NYUv2/best_net_HHA.pth
new file mode 100644
index 0000000..44d40dc
Binary files /dev/null and b/NYUv2/best_net_HHA.pth differ
diff --git a/NYUv2/best_net_rgb.pth b/NYUv2/best_net_rgb.pth
new file mode 100644
index 0000000..f0f0603
Binary files /dev/null and b/NYUv2/best_net_rgb.pth differ
diff --git a/README.md b/README.md
index 761ba12..c8ca854 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,11 @@ python test.py \
   --how_many 0
 ```
 
+The `FloorPlan1_physics` data must be reachable as `../FloorPlan1_physics` from the directory where `test.py` is run; then:
+```
+python test.py --list "../FloorPlan1_physics/images.hdf5" --dataset_mode ai2thor --pretrained_model checkpoints/label2city/model
+```
+
 ### Citation
 
 If you find this work useful, please consider citing:
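The `ai2thor` loader added below (`data/ai2thor_dataset.py`) hard-codes `../FloorPlan1_physics/images.hdf5` and `../FloorPlan1_physics/depth.hdf5` and indexes the depth file with the keys of the RGB file. A minimal sanity-check sketch for the data layout (the two paths are the ones hard-coded in the loader; everything else here is illustrative and not part of the patch):

```
import h5py
import numpy as np

# Paths as hard-coded in data/ai2thor_dataset.py
rgb_path = "../FloorPlan1_physics/images.hdf5"
depth_path = "../FloorPlan1_physics/depth.hdf5"

with h5py.File(rgb_path, "r") as rgb, h5py.File(depth_path, "r") as depth:
    rgb_keys = list(rgb.keys())
    # The loader looks up depth frames by the RGB keys, so every RGB key
    # must also exist in depth.hdf5.
    assert set(rgb_keys) <= set(depth.keys()), "depth.hdf5 is missing frames"
    frame = np.asarray(rgb[rgb_keys[0]])
    print(len(rgb_keys), "frames; first frame shape:", frame.shape)
```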
diff --git a/checkpoints/label2city/model/best_net_HHA.pth b/checkpoints/label2city/model/best_net_HHA.pth
new file mode 100644
index 0000000..44d40dc
Binary files /dev/null and b/checkpoints/label2city/model/best_net_HHA.pth differ
diff --git a/checkpoints/label2city/model/best_net_rgb.pth b/checkpoints/label2city/model/best_net_rgb.pth
new file mode 100644
index 0000000..f0f0603
Binary files /dev/null and b/checkpoints/label2city/model/best_net_rgb.pth differ
diff --git a/checkpoints/label2city/model/latest.pth b/checkpoints/label2city/model/latest.pth
new file mode 100644
index 0000000..f0f0603
Binary files /dev/null and b/checkpoints/label2city/model/latest.pth differ
diff --git a/checkpoints/nyuv2_VGGdeeplab_depthconv/opt.txt b/checkpoints/nyuv2_VGGdeeplab_depthconv/opt.txt
new file mode 100644
index 0000000..8b7325b
--- /dev/null
+++ b/checkpoints/nyuv2_VGGdeeplab_depthconv/opt.txt
@@ -0,0 +1,54 @@
+------------ Options -------------
+batchSize: 1
+beta1: 0.5
+checkpoints_dir: ./checkpoints
+colorjitter: True
+continue_train: False
+crop: True
+dataroot: 
+dataset_mode: nyuv2
+debug: False
+decoder: psp_bilinear
+depthconv: True
+depthglobalpool: False
+display_freq: 100
+display_winsize: 512
+encoder: resnet50_dilated8
+fineSize: [480, 640]
+flip: True
+gpu_ids: [0]
+inputmode: bgr-mean
+isTrain: True
+iterSize: 10
+label_nc: 40
+list: dataset/lists/nyuv2/train.lst
+loadfroms: False
+lr: 0.00025
+lr_power: 0.9
+max_dataset_size: inf
+maxbatchsize: -1
+model: DeeplabVGG
+momentum: 0.9
+nThreads: 1
+name: nyuv2_VGGdeeplab_depthconv
+nepochs: 100
+no_html: False
+phase: train
+pretrained_model: 
+pretrained_model_HHA: 
+pretrained_model_rgb: 
+print_freq: 100
+save_epoch_freq: 10
+save_latest_freq: 1000
+scale: True
+serial_batches: False
+tf_log: False
+use_softmax: False
+vallist: dataset/lists/nyuv2/val.lst
+verbose: False
+warmup_iters: 500
+wd: 0.0004
+which_epoch: latest
+which_epoch_HHA: latest
+which_epoch_rgb: latest
+-------------- End ----------------
diff --git a/data/__pycache__/__init__.cpython-36.pyc b/data/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..7798edb
Binary files /dev/null and b/data/__pycache__/__init__.cpython-36.pyc differ
diff --git a/data/__pycache__/ai2thor_dataset.cpython-36.pyc b/data/__pycache__/ai2thor_dataset.cpython-36.pyc
new file mode 100644
index 0000000..6361912
Binary files /dev/null and b/data/__pycache__/ai2thor_dataset.cpython-36.pyc differ
diff --git a/data/__pycache__/base_data_loader.cpython-36.pyc b/data/__pycache__/base_data_loader.cpython-36.pyc
new file mode 100644
index 0000000..abc3732
Binary files /dev/null and b/data/__pycache__/base_data_loader.cpython-36.pyc differ
diff --git a/data/__pycache__/base_dataset.cpython-36.pyc b/data/__pycache__/base_dataset.cpython-36.pyc
new file mode 100644
index 0000000..8659e1e
Binary files /dev/null and b/data/__pycache__/base_dataset.cpython-36.pyc differ
diff --git a/data/__pycache__/custom_dataset_data_loader.cpython-36.pyc b/data/__pycache__/custom_dataset_data_loader.cpython-36.pyc
new file mode 100644
index 0000000..bade390
Binary files /dev/null and b/data/__pycache__/custom_dataset_data_loader.cpython-36.pyc differ
diff --git a/data/__pycache__/data_loader.cpython-36.pyc b/data/__pycache__/data_loader.cpython-36.pyc
new file mode 100644
index 0000000..235a492
Binary files /dev/null and b/data/__pycache__/data_loader.cpython-36.pyc differ
diff --git a/data/__pycache__/nyuv2_dataset_crop.cpython-36.pyc b/data/__pycache__/nyuv2_dataset_crop.cpython-36.pyc
new file mode 100644
index 0000000..a3894a5
Binary files /dev/null and b/data/__pycache__/nyuv2_dataset_crop.cpython-36.pyc differ
diff --git a/data/ai2thor_dataset.py b/data/ai2thor_dataset.py
new file mode 100644
index 0000000..a6051fa
--- /dev/null
+++ b/data/ai2thor_dataset.py
@@ -0,0 +1,117 @@
+import os.path
+import numpy as np
+import torchvision
+import torchvision.transforms as transforms
+import torch
+import h5py
+from data.base_dataset import *
+from PIL import Image
+import math, random
+import time
+import h5py
+
+def make_dataset_from_hdf5(hdf5filename):
+    images = []
+    depths = []
+
+    # images_filename, depth_filename = hdf5filename.split()
+    # rgb_path = "../FloorPlan1_physics/images.hdf5"
+    # depth_path = "../FloorPlan1_physics/depth.hdf5"
+
+    rgb_data = h5py.File(hdf5filename, "r")
+    # depth_data = h5py.File(depth_filename, "r")
+
+    rgb_data_keys = list(rgb_data.keys())
+    # depth_data_keys = list(depth_data.keys())
+    # assert len(rgb_data_keys) == len(depth_data_keys)
+
+    # for key in rgb_data_keys:
+    #     images.append(rgb_data[key])
+    #     depths.append(depth_data[key])
+    # print(len(images))
+    # assert len(rgb_data_keys) == len(depth_data_keys)
+    return {'keys': rgb_data_keys}
+
+class Ai2ThorDataset(BaseDataset):
+    def initialize(self, opt):
+        self.opt = opt
+        np.random.seed(int(time.time()))
+        self.paths_dict = make_dataset_from_hdf5(opt.list)
+        self.len = len(self.paths_dict['keys'])
+        # self.label_weight = torch.Tensor(label_weight)
+        self.datafile = 'ai2thor_dataset.py'
+
+    def __getitem__(self, index):
+        key = self.paths_dict['keys'][index]
+
+        rgb_path = "../FloorPlan1_physics/images.hdf5"
+        depth_path = "../FloorPlan1_physics/depth.hdf5"
+
+        rgb_data = h5py.File(rgb_path, "r")
+        depth_data = h5py.File(depth_path, "r")
+
+
+        img = np.asarray(rgb_data[key]) #.astype(np.uint8)
+        depth = np.asarray(depth_data[key]).astype(np.float32)/120
+        # depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.float32)/120. # 1/10 * depth
+        # key = np.asanyarray(self.paths_dict['keys'][index])
+        # seg = np.asarray(Image.open(self.paths_dict['segs'][index]))-1
+
+        params = get_params_sunrgbd(self.opt, img.shape[:2], maxcrop=0.7, maxscale=1.1)
+        depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
+        # seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
+        if self.opt.inputmode == 'bgr-mean':
+            img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
+        else:
+            img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
+
+        return {'image': img_tensor_tranformed,
+                'depth': depth_tensor_tranformed,
+                'key': key
+                }
+                # 'seg': seg_tensor_tranformed,
+                # 'imgpath': self.paths_dict['segs'][index]}
+
+    def __len__(self):
+        return self.len
+
+    def name(self):
+        return 'ai2thor_dataset'
+
+
+class Ai2ThorDataset_val(BaseDataset):
+    def initialize(self, opt):
+        self.opt = opt
+        np.random.seed(8934)
+        self.paths_dict = make_dataset_from_hdf5(opt.list)
+        self.len = len(self.paths_dict['images'])
+        # self.label_weight = torch.Tensor(label_weight)
+        # self.datafile = 'ai2thor_dataset.py'
+
+    def __getitem__(self, index):
+
+        img = np.asarray(Image.open(self.paths_dict['images'][index])) #.astype(np.uint8)
+        depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.float32)/120. # 1/10 * depth
+        key = np.asanyarray(self.paths_dict['keys'][index])
+        # seg = np.asarray(Image.open(self.paths_dict['segs'][index]))-1
+
+        params = get_params_sunrgbd(self.opt, img.shape, maxcrop=0.7, maxscale=1.1)
+        depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
+        # seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
+        if self.opt.inputmode == 'bgr-mean':
+            img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
+        else:
+            img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
+
+        return {'image': img_tensor_tranformed,
+                'depth': depth_tensor_tranformed,
+                'key': key
+                }
+                # 'seg': seg_tensor_tranformed,
+                # 'imgpath': self.paths_dict['segs'][index]}
+
+    def __len__(self):
+        return self.len
+
+    def name(self):
+        return 'ai2thor_dataset'
diff --git a/data/custom_dataset_data_loader.py b/data/custom_dataset_data_loader.py
index 2eb3e84..894eea5 100755
--- a/data/custom_dataset_data_loader.py
+++ b/data/custom_dataset_data_loader.py
@@ -36,11 +36,19 @@ def CreateDataset(opt):
         else:
             dataset_val = None
 
+    elif opt.dataset_mode == 'ai2thor':
+        from data.ai2thor_dataset import Ai2ThorDataset, Ai2ThorDataset_val
+        dataset = Ai2ThorDataset()
+        if opt.vallist!='':
+            dataset_val = Ai2ThorDataset_val()
+        else:
+            dataset_val = None
+
     print("dataset [%s] was created" % (dataset.name()))
     dataset.initialize(opt)
     if dataset_val != None:
         dataset_val.initialize(opt)
-    return dataset,dataset_val
+    return dataset, dataset_val
 
 class CustomDatasetDataLoader(BaseDataLoader):
     def name(self):
diff --git a/models/Deeplab.py b/models/Deeplab.py
index fac6382..6fa664d 100644
--- a/models/Deeplab.py
+++ b/models/Deeplab.py
@@ -2,16 +2,16 @@
 import math
 import torch.utils.model_zoo as model_zoo
 import torch
-from .base_model import BaseModel
+from models.base_model import BaseModel
 import numpy as np
-from . import losses
+from models import losses
 import shutil
 from utils.util import *
 from torch.autograd import Variable
 from collections import OrderedDict
 from tensorboardX import SummaryWriter
 import os
-import VGG_Deeplab as VGG_Deeplab
+from models import VGG_Deeplab as VGG_Deeplab
 
 
 class Deeplab_VGG(nn.Module):
@@ -71,7 +71,7 @@ def __init__(self, opt, dataset=None, encoder='VGG'):
                 self.load()
                 print("Successfully loaded model, continue training....!")
 
-        self.model.cuda()
+        # self.model.cuda()
         self.normweightgrad=0.
         # if len(opt.gpu_ids):#opt.isTrain and
         #     self.model = torch.nn.DataParallel(self.model, device_ids=opt.gpu_ids)
@@ -79,19 +79,21 @@ def __init__(self, opt, dataset=None, encoder='VGG'):
     def forward(self, data, isTrain=True):
         self.model.zero_grad()
 
-        self.image = Variable(data['image'], volatile=not isTrain).cuda()
+        self.image = Variable(data['image'], volatile=not isTrain)
+        # self.image = Variable(data['image'], volatile=not isTrain).cuda()
         if 'depth' in data.keys():
-            self.depth = Variable(data['depth'], volatile=not isTrain).cuda()
+            self.depth = Variable(data['depth'], volatile=not isTrain)
+            # self.depth = Variable(data['depth'], volatile=not isTrain).cuda()
         else:
             self.depth = None
-        if data['seg'] is not None:
-            self.seggt = Variable(data['seg'], volatile=not isTrain).cuda()
-        else:
-            self.seggt = None
+        # if data['seg'] is not None:
+        #     self.seggt = Variable(data['seg'], volatile=not isTrain).cuda()
+        # else:
+        self.seggt = None
 
         input_size = self.image.size()
-        self.segpred = self.model(self.image,self.depth)
+        self.segpred = self.model(self.image, self.depth)
         self.segpred = nn.functional.upsample(self.segpred, size=(input_size[2], input_size[3]), mode='bilinear')
         # self.segpred = nn.functional.log_softmax(nn.functional.upsample(self.segpred, size=(input_size[2], input_size[3]), mode='bilinear'))
@@ -172,15 +174,15 @@ def update_learning_rate(self, step, total_step):
         self.writer.add_scalar(self.opt.name+'/Learning_Rate/', lr, step)
 
-        self.optimizer.param_groups[0]['lr'] = lr
-        self.optimizer.param_groups[1]['lr'] = lr
-        self.optimizer.param_groups[2]['lr'] = lr
-        self.optimizer.param_groups[3]['lr'] = lr
-        # self.optimizer.param_groups[0]['lr'] = lr
-        # self.optimizer.param_groups[1]['lr'] = lr*10
-        # self.optimizer.param_groups[2]['lr'] = lr*2 #* 100
-        # self.optimizer.param_groups[3]['lr'] = lr*20
-        # self.optimizer.param_groups[4]['lr'] = lr*100
+        self.optimizer.param_groups[0]['lr'] = lr
+        self.optimizer.param_groups[1]['lr'] = lr
+        self.optimizer.param_groups[2]['lr'] = lr
+        self.optimizer.param_groups[3]['lr'] = lr
+        # self.optimizer.param_groups[0]['lr'] = lr
+        # self.optimizer.param_groups[1]['lr'] = lr*10
+        # self.optimizer.param_groups[2]['lr'] = lr*2 #* 100
+        # self.optimizer.param_groups[3]['lr'] = lr*20
+        # self.optimizer.param_groups[4]['lr'] = lr*100
 
         # torch.nn.utils.clip_grad_norm(self.model.Scale.get_1x_lr_params_NOscale(), 1.)
diff --git a/models/Deeplab_HHA.py b/models/Deeplab_HHA.py
index 2e7c9ae..28c58f9 100644
--- a/models/Deeplab_HHA.py
+++ b/models/Deeplab_HHA.py
@@ -2,7 +2,7 @@
 import math
 import torch.utils.model_zoo as model_zoo
 import torch
-from .base_model import BaseModel
+from models.base_model import BaseModel
 import numpy as np
 from . import losses
 import shutil
diff --git a/models/VGG_Deeplab.py b/models/VGG_Deeplab.py
index 8b16180..bc60e73 100644
--- a/models/VGG_Deeplab.py
+++ b/models/VGG_Deeplab.py
@@ -1,9 +1,9 @@
-from model_utils import *
+from models.model_utils import *
 import torch.nn as nn
 import torch.utils.model_zoo as model_zoo
 import math
-from .ops.depthconv.modules import DepthConv
-from .ops.depthavgpooling.modules import Depthavgpooling
+from models.ops.depthconv.modules import DepthConv
+from models.ops.depthavgpooling.modules import Depthavgpooling
 import torch
 import torchvision
diff --git a/models/__pycache__/Deeplab.cpython-36.pyc b/models/__pycache__/Deeplab.cpython-36.pyc
new file mode 100644
index 0000000..a0710ce
Binary files /dev/null and b/models/__pycache__/Deeplab.cpython-36.pyc differ
diff --git a/models/__pycache__/VGG_Deeplab.cpython-36.pyc b/models/__pycache__/VGG_Deeplab.cpython-36.pyc
new file mode 100644
index 0000000..f51d097
Binary files /dev/null and b/models/__pycache__/VGG_Deeplab.cpython-36.pyc differ
diff --git a/models/__pycache__/__init__.cpython-36.pyc b/models/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..a3b01b7
Binary files /dev/null and b/models/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/__pycache__/base_model.cpython-36.pyc b/models/__pycache__/base_model.cpython-36.pyc
new file mode 100644
index 0000000..e492fc9
Binary files /dev/null and b/models/__pycache__/base_model.cpython-36.pyc differ
diff --git a/models/__pycache__/losses.cpython-36.pyc b/models/__pycache__/losses.cpython-36.pyc
new file mode 100644
index 0000000..8e49b99
Binary files /dev/null and b/models/__pycache__/losses.cpython-36.pyc differ
diff --git a/models/__pycache__/model_utils.cpython-36.pyc b/models/__pycache__/model_utils.cpython-36.pyc
new file mode 100644
index 0000000..ebf0a20
Binary files /dev/null and b/models/__pycache__/model_utils.cpython-36.pyc differ
diff --git a/models/__pycache__/models.cpython-36.pyc b/models/__pycache__/models.cpython-36.pyc
new file mode 100644
index 0000000..7fed2e6
Binary files /dev/null and b/models/__pycache__/models.cpython-36.pyc differ
diff --git a/models/ops/depthavgpooling/_ext/__init__.py b/models/ops/depthavgpooling/_ext/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/models/ops/depthavgpooling/_ext/__pycache__/__init__.cpython-36.pyc b/models/ops/depthavgpooling/_ext/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..f43d512
Binary files /dev/null and b/models/ops/depthavgpooling/_ext/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthavgpooling/_ext/depthavgpooling/__init__.py b/models/ops/depthavgpooling/_ext/depthavgpooling/__init__.py
new file mode 100644
index 0000000..f386879
--- /dev/null
+++ b/models/ops/depthavgpooling/_ext/depthavgpooling/__init__.py
@@ -0,0 +1,15 @@
+
+from torch.utils.ffi import _wrap_function
+from ._depthavgpooling import lib as _lib, ffi as _ffi
+
+__all__ = []
+def _import_symbols(locals):
+    for symbol in dir(_lib):
+        fn = getattr(_lib, symbol)
+        if callable(fn):
+            locals[symbol] = _wrap_function(fn, _ffi)
+        else:
+            locals[symbol] = fn
+        __all__.append(symbol)
+
+_import_symbols(locals())
diff --git a/models/ops/depthavgpooling/_ext/depthavgpooling/__pycache__/__init__.cpython-36.pyc b/models/ops/depthavgpooling/_ext/depthavgpooling/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..02e9ed2
Binary files /dev/null and b/models/ops/depthavgpooling/_ext/depthavgpooling/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthavgpooling/_ext/depthavgpooling/_depthavgpooling.so b/models/ops/depthavgpooling/_ext/depthavgpooling/_depthavgpooling.so
new file mode 100755
index 0000000..14b06f9
Binary files /dev/null and b/models/ops/depthavgpooling/_ext/depthavgpooling/_depthavgpooling.so differ
diff --git a/models/ops/depthavgpooling/functions/__pycache__/__init__.cpython-36.pyc b/models/ops/depthavgpooling/functions/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..680ace2
Binary files /dev/null and b/models/ops/depthavgpooling/functions/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthavgpooling/functions/__pycache__/depthavgpooling.cpython-36.pyc b/models/ops/depthavgpooling/functions/__pycache__/depthavgpooling.cpython-36.pyc
new file mode 100644
index 0000000..76626ec
Binary files /dev/null and b/models/ops/depthavgpooling/functions/__pycache__/depthavgpooling.cpython-36.pyc differ
diff --git a/models/ops/depthavgpooling/modules/__pycache__/__init__.cpython-36.pyc b/models/ops/depthavgpooling/modules/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..316ffe7
Binary files /dev/null and b/models/ops/depthavgpooling/modules/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthavgpooling/modules/__pycache__/depthavgpooling.cpython-36.pyc b/models/ops/depthavgpooling/modules/__pycache__/depthavgpooling.cpython-36.pyc
new file mode 100644
index 0000000..4ef9572
Binary files /dev/null and b/models/ops/depthavgpooling/modules/__pycache__/depthavgpooling.cpython-36.pyc differ
diff --git a/models/ops/depthavgpooling/src/depthavgpooling_cuda_kernel.cu.o b/models/ops/depthavgpooling/src/depthavgpooling_cuda_kernel.cu.o
index 5900bb4..2599cd3 100644
Binary files a/models/ops/depthavgpooling/src/depthavgpooling_cuda_kernel.cu.o and b/models/ops/depthavgpooling/src/depthavgpooling_cuda_kernel.cu.o differ
diff --git a/models/ops/depthconv/__pycache__/__init__.cpython-36.pyc b/models/ops/depthconv/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..e150826
Binary files /dev/null and b/models/ops/depthconv/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthconv/_ext/__init__.py b/models/ops/depthconv/_ext/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/models/ops/depthconv/_ext/__pycache__/__init__.cpython-36.pyc b/models/ops/depthconv/_ext/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..bf47bc2
Binary files /dev/null and b/models/ops/depthconv/_ext/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthconv/_ext/depthconv/__init__.py b/models/ops/depthconv/_ext/depthconv/__init__.py
new file mode 100644
index 0000000..4adbe2f
--- /dev/null
+++ b/models/ops/depthconv/_ext/depthconv/__init__.py
@@ -0,0 +1,15 @@
+
+from torch.utils.ffi import _wrap_function
+from ._depthconv import lib as _lib, ffi as _ffi
+
+__all__ = []
+def _import_symbols(locals):
+    for symbol in dir(_lib):
+        fn = getattr(_lib, symbol)
+        if callable(fn):
+            locals[symbol] = _wrap_function(fn, _ffi)
+        else:
+            locals[symbol] = fn
+        __all__.append(symbol)
+
+_import_symbols(locals())
diff --git a/models/ops/depthconv/_ext/depthconv/__pycache__/__init__.cpython-36.pyc b/models/ops/depthconv/_ext/depthconv/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..b1f7f5f
Binary files /dev/null and b/models/ops/depthconv/_ext/depthconv/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthconv/_ext/depthconv/_depthconv.so b/models/ops/depthconv/_ext/depthconv/_depthconv.so
new file mode 100755
index 0000000..c3a4142
Binary files /dev/null and b/models/ops/depthconv/_ext/depthconv/_depthconv.so differ
diff --git a/models/ops/depthconv/functions/__pycache__/__init__.cpython-36.pyc b/models/ops/depthconv/functions/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..3c755df
Binary files /dev/null and b/models/ops/depthconv/functions/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthconv/functions/__pycache__/depthconv.cpython-36.pyc b/models/ops/depthconv/functions/__pycache__/depthconv.cpython-36.pyc
new file mode 100644
index 0000000..ab934e7
Binary files /dev/null and b/models/ops/depthconv/functions/__pycache__/depthconv.cpython-36.pyc differ
diff --git a/models/ops/depthconv/modules/__pycache__/__init__.cpython-36.pyc b/models/ops/depthconv/modules/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..3314101
Binary files /dev/null and b/models/ops/depthconv/modules/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/ops/depthconv/modules/__pycache__/depthconv.cpython-36.pyc b/models/ops/depthconv/modules/__pycache__/depthconv.cpython-36.pyc
new file mode 100644
index 0000000..7e9b3a3
Binary files /dev/null and b/models/ops/depthconv/modules/__pycache__/depthconv.cpython-36.pyc differ
diff --git a/models/ops/depthconv/src/depthconv_cuda_kernel.cu.o b/models/ops/depthconv/src/depthconv_cuda_kernel.cu.o
new file mode 100644
index 0000000..e8d387b
Binary files /dev/null and b/models/ops/depthconv/src/depthconv_cuda_kernel.cu.o differ
diff --git a/options/__pycache__/__init__.cpython-36.pyc b/options/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..5b65b61
Binary files /dev/null and b/options/__pycache__/__init__.cpython-36.pyc differ
diff --git a/options/__pycache__/base_options.cpython-36.pyc b/options/__pycache__/base_options.cpython-36.pyc
new file mode 100644
index 0000000..7fc78e8
Binary files /dev/null and b/options/__pycache__/base_options.cpython-36.pyc differ
diff --git a/options/__pycache__/test_options.cpython-36.pyc b/options/__pycache__/test_options.cpython-36.pyc
new file mode 100644
index 0000000..806aa94
Binary files /dev/null and b/options/__pycache__/test_options.cpython-36.pyc differ
diff --git a/options/__pycache__/train_options.cpython-36.pyc b/options/__pycache__/train_options.cpython-36.pyc
new file mode 100644
index 0000000..e1ea3e2
Binary files /dev/null and b/options/__pycache__/train_options.cpython-36.pyc differ
diff --git a/options/test_options.py b/options/test_options.py
index 333895f..180959d 100755
--- a/options/test_options.py
+++ b/options/test_options.py
@@ -1,6 +1,6 @@
 ### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
 ### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
-from .base_options import BaseOptions
+from options.base_options import BaseOptions
 
 class TestOptions(BaseOptions):
     def initialize(self):
diff --git a/options/train_options.py b/options/train_options.py
index fcbbcf7..3e7aa37 100755
--- a/options/train_options.py
+++ b/options/train_options.py
@@ -1,6 +1,6 @@
 ### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
 ### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
-from .base_options import BaseOptions
+from options.base_options import BaseOptions
 
 class TrainOptions(BaseOptions):
     def initialize(self):
diff --git a/test.py b/test.py
index 390c874..801a5fb 100644
--- a/test.py
+++ b/test.py
@@ -1,5 +1,6 @@
 import os
 import numpy as np
+import time
 from collections import OrderedDict
 from options.test_options import TestOptions
 from data.data_loader import CreateDataLoader
@@ -9,18 +10,39 @@
 from utils import html
 from torch.autograd import Variable
 
+# import h5py
+#
+# rgb_path = "../FloorPlan1_physics/images.hdf5"
+# depth_path = "../FloorPlan1_physics/depth.hdf5"
+#
+# rgb_data = h5py.File(rgb_path, "r")
+# depth_data = h5py.File(depth_path, "r")
+#
+# rgb_data_keys = list(rgb_data.keys())
+# depth_data_keys = list(depth_data.keys())
+#
+# assert len(rgb_data_keys) == len(depth_data_keys)
+
 opt = TestOptions().parse(save=False)
 opt.nThreads = 1
 opt.batchSize = 1
 opt.serial_batches = True  # no shuffle
+# print (opt)
+
+# model = create_model(opt)
+#
+# model.model.eval()
+# seggt, segpred = model.forward(rgb_data[rgb_data_keys[0]], False)
+# print(segpred.shape)
+
 data_loader = CreateDataLoader(opt)
 dataset, _ = data_loader.load_data()
 model = create_model(opt,data_loader.dataset)
 visualizer = Visualizer(opt)
 # create website
-web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
-webpage = html.HTML(web_dir, '%s: %s' % (opt.name, pt.which_epoch))
+# web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
+# webpage = html.HTML(web_dir, '%s: %s' % (opt.name, pt.which_epoch))
 
 # test
@@ -34,28 +56,31 @@
     if i >= opt.how_many and opt.how_many!=0:
         break
     seggt, segpred = model.forward(data,False)
-    print time.time() - tic
+    print(time.time() - tic)
     tic = time.time()
 
-    seggt = seggt.data.cpu().numpy()
+# seggt = seggt.data.cpu().numpy()
    segpred = segpred.data.cpu().numpy()
 
-    label_trues.append(seggt)
-    label_preds.append(segpred)
-
-    visuals = model.get_visuals(i)
-    img_path = data['imgpath']
-    print('process image... %s' % img_path)
-    visualizer.save_images(webpage, visuals, img_path)
-
-metrics0 = util.label_accuracy_score(
-    label_trues, label_preds, n_class=opt.label_nc, returniu=True)
-metrics = np.array(metrics0[:4])
-metrics *= 100
-print('''\
-    Accuracy: {0}
-    Accuracy Class: {1}
-    Mean IU: {2}
-    FWAV Accuracy: {3}'''.format(*metrics))
-
-webpage.save()
+    print(i)
+    print(segpred)
+
+    # label_trues.append(seggt)
+    # label_preds.append(segpred)
+    #
+    # visuals = model.get_visuals(i)
+    # img_path = data['imgpath']
+    # print('process image... %s' % img_path)
+    # visualizer.save_images(webpage, visuals, img_path)
+
+# metrics0 = util.label_accuracy_score(
+#     label_trues, label_preds, n_class=opt.label_nc, returniu=True)
+# metrics = np.array(metrics0[:4])
+# metrics *= 100
+# print('''\
+#     Accuracy: {0}
+#     Accuracy Class: {1}
+#     Mean IU: {2}
+#     FWAV Accuracy: {3}'''.format(*metrics))
+
+# webpage.save()
diff --git a/utils/__pycache__/__init__.cpython-36.pyc b/utils/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..a261344
Binary files /dev/null and b/utils/__pycache__/__init__.cpython-36.pyc differ
diff --git a/utils/__pycache__/html.cpython-36.pyc b/utils/__pycache__/html.cpython-36.pyc
new file mode 100644
index 0000000..29907e1
Binary files /dev/null and b/utils/__pycache__/html.cpython-36.pyc differ
diff --git a/utils/__pycache__/util.cpython-36.pyc b/utils/__pycache__/util.cpython-36.pyc
new file mode 100644
index 0000000..0947b03
Binary files /dev/null and b/utils/__pycache__/util.cpython-36.pyc differ
diff --git a/utils/__pycache__/visualizer.cpython-36.pyc b/utils/__pycache__/visualizer.cpython-36.pyc
new file mode 100644
index 0000000..5ecd326
Binary files /dev/null and b/utils/__pycache__/visualizer.cpython-36.pyc differ
diff --git a/visualize.py b/visualize.py
new file mode 100644
index 0000000..2ca54e6
--- /dev/null
+++ b/visualize.py
@@ -0,0 +1,19 @@
+import numpy as np
+import argparse
+import os
+import imageio
+
+parser = argparse.ArgumentParser(description='image_writer')
+parser.add_argument('--file', type=str, default='img', help='npy file to save as an image')
+args = parser.parse_args()
+
+data = np.load(args.file)
+_, channels, height, width = data.shape
+if channels == 1:
+    data = data.reshape((height, width, channels))
+elif channels == 3:
+    data = np.transpose(data, (0, 2, 3, 1)).reshape((height, width, channels))
+
+filename, file_extension = os.path.splitext(args.file)
+imageio.imwrite(filename + ".jpg", data)
+
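The new `visualize.py` only handles arrays shaped `(1, 1, H, W)` or `(1, 3, H, W)`. The modified `test.py` above only prints `segpred`, so the snippet below is a hypothetical bridge (the `np.save` call, the file name, and the argmax/scaling choices are illustrative assumptions, not part of the patch) showing one way to turn a `(1, label_nc, H, W)` prediction into something `visualize.py` can render:

```
import numpy as np

# Stand-in for the (1, label_nc, H, W) scores that test.py prints;
# label_nc is 40 and fineSize is [480, 640] in the recorded opt.txt.
segpred = np.random.rand(1, 40, 480, 640).astype(np.float32)

# Collapse class scores to a single-channel label map and spread the
# 0..39 labels over the 0..234 gray range so the image is visible.
labels = np.expand_dims(segpred.argmax(axis=1), 1)  # shape (1, 1, 480, 640)
np.save("segpred_000.npy", (labels * 6).astype(np.uint8))

# Then: python visualize.py --file segpred_000.npy   -> writes segpred_000.jpg
```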