From ae058675f659c61d58898a130448f56f6de62daa Mon Sep 17 00:00:00 2001
From: "Alfred W. Jacob" <82844187+alfred100p@users.noreply.github.com>
Date: Sat, 1 May 2021 23:36:45 +0530
Subject: [PATCH 1/2] Fix parameters in function call to
 torch.optim.lr_scheduler.ReduceLROnPlateau()

Fixed the parameter order, which caused an error when training the
AoANet model.
---
 downstream/AoANet_VC/misc/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/downstream/AoANet_VC/misc/utils.py b/downstream/AoANet_VC/misc/utils.py
index 611dbff..fa28a20 100644
--- a/downstream/AoANet_VC/misc/utils.py
+++ b/downstream/AoANet_VC/misc/utils.py
@@ -233,7 +233,7 @@ def __getattr__(self, name):
 
 class ReduceLROnPlateau(object):
     "Optim wrapper that implements rate."
     def __init__(self, optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08):
-        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode, factor, patience, verbose, threshold, threshold_mode, cooldown, min_lr, eps)
+        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode, factor, patience, threshold, threshold_mode, cooldown, min_lr, eps, verbose)
         self.optimizer = optimizer
         self.current_lr = get_lr(optimizer)

From c887f5b6db6932fb5c828c8037e299ce5baadb9e Mon Sep 17 00:00:00 2001
From: "Alfred W. Jacob" <82844187+alfred100p@users.noreply.github.com>
Date: Sun, 2 May 2021 11:18:38 +0530
Subject: [PATCH 2/2] Convert to int64 CPU tensor

---
 downstream/AoANet_VC/models/AttModel.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/downstream/AoANet_VC/models/AttModel.py b/downstream/AoANet_VC/models/AttModel.py
index 02c3c9a..33d9d23 100644
--- a/downstream/AoANet_VC/models/AttModel.py
+++ b/downstream/AoANet_VC/models/AttModel.py
@@ -31,7 +31,8 @@
 def sort_pack_padded_sequence(input, lengths):
     sorted_lengths, indices = torch.sort(lengths, descending=True)
-    tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
+    sorted_lengths = sorted_lengths.to(int)
+    tmp = pack_padded_sequence(input[indices], sorted_lengths.cpu(), batch_first=True)
     inv_ix = indices.clone()
     inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
     return tmp, inv_ix
@@ -791,4 +792,4 @@ def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
 
     def _prepare_feature(self, fc_feats, att_feats, att_masks):
         fc_feats = self.fc_embed(fc_feats)
-        return fc_feats, None, None, None
\ No newline at end of file
+        return fc_feats, None, None, None
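
Note on PATCH 1/2: the wrapper's `__init__` keeps `verbose` in fifth position (the order used by early PyTorch releases), but in the PyTorch versions current when this patch was written, `torch.optim.lr_scheduler.ReduceLROnPlateau` takes `verbose` last, after `eps`. Forwarding the wrapper's arguments positionally therefore handed `verbose` to the `threshold` slot and shifted every later argument, which breaks the scheduler's argument validation. A minimal sketch, not part of the patch (the toy model, optimizer, and metric value are placeholders), showing the keyword-argument form that avoids depending on positional order at all:

```python
import torch
import torch.optim as optim

model = torch.nn.Linear(4, 2)                       # toy model, illustration only
optimizer = optim.SGD(model.parameters(), lr=0.1)

# Keyword arguments match torch's own parameter names, so the call stays
# correct regardless of where `verbose` sits in the positional signature.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    mode='min',
    factor=0.1,
    patience=10,
    threshold=0.0001,
    threshold_mode='rel',
    cooldown=0,
    min_lr=0,
    eps=1e-08,
    verbose=False,   # last positional slot in the torch 1.x signature
)

val_loss = 0.5                                      # placeholder metric
scheduler.step(val_loss)                            # step on the monitored value
```

Passing by keyword would also keep the wrapper working if torch reorders or deprecates parameters again (newer releases deprecate `verbose`).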
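Note on PATCH 2/2: since PyTorch 1.7, `torch.nn.utils.rnn.pack_padded_sequence` requires `lengths` to be a 1D int64 tensor on the CPU, even when the input batch lives on the GPU; a CUDA or floating-point lengths tensor raises an error. A minimal sketch, not part of the patch (shapes and length values are invented for illustration):

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch = torch.randn(3, 5, 8)              # (batch, time, features), zero-padded
lengths = torch.tensor([5.0, 3.0, 2.0])   # e.g. produced as float on the GPU

packed = pack_padded_sequence(
    batch,
    lengths.to(torch.int64).cpu(),        # same effect as .to(int) + .cpu() in the patch
    batch_first=True,                     # lengths here are already sorted descending
)
padded, out_lengths = pad_packed_sequence(packed, batch_first=True)
print(out_lengths)                        # tensor([5, 3, 2])
```

In the patched `sort_pack_padded_sequence`, the lengths are sorted descending before packing, so the default `enforce_sorted=True` is satisfied and only the dtype and device need fixing.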