From 0d0db7e952ae85df844660584804a627ff5d9dbe Mon Sep 17 00:00:00 2001
From: Cursor Agent
Date: Wed, 25 Mar 2026 04:16:25 +0000
Subject: [PATCH 1/4] fix: resolve circular import in LPIPS module (fixes #11)
Extract utility functions from models/__init__.py into a dedicated
util.py module to break the circular import chain:
__init__.py -> dist_model -> networks_basic -> __init__.py
- Create codes/metrics/LPIPS/models/util.py with shared helper functions
- Update __init__.py to import from util.py and use lazy import for dist_model
- Update networks_basic.py to use relative import 'from . import util'
- Update dist_model.py to use relative import 'from . import util'
- Ensure codes/metrics/__init__.py and codes/metrics/LPIPS/__init__.py exist
Co-authored-by: Thmen
---
codes/metrics/LPIPS/models/__init__.py | 160 ++-----------------
codes/metrics/LPIPS/models/dist_model.py | 2 +-
codes/metrics/LPIPS/models/networks_basic.py | 2 +-
codes/metrics/LPIPS/models/util.py | 114 +++++++++++++
4 files changed, 133 insertions(+), 145 deletions(-)
create mode 100644 codes/metrics/LPIPS/models/util.py
diff --git a/codes/metrics/LPIPS/models/__init__.py b/codes/metrics/LPIPS/models/__init__.py
index 181b11e..f46f069 100644
--- a/codes/metrics/LPIPS/models/__init__.py
+++ b/codes/metrics/LPIPS/models/__init__.py
@@ -3,160 +3,34 @@
from __future__ import division
from __future__ import print_function
-import numpy as np
-from skimage.metrics import structural_similarity as ssim
-import torch
-from torch.autograd import Variable
+from .util import (
+ normalize_tensor, l2, psnr, dssim, rgb2lab, tensor2np, np2tensor,
+ tensor2tensorlab, tensorlab2tensor, tensor2im, im2tensor, tensor2vec,
+ voc_ap,
+)
-from metrics.LPIPS.models import dist_model
+import torch
class PerceptualLoss(torch.nn.Module):
- def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0], version='0.1'): # VGG using our perceptually-learned weights (LPIPS metric)
- # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss
+ def __init__(self, model='net-lin', net='alex', colorspace='rgb',
+ spatial=False, use_gpu=True, gpu_ids=[0], version='0.1'):
super(PerceptualLoss, self).__init__()
+ from .dist_model import DistModel
+
print('Setting up Perceptual loss...')
self.use_gpu = use_gpu
self.spatial = spatial
self.gpu_ids = gpu_ids
- self.model = dist_model.DistModel()
- self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids, version=version)
- print('...[%s] initialized'%self.model.name())
+ self.model = DistModel()
+ self.model.initialize(
+ model=model, net=net, use_gpu=use_gpu, colorspace=colorspace,
+ spatial=self.spatial, gpu_ids=gpu_ids, version=version)
+ print('...[%s] initialized' % self.model.name())
print('...Done')
def forward(self, pred, target, normalize=False):
- """
- Pred and target are Variables.
- If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]
- If normalize is False, assumes the images are already between [-1,+1]
-
- Inputs pred and target are Nx3xHxW
- Output pytorch Variable N long
- """
-
if normalize:
- target = 2 * target - 1
- pred = 2 * pred - 1
-
+ target = 2 * target - 1
+ pred = 2 * pred - 1
return self.model.forward(target, pred)
-
-def normalize_tensor(in_feat,eps=1e-10):
- norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True))
- return in_feat/(norm_factor+eps)
-
-def l2(p0, p1, range=255.):
- return .5*np.mean((p0 / range - p1 / range)**2)
-
-def psnr(p0, p1, peak=255.):
- return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2))
-
-def dssim(p0, p1, range=255.):
- return (1 - ssim(p0, p1, data_range=range, multichannel=True)) / 2.
-
-def rgb2lab(in_img,mean_cent=False):
- from skimage import color
- img_lab = color.rgb2lab(in_img)
- if(mean_cent):
- img_lab[:,:,0] = img_lab[:,:,0]-50
- return img_lab
-
-def tensor2np(tensor_obj):
- # change dimension of a tensor object into a numpy array
- return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))
-
-def np2tensor(np_obj):
- # change dimenion of np array into tensor array
- return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
- # image tensor to lab tensor
- from skimage import color
-
- img = tensor2im(image_tensor)
- img_lab = color.rgb2lab(img)
- if(mc_only):
- img_lab[:,:,0] = img_lab[:,:,0]-50
- if(to_norm and not mc_only):
- img_lab[:,:,0] = img_lab[:,:,0]-50
- img_lab = img_lab/100.
-
- return np2tensor(img_lab)
-
-def tensorlab2tensor(lab_tensor,return_inbnd=False):
- from skimage import color
- import warnings
- warnings.filterwarnings("ignore")
-
- lab = tensor2np(lab_tensor)*100.
- lab[:,:,0] = lab[:,:,0]+50
-
- rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)
- if(return_inbnd):
- # convert back to lab, see if we match
- lab_back = color.rgb2lab(rgb_back.astype('uint8'))
- mask = 1.*np.isclose(lab_back,lab,atol=2.)
- mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])
- return (im2tensor(rgb_back),mask)
- else:
- return im2tensor(rgb_back)
-
-
-def rgb2lab(input):
- from skimage import color
- return color.rgb2lab(input / 255.)
-
-def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
- image_numpy = image_tensor[0].cpu().float().numpy()
- image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
- return image_numpy.astype(imtype)
-
-def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
- return torch.Tensor((image / factor - cent)
- [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-def tensor2vec(vector_tensor):
- return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
-
-def voc_ap(rec, prec, use_07_metric=False):
- """ ap = voc_ap(rec, prec, [use_07_metric])
- Compute VOC AP given precision and recall.
- If use_07_metric is true, uses the
- VOC 07 11 point method (default:False).
- """
- if use_07_metric:
- # 11 point metric
- ap = 0.
- for t in np.arange(0., 1.1, 0.1):
- if np.sum(rec >= t) == 0:
- p = 0
- else:
- p = np.max(prec[rec >= t])
- ap = ap + p / 11.
- else:
- # correct AP calculation
- # first append sentinel values at the end
- mrec = np.concatenate(([0.], rec, [1.]))
- mpre = np.concatenate(([0.], prec, [0.]))
-
- # compute the precision envelope
- for i in range(mpre.size - 1, 0, -1):
- mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
-
- # to calculate area under PR curve, look for points
- # where X axis (recall) changes value
- i = np.where(mrec[1:] != mrec[:-1])[0]
-
- # and sum (\Delta recall) * prec
- ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
- return ap
-
-def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
-# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):
- image_numpy = image_tensor[0].cpu().float().numpy()
- image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
- return image_numpy.astype(imtype)
-
-def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
-# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.):
- return torch.Tensor((image / factor - cent)
- [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
diff --git a/codes/metrics/LPIPS/models/dist_model.py b/codes/metrics/LPIPS/models/dist_model.py
index 5369e0d..abf4d50 100644
--- a/codes/metrics/LPIPS/models/dist_model.py
+++ b/codes/metrics/LPIPS/models/dist_model.py
@@ -19,7 +19,7 @@
# from IPython import embed
from . import networks_basic as networks
-import models as util
+from . import util
class DistModel(BaseModel):
def name(self):
diff --git a/codes/metrics/LPIPS/models/networks_basic.py b/codes/metrics/LPIPS/models/networks_basic.py
index 807f1f8..f111ef8 100644
--- a/codes/metrics/LPIPS/models/networks_basic.py
+++ b/codes/metrics/LPIPS/models/networks_basic.py
@@ -12,7 +12,7 @@
# from IPython import embed
from . import pretrained_networks as pn
-import metrics.LPIPS.models as util
+from . import util
def spatial_average(in_tens, keepdim=True):
return in_tens.mean([2,3],keepdim=keepdim)
diff --git a/codes/metrics/LPIPS/models/util.py b/codes/metrics/LPIPS/models/util.py
new file mode 100644
index 0000000..c768ff9
--- /dev/null
+++ b/codes/metrics/LPIPS/models/util.py
@@ -0,0 +1,114 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+from skimage.metrics import structural_similarity as ssim
+import torch
+
+
+def normalize_tensor(in_feat, eps=1e-10):
+ norm_factor = torch.sqrt(torch.sum(in_feat**2, dim=1, keepdim=True))
+ return in_feat / (norm_factor + eps)
+
+
+def l2(p0, p1, range=255.):
+ return .5 * np.mean((p0 / range - p1 / range)**2)
+
+
+def psnr(p0, p1, peak=255.):
+ return 10 * np.log10(peak**2 / np.mean((1. * p0 - 1. * p1)**2))
+
+
+def dssim(p0, p1, range=255.):
+ return (1 - ssim(p0, p1, data_range=range, multichannel=True)) / 2.
+
+
+def rgb2lab(in_img, mean_cent=False):
+ from skimage import color
+ img_lab = color.rgb2lab(in_img)
+ if mean_cent:
+ img_lab[:, :, 0] = img_lab[:, :, 0] - 50
+ return img_lab
+
+
+def tensor2np(tensor_obj):
+ return tensor_obj[0].cpu().float().numpy().transpose((1, 2, 0))
+
+
+def np2tensor(np_obj):
+ return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
+
+
+def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False):
+ from skimage import color
+
+ img = tensor2im(image_tensor)
+ img_lab = color.rgb2lab(img)
+ if mc_only:
+ img_lab[:, :, 0] = img_lab[:, :, 0] - 50
+ if to_norm and not mc_only:
+ img_lab[:, :, 0] = img_lab[:, :, 0] - 50
+ img_lab = img_lab / 100.
+
+ return np2tensor(img_lab)
+
+
+def tensorlab2tensor(lab_tensor, return_inbnd=False):
+ from skimage import color
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ lab = tensor2np(lab_tensor) * 100.
+ lab[:, :, 0] = lab[:, :, 0] + 50
+
+ rgb_back = 255. * np.clip(color.lab2rgb(lab.astype('float')), 0, 1)
+ if return_inbnd:
+ lab_back = color.rgb2lab(rgb_back.astype('uint8'))
+ mask = 1. * np.isclose(lab_back, lab, atol=2.)
+ mask = np2tensor(np.prod(mask, axis=2)[:, :, np.newaxis])
+ return (im2tensor(rgb_back), mask)
+ else:
+ return im2tensor(rgb_back)
+
+
+def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
+ image_numpy = image_tensor[0].cpu().float().numpy()
+ image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
+ return image_numpy.astype(imtype)
+
+
+def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.):
+ return torch.Tensor((image / factor - cent)
+ [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
+
+
+def tensor2vec(vector_tensor):
+ return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
+
+
+def voc_ap(rec, prec, use_07_metric=False):
+ """ ap = voc_ap(rec, prec, [use_07_metric])
+ Compute VOC AP given precision and recall.
+ If use_07_metric is true, uses the
+ VOC 07 11 point method (default:False).
+ """
+ if use_07_metric:
+ ap = 0.
+ for t in np.arange(0., 1.1, 0.1):
+ if np.sum(rec >= t) == 0:
+ p = 0
+ else:
+ p = np.max(prec[rec >= t])
+ ap = ap + p / 11.
+ else:
+ mrec = np.concatenate(([0.], rec, [1.]))
+ mpre = np.concatenate(([0.], prec, [0.]))
+
+ for i in range(mpre.size - 1, 0, -1):
+ mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+ i = np.where(mrec[1:] != mrec[:-1])[0]
+
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+ return ap
From a9b20c216bf07e81de0f9ae2a8693b9a05bef43f Mon Sep 17 00:00:00 2001
From: Cursor Agent
Date: Wed, 25 Mar 2026 04:16:30 +0000
Subject: [PATCH 2/4] fix: resolve NaN metrics by supporting jpg files and
adding empty list protection (fixes #20)
- Change metric_calculator to accept both png and jpg image files
(consistent with PairedFolderDataset which already accepts both)
- Add warning when no GT image files are found in a directory
- Add protection against averaging empty metric lists (prevents NaN)
- Replace bare except clauses in main.py with proper exception handling
- Show clear warning messages instead of silent failures
Co-authored-by: Thmen
---
codes/main.py | 43 ++++++++++++++--------------
codes/metrics/metric_calculator.py | 46 +++++++++++++++++++++++-------
2 files changed, 57 insertions(+), 32 deletions(-)
diff --git a/codes/main.py b/codes/main.py
index 7af466c..68fcd5d 100644
--- a/codes/main.py
+++ b/codes/main.py
@@ -173,10 +173,12 @@ def test(opt):
logger.info('Testing on {}: {}'.format(dataset_idx, ds_name))
# define metric calculator
- try:
- metric_calculator = MetricCalculator(opt)
- except:
- print('No metirc need to compute!')
+ metric_calculator = None
+ if opt.get('metric'):
+ try:
+ metric_calculator = MetricCalculator(opt)
+ except Exception as e:
+ logger.warning('Failed to create MetricCalculator: {}'.format(e))
# create data loader
test_loader = create_dataloader(opt, dataset_idx=dataset_idx)
@@ -199,25 +201,24 @@ def test(opt):
data_utils.save_sequence(res_seq_dir, hr_seq, frm_idx, to_bgr=True)
# compute metrics for the current sequence
- true_seq_dir = osp.join(opt['dataset'][dataset_idx]['gt_seq_dir'], seq_idx)
- try:
- metric_calculator.compute_sequence_metrics(seq_idx, true_seq_dir, '', pred_seq=hr_seq)
- except:
- print('No metirc need to compute!')
+ if metric_calculator is not None:
+ true_seq_dir = osp.join(opt['dataset'][dataset_idx]['gt_seq_dir'], seq_idx)
+ try:
+ metric_calculator.compute_sequence_metrics(seq_idx, true_seq_dir, '', pred_seq=hr_seq)
+ except Exception as e:
+ logger.warning('Failed to compute metrics for {}: {}'.format(seq_idx, e))
# save/print metrics
- try:
- if opt['test'].get('save_json'):
- # save results to json file
- json_path = osp.join(
- opt['test']['json_dir'], '{}_avg.json'.format(ds_name))
- metric_calculator.save_results(model_idx, json_path, override=True)
- else:
- # print directly
- metric_calculator.display_results()
-
- except:
- print('No metirc need to save!')
+ if metric_calculator is not None:
+ try:
+ if opt['test'].get('save_json'):
+ json_path = osp.join(
+ opt['test']['json_dir'], '{}_avg.json'.format(ds_name))
+ metric_calculator.save_results(model_idx, json_path, override=True)
+ else:
+ metric_calculator.display_results()
+ except Exception as e:
+ logger.warning('Failed to save/display metrics: {}'.format(e))
logger.info('-' * 40)
diff --git a/codes/metrics/metric_calculator.py b/codes/metrics/metric_calculator.py
index 76ac64c..ebd7124 100644
--- a/codes/metrics/metric_calculator.py
+++ b/codes/metrics/metric_calculator.py
@@ -1,6 +1,7 @@
import os
import os.path as osp
import json
+import logging
from collections import OrderedDict
import numpy as np
@@ -74,32 +75,48 @@ def get_averaged_results(self):
for metric_type in self.metric_opt.keys():
metric_avg_per_seq = []
for seq, metric_dict_per_seq in self.metric_dict.items():
- metric_avg_per_seq.append(
- np.mean(metric_dict_per_seq[metric_type]))
+ values = metric_dict_per_seq[metric_type]
+ if len(values) > 0:
+ metric_avg_per_seq.append(np.mean(values))
- metric_avg_dict[metric_type] = np.mean(metric_avg_per_seq)
+ if len(metric_avg_per_seq) > 0:
+ metric_avg_dict[metric_type] = np.mean(metric_avg_per_seq)
+ else:
+ metric_avg_dict[metric_type] = float('nan')
return metric_avg_dict
def display_results(self):
logger = base_utils.get_logger('base')
+ if not self.metric_dict:
+ logger.warning('No sequences were evaluated, no metrics to display')
+ return
+
# per sequence results
for seq, metric_dict_per_seq in self.metric_dict.items():
logger.info('Sequence: {}'.format(seq))
for metric_type in self.metric_opt.keys():
+ values = metric_dict_per_seq[metric_type]
mult = getattr(self, '{}_mult'.format(metric_type.lower()))
- logger.info('\t{}: {:.6f} (x{})'.format(
- metric_type,
- mult*np.mean(metric_dict_per_seq[metric_type]), mult))
+ if len(values) > 0:
+ logger.info('\t{}: {:.6f} (x{})'.format(
+ metric_type, mult * np.mean(values), mult))
+ else:
+ logger.warning('\t{}: N/A (no frames evaluated)'.format(
+ metric_type))
# average results
logger.info('Average')
metric_avg_dict = self.get_averaged_results()
for metric_type, avg_result in metric_avg_dict.items():
mult = getattr(self, '{}_mult'.format(metric_type.lower()))
- logger.info('\t{}: {:.6f} (x{})'.format(
- metric_type, mult*avg_result, mult))
+ if not np.isnan(avg_result):
+ logger.info('\t{}: {:.6f} (x{})'.format(
+ metric_type, mult * avg_result, mult))
+ else:
+ logger.warning('\t{}: N/A (no valid results)'.format(
+ metric_type))
def save_results(self, model_idx, save_path, override=False):
# load previous results if existed
@@ -158,9 +175,16 @@ def compute_sequence_metrics(self, seq, true_seq_dir, pred_seq_dir,
self.metric_dict[self.seq_idx_curr] = OrderedDict({
metric: [] for metric in self.metric_opt.keys()})
- # retrieve files
- true_img_lst = base_utils.retrieve_files(true_seq_dir, 'png')
- pred_img_lst = base_utils.retrieve_files(pred_seq_dir, 'png')
+ # retrieve files (support both png and jpg)
+ true_img_lst = base_utils.retrieve_files(true_seq_dir, 'png|jpg')
+ pred_img_lst = base_utils.retrieve_files(pred_seq_dir, 'png|jpg')
+
+ if len(true_img_lst) == 0:
+ logger = logging.getLogger('base')
+ logger.warning(
+ 'No image files found in GT directory: {}. '
+ 'Metrics will be empty for sequence: {}'.format(
+ true_seq_dir, seq))
# compute metrics for each frame
for i in range(len(true_img_lst)):
From eaa0568996e6780b6713ccbf3736dca2c79bb60a Mon Sep 17 00:00:00 2001
From: Cursor Agent
Date: Wed, 25 Mar 2026 04:16:34 +0000
Subject: [PATCH 3/4] docs: add usage instructions and input validation to
test.sh and profile.sh (fixes #19)
- Add usage examples and argument descriptions to both scripts
- Add input validation with helpful error messages
- Add directory existence checks before running
- Users no longer need to guess the correct command format
Co-authored-by: Thmen
---
profile.sh | 29 ++++++++++++++++++++++++++---
test.sh | 45 +++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 69 insertions(+), 5 deletions(-)
diff --git a/profile.sh b/profile.sh
index 2a5c736..d1ea1b3 100644
--- a/profile.sh
+++ b/profile.sh
@@ -2,16 +2,39 @@
# This script is used to probe a network (generator), including the number of
# parameters, FLOPs, and the actual running speed.
-
+#
+# Usage:
+#   bash profile.sh <degradation> <model> <lr_size>
+#
+# Arguments:
+# degradation - degradation type, e.g., BD or BI
+# model - model name, e.g., EGVSR, TecoGAN, FRVSR, VESPCN, SOFVSR
+# lr_size - input size in format [color]x[height]x[width], e.g., 3x540x960
+#
+# Examples:
+# bash profile.sh BD EGVSR 3x270x480
+# bash profile.sh BI TecoGAN 3x540x960
# basic settings
degradation=$1
model=$2
+lr_size=$3
exp_id=001
gpu_id=0
-# specify the size of input data in the format of [color]x[height]x[weight]
-lr_size=$3
+if [ -z "$degradation" ] || [ -z "$model" ] || [ -z "$lr_size" ]; then
+ echo "Usage: bash profile.sh <degradation> <model> <lr_size>"
+ echo ""
+ echo "Arguments:"
+ echo " degradation - degradation type (BD or BI)"
+ echo " model - model name (EGVSR, TecoGAN, FRVSR, VESPCN, SOFVSR)"
+ echo " lr_size - input size as [color]x[height]x[width] (e.g., 3x540x960)"
+ echo ""
+ echo "Examples:"
+ echo " bash profile.sh BD EGVSR 3x270x480"
+ echo " bash profile.sh BI TecoGAN 3x540x960"
+ exit 1
+fi
# run
python ./codes/main.py \
diff --git a/test.sh b/test.sh
index 1f63992..03f0d61 100644
--- a/test.sh
+++ b/test.sh
@@ -1,7 +1,21 @@
#!/usr/bin/env bash
-# This script is used to test pretrained models. More specific setttings can
+# This script is used to test pretrained models. More specific settings can
# be found and modified in a test.yml file under the experiment dir
+#
+# Usage:
+#   bash test.sh <degradation> <model>
+#
+# Arguments:
+# degradation - degradation type, e.g., BD or BI
+# model - model name, e.g., EGVSR, TecoGAN, FRVSR, VESPCN, SOFVSR
+#
+# Examples:
+# bash test.sh BD EGVSR
+# bash test.sh BI TecoGAN
+#
+# The script expects the following directory structure:
+#   ./experiments_<degradation>/<model>/001/test.yml
# basic settings
degradation=$1
@@ -9,10 +23,37 @@ model=$2
gpu_id=0
exp_id=001
+if [ -z "$degradation" ] || [ -z "$model" ]; then
+ echo "Usage: bash test.sh <degradation> <model>"
+ echo ""
+ echo "Arguments:"
+ echo " degradation - degradation type (BD or BI)"
+ echo " model - model name (EGVSR, TecoGAN, FRVSR, VESPCN, SOFVSR)"
+ echo ""
+ echo "Examples:"
+ echo " bash test.sh BD EGVSR"
+ echo " bash test.sh BI TecoGAN"
+ exit 1
+fi
+
+exp_dir=./experiments_${degradation}/${model}/${exp_id}
+
+if [ ! -d "$exp_dir" ]; then
+ echo "Error: experiment directory not found: $exp_dir"
+ exit 1
+fi
+
+if [ ! -f "$exp_dir/test.yml" ]; then
+ echo "Error: test.yml not found in: $exp_dir"
+ exit 1
+fi
+
+echo "Testing model: ${model} (degradation: ${degradation})"
+echo "Experiment dir: ${exp_dir}"
# run
python ./codes/main.py \
- --exp_dir ./experiments_${degradation}/${model}/${exp_id} \
+ --exp_dir ${exp_dir} \
--mode test \
--model ${model} \
--opt test.yml \
From 9659d0ef9d0fd384fcb3ee7b37b7c33e88ae7bf0 Mon Sep 17 00:00:00 2001
From: Cursor Agent
Date: Wed, 25 Mar 2026 04:16:39 +0000
Subject: [PATCH 4/4] docs: update README about broken testing dataset download
links (fixes #7, #12, #16, #23)
- Mark the original Baidu Netdisk link as no longer available
- Add alternative guidance for obtaining testing datasets
Co-authored-by: Thmen
---
README.md | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 1bbe5b5..066f718 100644
--- a/README.md
+++ b/README.md
@@ -57,7 +57,12 @@ Download the official training dataset based on the instructions in [TecoGAN-Ten
-You can get them at :arrow_double_down: [百度网盘](https://pan.baidu.com/s/1lKyLJ5u6lrrXejyljao0Mw) (提取码:8tqc) and put them into :file_folder: [Datasets](data).
+~~You can get them at :arrow_double_down: [百度网盘](https://pan.baidu.com/s/1lKyLJ5u6lrrXejyljao0Mw) (提取码:8tqc) and put them into :file_folder: [Datasets](data).~~
+
+> **Note**: The original Baidu Netdisk download link is no longer available. You can prepare the testing datasets manually:
+> - **Vid4**: A widely-used VSR benchmark. You can find it in many VSR repos such as [BasicSR](https://github.com/xinntao/BasicSR).
+> - **ToS3**: Three video sequences from the TecoGAN paper.
+> - Ensure GT images are in PNG format under `data/<dataset>/GT/<sequence>/` and LR images under `data/<dataset>/Gaussian4xLR/<sequence>/`.
The following shows the structure of the above three datasets.
```tex
data