
Commit

Merge pull request #117 from LoSealL/dev
Dev Update v1.0.6
LoSealL authored Jul 29, 2020
2 parents 16b38ee + 37c17bc commit 9d0e38e
Showing 201 changed files with 4,711 additions and 5,284 deletions.
10 changes: 9 additions & 1 deletion CHANGELOG.md
@@ -1,4 +1,12 @@
1.0.5
1.0.6.1

## 1.0.6
## 2020-07
- Update TF backend
- Add support for tensorflow 2.0 (both legacy and eager mode)
- Refactor torch backend models
- Add `--caching_dataset` to cache transformed data into memory (ignored when `memory_limit` is set).
- Fix FastMetrics multi-threading issue

## 1.0.5
## 2020-05
3 changes: 2 additions & 1 deletion Docs/HowTo/Change backend.md
@@ -5,7 +5,8 @@ for some of models.
Edit the config file `~/.vsr/config.yml` if you'd like to change to tensorflow
(create one if it doesn't exist):
```yaml
# the backend could be 'tensorflow', 'tensorflow2', 'pytorch'
# the backend could be 'tensorflow', 'keras', 'pytorch'
# the `keras` represents tensorflow v2.0
backend: tensorflow
# the verbose could be 'error', 'warning', 'info', 'debug'
verbose: info
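
The `keras` value added above selects the new TensorFlow 2.0 backend. A minimal sketch (standard library only, not part of this commit) of creating the config file described in this how-to:

```python
# Minimal sketch: write ~/.vsr/config.yml selecting the TF-2.0 ("keras") backend.
# The path and key names are taken from the documentation above.
from pathlib import Path

config = Path.home() / ".vsr" / "config.yml"
config.parent.mkdir(parents=True, exist_ok=True)
config.write_text(
    "# the backend could be 'tensorflow', 'keras', 'pytorch'\n"
    "backend: keras\n"
    "# the verbose could be 'error', 'warning', 'info', 'debug'\n"
    "verbose: info\n"
)
print(f"Wrote {config}")
```
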
2 changes: 1 addition & 1 deletion Tests/motion_test.py
@@ -7,7 +7,7 @@
if not os.getcwd().endswith('Tests'):
os.chdir('Tests')
from VSR.Backend.TF.Framework import Motion as M
from VSR.Backend.Torch.Models.video import motion as MT
from VSR.Backend.Torch.Models.Ops import Motion as MT
from VSR.DataLoader.FloDecoder import open_flo, KITTI

import tensorflow as tf
2 changes: 1 addition & 1 deletion Tests/space_to_depth_test.py
@@ -13,7 +13,7 @@
import torch
import torchvision
from torch.nn import PixelShuffle
from VSR.Backend.Torch.Models.Arch import SpaceToDim
from VSR.Backend.Torch.Models.Ops.Scale import SpaceToDim
except ImportError:
exit(0)

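
The updated import points the test at `VSR.Backend.Torch.Models.Ops.Scale.SpaceToDim`, which the test imports alongside `torch.nn.PixelShuffle`. As a standalone, hedged illustration of the space-to-depth / depth-to-space relationship (independent of the repo's `SpaceToDim`; `pixel_unshuffle` requires PyTorch >= 1.8):

```python
# Space-to-depth and depth-to-space are inverse rearrangements; PixelShuffle is
# the module form of the latter. Illustration only, not repo code.
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 16, 16)
y = F.pixel_unshuffle(x, downscale_factor=2)  # space-to-depth -> (2, 12, 8, 8)
z = F.pixel_shuffle(y, upscale_factor=2)      # depth-to-space -> (2, 3, 16, 16)
print(y.shape, torch.allclose(x, z))          # the round trip is lossless
```
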
4 changes: 3 additions & 1 deletion Tests/training_test.py
@@ -11,7 +11,8 @@
os.chdir('Tests')

_WORKDIR = r"/tmp/vsr/utest/"
_TCMD = r"python train.py {} --data_config=../Tests/data/fake_datasets.yml --dataset=normal --epochs=1 --steps=1 --save_dir={}"
_TCMD = ("python train.py {} --data_config=../Tests/data/fake_datasets.yml"
"--dataset=normal --epochs=1 --steps=1 --save_dir={} --val_steps=1")
_ECMD = r"python eval.py {} --save_dir={} --ensemble -t=../Tests/data/set5_x2"


@@ -43,6 +44,7 @@ def test_other_models():
'sofvsr', 'vespcn', 'frvsr', 'qprn', 'ufvsr', 'yovsr', 'tecogan',
'spmc', 'rbpn'
):
# skip video model
continue
train(k)
eval(k)
55 changes: 37 additions & 18 deletions Tools/FastMetrics.py
@@ -4,13 +4,13 @@
# Update Date: 6/6/19, 10:35 AM

import argparse
import multiprocessing as mp
from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import tqdm
from PIL import Image
from skimage.measure import compare_ssim
from skimage.metrics import structural_similarity

from VSR.Util.ImageProcess import rgb_to_yuv

@@ -24,22 +24,36 @@
FLAGS = parser.parse_args()


def split_path_filter(x: Path):
try:
x = x.resolve()
# path, glob pattern, recursive
return x, '*', False
except OSError:
print(str(x.as_posix()))
pattern = x.name
rec = False
x = x.parent
if '*' in x.name:
x = x.parent
rec = True
print(x, pattern, rec)
return x, pattern, rec


def gen():
d1 = Path(FLAGS.input_dir)
d2 = Path(FLAGS.reference_dir)
d1, d1_filter, d1_rec = split_path_filter(d1)
d2, d2_filter, d2_rec = split_path_filter(d2)

assert d1.exists() and d2.exists(), "Path not found!"
assert len(list(d1.iterdir())) == len(list(d2.iterdir())), f"{d1} v {d2}"

for x, y in zip(sorted(d1.iterdir()), sorted(d2.iterdir())):
if x.is_dir() and y.is_dir():
assert len(list(x.iterdir())) == len(list(y.iterdir())), f"{x} v {y}"
for i, j in zip(sorted(x.iterdir()), sorted(y.iterdir())):
if i.is_file() and j.is_file():
yield i, j
else:
print(f" [!] Found {i} v.s. {j} not file.")
elif x.is_file() and y.is_file():
d1 = sorted(d1.rglob(d1_filter)) if d1_rec else sorted(d1.glob(d1_filter))
d2 = sorted(d2.rglob(d2_filter)) if d2_rec else sorted(d2.glob(d2_filter))
assert len(d1) == len(d2), f"{len(d1)} v {len(d2)}"

for x, y in zip(d1, d2):
if x.is_file() and y.is_file():
yield x, y
else:
print(f" [!] Found {x} v.s. {y} mismatch.")
@@ -54,9 +68,14 @@ def main():
def action(x, y):
xname = f'{x.parent.name}/{x.stem}'
yname = f'{y.parent.name}/{y.stem}'
x = Image.open(x)
y = Image.open(y)
assert x.width == y.width and x.height == y.height, "Image size mismatch!"
x = Image.open(x).convert('RGB')
y = Image.open(y).convert('RGB')
if x.width != y.width or x.height != y.height:
# print(f"Image size mismatch {x.width}x{x.height} != {y.width}x{y.height}")
min_w = min(x.width, y.width)
min_h = min(x.height, y.height)
x = x.crop([0, 0, min_w, min_h])
y = y.crop([0, 0, min_w, min_h])
xx = np.asarray(x, dtype=np.float) / 255.0
yy = np.asarray(y, dtype=np.float) / 255.0
if FLAGS.l_only:
@@ -69,14 +88,14 @@ def action(x, y):
psnr = np.log10(1.0 / mse) * 10.0
info = {"x": xname, "y": yname}
if FLAGS.ssim:
ssim = compare_ssim(xx, yy, multichannel=True)
ssim = structural_similarity(xx, yy, multichannel=True)
info.update(SSIM=ssim)
info.update(PSNR=psnr)
info.update(MSE=mse)
return info

if FLAGS.multithread:
pool = mp.pool.ThreadPool()
pool = ThreadPool()
results = [pool.apply_async(action, (i, j)) for i, j in gen()]
with tqdm.tqdm(results) as r:
for info in r:
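
The hunks above replace the removed `skimage.measure.compare_ssim` with `skimage.metrics.structural_similarity` (its name since scikit-image 0.16) and take `ThreadPool` from `multiprocessing.pool` directly. A self-contained sketch of the PSNR/SSIM computation the tool performs, assuming images already normalized to [0, 1] as in `action` above:

```python
# Sketch only, not FastMetrics.py itself: PSNR and SSIM on two [0, 1] arrays.
# Note: newer scikit-image releases prefer channel_axis=-1 over multichannel=True.
import numpy as np
from skimage.metrics import structural_similarity

x = np.random.rand(64, 64, 3)
y = np.clip(x + np.random.normal(0.0, 0.05, x.shape), 0.0, 1.0)

mse = np.mean((x - y) ** 2)
psnr = 10.0 * np.log10(1.0 / mse)
ssim = structural_similarity(x, y, multichannel=True, data_range=1.0)
print(f"PSNR={psnr:.2f} dB, SSIM={ssim:.4f}")
```
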
4 changes: 3 additions & 1 deletion Train/check_dataset.py
@@ -4,6 +4,7 @@
# Update: 2020 - 4 - 17

import argparse
from pathlib import Path

from VSR.DataLoader import load_datasets

@@ -48,11 +49,12 @@ def _check(name: str):


if __name__ == '__main__':
CWD = Path(__file__).resolve().parent.parent
parser = argparse.ArgumentParser(
description="Check the dataset and print out its content")
parser.add_argument("dataset", type=str,
help="The name of the dataset, case insensitive.")
parser.add_argument("--description-file", default="../Data/datasets.yaml",
parser.add_argument("--description-file", default=f"{CWD}/Data/datasets.yaml",
help="DDF file")
flags = parser.parse_args()
main(flags.dataset, flags.description_file)
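
This file and `Train/eval.py` below both switch their default paths from working-directory-relative strings (`../Data/datasets.yaml`) to paths anchored at the repository root via `Path(__file__)`. A short sketch of the pattern:

```python
# Anchor defaults at the script's own location so the tool works from any cwd.
# Mirrors the CWD definition added above; illustration only.
from pathlib import Path

CWD = Path(__file__).resolve().parent.parent   # Train/<script>.py -> repo root
default_ddf = CWD / "Data" / "datasets.yaml"
print(default_ddf)                             # same path from any working directory
```
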
8 changes: 5 additions & 3 deletions Train/eval.py
@@ -13,13 +13,14 @@
Config, compat_param, save_inference_images, suppress_opt_by_args
)

CWD = Path(__file__).resolve().parent.parent
parser = argparse.ArgumentParser(description=f'VSR ({BACKEND}) Testing Tool v1.0')
g0 = parser.add_argument_group("basic options")
g0.add_argument("model", choices=list_supported_models(), help="specify the model name")
g0.add_argument("-p", "--parameter", help="specify the model parameter file (*.yaml)")
g0.add_argument("-t", "--test", nargs='*', help="specify test dataset name or data path")
g0.add_argument("--save_dir", default='../Results', help="working directory")
g0.add_argument("--data_config", default="../Data/datasets.yaml", help="specify dataset config file")
g0.add_argument("--save_dir", default=f'{CWD}/Results', help="working directory")
g0.add_argument("--data_config", default=f"{CWD}/Data/datasets.yaml", help="specify dataset config file")
g1 = parser.add_argument_group("evaluating options")
g1.add_argument("--pretrain", help="specify the pre-trained model checkpoint or will search into `save_dir` if not specified")
g1.add_argument("--ensemble", action="store_true")
@@ -66,9 +67,10 @@ def main():
if opt.parameter:
model_config_file = Path(opt.parameter)
else:
model_config_file = Path(f'par/{BACKEND}/{opt.model}.{_ext}')
model_config_file = Path(f'{CWD}/Train/par/{BACKEND}/{opt.model}.{_ext}')
if model_config_file.exists():
opt.update(compat_param(Config(str(model_config_file))))
break
# get model parameters from pre-defined YAML file
model_params = opt.get(opt.model, {})
suppress_opt_by_args(model_params, *args)
18 changes: 18 additions & 0 deletions Train/par/keras/srcnn.yaml
@@ -0,0 +1,18 @@
# srcnn 9-5-5
---
srcnn:
layers: 3
filters:
- 9
- 1
- 5
scale: 4
channel: 1

batch: 4
patch_size: 16
lr: 1.0e-4
lr_decay:
method: multistep
decay_step: [10000, 15000]
decay_rate: 0.1
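
This new parameter file targets the TF-2.0 (`keras`) backend introduced in this release. A hedged sketch of what a 3-layer SRCNN could look like in `tf.keras`, assuming the `filters` list above denotes kernel sizes (9-1-5) with `channel: 1`; the 64/32 feature-map counts come from the original SRCNN paper, not from this repo:

```python
# Hedged sketch of a 3-layer SRCNN in tf.keras; kernel sizes 9/1/5 and channel: 1
# follow the YAML above, while the 64/32 filter counts are assumptions.
import tensorflow as tf

def srcnn(channel: int = 1) -> tf.keras.Model:
    lr = tf.keras.Input(shape=(None, None, channel))  # pre-upscaled input image
    x = tf.keras.layers.Conv2D(64, 9, padding="same", activation="relu")(lr)
    x = tf.keras.layers.Conv2D(32, 1, padding="same", activation="relu")(x)
    sr = tf.keras.layers.Conv2D(channel, 5, padding="same")(x)
    return tf.keras.Model(lr, sr, name="srcnn")

srcnn().summary()
```
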
1 change: 1 addition & 0 deletions Train/par/pytorch/carn.yml
@@ -3,6 +3,7 @@ carn:
channel: 3
  multi_scale: 1 # change to 1 if using the official pth file
group: 1
clip: 10

batch_shape: [16, 3, 64, 64]
lr: 1.0e-4
6 changes: 6 additions & 0 deletions Train/par/pytorch/cubic.yml
@@ -0,0 +1,6 @@
cubic:
scale: 4
channel: 3
depth: 1

batch_shape: [16, 3, 32, 32]
3 changes: 1 addition & 2 deletions Train/par/pytorch/dbpn.yml
@@ -1,7 +1,6 @@
dbpn:
scale: 4
mode: 'dbpn'
num_channels: 3
channel: 3
base_filter: 64
feat: 256
num_stages: 7
4 changes: 1 addition & 3 deletions Train/par/pytorch/edsr.yml
@@ -1,10 +1,8 @@
edsr:
scale: 4
channel: 3
n_resblocks: 16
n_feats: 64
rgb_range: 255
res_scale: 1
n_colors: 3

batch_shape: [8, 3, 48, 48]
lr: 1.0e-4
7 changes: 1 addition & 6 deletions Train/par/pytorch/esrgan.yml
@@ -1,14 +1,9 @@
esrgan:
scale: 4
in_nc: 3
out_nc: 3
channel: 3
nf: 64
nb: 23
gc: 32
act_type: 'leakyrelu'
mode: 'CNA'
res_scale: 1
upsample_mode: 'upconv'
weights: [0.01, 1, 5.0e-3]
patch_size: 128

2 changes: 1 addition & 1 deletion Train/par/pytorch/msrn.yml
@@ -1,7 +1,7 @@
msrn:
scale: 4
channel: 3
rgb_range: 255
n_colors: 3

batch_shape: [16, 3, 32, 32]
lr: 1.0e-4
3 changes: 1 addition & 2 deletions Train/par/pytorch/rcan.yml
@@ -1,12 +1,11 @@
rcan:
scale: 4
channel: 3
n_resgroups: 10
n_resblocks: 20
n_feats: 64
reduction: 16
rgb_range: 255
n_colors: 3
res_scale: 1.0

batch_shape: [16, 3, 32, 32]
lr: 1.0e-4
10 changes: 10 additions & 0 deletions Train/par/pytorch/realsr.yml
@@ -0,0 +1,10 @@
realsr:
scale: 4
channel: 3
nf: 64
nb: 23
pixel_weight: !!float 1
feature_weight: !!float 0
gan_weight: !!float 0

batch_shape: [16, 3, 64, 64]
10 changes: 6 additions & 4 deletions Train/par/pytorch/srmd.yml
@@ -2,12 +2,14 @@
srmd:
scale: 4
channel: 3
layers: 12
filters: 128
degradation:
kernel_type: 'isotropic' # isotropic or anisotropic
l1: 2.0 # scaling of eigen values on base 0. [0.1, 10]
l2: 2.0 # scaling of eigen values on base 1. [0.1, l1]
kernel_type: 'anisotropic' # isotropic or anisotropic
l1: 0.1 # scaling of eigen values on base 0. [0.1, 10]
l2: 0.1 # scaling of eigen values on base 1. [0.1, l1]
theta: 0.0 # rotation angle (rad) of the kernel. [0, pi]
noise: 5.0 # noise stddev (0, 75]
noise: 5 # noise stddev (0, 75]

batch_shape: [16, 3, 64, 64]
lr: 1.0e-4
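
The `degradation` block above describes the blur kernel through two eigenvalue scalings (`l1`, `l2`) and a rotation angle `theta`. A hedged sketch (not the repo's implementation) of how an anisotropic Gaussian kernel can be built from those three numbers:

```python
# Illustration only: build an anisotropic Gaussian kernel from l1, l2 (eigenvalue
# scalings of the covariance) and theta (rotation in radians).
import numpy as np

def aniso_gaussian_kernel(size=15, l1=0.1, l2=0.1, theta=0.0):
    c, s = np.cos(theta), np.sin(theta)
    R = np.array([[c, -s], [s, c]])
    cov = R @ np.diag([l1, l2]) @ R.T              # 2x2 covariance matrix
    grid = np.stack(np.meshgrid(*[np.arange(size) - size // 2] * 2), axis=-1)
    k = np.exp(-0.5 * np.einsum("...i,ij,...j->...",
                                grid, np.linalg.inv(cov), grid))
    return k / k.sum()                             # normalize to unit sum

print(aniso_gaussian_kernel(theta=np.pi / 4).shape)  # (15, 15)
```
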
22 changes: 11 additions & 11 deletions Train/par/tensorflow/carn.yaml
@@ -1,16 +1,16 @@
carn:
recursive: false
n_residual: 3
n_blocks: 3
filters: 64
clip: 10
weight_decay: 0
scale: 4
channel: 3
recursive: false
n_residual: 3
n_blocks: 3
filters: 64
clip: 10
weight_decay: 0
scale: 4
channel: 3

batch_shape: [4, 16, 16, 3]
lr: 1.0e-4
lr_decay:
method: multistep
decay_step: [150000]
decay_rate: 0.1
method: multistep
decay_step: [150000]
decay_rate: 0.1
16 changes: 8 additions & 8 deletions Train/par/tensorflow/dbpn.yaml
@@ -1,13 +1,13 @@
dbpn:
bp_layers: 7
use_dense: true
scale: 4
channel: 3
bp_layers: 7
use_dense: true
scale: 4
channel: 3

patch_size: 96
batch: 16
lr: 1.0e-6
lr: 1.0e-4
lr_decay:
method: multistep
decay_step: [60000, 160000]
decay_rate: 1
method: multistep
decay_step: [60000, 160000]
decay_rate: 1

