
Commit 0e5332f

Linting (#207)
* add pylintrc
* python3 style super
* add
* lint
* no (object)
* tiny
* ha
* typos
* typo
* typo
* lint
* lint
* more lint
* Fix out_channels unused bug in EDVRNet
* lint

Signed-off-by: lizz <[email protected]>

1 parent dcc2159 commit 0e5332f

File tree: 110 files changed, +994 -379 lines changed


.pylintrc

+621
Large diffs are not rendered by default.

mmedit/__init__.py

+5 -5

@@ -7,15 +7,15 @@


 def digit_version(version_str):
-    digit_version = []
+    digit_ver = []
     for x in version_str.split('.'):
         if x.isdigit():
-            digit_version.append(int(x))
+            digit_ver.append(int(x))
         elif x.find('rc') != -1:
             patch_version = x.split('rc')
-            digit_version.append(int(patch_version[0]) - 1)
-            digit_version.append(int(patch_version[1]))
-    return digit_version
+            digit_ver.append(int(patch_version[0]) - 1)
+            digit_ver.append(int(patch_version[1]))
+    return digit_ver


 mmcv_min_version = digit_version(MMCV_MIN)
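
For reference, a minimal sketch of what the renamed helper computes, using the function body shown in the hunk above; the sample version strings are illustrative and not taken from the repository:

def digit_version(version_str):
    # Same logic as the hunk above: turn a version string into a list of ints
    # so that versions (including 'rc' pre-releases) compare element-wise.
    digit_ver = []
    for x in version_str.split('.'):
        if x.isdigit():
            digit_ver.append(int(x))
        elif x.find('rc') != -1:
            patch_version = x.split('rc')
            digit_ver.append(int(patch_version[0]) - 1)
            digit_ver.append(int(patch_version[1]))
    return digit_ver

assert digit_version('1.0.5') == [1, 0, 5]
assert digit_version('1.0rc1') == [1, -1, 1]   # 'rc' releases sort before the final release
assert digit_version('1.0rc1') < digit_version('1.0.0')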

mmedit/apis/test.py

+28 -27

@@ -162,21 +162,21 @@ def collect_results_cpu(result_part, size, tmpdir=None):
     # collect all parts
     if rank != 0:
         return None
-    else:
-        # load results of all parts from tmp dir
-        part_list = []
-        for i in range(world_size):
-            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
-            part_list.append(mmcv.load(part_file))
-        # sort the results
-        ordered_results = []
-        for res in zip(*part_list):
-            ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        ordered_results = ordered_results[:size]
-        # remove tmp dir
-        shutil.rmtree(tmpdir)
-        return ordered_results
+
+    # load results of all parts from tmp dir
+    part_list = []
+    for i in range(world_size):
+        part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
+        part_list.append(mmcv.load(part_file))
+    # sort the results
+    ordered_results = []
+    for res in zip(*part_list):
+        ordered_results.extend(list(res))
+    # the dataloader may pad some samples
+    ordered_results = ordered_results[:size]
+    # remove tmp dir
+    shutil.rmtree(tmpdir)
+    return ordered_results


 def collect_results_gpu(result_part, size):

@@ -211,15 +211,16 @@ def collect_results_gpu(result_part, size):
     # gather all result part
     dist.all_gather(part_recv_list, part_send)

-    if rank == 0:
-        part_list = []
-        for recv, shape in zip(part_recv_list, shape_list):
-            part_list.append(
-                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
-        # sort the results
-        ordered_results = []
-        for res in zip(*part_list):
-            ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        ordered_results = ordered_results[:size]
-        return ordered_results
+    if rank != 0:
+        return None
+
+    part_list = []
+    for recv, shape in zip(part_recv_list, shape_list):
+        part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
+    # sort the results
+    ordered_results = []
+    for res in zip(*part_list):
+        ordered_results.extend(list(res))
+    # the dataloader may pad some samples
+    ordered_results = ordered_results[:size]
+    return ordered_results
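
Both hunks apply the same early-return rewrite that pylint's no-else-return check asks for: the else branch after a return is dropped and its body dedented. Behaviour is unchanged: ranks other than 0 still get None, and rank 0 gets the ordered, de-padded result list. A toy illustration of the pattern (names are illustrative, not from this commit):

def read_or_default_before(path, default=''):
    if path is None:
        return default
    else:                      # pylint flags this else as redundant
        with open(path) as f:
            return f.read()

def read_or_default_after(path, default=''):
    if path is None:
        return default         # early return: the else block is not needed
    with open(path) as f:
        return f.read()

assert read_or_default_before(None) == read_or_default_after(None) == ''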

mmedit/core/distributed_wrapper.py

+1 -1

@@ -53,7 +53,7 @@ def __init__(self,
                  broadcast_buffers=False,
                  find_unused_parameters=False,
                  **kwargs):
-        super(DistributedDataParallelWrapper, self).__init__()
+        super().__init__()
        assert len(device_ids) == 1, (
            'Currently, DistributedDataParallelWrapper only supports one'
            'single CUDA device for each process.'
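
The same super() change appears in most files below; inside a class body, the Python 3 zero-argument form is equivalent to the explicit two-argument spelling. A toy check (classes are illustrative only):

class Base:
    def __init__(self):
        self.ready = True

class OldStyle(Base):
    def __init__(self):
        super(OldStyle, self).__init__()   # Python 2 compatible spelling

class NewStyle(Base):
    def __init__(self):
        super().__init__()                 # zero-argument form, Python 3 only

assert OldStyle().ready and NewStyle().ready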

mmedit/core/evaluation/eval_hooks.py

+1 -2

@@ -80,8 +80,7 @@ def __init__(self,
                  interval=1,
                  gpu_collect=False,
                  **eval_kwargs):
-        super(DistEvalIterHook, self).__init__(dataloader, interval,
-                                               **eval_kwargs)
+        super().__init__(dataloader, interval, **eval_kwargs)
         self.gpu_collect = gpu_collect

     def after_train_iter(self, runner):

mmedit/core/evaluation/metric_utils.py

+3 -1

@@ -61,7 +61,9 @@ def gauss_filter(sigma, epsilon=1e-2):
 def gauss_gradient(img, sigma):
     """Gaussian gradient.

-    From https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/submissions/8060/versions/2/previews/gaussgradient/gaussgradient.m/index.html # noqa
+    From https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/
+    submissions/8060/versions/2/previews/gaussgradient/gaussgradient.m/
+    index.html

     Args:
         img (ndarray): Input image.

mmedit/core/evaluation/metrics.py

+12 -13

@@ -19,8 +19,8 @@ def sad(alpha, trimap, pred_alpha):
     assert (pred_alpha[trimap == 255] == 255).all()
     alpha = alpha.astype(np.float64) / 255
     pred_alpha = pred_alpha.astype(np.float64) / 255
-    sad = np.abs(pred_alpha - alpha).sum() / 1000
-    return sad
+    sad_result = np.abs(pred_alpha - alpha).sum() / 1000
+    return sad_result


 def mse(alpha, trimap, pred_alpha):

@@ -35,10 +35,10 @@ def mse(alpha, trimap, pred_alpha):
     pred_alpha = pred_alpha.astype(np.float64) / 255
     weight_sum = (trimap == 128).sum()
     if weight_sum != 0:
-        mse = ((pred_alpha - alpha)**2).sum() / weight_sum
+        mse_result = ((pred_alpha - alpha)**2).sum() / weight_sum
     else:
-        mse = 0
-    return mse
+        mse_result = 0
+    return mse_result


 def gradient_error(alpha, trimap, pred_alpha, sigma=1.4):

@@ -100,7 +100,6 @@ def connectivity(alpha, trimap, pred_alpha, step=0.1):
     alpha = alpha.astype(np.float32) / 255
     pred_alpha = pred_alpha.astype(np.float32) / 255

-    height, width = alpha.shape
     thresh_steps = np.arange(0, 1 + step, step)
     round_down_map = -np.ones_like(alpha)
     for i in range(1, len(thresh_steps)):

@@ -196,10 +195,10 @@ def psnr(img1, img2, crop_border=0, input_order='HWC'):
         img1 = img1[crop_border:-crop_border, crop_border:-crop_border, None]
         img2 = img2[crop_border:-crop_border, crop_border:-crop_border, None]

-    mse = np.mean((img1 - img2)**2)
-    if mse == 0:
+    mse_value = np.mean((img1 - img2)**2)
+    if mse_value == 0:
         return float('inf')
-    return 20. * np.log10(255. / np.sqrt(mse))
+    return 20. * np.log10(255. / np.sqrt(mse_value))


 def _ssim(img1, img2):

@@ -280,7 +279,7 @@ def ssim(img1, img2, crop_border=0, input_order='HWC'):
     return np.array(ssims).mean()


-class L1Evaluation(object):
+class L1Evaluation:
     """L1 evaluation metric.

     Args:

@@ -347,8 +346,8 @@ def compute_feature(block):
     # the products of pairs of adjacent coefficients computed along
     # horizontal, vertical and diagonal orientations.
     shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
-    for i in range(len(shifts)):
-        shifted_block = np.roll(block, shifts[i], axis=(0, 1))
+    for shift in shifts:
+        shifted_block = np.roll(block, shift, axis=(0, 1))
         alpha, beta_l, beta_r = estimate_aggd_param(block * shifted_block)
         mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha))
         feat.extend([alpha, mean, beta_l, beta_r])

@@ -408,7 +407,7 @@ def niqe_core(img,
     feat = []
     for idx_w in range(num_block_w):
         for idx_h in range(num_block_h):
-            # process ecah block
+            # process each block
             block = img_nomalized[idx_h * block_size_h //
                                   scale:(idx_h + 1) * block_size_h //
                                   scale, idx_w * block_size_w //
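
The renames in sad, mse and psnr only avoid shadowing the enclosing function's name with a local variable; the formulas are untouched. A small self-contained check of the PSNR formula used above, with illustrative pixel values (not repository code):

import numpy as np

def psnr_sketch(img1, img2):
    # Same formula as the hunk above: 20 * log10(255 / sqrt(MSE)) for 8-bit
    # images, with identical images mapped to +inf because their MSE is zero.
    mse_value = np.mean((img1.astype(np.float64) - img2.astype(np.float64))**2)
    if mse_value == 0:
        return float('inf')
    return 20. * np.log10(255. / np.sqrt(mse_value))

a = np.zeros((4, 4), dtype=np.uint8)
b = np.full((4, 4), 10, dtype=np.uint8)   # every pixel differs by 10, so MSE = 100
print(psnr_sketch(a, a))                  # inf
print(round(psnr_sketch(a, b), 2))        # ~28.13 dB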

mmedit/core/mask.py

+1 -1

@@ -282,7 +282,7 @@ def random_irregular_mask(img_shape,
             angle = 2 * math.pi - angle
         length = length_list[direct_n]
         brush_w = brush_width_list[direct_n]
-        # compute end point accoriding to the random angle
+        # compute end point according to the random angle
         end_x = (start_x + length * np.sin(angle)).astype(np.int32)
         end_y = (start_y + length * np.cos(angle)).astype(np.int32)
mmedit/core/optimizer/builder.py

+3 -2

@@ -46,11 +46,12 @@ def build_optimizers(model, cfgs):
     for key, cfg in cfgs.items():
         if not isinstance(cfg, dict):
             is_dict_of_dict = False
+
     if is_dict_of_dict:
         for key, cfg in cfgs.items():
             cfg_ = cfg.copy()
             module = getattr(model, key)
             optimizers[key] = build_optimizer(module, cfg_)
         return optimizers
-    else:
-        return build_optimizer(model, cfgs)
+
+    return build_optimizer(model, cfgs)
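
The refactor keeps the two code paths of build_optimizers: a dict of dicts builds one optimizer per named submodule, while a flat config falls through to a single build_optimizer call. A hedged usage sketch, assuming torch is installed and that build_optimizers is exported from mmedit.core; the toy model and the config values are illustrative:

import torch.nn as nn
from mmedit.core import build_optimizers   # assumed import path

class ToyGAN(nn.Module):
    def __init__(self):
        super().__init__()
        self.generator = nn.Linear(8, 8)
        self.discriminator = nn.Linear(8, 1)

model = ToyGAN()

# dict of dicts: one optimizer per named submodule, returned as a dict
optims = build_optimizers(model, dict(
    generator=dict(type='Adam', lr=1e-4),
    discriminator=dict(type='Adam', lr=4e-4)))

# flat dict: a single optimizer over the whole model
optim = build_optimizers(model, dict(type='Adam', lr=2e-4))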

mmedit/core/scheduler/lr_updater.py

+7 -6

@@ -19,7 +19,7 @@ class LinearLrUpdaterHook(LrUpdaterHook):
     """

     def __init__(self, target_lr=0, start=0, interval=1, **kwargs):
-        super(LinearLrUpdaterHook, self).__init__(**kwargs)
+        super().__init__(**kwargs)
         self.target_lr = target_lr
         self.start = start
         self.interval = interval

@@ -41,10 +41,11 @@ def get_lr(self, runner, base_lr):
         progress = runner.iter
         max_progress = runner.max_iters
         assert max_progress >= self.start
+
         if max_progress == self.start:
             return base_lr
-        else:
-            # Before 'start', fix lr; After 'start', linearly update lr.
-            factor = (max(0, progress - self.start) // self.interval) / (
-                (max_progress - self.start) // self.interval)
-            return base_lr + (self.target_lr - base_lr) * factor
+
+        # Before 'start', fix lr; After 'start', linearly update lr.
+        factor = (max(0, progress - self.start) // self.interval) / (
+            (max_progress - self.start) // self.interval)
+        return base_lr + (self.target_lr - base_lr) * factor
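
A quick numeric check of the linear schedule above, with illustrative values (no config in this commit uses these numbers): before start the factor is 0 and the lr stays at base_lr; afterwards it moves linearly towards target_lr.

base_lr, target_lr = 4e-4, 0.0
start, interval = 100000, 1
max_iters = 300000

def linear_lr(progress):
    # Same arithmetic as LinearLrUpdaterHook.get_lr in the hunk above.
    if max_iters == start:
        return base_lr
    factor = (max(0, progress - start) // interval) / (
        (max_iters - start) // interval)
    return base_lr + (target_lr - base_lr) * factor

print(linear_lr(50000))    # 0.0004: before `start`, lr is fixed at base_lr
print(linear_lr(200000))   # 0.0002: halfway through the decay window
print(linear_lr(300000))   # 0.0: fully decayed to target_lr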

mmedit/datasets/base_dataset.py

+4 -4

@@ -22,7 +22,7 @@ class BaseDataset(Dataset, metaclass=ABCMeta):
     """

     def __init__(self, pipeline, test_mode=False):
-        super(BaseDataset, self).__init__()
+        super().__init__()
         self.test_mode = test_mode
         self.pipeline = Compose(pipeline)

@@ -71,7 +71,7 @@ def __getitem__(self, idx):
        Args:
            idx (int): Index for getting each item.
        """
-        if not self.test_mode:
-            return self.prepare_train_data(idx)
-        else:
+        if self.test_mode:
             return self.prepare_test_data(idx)
+
+        return self.prepare_train_data(idx)

mmedit/datasets/base_matting_dataset.py

+1 -1

@@ -10,7 +10,7 @@ class BaseMattingDataset(BaseDataset):
     """

     def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
-        super(BaseMattingDataset, self).__init__(pipeline, test_mode)
+        super().__init__(pipeline, test_mode)
         self.ann_file = str(ann_file)
         self.data_prefix = str(data_prefix)
         self.data_infos = self.load_annotations()

mmedit/datasets/base_sr_dataset.py

+1 -1

@@ -16,7 +16,7 @@ class BaseSRDataset(BaseDataset):
     """

     def __init__(self, pipeline, scale, test_mode=False):
-        super(BaseSRDataset, self).__init__(pipeline, test_mode)
+        super().__init__(pipeline, test_mode)
         self.scale = scale

     @staticmethod

mmedit/datasets/dataset_wrappers.py

+1 -1

@@ -2,7 +2,7 @@


 @DATASETS.register_module()
-class RepeatDataset(object):
+class RepeatDataset:
     """A wrapper of repeated dataset.

     The length of repeated dataset will be `times` larger than the original
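
Dropping the explicit (object) base here and elsewhere in this commit is purely stylistic: in Python 3 every class is already new-style, so the two declarations below define equivalent classes (toy example, not repository code):

class WithExplicitBase(object):
    pass

class WithImplicitBase:
    pass

assert WithExplicitBase.__mro__[-1] is object
assert WithImplicitBase.__mro__[-1] is object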

mmedit/datasets/generation_paired_dataset.py

+1 -1

@@ -21,7 +21,7 @@ class GenerationPairedDataset(BaseGenerationDataset):
     """

     def __init__(self, dataroot, pipeline, test_mode=False):
-        super(GenerationPairedDataset, self).__init__(pipeline, test_mode)
+        super().__init__(pipeline, test_mode)
         phase = 'test' if test_mode else 'train'
         self.dataroot = osp.join(str(dataroot), phase)
         self.data_infos = self.load_annotations()

mmedit/datasets/generation_unpaired_dataset.py

+1 -1

@@ -25,7 +25,7 @@ class GenerationUnpairedDataset(BaseGenerationDataset):
     """

     def __init__(self, dataroot, pipeline, test_mode=False):
-        super(GenerationUnpairedDataset, self).__init__(pipeline, test_mode)
+        super().__init__(pipeline, test_mode)
         phase = 'test' if test_mode else 'train'
         self.dataroot_a = osp.join(str(dataroot), phase + 'A')
         self.dataroot_b = osp.join(str(dataroot), phase + 'B')

mmedit/datasets/img_inpainting_dataset.py

+1 -1

@@ -10,7 +10,7 @@ class ImgInpaintingDataset(BaseDataset):
     """

     def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
-        super(ImgInpaintingDataset, self).__init__(pipeline, test_mode)
+        super().__init__(pipeline, test_mode)
         self.ann_file = str(ann_file)
         self.data_prefix = str(data_prefix)
         self.data_infos = self.load_annotations()
