Pylint 2.10 update #937

Merged: 16 commits, Sep 24, 2021
Changes from 6 commits
12 changes: 6 additions & 6 deletions examples/torch/classification/main.py
@@ -21,15 +21,15 @@
from typing import Any

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch.backends import cudnn
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision import models
from torchvision import transforms
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision.datasets import CIFAR10
@@ -79,7 +79,7 @@
from nncf.torch.utils import is_main_process
from nncf.torch.utils import safe_thread_call

model_names = sorted(name for name in models.__dict__
model_names = sorted(name for name in models.__dict__.items()
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))

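Most of the import rewrites in this diff appear to target pylint's consider-using-from-import check (R0402), which the linter update starts reporting: `import pkg.mod as mod` is flagged in favour of `from pkg import mod`, which binds the same module object under the same local name. A minimal before/after sketch, assuming only that PyTorch is installed:

```python
# Before (reported as R0402, consider-using-from-import):
#   import torch.backends.cudnn as cudnn
#   import torch.nn as nn

# After: the same module objects are bound under the same local names.
from torch import nn
from torch.backends import cudnn

print(nn.Conv2d(3, 8, kernel_size=3))   # the bound names behave exactly as before
print(cudnn.is_available())
```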
@@ -14,7 +14,7 @@
from typing import List
from typing import Optional

import torch.nn as nn
from torch import nn
from torch import Tensor


4 changes: 2 additions & 2 deletions examples/torch/classification/staged_quantization_worker.py
@@ -16,8 +16,8 @@
import time

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch.backends import cudnn
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
2 changes: 1 addition & 1 deletion examples/torch/common/model_loader.py
@@ -21,7 +21,7 @@
import examples.torch.common.models as custom_models
from examples.torch.classification.models.mobilenet_v2_32x32 import MobileNetV2For32x32
from examples.torch.common.example_logger import logger
import examples.torch.common.restricted_pickle_module as restricted_pickle_module
from examples.torch.common import restricted_pickle_module
from nncf.torch.checkpoint_loading import load_state
from nncf.torch.utils import safe_thread_call

@@ -17,7 +17,7 @@
from collections import namedtuple

import torch
import torch.nn as nn
from torch import nn
import torch.nn.functional as F
from torch.utils import model_zoo

@@ -94,7 +94,7 @@ def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):

for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
import scipy.stats as stats
from scipy import stats
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
X = stats.truncnorm(-2, 2, scale=stddev)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
@@ -21,7 +21,7 @@

"""

import torch.nn as nn
from torch import nn
import math

__all__ = ['mobilenetv3_Large', 'mobilenetv3']
@@ -11,7 +11,7 @@
limitations under the License.
"""

import torch.nn as nn
from torch import nn
import torch.nn.functional as F


@@ -1,6 +1,6 @@
from collections import OrderedDict

import torch.nn as nn
from torch import nn
import torch.nn.functional as F


2 changes: 1 addition & 1 deletion examples/torch/common/models/segmentation/enet.py
@@ -14,7 +14,7 @@
# ENet implementation from:
# https://github.com/davidtvs/PyTorch-ENet

import torch.nn as nn
from torch import nn
import torch

from examples.torch.common.example_logger import logger
2 changes: 1 addition & 1 deletion examples/torch/common/models/segmentation/icnet.py
@@ -22,7 +22,7 @@
from pkg_resources import parse_version

from numpy import lcm
import torch.nn as nn
from torch import nn
import torch
import torch.nn.functional as F

2 changes: 1 addition & 1 deletion examples/torch/common/models/segmentation/unet.py
@@ -16,7 +16,7 @@

from pkg_resources import parse_version

import torch.nn as nn
from torch import nn
import torch
import torch.nn.functional as F

2 changes: 1 addition & 1 deletion examples/torch/common/restricted_pickle_module.py
@@ -63,7 +63,7 @@ def find_class(self, module_name, class_name):
return getattr(builtins, class_name)
if module_name == "collections" and class_name in Unpickler.safe_collections:
return getattr(collections, class_name)
for allowed_module_name in Unpickler.allowed_classes:
for allowed_module_name in Unpickler.allowed_classes.items():
if module_name == allowed_module_name and class_name in Unpickler.allowed_classes[allowed_module_name]:
module = importlib.import_module(module_name)
return getattr(module, class_name)
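One detail worth noting about the `find_class` hunk: iterating a dict directly yields its keys, while `.items()` yields `(key, value)` pairs, so an `==` comparison against a bare module name behaves differently in the two forms. A self-contained sketch of both idioms; the `allowed_classes` mapping below is illustrative, not the module's real table:

```python
allowed_classes = {"collections": {"OrderedDict"}, "builtins": {"frozenset"}}  # illustrative only
module_name, class_name = "collections", "OrderedDict"

# Plain dict iteration yields module names (keys), so the comparison can match:
for allowed_module_name in allowed_classes:
    if module_name == allowed_module_name and class_name in allowed_classes[allowed_module_name]:
        print("allowed (key iteration)")

# .items() yields (name, classes) tuples; the tuple must be unpacked for the
# same comparison to make sense:
for allowed_module_name, classes in allowed_classes.items():
    if module_name == allowed_module_name and class_name in classes:
        print("allowed (items iteration)")
```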
9 changes: 5 additions & 4 deletions examples/torch/common/utils.py
@@ -25,7 +25,7 @@
from typing import Tuple

from PIL import Image
import torch.utils.data as data
from torch.utils import data

from examples.torch.common.distributed import configure_distributed
from examples.torch.common.execution import ExecutionMode, get_device
@@ -76,7 +76,7 @@ def get_name(config):
def write_metrics(acc, filename):
avg = round(acc * 100, 2)
metrics = {"Accuracy": avg}
with open(filename, 'w') as outfile:
with open(filename, 'w', encoding='utf8') as outfile:
json.dump(metrics, outfile)


@@ -223,8 +223,9 @@ class ForkedPdb(pdb.Pdb):
def interaction(self, *args, **kwargs):
_stdin = sys.stdin
try:
sys.stdin = open('/dev/stdin')
pdb.Pdb.interaction(self, *args, **kwargs)
with open('/dev/stdin', encoding='utf8') as file:
sys.stdin = file
pdb.Pdb.interaction(self, *args, **kwargs)
finally:
sys.stdin = _stdin

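The utils.py changes look like fixes for two warnings the pylint upgrade starts reporting: unspecified-encoding (W1514), which asks for an explicit `encoding=` argument on text-mode `open()`, and consider-using-with (R1732), which prefers a context manager so the handle is closed even on an exception. A small sketch of both patterns; the file name is a placeholder:

```python
import json

metrics = {"Accuracy": 76.5}

# W1514: pass an explicit encoding instead of relying on the platform default.
with open("metrics.json", "w", encoding="utf8") as outfile:  # placeholder path
    json.dump(metrics, outfile)

# R1732: opening inside a `with` block guarantees the handle is closed,
# which is also why the /dev/stdin re-open above was wrapped in one.
with open("metrics.json", encoding="utf8") as infile:
    print(json.load(infile))
```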
4 changes: 2 additions & 2 deletions examples/torch/object_detection/datasets/coco.py
@@ -19,7 +19,7 @@
import cv2
import numpy as np
import torch
import torch.utils.data as data
from torch.utils import data

from examples.torch.common.example_logger import logger

@@ -50,7 +50,7 @@ def _read_coco_annotation(annotation_file, images_folder):
images_folder = Path(images_folder)
anno_dict = OrderedDict()

with open(annotation_file) as data_file:
with open(annotation_file, encoding="utf8") as data_file:
json_annotation = json.load(data_file)
annotation = json_annotation["annotations"]

9 changes: 5 additions & 4 deletions examples/torch/object_detection/datasets/voc0712.py
@@ -18,7 +18,7 @@
import cv2
import numpy as np
import torch
import torch.utils.data as data
from torch.utils import data

if sys.version_info[0] == 2:
import defusedxml.cElementTree as ET
@@ -115,12 +115,13 @@ def __init__(self, root, image_sets=(('2007', 'trainval'), ('2012', 'trainval'))
self.return_image_info = return_image_info
self._annopath = os.path.join('%s', 'Annotations', '%s.xml')
self._imgpath = os.path.join('%s', 'JPEGImages', '%s.jpg')
self.ids = list()
self.ids = []

for (year, name) in self.image_set:
rootpath = os.path.join(self.root, 'VOC' + year)
for line in open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt')):
self.ids.append((rootpath, line.strip()))
with open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt'), encoding='utf8') as lines:
for line in lines:
self.ids.append((rootpath, line.strip()))

def __getitem__(self, index):
"""
4 changes: 2 additions & 2 deletions examples/torch/object_detection/eval.py
@@ -217,11 +217,11 @@ def load_detection_annotations(cachedir, dataset):
# save
logger.info('Saving cached annotations to {:s}'.format(cachefile))
pathlib.Path(cachedir).mkdir(parents=True, exist_ok=True)
with open(cachefile, 'w') as f:
with open(cachefile, 'w', encoding='utf8') as f:
json.dump(gt, f)
if is_dist_avail_and_initialized():
dist.barrier()
with open(cachefile, 'r') as f:
with open(cachefile, 'r', encoding='utf8') as f:
gt = json.load(f)
return gt, imagenames

@@ -96,8 +96,8 @@ def forward(ctx, loc_data, conf_data, prior_data, detection_output_params):
conf_scores = conf_preds[i].clone()

total_detections_count = 0
all_indices = dict() # indices of confident detections for each class
boxes = dict()
all_indices = {} # indices of confident detections for each class
boxes = {}
for cl in range(0, detection_output_params.num_classes):
if cl == detection_output_params.background_label_id:
continue
@@ -115,7 +115,7 @@ def forward(ctx, loc_data, conf_data, prior_data, detection_output_params):
all_indices[cl] = all_indices[cl][:count]
total_detections_count += count

score_index_pairs = list() # list of tuples (score, label, idx)
score_index_pairs = [] # list of tuples (score, label, idx)
for label, indices in all_indices.items():
indices = indices.cpu().numpy()
for idx in indices:
@@ -124,7 +124,7 @@ def forward(ctx, loc_data, conf_data, prior_data, detection_output_params):
score_index_pairs.sort(key=lambda tup: tup[0], reverse=True)
score_index_pairs = score_index_pairs[:detection_output_params.keep_top_k]

all_indices_new = dict()
all_indices_new = {}
for _, label, idx in score_index_pairs:
if label not in all_indices_new:
all_indices_new[label] = [idx]
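The `dict()`/`list()` replacements in this file (and in several nncf/common modules below) match pylint's use-dict-literal (R1735) and use-list-literal (R1734) suggestions: the literal forms create the same objects without a global name lookup and a call. A trivial sketch:

```python
# Reported by pylint as R1735 / R1734:
#   all_indices = dict()
#   score_index_pairs = list()

# Preferred literal forms -- identical objects, marginally cheaper to create:
all_indices = {}
score_index_pairs = []

all_indices[1] = [0, 2, 5]               # indices of confident detections for a class
score_index_pairs.append((0.9, 1, 0))    # (score, label, idx), as in the diff's comment
print(all_indices, score_index_pairs)
```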
4 changes: 2 additions & 2 deletions examples/torch/object_detection/layers/modules/l2norm.py
@@ -12,8 +12,8 @@
"""

import torch
import torch.nn as nn
import torch.nn.init as init
from torch import nn
from torch.nn import init

from nncf.torch.utils import add_domain
from nncf.torch import register_module
@@ -12,7 +12,7 @@
"""

import torch
import torch.nn as nn
from torch import nn
import torch.nn.functional as F

from ..box_utils import match, log_sum_exp
@@ -106,6 +106,7 @@ def __init__(self, outputs, modules):
super().__init__(*modules)
self.outputs = [str(o) for o in outputs]

#pylint:disable=W0237
ljaljushkin marked this conversation as resolved.
def forward(self, x):
outputs = []
for name, module in self._modules.items():
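The added `#pylint:disable=W0237` suppresses arguments-renamed, a check reported by pylint 2.10 when an override keeps a parameter's position but changes its name (here, presumably because the base class names the `forward` parameter something other than `x`). A toy illustration of when the warning fires, with made-up class names:

```python
class Base:
    def forward(self, inputs):
        return inputs


class Child(Base):
    # pylint: disable=W0237  # arguments-renamed: same arity, different parameter name
    def forward(self, x):
        return x * 2


print(Child().forward(21))  # 42
```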
2 changes: 1 addition & 1 deletion examples/torch/object_detection/main.py
@@ -17,7 +17,7 @@
from pathlib import Path

import torch
import torch.utils.data as data
from torch.utils import data

from examples.torch.common.argparser import parse_args
from torch.optim.lr_scheduler import ReduceLROnPlateau
2 changes: 1 addition & 1 deletion examples/torch/object_detection/models/ssd_mobilenet.py
@@ -12,7 +12,7 @@
"""

import torch
import torch.nn as nn
from torch import nn

from examples.torch.common import restricted_pickle_module
from examples.torch.common.example_logger import logger
2 changes: 1 addition & 1 deletion examples/torch/object_detection/models/ssd_vgg.py
@@ -13,7 +13,7 @@

import os
import torch
import torch.nn as nn
from torch import nn

from examples.torch.common import restricted_pickle_module
from examples.torch.common.example_logger import logger
4 changes: 2 additions & 2 deletions nncf/common/composite_compression.py
@@ -248,7 +248,7 @@ def get_state(self) -> Dict[str, Dict[str, Any]]:

:return: The composite compression controller state.
"""
result = dict()
result = {}
for ctrl in self.child_ctrls:
result.update(ctrl.get_state())
return result
@@ -394,7 +394,7 @@ def get_state(self) -> Dict[str, Dict]:

:return: The composite compression builder state.
"""
result = dict()
result = {}
for builder in self.child_builders:
result.update(builder.get_state())
return result
2 changes: 1 addition & 1 deletion nncf/common/graph/graph.py
@@ -165,7 +165,7 @@ class NNCFGraph:

def __init__(self):
self._nx_graph = nx.DiGraph()
self._node_id_to_key_dict = dict()
self._node_id_to_key_dict = {}
self._input_nncf_nodes = {} # type: Dict[int, NNCFNode]
self._output_nncf_nodes = {} # type: Dict[int, NNCFNode]

2 changes: 1 addition & 1 deletion nncf/common/graph/operator_metatypes.py
@@ -57,7 +57,7 @@ def subtype_check(cls, metatype: Type['OperatorMetatype']) -> bool:
if metatype == cls or metatype in subtypes:
return True

return any([subtype.subtype_check(metatype) for subtype in subtypes])
return any(subtype.subtype_check(metatype) for subtype in subtypes)


class OperatorMetatypeRegistry(Registry):
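The `any([...])` → `any(...)` and `all([...])` → `all(...)` edits here and in the quantizer propagation solver below follow pylint's use-a-generator check (R1729): a generator expression lets `any()`/`all()` short-circuit instead of materialising the whole list first. A small sketch of the difference:

```python
subtype_checks = [False, True, False, False]

# R1729: the list comprehension evaluates every element before any() sees the first one.
result_list = any([flag for flag in subtype_checks])

# The generator form stops at the first truthy element.
result_gen = any(flag for flag in subtype_checks)

assert result_list == result_gen   # same result, less work in the generator case
print(result_gen)                  # True
```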
2 changes: 1 addition & 1 deletion nncf/common/graph/patterns.py
@@ -33,7 +33,7 @@ class HWFusedPatterns:
"""

def __init__(self):
self._patterns_dict = dict()
self._patterns_dict = {}
self._full_pattern_graph = GraphPattern()

def register(self, pattern: 'GraphPattern', name: str, match: bool = True) -> None:
2 changes: 1 addition & 1 deletion nncf/common/pruning/pruning_node_selector.py
@@ -103,7 +103,7 @@ def create_pruning_groups(self, graph: NNCFGraph) -> Clusterization[NNCFNode]:
for i, cluster in enumerate(special_ops_clusterization.get_all_clusters()):
all_pruned_inputs = []
pruned_inputs_idxs = set()
clusters_to_merge = list()
clusters_to_merge = []

for node in cluster.elements:
sources = get_sources_of_node(node, graph, self._prune_operations)
4 changes: 2 additions & 2 deletions nncf/common/quantization/quantizer_propagation/solver.py
@@ -140,7 +140,7 @@ def constrain_quantizer_config_list_for_insertion(self, quantization_point_id: Q
of the quantizer configs for the quantization point defined by `quantization_point_id`.
"""
prior_list = self.quantizer_setup.quantization_points[quantization_point_id].possible_qconfigs
if not all([qc in prior_list for qc in constrained_config_list]):
if not all(qc in prior_list for qc in constrained_config_list):
raise RuntimeError('Constrained config list is incompatible with the result of the quantizer propagation!')
# TODO (vshampor): only allow to constrain 'input-group'-wise?
self.quantizer_setup.quantization_points[quantization_point_id].possible_qconfigs = constrained_config_list
@@ -1193,7 +1193,7 @@ def compatible_wo_requant(qconf: QuantizerConfig,
raise RuntimeError("Unknown propagation strategy: {}".format(self._propagation_strategy))

for qconf in qconfigs_union:
if all([compatible_fn(qconf, qconf_list) for qconf_list in potential_qconfigs_for_each_branch]):
if all(compatible_fn(qconf, qconf_list) for qconf_list in potential_qconfigs_for_each_branch):
merged_qconfig_list.append(qconf)

nncf_logger.debug("Merged list before sorting: {}".format(";".join([str(qc) for qc in merged_qconfig_list])))
2 changes: 1 addition & 1 deletion nncf/common/quantization/quantizer_setup.py
@@ -447,7 +447,7 @@ def select_qconfigs(self, qp_id_vs_selected_qconfig_dict: Dict[QuantizationPoint
if Counter(qp_id_vs_selected_qconfig_dict.keys()) != Counter(self.quantization_points.keys()):
raise ValueError("The set of quantization points for a selection is inconsistent with quantization"
"points in the quantizer setup!")
for qp_id in self.quantization_points:
for qp_id in self.quantization_points.items():
if strict:
retval.quantization_points[qp_id] = self.quantization_points[qp_id].select_qconfig(
qp_id_vs_selected_qconfig_dict[qp_id]