[pep8] Fix most lint automatically with autopep8
Here's the command I used to invoke autopep8 (in parallel!):

    git ls-files | grep '\.py$' | xargs -n1 -P`nproc` autopep8 -i

Several rules are ignored in setup.cfg. The goal is to let autopep8
handle everything which it can handle safely, and to disable any rules
which are tricky or controversial to address. We may want to come back
and re-enable some of these rules later, but I'm trying to make this
patch as safe as possible.

Also configures flake8 to match pep8's behavior.

Also configures TravisCI to check the whole project for lint.
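
For reference, a rough local equivalent of the checks this sets up (a sketch only; it assumes pep8 and flake8 are installed and read the [pep8] and [flake8] sections of setup.cfg from the repository root, and these commands are not part of the commit itself):

    # Check the whole tree the way the updated TravisCI job does
    pep8

    # Run flake8 using the matching [flake8] configuration
    flake8 .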
lukeyeager authored and apaszke committed Jan 28, 2017
1 parent f1d0d73 commit e7c1e6a
Showing 286 changed files with 3,346 additions and 3,028 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -44,4 +44,4 @@ matrix:
python: "2.7"
addons: true
install: pip install pep8
script: pep8 setup.py
script: pep8
3 changes: 2 additions & 1 deletion docs/source/conf.py
@@ -201,12 +201,13 @@
from sphinx.util.docfields import TypedField
from sphinx import addnodes


def patched_make_field(self, types, domain, items):
# type: (List, unicode, Tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
#par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
5 changes: 5 additions & 0 deletions setup.cfg
@@ -1,2 +1,7 @@
[pep8]
max-line-length = 120
ignore = E402,E721,E731

[flake8]
max-line-length = 120
ignore = E305,E402,E721,E731,F401,F403,F405,F811,F812,F821,F841
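
For context, the three pep8 codes ignored above are checks the commit message calls tricky or controversial to address: E402 (module-level import not at top of file), E721 (comparing types with ==), and E731 (assigning a lambda instead of defining a function). A hypothetical way to gauge the cost of re-enabling one of them later is to override the ignore list on the command line:

    # Hypothetical example: report E731 violations while still ignoring E402 and E721
    pep8 --ignore=E402,E721 .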
6 changes: 4 additions & 2 deletions test/common.py
@@ -12,6 +12,7 @@

torch.set_default_tensor_type('torch.DoubleTensor')


def run_tests():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--seed', type=int, default=123)
@@ -29,6 +30,7 @@ def run_tests():
except ImportError:
TEST_NUMPY = False


def get_cpu_type(t):
assert t.__module__ == 'torch.cuda'
return getattr(torch, t.__class__.__name__)
@@ -155,7 +157,7 @@ def make_jacobian(input, num_out):
return torch.zeros(input.nelement(), num_out)
else:
return type(input)(filter(lambda x: x is not None,
(make_jacobian(elem, num_out) for elem in input)))
(make_jacobian(elem, num_out) for elem in input)))


def iter_tensors(x, only_requiring_grad=False):
@@ -206,7 +208,7 @@ def get_numerical_jacobian(fn, input, target):
outb.copy_(fn(input))
flat_tensor[i] = orig

outb.add_(-1,outa).div_(2*perturbation)
outb.add_(-1, outa).div_(2 * perturbation)
d_tensor[i] = outb

return jacobian
47 changes: 25 additions & 22 deletions test/common_nn.py
@@ -25,14 +25,14 @@
module_name='Linear',
constructor_args=(10, 8),
input_size=(4, 10),
reference_fn=lambda i,p: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8)
reference_fn=lambda i, p: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8)
),
dict(
module_name='Linear',
constructor_args=(10, 8, False),
input_size=(4, 10),
desc='no_bias',
reference_fn=lambda i,p: torch.mm(i, p[0].t())
reference_fn=lambda i, p: torch.mm(i, p[0].t())
),
dict(
module_name='Threshold',
@@ -72,7 +72,7 @@
dict(
module_name='Hardtanh',
input_size=(3, 2, 5),
reference_fn=lambda i,_: i.clamp(-1, 1)
reference_fn=lambda i, _: i.clamp(-1, 1)
),
dict(
module_name='Sigmoid',
@@ -85,22 +85,22 @@
dict(
module_name='Softmax',
input_size=(10, 20),
reference_fn=lambda i,_: torch.exp(i).div(torch.exp(i).sum(1).expand(10, 20))
reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1).expand(10, 20))
),
dict(
module_name='Softmax2d',
input_size=(1, 3, 10, 20),
reference_fn=lambda i,_: torch.exp(i).div(torch.exp(i).sum(1).expand_as(i))
reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1).expand_as(i))
),
dict(
module_name='LogSoftmax',
input_size=(10, 20),
reference_fn=lambda i,_: torch.exp(i).div_(torch.exp(i).sum(1).expand(10, 20)).log_()
reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1).expand(10, 20)).log_()
),
dict(
module_name='LogSoftmax',
input_size=(1, 3, 10, 20),
reference_fn=lambda i,_: torch.exp(i).div_(torch.exp(i).sum(1).expand_as(i)).log_(),
reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1).expand_as(i)).log_(),
desc='multiparam'
),
dict(
@@ -130,18 +130,18 @@
dict(
module_name='LogSigmoid',
input_size=(2, 3, 4),
reference_fn=lambda i,_: i.sigmoid().log()
reference_fn=lambda i, _: i.sigmoid().log()
),
dict(
module_name='Softplus',
input_size=(10, 20),
reference_fn=lambda i,_: torch.log(1 + torch.exp(i))
reference_fn=lambda i, _: torch.log(1 + torch.exp(i))
),
dict(
module_name='Softplus',
constructor_args=(2,),
input_size=(10, 20),
reference_fn=lambda i,_: 1. / 2. * torch.log(1 + torch.exp(2 * i)),
reference_fn=lambda i, _: 1. / 2. * torch.log(1 + torch.exp(2 * i)),
desc='beta'
),
dict(
@@ -172,7 +172,7 @@
dict(
module_name='Softsign',
input_size=(3, 2, 5),
reference_fn=lambda i,_: i.div(1 + torch.abs(i))
reference_fn=lambda i, _: i.div(1 + torch.abs(i))
),
dict(
module_name='Softmin',
@@ -187,11 +187,11 @@

criterion_tests = [
dict(module_name='L1Loss',
input_size=(2, 3, 4),
target=torch.randn(2, 3, 4),
reference_fn=lambda i,t,_: 1./i.numel() * \
sum((a-b).abs().sum() for a,b in zip(i, t))
),
input_size=(2, 3, 4),
target=torch.randn(2, 3, 4),
reference_fn=lambda i, t, _: 1. / i.numel() *
sum((a - b).abs().sum() for a, b in zip(i, t))
),
dict(
module_name='NLLLoss',
input=torch.rand(15, 10).log(),
@@ -213,7 +213,7 @@
module_name='MSELoss',
input=torch.randn(2, 3, 4, 5),
target=torch.randn(2, 3, 4, 5),
reference_fn=lambda i,t,_: (i-t).abs().pow(2).sum() / i.numel()
reference_fn=lambda i, t, _: (i - t).abs().pow(2).sum() / i.numel()
),
dict(
module_name='BCELoss',
@@ -370,9 +370,9 @@ def _analytical_jacobian(self, module, input, jacobian_input=True, jacobian_para

if jacobian_input:
for jacobian_x, d_x in zip(flat_jacobian_input, iter_tensors(d_input)):
jacobian_x[:,i] = d_x
jacobian_x[:, i] = d_x
if jacobian_parameters:
jacobian_param[:,i] = torch.cat(self._flatten_tensors(d_param), 0)
jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0)

res = tuple()
if jacobian_input:
@@ -433,7 +433,7 @@ def check_criterion_jacobian(self, criterion, input, target):
fx1 = self._forward_criterion(criterion, input, target)
x[i] = original - eps
fx2 = self._forward_criterion(criterion, input, target)
deriv = (fx1 - fx2) / (2.*eps)
deriv = (fx1 - fx2) / (2. * eps)
d_x[i] = deriv
x[i] = original

@@ -447,8 +447,9 @@ def check_criterion_jacobian(self, criterion, input, target):


class TestBase(object):

def __init__(self, constructor, constructor_args=tuple(), input_size=None,
input=None, desc='', reference_fn=None, fullname=None, **kwargs):
input=None, desc='', reference_fn=None, fullname=None, **kwargs):
if input_size is None and input is None:
raise RuntimeError("Specify either an input tensor, or it's size!")
self.constructor = constructor
@@ -496,6 +497,7 @@ def __call__(self, test_case):


class ModuleTest(TestBase):

def __init__(self, *args, **kwargs):
super(ModuleTest, self).__init__(*args, **kwargs)
self.jacobian_input = kwargs.get('jacobian_input', True)
@@ -568,6 +570,7 @@ def test_cuda(self, test_case):


class CriterionTest(TestBase):

def __init__(self, *args, **kwargs):
super(CriterionTest, self).__init__(*args, **kwargs)
self.target = self._get_target(kwargs['target'])
@@ -590,7 +593,7 @@ def __call__(self, test_case):
if isinstance(target, Variable):
target = target.data
expected_out = self.reference_fn(deepcopy(self._unpack_input(input)),
deepcopy(target), module)
deepcopy(target), module)
test_case.assertEqual(out, expected_out)

test_case.check_criterion_jacobian(module, input, self.target)
1 change: 1 addition & 0 deletions test/data/network1.py
@@ -2,6 +2,7 @@


class Net(nn.Module):

def __init__(self):
super(Net, self).__init__()
self.linear = nn.Linear(10, 20)
1 change: 1 addition & 0 deletions test/data/network2.py
@@ -2,6 +2,7 @@


class Net(nn.Module):

def __init__(self):
super(Net, self).__init__()
self.linear = nn.Linear(10, 20)
52 changes: 27 additions & 25 deletions test/error_messages/storage.py
@@ -1,5 +1,6 @@
import torch


def check_error(desc, fn, *required_substrings):
try:
fn()
@@ -16,54 +17,55 @@ def check_error(desc, fn, *required_substrings):
assert False, "given function ({}) didn't raise an error".format(desc)

check_error(
'Wrong argument types',
lambda: torch.FloatStorage(object()),
'object')
'Wrong argument types',
lambda: torch.FloatStorage(object()),
'object')

check_error('Unknown keyword argument',
lambda: torch.FloatStorage(content=1234.),
'keyword')
lambda: torch.FloatStorage(content=1234.),
'keyword')

check_error('Invalid types inside a sequence',
lambda: torch.FloatStorage(['a', 'b']),
'list', 'str')
lambda: torch.FloatStorage(['a', 'b']),
'list', 'str')

check_error('Invalid size type',
lambda: torch.FloatStorage(1.5),
'float')
lambda: torch.FloatStorage(1.5),
'float')

check_error('Invalid offset',
lambda: torch.FloatStorage(torch.FloatStorage(2), 4),
'2', '4')
lambda: torch.FloatStorage(torch.FloatStorage(2), 4),
'2', '4')

check_error('Negative offset',
lambda: torch.FloatStorage(torch.FloatStorage(2), -1),
'2', '-1')
lambda: torch.FloatStorage(torch.FloatStorage(2), -1),
'2', '-1')

check_error('Invalid size',
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, 5),
'2', '1', '5')
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, 5),
'2', '1', '5')

check_error('Negative size',
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, -5),
'2', '1', '-5')
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, -5),
'2', '1', '-5')

check_error('Invalid index type',
lambda: torch.FloatStorage(10)['first item'],
'str')
lambda: torch.FloatStorage(10)['first item'],
'str')


def assign():
torch.FloatStorage(10)[1:-1] = '1'
check_error('Invalid value type',
assign,
'str')
assign,
'str')

check_error('resize_ with invalid type',
lambda: torch.FloatStorage(10).resize_(1.5),
'float')
lambda: torch.FloatStorage(10).resize_(1.5),
'float')

check_error('fill_ with invalid type',
lambda: torch.IntStorage(10).fill_('asdf'),
'str')
lambda: torch.IntStorage(10).fill_('asdf'),
'str')

# TODO: frombuffer
2 changes: 2 additions & 0 deletions test/optim/test.py
@@ -3,10 +3,12 @@
import torch.legacy.optim as optim
from pprint import pprint


def rosenbrock(tensor):
x, y = tensor
return (1 - x)**2 + 100 * (y - x**2)**2


def drosenbrock(tensor):
x, y = tensor
return torch.DoubleTensor((-400 * x * (y - x**2) - 2 * (1 - x), 200 * x * (y - x**2)))