
Commit

indent
zhanghang1989 committed Sep 18, 2017
1 parent fa0e478 commit 8dd870b
Showing 4 changed files with 122 additions and 122 deletions.
4 changes: 2 additions & 2 deletions .editorconfig
@@ -1,5 +1,5 @@
root = true

[*]
-indent_style = tab
-indent_size = 2
+indent_style = space
+indent_size = 4
52 changes: 26 additions & 26 deletions build.py
@@ -20,47 +20,47 @@
# build kernel library
os.environ['TORCH_BUILD_DIR'] = lib_path
if platform.system() == 'Darwin':
-	os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.1.dylib')
-	os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.1.dylib')
-	ENCODING_LIB = os.path.join(lib_path, 'libENCODING.dylib')
+    os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.1.dylib')
+    os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.1.dylib')
+    ENCODING_LIB = os.path.join(lib_path, 'libENCODING.dylib')
else:
-	os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.so.1')
-	os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.so.1')
-	ENCODING_LIB = os.path.join(lib_path, 'libENCODING.so')
+    os.environ['TH_LIBRARIES'] = os.path.join(lib_path,'libTH.so.1')
+    os.environ['THC_LIBRARIES'] = os.path.join(lib_path,'libTHC.so.1')
+    ENCODING_LIB = os.path.join(lib_path, 'libENCODING.so')

build_all_cmd = ['bash', 'encoding/make.sh']
if subprocess.call(build_all_cmd, env=dict(os.environ)) != 0:
-	sys.exit(1)
+    sys.exit(1)

sources = ['encoding/src/encoding_lib.cpp']
headers = ['encoding/src/encoding_lib.h']
defines = [('WITH_CUDA', None)]
with_cuda = True

include_path = [os.path.join(lib_path, 'include'),
-	os.path.join(os.environ['HOME'],'pytorch/torch/lib/THC'),
-	os.path.join(lib_path,'include/ENCODING'),
-	os.path.join(this_file,'encoding/src/')]
+    os.path.join(os.environ['HOME'],'pytorch/torch/lib/THC'),
+    os.path.join(lib_path,'include/ENCODING'),
+    os.path.join(this_file,'encoding/src/')]

def make_relative_rpath(path):
-	if platform.system() == 'Darwin':
-		return '-Wl,-rpath,' + path
-	else:
-		return '-Wl,-rpath,' + path
+    if platform.system() == 'Darwin':
+        return '-Wl,-rpath,' + path
+    else:
+        return '-Wl,-rpath,' + path

ffi = create_extension(
-	'encoding._ext.encoding_lib',
-	package=True,
-	headers=headers,
-	sources=sources,
-	define_macros=defines,
-	relative_to=__file__,
-	with_cuda=with_cuda,
-	include_dirs = include_path,
-	extra_link_args = [
-		make_relative_rpath(lib_path),
-		ENCODING_LIB,
-	],
+    'encoding._ext.encoding_lib',
+    package=True,
+    headers=headers,
+    sources=sources,
+    define_macros=defines,
+    relative_to=__file__,
+    with_cuda=with_cuda,
+    include_dirs = include_path,
+    extra_link_args = [
+        make_relative_rpath(lib_path),
+        ENCODING_LIB,
+    ],
)

if __name__ == '__main__':
146 changes: 73 additions & 73 deletions encoding/__init__.py
@@ -17,85 +17,85 @@
from ._ext import encoding_lib

class aggregate(Function):
-	def forward(self, A, R):
-		# A \in(BxNxK) R \in(BxNxKxD) => E \in(BxNxD)
-		self.save_for_backward(A, R)
-		B, N, K, D = R.size()
-		E = A.new(B,K,D)
-		# TODO support cpu backend
-		if isinstance(A, torch.cuda.FloatTensor):
-			encoding_lib.Encoding_Float_aggregate_forward(E, A, R)
-		elif isinstance(A, torch.cuda.DoubleTensor):
-			encoding_lib.Encoding_Double_aggregate_forward(E, A, R)
-		else:
-			raise RuntimeError('unimplemented')
-		return E

-	def backward(self, gradE):
-		A, R = self.saved_tensors
-		gradA = A.new().resize_as_(A)
-		gradR = R.new().resize_as_(R)
-		if isinstance(A, torch.cuda.FloatTensor):
-			encoding_lib.Encoding_Float_aggregate_backward(gradA, gradR, gradE,
-				A, R)
-		elif isinstance(A, torch.cuda.DoubleTensor):
-			encoding_lib.Encoding_Double_aggregate_backward(gradA, gradR, gradE,
-				A, R)
-		else:
-			raise RuntimeError('unimplemented')
-		return gradA, gradR
+    def forward(self, A, R):
+        # A \in(BxNxK) R \in(BxNxKxD) => E \in(BxNxD)
+        self.save_for_backward(A, R)
+        B, N, K, D = R.size()
+        E = A.new(B,K,D)
+        # TODO support cpu backend
+        if isinstance(A, torch.cuda.FloatTensor):
+            encoding_lib.Encoding_Float_aggregate_forward(E, A, R)
+        elif isinstance(A, torch.cuda.DoubleTensor):
+            encoding_lib.Encoding_Double_aggregate_forward(E, A, R)
+        else:
+            raise RuntimeError('unimplemented')
+        return E

+    def backward(self, gradE):
+        A, R = self.saved_tensors
+        gradA = A.new().resize_as_(A)
+        gradR = R.new().resize_as_(R)
+        if isinstance(A, torch.cuda.FloatTensor):
+            encoding_lib.Encoding_Float_aggregate_backward(gradA, gradR, gradE,
+                A, R)
+        elif isinstance(A, torch.cuda.DoubleTensor):
+            encoding_lib.Encoding_Double_aggregate_backward(gradA, gradR, gradE,
+                A, R)
+        else:
+            raise RuntimeError('unimplemented')
+        return gradA, gradR


class Aggregate(nn.Module):
-	def forward(self, A, R):
-		return aggregate()(A, R)
+    def forward(self, A, R):
+        return aggregate()(A, R)


class Encoding(nn.Module):
-	def __init__(self, D, K):
-		super(Encoding, self).__init__()
-		# init codewords and smoothing factor
-		self.D, self.K = D, K
-		self.codewords = nn.Parameter(torch.Tensor(K, D), requires_grad=True)
-		self.scale = nn.Parameter(torch.Tensor(K), requires_grad=True)
-		self.softmax = nn.Softmax()
-		self.reset_params()
-	def reset_params(self):
-		std1 = 1./((self.K*self.D)**(1/2))
-		std2 = 1./((self.K)**(1/2))
-		self.codewords.data.uniform_(-std1, std1)
-		self.scale.data.uniform_(-std2, std2)

-	def forward(self, X):
-		# input X is a 4D tensor
-		assert(X.size(1)==self.D,"Encoding Layer incompatible input channels!")
-		unpacked = False
-		if X.dim() == 3:
-			unpacked = True
-			X = X.unsqueeze(0)

-		B, N, K, D = X.size(0), X.size(2)*X.size(3), self.K, self.D
-		# reshape input
-		X = X.view(B,D,-1).transpose(1,2)
-		# calculate residuals
-		R = X.contiguous().view(B,N,1,D).expand(B,N,K,D) - self.codewords.view(
-			1,1,K,D).expand(B,N,K,D)
-		# assignment weights
-		A = R
-		A = A.pow(2).sum(3).view(B,N,K)
-		A = A*self.scale.view(1,1,K).expand_as(A)
-		A = self.softmax(A.view(B*N,K)).view(B,N,K)
-		# aggregate
-		E = aggregate()(A, R)

-		if unpacked:
-			E = E.squeeze(0)
-		return E

-	def __repr__(self):
-		return self.__class__.__name__ + '(' \
-			+ 'N x ' + str(self.D) + '=>' + str(self.K) + 'x' + str(self.D) + ')'
+    def __init__(self, D, K):
+        super(Encoding, self).__init__()
+        # init codewords and smoothing factor
+        self.D, self.K = D, K
+        self.codewords = nn.Parameter(torch.Tensor(K, D), requires_grad=True)
+        self.scale = nn.Parameter(torch.Tensor(K), requires_grad=True)
+        self.softmax = nn.Softmax()
+        self.reset_params()
+    def reset_params(self):
+        std1 = 1./((self.K*self.D)**(1/2))
+        std2 = 1./((self.K)**(1/2))
+        self.codewords.data.uniform_(-std1, std1)
+        self.scale.data.uniform_(-std2, std2)

+    def forward(self, X):
+        # input X is a 4D tensor
+        assert(X.size(1)==self.D,"Encoding Layer incompatible input channels!")
+        unpacked = False
+        if X.dim() == 3:
+            unpacked = True
+            X = X.unsqueeze(0)

+        B, N, K, D = X.size(0), X.size(2)*X.size(3), self.K, self.D
+        # reshape input
+        X = X.view(B,D,-1).transpose(1,2)
+        # calculate residuals
+        R = X.contiguous().view(B,N,1,D).expand(B,N,K,D) - self.codewords.view(
+            1,1,K,D).expand(B,N,K,D)
+        # assignment weights
+        A = R
+        A = A.pow(2).sum(3).view(B,N,K)
+        A = A*self.scale.view(1,1,K).expand_as(A)
+        A = self.softmax(A.view(B*N,K)).view(B,N,K)
+        # aggregate
+        E = aggregate()(A, R)

+        if unpacked:
+            E = E.squeeze(0)
+        return E

+    def __repr__(self):
+        return self.__class__.__name__ + '(' \
+            + 'N x ' + str(self.D) + '=>' + str(self.K) + 'x' + str(self.D) + ')'

class sum_square(Function):
    def forward(ctx, input):
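
For context, a minimal usage sketch of the Encoding layer touched above (illustration only, not part of this diff; the sizes are made up, and a CUDA tensor is assumed because the aggregate kernels only implement the GPU path):

import torch
from torch.autograd import Variable
from encoding import Encoding

B, D, H, W, K = 2, 64, 8, 8, 32              # hypothetical sizes
layer = Encoding(D=D, K=K).cuda()            # K codewords of dimension D
X = Variable(torch.randn(B, D, H, W).cuda()) # 4D input, channel dim must equal D
E = layer(X)                                 # aggregated encoding, shape B x K x D
# Assumed reference for the CUDA aggregate op (inferred from the shapes, not from the kernel source):
# E[b, k, :] = sum over n of A[b, n, k] * R[b, n, k, :]
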
42 changes: 21 additions & 21 deletions setup.py
@@ -19,27 +19,27 @@

extra_compile_args = ['-std=c++11', '-Wno-write-strings']
if os.getenv('PYTORCH_BINARY_BUILD') and platform.system() == 'Linux':
-	print('PYTORCH_BINARY_BUILD found. Static linking libstdc++ on Linux')
-	extra_compile_args += ['-static-libstdc++']
-	extra_link_args += ['-static-libstdc++']
+    print('PYTORCH_BINARY_BUILD found. Static linking libstdc++ on Linux')
+    extra_compile_args += ['-static-libstdc++']
+    extra_link_args += ['-static-libstdc++']

setup(
name="encoding",
version="0.0.1",
description="PyTorch Encoding Layer",
url="https://github.com/zhanghang1989/PyTorch-Encoding-Layer",
author="Hang Zhang",
author_email="zhang.hang@rutgers.edu",
# Require cffi.
install_requires=["cffi>=1.0.0"],
setup_requires=["cffi>=1.0.0"],
# Exclude the build files.
packages=find_packages(exclude=["build"]),
extra_compile_args=extra_compile_args,
# Package where to put the extensions. Has to be a prefix of build.py.
ext_package="",
# Extensions to compile.
cffi_modules=[
os.path.join(this_file, "build.py:ffi")
],
name="encoding",
version="0.0.1",
description="PyTorch Encoding Layer",
url="https://github.com/zhanghang1989/PyTorch-Encoding-Layer",
author="Hang Zhang",
author_email="zhang.hang@rutgers.edu",
# Require cffi.
install_requires=["cffi>=1.0.0"],
setup_requires=["cffi>=1.0.0"],
# Exclude the build files.
packages=find_packages(exclude=["build"]),
extra_compile_args=extra_compile_args,
# Package where to put the extensions. Has to be a prefix of build.py.
ext_package="",
# Extensions to compile.
cffi_modules=[
os.path.join(this_file, "build.py:ffi")
],
)

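For reference, a hypothetical smoke test after installation (assuming the package is built with python setup.py install, which compiles encoding/src/encoding_lib.cpp into encoding._ext.encoding_lib via the cffi_modules entry above; this snippet is not taken from the repository):

import encoding
from encoding._ext import encoding_lib       # the built CUDA/C extension
print(encoding.Encoding(64, 32))             # expect: Encoding(N x 64=>32x64)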