forked from zhanghang1989/PyTorch-Encoding
Commit 1633f31 (parent de33018)
Showing 14 changed files with 588 additions and 44 deletions.
.gitignore
@@ -2,4 +2,4 @@
 *.swp
 *.pyc
-build/
+encoding/build/
 data/
Empty file.
dataset/cifar.py
@@ -0,0 +1,44 @@
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

import torch
import torchvision
import torchvision.transforms as transforms


class Dataloder():
    def __init__(self, args):
        # standard CIFAR-10 augmentation; normalization uses the
        # per-channel mean/std of the CIFAR-10 training set
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

        trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
            download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR10(root='./data', train=False,
            download=True, transform=transform_test)

        kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
        trainloader = torch.utils.data.DataLoader(trainset,
            batch_size=args.batch_size, shuffle=True, **kwargs)
        testloader = torch.utils.data.DataLoader(testset,
            batch_size=args.batch_size, shuffle=False, **kwargs)
        self.trainloader = trainloader
        self.testloader = testloader

    def getloader(self):
        return self.trainloader, self.testloader
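For reference, a minimal usage sketch of this class. The args object normally comes from option.Options (not part of this diff), so a bare namespace with the two fields the loader actually reads is assumed here:

    import argparse
    from dataset.cifar import Dataloder

    # stand-in for option.Options().parse(); only cuda and batch_size are read
    args = argparse.Namespace(cuda=False, batch_size=128)
    trainloader, testloader = Dataloder(args).getloader()
    print(len(trainloader), 'train batches,', len(testloader), 'test batches')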
main.py
@@ -0,0 +1,131 @@
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

from __future__ import print_function

import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

from option import Options
from model.encodenet import Net
from utils import *

# global variables
best_pred = 0.0
acclist = []


def main():
    # resume state lives at module level so train()/test() can see it
    global best_pred, acclist
    # init the args
    args = Options().parse()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    # init dataloader
    if args.dataset == 'cifar':
        from dataset.cifar import Dataloder
        train_loader, test_loader = Dataloder(args).getloader()
    else:
        raise ValueError('Unknown dataset!')

    model = Net()

    if args.cuda:
        model.cuda()

    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_pred = checkpoint['best_pred']
            acclist = checkpoint['acclist']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no resume checkpoint found at '{}'".format(args.resume))

    criterion = nn.CrossEntropyLoss()
    # TODO: make weight_decay one of the args
    optimizer = optim.SGD(model.parameters(), lr=args.lr,
                          momentum=args.momentum, weight_decay=1e-4)

    def train(epoch):
        model.train()
        global best_pred
        train_loss, correct, total = 0, 0, 0
        adjust_learning_rate(optimizer, epoch, best_pred, args)
        for batch_idx, (data, target) in enumerate(train_loader):
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            train_loss += loss.data[0]
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).cpu().sum()
            total += target.size(0)
            progress_bar(batch_idx, len(train_loader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1),
                100.*correct/total, correct, total))

    def test(epoch):
        model.eval()
        global best_pred
        global acclist
        test_loss, correct, total = 0, 0, 0
        acc = 0.0
        is_best = False
        for batch_idx, (data, target) in enumerate(test_loader):
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data, volatile=True), Variable(target)
            output = model(data)
            test_loss += criterion(output, target).data[0]
            # get the index of the max log-probability
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).cpu().sum()
            total += target.size(0)

            acc = 100.*correct/total
            progress_bar(batch_idx, len(test_loader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1),
                acc, correct, total))
        # save checkpoint
        acclist += [acc]
        if acc > best_pred:
            best_pred = acc
            is_best = True
        save_checkpoint({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'best_pred': best_pred,
            'acclist': acclist,
        }, args=args, is_best=is_best)

    # TODO: add plot curve

    for epoch in range(args.start_epoch, args.epochs + 1):
        train(epoch)
        # FIXME: there is a suspected bug somewhere outside this code
        test(epoch)


if __name__ == "__main__":
    main()
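save_checkpoint, progress_bar, and adjust_learning_rate come from utils.py, which is not part of this diff. The checkpoint dict written above can nevertheless be re-read directly with torch.load; a minimal sketch, with the checkpoint path assumed since the actual naming scheme lives in utils.py:

    import torch
    from model.encodenet import Net

    # hypothetical path; the real filename is chosen by save_checkpoint
    checkpoint = torch.load('checkpoint.pth.tar')
    print(checkpoint['epoch'], checkpoint['best_pred'])
    print(len(checkpoint['acclist']), 'test accuracies recorded')

    # restoring the weights into a fresh model
    model = Net()
    model.load_state_dict(checkpoint['state_dict'])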
Empty file.
model/encodenet.py
@@ -0,0 +1,58 @@
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

import torch
import torch.nn as nn
import model.mynn as nn2
from encoding import Encoding


class Net(nn.Module):
    def __init__(self, num_blocks=[2, 2, 2, 2], num_classes=10,
                 block=nn2.Bottleneck):
        super(Net, self).__init__()
        # Basicblock keeps the channel count; Bottleneck expands it 4x
        if block == nn2.Basicblock:
            self.expansion = 1
        else:
            self.expansion = 4

        self.inplanes = 64
        num_planes = [64, 128, 256, 512]
        strides = [1, 2, 2, 2]
        model = []
        # Conv_1
        model += [nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1),
                  nn.BatchNorm2d(self.inplanes),
                  nn.ReLU(inplace=True)]
        # Residual units
        for i in range(4):
            model += [self._residual_unit(block, num_planes[i], num_blocks[i],
                                          strides[i])]
        # Encoding head: BN + ReLU, then the Encoding layer with K=16
        # codewords, flattened into a linear classifier
        # TODO norm layer, instance norm?
        model += [nn.BatchNorm2d(self.inplanes),
                  nn.ReLU(inplace=True),
                  Encoding(D=512*self.expansion, K=16),
                  nn.BatchNorm1d(16),
                  nn.ReLU(inplace=True),
                  nn2.View(-1, 512*self.expansion*16),
                  nn.Linear(512*self.expansion*16, num_classes)]
        self.model = nn.Sequential(*model)
        print(model)

    def _residual_unit(self, block, planes, n_blocks, stride):
        # the first block may downsample; the rest keep stride 1
        strides = [stride] + [1]*(n_blocks-1)
        layers = []
        for i in range(n_blocks):
            layers += [block(self.inplanes, planes, strides[i])]
            self.inplanes = self.expansion*planes
        return nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)
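A quick smoke test, assuming model.mynn and the encoding package are importable and that Encoding(D, K) returns a B x K x D tensor (as described in the Encoding-Layer work), which the View(-1, D*16) above flattens for the linear classifier:

    import torch
    from torch.autograd import Variable
    from model.encodenet import Net

    net = Net()                              # default: Bottleneck blocks, 10 classes
    x = Variable(torch.randn(2, 3, 32, 32))  # fake CIFAR-sized batch
    out = net(x)
    print(out.size())                        # expected: (2, 10)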