
Commit

add evaluate
zhengxiawu committed Jul 14, 2020
1 parent 00cfc40 commit 3875609
Showing 8 changed files with 406 additions and 66 deletions.
Binary file added .DS_Store
67 changes: 41 additions & 26 deletions data/get_data.py
@@ -8,7 +8,8 @@
import nvidia.dali.ops as ops
import nvidia.dali.types as types
except ImportError:
raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
raise ImportError(
"Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")


def get_data(dataset, data_path, cutout_length, auto_augmentation):
@@ -29,11 +30,14 @@ def get_data(dataset, data_path, cutout_length, auto_augmentation):
n_classes = 1000
else:
raise ValueError(dataset)
-    trn_transform, val_transform = preproc.data_transforms(dataset, cutout_length, auto_augmentation)
+    trn_transform, val_transform = preproc.data_transforms(
+        dataset, cutout_length, auto_augmentation)
if 'imagenet' in dataset:
-        trn_data = dset_cls(root=os.path.join(data_path, 'train'), transform=trn_transform)
+        trn_data = dset_cls(root=os.path.join(
+            data_path, 'train'), transform=trn_transform)
else:
-        trn_data = dset_cls(root=data_path, train=True, download=True, transform=trn_transform)
+        trn_data = dset_cls(root=data_path, train=True,
+                            download=True, transform=trn_transform)

# assuming shape is NHW or NHWC
if 'imagenet' in dataset:
@@ -56,9 +60,11 @@ def get_data(dataset, data_path, cutout_length, auto_augmentation):
input_size = shape[1]
ret = [input_size, input_channels, n_classes, trn_data]
if 'imagenet' in dataset:
-        ret.append(dset_cls(root=os.path.join(data_path, 'val'), transform=val_transform))
+        ret.append(dset_cls(root=os.path.join(
+            data_path, 'val'), transform=val_transform))
else:
-        ret.append(dset_cls(root=data_path, train=False, download=True, transform=val_transform))
+        ret.append(dset_cls(root=data_path, train=False,
+                            download=True, transform=val_transform))
return ret
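
Note: get_data returns a positional list that callers unpack. A minimal usage sketch — the CIFAR-10 arguments and values here are illustrative assumptions, not taken from this commit:

# Hypothetical call site; argument values are assumptions for illustration.
from data.get_data import get_data

input_size, input_channels, n_classes, trn_data, val_data = get_data(
    'cifar10', './data', cutout_length=16, auto_augmentation=False)
print(input_size, input_channels, n_classes)  # e.g. 32 3 10 for CIFAR-10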


@@ -71,7 +77,7 @@ def get_data_dali(dataset, data_path, batch_size=256, num_threads=4):
train_loader = cifar10.get_cifar_iter_dali(type='train', image_dir=data_path,
batch_size=batch_size, num_threads=num_threads)
val_loader = cifar10.get_cifar_iter_dali(type='val', image_dir=data_path,
-                                             batch_size=batch_size, num_threads=num_threads)
+                                                batch_size=batch_size, num_threads=num_threads)
elif dataset == 'imagenet':
input_size = 224
input_channels = 3
@@ -80,8 +86,8 @@ def get_data_dali(dataset, data_path, batch_size=256, num_threads=4):
batch_size=batch_size, num_threads=num_threads,
crop=224, val_size=256)
val_loader = imagenet.get_imagenet_iter_dali(type='val', image_dir=data_path,
-                                                batch_size=batch_size, num_threads=num_threads,
-                                                crop=224, val_size=256)
+                                                 batch_size=batch_size, num_threads=num_threads,
+                                                 crop=224, val_size=256)
elif dataset == 'imagenet112':
input_size = 112
input_channels = 3
@@ -119,13 +125,16 @@

class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False):
-        super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
-        self.input = ops.FileReader(file_root=data_dir, shard_id=0, num_shards=1, random_shuffle=True)
-        #let user decide which pipeline works him bets for RN version he runs
+        super(HybridTrainPipe, self).__init__(batch_size,
+                                              num_threads, device_id, seed=12 + device_id)
+        self.input = ops.FileReader(
+            file_root=data_dir, shard_id=0, num_shards=1, random_shuffle=True)
+        # let the user decide which pipeline works best for the RN version he runs
if dali_cpu:
dali_device = "cpu"
self.decode = ops.HostDecoderRandomCrop(device=dali_device, output_type=types.RGB,
-                                                    random_aspect_ratio=[0.8, 1.25],
+                                                    random_aspect_ratio=[
+                                                        0.8, 1.25],
random_area=[0.1, 1.0],
num_attempts=100)
else:
@@ -135,17 +144,20 @@ def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=
self.decode = ops.nvJPEGDecoderRandomCrop(device="mixed", output_type=types.RGB,
device_memory_padding=211025920,
host_memory_padding=140544512,
-                                                      random_aspect_ratio=[0.8, 1.25],
+                                                      random_aspect_ratio=[
+                                                          0.8, 1.25],
random_area=[0.1, 1.0],
num_attempts=100)
-        self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
+        self.res = ops.Resize(device=dali_device, resize_x=crop,
+                              resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
-                                            mean=[0.485 * 255,0.456 * 255,0.406 * 255],
-                                            std=[0.229 * 255,0.224 * 255,0.225 * 255])
+                                            mean=[0.485 * 255, 0.456 *
+                                                  255, 0.406 * 255],
+                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
# self.color_jitter = [ops.Brightness(device="gpu", brightness=0.4),
# ops.Contrast(device="gpu", contrast=0.4),
@@ -166,16 +178,20 @@ def define_graph(self):

class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
-        super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
-        self.input = ops.FileReader(file_root=data_dir, shard_id=0, num_shards=1, random_shuffle=False)
+        super(HybridValPipe, self).__init__(batch_size,
+                                            num_threads, device_id, seed=12 + device_id)
+        self.input = ops.FileReader(
+            file_root=data_dir, shard_id=0, num_shards=1, random_shuffle=False)
self.decode = ops.nvJPEGDecoder(device="mixed", output_type=types.RGB)
self.res = ops.Resize(device="gpu", resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
self.res = ops.Resize(device="gpu", resize_shorter=size,
interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
-                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                            mean=[0.485 * 255, 0.456 *
+                                                  255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255])

def define_graph(self):
@@ -189,13 +205,12 @@ def define_graph(self):
def get_dali_imagenet_pipeline(batch_size, num_threads, data_path, train_cpu=False,
crop=224, size=256):
train_pipe = HybridTrainPipe(batch_size=batch_size, num_threads=num_threads, device_id=0,
-                                 data_dir=os.path.join(data_path, 'train'),
-                                 crop=crop, dali_cpu=train_cpu)
+                                data_dir=os.path.join(data_path, 'train'),
+                                crop=crop, dali_cpu=train_cpu)
train_pipe.build()

val_pipe = HybridValPipe(batch_size=batch_size, num_threads=num_threads, device_id=0,
-                             data_dir=os.path.join(data_path, 'val'),
-                             crop=crop, size=size)
+                            data_dir=os.path.join(data_path, 'val'),
+                            crop=crop, size=size)
val_pipe.build()
return [train_pipe, val_pipe]
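
Note: both pipelines come back already built, so a caller only has to wrap them in an iterator. A sketch against the DALI 0.x PyTorch plugin that matches the API this file uses (ops.FileReader, ops.nvJPEGDecoder); the data path is a placeholder and 1281167 is the standard ImageNet-1k train-split size:

# Hypothetical consumer of the two pipelines (DALI 0.x PyTorch plugin assumed).
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from data.get_data import get_dali_imagenet_pipeline

train_pipe, val_pipe = get_dali_imagenet_pipeline(
    batch_size=256, num_threads=4, data_path='/path/to/imagenet')
train_loader = DALIClassificationIterator(train_pipe, size=1281167)
for data in train_loader:
    images = data[0]["data"]   # NCHW float batch, normalized on the GPU by cmnp
    labels = data[0]["label"]  # integer class labels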

6 changes: 4 additions & 2 deletions flops_counter.py
@@ -44,7 +44,7 @@ def count_linear(m, _, __):
def profile(model, input_size, custom_ops=None):
handler_collection = []
custom_ops = {} if custom_ops is None else custom_ops

def add_hooks(m_):
if len(list(m_.children())) > 0:
return
@@ -86,6 +86,8 @@ def add_hooks(m_):
for m in model.modules():
if len(list(m.children())) > 0: # skip for non-leaf module
continue
+        print(m)
+        print(m.total_ops)
total_ops += m.total_ops
total_params += m.total_params

@@ -96,4 +98,4 @@ def add_hooks(m_):
for handler in handler_collection:
handler.remove()

-    return float(total_ops) / 1000. / 1000., float(total_params) / 1024. / 1024.
+    return float(total_ops), float(total_params)
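
Note: this hunk changes profile's contract. It previously returned ops pre-divided by 1000*1000 and params by 1024*1024; it now returns raw counts, so existing callers must scale the results themselves. A hedged caller-side sketch (model and the input_size value are placeholders):

# Hypothetical call site after this commit; the caller now scales the raw counts.
total_ops, total_params = profile(model, input_size=(1, 3, 224, 224))
print('FLOPs: %.2fM, params: %.2fM' % (total_ops / 1e6, total_params / 1e6))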
Binary file added models/.DS_Store
6 changes: 4 additions & 2 deletions models/get_model.py
@@ -139,8 +139,10 @@
reduce_concat=range(2, 6))

DDPNAS_V3_constraint_4 = Genotype(normal=[[('sep_conv_5x5', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 0), ('skip_connect', 1)], [('sep_conv_3x3', 0), ('dil_conv_3x3', 3)], [('sep_conv_5x5', 0), ('avg_pool_3x3', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('skip_connect', 0)], [('sep_conv_5x5', 1), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('sep_conv_5x5', 1), ('avg_pool_3x3', 3)]], reduce_concat=range(2, 6))

+# dynamic_SNG_V3 = Genotype(normal=[[('max_pool_3x3', 1), ('avg_pool_3x3', 0)], [('max_pool_3x3', 1), ('sep_conv_5x5', 2)], [('skip_connect', 2), ('max_pool_3x3', 3)], [('max_pool_3x3', 3), ('sep_conv_3x3', 0)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 2)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 2)], [('sep_conv_3x3', 0), ('avg_pool_3x3', 1)]], reduce_concat=range(2, 6))
+# BPE models
+dynamic_SNG_V3 = Genotype(normal=[[('sep_conv_5x5', 0), ('sep_conv_5x5', 1)], [('max_pool_3x3', 0), ('skip_connect', 1)], [('sep_conv_3x3', 2), ('sep_conv_3x3', 1)], [('skip_connect', 4), ('sep_conv_5x5', 3)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('dil_conv_3x3', 0)], [('skip_connect', 2), ('avg_pool_3x3', 0)], [('avg_pool_3x3', 1), ('dil_conv_5x5', 2)], [('sep_conv_3x3', 3), ('dil_conv_5x5', 2)]], reduce_concat=range(2, 6))

BPE_models = {
'EA_BPE1': "Genotype(normal=[[('avg_pool_3x3', 0), ('skip_connect', 1)], [('skip_connect', 0), ('sep_conv_3x3', 1)], [('sep_conv_5x5', 0), ('dil_conv_5x5', 1)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 2)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_5x5', 0), ('sep_conv_5x5', 2)], [('sep_conv_3x3', 0), ('skip_connect', 2)], [('max_pool_3x3', 1), ('skip_connect', 2)]], reduce_concat=range(2, 6))",

@@ -159,14 +161,14 @@
'RS_BPE2': "Genotype(normal=[[('skip_connect', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 2), ('skip_connect', 0)], [('sep_conv_3x3', 1), ('avg_pool_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 2)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 1), ('dil_conv_3x3', 0)], [('max_pool_3x3', 1), ('dil_conv_3x3', 0)], [('dil_conv_5x5', 3), ('max_pool_3x3', 0)], [('skip_connect', 3), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))"
}
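
Note: the BPE genotypes above are stored as strings and converted with from_str when DARTS_NAS_model_dict below is built. The parser itself is not part of this diff; a minimal sketch of what it plausibly looks like, assuming the usual DARTS-style Genotype namedtuple:

# Assumed helper; the repo's actual from_str is not shown in this diff.
from collections import namedtuple

Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')

def from_str(s):
    # The stored strings are Genotype(...) literals, so evaluating them with
    # Genotype (and the builtin range) in scope rebuilds the namedtuple.
    return eval(s, {'Genotype': Genotype})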


DARTS_NAS_model_dict = {'MDENAS': MDENAS,
'DDPNAS_V1': DDPNAS_1,
'DDPNAS_V2': DDPNAS_2,
'DDPNAS_V3': DDPNAS_3,
'DDPNAS_V3_constraint_4': DDPNAS_V3_constraint_4,
'DARTS_V1': DARTS_V1,
'DARTS_V2': DARTS_V2,
+                        'dynamic_SNG_V3': dynamic_SNG_V3,
'EA_BPE1': from_str(BPE_models['EA_BPE1']),
'EA_BPE2': from_str(BPE_models['EA_BPE2']),
'RL_BPE1': from_str(BPE_models['RL_BPE1']),
Binary file added models/ofa/.DS_Store

