[Refactor] Refactor decode_head and segmentors and add preprocess_cfg
linfangjian.vendor authored and zhengmiao committed Jun 10, 2022
1 parent 19f3953 commit dd9c411
Showing 351 changed files with 1,918 additions and 703 deletions.
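
Every diff in this commit follows the same pattern: the per-dataset img_norm_cfg and the 'Pad' pipeline step are removed from the dataset configs, and the equivalent normalization and padding settings are added once per model as preprocess_cfg, which is passed to the EncoderDecoder segmentor. As a rough before/after sketch (not part of the commit; the pipeline is abridged to the lines visible in these hunks, the crop size is the ADE20K value, and all model keys other than preprocess_cfg are omitted):

# Before: each dataset config carried its own normalization and padding.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # ... resize / crop / flip steps elided, as in the hunks below ...
    dict(type='PhotoMetricDistortion'),
    dict(type='Pad', size=crop_size),
    dict(type='PackSegInputs')
]

# After: the pipeline drops img_norm_cfg and 'Pad'; the model config declares
# the same mean/std plus padding values once, in preprocess_cfg.
preprocess_cfg = dict(
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
    pad_val=0,        # padding value for images
    seg_pad_val=255)  # padding value for segmentation maps
model = dict(
    type='EncoderDecoder',
    preprocess_cfg=preprocess_cfg)  # backbone, decode_head, etc. omitted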
3 changes: 0 additions & 3 deletions configs/_base_/datasets/ade20k.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'ADE20KDataset'
 data_root = 'data/ade/ADEChallengeData2016'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/ade20k_640x640.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'ADE20KDataset'
 data_root = 'data/ade/ADEChallengeData2016'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (640, 640)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/chase_db1.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'ChaseDB1Dataset'
 data_root = 'data/CHASE_DB1'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 img_scale = (960, 999)
 crop_size = (128, 128)
 train_pipeline = [
@@ -12,7 +10,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/cityscapes.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'CityscapesDataset'
 data_root = 'data/cityscapes/'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 1024)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/cityscapes_1024x1024.py
@@ -1,6 +1,4 @@
 _base_ = './cityscapes.py'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (1024, 1024)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -9,7 +7,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/cityscapes_768x768.py
@@ -1,6 +1,4 @@
 _base_ = './cityscapes.py'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (768, 768)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -9,7 +7,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/cityscapes_769x769.py
@@ -1,6 +1,4 @@
 _base_ = './cityscapes.py'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (769, 769)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -9,7 +7,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/cityscapes_832x832.py
@@ -1,6 +1,4 @@
 _base_ = './cityscapes.py'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (832, 832)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -9,7 +7,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/coco-stuff10k.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'COCOStuffDataset'
 data_root = 'data/coco_stuff10k'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/coco-stuff164k.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'COCOStuffDataset'
 data_root = 'data/coco_stuff164k'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/drive.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'DRIVEDataset'
 data_root = 'data/DRIVE'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 img_scale = (584, 565)
 crop_size = (64, 64)
 train_pipeline = [
@@ -12,7 +10,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/hrf.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'HRFDataset'
 data_root = 'data/HRF'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 img_scale = (2336, 3504)
 crop_size = (256, 256)
 train_pipeline = [
@@ -12,7 +10,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
4 changes: 0 additions & 4 deletions configs/_base_/datasets/isaid.py
@@ -1,9 +1,6 @@
 # dataset settings
 dataset_type = 'iSAIDDataset'
 data_root = 'data/iSAID'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 """
 This crop_size setting is followed by the implementation of
 `PointFlow: Flowing Semantics Through Points for Aerial Image
@@ -19,7 +16,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/loveda.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'LoveDADataset'
 data_root = 'data/loveDA'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/pascal_context.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'PascalContextDataset'
 data_root = 'data/VOCdevkit/VOC2010/'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
 img_scale = (520, 520)
 crop_size = (480, 480)
@@ -14,7 +12,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/pascal_context_59.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'PascalContextDataset59'
 data_root = 'data/VOCdevkit/VOC2010/'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
 img_scale = (520, 520)
 crop_size = (480, 480)
@@ -14,7 +12,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/pascal_voc12.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'PascalVOCDataset'
 data_root = 'data/VOCdevkit/VOC2012'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/potsdam.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'PotsdamDataset'
 data_root = 'data/potsdam'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/stare.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'STAREDataset'
 data_root = 'data/STARE'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 img_scale = (605, 700)
 crop_size = (128, 128)
 train_pipeline = [
@@ -12,7 +10,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
3 changes: 0 additions & 3 deletions configs/_base_/datasets/vaihingen.py
@@ -1,8 +1,6 @@
 # dataset settings
 dataset_type = 'ISPRSDataset'
 data_root = 'data/vaihingen'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
@@ -11,7 +9,6 @@
     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
     dict(type='RandomFlip', prob=0.5),
     dict(type='PhotoMetricDistortion'),
-    dict(type='Pad', size=crop_size),
     dict(type='PackSegInputs')
 ]
 test_pipeline = [
7 changes: 7 additions & 0 deletions configs/_base_/models/ann_r50-d8.py
@@ -1,7 +1,14 @@
 # model settings
 norm_cfg = dict(type='SyncBN', requires_grad=True)
+preprocess_cfg = dict(
+    mean=[123.675, 116.28, 103.53],
+    std=[58.395, 57.12, 57.375],
+    to_rgb=True,
+    pad_val=0,
+    seg_pad_val=255)
 model = dict(
     type='EncoderDecoder',
+    preprocess_cfg=preprocess_cfg,
     pretrained='open-mmlab://resnet50_v1c',
     backbone=dict(
         type='ResNetV1c',
7 changes: 7 additions & 0 deletions configs/_base_/models/apcnet_r50-d8.py
@@ -1,7 +1,14 @@
 # model settings
 norm_cfg = dict(type='SyncBN', requires_grad=True)
+preprocess_cfg = dict(
+    mean=[123.675, 116.28, 103.53],
+    std=[58.395, 57.12, 57.375],
+    to_rgb=True,
+    pad_val=0,
+    seg_pad_val=255)
 model = dict(
     type='EncoderDecoder',
+    preprocess_cfg=preprocess_cfg,
     pretrained='open-mmlab://resnet50_v1c',
     backbone=dict(
         type='ResNetV1c',
7 changes: 7 additions & 0 deletions configs/_base_/models/bisenetv1_r18-d32.py
@@ -1,7 +1,14 @@
 # model settings
 norm_cfg = dict(type='SyncBN', requires_grad=True)
+preprocess_cfg = dict(
+    mean=[123.675, 116.28, 103.53],
+    std=[58.395, 57.12, 57.375],
+    to_rgb=True,
+    pad_val=0,
+    seg_pad_val=255)
 model = dict(
     type='EncoderDecoder',
+    preprocess_cfg=preprocess_cfg,
     backbone=dict(
         type='BiSeNetV1',
         in_channels=3,
7 changes: 7 additions & 0 deletions configs/_base_/models/bisenetv2.py
@@ -1,7 +1,14 @@
 # model settings
 norm_cfg = dict(type='SyncBN', requires_grad=True)
+preprocess_cfg = dict(
+    mean=[123.675, 116.28, 103.53],
+    std=[58.395, 57.12, 57.375],
+    to_rgb=True,
+    pad_val=0,
+    seg_pad_val=255)
 model = dict(
     type='EncoderDecoder',
+    preprocess_cfg=preprocess_cfg,
     pretrained=None,
     backbone=dict(
         type='BiSeNetV2',